author     Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-10-18 14:40:30 -0700
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-10-18 14:40:30 -0700
commit     a57793651ff1a09ef18bade998632435ca2dc13f (patch)
tree       fffc839d7b001f196421f09f0a06491588835fe1 /net/ipv4
parent     9cf52b2921fbe62566b6b2ee79f71203749c9e5e (diff)
parent     52f095ee88d8851866bc7694ab991ca5abf21d5e (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
* 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (51 commits)
  [IPV6]: Fix again the fl6_sock_lookup() fixed locking
  [NETFILTER]: nf_conntrack_tcp: fix connection reopening fix
  [IPV6]: Fix race in ipv6_flowlabel_opt() when inserting two labels
  [IPV6]: Lost locking in fl6_sock_lookup
  [IPV6]: Lost locking when inserting a flowlabel in ipv6_fl_list
  [NETFILTER]: xt_sctp: fix mistake to pass a pointer where array is required
  [NET]: Fix OOPS due to missing check in dev_parse_header().
  [TCP]: Remove lost_retrans zero seqno special cases
  [NET]: fix carrier-on bug?
  [NET]: Fix uninitialised variable in ip_frag_reasm()
  [IPSEC]: Rename mode to outer_mode and add inner_mode
  [IPSEC]: Disallow combinations of RO and AH/ESP/IPCOMP
  [IPSEC]: Use the top IPv4 route's peer instead of the bottom
  [IPSEC]: Store afinfo pointer in xfrm_mode
  [IPSEC]: Add missing BEET checks
  [IPSEC]: Move type and mode map into xfrm_state.c
  [IPSEC]: Fix length check in xfrm_parse_spi
  [IPSEC]: Move ip_summed zapping out of xfrm6_rcv_spi
  [IPSEC]: Get nexthdr from caller in xfrm6_rcv_spi
  [IPSEC]: Move tunnel parsing for IPv4 out of xfrm4_input
  ...
Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/inet_fragment.c      89
-rw-r--r--  net/ipv4/ip_fragment.c       159
-rw-r--r--  net/ipv4/tcp_input.c           6
-rw-r--r--  net/ipv4/xfrm4_input.c        40
-rw-r--r--  net/ipv4/xfrm4_mode_beet.c     1
-rw-r--r--  net/ipv4/xfrm4_mode_tunnel.c   1
-rw-r--r--  net/ipv4/xfrm4_output.c        2
-rw-r--r--  net/ipv4/xfrm4_policy.c       27
-rw-r--r--  net/ipv4/xfrm4_state.c         1
-rw-r--r--  net/ipv4/xfrm4_tunnel.c       11
10 files changed, 175 insertions, 162 deletions
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 484cf512858f..e15e04fc6661 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -136,7 +136,9 @@ void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
*work -= f->qsize;
atomic_sub(f->qsize, &f->mem);
- f->destructor(q);
+ if (f->destructor)
+ f->destructor(q);
+ kfree(q);
}
EXPORT_SYMBOL(inet_frag_destroy);
@@ -172,3 +174,88 @@ int inet_frag_evictor(struct inet_frags *f)
return evicted;
}
EXPORT_SYMBOL(inet_frag_evictor);
+
+static struct inet_frag_queue *inet_frag_intern(struct inet_frag_queue *qp_in,
+ struct inet_frags *f, unsigned int hash, void *arg)
+{
+ struct inet_frag_queue *qp;
+#ifdef CONFIG_SMP
+ struct hlist_node *n;
+#endif
+
+ write_lock(&f->lock);
+#ifdef CONFIG_SMP
+ /* With SMP race we have to recheck hash table, because
+ * such entry could be created on other cpu, while we
+ * promoted read lock to write lock.
+ */
+ hlist_for_each_entry(qp, n, &f->hash[hash], list) {
+ if (f->match(qp, arg)) {
+ atomic_inc(&qp->refcnt);
+ write_unlock(&f->lock);
+ qp_in->last_in |= COMPLETE;
+ inet_frag_put(qp_in, f);
+ return qp;
+ }
+ }
+#endif
+ qp = qp_in;
+ if (!mod_timer(&qp->timer, jiffies + f->ctl->timeout))
+ atomic_inc(&qp->refcnt);
+
+ atomic_inc(&qp->refcnt);
+ hlist_add_head(&qp->list, &f->hash[hash]);
+ list_add_tail(&qp->lru_list, &f->lru_list);
+ f->nqueues++;
+ write_unlock(&f->lock);
+ return qp;
+}
+
+static struct inet_frag_queue *inet_frag_alloc(struct inet_frags *f, void *arg)
+{
+ struct inet_frag_queue *q;
+
+ q = kzalloc(f->qsize, GFP_ATOMIC);
+ if (q == NULL)
+ return NULL;
+
+ f->constructor(q, arg);
+ atomic_add(f->qsize, &f->mem);
+ setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
+ spin_lock_init(&q->lock);
+ atomic_set(&q->refcnt, 1);
+
+ return q;
+}
+
+static struct inet_frag_queue *inet_frag_create(struct inet_frags *f,
+ void *arg, unsigned int hash)
+{
+ struct inet_frag_queue *q;
+
+ q = inet_frag_alloc(f, arg);
+ if (q == NULL)
+ return NULL;
+
+ return inet_frag_intern(q, f, hash, arg);
+}
+
+struct inet_frag_queue *inet_frag_find(struct inet_frags *f, void *key,
+ unsigned int hash)
+{
+ struct inet_frag_queue *q;
+ struct hlist_node *n;
+
+ read_lock(&f->lock);
+ hlist_for_each_entry(q, n, &f->hash[hash], list) {
+ if (f->match(q, key)) {
+ atomic_inc(&q->refcnt);
+ read_unlock(&f->lock);
+ return q;
+ }
+ }
+ read_unlock(&f->lock);
+
+ return inet_frag_create(f, key, hash);
+}
+EXPORT_SYMBOL(inet_frag_find);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 443b3f89192f..2143bf30597a 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -108,6 +108,11 @@ int ip_frag_mem(void)
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
struct net_device *dev);
+struct ip4_create_arg {
+ struct iphdr *iph;
+ u32 user;
+};
+
static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
{
return jhash_3words((__force u32)id << 16 | prot,
@@ -123,6 +128,19 @@ static unsigned int ip4_hashfn(struct inet_frag_queue *q)
return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
}
+static int ip4_frag_match(struct inet_frag_queue *q, void *a)
+{
+ struct ipq *qp;
+ struct ip4_create_arg *arg = a;
+
+ qp = container_of(q, struct ipq, q);
+ return (qp->id == arg->iph->id &&
+ qp->saddr == arg->iph->saddr &&
+ qp->daddr == arg->iph->daddr &&
+ qp->protocol == arg->iph->protocol &&
+ qp->user == arg->user);
+}
+
/* Memory Tracking Functions. */
static __inline__ void frag_kfree_skb(struct sk_buff *skb, int *work)
{
@@ -132,6 +150,20 @@ static __inline__ void frag_kfree_skb(struct sk_buff *skb, int *work)
kfree_skb(skb);
}
+static void ip4_frag_init(struct inet_frag_queue *q, void *a)
+{
+ struct ipq *qp = container_of(q, struct ipq, q);
+ struct ip4_create_arg *arg = a;
+
+ qp->protocol = arg->iph->protocol;
+ qp->id = arg->iph->id;
+ qp->saddr = arg->iph->saddr;
+ qp->daddr = arg->iph->daddr;
+ qp->user = arg->user;
+ qp->peer = sysctl_ipfrag_max_dist ?
+ inet_getpeer(arg->iph->saddr, 1) : NULL;
+}
+
static __inline__ void ip4_frag_free(struct inet_frag_queue *q)
{
struct ipq *qp;
@@ -139,17 +171,6 @@ static __inline__ void ip4_frag_free(struct inet_frag_queue *q)
qp = container_of(q, struct ipq, q);
if (qp->peer)
inet_putpeer(qp->peer);
- kfree(qp);
-}
-
-static __inline__ struct ipq *frag_alloc_queue(void)
-{
- struct ipq *qp = kzalloc(sizeof(struct ipq), GFP_ATOMIC);
-
- if (!qp)
- return NULL;
- atomic_add(sizeof(struct ipq), &ip4_frags.mem);
- return qp;
}
@@ -185,7 +206,9 @@ static void ip_evictor(void)
*/
static void ip_expire(unsigned long arg)
{
- struct ipq *qp = (struct ipq *) arg;
+ struct ipq *qp;
+
+ qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
spin_lock(&qp->q.lock);
@@ -210,112 +233,30 @@ out:
ipq_put(qp);
}
-/* Creation primitives. */
-
-static struct ipq *ip_frag_intern(struct ipq *qp_in)
+/* Find the correct entry in the "incomplete datagrams" queue for
+ * this IP datagram, and create new one, if nothing is found.
+ */
+static inline struct ipq *ip_find(struct iphdr *iph, u32 user)
{
- struct ipq *qp;
-#ifdef CONFIG_SMP
- struct hlist_node *n;
-#endif
+ struct inet_frag_queue *q;
+ struct ip4_create_arg arg;
unsigned int hash;
- write_lock(&ip4_frags.lock);
- hash = ipqhashfn(qp_in->id, qp_in->saddr, qp_in->daddr,
- qp_in->protocol);
-#ifdef CONFIG_SMP
- /* With SMP race we have to recheck hash table, because
- * such entry could be created on other cpu, while we
- * promoted read lock to write lock.
- */
- hlist_for_each_entry(qp, n, &ip4_frags.hash[hash], q.list) {
- if (qp->id == qp_in->id &&
- qp->saddr == qp_in->saddr &&
- qp->daddr == qp_in->daddr &&
- qp->protocol == qp_in->protocol &&
- qp->user == qp_in->user) {
- atomic_inc(&qp->q.refcnt);
- write_unlock(&ip4_frags.lock);
- qp_in->q.last_in |= COMPLETE;
- ipq_put(qp_in);
- return qp;
- }
- }
-#endif
- qp = qp_in;
-
- if (!mod_timer(&qp->q.timer, jiffies + ip4_frags_ctl.timeout))
- atomic_inc(&qp->q.refcnt);
+ arg.iph = iph;
+ arg.user = user;
+ hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
- atomic_inc(&qp->q.refcnt);
- hlist_add_head(&qp->q.list, &ip4_frags.hash[hash]);
- INIT_LIST_HEAD(&qp->q.lru_list);
- list_add_tail(&qp->q.lru_list, &ip4_frags.lru_list);
- ip4_frags.nqueues++;
- write_unlock(&ip4_frags.lock);
- return qp;
-}
-
-/* Add an entry to the 'ipq' queue for a newly received IP datagram. */
-static struct ipq *ip_frag_create(struct iphdr *iph, u32 user)
-{
- struct ipq *qp;
-
- if ((qp = frag_alloc_queue()) == NULL)
+ q = inet_frag_find(&ip4_frags, &arg, hash);
+ if (q == NULL)
goto out_nomem;
- qp->protocol = iph->protocol;
- qp->id = iph->id;
- qp->saddr = iph->saddr;
- qp->daddr = iph->daddr;
- qp->user = user;
- qp->peer = sysctl_ipfrag_max_dist ? inet_getpeer(iph->saddr, 1) : NULL;
-
- /* Initialize a timer for this entry. */
- init_timer(&qp->q.timer);
- qp->q.timer.data = (unsigned long) qp; /* pointer to queue */
- qp->q.timer.function = ip_expire; /* expire function */
- spin_lock_init(&qp->q.lock);
- atomic_set(&qp->q.refcnt, 1);
-
- return ip_frag_intern(qp);
+ return container_of(q, struct ipq, q);
out_nomem:
LIMIT_NETDEBUG(KERN_ERR "ip_frag_create: no memory left !\n");
return NULL;
}
-/* Find the correct entry in the "incomplete datagrams" queue for
- * this IP datagram, and create new one, if nothing is found.
- */
-static inline struct ipq *ip_find(struct iphdr *iph, u32 user)
-{
- __be16 id = iph->id;
- __be32 saddr = iph->saddr;
- __be32 daddr = iph->daddr;
- __u8 protocol = iph->protocol;
- unsigned int hash;
- struct ipq *qp;
- struct hlist_node *n;
-
- read_lock(&ip4_frags.lock);
- hash = ipqhashfn(id, saddr, daddr, protocol);
- hlist_for_each_entry(qp, n, &ip4_frags.hash[hash], q.list) {
- if (qp->id == id &&
- qp->saddr == saddr &&
- qp->daddr == daddr &&
- qp->protocol == protocol &&
- qp->user == user) {
- atomic_inc(&qp->q.refcnt);
- read_unlock(&ip4_frags.lock);
- return qp;
- }
- }
- read_unlock(&ip4_frags.lock);
-
- return ip_frag_create(iph, user);
-}
-
/* Is the fragment too far ahead to be part of ipq? */
static inline int ip_frag_too_far(struct ipq *qp)
{
@@ -545,7 +486,6 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
if (prev) {
head = prev->next;
fp = skb_clone(head, GFP_ATOMIC);
-
if (!fp)
goto out_nomem;
@@ -571,7 +511,6 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
goto out_oversize;
/* Head of list must not be cloned. */
- err = -ENOMEM;
if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
goto out_nomem;
@@ -627,6 +566,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
out_nomem:
LIMIT_NETDEBUG(KERN_ERR "IP: queue_glue: no memory for gluing "
"queue %p\n", qp);
+ err = -ENOMEM;
goto out_fail;
out_oversize:
if (net_ratelimit())
@@ -671,9 +611,12 @@ void __init ipfrag_init(void)
{
ip4_frags.ctl = &ip4_frags_ctl;
ip4_frags.hashfn = ip4_hashfn;
+ ip4_frags.constructor = ip4_frag_init;
ip4_frags.destructor = ip4_frag_free;
ip4_frags.skb_free = NULL;
ip4_frags.qsize = sizeof(struct ipq);
+ ip4_frags.match = ip4_frag_match;
+ ip4_frags.frag_expire = ip_expire;
inet_frags_init(&ip4_frags);
}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 0f00966b1784..9288220b73a8 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1121,7 +1121,7 @@ static int tcp_mark_lost_retrans(struct sock *sk, u32 received_upto)
struct sk_buff *skb;
int flag = 0;
int cnt = 0;
- u32 new_low_seq = 0;
+ u32 new_low_seq = tp->snd_nxt;
tcp_for_write_queue(skb, sk) {
u32 ack_seq = TCP_SKB_CB(skb)->ack_seq;
@@ -1153,7 +1153,7 @@ static int tcp_mark_lost_retrans(struct sock *sk, u32 received_upto)
NET_INC_STATS_BH(LINUX_MIB_TCPLOSTRETRANSMIT);
}
} else {
- if (!new_low_seq || before(ack_seq, new_low_seq))
+ if (before(ack_seq, new_low_seq))
new_low_seq = ack_seq;
cnt += tcp_skb_pcount(skb);
}
@@ -1242,7 +1242,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE)>>3;
int reord = tp->packets_out;
int prior_fackets;
- u32 highest_sack_end_seq = 0;
+ u32 highest_sack_end_seq = tp->lost_retrans_low;
int flag = 0;
int found_dup_sack = 0;
int cached_fack_count;
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
index e9bbfde19ac3..5e95c8a07efb 100644
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -16,19 +16,6 @@
#include <net/ip.h>
#include <net/xfrm.h>
-static int xfrm4_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq)
-{
- switch (nexthdr) {
- case IPPROTO_IPIP:
- case IPPROTO_IPV6:
- *spi = ip_hdr(skb)->saddr;
- *seq = 0;
- return 0;
- }
-
- return xfrm_parse_spi(skb, nexthdr, spi, seq);
-}
-
#ifdef CONFIG_NETFILTER
static inline int xfrm4_rcv_encap_finish(struct sk_buff *skb)
{
@@ -46,28 +33,29 @@ drop:
}
#endif
-static int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type)
+int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
+ int encap_type)
{
- __be32 spi, seq;
+ int err;
+ __be32 seq;
struct xfrm_state *xfrm_vec[XFRM_MAX_DEPTH];
struct xfrm_state *x;
int xfrm_nr = 0;
int decaps = 0;
- int err = xfrm4_parse_spi(skb, ip_hdr(skb)->protocol, &spi, &seq);
unsigned int nhoff = offsetof(struct iphdr, protocol);
- if (err != 0)
+ seq = 0;
+ if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0)
goto drop;
do {
const struct iphdr *iph = ip_hdr(skb);
- int nexthdr;
if (xfrm_nr == XFRM_MAX_DEPTH)
goto drop;
x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, spi,
- iph->protocol != IPPROTO_IPV6 ? iph->protocol : IPPROTO_IPIP, AF_INET);
+ nexthdr, AF_INET);
if (x == NULL)
goto drop;
@@ -103,15 +91,15 @@ static int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type)
xfrm_vec[xfrm_nr++] = x;
- if (x->mode->input(x, skb))
+ if (x->outer_mode->input(x, skb))
goto drop;
- if (x->props.mode == XFRM_MODE_TUNNEL) {
+ if (x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) {
decaps = 1;
break;
}
- err = xfrm_parse_spi(skb, ip_hdr(skb)->protocol, &spi, &seq);
+ err = xfrm_parse_spi(skb, nexthdr, &spi, &seq);
if (err < 0)
goto drop;
} while (!err);
@@ -165,6 +153,7 @@ drop:
kfree_skb(skb);
return 0;
}
+EXPORT_SYMBOL(xfrm4_rcv_encap);
/* If it's a keepalive packet, then just eat it.
* If it's an encapsulated packet, then pass it to the
@@ -252,11 +241,8 @@ int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
__skb_pull(skb, len);
skb_reset_transport_header(skb);
- /* modify the protocol (it's ESP!) */
- iph->protocol = IPPROTO_ESP;
-
/* process ESP */
- ret = xfrm4_rcv_encap(skb, encap_type);
+ ret = xfrm4_rcv_encap(skb, IPPROTO_ESP, 0, encap_type);
return ret;
drop:
@@ -266,7 +252,7 @@ drop:
int xfrm4_rcv(struct sk_buff *skb)
{
- return xfrm4_rcv_encap(skb, 0);
+ return xfrm4_rcv_spi(skb, ip_hdr(skb)->protocol, 0);
}
EXPORT_SYMBOL(xfrm4_rcv);
diff --git a/net/ipv4/xfrm4_mode_beet.c b/net/ipv4/xfrm4_mode_beet.c
index 73d2338bec55..e42e122414be 100644
--- a/net/ipv4/xfrm4_mode_beet.c
+++ b/net/ipv4/xfrm4_mode_beet.c
@@ -114,6 +114,7 @@ static struct xfrm_mode xfrm4_beet_mode = {
.output = xfrm4_beet_output,
.owner = THIS_MODULE,
.encap = XFRM_MODE_BEET,
+ .flags = XFRM_MODE_FLAG_TUNNEL,
};
static int __init xfrm4_beet_init(void)
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index 1ae9d32276f0..e4deecba6dd2 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -139,6 +139,7 @@ static struct xfrm_mode xfrm4_tunnel_mode = {
.output = xfrm4_tunnel_output,
.owner = THIS_MODULE,
.encap = XFRM_MODE_TUNNEL,
+ .flags = XFRM_MODE_FLAG_TUNNEL,
};
static int __init xfrm4_tunnel_init(void)
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index a4edd666318b..c4a7156962bd 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -47,7 +47,7 @@ static inline int xfrm4_output_one(struct sk_buff *skb)
struct iphdr *iph;
int err;
- if (x->props.mode == XFRM_MODE_TUNNEL) {
+ if (x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) {
err = xfrm4_tunnel_check_size(skb);
if (err)
goto error_nolock;
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 329825ca68fe..cc86fb110dd8 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -117,7 +117,7 @@ __xfrm4_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
header_len += xfrm[i]->props.header_len;
trailer_len += xfrm[i]->props.trailer_len;
- if (xfrm[i]->props.mode == XFRM_MODE_TUNNEL) {
+ if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
unsigned short encap_family = xfrm[i]->props.family;
switch (encap_family) {
case AF_INET:
@@ -151,7 +151,6 @@ __xfrm4_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
i = 0;
for (; dst_prev != &rt->u.dst; dst_prev = dst_prev->child) {
struct xfrm_dst *x = (struct xfrm_dst*)dst_prev;
- struct xfrm_state_afinfo *afinfo;
x->u.rt.fl = *fl;
dst_prev->xfrm = xfrm[i++];
@@ -169,27 +168,17 @@ __xfrm4_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
/* Copy neighbour for reachability confirmation */
dst_prev->neighbour = neigh_clone(rt->u.dst.neighbour);
dst_prev->input = rt->u.dst.input;
- /* XXX: When IPv6 module can be unloaded, we should manage reference
- * to xfrm6_output in afinfo->output. Miyazawa
- * */
- afinfo = xfrm_state_get_afinfo(dst_prev->xfrm->props.family);
- if (!afinfo) {
- dst = *dst_p;
- err = -EAFNOSUPPORT;
- goto error;
- }
- dst_prev->output = afinfo->output;
- xfrm_state_put_afinfo(afinfo);
- if (dst_prev->xfrm->props.family == AF_INET && rt->peer)
- atomic_inc(&rt->peer->refcnt);
- x->u.rt.peer = rt->peer;
+ dst_prev->output = dst_prev->xfrm->outer_mode->afinfo->output;
+ if (rt0->peer)
+ atomic_inc(&rt0->peer->refcnt);
+ x->u.rt.peer = rt0->peer;
/* Sheit... I remember I did this right. Apparently,
* it was magically lost, so this code needs audit */
x->u.rt.rt_flags = rt0->rt_flags&(RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL);
- x->u.rt.rt_type = rt->rt_type;
+ x->u.rt.rt_type = rt0->rt_type;
x->u.rt.rt_src = rt0->rt_src;
x->u.rt.rt_dst = rt0->rt_dst;
- x->u.rt.rt_gateway = rt->rt_gateway;
+ x->u.rt.rt_gateway = rt0->rt_gateway;
x->u.rt.rt_spec_dst = rt0->rt_spec_dst;
x->u.rt.idev = rt0->idev;
in_dev_hold(rt0->idev);
@@ -291,7 +280,7 @@ static void xfrm4_dst_destroy(struct dst_entry *dst)
if (likely(xdst->u.rt.idev))
in_dev_put(xdst->u.rt.idev);
- if (dst->xfrm && dst->xfrm->props.family == AF_INET && likely(xdst->u.rt.peer))
+ if (likely(xdst->u.rt.peer))
inet_putpeer(xdst->u.rt.peer);
xfrm_dst_destroy(xdst);
}
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c
index 93e2c061cdda..13d54a1c3337 100644
--- a/net/ipv4/xfrm4_state.c
+++ b/net/ipv4/xfrm4_state.c
@@ -49,6 +49,7 @@ __xfrm4_init_tempsel(struct xfrm_state *x, struct flowi *fl,
static struct xfrm_state_afinfo xfrm4_state_afinfo = {
.family = AF_INET,
+ .owner = THIS_MODULE,
.init_flags = xfrm4_init_flags,
.init_tempsel = __xfrm4_init_tempsel,
.output = xfrm4_output,
diff --git a/net/ipv4/xfrm4_tunnel.c b/net/ipv4/xfrm4_tunnel.c
index 1312417608e2..326845195620 100644
--- a/net/ipv4/xfrm4_tunnel.c
+++ b/net/ipv4/xfrm4_tunnel.c
@@ -18,7 +18,7 @@ static int ipip_output(struct xfrm_state *x, struct sk_buff *skb)
static int ipip_xfrm_rcv(struct xfrm_state *x, struct sk_buff *skb)
{
- return IPPROTO_IP;
+ return ip_hdr(skb)->protocol;
}
static int ipip_init_state(struct xfrm_state *x)
@@ -48,20 +48,25 @@ static struct xfrm_type ipip_type = {
.output = ipip_output
};
+static int xfrm_tunnel_rcv(struct sk_buff *skb)
+{
+ return xfrm4_rcv_spi(skb, IPPROTO_IP, ip_hdr(skb)->saddr);
+}
+
static int xfrm_tunnel_err(struct sk_buff *skb, u32 info)
{
return -ENOENT;
}
static struct xfrm_tunnel xfrm_tunnel_handler = {
- .handler = xfrm4_rcv,
+ .handler = xfrm_tunnel_rcv,
.err_handler = xfrm_tunnel_err,
.priority = 2,
};
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
static struct xfrm_tunnel xfrm64_tunnel_handler = {
- .handler = xfrm4_rcv,
+ .handler = xfrm_tunnel_rcv,
.err_handler = xfrm_tunnel_err,
.priority = 2,
};