From ea2bc483ff5caada7c4aa0d5fbf87d3a6590273d Mon Sep 17 00:00:00 2001 From: Tsutomu Fujii Date: Tue, 17 Apr 2007 12:49:53 -0700 Subject: [SCTP]: Fix assertion (!atomic_read(&sk->sk_rmem_alloc)) failed message In the current implementation, LKSCTP does receive buffer accounting for data in sctp_receive_queue and pd_lobby. However, LKSCTP doesn't do accounting for data in frag_list when data is fragmented. In addition, LKSCTP doesn't do accounting for data in the reasm and lobby queues in struct sctp_ulpq. When there is data in these queues, the assertion-failed message is printed in inet_sock_destruct because sk_rmem_alloc of oldsk does not become 0 when the socket is destroyed. Signed-off-by: Tsutomu Fujii Signed-off-by: Vlad Yasevich Signed-off-by: David S. Miller --- net/sctp/socket.c | 48 ++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 44 insertions(+), 4 deletions(-) (limited to 'net/sctp') diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 536298c2eda2..523e73ee354a 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -5638,6 +5638,36 @@ void sctp_wait_for_close(struct sock *sk, long timeout) finish_wait(sk->sk_sleep, &wait); } +static void sctp_sock_rfree_frag(struct sk_buff *skb) +{ + struct sk_buff *frag; + + if (!skb->data_len) + goto done; + + /* Don't forget the fragments. */ + for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) + sctp_sock_rfree_frag(frag); + +done: + sctp_sock_rfree(skb); +} + +static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk) +{ + struct sk_buff *frag; + + if (!skb->data_len) + goto done; + + /* Don't forget the fragments. */ + for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) + sctp_skb_set_owner_r_frag(frag, sk); + +done: + sctp_skb_set_owner_r(skb, sk); +} + /* Populate the fields of the newsk from the oldsk and migrate the assoc * and its messages to the newsk. */ @@ -5692,10 +5722,10 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) { event = sctp_skb2event(skb); if (event->asoc == assoc) { - sctp_sock_rfree(skb); + sctp_sock_rfree_frag(skb); __skb_unlink(skb, &oldsk->sk_receive_queue); __skb_queue_tail(&newsk->sk_receive_queue, skb); - sctp_skb_set_owner_r(skb, newsk); + sctp_skb_set_owner_r_frag(skb, newsk); } } @@ -5723,10 +5753,10 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) { event = sctp_skb2event(skb); if (event->asoc == assoc) { - sctp_sock_rfree(skb); + sctp_sock_rfree_frag(skb); __skb_unlink(skb, &oldsp->pd_lobby); __skb_queue_tail(queue, skb); - sctp_skb_set_owner_r(skb, newsk); + sctp_skb_set_owner_r_frag(skb, newsk); } } @@ -5738,6 +5768,16 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, } + sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp) { + sctp_sock_rfree_frag(skb); + sctp_skb_set_owner_r_frag(skb, newsk); + } + + sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp) { + sctp_sock_rfree_frag(skb); + sctp_skb_set_owner_r_frag(skb, newsk); + } + /* Set the type of socket to indicate that it is peeled off from the * original UDP-style socket or created with the accept() call on a * TCP-style socket.. -- cgit v1.2.3 From 0304ff8a2d5f57defb011c7f261b4c1b3eff96d1 Mon Sep 17 00:00:00 2001 From: Paolo Galtieri Date: Tue, 17 Apr 2007 12:52:36 -0700 Subject: [SCTP]: Unmap v4mapped addresses during SCTP_BINDX_REM_ADDR operation. 
During the sctp_bindx() call to add additional addresses to the endpoint, any v4mapped addresses are converted and stored as regular v4 addresses. However, when trying to remove these addresses, the v4mapped addresses are not converted and the operation fails. This patch unmaps the addresses during the remove operation as well. Signed-off-by: Paolo Galtieri Signed-off-by: Vlad Yasevich Signed-off-by: David S. Miller --- net/sctp/socket.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'net/sctp') diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 523e73ee354a..a1d026f12b0e 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -627,6 +627,12 @@ int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt) retval = -EINVAL; goto err_bindx_rem; } + + if (!af->addr_valid(sa_addr, sp, NULL)) { + retval = -EADDRNOTAVAIL; + goto err_bindx_rem; + } + if (sa_addr->v4.sin_port != htons(bp->port)) { retval = -EINVAL; goto err_bindx_rem; -- cgit v1.2.3 From d0cf0d9940ef27b46fcbbd9e0cc8427c30fe05eb Mon Sep 17 00:00:00 2001 From: Vlad Yasevich Date: Wed, 18 Apr 2007 14:11:06 -0700 Subject: [SCTP]: Do not interleave non-fragments when in partial delivery The way partial delivery is currently implemented, it is possible to interleave a message (either from another stream, or unordered) that is not part of the partial delivery process. The only way to do this is for a message to not be a fragment and to be 'in order' or unordered for a given stream. This will result in bypassing the reassembly/ordering queues where things live during partial delivery, and the message will be delivered to the socket in the middle of partial delivery. This is a two-fold problem, in that: 1. the app now must check the stream-id and flags, which it may not be doing. 2. this clears the partial delivery state from the association and results in the ULP hanging. This patch is a band-aid over a much bigger problem in that we don't do stream interleave. Signed-off-by: Vlad Yasevich Signed-off-by: David S. Miller --- net/sctp/ulpqueue.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'net/sctp') diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index bfb197e37da3..b29e3e4b72c9 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c @@ -190,7 +190,14 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) if (!sctp_sk(sk)->pd_mode) { queue = &sk->sk_receive_queue; } else if (ulpq->pd_mode) { - if (event->msg_flags & MSG_NOTIFICATION) + /* If the association is in partial delivery, we + * need to finish delivering the partially processed + * packet before passing any other data. This is + * because we don't truly support stream interleaving. + */ + if ((event->msg_flags & MSG_NOTIFICATION) || + (SCTP_DATA_NOT_FRAG == + (event->msg_flags & SCTP_DATA_FRAG_MASK))) queue = &sctp_sk(sk)->pd_lobby; else { clear_pd = event->msg_flags & MSG_EOR; -- cgit v1.2.3 From 31c7711b509d470ab1e175e7bb98ea66a82aa916 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sat, 10 Mar 2007 19:04:55 -0300 Subject: [SK_BUFF]: Some more simple skb_reset_network_header conversions This time of the type: skb->nh.iph = (struct iphdr *)skb->data; That is completely equivalent to: skb->nh.raw = skb->data; Wonder why people love casts... :-) Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. 
Miller --- net/sctp/input.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net/sctp') diff --git a/net/sctp/input.c b/net/sctp/input.c index 71db66873695..2b0863aba3fb 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -528,7 +528,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info) /* Fix up skb to look at the embedded net header. */ saveip = skb->nh.raw; savesctp = skb->h.raw; - skb->nh.iph = iph; + skb_reset_network_header(skb); skb->h.raw = (char *)sh; sk = sctp_err_lookup(AF_INET, skb, sh, &asoc, &transport); /* Put back, the original pointers. */ -- cgit v1.2.3 From 1ced98e81d1c2f1ce965ecf8d0032e02ffa07bf0 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sat, 10 Mar 2007 19:57:15 -0300 Subject: [SK_BUFF] ipv6: More skb_reset_network_header conversions related to skb_pull Now related to this form: skb->nh.ipv6h = (struct ipv6hdr *)skb_put(skb, length); That, as the others, is done when skb->tail is still equal to skb->data, making the conversion to skb_reset_network_header possible. Also one more case equivalent to skb->nh.raw = skb->data, of this form: iph = (struct ipv6hdr *)skb->data; skb->nh.ipv6h = iph; Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. Miller --- net/sctp/ipv6.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'net/sctp') diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 0b9c49b3a100..5f9b145b0b90 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -122,7 +122,6 @@ SCTP_STATIC void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, int type, int code, int offset, __be32 info) { struct inet6_dev *idev; - struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; struct sctphdr *sh = (struct sctphdr *)(skb->data + offset); struct sock *sk; struct sctp_association *asoc; @@ -136,7 +135,7 @@ SCTP_STATIC void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, /* Fix up skb to look at the embedded net header. */ saveip = skb->nh.raw; savesctp = skb->h.raw; - skb->nh.ipv6h = iph; + skb_reset_network_header(skb); skb->h.raw = (char *)sh; sk = sctp_err_lookup(AF_INET6, skb, sh, &asoc, &transport); /* Put back, the original pointers. */ -- cgit v1.2.3 From eddc9ec53be2ecdbf4efe0efd4a83052594f0ac0 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 20 Apr 2007 22:47:35 -0700 Subject: [SK_BUFF]: Introduce ip_hdr(), remove skb->nh.iph Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. Miller --- net/sctp/input.c | 2 +- net/sctp/ipv6.c | 4 ++-- net/sctp/protocol.c | 8 ++++---- net/sctp/sm_make_chunk.c | 4 ++-- net/sctp/sm_statefuns.c | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) (limited to 'net/sctp') diff --git a/net/sctp/input.c b/net/sctp/input.c index 2b0863aba3fb..595fe32b3d41 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -154,7 +154,7 @@ int sctp_rcv(struct sk_buff *skb) if (skb->len < sizeof(struct sctp_chunkhdr)) goto discard_it; - family = ipver2af(skb->nh.iph->version); + family = ipver2af(ip_hdr(skb)->version); af = sctp_get_af_specific(family); if (unlikely(!af)) goto discard_it; diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 5f9b145b0b90..742f9ff42fbf 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -770,9 +770,9 @@ static void sctp_inet6_skb_msgname(struct sk_buff *skb, char *msgname, /* Map ipv4 address into v4-mapped-on-v6 address. 
*/ if (sctp_sk(skb->sk)->v4mapped && - skb->nh.iph->version == 4) { + ip_hdr(skb)->version == 4) { sctp_v4_map_v6((union sctp_addr *)sin6); - sin6->sin6_addr.s6_addr32[3] = skb->nh.iph->saddr; + sin6->sin6_addr.s6_addr32[3] = ip_hdr(skb)->saddr; return; } diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index e17a823ca90f..08f92ba4ebd7 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -238,10 +238,10 @@ static void sctp_v4_from_skb(union sctp_addr *addr, struct sk_buff *skb, sh = (struct sctphdr *) skb->h.raw; if (is_saddr) { *port = sh->source; - from = &skb->nh.iph->saddr; + from = &ip_hdr(skb)->saddr; } else { *port = sh->dest; - from = &skb->nh.iph->daddr; + from = &ip_hdr(skb)->daddr; } memcpy(&addr->v4.sin_addr.s_addr, from, sizeof(struct in_addr)); } @@ -530,7 +530,7 @@ static int sctp_v4_skb_iif(const struct sk_buff *skb) /* Was this packet marked by Explicit Congestion Notification? */ static int sctp_v4_is_ce(const struct sk_buff *skb) { - return INET_ECN_is_ce(skb->nh.iph->tos); + return INET_ECN_is_ce(ip_hdr(skb)->tos); } /* Create and initialize a new sk for the socket returned by accept(). */ @@ -739,7 +739,7 @@ static void sctp_inet_skb_msgname(struct sk_buff *skb, char *msgname, int *len) sin = (struct sockaddr_in *)msgname; sh = (struct sctphdr *)skb->h.raw; sin->sin_port = sh->source; - sin->sin_addr.s_addr = skb->nh.iph->saddr; + sin->sin_addr.s_addr = ip_hdr(skb)->saddr; } } diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index f7fb29d5a0c7..60c5b59d4c65 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -86,7 +86,7 @@ int sctp_chunk_iif(const struct sctp_chunk *chunk) struct sctp_af *af; int iif = 0; - af = sctp_get_af_specific(ipver2af(chunk->skb->nh.iph->version)); + af = sctp_get_af_specific(ipver2af(ip_hdr(chunk->skb)->version)); if (af) iif = af->skb_iif(chunk->skb); @@ -1233,7 +1233,7 @@ struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *ep, asoc->temp = 1; skb = chunk->skb; /* Create an entry for the source address of the packet. */ - af = sctp_get_af_specific(ipver2af(skb->nh.iph->version)); + af = sctp_get_af_specific(ipver2af(ip_hdr(skb)->version)); if (unlikely(!af)) goto fail; af->from_skb(&asoc->c.peer_addr, skb, 1); diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index e9097cf614ba..bf502c499c81 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -5286,7 +5286,7 @@ static int sctp_eat_data(const struct sctp_association *asoc, chunk->ecn_ce_done = 1; af = sctp_get_af_specific( - ipver2af(chunk->skb->nh.iph->version)); + ipver2af(ip_hdr(chunk->skb)->version)); if (af && af->is_ce(chunk->skb) && asoc->peer.ecn_capable) { /* Do real work as sideffect. */ -- cgit v1.2.3 From 0660e03f6b18f19b6bbafe7583265a51b90daf36 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 25 Apr 2007 17:54:47 -0700 Subject: [SK_BUFF]: Introduce ipv6_hdr(), remove skb->nh.ipv6h Now the skb->nh union has just one member, .raw, i.e. it is just like the skb->mac union, strange, no? I'm just leaving it like that till the transport layer is done with, when we'll rename skb->mac.raw to skb->mac_header (or ->mac_header_offset?), ditto for ->{h,nh}. Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. 
Miller --- net/sctp/ipv6.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'net/sctp') diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 742f9ff42fbf..001be2de0b3c 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -393,10 +393,10 @@ static void sctp_v6_from_skb(union sctp_addr *addr,struct sk_buff *skb, sh = (struct sctphdr *) skb->h.raw; if (is_saddr) { *port = sh->source; - from = &skb->nh.ipv6h->saddr; + from = &ipv6_hdr(skb)->saddr; } else { *port = sh->dest; - from = &skb->nh.ipv6h->daddr; + from = &ipv6_hdr(skb)->daddr; } ipv6_addr_copy(&addr->v6.sin6_addr, from); } @@ -698,7 +698,7 @@ static int sctp_v6_skb_iif(const struct sk_buff *skb) /* Was this packet marked by Explicit Congestion Notification? */ static int sctp_v6_is_ce(const struct sk_buff *skb) { - return *((__u32 *)(skb->nh.ipv6h)) & htonl(1<<20); + return *((__u32 *)(ipv6_hdr(skb))) & htonl(1 << 20); } /* Dump the v6 addr to the seq file. */ @@ -777,7 +777,7 @@ static void sctp_inet6_skb_msgname(struct sk_buff *skb, char *msgname, } /* Otherwise, just copy the v6 address. */ - ipv6_addr_copy(&sin6->sin6_addr, &skb->nh.ipv6h->saddr); + ipv6_addr_copy(&sin6->sin6_addr, &ipv6_hdr(skb)->saddr); if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) { struct sctp_ulpevent *ev = sctp_skb2event(skb); sin6->sin6_scope_id = ev->iif; -- cgit v1.2.3 From ea2ae17d6443abddc79480dc9f7af8feacabddc4 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 25 Apr 2007 17:55:53 -0700 Subject: [SK_BUFF]: Introduce skb_transport_offset() For the quite common 'skb->h.raw - skb->data' sequence. Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. Miller --- net/sctp/input.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net/sctp') diff --git a/net/sctp/input.c b/net/sctp/input.c index 595fe32b3d41..9311b5ddf5c0 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -141,7 +141,7 @@ int sctp_rcv(struct sk_buff *skb) sh = (struct sctphdr *) skb->h.raw; /* Pull up the IP and SCTP headers. */ - __skb_pull(skb, skb->h.raw - skb->data); + __skb_pull(skb, skb_transport_offset(skb)); if (skb->len < sizeof(struct sctphdr)) goto discard_it; if ((skb->ip_summed != CHECKSUM_UNNECESSARY) && -- cgit v1.2.3 From 2c0fd387b00a6758550b5ca1aae4408374483fe7 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 13 Mar 2007 13:59:32 -0300 Subject: [SCTP]: Introduce sctp_hdr() For consistency with all the other skb->h.raw accessors. Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. Miller --- net/sctp/input.c | 14 +++++--------- net/sctp/ipv6.c | 4 ++-- net/sctp/protocol.c | 10 ++++------ 3 files changed, 11 insertions(+), 17 deletions(-) (limited to 'net/sctp') diff --git a/net/sctp/input.c b/net/sctp/input.c index 9311b5ddf5c0..3a322c584c74 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -79,14 +79,10 @@ static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb); /* Calculate the SCTP checksum of an SCTP packet. 
*/ static inline int sctp_rcv_checksum(struct sk_buff *skb) { - struct sctphdr *sh; - __u32 cmp, val; struct sk_buff *list = skb_shinfo(skb)->frag_list; - - sh = (struct sctphdr *) skb->h.raw; - cmp = ntohl(sh->checksum); - - val = sctp_start_cksum((__u8 *)sh, skb_headlen(skb)); + struct sctphdr *sh = sctp_hdr(skb); + __u32 cmp = ntohl(sh->checksum); + __u32 val = sctp_start_cksum((__u8 *)sh, skb_headlen(skb)); for (; list; list = list->next) val = sctp_update_cksum((__u8 *)list->data, skb_headlen(list), @@ -138,7 +134,7 @@ int sctp_rcv(struct sk_buff *skb) if (skb_linearize(skb)) goto discard_it; - sh = (struct sctphdr *) skb->h.raw; + sh = sctp_hdr(skb); /* Pull up the IP and SCTP headers. */ __skb_pull(skb, skb_transport_offset(skb)); @@ -905,7 +901,7 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct sk_buff *skb, struct sctp_association *asoc; union sctp_addr addr; union sctp_addr *paddr = &addr; - struct sctphdr *sh = (struct sctphdr *) skb->h.raw; + struct sctphdr *sh = sctp_hdr(skb); sctp_chunkhdr_t *ch; union sctp_params params; sctp_init_chunk_t *init; diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 001be2de0b3c..0992bc5bb528 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -390,7 +390,7 @@ static void sctp_v6_from_skb(union sctp_addr *addr,struct sk_buff *skb, addr->v6.sin6_flowinfo = 0; /* FIXME */ addr->v6.sin6_scope_id = ((struct inet6_skb_parm *)skb->cb)->iif; - sh = (struct sctphdr *) skb->h.raw; + sh = sctp_hdr(skb); if (is_saddr) { *port = sh->source; from = &ipv6_hdr(skb)->saddr; @@ -765,7 +765,7 @@ static void sctp_inet6_skb_msgname(struct sk_buff *skb, char *msgname, if (msgname) { sctp_inet6_msgname(msgname, addr_len); sin6 = (struct sockaddr_in6 *)msgname; - sh = (struct sctphdr *)skb->h.raw; + sh = sctp_hdr(skb); sin6->sin6_port = sh->source; /* Map ipv4 address into v4-mapped-on-v6 address. */ diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 08f92ba4ebd7..7c28c9b959e2 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -235,7 +235,7 @@ static void sctp_v4_from_skb(union sctp_addr *addr, struct sk_buff *skb, port = &addr->v4.sin_port; addr->v4.sin_family = AF_INET; - sh = (struct sctphdr *) skb->h.raw; + sh = sctp_hdr(skb); if (is_saddr) { *port = sh->source; from = &ip_hdr(skb)->saddr; @@ -731,13 +731,11 @@ static void sctp_inet_event_msgname(struct sctp_ulpevent *event, char *msgname, /* Initialize and copy out a msgname from an inbound skb. */ static void sctp_inet_skb_msgname(struct sk_buff *skb, char *msgname, int *len) { - struct sctphdr *sh; - struct sockaddr_in *sin; - if (msgname) { + struct sctphdr *sh = sctp_hdr(skb); + struct sockaddr_in *sin = (struct sockaddr_in *)msgname; + sctp_inet_msgname(msgname, len); - sin = (struct sockaddr_in *)msgname; - sh = (struct sctphdr *)skb->h.raw; sin->sin_port = sh->source; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; } -- cgit v1.2.3 From 88c7664f13bd1a36acb8566b93892a4c58759ac6 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 13 Mar 2007 14:43:18 -0300 Subject: [SK_BUFF]: Introduce icmp_hdr(), remove skb->h.icmph Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. 
Miller --- net/sctp/input.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'net/sctp') diff --git a/net/sctp/input.c b/net/sctp/input.c index 3a322c584c74..40d0df80183f 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -507,8 +507,8 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info) { struct iphdr *iph = (struct iphdr *)skb->data; struct sctphdr *sh = (struct sctphdr *)(skb->data + (iph->ihl <<2)); - int type = skb->h.icmph->type; - int code = skb->h.icmph->code; + const int type = icmp_hdr(skb)->type; + const int code = icmp_hdr(skb)->code; struct sock *sk; struct sctp_association *asoc = NULL; struct sctp_transport *transport; -- cgit v1.2.3 From a27ef749e7be3b06fb58df53d94eb97a21f18707 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 13 Mar 2007 17:17:10 -0300 Subject: [SCTP]: Eliminate some pointer attributions to the skb layer headers Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. Miller --- net/sctp/input.c | 8 ++++---- net/sctp/ipv6.c | 5 ++--- 2 files changed, 6 insertions(+), 7 deletions(-) (limited to 'net/sctp') diff --git a/net/sctp/input.c b/net/sctp/input.c index 40d0df80183f..f38e91b38719 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -506,7 +506,7 @@ void sctp_err_finish(struct sock *sk, struct sctp_association *asoc) void sctp_v4_err(struct sk_buff *skb, __u32 info) { struct iphdr *iph = (struct iphdr *)skb->data; - struct sctphdr *sh = (struct sctphdr *)(skb->data + (iph->ihl <<2)); + const int ihlen = iph->ihl * 4; const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; struct sock *sk; @@ -516,7 +516,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info) char *saveip, *savesctp; int err; - if (skb->len < ((iph->ihl << 2) + 8)) { + if (skb->len < ihlen + 8) { ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); return; } @@ -525,8 +525,8 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info) saveip = skb->nh.raw; savesctp = skb->h.raw; skb_reset_network_header(skb); - skb->h.raw = (char *)sh; - sk = sctp_err_lookup(AF_INET, skb, sh, &asoc, &transport); + skb_set_transport_header(skb, ihlen); + sk = sctp_err_lookup(AF_INET, skb, sctp_hdr(skb), &asoc, &transport); /* Put back, the original pointers. */ skb->nh.raw = saveip; skb->h.raw = savesctp; diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 0992bc5bb528..cd0af9238782 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -122,7 +122,6 @@ SCTP_STATIC void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, int type, int code, int offset, __be32 info) { struct inet6_dev *idev; - struct sctphdr *sh = (struct sctphdr *)(skb->data + offset); struct sock *sk; struct sctp_association *asoc; struct sctp_transport *transport; @@ -136,8 +135,8 @@ SCTP_STATIC void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, saveip = skb->nh.raw; savesctp = skb->h.raw; skb_reset_network_header(skb); - skb->h.raw = (char *)sh; - sk = sctp_err_lookup(AF_INET6, skb, sh, &asoc, &transport); + skb_set_transport_header(skb, offset); + sk = sctp_err_lookup(AF_INET6, skb, sctp_hdr(skb), &asoc, &transport); /* Put back, the original pointers. 
*/ skb->nh.raw = saveip; skb->h.raw = savesctp; -- cgit v1.2.3 From b0e380b1d8a8e0aca215df97702f99815f05c094 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 10 Apr 2007 21:21:55 -0700 Subject: [SK_BUFF]: unions of just one member don't get anything done, kill them Renaming skb->h to skb->transport_header, skb->nh to skb->network_header and skb->mac to skb->mac_header, to match the names of the associated helpers (skb[_[re]set]_{transport,network,mac}_header). Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. Miller --- net/sctp/input.c | 8 ++++---- net/sctp/ipv6.c | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) (limited to 'net/sctp') diff --git a/net/sctp/input.c b/net/sctp/input.c index f38e91b38719..87feee166da9 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -522,14 +522,14 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info) } /* Fix up skb to look at the embedded net header. */ - saveip = skb->nh.raw; - savesctp = skb->h.raw; + saveip = skb->network_header; + savesctp = skb->transport_header; skb_reset_network_header(skb); skb_set_transport_header(skb, ihlen); sk = sctp_err_lookup(AF_INET, skb, sctp_hdr(skb), &asoc, &transport); /* Put back, the original pointers. */ - skb->nh.raw = saveip; - skb->h.raw = savesctp; + skb->network_header = saveip; + skb->transport_header = savesctp; if (!sk) { ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); return; diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index cd0af9238782..afcb0093c290 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -132,14 +132,14 @@ SCTP_STATIC void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, idev = in6_dev_get(skb->dev); /* Fix up skb to look at the embedded net header. */ - saveip = skb->nh.raw; - savesctp = skb->h.raw; + saveip = skb->network_header; + savesctp = skb->transport_header; skb_reset_network_header(skb); skb_set_transport_header(skb, offset); sk = sctp_err_lookup(AF_INET6, skb, sctp_hdr(skb), &asoc, &transport); /* Put back, the original pointers. */ - skb->nh.raw = saveip; - skb->h.raw = savesctp; + skb->network_header = saveip; + skb->transport_header = savesctp; if (!sk) { ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INERRORS); goto out; -- cgit v1.2.3 From 2e07fa9cd3bac1e28cfe3131ed86b053afb02fc9 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 10 Apr 2007 21:22:35 -0700 Subject: [SK_BUFF]: Use offsets for skb->{mac,network,transport}_header on 64bit architectures With this we save 8 bytes per network packet, leaving a 4 bytes hole to be used in further shrinking work, likely with the offsetization of other pointers, such as ->{data,tail,end}, at the cost of adds, that were minimized by the usual practice of setting skb->{mac,nh,n}.raw to a local variable that is then accessed multiple times in each function, it also is not more expensive than before with regards to most of the handling of such headers, like setting one of these headers to another (transport to network, etc), or subtracting, adding to/from it, comparing them, etc. 
Now we have this layout for sk_buff on a x86_64 machine: [acme@mica net-2.6.22]$ pahole vmlinux sk_buff struct sk_buff { struct sk_buff * next; /* 0 8 */ struct sk_buff * prev; /* 8 8 */ struct rb_node rb; /* 16 24 */ struct sock * sk; /* 40 8 */ ktime_t tstamp; /* 48 8 */ struct net_device * dev; /* 56 8 */ /* --- cacheline 1 boundary (64 bytes) --- */ struct net_device * input_dev; /* 64 8 */ sk_buff_data_t transport_header; /* 72 4 */ sk_buff_data_t network_header; /* 76 4 */ sk_buff_data_t mac_header; /* 80 4 */ /* XXX 4 bytes hole, try to pack */ struct dst_entry * dst; /* 88 8 */ struct sec_path * sp; /* 96 8 */ char cb[48]; /* 104 48 */ /* cacheline 2 boundary (128 bytes) was 24 bytes ago*/ unsigned int len; /* 152 4 */ unsigned int data_len; /* 156 4 */ unsigned int mac_len; /* 160 4 */ union { __wsum csum; /* 4 */ __u32 csum_offset; /* 4 */ }; /* 164 4 */ __u32 priority; /* 168 4 */ __u8 local_df:1; /* 172 1 */ __u8 cloned:1; /* 172 1 */ __u8 ip_summed:2; /* 172 1 */ __u8 nohdr:1; /* 172 1 */ __u8 nfctinfo:3; /* 172 1 */ __u8 pkt_type:3; /* 173 1 */ __u8 fclone:2; /* 173 1 */ __u8 ipvs_property:1; /* 173 1 */ /* XXX 2 bits hole, try to pack */ __be16 protocol; /* 174 2 */ void (*destructor)(struct sk_buff *); /* 176 8 */ struct nf_conntrack * nfct; /* 184 8 */ /* --- cacheline 3 boundary (192 bytes) --- */ struct sk_buff * nfct_reasm; /* 192 8 */ struct nf_bridge_info *nf_bridge; /* 200 8 */ __u16 tc_index; /* 208 2 */ __u16 tc_verd; /* 210 2 */ dma_cookie_t dma_cookie; /* 212 4 */ __u32 secmark; /* 216 4 */ __u32 mark; /* 220 4 */ unsigned int truesize; /* 224 4 */ atomic_t users; /* 228 4 */ unsigned char * head; /* 232 8 */ unsigned char * data; /* 240 8 */ unsigned char * tail; /* 248 8 */ /* --- cacheline 4 boundary (256 bytes) --- */ unsigned char * end; /* 256 8 */ }; /* size: 264, cachelines: 5 */ /* sum members: 260, holes: 1, sum holes: 4 */ /* bit holes: 1, sum bit holes: 2 bits */ /* last cacheline: 8 bytes */ On 32 bits nothing changes, and pointers continue to be used with the compiler turning all this abstraction layer into dust. But there are some sk_buff validation tricks that are now possible, humm... :-) Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. Miller --- net/sctp/input.c | 4 ++-- net/sctp/ipv6.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'net/sctp') diff --git a/net/sctp/input.c b/net/sctp/input.c index 87feee166da9..1ff47b18724a 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -513,7 +513,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info) struct sctp_association *asoc = NULL; struct sctp_transport *transport; struct inet_sock *inet; - char *saveip, *savesctp; + sk_buff_data_t saveip, savesctp; int err; if (skb->len < ihlen + 8) { @@ -527,7 +527,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info) skb_reset_network_header(skb); skb_set_transport_header(skb, ihlen); sk = sctp_err_lookup(AF_INET, skb, sctp_hdr(skb), &asoc, &transport); - /* Put back, the original pointers. */ + /* Put back, the original values. 
*/ skb->network_header = saveip; skb->transport_header = savesctp; if (!sk) { diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index afcb0093c290..5b0cdda4b449 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -126,7 +126,7 @@ SCTP_STATIC void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, struct sctp_association *asoc; struct sctp_transport *transport; struct ipv6_pinfo *np; - char *saveip, *savesctp; + sk_buff_data_t saveip, savesctp; int err; idev = in6_dev_get(skb->dev); -- cgit v1.2.3 From 27a884dc3cb63b93c2b3b643f5b31eed5f8a4d26 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 19 Apr 2007 20:29:13 -0700 Subject: [SK_BUFF]: Convert skb->tail to sk_buff_data_t So that it is also an offset from skb->head, reduces its size from 8 to 4 bytes on 64bit architectures, allowing us to combine the 4 bytes hole left by the layer headers conversion, reducing struct sk_buff size to 256 bytes, i.e. 4 64byte cachelines, and since the sk_buff slab cache is SLAB_HWCACHE_ALIGN... :-) Many calculations that previously required that skb->{transport,network, mac}_header be first converted to a pointer now can be done directly, being meaningful as offsets or pointers. Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. Miller --- net/sctp/input.c | 4 ++-- net/sctp/inqueue.c | 8 ++++---- net/sctp/sm_make_chunk.c | 4 ++-- net/sctp/sm_statefuns.c | 4 ++-- 4 files changed, 10 insertions(+), 10 deletions(-) (limited to 'net/sctp') diff --git a/net/sctp/input.c b/net/sctp/input.c index 1ff47b18724a..18b97eedc1fa 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -612,7 +612,7 @@ int sctp_rcv_ootb(struct sk_buff *skb) break; ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); - if (ch_end > skb->tail) + if (ch_end > skb_tail_pointer(skb)) break; /* RFC 8.4, 2) If the OOTB packet contains an ABORT chunk, the @@ -644,7 +644,7 @@ int sctp_rcv_ootb(struct sk_buff *skb) } ch = (sctp_chunkhdr_t *) ch_end; - } while (ch_end < skb->tail); + } while (ch_end < skb_tail_pointer(skb)); return 0; diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c index c30629e17781..88aa22407549 100644 --- a/net/sctp/inqueue.c +++ b/net/sctp/inqueue.c @@ -159,16 +159,16 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue) * the skb->tail. */ if (unlikely(skb_is_nonlinear(chunk->skb))) { - if (chunk->chunk_end > chunk->skb->tail) - chunk->chunk_end = chunk->skb->tail; + if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) + chunk->chunk_end = skb_tail_pointer(chunk->skb); } skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t)); chunk->subh.v = NULL; /* Subheader is no longer valid. */ - if (chunk->chunk_end < chunk->skb->tail) { + if (chunk->chunk_end < skb_tail_pointer(chunk->skb)) { /* This is not a singleton */ chunk->singleton = 0; - } else if (chunk->chunk_end > chunk->skb->tail) { + } else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) { /* RFC 2960, Section 6.10 Bundling * * Partial chunks MUST NOT be placed in an SCTP packet. diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 60c5b59d4c65..759ea3d19976 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -1143,7 +1143,7 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data) /* Adjust the chunk length field. 
*/ chunk->chunk_hdr->length = htons(chunklen + padlen + len); - chunk->chunk_end = chunk->skb->tail; + chunk->chunk_end = skb_tail_pointer(chunk->skb); return target; } @@ -1168,7 +1168,7 @@ int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len, /* Adjust the chunk length field. */ chunk->chunk_hdr->length = htons(ntohs(chunk->chunk_hdr->length) + len); - chunk->chunk_end = chunk->skb->tail; + chunk->chunk_end = skb_tail_pointer(chunk->skb); out: return err; diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index bf502c499c81..438e5dc5c714 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -3115,7 +3115,7 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep, break; ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); - if (ch_end > skb->tail) + if (ch_end > skb_tail_pointer(skb)) break; if (SCTP_CID_SHUTDOWN_ACK == ch->type) @@ -3130,7 +3130,7 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep, return sctp_sf_pdiscard(ep, asoc, type, arg, commands); ch = (sctp_chunkhdr_t *) ch_end; - } while (ch_end < skb->tail); + } while (ch_end < skb_tail_pointer(skb)); if (ootb_shut_ack) sctp_sf_shut_8_4_5(ep, asoc, type, arg, commands); -- cgit v1.2.3 From b6e1331f3ce25a56edb956054eaf8011654686cb Mon Sep 17 00:00:00 2001 From: Vlad Yasevich Date: Fri, 20 Apr 2007 12:23:15 -0700 Subject: [SCTP]: Implement SCTP_FRAGMENT_INTERLEAVE socket option MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This option was introduced in draft-ietf-tsvwg-sctpsocket-13. It prevents head-of-line blocking in the case of one-to-many endpoint. Applications enabling this option really must enable SCTP_SNDRCV event so that they would know where the data belongs. Based on an earlier patch by Ivan Skytte Jørgensen. Additionally, this functionality now permits multiple associations on the same endpoint to enter Partial Delivery. Applications should be extra careful, when using this functionality, to track EOR indicators. Signed-off-by: Vlad Yasevich Signed-off-by: David S. Miller --- net/sctp/socket.c | 84 ++++++++++++++++++++++++++++++++++++++---- net/sctp/ulpqueue.c | 103 +++++++++++++++++++++++++++++++++++++--------------- 2 files changed, 151 insertions(+), 36 deletions(-) (limited to 'net/sctp') diff --git a/net/sctp/socket.c b/net/sctp/socket.c index a1d026f12b0e..b4be473c68b0 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -2255,7 +2255,7 @@ static int sctp_setsockopt_peer_addr_params(struct sock *sk, return 0; } -/* 7.1.24. Delayed Ack Timer (SCTP_DELAYED_ACK_TIME) +/* 7.1.23. Delayed Ack Timer (SCTP_DELAYED_ACK_TIME) * * This options will get or set the delayed ack timer. The time is set * in milliseconds. If the assoc_id is 0, then this sets or gets the @@ -2792,6 +2792,46 @@ static int sctp_setsockopt_context(struct sock *sk, char __user *optval, return 0; } +/* + * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE) + * + * This options will at a minimum specify if the implementation is doing + * fragmented interleave. Fragmented interleave, for a one to many + * socket, is when subsequent calls to receive a message may return + * parts of messages from different associations. Some implementations + * may allow you to turn this value on or off. If so, when turned off, + * no fragment interleave will occur (which will cause a head of line + * blocking amongst multiple associations sharing the same one to many + * socket). 
When this option is turned on, then each receive call may + * come from a different association (thus the user must receive data + * with the extended calls (e.g. sctp_recvmsg) to keep track of which + * association each receive belongs to. + * + * This option takes a boolean value. A non-zero value indicates that + * fragmented interleave is on. A value of zero indicates that + * fragmented interleave is off. + * + * Note that it is important that an implementation that allows this + * option to be turned on, have it off by default. Otherwise an unaware + * application using the one to many model may become confused and act + * incorrectly. + */ +static int sctp_setsockopt_fragment_interleave(struct sock *sk, + char __user *optval, + int optlen) +{ + int val; + + if (optlen != sizeof(int)) + return -EINVAL; + if (get_user(val, (int __user *)optval)) + return -EFAULT; + + sctp_sk(sk)->frag_interleave = (val == 0) ? 0 : 1; + + return 0; +} + /* API 6.2 setsockopt(), getsockopt() * * Applications use setsockopt() and getsockopt() to set or retrieve @@ -2906,7 +2946,9 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname, case SCTP_CONTEXT: retval = sctp_setsockopt_context(sk, optval, optlen); break; - + case SCTP_FRAGMENT_INTERLEAVE: + retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen); + break; default: retval = -ENOPROTOOPT; break; @@ -3134,8 +3176,9 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk) sp->pf = sctp_get_pf_specific(sk->sk_family); /* Control variables for partial data delivery. */ - sp->pd_mode = 0; + atomic_set(&sp->pd_mode, 0); skb_queue_head_init(&sp->pd_lobby); + sp->frag_interleave = 0; /* Create a per socket endpoint structure. Even if we * change the data structure relationships, this may still @@ -3642,7 +3685,7 @@ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len, return 0; } -/* 7.1.24. Delayed Ack Timer (SCTP_DELAYED_ACK_TIME) +/* 7.1.23. Delayed Ack Timer (SCTP_DELAYED_ACK_TIME) * * This options will get or set the delayed ack timer. The time is set * in milliseconds. If the assoc_id is 0, then this sets or gets the @@ -4536,6 +4579,29 @@ static int sctp_getsockopt_maxseg(struct sock *sk, int len, return 0; } +/* + * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE) + * (chapter and verse is quoted at sctp_setsockopt_fragment_interleave()) + */ +static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len, + char __user *optval, int __user *optlen) +{ + int val; + + if (len < sizeof(int)) + return -EINVAL; + + len = sizeof(int); + + val = sctp_sk(sk)->frag_interleave; + if (put_user(len, optlen)) + return -EFAULT; + if (copy_to_user(optval, &val, len)) + return -EFAULT; + + return 0; +} + SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { @@ -4648,6 +4714,10 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname, case SCTP_CONTEXT: retval = sctp_getsockopt_context(sk, len, optval, optlen); break; + case SCTP_FRAGMENT_INTERLEAVE: + retval = sctp_getsockopt_fragment_interleave(sk, len, optval, + optlen); + break; default: retval = -ENOPROTOOPT; break; @@ -5742,9 +5812,9 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue. 
*/ skb_queue_head_init(&newsp->pd_lobby); - sctp_sk(newsk)->pd_mode = assoc->ulpq.pd_mode; + atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode); - if (sctp_sk(oldsk)->pd_mode) { + if (atomic_read(&sctp_sk(oldsk)->pd_mode)) { struct sk_buff_head *queue; /* Decide which queue to move pd_lobby skbs to. */ @@ -5770,7 +5840,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, * delivery to finish. */ if (assoc->ulpq.pd_mode) - sctp_clear_pd(oldsk); + sctp_clear_pd(oldsk, NULL); } diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index b29e3e4b72c9..ac80c34f6c2c 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c @@ -138,18 +138,42 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, /* Clear the partial delivery mode for this socket. Note: This * assumes that no association is currently in partial delivery mode. */ -int sctp_clear_pd(struct sock *sk) +int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc) { struct sctp_sock *sp = sctp_sk(sk); - sp->pd_mode = 0; - if (!skb_queue_empty(&sp->pd_lobby)) { - struct list_head *list; - sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue); - list = (struct list_head *)&sctp_sk(sk)->pd_lobby; - INIT_LIST_HEAD(list); - return 1; + if (atomic_dec_and_test(&sp->pd_mode)) { + /* This means there are no other associations in PD, so + * we can go ahead and clear out the lobby in one shot + */ + if (!skb_queue_empty(&sp->pd_lobby)) { + struct list_head *list; + sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue); + list = (struct list_head *)&sctp_sk(sk)->pd_lobby; + INIT_LIST_HEAD(list); + return 1; + } + } else { + /* There are other associations in PD, so we only need to + * pull stuff out of the lobby that belongs to the + * associations that is exiting PD (all of its notifications + * are posted here). + */ + if (!skb_queue_empty(&sp->pd_lobby) && asoc) { + struct sk_buff *skb, *tmp; + struct sctp_ulpevent *event; + + sctp_skb_for_each(skb, &sp->pd_lobby, tmp) { + event = sctp_skb2event(skb); + if (event->asoc == asoc) { + __skb_unlink(skb, &sp->pd_lobby); + __skb_queue_tail(&sk->sk_receive_queue, + skb); + } + } + } } + return 0; } @@ -157,7 +181,7 @@ int sctp_clear_pd(struct sock *sk) static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq) { ulpq->pd_mode = 0; - return sctp_clear_pd(ulpq->asoc->base.sk); + return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc); } /* If the SKB of 'event' is on a list, it is the first such member @@ -187,25 +211,35 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) * the association the cause of the partial delivery. */ - if (!sctp_sk(sk)->pd_mode) { + if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) { queue = &sk->sk_receive_queue; - } else if (ulpq->pd_mode) { - /* If the association is in partial delivery, we - * need to finish delivering the partially processed - * packet before passing any other data. This is - * because we don't truly support stream interleaving. - */ - if ((event->msg_flags & MSG_NOTIFICATION) || - (SCTP_DATA_NOT_FRAG == - (event->msg_flags & SCTP_DATA_FRAG_MASK))) - queue = &sctp_sk(sk)->pd_lobby; - else { - clear_pd = event->msg_flags & MSG_EOR; - queue = &sk->sk_receive_queue; + } else { + if (ulpq->pd_mode) { + /* If the association is in partial delivery, we + * need to finish delivering the partially processed + * packet before passing any other data. This is + * because we don't truly support stream interleaving. 
+ */ + if ((event->msg_flags & MSG_NOTIFICATION) || + (SCTP_DATA_NOT_FRAG == + (event->msg_flags & SCTP_DATA_FRAG_MASK))) + queue = &sctp_sk(sk)->pd_lobby; + else { + clear_pd = event->msg_flags & MSG_EOR; + queue = &sk->sk_receive_queue; + } + } else { + /* + * If fragment interleave is enabled, we + * can queue this to the recieve queue instead + * of the lobby. + */ + if (sctp_sk(sk)->frag_interleave) + queue = &sk->sk_receive_queue; + else + queue = &sctp_sk(sk)->pd_lobby; } - } else - queue = &sctp_sk(sk)->pd_lobby; - + } /* If we are harvesting multiple skbs they will be * collected on a list. @@ -826,18 +860,29 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq, { struct sctp_ulpevent *event; struct sctp_association *asoc; + struct sctp_sock *sp; asoc = ulpq->asoc; + sp = sctp_sk(asoc->base.sk); - /* Are we already in partial delivery mode? */ - if (!sctp_sk(asoc->base.sk)->pd_mode) { + /* If the association is already in Partial Delivery mode + * we have noting to do. + */ + if (ulpq->pd_mode) + return; + /* If the user enabled fragment interleave socket option, + * multiple associations can enter partial delivery. + * Otherwise, we can only enter partial delivery if the + * socket is not in partial deliver mode. + */ + if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) { /* Is partial delivery possible? */ event = sctp_ulpq_retrieve_first(ulpq); /* Send event to the ULP. */ if (event) { sctp_ulpq_tail_event(ulpq, event); - sctp_sk(asoc->base.sk)->pd_mode = 1; + atomic_inc(&sp->pd_mode); ulpq->pd_mode = 1; return; } -- cgit v1.2.3 From d49d91d79a8dc5e85108a5ae1c8eef23dec135c1 Mon Sep 17 00:00:00 2001 From: Vlad Yasevich Date: Fri, 23 Mar 2007 11:32:00 -0700 Subject: [SCTP]: Implement SCTP_PARTIAL_DELIVERY_POINT option. This option induces partial delivery to run as soon as the specified amount of data has been accumulated on the association. However, we give preference to fully reassembled messages over PD messages. In any case, window and buffer is freed up. Signed-off-by: Vlad Yasevich Signed-off-by: David S. Miller --- net/sctp/socket.c | 57 +++++++++++++++++++++++++++++++++++++++++++++++ net/sctp/ulpqueue.c | 64 +++++++++++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 117 insertions(+), 4 deletions(-) (limited to 'net/sctp') diff --git a/net/sctp/socket.c b/net/sctp/socket.c index b4be473c68b0..1e787a2d0b5f 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -2832,6 +2832,32 @@ static int sctp_setsockopt_fragment_interleave(struct sock *sk, return 0; } +/* + * 7.1.25. Set or Get the sctp partial delivery point + * (SCTP_PARTIAL_DELIVERY_POINT) + * This option will set or get the SCTP partial delivery point. This + * point is the size of a message where the partial delivery API will be + * invoked to help free up rwnd space for the peer. Setting this to a + * lower value will cause partial delivery's to happen more often. The + * calls argument is an integer that sets or gets the partial delivery + * point. + */ +static int sctp_setsockopt_partial_delivery_point(struct sock *sk, + char __user *optval, + int optlen) +{ + u32 val; + + if (optlen != sizeof(u32)) + return -EINVAL; + if (get_user(val, (int __user *)optval)) + return -EFAULT; + + sctp_sk(sk)->pd_point = val; + + return 0; /* is this the right error code? 
*/ +} + /* API 6.2 setsockopt(), getsockopt() * * Applications use setsockopt() and getsockopt() to set or retrieve @@ -2911,6 +2937,9 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname, case SCTP_DELAYED_ACK_TIME: retval = sctp_setsockopt_delayed_ack_time(sk, optval, optlen); break; + case SCTP_PARTIAL_DELIVERY_POINT: + retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen); + break; case SCTP_INITMSG: retval = sctp_setsockopt_initmsg(sk, optval, optlen); @@ -4602,6 +4631,30 @@ static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len, return 0; } +/* + * 7.1.25. Set or Get the sctp partial delivery point + * (chapter and verse is quoted at sctp_setsockopt_partial_delivery_point()) + */ +static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len, + char __user *optval, + int __user *optlen) +{ + u32 val; + + if (len < sizeof(u32)) + return -EINVAL; + + len = sizeof(u32); + + val = sctp_sk(sk)->pd_point; + if (put_user(len, optlen)) + return -EFAULT; + if (copy_to_user(optval, &val, len)) + return -EFAULT; + + return -ENOTSUPP; +} + SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { @@ -4718,6 +4771,10 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname, retval = sctp_getsockopt_fragment_interleave(sk, len, optval, optlen); break; + case SCTP_PARTIAL_DELIVERY_POINT: + retval = sctp_getsockopt_partial_delivery_point(sk, len, optval, + optlen); + break; default: retval = -ENOPROTOOPT; break; diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index ac80c34f6c2c..0fa4d4d4df17 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c @@ -177,6 +177,15 @@ int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc) return 0; } +/* Set the pd_mode on the socket and ulpq */ +static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq) +{ + struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk); + + atomic_inc(&sp->pd_mode); + ulpq->pd_mode = 1; +} + /* Clear the pd_mode and restart any pending messages waiting for delivery. */ static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq) { @@ -401,6 +410,11 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u struct sk_buff *first_frag = NULL; __u32 ctsn, next_tsn; struct sctp_ulpevent *retval = NULL; + struct sk_buff *pd_first = NULL; + struct sk_buff *pd_last = NULL; + size_t pd_len = 0; + struct sctp_association *asoc; + u32 pd_point; /* Initialized to 0 just to avoid compiler warning message. Will * never be used with this value. It is referenced only after it @@ -416,6 +430,10 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u * we expect to find the remaining middle fragments and the last * fragment in order. If not, first_frag is reset to NULL and we * start the next pass when we find another first fragment. + * + * There is a potential to do partial delivery if user sets + * SCTP_PARTIAL_DELIVERY_POINT option. Lets count some things here + * to see if can do PD. */ skb_queue_walk(&ulpq->reasm, pos) { cevent = sctp_skb2event(pos); @@ -423,14 +441,32 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) { case SCTP_DATA_FIRST_FRAG: + /* If this "FIRST_FRAG" is the first + * element in the queue, then count it towards + * possible PD. 
+ */ + if (pos == ulpq->reasm.next) { + pd_first = pos; + pd_last = pos; + pd_len = pos->len; + } else { + pd_first = NULL; + pd_last = NULL; + pd_len = 0; + } + first_frag = pos; next_tsn = ctsn + 1; break; case SCTP_DATA_MIDDLE_FRAG: - if ((first_frag) && (ctsn == next_tsn)) + if ((first_frag) && (ctsn == next_tsn)) { next_tsn++; - else + if (pd_first) { + pd_last = pos; + pd_len += pos->len; + } + } else first_frag = NULL; break; @@ -441,7 +477,28 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u first_frag = NULL; break; }; + } + + asoc = ulpq->asoc; + if (pd_first) { + /* Make sure we can enter partial deliver. + * We can trigger partial delivery only if framgent + * interleave is set, or the socket is not already + * in partial delivery. + */ + if (!sctp_sk(asoc->base.sk)->frag_interleave && + atomic_read(&sctp_sk(asoc->base.sk)->pd_mode)) + goto done; + cevent = sctp_skb2event(pd_first); + pd_point = sctp_sk(asoc->base.sk)->pd_point; + if (pd_point && pd_point <= pd_len) { + retval = sctp_make_reassembled_event(&ulpq->reasm, + pd_first, + pd_last); + if (retval) + sctp_ulpq_set_pd(ulpq); + } } done: return retval; @@ -882,8 +939,7 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq, /* Send event to the ULP. */ if (event) { sctp_ulpq_tail_event(ulpq, event); - atomic_inc(&sp->pd_mode); - ulpq->pd_mode = 1; + sctp_ulpq_set_pd(ulpq); return; } } -- cgit v1.2.3 From 1ae4114dce35dd1d32ed847f60b599dbbdfd5829 Mon Sep 17 00:00:00 2001 From: Vlad Yasevich Date: Fri, 23 Mar 2007 11:32:26 -0700 Subject: [SCTP]: Implement SCTP_ADDR_CONFIRMED state for ADDR_CHNAGE event Signed-off-by: Vlad Yasevich Signed-off-by: David S. Miller --- net/sctp/associola.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'net/sctp') diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 78d2ddb5ca18..85af1cb70fe8 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -714,8 +714,16 @@ void sctp_assoc_control_transport(struct sctp_association *asoc, /* Record the transition on the transport. */ switch (command) { case SCTP_TRANSPORT_UP: + /* If we are moving from UNCONFIRMED state due + * to heartbeat success, report the SCTP_ADDR_CONFIRMED + * state to the user, otherwise report SCTP_ADDR_AVAILABLE. + */ + if (SCTP_UNCONFIRMED == transport->state && + SCTP_HEARTBEAT_SUCCESS == error) + spc_state = SCTP_ADDR_CONFIRMED; + else + spc_state = SCTP_ADDR_AVAILABLE; transport->state = SCTP_ACTIVE; - spc_state = SCTP_ADDR_AVAILABLE; break; case SCTP_TRANSPORT_DOWN: -- cgit v1.2.3 From bdf3092af601ccad765974652ab103162fbe14f4 Mon Sep 17 00:00:00 2001 From: Vlad Yasevich Date: Fri, 23 Mar 2007 11:33:12 -0700 Subject: [SCTP]: Honor flags when setting peer address parameters Parameters only take effect when a corresponding flag bit is set and a value is specified. This means we need to check the flags in addition to checking for non-zero value. Signed-off-by: Vlad Yasevich Signed-off-by: David S. Miller --- net/sctp/socket.c | 54 ++++++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 44 insertions(+), 10 deletions(-) (limited to 'net/sctp') diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 1e787a2d0b5f..dda2f6700f5b 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -2039,6 +2039,10 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval, * SPP_HB_DEMAND - Request a user initiated heartbeat * to be made immediately. 
* + * SPP_HB_TIME_IS_ZERO - Specify's that the time for + * heartbeat delayis to be set to the value of 0 + * milliseconds. + * * SPP_PMTUD_ENABLE - This field will enable PMTU * discovery upon the specified address. Note that * if the address feild is empty then all addresses @@ -2081,13 +2085,30 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, return error; } - if (params->spp_hbinterval) { - if (trans) { - trans->hbinterval = msecs_to_jiffies(params->spp_hbinterval); - } else if (asoc) { - asoc->hbinterval = msecs_to_jiffies(params->spp_hbinterval); - } else { - sp->hbinterval = params->spp_hbinterval; + /* Note that unless the spp_flag is set to SPP_HB_ENABLE the value of + * this field is ignored. Note also that a value of zero indicates + * the current setting should be left unchanged. + */ + if (params->spp_flags & SPP_HB_ENABLE) { + + /* Re-zero the interval if the SPP_HB_TIME_IS_ZERO is + * set. This lets us use 0 value when this flag + * is set. + */ + if (params->spp_flags & SPP_HB_TIME_IS_ZERO) + params->spp_hbinterval = 0; + + if (params->spp_hbinterval || + (params->spp_flags & SPP_HB_TIME_IS_ZERO)) { + if (trans) { + trans->hbinterval = + msecs_to_jiffies(params->spp_hbinterval); + } else if (asoc) { + asoc->hbinterval = + msecs_to_jiffies(params->spp_hbinterval); + } else { + sp->hbinterval = params->spp_hbinterval; + } } } @@ -2104,7 +2125,12 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, } } - if (params->spp_pathmtu) { + /* When Path MTU discovery is disabled the value specified here will + * be the "fixed" path mtu (i.e. the value of the spp_flags field must + * include the flag SPP_PMTUD_DISABLE for this field to have any + * effect). + */ + if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) { if (trans) { trans->pathmtu = params->spp_pathmtu; sctp_assoc_sync_pmtu(asoc); @@ -2135,7 +2161,11 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, } } - if (params->spp_sackdelay) { + /* Note that unless the spp_flag is set to SPP_SACKDELAY_ENABLE the + * value of this field is ignored. Note also that a value of zero + * indicates the current setting should be left unchanged. + */ + if ((params->spp_flags & SPP_SACKDELAY_ENABLE) && params->spp_sackdelay) { if (trans) { trans->sackdelay = msecs_to_jiffies(params->spp_sackdelay); @@ -2163,7 +2193,11 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, } } - if (params->spp_pathmaxrxt) { + /* Note that unless the spp_flag is set to SPP_PMTUD_ENABLE the value + * of this field is ignored. Note also that a value of zero + * indicates the current setting should be left unchanged. + */ + if ((params->spp_flags & SPP_PMTUD_ENABLE) && params->spp_pathmaxrxt) { if (trans) { trans->pathmaxrxt = params->spp_pathmaxrxt; } else if (asoc) { -- cgit v1.2.3 From a5a35e76753d27e782028843a5186f176b50dd16 Mon Sep 17 00:00:00 2001 From: Vlad Yasevich Date: Fri, 23 Mar 2007 11:34:08 -0700 Subject: [SCTP]: Implement sac_info field in SCTP_ASSOC_CHANGE notification. As stated in the sctp socket api draft: sac_info: variable If the sac_state is SCTP_COMM_LOST and an ABORT chunk was received for this association, sac_info[] contains the complete ABORT chunk as defined in the SCTP specification RFC2960 [RFC2960] section 3.3.7. We now save received ABORT chunks into the sac_info field and pass that to the user. Signed-off-by: Vlad Yasevich Signed-off-by: David S. 
Miller --- net/sctp/sm_sideeffect.c | 11 ++++++++--- net/sctp/sm_statefuns.c | 14 +++++++------- net/sctp/ulpevent.c | 49 +++++++++++++++++++++++++++++++++++++++++------- 3 files changed, 57 insertions(+), 17 deletions(-) (limited to 'net/sctp') diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 135567493119..0a1a197193a2 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -464,7 +464,7 @@ static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands, struct sctp_ulpevent *event; event = sctp_ulpevent_make_assoc_change(asoc,0, SCTP_CANT_STR_ASSOC, - (__u16)error, 0, 0, + (__u16)error, 0, 0, NULL, GFP_ATOMIC); if (event) @@ -492,8 +492,13 @@ static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands, /* Cancel any partial delivery in progress. */ sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC); - event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST, - (__u16)error, 0, 0, + if (event_type == SCTP_EVENT_T_CHUNK && subtype.chunk == SCTP_CID_ABORT) + event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST, + (__u16)error, 0, 0, chunk, + GFP_ATOMIC); + else + event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST, + (__u16)error, 0, 0, NULL, GFP_ATOMIC); if (event) sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 438e5dc5c714..e0ec16dd678a 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -186,7 +186,7 @@ sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep, * notification is passed to the upper layer. */ ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_SHUTDOWN_COMP, - 0, 0, 0, GFP_ATOMIC); + 0, 0, 0, NULL, GFP_ATOMIC); if (ev) sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); @@ -661,7 +661,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep, ev = sctp_ulpevent_make_assoc_change(new_asoc, 0, SCTP_COMM_UP, 0, new_asoc->c.sinit_num_ostreams, new_asoc->c.sinit_max_instreams, - GFP_ATOMIC); + NULL, GFP_ATOMIC); if (!ev) goto nomem_ev; @@ -790,7 +790,7 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(const struct sctp_endpoint *ep, ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_UP, 0, asoc->c.sinit_num_ostreams, asoc->c.sinit_max_instreams, - GFP_ATOMIC); + NULL, GFP_ATOMIC); if (!ev) goto nomem; @@ -1625,7 +1625,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(const struct sctp_endpoint *ep, ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_RESTART, 0, new_asoc->c.sinit_num_ostreams, new_asoc->c.sinit_max_instreams, - GFP_ATOMIC); + NULL, GFP_ATOMIC); if (!ev) goto nomem_ev; @@ -1691,7 +1691,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_b(const struct sctp_endpoint *ep, ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_UP, 0, new_asoc->c.sinit_num_ostreams, new_asoc->c.sinit_max_instreams, - GFP_ATOMIC); + NULL, GFP_ATOMIC); if (!ev) goto nomem_ev; @@ -1786,7 +1786,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(const struct sctp_endpoint *ep, SCTP_COMM_UP, 0, asoc->c.sinit_num_ostreams, asoc->c.sinit_max_instreams, - GFP_ATOMIC); + NULL, GFP_ATOMIC); if (!ev) goto nomem; @@ -3035,7 +3035,7 @@ sctp_disposition_t sctp_sf_do_9_2_final(const struct sctp_endpoint *ep, * notification is passed to the upper layer. 
*/ ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_SHUTDOWN_COMP, - 0, 0, 0, GFP_ATOMIC); + 0, 0, 0, NULL, GFP_ATOMIC); if (!ev) goto nomem; diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c index 2e11bc8d5d35..661ea2dd78ba 100644 --- a/net/sctp/ulpevent.c +++ b/net/sctp/ulpevent.c @@ -131,19 +131,54 @@ static inline void sctp_ulpevent_release_owner(struct sctp_ulpevent *event) struct sctp_ulpevent *sctp_ulpevent_make_assoc_change( const struct sctp_association *asoc, __u16 flags, __u16 state, __u16 error, __u16 outbound, - __u16 inbound, gfp_t gfp) + __u16 inbound, struct sctp_chunk *chunk, gfp_t gfp) { struct sctp_ulpevent *event; struct sctp_assoc_change *sac; struct sk_buff *skb; - event = sctp_ulpevent_new(sizeof(struct sctp_assoc_change), + /* If the lower layer passed in the chunk, it will be + * an ABORT, so we need to include it in the sac_info. + */ + if (chunk) { + /* sctp_inqu_pop() has allready pulled off the chunk + * header. We need to put it back temporarily + */ + skb_push(chunk->skb, sizeof(sctp_chunkhdr_t)); + + /* Copy the chunk data to a new skb and reserve enough + * head room to use as notification. + */ + skb = skb_copy_expand(chunk->skb, + sizeof(struct sctp_assoc_change), 0, gfp); + + if (!skb) + goto fail; + + /* put back the chunk header now that we have a copy */ + skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t)); + + /* Embed the event fields inside the cloned skb. */ + event = sctp_skb2event(skb); + sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize); + + /* Include the notification structure */ + sac = (struct sctp_assoc_change *) + skb_push(skb, sizeof(struct sctp_assoc_change)); + + /* Trim the buffer to the right length. */ + skb_trim(skb, sizeof(struct sctp_assoc_change) + + ntohs(chunk->chunk_hdr->length)); + } else { + event = sctp_ulpevent_new(sizeof(struct sctp_assoc_change), MSG_NOTIFICATION, gfp); - if (!event) - goto fail; - skb = sctp_event2skb(event); - sac = (struct sctp_assoc_change *) - skb_put(skb, sizeof(struct sctp_assoc_change)); + if (!event) + goto fail; + + skb = sctp_event2skb(event); + sac = (struct sctp_assoc_change *) skb_put(skb, + sizeof(struct sctp_assoc_change)); + } /* Socket Extensions for SCTP * 5.3.1.1 SCTP_ASSOC_CHANGE -- cgit v1.2.3 From 703315712cfccfe0b45ef4aa6994527d8ee95e33 Mon Sep 17 00:00:00 2001 From: Vlad Yasevich Date: Fri, 23 Mar 2007 11:34:36 -0700 Subject: [SCTP]: Implement SCTP_MAX_BURST socket option. Signed-off-by: Vlad Yasevich Signed-off-by: David S. Miller --- net/sctp/associola.c | 2 +- net/sctp/protocol.c | 2 +- net/sctp/socket.c | 61 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 63 insertions(+), 2 deletions(-) (limited to 'net/sctp') diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 85af1cb70fe8..37a343e1ebb7 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -143,7 +143,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a /* Initialize the maximum mumber of new data packets that can be sent * in a burst. 
*/ - asoc->max_burst = sctp_max_burst; + asoc->max_burst = sp->max_burst; /* initialize association timers */ asoc->timeouts[SCTP_EVENT_TIMEOUT_NONE] = 0; diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 7c28c9b959e2..c361deb6cea9 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -1042,7 +1042,7 @@ SCTP_STATIC __init int sctp_init(void) sctp_cookie_preserve_enable = 1; /* Max.Burst - 4 */ - sctp_max_burst = SCTP_MAX_BURST; + sctp_max_burst = SCTP_DEFAULT_MAX_BURST; /* Association.Max.Retrans - 10 attempts * Path.Max.Retrans - 5 attempts (per destination address) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index dda2f6700f5b..f904f2bc0f2c 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -2892,6 +2892,36 @@ static int sctp_setsockopt_partial_delivery_point(struct sock *sk, return 0; /* is this the right error code? */ } +/* + * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST) + * + * This option will allow a user to change the maximum burst of packets + * that can be emitted by this association. Note that the default value + * is 4, and some implementations may restrict this setting so that it + * can only be lowered. + * + * NOTE: This text doesn't seem right. Do this on a socket basis with + * future associations inheriting the socket value. + */ +static int sctp_setsockopt_maxburst(struct sock *sk, + char __user *optval, + int optlen) +{ + int val; + + if (optlen != sizeof(int)) + return -EINVAL; + if (get_user(val, (int __user *)optval)) + return -EFAULT; + + if (val < 0) + return -EINVAL; + + sctp_sk(sk)->max_burst = val; + + return 0; +} + /* API 6.2 setsockopt(), getsockopt() * * Applications use setsockopt() and getsockopt() to set or retrieve @@ -3012,6 +3042,9 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname, case SCTP_FRAGMENT_INTERLEAVE: retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen); break; + case SCTP_MAX_BURST: + retval = sctp_setsockopt_maxburst(sk, optval, optlen); + break; default: retval = -ENOPROTOOPT; break; @@ -3171,6 +3204,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk) sp->default_timetolive = 0; sp->default_rcv_context = 0; + sp->max_burst = sctp_max_burst; /* Initialize default setup parameters. These parameters * can be modified with the SCTP_INITMSG socket option or @@ -4689,6 +4723,30 @@ static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len, return -ENOTSUPP; } +/* + * 7.1.28. 
Set or Get the maximum burst (SCTP_MAX_BURST) + * (chapter and verse is quoted at sctp_setsockopt_maxburst()) + */ +static int sctp_getsockopt_maxburst(struct sock *sk, int len, + char __user *optval, + int __user *optlen) +{ + int val; + + if (len < sizeof(int)) + return -EINVAL; + + len = sizeof(int); + + val = sctp_sk(sk)->max_burst; + if (put_user(len, optlen)) + return -EFAULT; + if (copy_to_user(optval, &val, len)) + return -EFAULT; + + return -ENOTSUPP; +} + SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { @@ -4809,6 +4867,9 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname, retval = sctp_getsockopt_partial_delivery_point(sk, len, optval, optlen); break; + case SCTP_MAX_BURST: + retval = sctp_getsockopt_maxburst(sk, len, optval, optlen); + break; default: retval = -ENOPROTOOPT; break; -- cgit v1.2.3 From 604763722c655c7e3f31ecf6f7b4dafcd26a7a15 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 9 Apr 2007 11:59:39 -0700 Subject: [NET]: Treat CHECKSUM_PARTIAL as CHECKSUM_UNNECESSARY When a transmitted packet is looped back directly, CHECKSUM_PARTIAL maps to the semantics of CHECKSUM_UNNECESSARY. Therefore we should treat it as such in the stack. Signed-off-by: Herbert Xu Signed-off-by: David S. Miller --- net/sctp/input.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'net/sctp') diff --git a/net/sctp/input.c b/net/sctp/input.c index 18b97eedc1fa..885109fb3dda 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -140,8 +140,7 @@ int sctp_rcv(struct sk_buff *skb) __skb_pull(skb, skb_transport_offset(skb)); if (skb->len < sizeof(struct sctphdr)) goto discard_it; - if ((skb->ip_summed != CHECKSUM_UNNECESSARY) && - (sctp_rcv_checksum(skb) < 0)) + if (!skb_csum_unnecessary(skb) && sctp_rcv_checksum(skb) < 0) goto discard_it; skb_pull(skb, sizeof(struct sctphdr)); -- cgit v1.2.3 From 3ff50b7997fe06cd5d276b229967bb52d6b3b6c1 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Fri, 20 Apr 2007 17:09:22 -0700 Subject: [NET]: cleanup extra semicolons Spring cleaning time... There seems to be a lot of places in the network code that have extra bogus semicolons after conditionals. Most commonly is a bogus semicolon after: switch() { } Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- net/sctp/associola.c | 2 +- net/sctp/debug.c | 5 +++-- net/sctp/ipv6.c | 2 +- net/sctp/output.c | 2 +- net/sctp/outqueue.c | 12 ++++++------ net/sctp/sm_make_chunk.c | 4 ++-- net/sctp/sm_sideeffect.c | 5 +++-- net/sctp/sm_statefuns.c | 10 +++++----- net/sctp/sm_statetable.c | 2 +- net/sctp/socket.c | 11 ++++++----- net/sctp/transport.c | 2 +- net/sctp/ulpqueue.c | 8 ++++---- 12 files changed, 34 insertions(+), 31 deletions(-) (limited to 'net/sctp') diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 37a343e1ebb7..db73ef97485a 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -733,7 +733,7 @@ void sctp_assoc_control_transport(struct sctp_association *asoc, default: return; - }; + } /* Generate and send a SCTP_PEER_ADDR_CHANGE notification to the * user. 
diff --git a/net/sctp/debug.c b/net/sctp/debug.c index 5f5ab28977c9..e8c0f7435d7f 100644 --- a/net/sctp/debug.c +++ b/net/sctp/debug.c @@ -93,8 +93,9 @@ const char *sctp_cname(const sctp_subtype_t cid) return "FWD_TSN"; default: - return "unknown chunk"; - }; + break; + } + return "unknown chunk"; } diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 5b0cdda4b449..ca527a27dd05 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -604,7 +604,7 @@ static sctp_scope_t sctp_v6_scope(union sctp_addr *addr) default: retval = SCTP_SCOPE_GLOBAL; break; - }; + } return retval; } diff --git a/net/sctp/output.c b/net/sctp/output.c index f875fc3ced54..d85543def754 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -176,7 +176,7 @@ sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet, case SCTP_XMIT_OK: case SCTP_XMIT_NAGLE_DELAY: break; - }; + } return retval; } diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 41abfd17627e..992f361084b7 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -338,7 +338,7 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk) SCTP_INC_STATS(SCTP_MIB_OUTORDERCHUNKS); q->empty = 0; break; - }; + } } else { list_add_tail(&chunk->list, &q->control_chunk_list); SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); @@ -630,7 +630,7 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt, /* Retrieve a new chunk to bundle. */ lchunk = sctp_list_dequeue(lqueue); break; - }; + } /* If we are here due to a retransmit timeout or a fast * retransmit and if there are any chunks left in the retransmit @@ -779,7 +779,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout) default: /* We built a chunk with an illegal type! */ BUG(); - }; + } } /* Is it OK to send data chunks? */ @@ -1397,7 +1397,7 @@ static void sctp_check_transmitted(struct sctp_outq *q, SCTP_DEBUG_PRINTK("ACKed: %08x", tsn); dbg_prt_state = 0; dbg_ack_tsn = tsn; - }; + } dbg_last_ack_tsn = tsn; #endif /* SCTP_DEBUG */ @@ -1452,7 +1452,7 @@ static void sctp_check_transmitted(struct sctp_outq *q, SCTP_DEBUG_PRINTK("KEPT: %08x",tsn); dbg_prt_state = 1; dbg_kept_tsn = tsn; - }; + } dbg_last_kept_tsn = tsn; #endif /* SCTP_DEBUG */ @@ -1476,7 +1476,7 @@ static void sctp_check_transmitted(struct sctp_outq *q, } else { SCTP_DEBUG_PRINTK("\n"); } - }; + } #endif /* SCTP_DEBUG */ if (transport) { if (bytes_acked) { diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 759ea3d19976..be783a3761c4 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -2077,7 +2077,7 @@ static int sctp_process_param(struct sctp_association *asoc, default: /* Just ignore anything else. 
*/ break; - }; + } } break; @@ -2118,7 +2118,7 @@ static int sctp_process_param(struct sctp_association *asoc, SCTP_DEBUG_PRINTK("Ignoring param: %d for association %p.\n", ntohs(param.p->type), asoc); break; - }; + } return retval; } diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 0a1a197193a2..b37a7adeb150 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -1009,7 +1009,7 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype, status, state, event_type, subtype.chunk); BUG(); break; - }; + } bail: return error; @@ -1489,7 +1489,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, printk(KERN_WARNING "Impossible command: %u, %p\n", cmd->verb, cmd->obj.ptr); break; - }; + } + if (error) break; } diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index e0ec16dd678a..9e28a5d51200 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -629,7 +629,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep, case -SCTP_IERROR_BAD_SIG: default: return sctp_sf_pdiscard(ep, asoc, type, arg, commands); - }; + } } @@ -1195,7 +1195,7 @@ static void sctp_tietags_populate(struct sctp_association *new_asoc, new_asoc->c.my_ttag = asoc->c.my_vtag; new_asoc->c.peer_ttag = asoc->c.peer_vtag; break; - }; + } /* Other parameters for the endpoint SHOULD be copied from the * existing parameters of the association (e.g. number of @@ -1904,7 +1904,7 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep, case -SCTP_IERROR_BAD_SIG: default: return sctp_sf_pdiscard(ep, asoc, type, arg, commands); - }; + } } /* Compare the tie_tag in cookie with the verification tag of @@ -1936,7 +1936,7 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep, default: /* Discard packet for all others. */ retval = sctp_sf_pdiscard(ep, asoc, type, arg, commands); break; - }; + } /* Delete the tempory new association. */ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc)); @@ -4816,7 +4816,7 @@ sctp_disposition_t sctp_sf_t2_timer_expire(const struct sctp_endpoint *ep, default: BUG(); break; - }; + } if (!reply) goto nomem; diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c index 5e54b17377f4..523071c7902f 100644 --- a/net/sctp/sm_statetable.c +++ b/net/sctp/sm_statetable.c @@ -101,7 +101,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type, default: /* Yikes! We got an illegal event type. 
*/ return &bug; - }; + } } #define TYPE_SCTP_FUNC(func) {.fn = func, .name = #func} diff --git a/net/sctp/socket.c b/net/sctp/socket.c index f904f2bc0f2c..11938fb20395 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -941,7 +941,7 @@ SCTP_STATIC int sctp_setsockopt_bindx(struct sock* sk, default: err = -EINVAL; break; - }; + } out: kfree(kaddrs); @@ -3048,7 +3048,7 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname, default: retval = -ENOPROTOOPT; break; - }; + } sctp_release_sock(sk); @@ -4873,7 +4873,7 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname, default: retval = -ENOPROTOOPT; break; - }; + } sctp_release_sock(sk); return retval; @@ -5198,7 +5198,8 @@ int sctp_inet_listen(struct socket *sock, int backlog) break; default: break; - }; + } + if (err) goto cleanup; @@ -5461,7 +5462,7 @@ SCTP_STATIC int sctp_msghdr_parse(const struct msghdr *msg, default: return -EINVAL; - }; + } } return 0; } diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 4d8c2ab864fc..961df275d5b9 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -507,7 +507,7 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport, transport->cwnd = max(transport->cwnd/2, 4*transport->asoc->pathmtu); break; - }; + } transport->partial_bytes_acked = 0; SCTP_DEBUG_PRINTK("%s: transport: %p reason: %d cwnd: " diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index 0fa4d4d4df17..34eb977a204d 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c @@ -391,7 +391,7 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *qu break; pos->next = pnext; pos = pnext; - }; + } event = sctp_skb2event(f_frag); SCTP_INC_STATS(SCTP_MIB_REASMUSRMSGS); @@ -476,7 +476,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u else first_frag = NULL; break; - }; + } } asoc = ulpq->asoc; @@ -556,7 +556,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq goto done; default: return NULL; - }; + } } /* We have the reassembled event. There is no need to look @@ -648,7 +648,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *u break; default: return NULL; - }; + } } /* We have the reassembled event. There is no need to look -- cgit v1.2.3
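
For readers consuming these notifications from user space, here is a minimal sketch, not taken from any patch above, of how an application would retrieve the ABORT chunk that the sac_info change copies into SCTP_ASSOC_CHANGE events. It assumes the lksctp-tools <netinet/sctp.h> header, a socket that has already subscribed to association change events through the SCTP_EVENTS option, and an invented helper name handle_notification(); treat it as an illustration of the API, not as code from the patches.

#include <stdio.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

/* fd is an SCTP socket that already enabled sctp_association_event via
 * setsockopt(SCTP_EVENTS); call this when the socket becomes readable.
 */
void handle_notification(int fd)
{
	union {
		union sctp_notification notif;
		char buf[4096];
	} u;
	struct iovec iov = { .iov_base = u.buf, .iov_len = sizeof(u.buf) };
	struct msghdr msg = { 0 };
	struct sctp_assoc_change *sac;
	ssize_t n;

	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	n = recvmsg(fd, &msg, 0);
	if (n <= 0 || !(msg.msg_flags & MSG_NOTIFICATION))
		return;

	if (u.notif.sn_header.sn_type != SCTP_ASSOC_CHANGE)
		return;

	sac = &u.notif.sn_assoc_change;
	if (sac->sac_state == SCTP_COMM_LOST &&
	    sac->sac_length > sizeof(*sac)) {
		/* With the sac_info patch above, the bytes past the fixed
		 * notification header are the complete ABORT chunk that the
		 * peer sent.
		 */
		size_t abort_len = sac->sac_length - sizeof(*sac);
		printf("COMM_LOST: ABORT chunk of %zu bytes in sac_info\n",
		       abort_len);
	}
}

Because the kernel pushes the chunk header back before copying, the data in sac_info starts with the ABORT chunk's own type, flags, and length fields, matching the "complete ABORT chunk" wording of the socket API draft quoted in the commit message.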
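
Similarly, a short sketch, again assuming the lksctp-tools headers and not taken from the patches themselves, of how the SCTP_MAX_BURST option and the spp_flags-gated heartbeat handling in sctp_apply_peer_addr_params() are driven from an application. At this point in the tree SCTP_MAX_BURST takes a plain int, as the setsockopt handler above shows; later API revisions may accept other argument forms, so the exact type here is an assumption tied to this patch.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
	int burst = 8;
	struct sctp_paddrparams spp;

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* sctp_setsockopt_maxburst() above copies a plain int. */
	if (setsockopt(fd, IPPROTO_SCTP, SCTP_MAX_BURST,
		       &burst, sizeof(burst)) < 0)
		perror("setsockopt(SCTP_MAX_BURST)");

	/* The heartbeat interval is applied only when SPP_HB_ENABLE is set
	 * in spp_flags; adding SPP_HB_TIME_IS_ZERO instead forces the delay
	 * to 0 ms, mirroring the kernel logic above.
	 */
	memset(&spp, 0, sizeof(spp));
	spp.spp_flags = SPP_HB_ENABLE;
	spp.spp_hbinterval = 5000;	/* milliseconds */
	if (setsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
		       &spp, sizeof(spp)) < 0)
		perror("setsockopt(SCTP_PEER_ADDR_PARAMS)");

	return 0;
}

Leaving spp_assoc_id at zero and spp_address empty makes these socket-level defaults that future associations inherit, which is the behaviour the NOTE in sctp_setsockopt_maxburst() argues for.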