author     David S. Miller <davem@davemloft.net>    2008-04-17 23:56:30 -0700
committer  David S. Miller <davem@davemloft.net>    2008-04-17 23:56:30 -0700
commit     1e42198609d73ed1a9adcba2af275c24c2678420 (patch)
tree       32fd4d9073bfc0f3909af8f9fb4bcff38951d01a /net/ipv4/tcp_input.c
parent     794eb6bf20ebf992c040ea831cd3a9c64b0c1f7a (diff)
parent     4b119e21d0c66c22e8ca03df05d9de623d0eb50f (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--   net/ipv4/tcp_input.c   78
1 file changed, 52 insertions(+), 26 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 1bdb1bd22134..cdc051bfdb4d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3854,8 +3854,28 @@ static void tcp_ofo_queue(struct sock *sk)
 	}
 }
 
+static int tcp_prune_ofo_queue(struct sock *sk);
 static int tcp_prune_queue(struct sock *sk);
 
+static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
+{
+	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
+	    !sk_rmem_schedule(sk, size)) {
+
+		if (tcp_prune_queue(sk) < 0)
+			return -1;
+
+		if (!sk_rmem_schedule(sk, size)) {
+			if (!tcp_prune_ofo_queue(sk))
+				return -1;
+
+			if (!sk_rmem_schedule(sk, size))
+				return -1;
+		}
+	}
+	return 0;
+}
+
 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcphdr *th = tcp_hdr(skb);
@@ -3905,12 +3925,9 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 	if (eaten <= 0) {
 queue_and_out:
 		if (eaten < 0 &&
-		    (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
-		     !sk_rmem_schedule(sk, skb->truesize))) {
-			if (tcp_prune_queue(sk) < 0 ||
-			    !sk_rmem_schedule(sk, skb->truesize))
-				goto drop;
-		}
+		    tcp_try_rmem_schedule(sk, skb->truesize))
+			goto drop;
+
 		skb_set_owner_r(skb, sk);
 		__skb_queue_tail(&sk->sk_receive_queue, skb);
 	}
@@ -3979,12 +3996,8 @@ drop:
 
 	TCP_ECN_check_ce(tp, skb);
 
-	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
-	    !sk_rmem_schedule(sk, skb->truesize)) {
-		if (tcp_prune_queue(sk) < 0 ||
-		    !sk_rmem_schedule(sk, skb->truesize))
-			goto drop;
-	}
+	if (tcp_try_rmem_schedule(sk, skb->truesize))
+		goto drop;
 
 	/* Disable header prediction. */
 	tp->pred_flags = 0;
@@ -4211,6 +4224,32 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
 	}
 }
 
+/*
+ * Purge the out-of-order queue.
+ * Return true if queue was pruned.
+ */
+static int tcp_prune_ofo_queue(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	int res = 0;
+
+	if (!skb_queue_empty(&tp->out_of_order_queue)) {
+		NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED);
+		__skb_queue_purge(&tp->out_of_order_queue);
+
+		/* Reset SACK state.  A conforming SACK implementation will
+		 * do the same at a timeout based retransmit.  When a connection
+		 * is in a sad state like this, we care only about integrity
+		 * of the connection not performance.
+		 */
+		if (tp->rx_opt.sack_ok)
+			tcp_sack_reset(&tp->rx_opt);
+		sk_mem_reclaim(sk);
+		res = 1;
+	}
+	return res;
+}
+
 /* Reduce allocated memory if we can, trying to get
  * the socket within its memory limits again.
  *
@@ -4244,20 +4283,7 @@ static int tcp_prune_queue(struct sock *sk)
 	/* Collapsing did not help, destructive actions follow.
 	 * This must not ever occur. */
 
-	/* First, purge the out_of_order queue. */
-	if (!skb_queue_empty(&tp->out_of_order_queue)) {
-		NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED);
-		__skb_queue_purge(&tp->out_of_order_queue);
-
-		/* Reset SACK state.  A conforming SACK implementation will
-		 * do the same at a timeout based retransmit.  When a connection
-		 * is in a sad state like this, we care only about integrity
-		 * of the connection not performance.
-		 */
-		if (tcp_is_sack(tp))
-			tcp_sack_reset(&tp->rx_opt);
-		sk_mem_reclaim(sk);
-	}
+	tcp_prune_ofo_queue(sk);
 
 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
 		return 0;