author | Thomas Gleixner <tglx@linutronix.de> | 2008-05-17 17:12:24 +0200
---|---|---
committer | Thomas Gleixner <tglx@linutronix.de> | 2008-05-17 17:12:24 +0200
commit | 538f0fd0f210c2ce5c585799f18d0e5c7cf6155e |
tree | e9fa2b10ce5d92ac6bcd8ac55af1cd97bda3ec5d /net/ipv4 |
parent | 3bb6fbf9969a8bbe4892968659239273d092e78a |
parent | f26a3988917913b3d11b2bd741601a2c64ab9204 |
Merge branch 'linus' into x86/gart (tip-x86-gart-2008-05-17-15-12-25)
Diffstat (limited to 'net/ipv4')
-rw-r--r-- | net/ipv4/arp.c | 2
-rw-r--r-- | net/ipv4/cipso_ipv4.c | 4
-rw-r--r-- | net/ipv4/igmp.c | 4
-rw-r--r-- | net/ipv4/ipconfig.c | 6
-rw-r--r-- | net/ipv4/raw.c | 10
-rw-r--r-- | net/ipv4/tcp_input.c | 17
6 files changed, 24 insertions, 19 deletions
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 68b72a7a1806..418862f1bf22 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -570,7 +570,7 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
 	 *	Allocate a buffer
 	 */
-	skb = alloc_skb(arp_hdr_len(dev) + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
+	skb = alloc_skb(arp_hdr_len(dev) + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
 	if (skb == NULL)
 		return NULL;
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 05afb576d935..2c0e4572cc90 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -338,7 +338,7 @@ static int cipso_v4_cache_check(const unsigned char *key,
 		return -ENOENT;
 	hash = cipso_v4_map_cache_hash(key, key_len);
-	bkt = hash & (CIPSO_V4_CACHE_BUCKETBITS - 1);
+	bkt = hash & (CIPSO_V4_CACHE_BUCKETS - 1);
 	spin_lock_bh(&cipso_v4_cache[bkt].lock);
 	list_for_each_entry(entry, &cipso_v4_cache[bkt].list, list) {
 		if (entry->hash == hash &&
@@ -417,7 +417,7 @@ int cipso_v4_cache_add(const struct sk_buff *skb,
 	atomic_inc(&secattr->cache->refcount);
 	entry->lsm_data = secattr->cache;
-	bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETBITS - 1);
+	bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1);
 	spin_lock_bh(&cipso_v4_cache[bkt].lock);
 	if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) {
 		list_add(&entry->list, &cipso_v4_cache[bkt].list);
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 6250f4239b61..2769dc4a4c84 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -292,7 +292,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
 	struct iphdr *pip;
 	struct igmpv3_report *pig;
-	skb = alloc_skb(size + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
+	skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
 	if (skb == NULL)
 		return NULL;
@@ -653,7 +653,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
 		return -1;
 	}
-	skb=alloc_skb(IGMP_SIZE+LL_RESERVED_SPACE(dev), GFP_ATOMIC);
+	skb=alloc_skb(IGMP_SIZE+LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
 	if (skb == NULL) {
 		ip_rt_put(rt);
 		return -1;
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 89dee4346f60..ed45037ce9be 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -710,14 +710,14 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d
 	struct net_device *dev = d->dev;
 	struct sk_buff *skb;
 	struct bootp_pkt *b;
-	int hh_len = LL_RESERVED_SPACE(dev);
 	struct iphdr *h;
 	/* Allocate packet */
-	skb = alloc_skb(sizeof(struct bootp_pkt) + hh_len + 15, GFP_KERNEL);
+	skb = alloc_skb(sizeof(struct bootp_pkt) + LL_ALLOCATED_SPACE(dev) + 15,
+			GFP_KERNEL);
 	if (!skb)
 		return;
-	skb_reserve(skb, hh_len);
+	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 	b = (struct bootp_pkt *) skb_put(skb, sizeof(struct bootp_pkt));
 	memset(b, 0, sizeof(struct bootp_pkt));
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 11d7f753a820..fead049daf43 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -322,7 +322,6 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
 			   unsigned int flags)
 {
 	struct inet_sock *inet = inet_sk(sk);
-	int hh_len;
 	struct iphdr *iph;
 	struct sk_buff *skb;
 	unsigned int iphlen;
@@ -336,13 +335,12 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
 	if (flags&MSG_PROBE)
 		goto out;
-	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
-
-	skb = sock_alloc_send_skb(sk, length+hh_len+15,
-				  flags&MSG_DONTWAIT, &err);
+	skb = sock_alloc_send_skb(sk,
+				  length + LL_ALLOCATED_SPACE(rt->u.dst.dev) + 15,
+				  flags & MSG_DONTWAIT, &err);
 	if (skb == NULL)
 		goto error;
-	skb_reserve(skb, hh_len);
+	skb_reserve(skb, LL_RESERVED_SPACE(rt->u.dst.dev));
 	skb->priority = sk->sk_priority;
 	skb->mark = sk->sk_mark;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 26c936930e92..b54d9d37b636 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1842,9 +1842,16 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
 			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
 		}
-		/* Don't lost mark skbs that were fwd transmitted after RTO */
-		if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) &&
-		    !after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark)) {
+		/* Marking forward transmissions that were made after RTO lost
+		 * can cause unnecessary retransmissions in some scenarios,
+		 * SACK blocks will mitigate that in some but not in all cases.
+		 * We used to not mark them but it was causing break-ups with
+		 * receivers that do only in-order receival.
+		 *
+		 * TODO: we could detect presence of such receiver and select
+		 * different behavior per flow.
+		 */
+		if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
 			TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
 			tp->lost_out += tcp_skb_pcount(skb);
 		}
@@ -1860,7 +1867,7 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
 	tp->reordering = min_t(unsigned int, tp->reordering,
 			       sysctl_tcp_reordering);
 	tcp_set_ca_state(sk, TCP_CA_Loss);
-	tp->high_seq = tp->frto_highmark;
+	tp->high_seq = tp->snd_nxt;
 	TCP_ECN_queue_cwr(tp);
 	tcp_clear_retrans_hints_partial(tp);
@@ -2482,7 +2489,7 @@ static void tcp_try_to_open(struct sock *sk, int flag)
 	tcp_verify_left_out(tp);
-	if (tp->retrans_out == 0)
+	if (!tp->frto_counter && tp->retrans_out == 0)
 		tp->retrans_stamp = 0;
 	if (flag & FLAG_ECE)
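
Note: apart from the tcp_input.c FRTO hunks, every change above follows the same conversion: the skb is sized with LL_ALLOCATED_SPACE(dev), which covers both the headroom and any tailroom the device needs, while only LL_RESERVED_SPACE(dev) is reserved in front of the payload. The snippet below is a minimal sketch of that pattern, assuming a 2.6.26-era kernel where both macros are available from <linux/netdevice.h>; the function name example_alloc_for_dev and the payload_len parameter are illustrative only and not part of this diff.

```c
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/gfp.h>

/* Sketch of the allocation pattern used by the net/ipv4 hunks above. */
static struct sk_buff *example_alloc_for_dev(struct net_device *dev,
					     unsigned int payload_len)
{
	struct sk_buff *skb;

	/* Size the buffer for the headroom *and* tailroom the device may need. */
	skb = alloc_skb(payload_len + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	/* Reserve only the headroom in front of the payload; the remaining
	 * tailroom stays available after the data. */
	skb_reserve(skb, LL_RESERVED_SPACE(dev));

	return skb;
}
```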