-rw-r--r--  drivers/net/wan/lmc/lmc_main.c     | 55
-rw-r--r--  include/linux/Kbuild               |  1
-rw-r--r--  net/core/dev.c                     | 10
-rw-r--r--  net/core/netpoll.c                 | 37
-rw-r--r--  net/ipv4/ipvs/ip_vs_sync.c         |  5
-rw-r--r--  net/ipv4/proc.c                    |  2
-rw-r--r--  net/ipv4/tcp.c                     |  2
-rw-r--r--  net/ipv4/tcp_ipv4.c                | 10
-rw-r--r--  net/ipv4/tcp_vegas.c               | 37
-rw-r--r--  net/ipv6/ndisc.c                   |  2
-rw-r--r--  net/ipv6/tcp_ipv6.c                | 10
-rw-r--r--  net/netfilter/nf_conntrack_core.c  |  2
-rw-r--r--  net/socket.c                       |  5
-rw-r--r--  net/sunrpc/xprtrdma/rpc_rdma.c     | 10
14 files changed, 116 insertions(+), 72 deletions(-)
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 5ea877221f46..64eb57893602 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -142,9 +142,10 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
      * To date internally, just copy this out to the user.
      */
     case LMCIOCGINFO: /*fold01*/
-        if (copy_to_user(ifr->ifr_data, &sc->ictl, sizeof (lmc_ctl_t)))
-            return -EFAULT;
-        ret = 0;
+        if (copy_to_user(ifr->ifr_data, &sc->ictl, sizeof(lmc_ctl_t)))
+            ret = -EFAULT;
+        else
+            ret = 0;
         break;
 
     case LMCIOCSINFO: /*fold01*/
@@ -159,8 +160,10 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
             break;
         }
 
-        if (copy_from_user(&ctl, ifr->ifr_data, sizeof (lmc_ctl_t)))
-            return -EFAULT;
+        if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t))) {
+            ret = -EFAULT;
+            break;
+        }
 
         sc->lmc_media->set_status (sc, &ctl);
 
@@ -190,8 +193,10 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
             break;
         }
 
-        if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u_int16_t)))
-            return -EFAULT;
+        if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u_int16_t))) {
+            ret = -EFAULT;
+            break;
+        }
 
 
         if (new_type == old_type)
@@ -229,9 +234,10 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
         sc->lmc_xinfo.Magic1 = 0xDEADBEEF;
 
         if (copy_to_user(ifr->ifr_data, &sc->lmc_xinfo,
-                         sizeof (struct lmc_xinfo)))
-            return -EFAULT;
-        ret = 0;
+                         sizeof(struct lmc_xinfo)))
+            ret = -EFAULT;
+        else
+            ret = 0;
 
         break;
 
@@ -262,9 +268,9 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
 
         if (copy_to_user(ifr->ifr_data, &sc->stats,
                          sizeof (struct lmc_statistics)))
-            return -EFAULT;
-
-        ret = 0;
+            ret = -EFAULT;
+        else
+            ret = 0;
 
         break;
     case LMCIOCCLEARLMCSTATS: /*fold01*/
@@ -292,8 +298,10 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
             break;
         }
 
-        if (copy_from_user(&ctl, ifr->ifr_data, sizeof (lmc_ctl_t)))
-            return -EFAULT;
+        if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t))) {
+            ret = -EFAULT;
+            break;
+        }
         sc->lmc_media->set_circuit_type(sc, ctl.circuit_type);
         sc->ictl.circuit_type = ctl.circuit_type;
         ret = 0;
@@ -318,12 +326,15 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
 #ifdef DEBUG
     case LMCIOCDUMPEVENTLOG:
-        if (copy_to_user(ifr->ifr_data, &lmcEventLogIndex, sizeof (u32)))
-            return -EFAULT;
+        if (copy_to_user(ifr->ifr_data, &lmcEventLogIndex, sizeof(u32))) {
+            ret = -EFAULT;
+            break;
+        }
         if (copy_to_user(ifr->ifr_data + sizeof (u32), lmcEventLogBuf,
                          sizeof (lmcEventLogBuf)))
-            return -EFAULT;
+            ret = -EFAULT;
+        else
+            ret = 0;
 
-        ret = 0;
         break;
 #endif /* end ifdef _DBG_EVENTLOG */
     case LMCIOCT1CONTROL: /*fold01*/
@@ -346,8 +357,10 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
          */
         netif_stop_queue(dev);
 
-        if (copy_from_user(&xc, ifr->ifr_data, sizeof (struct lmc_xilinx_control)))
-            return -EFAULT;
+        if (copy_from_user(&xc, ifr->ifr_data, sizeof(struct lmc_xilinx_control))) {
+            ret = -EFAULT;
+            break;
+        }
         switch(xc.command){
         case lmc_xilinx_reset: /*fold02*/
         {
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 6a65231bc785..bd33c22315c1 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -149,6 +149,7 @@ header-y += ticable.h
 header-y += times.h
 header-y += tiocl.h
 header-y += tipc.h
+header-y += tipc_config.h
 header-y += toshiba.h
 header-y += ultrasound.h
 header-y += un.h
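The lmc_main.c hunks above all replace early "return -EFAULT;" statements with "ret = -EFAULT;" plus a break (or an else branch), so control always falls out of the ioctl switch instead of leaving the handler from its middle. Below is a minimal sketch of that pattern, not code from the driver: the command number and the netif_wake_queue() at the common exit are hypothetical stand-ins for whatever the real handler has to unwind after its switch.

#include <linux/netdevice.h>
#include <linux/uaccess.h>

/* Illustrative only: record the error and break, so the common exit path
 * after the switch still runs instead of being skipped by an early return. */
static int example_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
    int ret = -EOPNOTSUPP;
    u32 val;

    switch (cmd) {
    case 0x89f0:                        /* hypothetical private ioctl number */
        netif_stop_queue(dev);          /* state that must be undone below */

        if (copy_from_user(&val, ifr->ifr_data, sizeof(val))) {
            ret = -EFAULT;              /* note: no early return here */
            break;
        }
        ret = 0;
        break;
    }

    netif_wake_queue(dev);              /* common exit always executes */
    return ret;
}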
diff --git a/net/core/dev.c b/net/core/dev.c
index 853c8b575f1d..02e7d8377c4a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2172,7 +2172,15 @@ static void net_rx_action(struct softirq_action *h)
 
         weight = n->weight;
 
-        work = n->poll(n, weight);
+        /* This NAPI_STATE_SCHED test is for avoiding a race
+         * with netpoll's poll_napi().  Only the entity which
+         * obtains the lock and sees NAPI_STATE_SCHED set will
+         * actually make the ->poll() call.  Therefore we avoid
+         * accidentally calling ->poll() when NAPI is not scheduled.
+         */
+        work = 0;
+        if (test_bit(NAPI_STATE_SCHED, &n->state))
+            work = n->poll(n, weight);
 
         WARN_ON_ONCE(work > weight);
 
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index bf8d18f1b013..c499b5c69bed 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -116,6 +116,29 @@ static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
  * network adapter, forcing superfluous retries and possibly timeouts.
  * Thus, we set our budget to greater than 1.
  */
+static int poll_one_napi(struct netpoll_info *npinfo,
+                         struct napi_struct *napi, int budget)
+{
+    int work;
+
+    /* net_rx_action's ->poll() invocations and ours are
+     * synchronized by this test which is only made while
+     * holding the napi->poll_lock.
+     */
+    if (!test_bit(NAPI_STATE_SCHED, &napi->state))
+        return budget;
+
+    npinfo->rx_flags |= NETPOLL_RX_DROP;
+    atomic_inc(&trapped);
+
+    work = napi->poll(napi, budget);
+
+    atomic_dec(&trapped);
+    npinfo->rx_flags &= ~NETPOLL_RX_DROP;
+
+    return budget - work;
+}
+
 static void poll_napi(struct netpoll *np)
 {
     struct netpoll_info *npinfo = np->dev->npinfo;
@@ -123,17 +146,13 @@ static void poll_napi(struct netpoll *np)
     int budget = 16;
 
     list_for_each_entry(napi, &np->dev->napi_list, dev_list) {
-        if (test_bit(NAPI_STATE_SCHED, &napi->state) &&
-            napi->poll_owner != smp_processor_id() &&
+        if (napi->poll_owner != smp_processor_id() &&
             spin_trylock(&napi->poll_lock)) {
-            npinfo->rx_flags |= NETPOLL_RX_DROP;
-            atomic_inc(&trapped);
-
-            napi->poll(napi, budget);
-
-            atomic_dec(&trapped);
-            npinfo->rx_flags &= ~NETPOLL_RX_DROP;
+            budget = poll_one_napi(npinfo, napi, budget);
             spin_unlock(&napi->poll_lock);
+
+            if (!budget)
+                break;
         }
     }
 }
diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c
index c99f2a33fb9e..0d4d9721cbd4 100644
--- a/net/ipv4/ipvs/ip_vs_sync.c
+++ b/net/ipv4/ipvs/ip_vs_sync.c
@@ -72,7 +72,6 @@ struct ip_vs_sync_thread_data {
     int state;
 };
 
-#define IP_VS_SYNC_CONN_TIMEOUT (3*60*HZ)
 #define SIMPLE_CONN_SIZE (sizeof(struct ip_vs_sync_conn))
 #define FULL_CONN_SIZE \
 (sizeof(struct ip_vs_sync_conn) + sizeof(struct ip_vs_sync_conn_options))
@@ -284,6 +283,7 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
     struct ip_vs_sync_conn *s;
     struct ip_vs_sync_conn_options *opt;
     struct ip_vs_conn *cp;
+    struct ip_vs_protocol *pp;
     char *p;
     int i;
 
@@ -342,7 +342,8 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
             p += SIMPLE_CONN_SIZE;
 
         atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
-        cp->timeout = IP_VS_SYNC_CONN_TIMEOUT;
+        pp = ip_vs_proto_get(s->protocol);
+        cp->timeout = pp->timeout_table[cp->state];
         ip_vs_conn_put(cp);
 
         if (p > buffer+buflen) {
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 9be0daa9c0ec..ffdccc0972e0 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -304,7 +304,7 @@ static void icmp_put(struct seq_file *seq)
     for (i=0; icmpmibmap[i].name != NULL; i++)
         seq_printf(seq, " %lu",
             snmp_fold_field((void **) icmpmsg_statistics,
-                icmpmibmap[i].index));
+                icmpmibmap[i].index | 0x100));
 }
 
 /*
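The net/core/dev.c and net/core/netpoll.c hunks above implement one protocol: whoever holds napi->poll_lock checks NAPI_STATE_SCHED before invoking ->poll(), so the softirq path and netpoll never call it concurrently for the same NAPI context. The fragment below is a deliberately simplified user-space model of that rule, using C11 atomics in place of the kernel's spinlock and test_bit(); every name in it is invented for illustration.

#include <stdatomic.h>
#include <stdbool.h>

/* Toy model of "test SCHED only while holding the poll lock". */
struct toy_napi {
    atomic_flag poll_lock;      /* stands in for napi->poll_lock (init with ATOMIC_FLAG_INIT) */
    atomic_bool scheduled;      /* stands in for NAPI_STATE_SCHED */
};

/* Both the "softirq" caller and the "netpoll" caller go through this helper,
 * so at most one of them reaches do_poll() for a scheduled instance; the
 * return value mimics poll_one_napi() handing back the unused budget. */
static int toy_poll(struct toy_napi *n, int budget,
                    int (*do_poll)(struct toy_napi *, int))
{
    int work = 0;

    while (atomic_flag_test_and_set(&n->poll_lock))
        ;                               /* acquire the poll lock */

    if (atomic_load(&n->scheduled))
        work = do_poll(n, budget);      /* only the lock holder that sees
                                         * "scheduled" actually polls */

    atomic_flag_clear(&n->poll_lock);
    return budget - work;
}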
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 2e6ad6dbba6c..c64072bb504b 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2453,7 +2453,7 @@ void __init tcp_init(void)
                     0,
                     &tcp_hashinfo.ehash_size,
                     NULL,
-                    0);
+                    thash_entries ? 0 : 512 * 1024);
     tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size;
     for (i = 0; i < tcp_hashinfo.ehash_size; i++) {
         rwlock_init(&tcp_hashinfo.ehash[i].lock);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index ad759f1c3777..d3d8d5dfcee3 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -858,16 +858,16 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
                       u8 *newkey, u8 newkeylen)
 {
     /* Add Key to the list */
-    struct tcp4_md5sig_key *key;
+    struct tcp_md5sig_key *key;
     struct tcp_sock *tp = tcp_sk(sk);
     struct tcp4_md5sig_key *keys;
 
-    key = (struct tcp4_md5sig_key *)tcp_v4_md5_do_lookup(sk, addr);
+    key = tcp_v4_md5_do_lookup(sk, addr);
     if (key) {
         /* Pre-existing entry - just update that one. */
-        kfree(key->base.key);
-        key->base.key = newkey;
-        key->base.keylen = newkeylen;
+        kfree(key->key);
+        key->key = newkey;
+        key->keylen = newkeylen;
     } else {
         struct tcp_md5sig_info *md5sig;
 
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index b49dedcda52d..007304e99842 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -266,26 +266,25 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
          */
         diff = (old_wnd << V_PARAM_SHIFT) - target_cwnd;
 
-        if (tp->snd_cwnd <= tp->snd_ssthresh) {
-            /* Slow start.  */
-            if (diff > gamma) {
-                /* Going too fast. Time to slow down
-                 * and switch to congestion avoidance.
-                 */
-                tp->snd_ssthresh = 2;
-
-                /* Set cwnd to match the actual rate
-                 * exactly:
-                 *   cwnd = (actual rate) * baseRTT
-                 * Then we add 1 because the integer
-                 * truncation robs us of full link
-                 * utilization.
-                 */
-                tp->snd_cwnd = min(tp->snd_cwnd,
-                                   (target_cwnd >>
-                                    V_PARAM_SHIFT)+1);
+        if (diff > gamma && tp->snd_ssthresh > 2 ) {
+            /* Going too fast. Time to slow down
+             * and switch to congestion avoidance.
+             */
+            tp->snd_ssthresh = 2;
+
+            /* Set cwnd to match the actual rate
+             * exactly:
+             *   cwnd = (actual rate) * baseRTT
+             * Then we add 1 because the integer
+             * truncation robs us of full link
+             * utilization.
+             */
+            tp->snd_cwnd = min(tp->snd_cwnd,
+                               (target_cwnd >>
+                                V_PARAM_SHIFT)+1);
 
-            }
+        } else if (tp->snd_cwnd <= tp->snd_ssthresh) {
+            /* Slow start.  */
             tcp_slow_start(tp);
         } else {
             /* Congestion avoidance. */
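In the tcp_ipv4.c hunk above (and the matching tcp_ipv6.c hunk further down), the lookup helper already returns a pointer to the generic struct tcp_md5sig_key embedded in the per-family key, so the update path can drop the cast and the ->base. indirection by keeping the generic type. A rough user-space sketch of that embed-a-base-struct idiom follows; the structure and function names are invented for the example, not the kernel's.

#include <stddef.h>
#include <stdlib.h>

/* Generic part, shared by every address family. */
struct md5_key_base {
    char *key;
    unsigned char keylen;
};

/* Family-specific wrapper embedding the generic part, the way
 * tcp4_md5sig_key embeds struct tcp_md5sig_key. */
struct md5_key_v4 {
    struct md5_key_base base;
    unsigned int addr;
};

/* A lookup returns the embedded base, mirroring tcp_v4_md5_do_lookup(). */
static struct md5_key_base *md5_lookup(struct md5_key_v4 *keys, size_t n,
                                       unsigned int addr)
{
    for (size_t i = 0; i < n; i++)
        if (keys[i].addr == addr)
            return &keys[i].base;
    return NULL;
}

/* Updating an existing entry touches only the generic fields, so it can
 * work on the base type directly - no cast, no ->base. dereferences. */
static void md5_update(struct md5_key_base *key, char *newkey,
                       unsigned char newkeylen)
{
    free(key->key);
    key->key = newkey;
    key->keylen = newkeylen;
}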
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 20cfc90d5597..36f7dbfb6dbb 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1670,7 +1670,7 @@ int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, struct file * f
                      filp, buffer, lenp, ppos);
     else if ((strcmp(ctl->procname, "retrans_time_ms") == 0) ||
-             (strcmp(ctl->procname, "base_reacable_time_ms") == 0))
+             (strcmp(ctl->procname, "base_reachable_time_ms") == 0))
         ret = proc_dointvec_ms_jiffies(ctl, write,
                                        filp, buffer, lenp, ppos);
     else
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 85208026278b..f1523b82cac1 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -561,16 +561,16 @@ static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
                              char *newkey, u8 newkeylen)
 {
     /* Add key to the list */
-    struct tcp6_md5sig_key *key;
+    struct tcp_md5sig_key *key;
     struct tcp_sock *tp = tcp_sk(sk);
     struct tcp6_md5sig_key *keys;
 
-    key = (struct tcp6_md5sig_key*) tcp_v6_md5_do_lookup(sk, peer);
+    key = tcp_v6_md5_do_lookup(sk, peer);
     if (key) {
         /* modify existing entry - just update that one */
-        kfree(key->base.key);
-        key->base.key = newkey;
-        key->base.keylen = newkeylen;
+        kfree(key->key);
+        key->key = newkey;
+        key->keylen = newkeylen;
     } else {
         /* reallocate new list if current one is full. */
         if (!tp->md5sig_info) {
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 4d6171bc0829..000c2fb462d0 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -999,7 +999,7 @@ struct hlist_head *nf_ct_alloc_hashtable(int *sizep, int *vmalloced)
     *vmalloced = 0;
 
     size = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_head));
-    hash = (void*)__get_free_pages(GFP_KERNEL,
+    hash = (void*)__get_free_pages(GFP_KERNEL|__GFP_NOWARN,
                                    get_order(sizeof(struct hlist_head)
                                              * size));
     if (!hash) {
diff --git a/net/socket.c b/net/socket.c
index 540013ea8620..5d879fd3d01d 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1250,11 +1250,14 @@ asmlinkage long sys_socketpair(int family, int type, int protocol,
         goto out_release_both;
 
     fd1 = sock_alloc_fd(&newfile1);
-    if (unlikely(fd1 < 0))
+    if (unlikely(fd1 < 0)) {
+        err = fd1;
         goto out_release_both;
+    }
 
     fd2 = sock_alloc_fd(&newfile2);
     if (unlikely(fd2 < 0)) {
+        err = fd2;
         put_filp(newfile1);
         put_unused_fd(fd1);
         goto out_release_both;
"more" : "last"); cur_wchunk++; if (type == rpcrdma_replych) r_xprt->rx_stats.reply_chunk_count++; @@ -577,7 +577,7 @@ rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, __be32 **ipt dprintk("RPC: %s: chunk %d@0x%llx:0x%x\n", __func__, ntohl(seg->rs_length), - off, + (unsigned long long)off, ntohl(seg->rs_handle)); } total_len += ntohl(seg->rs_length); |