-rw-r--r--  include/net/netdma.h | 2 +-
-rw-r--r--  net/core/dev.c       | 4 ++--
-rw-r--r--  net/ipv4/tcp.c       | 2 +-
3 files changed, 4 insertions, 4 deletions
diff --git a/include/net/netdma.h b/include/net/netdma.h
index ceae5ee85c04..7f53cd1d8b1e 100644
--- a/include/net/netdma.h
+++ b/include/net/netdma.h
@@ -29,7 +29,7 @@ static inline struct dma_chan *get_softnet_dma(void)
 {
 	struct dma_chan *chan;
 	rcu_read_lock();
-	chan = rcu_dereference(__get_cpu_var(softnet_data.net_dma));
+	chan = rcu_dereference(__get_cpu_var(softnet_data).net_dma);
 	if (chan)
 		dma_chan_get(chan);
 	rcu_read_unlock();
diff --git a/net/core/dev.c b/net/core/dev.c
index f25d7ecaf035..d95e2626d944 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3421,7 +3421,7 @@ static void net_dma_rebalance(void)
 
 	if (net_dma_count == 0) {
 		for_each_online_cpu(cpu)
-			rcu_assign_pointer(per_cpu(softnet_data.net_dma, cpu), NULL);
+			rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
 		return;
 	}
 
@@ -3434,7 +3434,7 @@ static void net_dma_rebalance(void)
 		   + (i < (num_online_cpus() % net_dma_count) ? 1 : 0));
 
 		while(n) {
-			per_cpu(softnet_data.net_dma, cpu) = chan;
+			per_cpu(softnet_data, cpu).net_dma = chan;
 			cpu = next_cpu(cpu, cpu_online_map);
 			n--;
 		}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 7b621e44b124..934396bb1376 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1132,7 +1132,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	tp->ucopy.dma_chan = NULL;
 	preempt_disable();
 	if ((len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
-	    !sysctl_tcp_low_latency && __get_cpu_var(softnet_data.net_dma)) {
+	    !sysctl_tcp_low_latency && __get_cpu_var(softnet_data).net_dma) {
 		preempt_enable_no_resched();
 		tp->ucopy.pinned_list = dma_pin_iovec_pages(msg->msg_iov, len);
 	} else
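The common thread in all three hunks is the shape of a per-CPU access: the accessor macro must be handed the whole per-CPU variable (softnet_data), and the struct member (.net_dma) is selected from the result, not folded into the macro argument. Below is a minimal user-space sketch of that pattern, assuming made-up stand-in macros fake_get_cpu_var() and fake_per_cpu() and an array-backed "per-CPU" store; it is an illustration of the corrected access shape, not the kernel's real per-CPU implementation, whose macro expansion differs.

    /*
     * User-space sketch of the per-CPU member-access pattern this patch
     * converts to.  All names here are illustrative stand-ins, not kernel API.
     */
    #include <stdio.h>

    #define NR_CPUS 2

    struct softnet_data {
    	void *net_dma;			/* DMA channel slot, as in the patched code */
    };

    /* Fake per-CPU storage: one softnet_data instance per CPU. */
    static struct softnet_data softnet_data_percpu[NR_CPUS];
    static int current_cpu;		/* stand-in for smp_processor_id() */

    /* Stand-in for __get_cpu_var(var): this CPU's instance of 'var'. */
    #define fake_get_cpu_var(var)	(var##_percpu[current_cpu])
    /* Stand-in for per_cpu(var, cpu): 'cpu''s instance of 'var'. */
    #define fake_per_cpu(var, cpu)	(var##_percpu[(cpu)])

    int main(void)
    {
    	int cpu;

    	/* Corrected shape: resolve the per-CPU struct first, then the member. */
    	for (cpu = 0; cpu < NR_CPUS; cpu++)
    		fake_per_cpu(softnet_data, cpu).net_dma = NULL;

    	current_cpu = 1;
    	fake_get_cpu_var(softnet_data).net_dma = (void *)0x1;

    	/*
    	 * With these stand-in macros, the pre-patch shape
    	 * fake_get_cpu_var(softnet_data.net_dma) would not even compile,
    	 * since the macro argument is pasted into a storage name.
    	 */
    	printf("cpu1 net_dma = %p\n", fake_per_cpu(softnet_data, 1).net_dma);
    	return 0;
    }

Whether the old softnet_data.net_dma form happened to expand to something usable depended on the particular per-CPU implementation; passing the bare variable and selecting .net_dma outside the macro, as the patch does, keeps the access independent of how the macro resolves its argument.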