-rw-r--r--   net/bridge/netfilter/ebtables.c          | 12
-rw-r--r--   net/core/dev.c                           |  2
-rw-r--r--   net/core/flow.c                          |  4
-rw-r--r--   net/core/neighbour.c                     |  2
-rw-r--r--   net/core/utils.c                         |  4
-rw-r--r--   net/ipv4/icmp.c                          |  2
-rw-r--r--   net/ipv4/ipcomp.c                        |  8
-rw-r--r--   net/ipv4/netfilter/arp_tables.c          |  4
-rw-r--r--   net/ipv4/netfilter/ip_conntrack_core.c   |  2
-rw-r--r--   net/ipv4/netfilter/ip_tables.c           |  4
-rw-r--r--   net/ipv4/proc.c                          |  4
-rw-r--r--   net/ipv4/route.c                         |  2
-rw-r--r--   net/ipv6/icmp.c                          |  4
-rw-r--r--   net/ipv6/ipcomp6.c                       |  8
-rw-r--r--   net/ipv6/netfilter/ip6_tables.c          |  4
-rw-r--r--   net/ipv6/proc.c                          |  4
-rw-r--r--   net/netfilter/nf_conntrack_core.c        |  2
-rw-r--r--   net/netfilter/x_tables.c                 |  4
-rw-r--r--   net/sctp/proc.c                          |  2
-rw-r--r--   net/socket.c                             |  2
20 files changed, 40 insertions, 40 deletions
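For readers outside the kernel tree: every hunk below replaces the old one-argument for_each_cpu(i) iterator with the explicitly named for_each_possible_cpu(i). Both walk the set of CPUs that could ever be brought online, which matters here because the per-CPU counters, sockets and scratch buffers being touched exist for every possible CPU, not only the ones currently running. The following is a hypothetical userspace sketch of that distinction, not code from this patch; possible_mask, online_mask and for_each_set_cpu are made-up stand-ins for the kernel's cpumask machinery.

/*
 * Hypothetical userspace sketch -- NOT kernel code and not part of this
 * patch.  It mimics why the loops in the diff walk the "possible" CPU
 * map: per-CPU counters exist for every CPU that may ever come online,
 * so folding them only over the currently online CPUs would drop data.
 */
#include <stdio.h>

#define NR_CPUS 8

static const unsigned long possible_mask = 0xffUL; /* CPUs 0-7 may exist  */
static const unsigned long online_mask   = 0x0fUL; /* CPUs 0-3 are up now */

#define for_each_set_cpu(cpu, mask) \
	for ((cpu) = 0; (cpu) < NR_CPUS; (cpu)++) \
		if ((mask) & (1UL << (cpu)))

int main(void)
{
	/* Pretend CPUs 4-7 accumulated traffic before being hot-unplugged. */
	unsigned long per_cpu_counter[NR_CPUS] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	unsigned long sum_possible = 0, sum_online = 0;
	int cpu;

	for_each_set_cpu(cpu, possible_mask)
		sum_possible += per_cpu_counter[cpu];
	for_each_set_cpu(cpu, online_mask)
		sum_online += per_cpu_counter[cpu];

	printf("possible: %lu, online only: %lu (stats lost)\n",
	       sum_possible, sum_online);
	return 0;
}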
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 01eae97c53d9..66bd93252c4e 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -829,7 +829,7 @@ static int translate_table(struct ebt_replace *repl,
 				* sizeof(struct ebt_chainstack));
 		if (!newinfo->chainstack)
 			return -ENOMEM;
-		for_each_cpu(i) {
+		for_each_possible_cpu(i) {
 			newinfo->chainstack[i] =
 			   vmalloc(udc_cnt * sizeof(struct ebt_chainstack));
 			if (!newinfo->chainstack[i]) {
@@ -901,7 +901,7 @@ static void get_counters(struct ebt_counter *oldcounters,
 	       sizeof(struct ebt_counter) * nentries);
 
 	/* add other counters to those of cpu 0 */
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		if (cpu == 0)
 			continue;
 		counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
@@ -1036,7 +1036,7 @@ static int do_replace(void __user *user, unsigned int len)
 
 	vfree(table->entries);
 	if (table->chainstack) {
-		for_each_cpu(i)
+		for_each_possible_cpu(i)
 			vfree(table->chainstack[i]);
 		vfree(table->chainstack);
 	}
@@ -1054,7 +1054,7 @@ free_counterstmp:
 	vfree(counterstmp);
 	/* can be initialized in translate_table() */
 	if (newinfo->chainstack) {
-		for_each_cpu(i)
+		for_each_possible_cpu(i)
 			vfree(newinfo->chainstack[i]);
 		vfree(newinfo->chainstack);
 	}
@@ -1201,7 +1201,7 @@ free_unlock:
 	mutex_unlock(&ebt_mutex);
 free_chainstack:
 	if (newinfo->chainstack) {
-		for_each_cpu(i)
+		for_each_possible_cpu(i)
 			vfree(newinfo->chainstack[i]);
 		vfree(newinfo->chainstack);
 	}
@@ -1224,7 +1224,7 @@ void ebt_unregister_table(struct ebt_table *table)
 	mutex_unlock(&ebt_mutex);
 	vfree(table->private->entries);
 	if (table->private->chainstack) {
-		for_each_cpu(i)
+		for_each_possible_cpu(i)
 			vfree(table->private->chainstack[i]);
 		vfree(table->private->chainstack);
 	}
diff --git a/net/core/dev.c b/net/core/dev.c
index 2731570eba5b..83231a27ae02 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3346,7 +3346,7 @@ static int __init net_dev_init(void)
 	/*
 	 *	Initialise the packet receive queues.
 	 */
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		struct softnet_data *queue;
 
 		queue = &per_cpu(softnet_data, i);
diff --git a/net/core/flow.c b/net/core/flow.c
index 885a2f655db0..2191af5f26ac 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -79,7 +79,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
 {
 	int i;
 
-	for_each_cpu(i)
+	for_each_possible_cpu(i)
 		flow_hash_rnd_recalc(i) = 1;
 
 	flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
@@ -361,7 +361,7 @@ static int __init flow_cache_init(void)
 	flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
 	add_timer(&flow_hash_rnd_timer);
 
-	for_each_cpu(i)
+	for_each_possible_cpu(i)
 		flow_cache_cpu_prepare(i);
 
 	hotcpu_notifier(flow_cache_cpu, 0);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 2ec8693fb778..4cf878efdb49 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1627,7 +1627,7 @@ static int neightbl_fill_info(struct neigh_table *tbl, struct sk_buff *skb,
 
 		memset(&ndst, 0, sizeof(ndst));
 
-		for_each_cpu(cpu) {
+		for_each_possible_cpu(cpu) {
 			struct neigh_statistics	*st;
 
 			st = per_cpu_ptr(tbl->stats, cpu);
diff --git a/net/core/utils.c b/net/core/utils.c
index fdc4f38bc46c..4f96f389243d 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -121,7 +121,7 @@ void __init net_random_init(void)
 {
 	int i;
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		struct nrnd_state *state = &per_cpu(net_rand_state,i);
 		__net_srandom(state, i+jiffies);
 	}
@@ -133,7 +133,7 @@ static int net_random_reseed(void)
 	unsigned long seed[NR_CPUS];
 
 	get_random_bytes(seed, sizeof(seed));
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		struct nrnd_state *state = &per_cpu(net_rand_state,i);
 		__net_srandom(state, seed[i]);
 	}
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 9831fd2c73a0..2a0455911ee0 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -1107,7 +1107,7 @@ void __init icmp_init(struct net_proto_family *ops)
 	struct inet_sock *inet;
 	int i;
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		int err;
 
 		err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_ICMP,
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 0a1d86a0f632..04a429465665 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -290,7 +290,7 @@ static void ipcomp_free_scratches(void)
 	if (!scratches)
 		return;
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		void *scratch = *per_cpu_ptr(scratches, i);
 		if (scratch)
 			vfree(scratch);
@@ -313,7 +313,7 @@ static void **ipcomp_alloc_scratches(void)
 
 	ipcomp_scratches = scratches;
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE);
 		if (!scratch)
 			return NULL;
@@ -344,7 +344,7 @@ static void ipcomp_free_tfms(struct crypto_tfm **tfms)
 	if (!tfms)
 		return;
 
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu);
 		crypto_free_tfm(tfm);
 	}
@@ -384,7 +384,7 @@ static struct crypto_tfm **ipcomp_alloc_tfms(const char *alg_name)
 	if (!tfms)
 		goto error;
 
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0);
 		if (!tfm)
 			goto error;
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index a44a5d73457d..c2d92f99a2b8 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -646,7 +646,7 @@ static int translate_table(const char *name,
 	}
 
 	/* And one copy for every other CPU */
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
 			memcpy(newinfo->entries[i], entry0, newinfo->size);
 	}
@@ -696,7 +696,7 @@ static void get_counters(const struct xt_table_info *t,
 			  counters,
 			  &i);
 
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		if (cpu == curcpu)
 			continue;
 		i = 0;
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index ceaabc18202b..979a2eac6f00 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -133,7 +133,7 @@ static void ip_ct_event_cache_flush(void)
 	struct ip_conntrack_ecache *ecache;
 	int cpu;
 
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		ecache = &per_cpu(ip_conntrack_ecache, cpu);
 		if (ecache->ct)
 			ip_conntrack_put(ecache->ct);
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index d5b8cdd361ce..d25ac8ba6eba 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -735,7 +735,7 @@ translate_table(const char *name,
 	}
 
 	/* And one copy for every other CPU */
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
 			memcpy(newinfo->entries[i], entry0, newinfo->size);
 	}
@@ -788,7 +788,7 @@ get_counters(const struct xt_table_info *t,
 			  counters,
 			  &i);
 
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		if (cpu == curcpu)
 			continue;
 		i = 0;
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 1b167c4bb3be..d61e2a9d394d 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -49,7 +49,7 @@ static int fold_prot_inuse(struct proto *proto)
 	int res = 0;
 	int cpu;
 
-	for_each_cpu(cpu)
+	for_each_possible_cpu(cpu)
 		res += proto->stats[cpu].inuse;
 
 	return res;
@@ -91,7 +91,7 @@ fold_field(void *mib[], int offt)
 	unsigned long res = 0;
 	int i;
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		res += *(((unsigned long *) per_cpu_ptr(mib[0], i)) + offt);
 		res += *(((unsigned long *) per_cpu_ptr(mib[1], i)) + offt);
 	}
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 94fcbc5e5a1b..ff434821909f 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -3083,7 +3083,7 @@ static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
 	memcpy(dst, src, length);
 
 	/* Add the other cpus in, one int at a time */
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		unsigned int j;
 
 		src = ((u32 *) IP_RT_ACCT_CPU(i)) + offset;
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 21eb725e885f..1044b6fce0d5 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -717,7 +717,7 @@ int __init icmpv6_init(struct net_proto_family *ops)
 	struct sock *sk;
 	int err, i, j;
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6,
 				       &per_cpu(__icmpv6_socket, i));
 		if (err < 0) {
@@ -763,7 +763,7 @@ void icmpv6_cleanup(void)
 {
 	int i;
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		sock_release(per_cpu(__icmpv6_socket, i));
 	}
 	inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 00f3fadfcca7..05eb67def39f 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -290,7 +290,7 @@ static void ipcomp6_free_scratches(void)
 	if (!scratches)
 		return;
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		void *scratch = *per_cpu_ptr(scratches, i);
 
 		vfree(scratch);
@@ -313,7 +313,7 @@ static void **ipcomp6_alloc_scratches(void)
 
 	ipcomp6_scratches = scratches;
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE);
 		if (!scratch)
 			return NULL;
@@ -344,7 +344,7 @@ static void ipcomp6_free_tfms(struct crypto_tfm **tfms)
 	if (!tfms)
 		return;
 
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu);
 		crypto_free_tfm(tfm);
 	}
@@ -384,7 +384,7 @@ static struct crypto_tfm **ipcomp6_alloc_tfms(const char *alg_name)
 	if (!tfms)
 		goto error;
 
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0);
 		if (!tfm)
 			goto error;
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 3ecf2db841f8..642b4b11464f 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -788,7 +788,7 @@ translate_table(const char *name,
 	}
 
 	/* And one copy for every other CPU */
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
 			memcpy(newinfo->entries[i], entry0, newinfo->size);
 	}
@@ -841,7 +841,7 @@ get_counters(const struct xt_table_info *t,
 			  counters,
 			  &i);
 
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		if (cpu == curcpu)
 			continue;
 		i = 0;
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 4238b1ed8860..779ddf77f4d4 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -38,7 +38,7 @@ static int fold_prot_inuse(struct proto *proto)
 	int res = 0;
 	int cpu;
 
-	for_each_cpu(cpu)
+	for_each_possible_cpu(cpu)
 		res += proto->stats[cpu].inuse;
 
 	return res;
@@ -140,7 +140,7 @@ fold_field(void *mib[], int offt)
 	unsigned long res = 0;
 	int i;
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		res += *(((unsigned long *)per_cpu_ptr(mib[0], i)) + offt);
 		res += *(((unsigned long *)per_cpu_ptr(mib[1], i)) + offt);
 	}
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 56389c83557c..e581190fb6c3 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -146,7 +146,7 @@ static void nf_ct_event_cache_flush(void)
 	struct nf_conntrack_ecache *ecache;
 	int cpu;
 
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		ecache = &per_cpu(nf_conntrack_ecache, cpu);
 		if (ecache->ct)
 			nf_ct_put(ecache->ct);
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index feb8a9e066b0..00cf0a4f4d92 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -413,7 +413,7 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
 
 	newinfo->size = size;
 
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		if (size <= PAGE_SIZE)
 			newinfo->entries[cpu] = kmalloc_node(size,
 							GFP_KERNEL,
@@ -436,7 +436,7 @@ void xt_free_table_info(struct xt_table_info *info)
 {
 	int cpu;
 
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		if (info->size <= PAGE_SIZE)
 			kfree(info->entries[cpu]);
 		else
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index d47a52c303a8..5b3b0e0ae7e5 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -69,7 +69,7 @@ fold_field(void *mib[], int nr)
 	unsigned long res = 0;
 	int i;
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		res += *((unsigned long *) (((void *) per_cpu_ptr(mib[0], i)) +
 					    sizeof (unsigned long) * nr));
diff --git a/net/socket.c b/net/socket.c
index b807f360e02c..00cdfd2088db 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2136,7 +2136,7 @@ void socket_seq_show(struct seq_file *seq)
 	int cpu;
 	int counter = 0;
 
-	for_each_cpu(cpu)
+	for_each_possible_cpu(cpu)
 		counter += per_cpu(sockets_in_use, cpu);
 
 	/* It can be negative, by the way. 8) */