author	Andrew Morton <akpm@osdl.org>	2006-03-23 03:01:05 -0800
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-03-23 07:38:17 -0800
commit	394e3902c55e667945f6f1c2bdbc59842cce70f7 (patch)
tree	f4bca0bdc0c291fda6f6949265aacec0669b9084 /arch/i386
parent	63872f87a151413100678f110d1556026002809e (diff)
[PATCH] more for_each_cpu() conversions
When we stop allocating percpu memory for not-possible CPUs we must not touch
the percpu data for not-possible CPUs at all.  The correct way of doing this is
to test cpu_possible() or to use for_each_cpu().

This patch is a kernel-wide sweep of all instances of NR_CPUS.  I found very
few instances of this bug, if any.  But the patch converts lots of open-coded
tests to use the preferred helper macros.

Cc: Mikael Starvik <starvik@axis.com>
Cc: David Howells <dhowells@redhat.com>
Acked-by: Kyle McMartin <kyle@parisc-linux.org>
Cc: Anton Blanchard <anton@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: William Lee Irwin III <wli@holomorphy.com>
Cc: Andi Kleen <ak@muc.de>
Cc: Christian Zankel <chris@zankel.net>
Cc: Philippe Elie <phil.el@wanadoo.fr>
Cc: Nathan Scott <nathans@sgi.com>
Cc: Jens Axboe <axboe@suse.de>
Cc: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
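As a minimal sketch of the two conversion patterns applied in the diff below
(kernel-style C; do_per_cpu_work() is a hypothetical stand-in, and
for_each_cpu() is the 2.6.16-era macro that walks every possible CPU, later
renamed for_each_possible_cpu()):

	int i;

	/*
	 * Before: an open-coded NR_CPUS loop.  Once percpu memory is
	 * allocated only for possible CPUs, touching per-CPU data for a
	 * not-possible i is a bug.
	 */
	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_online(i))
			continue;
		do_per_cpu_work(i);	/* hypothetical helper */
	}

	/* After: visit only online CPUs ... */
	for_each_online_cpu(i)
		do_per_cpu_work(i);

	/*
	 * ... or visit every possible CPU, e.g. when resetting or freeing
	 * per-CPU state that may exist for CPUs that are currently offline.
	 */
	for_each_cpu(i)
		do_per_cpu_work(i);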
Diffstat (limited to 'arch/i386')
-rw-r--r--	arch/i386/kernel/cpu/cpufreq/powernow-k8.c	4
-rw-r--r--	arch/i386/kernel/io_apic.c	22
-rw-r--r--	arch/i386/kernel/nmi.c	4
-rw-r--r--	arch/i386/oprofile/nmi_int.c	7
4 files changed, 14 insertions, 23 deletions
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
index e11a09207ec8..3d5110b65cc3 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -1145,9 +1145,7 @@ static int __cpuinit powernowk8_init(void)
 {
 	unsigned int i, supported_cpus = 0;
 
-	for (i=0; i<NR_CPUS; i++) {
-		if (!cpu_online(i))
-			continue;
+	for_each_cpu(i) {
 		if (check_supported_cpu(i))
 			supported_cpus++;
 	}
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index fd1c60cfd294..311b4e7266f1 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -351,8 +351,8 @@ static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
 {
 	int i, j;
 	Dprintk("Rotating IRQs among CPUs.\n");
-	for (i = 0; i < NR_CPUS; i++) {
-		for (j = 0; cpu_online(i) && (j < NR_IRQS); j++) {
+	for_each_online_cpu(i) {
+		for (j = 0; j < NR_IRQS; j++) {
 			if (!irq_desc[j].action)
 				continue;
 			/* Is it a significant load ? */
@@ -381,7 +381,7 @@ static void do_irq_balance(void)
 	unsigned long imbalance = 0;
 	cpumask_t allowed_mask, target_cpu_mask, tmp;
 
-	for (i = 0; i < NR_CPUS; i++) {
+	for_each_cpu(i) {
 		int package_index;
 		CPU_IRQ(i) = 0;
 		if (!cpu_online(i))
@@ -422,9 +422,7 @@ static void do_irq_balance(void)
 		}
 	}
 	/* Find the least loaded processor package */
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_online(i))
-			continue;
+	for_each_online_cpu(i) {
 		if (i != CPU_TO_PACKAGEINDEX(i))
 			continue;
 		if (min_cpu_irq > CPU_IRQ(i)) {
@@ -441,9 +439,7 @@ tryanothercpu:
 	 */
 	tmp_cpu_irq = 0;
 	tmp_loaded = -1;
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_online(i))
-			continue;
+	for_each_online_cpu(i) {
 		if (i != CPU_TO_PACKAGEINDEX(i))
 			continue;
 		if (max_cpu_irq <= CPU_IRQ(i))
@@ -619,9 +615,7 @@ static int __init balanced_irq_init(void)
 	if (smp_num_siblings > 1 && !cpus_empty(tmp))
 		physical_balance = 1;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_online(i))
-			continue;
+	for_each_online_cpu(i) {
 		irq_cpu_data[i].irq_delta = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
 		irq_cpu_data[i].last_irq = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
 		if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) {
@@ -638,9 +632,11 @@ static int __init balanced_irq_init(void)
 	else
 		printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq");
 failed:
-	for (i = 0; i < NR_CPUS; i++) {
+	for_each_cpu(i) {
 		kfree(irq_cpu_data[i].irq_delta);
+		irq_cpu_data[i].irq_delta = NULL;
 		kfree(irq_cpu_data[i].last_irq);
+		irq_cpu_data[i].last_irq = NULL;
 	}
 	return 0;
 }
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 1db34effdd8d..9074818b9473 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -143,7 +143,7 @@ static int __init check_nmi_watchdog(void)
 	local_irq_enable();
 	mdelay((10*1000)/nmi_hz); // wait 10 ticks
 
-	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+	for_each_cpu(cpu) {
 #ifdef CONFIG_SMP
 		/* Check cpu_callin_map here because that is set
 		   after the timer is started. */
@@ -510,7 +510,7 @@ void touch_nmi_watchdog (void)
 	 * Just reset the alert counters, (other CPUs might be
 	 * spinning on locks we hold):
 	 */
-	for (i = 0; i < NR_CPUS; i++)
+	for_each_cpu(i)
 		alert_counter[i] = 0;
 
 	/*
diff --git a/arch/i386/oprofile/nmi_int.c b/arch/i386/oprofile/nmi_int.c
index 0493e8b8ec49..1accce50c2c7 100644
--- a/arch/i386/oprofile/nmi_int.c
+++ b/arch/i386/oprofile/nmi_int.c
@@ -122,7 +122,7 @@ static void nmi_save_registers(void * dummy)
 static void free_msrs(void)
 {
 	int i;
-	for (i = 0; i < NR_CPUS; ++i) {
+	for_each_cpu(i) {
 		kfree(cpu_msrs[i].counters);
 		cpu_msrs[i].counters = NULL;
 		kfree(cpu_msrs[i].controls);
@@ -138,10 +138,7 @@ static int allocate_msrs(void)
 	size_t counters_size = sizeof(struct op_msr) * model->num_counters;
 	int i;
 
-	for (i = 0; i < NR_CPUS; ++i) {
-		if (!cpu_online(i))
-			continue;
-
+	for_each_online_cpu(i) {
 		cpu_msrs[i].counters = kmalloc(counters_size, GFP_KERNEL);
 		if (!cpu_msrs[i].counters) {
 			success = 0;