Diffstat (limited to 'kernel')
-rw-r--r--  kernel/module.c       |  2 +-
-rw-r--r--  kernel/power/smp.c    |  4 ++--
-rw-r--r--  kernel/sched.c        |  4 ++--
-rw-r--r--  kernel/stop_machine.c |  4 ++--
4 files changed, 7 insertions(+), 7 deletions(-)
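The change is purely mechanical: every remaining caller of _smp_processor_id() is switched to raw_smp_processor_id(), the unchecked variant that skips the CONFIG_DEBUG_PREEMPT sanity check. A minimal sketch of the distinction, with hypothetical helper names (only smp_processor_id(), raw_smp_processor_id(), preempt_disable() and preempt_enable() are real kernel APIs):

#include <linux/preempt.h>
#include <linux/smp.h>

/* Checked variant: with CONFIG_DEBUG_PREEMPT, smp_processor_id() warns
 * when called with preemption enabled, because the task could migrate
 * and the returned CPU number would already be stale. */
static int current_cpu_checked(void)
{
	int cpu;

	preempt_disable();
	cpu = smp_processor_id();	/* safe: no migration possible here */
	preempt_enable();
	return cpu;
}

/* Unchecked variant: raw_smp_processor_id() skips the debug check, for
 * callers, like the ones patched below, that either need only a CPU
 * hint or are already pinned to a single CPU. */
static int current_cpu_hint(void)
{
	return raw_smp_processor_id();
}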
diff --git a/kernel/module.c b/kernel/module.c
index 83b3d376708c..a566745dde62 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -379,7 +379,7 @@ static void module_unload_init(struct module *mod)
 	for (i = 0; i < NR_CPUS; i++)
 		local_set(&mod->ref[i].count, 0);
 	/* Hold reference count during initialization. */
-	local_set(&mod->ref[_smp_processor_id()].count, 1);
+	local_set(&mod->ref[raw_smp_processor_id()].count, 1);
 	/* Backwards compatibility macros put refcount during init. */
 	mod->waiter = current;
 }
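For context, the per-CPU module reference counting touched here sums all CPUs' slots, so it does not matter which slot receives the initialization reference; a momentarily stale CPU number from raw_smp_processor_id() is harmless. A sketch of the summation, patterned on module_refcount() as it read in this era:

#include <linux/module.h>

/* Total refcount is the sum over all per-CPU slots, so the init
 * reference parked by module_unload_init() counts the same no matter
 * which CPU's slot it landed in. */
unsigned int module_refcount(struct module *mod)
{
	unsigned int i, total = 0;

	for (i = 0; i < NR_CPUS; i++)
		total += local_read(&mod->ref[i].count);
	return total;
}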
diff --git a/kernel/power/smp.c b/kernel/power/smp.c
index cba3584b80fe..457c2302ed42 100644
--- a/kernel/power/smp.c
+++ b/kernel/power/smp.c
@@ -48,11 +48,11 @@ void disable_nonboot_cpus(void)
 {
 	oldmask = current->cpus_allowed;
 	set_cpus_allowed(current, cpumask_of_cpu(0));
-	printk("Freezing CPUs (at %d)", _smp_processor_id());
+	printk("Freezing CPUs (at %d)", raw_smp_processor_id());
 	current->state = TASK_INTERRUPTIBLE;
 	schedule_timeout(HZ);
 	printk("...");
-	BUG_ON(_smp_processor_id() != 0);
+	BUG_ON(raw_smp_processor_id() != 0);
 
 	/* FIXME: for this to work, all the CPUs must be running
 	 * "idle" thread (or we deadlock). Is that guaranteed? */
diff --git a/kernel/sched.c b/kernel/sched.c
index f12a0c8a7d98..deca041fc364 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3814,7 +3814,7 @@ EXPORT_SYMBOL(yield);
  */
 void __sched io_schedule(void)
 {
-	struct runqueue *rq = &per_cpu(runqueues, _smp_processor_id());
+	struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
 
 	atomic_inc(&rq->nr_iowait);
 	schedule();
@@ -3825,7 +3825,7 @@ EXPORT_SYMBOL(io_schedule);
 
 long __sched io_schedule_timeout(long timeout)
 {
-	struct runqueue *rq = &per_cpu(runqueues, _smp_processor_id());
+	struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
 	long ret;
 
 	atomic_inc(&rq->nr_iowait);
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 6116b25aa7cf..84a9d18aa8da 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -100,7 +100,7 @@ static int stop_machine(void)
 	stopmachine_state = STOPMACHINE_WAIT;
 
 	for_each_online_cpu(i) {
-		if (i == _smp_processor_id())
+		if (i == raw_smp_processor_id())
 			continue;
 		ret = kernel_thread(stopmachine, (void *)(long)i,CLONE_KERNEL);
 		if (ret < 0)
@@ -182,7 +182,7 @@ struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
 
 	/* If they don't care which CPU fn runs on, bind to any online one. */
 	if (cpu == NR_CPUS)
-		cpu = _smp_processor_id();
+		cpu = raw_smp_processor_id();
 
 	p = kthread_create(do_stop, &smdata, "kstopmachine");
 	if (!IS_ERR(p)) {
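The fallback patched here only picks a preferred CPU; the kstopmachine thread is pinned explicitly before it runs, so a momentarily stale hint costs nothing. A sketch of the assumed surrounding flow (kthread_bind() and wake_up_process() as used by __stop_machine_run() in this era):

/* The CPU chosen by raw_smp_processor_id() is just a preference: the
 * thread is bound to it with kthread_bind() before it is woken, so
 * the exact CPU the caller happened to be on does not matter. */
if (cpu == NR_CPUS)
	cpu = raw_smp_processor_id();
p = kthread_create(do_stop, &smdata, "kstopmachine");
if (!IS_ERR(p)) {
	kthread_bind(p, cpu);
	wake_up_process(p);
	wait_for_completion(&smdata.done);
}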