1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
|
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -36,7 +36,7 @@
struct cpu_stopper {
struct task_struct *thread;
- raw_spinlock_t lock;
+ spinlock_t lock;
bool enabled; /* is this stopper enabled? */
struct list_head works; /* list of pending works */
@@ -78,13 +78,13 @@
unsigned long flags;
bool enabled;
- raw_spin_lock_irqsave(&stopper->lock, flags);
+ spin_lock_irqsave(&stopper->lock, flags);
enabled = stopper->enabled;
if (enabled)
__cpu_stop_queue_work(stopper, work);
else if (work->done)
cpu_stop_signal_done(work->done);
- raw_spin_unlock_irqrestore(&stopper->lock, flags);
+ spin_unlock_irqrestore(&stopper->lock, flags);
return enabled;
}
@@ -231,8 +231,8 @@
struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
int err;
retry:
- raw_spin_lock_irq(&stopper1->lock);
- raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
+ spin_lock_irq(&stopper1->lock);
+ spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
err = -ENOENT;
if (!stopper1->enabled || !stopper2->enabled)
@@ -255,8 +255,8 @@
__cpu_stop_queue_work(stopper1, work1);
__cpu_stop_queue_work(stopper2, work2);
unlock:
- raw_spin_unlock(&stopper2->lock);
- raw_spin_unlock_irq(&stopper1->lock);
+ spin_unlock(&stopper2->lock);
+ spin_unlock_irq(&stopper1->lock);
if (unlikely(err == -EDEADLK)) {
while (stop_cpus_in_progress)
@@ -448,9 +448,9 @@
unsigned long flags;
int run;
- raw_spin_lock_irqsave(&stopper->lock, flags);
+ spin_lock_irqsave(&stopper->lock, flags);
run = !list_empty(&stopper->works);
- raw_spin_unlock_irqrestore(&stopper->lock, flags);
+ spin_unlock_irqrestore(&stopper->lock, flags);
return run;
}
@@ -461,13 +461,13 @@
repeat:
work = NULL;
- raw_spin_lock_irq(&stopper->lock);
+ spin_lock_irq(&stopper->lock);
if (!list_empty(&stopper->works)) {
work = list_first_entry(&stopper->works,
struct cpu_stop_work, list);
list_del_init(&work->list);
}
- raw_spin_unlock_irq(&stopper->lock);
+ spin_unlock_irq(&stopper->lock);
if (work) {
cpu_stop_fn_t fn = work->fn;
@@ -541,7 +541,7 @@
for_each_possible_cpu(cpu) {
struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
- raw_spin_lock_init(&stopper->lock);
+ spin_lock_init(&stopper->lock);
INIT_LIST_HEAD(&stopper->works);
}
|