| field | value | date |
|---|---|---|
| author | Olof Johansson <olof@lixom.net> | 2014-07-22 09:17:18 -0700 |
| committer | Olof Johansson <olof@lixom.net> | 2014-07-22 09:17:18 -0700 |
| commit | e9a86d96be0dff611156ea89fbb6eb91478bddab (patch) | |
| tree | ad38c21c76e1c275264cb20482a7f2bdd54618ba /kernel/locking/mcs_spinlock.c | |
| parent | fc3791f3a95df5b76ecbda6952c13ff67251fd10 (diff) | |
| parent | ad8adbf2c7d6269d0100f7f460f4837ece060977 (diff) | |
Merge tag 'sti-defconfig-for-v3.17-1' of git://git.stlinux.com/devel/kernel/linux-sti into next/defconfig
Merge "ARM: STi: defconfig changes for v3.17" from Maxime Coquelin:
STi defconfig updates for v3.17
- Enable ST's Thermal controller driver
- Enable ST's Keyscan driver
- Enable ST's MiPHY365x PHY driver for STiH416 SATA & PCIe
- Enable ST's AHCI driver (see the config sketch below)
* tag 'sti-defconfig-for-v3.17-1' of git://git.stlinux.com/devel/kernel/linux-sti:
ARM: multi_v7_defconfig: Enable MiPHY365x - ST's Generic (SATA & PCIe) PHY
ARM: multi_v7_defconfig: Enable ST's (S)ATA driver
ARM: multi_v7_defconfig: add ST Keyscan driver
ARM: update multi_v7_defconfig for STI
ARM: multi_v7_defconfig: Configure in ST's Thermal Controller
+ Linux 3.16-rc6
Signed-off-by: Olof Johansson <olof@lixom.net>
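
The defconfig patches themselves fall outside this view (it is limited to kernel/locking/mcs_spinlock.c), but from the drivers named above the multi_v7_defconfig additions plausibly amount to a fragment like the one below. The CONFIG_* symbols here are inferred from the driver names, not quoted from the actual patches:

```
CONFIG_KEYBOARD_ST_KEYSCAN=y
CONFIG_AHCI_ST=y
CONFIG_ST_THERMAL_SYSCFG=y
CONFIG_ST_THERMAL_MEMMAP=y
CONFIG_PHY_MIPHY365X=y
```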
Diffstat (limited to 'kernel/locking/mcs_spinlock.c')
| mode | path | lines |
|---|---|---|
| -rw-r--r-- | kernel/locking/mcs_spinlock.c | 64 |
1 file changed, 48 insertions, 16 deletions
diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/mcs_spinlock.c
index 838dc9e00669..be9ee1559fca 100644
--- a/kernel/locking/mcs_spinlock.c
+++ b/kernel/locking/mcs_spinlock.c
@@ -14,21 +14,47 @@
  * called from interrupt context and we have preemption disabled while
  * spinning.
  */
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_queue, osq_node);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);
+
+/*
+ * We use the value 0 to represent "no CPU", thus the encoded value
+ * will be the CPU number incremented by 1.
+ */
+static inline int encode_cpu(int cpu_nr)
+{
+	return cpu_nr + 1;
+}
+
+static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
+{
+	int cpu_nr = encoded_cpu_val - 1;
+
+	return per_cpu_ptr(&osq_node, cpu_nr);
+}
 
 /*
  * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
  * Can return NULL in case we were the last queued and we updated @lock instead.
  */
-static inline struct optimistic_spin_queue *
-osq_wait_next(struct optimistic_spin_queue **lock,
-	      struct optimistic_spin_queue *node,
-	      struct optimistic_spin_queue *prev)
+static inline struct optimistic_spin_node *
+osq_wait_next(struct optimistic_spin_queue *lock,
+	      struct optimistic_spin_node *node,
+	      struct optimistic_spin_node *prev)
 {
-	struct optimistic_spin_queue *next = NULL;
+	struct optimistic_spin_node *next = NULL;
+	int curr = encode_cpu(smp_processor_id());
+	int old;
+
+	/*
+	 * If there is a prev node in queue, then the 'old' value will be
+	 * the prev node's CPU #, else it's set to OSQ_UNLOCKED_VAL since if
+	 * we're currently last in queue, then the queue will then become empty.
+	 */
+	old = prev ? prev->cpu : OSQ_UNLOCKED_VAL;
 
 	for (;;) {
-		if (*lock == node && cmpxchg(lock, node, prev) == node) {
+		if (atomic_read(&lock->tail) == curr &&
+		    atomic_cmpxchg(&lock->tail, curr, old) == curr) {
 			/*
 			 * We were the last queued, we moved @lock back. @prev
 			 * will now observe @lock and will complete its
@@ -59,18 +85,23 @@ osq_wait_next(struct optimistic_spin_queue **lock,
 	return next;
 }
 
-bool osq_lock(struct optimistic_spin_queue **lock)
+bool osq_lock(struct optimistic_spin_queue *lock)
 {
-	struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node);
-	struct optimistic_spin_queue *prev, *next;
+	struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
+	struct optimistic_spin_node *prev, *next;
+	int curr = encode_cpu(smp_processor_id());
+	int old;
 
 	node->locked = 0;
 	node->next = NULL;
+	node->cpu = curr;
 
-	node->prev = prev = xchg(lock, node);
-	if (likely(prev == NULL))
+	old = atomic_xchg(&lock->tail, curr);
+	if (old == OSQ_UNLOCKED_VAL)
 		return true;
 
+	prev = decode_cpu(old);
+	node->prev = prev;
 	ACCESS_ONCE(prev->next) = node;
 
 	/*
@@ -149,20 +180,21 @@ unqueue:
 	return false;
 }
 
-void osq_unlock(struct optimistic_spin_queue **lock)
+void osq_unlock(struct optimistic_spin_queue *lock)
 {
-	struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node);
-	struct optimistic_spin_queue *next;
+	struct optimistic_spin_node *node, *next;
+	int curr = encode_cpu(smp_processor_id());
 
 	/*
 	 * Fast path for the uncontended case.
 	 */
-	if (likely(cmpxchg(lock, node, NULL) == node))
+	if (likely(atomic_cmpxchg(&lock->tail, curr, OSQ_UNLOCKED_VAL) == curr))
 		return;
 
 	/*
 	 * Second most likely case.
 	 */
+	node = this_cpu_ptr(&osq_node);
 	next = xchg(&node->next, NULL);
 	if (next) {
 		ACCESS_ONCE(next->locked) = 1;
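
The heart of the change above is that struct optimistic_spin_queue shrinks from a per-CPU node pointer to a 4-byte atomic_t tail holding an encoded CPU number, where 0 (OSQ_UNLOCKED_VAL) means "no CPU queued". A minimal user-space sketch of that encoding follows; the nodes[] array and NR_CPUS here merely stand in for the kernel's per-CPU osq_node data, so this is an illustration of the scheme, not kernel code:

```c
#include <assert.h>
#include <stdio.h>

#define OSQ_UNLOCKED_VAL 0	/* tail value meaning "queue empty" */
#define NR_CPUS 8		/* illustrative; stands in for the real config */

struct optimistic_spin_node {
	struct optimistic_spin_node *next, *prev;
	int locked;
	int cpu;		/* encoded CPU number, see encode_cpu() */
};

/* stand-in for the kernel's per-CPU osq_node instances */
static struct optimistic_spin_node nodes[NR_CPUS];

/* shift CPU numbers up by one so 0 stays free to mean "unlocked" */
static inline int encode_cpu(int cpu_nr)
{
	return cpu_nr + 1;
}

/* map an encoded tail value back to that CPU's queue node */
static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
{
	return &nodes[encoded_cpu_val - 1];
}

int main(void)
{
	/* encode/decode must round-trip and never collide with "unlocked" */
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		int enc = encode_cpu(cpu);
		assert(enc != OSQ_UNLOCKED_VAL);
		assert(decode_cpu(enc) == &nodes[cpu]);
	}
	printf("tail: %zu bytes as an int vs %zu as a node pointer\n",
	       sizeof(int), sizeof(struct optimistic_spin_node *));
	return 0;
}
```

Because the tail is now an int rather than a pointer, the queue head fits in an atomic_t, which is what enables the cmpxchg fast paths above (e.g. atomic_cmpxchg(&lock->tail, curr, OSQ_UNLOCKED_VAL)) and, per the series' rationale, lets the OSQ be embedded in mutexes and rw-semaphores without enlarging them on 64-bit systems.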
