author     Martin Schwidefsky <schwidefsky@de.ibm.com>   2014-09-19 14:29:31 +0200
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>   2014-09-25 10:52:05 +0200
commit     d59b93da5e572703e1a7311c13dd3472a4e56e30 (patch)
tree       4c75073780f54bc9785433256eee3d6d70eaf868 /arch
parent     46b05c7bd51edafb8c8da088b49bddf7f78d48f9 (diff)
s390/rwlock: use directed yield for write-locked rwlocks
Add an owner field to arch_rwlock_t so that, while the rwlock is
write-locked, the timeslice of a waiting virtual CPU can be passed
directly to the lock holder with diagnose 0x9c. The undirected yield
that was done while waiting to acquire the rwlock for writing when it
is read-locked is removed.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
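
The change, condensed into a single sketch for readability (assembled from the _raw_write_lock_wait() hunk below; the code is illustrative shorthand for the patch, not a substitute for it): rw->owner holds SPINLOCK_LOCKVAL, the bitwise complement of the writer's CPU number, while the lock is write-locked, and 0 otherwise. A spinning waiter samples it and, after spin_retry failed attempts, donates its timeslice to that specific virtual CPU instead of yielding blindly:

/*
 * Sketch of the directed-yield pattern this patch introduces.
 * smp_vcpu_scheduled(), smp_yield_cpu() (diagnose 0x9c) and the
 * ~cpu lockval encoding are from the patch; the function skeleton
 * is illustrative, not the literal kernel source.
 */
void _raw_write_lock_wait_sketch(arch_rwlock_t *rw)
{
        unsigned int owner = 0, old;
        int count = spin_retry;

        while (1) {
                if (count-- <= 0) {
                        /* Directed yield: only if the owner is known
                         * and its virtual CPU is not running. */
                        if (owner && !smp_vcpu_scheduled(~owner))
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
                old = ACCESS_ONCE(rw->lock);
                owner = ACCESS_ONCE(rw->owner);
                if (old)
                        continue;
                if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
                        break;
        }
}

Readers have no single owner to yield to, which is why the old undirected yield (diagnose 0x44) for the write-waiting-on-readers case is dropped rather than redirected.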
Diffstat (limited to 'arch')

-rw-r--r--  arch/s390/include/asm/smp.h             |  2
-rw-r--r--  arch/s390/include/asm/spinlock.h        | 28
-rw-r--r--  arch/s390/include/asm/spinlock_types.h  |  1
-rw-r--r--  arch/s390/kernel/smp.c                  |  6
-rw-r--r--  arch/s390/lib/spinlock.c                | 49

5 files changed, 54 insertions(+), 32 deletions(-)
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 4f1307962a95..762d4f88af5a 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -29,7 +29,6 @@ extern int smp_find_processor_id(u16 address);
 extern int smp_store_status(int cpu);
 extern int smp_vcpu_scheduled(int cpu);
 extern void smp_yield_cpu(int cpu);
-extern void smp_yield(void);
 extern void smp_cpu_set_polarization(int cpu, int val);
 extern int smp_cpu_get_polarization(int cpu);
 extern void smp_fill_possible_mask(void);
@@ -50,7 +49,6 @@ static inline int smp_find_processor_id(u16 address) { return 0; }
 static inline int smp_store_status(int cpu) { return 0; }
 static inline int smp_vcpu_scheduled(int cpu) { return 1; }
 static inline void smp_yield_cpu(int cpu) { }
-static inline void smp_yield(void) { }
 static inline void smp_fill_possible_mask(void) { }
 
 #endif /* CONFIG_SMP */
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index d26ad2ac7cb2..e98654163e5c 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -37,11 +37,17 @@ _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
  * (the type definitions are in asm/spinlock_types.h)
  */
 
+void arch_lock_relax(unsigned int cpu);
+
 void arch_spin_lock_wait(arch_spinlock_t *);
 int arch_spin_trylock_retry(arch_spinlock_t *);
-void arch_spin_relax(arch_spinlock_t *);
 void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
 
+static inline void arch_spin_relax(arch_spinlock_t *lock)
+{
+        arch_lock_relax(lock->lock);
+}
+
 static inline u32 arch_spin_lockval(int cpu)
 {
         return ~cpu;
@@ -170,17 +176,21 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 {
         if (!arch_write_trylock_once(rw))
                 _raw_write_lock_wait(rw);
+        rw->owner = SPINLOCK_LOCKVAL;
 }
 
 static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
 {
         if (!arch_write_trylock_once(rw))
                 _raw_write_lock_wait_flags(rw, flags);
+        rw->owner = SPINLOCK_LOCKVAL;
 }
 
 static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
         typecheck(unsigned int, rw->lock);
+
+        rw->owner = 0;
         asm volatile(
                 __ASM_BARRIER
                 "st %1,%0\n"
@@ -198,12 +208,20 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 
 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-        if (!arch_write_trylock_once(rw))
-                return _raw_write_trylock_retry(rw);
+        if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
+                return 0;
+        rw->owner = SPINLOCK_LOCKVAL;
         return 1;
 }
 
-#define arch_read_relax(lock)   cpu_relax()
-#define arch_write_relax(lock)  cpu_relax()
+static inline void arch_read_relax(arch_rwlock_t *rw)
+{
+        arch_lock_relax(rw->owner);
+}
+
+static inline void arch_write_relax(arch_rwlock_t *rw)
+{
+        arch_lock_relax(rw->owner);
+}
 
 #endif /* __ASM_SPINLOCK_H */
diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h
index b2cd6ff7c2c5..d84b6939237c 100644
--- a/arch/s390/include/asm/spinlock_types.h
+++ b/arch/s390/include/asm/spinlock_types.h
@@ -13,6 +13,7 @@ typedef struct {
 
 typedef struct {
         unsigned int lock;
+        unsigned int owner;
 } arch_rwlock_t;
 
 #define __ARCH_RW_LOCK_UNLOCKED         { 0 }
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 243c7e512600..abec97b4ddbf 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -333,12 +333,6 @@ int smp_vcpu_scheduled(int cpu)
         return pcpu_running(pcpu_devices + cpu);
 }
 
-void smp_yield(void)
-{
-        if (MACHINE_HAS_DIAG44)
-                asm volatile("diag 0,0,0x44");
-}
-
 void smp_yield_cpu(int cpu)
 {
         if (MACHINE_HAS_DIAG9C)
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 5b0e445bc3f3..5f63ac5783cb 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -98,17 +98,6 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 }
 EXPORT_SYMBOL(arch_spin_lock_wait_flags);
 
-void arch_spin_relax(arch_spinlock_t *lp)
-{
-        unsigned int cpu = lp->lock;
-        if (cpu != 0) {
-                if (MACHINE_IS_VM || MACHINE_IS_KVM ||
-                    !smp_vcpu_scheduled(~cpu))
-                        smp_yield_cpu(~cpu);
-        }
-}
-EXPORT_SYMBOL(arch_spin_relax);
-
 int arch_spin_trylock_retry(arch_spinlock_t *lp)
 {
         int count;
@@ -122,15 +111,18 @@ EXPORT_SYMBOL(arch_spin_trylock_retry);
 
 void _raw_read_lock_wait(arch_rwlock_t *rw)
 {
-        unsigned int old;
+        unsigned int owner, old;
         int count = spin_retry;
 
+        owner = 0;
         while (1) {
                 if (count-- <= 0) {
-                        smp_yield();
+                        if (owner && !smp_vcpu_scheduled(~owner))
+                                smp_yield_cpu(~owner);
                         count = spin_retry;
                 }
                 old = ACCESS_ONCE(rw->lock);
+                owner = ACCESS_ONCE(rw->owner);
                 if ((int) old < 0)
                         continue;
                 if (_raw_compare_and_swap(&rw->lock, old, old + 1))
@@ -141,16 +133,19 @@ EXPORT_SYMBOL(_raw_read_lock_wait);
 
 void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
 {
-        unsigned int old;
+        unsigned int owner, old;
         int count = spin_retry;
 
         local_irq_restore(flags);
+        owner = 0;
         while (1) {
                 if (count-- <= 0) {
-                        smp_yield();
+                        if (owner && !smp_vcpu_scheduled(~owner))
+                                smp_yield_cpu(~owner);
                         count = spin_retry;
                 }
                 old = ACCESS_ONCE(rw->lock);
+                owner = ACCESS_ONCE(rw->owner);
                 if ((int) old < 0)
                         continue;
                 local_irq_disable();
@@ -179,15 +174,18 @@ EXPORT_SYMBOL(_raw_read_trylock_retry);
 
 void _raw_write_lock_wait(arch_rwlock_t *rw)
 {
-        unsigned int old;
+        unsigned int owner, old;
         int count = spin_retry;
 
+        owner = 0;
         while (1) {
                 if (count-- <= 0) {
-                        smp_yield();
+                        if (owner && !smp_vcpu_scheduled(~owner))
+                                smp_yield_cpu(~owner);
                         count = spin_retry;
                 }
                 old = ACCESS_ONCE(rw->lock);
+                owner = ACCESS_ONCE(rw->owner);
                 if (old)
                         continue;
                 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
@@ -198,16 +196,19 @@ EXPORT_SYMBOL(_raw_write_lock_wait);
 
 void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
 {
-        unsigned int old;
+        unsigned int owner, old;
         int count = spin_retry;
 
         local_irq_restore(flags);
+        owner = 0;
         while (1) {
                 if (count-- <= 0) {
-                        smp_yield();
+                        if (owner && !smp_vcpu_scheduled(~owner))
+                                smp_yield_cpu(~owner);
                         count = spin_retry;
                 }
                 old = ACCESS_ONCE(rw->lock);
+                owner = ACCESS_ONCE(rw->owner);
                 if (old)
                         continue;
                 local_irq_disable();
@@ -233,3 +234,13 @@ int _raw_write_trylock_retry(arch_rwlock_t *rw)
         return 0;
 }
 EXPORT_SYMBOL(_raw_write_trylock_retry);
+
+void arch_lock_relax(unsigned int cpu)
+{
+        if (!cpu)
+                return;
+        if (MACHINE_IS_LPAR && smp_vcpu_scheduled(~cpu))
+                return;
+        smp_yield_cpu(~cpu);
+}
+EXPORT_SYMBOL(arch_lock_relax);
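
A side note on the ~cpu / ~owner complementing seen throughout: arch_spin_lockval() above stores the bitwise complement of the CPU number, so CPU 0 still produces a non-zero lock value and 0 can unambiguously mean "unowned"; the complement is undone again before the value is handed to smp_yield_cpu(). A tiny standalone illustration of the encoding (the demo program itself is hypothetical; only the ~cpu convention comes from the patch):

#include <stdio.h>

/* Same encoding as arch_spin_lockval()/SPINLOCK_LOCKVAL above:
 * ~cpu is never 0, so a zero owner field safely means "unlocked". */
static unsigned int lockval(int cpu)
{
        return ~cpu;
}

int main(void)
{
        unsigned int owner = lockval(0);        /* CPU 0 holds the lock */

        printf("stored owner = 0x%08x, cpu = %d\n", owner, (int) ~owner);
        /* prints: stored owner = 0xffffffff, cpu = 0 */
        return 0;
}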