author      Martin Schwidefsky <schwidefsky@de.ibm.com>    2005-07-27 11:44:57 -0700
committer   Linus Torvalds <torvalds@g5.osdl.org>          2005-07-27 16:26:04 -0700
commit      951f22d5b1f0eaae35dafc669e3774a0c2084d10 (patch)
tree        66c0131b576dadb98026da11d624df453c4c9a7c /arch/s390/lib
parent      8449d003f323ca7a00eec38905d984ba5ec83a29 (diff)
[PATCH] s390: spin lock retry
Split the spin lock and r/w lock implementations into a single try, which is done inline, and an out-of-line function that repeatedly tries to get the lock before doing the cpu_relax(). Add a system control to set the number of retries before a cpu is yielded.

The reason for the spin lock retry is that the diagnose 0x44 that is used to give up the virtual cpu is quite expensive. For spin locks that are held only for a short period of time, the cost of the diagnose outweighs the savings for spin locks that are held for a longer time. The default retry count is 1000.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
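The inline single-try fast path lives in include/asm-s390/spinlock.h and is therefore outside this diffstat. A minimal sketch of how it is expected to hand off to the out-of-line slow path added below (the function body and the exact cast are assumptions, not part of this diff):

    /* Sketch only -- the real fast path is in include/asm-s390/spinlock.h,
     * which is not part of this diffstat. */
    static inline void _raw_spin_lock(spinlock_t *lp)
    {
            /* caller address tagged into the lock word; assumed encoding */
            unsigned int pc = 1 | (unsigned int)(unsigned long)
                    __builtin_return_address(0);

            /* one inline compare-and-swap attempt ... */
            if (_raw_compare_and_swap(&lp->lock, 0, pc) != 0)
                    /* ... contended: fall back to the out-of-line retry loop */
                    _raw_spin_lock_wait(lp, pc);
    }

The uncontended case thus stays a single compare-and-swap; only contended locks pay for a call into arch/s390/lib/spinlock.c.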
Diffstat (limited to 'arch/s390/lib')
-rw-r--r--   arch/s390/lib/Makefile     4
-rw-r--r--   arch/s390/lib/spinlock.c   133
2 files changed, 135 insertions, 2 deletions
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index a8758b1d20a9..b701efa1f00e 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -5,5 +5,5 @@
EXTRA_AFLAGS := -traditional
lib-y += delay.o string.o
-lib-$(CONFIG_ARCH_S390_31) += uaccess.o
-lib-$(CONFIG_ARCH_S390X) += uaccess64.o
+lib-$(CONFIG_ARCH_S390_31) += uaccess.o spinlock.o
+lib-$(CONFIG_ARCH_S390X) += uaccess64.o spinlock.o
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
new file mode 100644
index 000000000000..888b5596c195
--- /dev/null
+++ b/arch/s390/lib/spinlock.c
@@ -0,0 +1,133 @@
+/*
+ * arch/s390/lib/spinlock.c
+ * Out of line spinlock code.
+ *
+ * S390 version
+ * Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <asm/io.h>
+
+atomic_t spin_retry_counter;
+int spin_retry = 1000;
+
+/**
+ * spin_retry= parameter
+ */
+static int __init spin_retry_setup(char *str)
+{
+ spin_retry = simple_strtoul(str, &str, 0);
+ return 1;
+}
+__setup("spin_retry=", spin_retry_setup);
+
+static inline void
+_diag44(void)
+{
+#ifdef __s390x__
+ if (MACHINE_HAS_DIAG44)
+#endif
+ asm volatile("diag 0,0,0x44");
+}
+
+void
+_raw_spin_lock_wait(spinlock_t *lp, unsigned int pc)
+{
+ int count = spin_retry;
+
+ while (1) {
+ if (count-- <= 0) {
+ _diag44();
+ count = spin_retry;
+ }
+ atomic_inc(&spin_retry_counter);
+ if (_raw_compare_and_swap(&lp->lock, 0, pc) == 0)
+ return;
+ }
+}
+EXPORT_SYMBOL(_raw_spin_lock_wait);
+
+int
+_raw_spin_trylock_retry(spinlock_t *lp, unsigned int pc)
+{
+ int count = spin_retry;
+
+ while (count-- > 0) {
+ atomic_inc(&spin_retry_counter);
+ if (_raw_compare_and_swap(&lp->lock, 0, pc) == 0)
+ return 1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(_raw_spin_trylock_retry);
+
+void
+_raw_read_lock_wait(rwlock_t *rw)
+{
+ unsigned int old;
+ int count = spin_retry;
+
+ while (1) {
+ if (count-- <= 0) {
+ _diag44();
+ count = spin_retry;
+ }
+ atomic_inc(&spin_retry_counter);
+ old = rw->lock & 0x7fffffffU;
+ if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
+ return;
+ }
+}
+EXPORT_SYMBOL(_raw_read_lock_wait);
+
+int
+_raw_read_trylock_retry(rwlock_t *rw)
+{
+ unsigned int old;
+ int count = spin_retry;
+
+ while (count-- > 0) {
+ atomic_inc(&spin_retry_counter);
+ old = rw->lock & 0x7fffffffU;
+ if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
+ return 1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(_raw_read_trylock_retry);
+
+void
+_raw_write_lock_wait(rwlock_t *rw)
+{
+ int count = spin_retry;
+
+ while (1) {
+ if (count-- <= 0) {
+ _diag44();
+ count = spin_retry;
+ }
+ atomic_inc(&spin_retry_counter);
+ if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
+ return;
+ }
+}
+EXPORT_SYMBOL(_raw_write_lock_wait);
+
+int
+_raw_write_trylock_retry(rwlock_t *rw)
+{
+ int count = spin_retry;
+
+ while (count-- > 0) {
+ atomic_inc(&spin_retry_counter);
+ if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
+ return 1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(_raw_write_trylock_retry);
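Two ways to tune the retry count follow from this patch. The __setup handler above makes spin_retry=<n> usable on the kernel command line. The system control mentioned in the commit message is registered outside arch/s390/lib, so it does not appear in this diffstat; a hedged sketch of what such an entry in kernel/sysctl.c would look like with the proc_dointvec pattern of that era (the ctl_name constant is an assumption here):

    #ifdef CONFIG_ARCH_S390
            /* Sketch: exposes spin_retry as /proc/sys/kernel/spin_retry;
             * the actual table entry is added outside arch/s390/lib. */
            {
                    .ctl_name       = KERN_SPIN_RETRY,      /* assumed constant */
                    .procname       = "spin_retry",
                    .data           = &spin_retry,
                    .maxlen         = sizeof(int),
                    .mode           = 0644,
                    .proc_handler   = &proc_dointvec,
            },
    #endif

With such an entry in place, writing to /proc/sys/kernel/spin_retry adjusts the retry count at run time, while booting with spin_retry=0 would make _raw_spin_lock_wait() issue the diagnose 0x44 on every failed attempt.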