path: root/include/asm-s390/irqflags.h
author     Martin Schwidefsky <schwidefsky@de.ibm.com>  2006-09-28 16:56:43 +0200
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>  2006-09-28 16:56:43 +0200
commit     94c12cc7d196bab34aaa98d38521549fa1e5ef76
tree       8e0cec0ed44445d74a2cb5160303d6b4dfb1bc31 /include/asm-s390/irqflags.h
parent     25d83cbfaa44e1b9170c0941c3ef52ca39f54ccc
[S390] Inline assembly cleanup.
Major cleanup of all s390 inline assemblies. They now have a common coding style. Quite a few have been shortened, mainly by using register asm variables. Use of the EX_TABLE macro helps as well. The atomic ops, bit ops and locking inlines now use the Q-constraint if a newer gcc is used, which results in slightly better code. Thanks to Christian Borntraeger for proof reading the changes.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
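As background on the "register asm variables" the message mentions: binding a local C variable to a fixed machine register lets the inline assembly rely on the value already being in that register instead of naming it as a memory or address operand. The sketch below is illustrative only and is not taken from this patch; the helper name, the choice of %r2/%r3 and the placeholder lgr instruction are assumptions.

/*
 * Illustrative sketch, not part of this patch: the operand is pinned to
 * general register 2 and the result to general register 3, so the asm
 * body can use those registers without extra address arithmetic.
 */
static inline unsigned long example_reg_asm(unsigned long addr)
{
	register unsigned long reg2 asm("2") = addr;	/* input forced into %r2 */
	register unsigned long reg3 asm("3");		/* result comes back in %r3 */

	asm volatile(
		"	lgr	%0,%1"	/* placeholder; real code would issue an
					   instruction that expects its operand in %r2 */
		: "=d" (reg3) : "d" (reg2));
	return reg3;
}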
Diffstat (limited to 'include/asm-s390/irqflags.h')
-rw-r--r--  include/asm-s390/irqflags.h | 110
1 file changed, 80 insertions(+), 30 deletions(-)
diff --git a/include/asm-s390/irqflags.h b/include/asm-s390/irqflags.h
index 3b566a5b3cc7..3f26131120b7 100644
--- a/include/asm-s390/irqflags.h
+++ b/include/asm-s390/irqflags.h
@@ -10,43 +10,93 @@
#ifdef __KERNEL__
+#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
+
+/* store then or system mask. */
+#define __raw_local_irq_stosm(__or) \
+({ \
+ unsigned long __mask; \
+ asm volatile( \
+ " stosm %0,%1" \
+ : "=Q" (__mask) : "i" (__or) : "memory"); \
+ __mask; \
+})
+
+/* store then and system mask. */
+#define __raw_local_irq_stnsm(__and) \
+({ \
+ unsigned long __mask; \
+ asm volatile( \
+ " stnsm %0,%1" \
+ : "=Q" (__mask) : "i" (__and) : "memory"); \
+ __mask; \
+})
+
+/* set system mask. */
+#define __raw_local_irq_ssm(__mask) \
+({ \
+ asm volatile("ssm %0" : : "Q" (__mask) : "memory"); \
+})
+
+#else /* __GNUC__ */
+
+/* store then or system mask. */
+#define __raw_local_irq_stosm(__or) \
+({ \
+ unsigned long __mask; \
+ asm volatile( \
+ " stosm 0(%1),%2" \
+ : "=m" (__mask) \
+ : "a" (&__mask), "i" (__or) : "memory"); \
+ __mask; \
+})
+
+/* store then and system mask. */
+#define __raw_local_irq_stnsm(__and) \
+({ \
+ unsigned long __mask; \
+ asm volatile( \
+ " stnsm 0(%1),%2" \
+ : "=m" (__mask) \
+ : "a" (&__mask), "i" (__and) : "memory"); \
+ __mask; \
+})
+
+/* set system mask. */
+#define __raw_local_irq_ssm(__mask) \
+({ \
+ asm volatile( \
+ " ssm 0(%0)" \
+ : : "a" (&__mask), "m" (__mask) : "memory"); \
+})
+
+#endif /* __GNUC__ */
+
/* interrupt control.. */
-#define raw_local_irq_enable() ({ \
- unsigned long __dummy; \
- __asm__ __volatile__ ( \
- "stosm 0(%1),0x03" \
- : "=m" (__dummy) : "a" (&__dummy) : "memory" ); \
- })
-
-#define raw_local_irq_disable() ({ \
- unsigned long __flags; \
- __asm__ __volatile__ ( \
- "stnsm 0(%1),0xfc" : "=m" (__flags) : "a" (&__flags) ); \
- __flags; \
- })
-
-#define raw_local_save_flags(x) \
-do { \
- typecheck(unsigned long, x); \
- __asm__ __volatile__("stosm 0(%1),0" : "=m" (x) : "a" (&x), "m" (x) ); \
-} while (0)
+static inline unsigned long raw_local_irq_enable(void)
+{
+ return __raw_local_irq_stosm(0x03);
+}
-#define raw_local_irq_restore(x) \
-do { \
- typecheck(unsigned long, x); \
- __asm__ __volatile__("ssm 0(%0)" : : "a" (&x), "m" (x) : "memory"); \
+static inline unsigned long raw_local_irq_disable(void)
+{
+ return __raw_local_irq_stnsm(0xfc);
+}
+
+#define raw_local_save_flags(x) \
+do { \
+ typecheck(unsigned long, x); \
+ (x) = __raw_local_irq_stosm(0x00); \
} while (0)
-#define raw_irqs_disabled() \
-({ \
- unsigned long flags; \
- raw_local_save_flags(flags); \
- !((flags >> __FLAG_SHIFT) & 3); \
-})
+static inline void raw_local_irq_restore(unsigned long flags)
+{
+ __raw_local_irq_ssm(flags);
+}
static inline int raw_irqs_disabled_flags(unsigned long flags)
{
- return !((flags >> __FLAG_SHIFT) & 3);
+ return !(flags & (3UL << (BITS_PER_LONG - 8)));
}
/* For spinlocks etc */
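For orientation, a typical caller of the new inlines pairs disable and restore around a critical section. This is a minimal sketch, not part of the patch; the function name and the body of the critical section are hypothetical.

/* Hypothetical caller: mask interrupts around a short critical section. */
static void example_critical_section(void)
{
	unsigned long flags;

	flags = raw_local_irq_disable();	/* stnsm 0xfc, returns the old mask */

	if (raw_irqs_disabled_flags(flags)) {
		/* interrupts were already off when we entered */
	}

	/* ... work that must not be interrupted on this CPU ... */

	raw_local_irq_restore(flags);		/* ssm puts the saved mask back */
}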