Diffstat (limited to 'arch/powerpc/include/asm/system.h')
-rw-r--r--  arch/powerpc/include/asm/system.h  |  24 +++++++++++++-----------
1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
index d6648c143322..2a4be19a92c4 100644
--- a/arch/powerpc/include/asm/system.h
+++ b/arch/powerpc/include/asm/system.h
@@ -23,15 +23,17 @@
* read_barrier_depends() prevents data-dependent loads being reordered
* across this point (nop on PPC).
*
- * We have to use the sync instructions for mb(), since lwsync doesn't
- * order loads with respect to previous stores. Lwsync is fine for
- * rmb(), though. Note that rmb() actually uses a sync on 32-bit
- * architectures.
+ * *mb() variants without smp_ prefix must order all types of memory
+ * operations with one another. sync is the only instruction sufficient
+ * to do this.
*
- * For wmb(), we use sync since wmb is used in drivers to order
- * stores to system memory with respect to writes to the device.
- * However, smp_wmb() can be a lighter-weight lwsync or eieio barrier
- * on SMP since it is only used to order updates to system memory.
+ * For the smp_ barriers, ordering is for cacheable memory operations
+ * only. We have to use the sync instruction for smp_mb(), since lwsync
+ * doesn't order loads with respect to previous stores. Lwsync can be
+ * used for smp_rmb() and smp_wmb().
+ *
+ * However, on CPUs that don't support lwsync, lwsync actually maps to a
+ * heavy-weight sync, so smp_wmb() can be a lighter-weight eieio.
*/
#define mb() __asm__ __volatile__ ("sync" : : : "memory")
#define rmb() __asm__ __volatile__ ("sync" : : : "memory")
@@ -45,14 +47,14 @@
#ifdef CONFIG_SMP
#ifdef __SUBARCH_HAS_LWSYNC
-# define SMPWMB lwsync
+# define SMPWMB LWSYNC
#else
# define SMPWMB eieio
#endif
#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() __asm__ __volatile__ (__stringify(SMPWMB) : : :"memory")
+#define smp_rmb() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
+#define smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
#define smp_read_barrier_depends() read_barrier_depends()
#else
#define smp_mb() barrier()
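
The new comment states that the smp_ variants only have to order cacheable memory operations against one another, which is why lwsync is sufficient for smp_rmb() and smp_wmb() while smp_mb() still needs a full sync. A minimal sketch of the producer/consumer pattern these barriers are meant for (hypothetical code, not part of this patch; all names are invented):

	#include <asm/system.h>		/* smp_wmb(), smp_rmb() as defined above */

	static int shared_data;
	static int shared_ready;

	/* Producer: both stores hit cacheable system memory, so the lwsync
	 * emitted by smp_wmb() is enough to order them. */
	static void publish(int val)
	{
		shared_data = val;
		smp_wmb();		/* order data store before flag store */
		shared_ready = 1;
	}

	/* Consumer: smp_rmb() (also lwsync) orders the flag load before the
	 * data load, pairing with the producer's smp_wmb(). */
	static int consume(void)
	{
		if (!shared_ready)
			return -1;
		smp_rmb();		/* order flag load before data load */
		return shared_data;
	}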
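
By contrast, the non-smp_ variants keep using sync because drivers use them to order stores to cacheable RAM against non-cacheable MMIO writes, which lwsync does not cover. A sketch of that case, with a hypothetical device and invented field names (note that the kernel's MMIO accessors on powerpc may already supply some of this ordering themselves; the explicit wmb() here just illustrates what the comment describes):

	#include <linux/types.h>
	#include <linux/io.h>		/* writel() */
	#include <asm/system.h>		/* wmb() */

	struct fake_dev {
		u32 *desc;			/* DMA descriptor in system RAM */
		void __iomem *doorbell;		/* device doorbell register */
	};

	static void fake_dev_kick(struct fake_dev *dev, u32 dma_addr)
	{
		*dev->desc = dma_addr;		/* cacheable store the device will read */
		wmb();				/* full sync: RAM store before MMIO store */
		writel(1, dev->doorbell);	/* non-cacheable store: start the device */
	}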