author	David S. Miller <davem@davemloft.net>	2008-11-15 13:33:25 -0800
committer	David S. Miller <davem@davemloft.net>	2008-12-04 09:16:47 -0800
commit	293666b7a17cb7a389fc274980439212386a19c4 (patch)
tree	075cc7661d2113cf04da7130b3383979d8024206 /arch/sparc64/kernel
parent	64f2dde3f743c8a1ad8c0a1aa74166c1034afd92 (diff)
sparc64: Stop using memory barriers for atomics and locks.
The kernel always executes in the TSO memory model now, so none of
this stuff is necessary any more.

With helpful feedback from Nick Piggin.

Signed-off-by: David S. Miller <davem@davemloft.net>
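Under TSO the hardware may still let a later load complete before an earlier
store becomes globally visible; every other ordering (load->load,
store->store, load->store) is guaranteed. That is the pattern of the whole
patch: barriers whose orderings TSO already provides are deleted, and
anything containing a store->load component collapses to a single #StoreLoad.
A minimal C sketch of the one case that still needs a barrier (all names
here are illustrative, not from the kernel):

	/* Dekker-style handshake: store my flag, then read the peer's.
	 * TSO may still let the load of "other" complete before the
	 * store to "mine" is visible, so a #StoreLoad barrier remains
	 * necessary between the two. */
	#define store_load_barrier() \
		__asm__ __volatile__("membar #StoreLoad" : : : "memory")

	static volatile int mine, other;	/* illustrative flags */

	static int try_enter(void)
	{
		mine = 1;
		store_load_barrier();	/* the only barrier TSO still needs */
		return other == 0;	/* peer not competing: safe to enter */
	}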
Diffstat (limited to 'arch/sparc64/kernel')
-rw-r--r--	arch/sparc64/kernel/smp.c	11
-rw-r--r--	arch/sparc64/kernel/trampoline.S	4
-rw-r--r--	arch/sparc64/kernel/traps.c	1
-rw-r--r--	arch/sparc64/kernel/tsb.S	6
4 files changed, 8 insertions(+), 14 deletions(-)
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index f500b0618bb0..c6d06362728c 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -163,7 +163,7 @@ static inline long get_delta (long *rt, long *master)
for (i = 0; i < NUM_ITERS; i++) {
t0 = tick_ops->get_tick();
go[MASTER] = 1;
- membar_storeload();
+ membar_safe("#StoreLoad");
while (!(tm = go[SLAVE]))
rmb();
go[SLAVE] = 0;
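membar_safe() is the helper these hunks switch to. As a hedged sketch (the
authoritative definition lives in the sparc64 asm/system.h of this era), it
issues the requested membar from the delay slot of an always-taken branch, a
shape apparently chosen to sidestep a CPU quirk with membar placement, and
its "memory" clobber doubles as a compiler barrier:

	/* Hedged reconstruction, not verified against this exact tree:
	 * the membar executes in the delay slot of the taken branch. */
	#define membar_safe(type) \
	do {	__asm__ __volatile__("ba,pt	%%xcc, 1f\n\t" \
				     " membar	" type "\n" \
				     "1:\n" \
				     : : : "memory"); \
	} while (0)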
@@ -257,7 +257,7 @@ static void smp_synchronize_one_tick(int cpu)
/* now let the client proceed into his loop */
go[MASTER] = 0;
- membar_storeload();
+ membar_safe("#StoreLoad");
spin_lock_irqsave(&itc_sync_lock, flags);
{
@@ -267,7 +267,7 @@ static void smp_synchronize_one_tick(int cpu)
go[MASTER] = 0;
wmb();
go[SLAVE] = tick_ops->get_tick();
- membar_storeload();
+ membar_safe("#StoreLoad");
}
}
spin_unlock_irqrestore(&itc_sync_lock, flags);
@@ -1122,7 +1122,6 @@ void smp_capture(void)
smp_processor_id());
#endif
penguins_are_doing_time = 1;
- membar_storestore_loadstore();
atomic_inc(&smp_capture_registry);
smp_cross_call(&xcall_capture, 0, 0, 0);
while (atomic_read(&smp_capture_registry) != ncpus)
@@ -1142,7 +1141,7 @@ void smp_release(void)
smp_processor_id());
#endif
penguins_are_doing_time = 0;
- membar_storeload_storestore();
+ membar_safe("#StoreLoad");
atomic_dec(&smp_capture_registry);
}
}
@@ -1161,7 +1160,7 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs)
__asm__ __volatile__("flushw");
prom_world(1);
atomic_inc(&smp_capture_registry);
- membar_storeload_storestore();
+ membar_safe("#StoreLoad");
while (penguins_are_doing_time)
rmb();
atomic_dec(&smp_capture_registry);
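The smp.c hunks show the translation rule directly: membar_storestore_loadstore()
ordered only store->store and load->store, both of which TSO guarantees, so it
is deleted outright in smp_capture(); the storeload variants carry a
store->load component, so they become membar_safe("#StoreLoad"). A sketch of
the release side, with illustrative stand-ins for the flag and counter:

	static volatile int penguins;	/* stand-in for penguins_are_doing_time */
	static atomic_t registry;	/* stand-in for smp_capture_registry */

	static void release_sketch(void)
	{
		penguins = 0;
		/* Old code: membar_storeload_storestore().  TSO gives
		 * store->store for free; only the store->load half of
		 * the old barrier still needs an instruction. */
		membar_safe("#StoreLoad");
		atomic_dec(&registry);
	}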
diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S
index 83abd5ae88a4..da1b781b5e65 100644
--- a/arch/sparc64/kernel/trampoline.S
+++ b/arch/sparc64/kernel/trampoline.S
@@ -109,7 +109,6 @@ startup_continue:
*/
sethi %hi(prom_entry_lock), %g2
1: ldstub [%g2 + %lo(prom_entry_lock)], %g1
- membar #StoreLoad | #StoreStore
brnz,pn %g1, 1b
nop
@@ -214,7 +213,6 @@ startup_continue:
sethi %hi(prom_entry_lock), %g2
stb %g0, [%g2 + %lo(prom_entry_lock)]
- membar #StoreStore | #StoreLoad
ba,pt %xcc, after_lock_tlb
nop
@@ -330,7 +328,6 @@ after_lock_tlb:
sethi %hi(prom_entry_lock), %g2
1: ldstub [%g2 + %lo(prom_entry_lock)], %g1
- membar #StoreLoad | #StoreStore
brnz,pn %g1, 1b
nop
@@ -394,7 +391,6 @@ after_lock_tlb:
3: sethi %hi(prom_entry_lock), %g2
stb %g0, [%g2 + %lo(prom_entry_lock)]
- membar #StoreStore | #StoreLoad
ldx [%l0], %g6
ldx [%g6 + TI_TASK], %g4
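The trampoline.S hunks drop the barriers around the prom_entry_lock ldstub
spinlock. Under TSO this is sound in both directions: later loads and stores
cannot move above the load half of the atomic ldstub (acquire), and earlier
accesses cannot be deferred past the plain byte store that releases the lock.
A C rendering of the same pattern, with an illustrative variable name:

	static volatile unsigned char prom_lock;	/* illustrative */

	static void prom_lock_acquire(void)
	{
		unsigned char old;

		do {
			/* ldstub atomically loads the byte and stores
			 * 0xff; reading zero means we took the lock. */
			__asm__ __volatile__("ldstub [%1], %0"
					     : "=&r" (old)
					     : "r" (&prom_lock)
					     : "memory");
			/* TSO: no membar needed after acquire. */
		} while (old != 0);
	}

	static void prom_lock_release(void)
	{
		/* TSO: earlier loads/stores cannot pass this store,
		 * so a plain store is a full release. */
		prom_lock = 0;
	}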
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 81ccd22e78d4..04994fc8700d 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -1371,7 +1371,6 @@ static int cheetah_fix_ce(unsigned long physaddr)
__asm__ __volatile__("ldxa [%0] %3, %%g0\n\t"
"ldxa [%1] %3, %%g0\n\t"
"casxa [%2] %3, %%g0, %%g0\n\t"
- "membar #StoreLoad | #StoreStore\n\t"
"ldxa [%0] %3, %%g0\n\t"
"ldxa [%1] %3, %%g0\n\t"
"membar #Sync"
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index c499214b501d..8c91d9b29a2f 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -317,7 +317,7 @@ tsb_flush:
srlx %g1, 32, %o3
andcc %o3, %g2, %g0
bne,pn %icc, 1b
- membar #LoadLoad
+ nop
cmp %g1, %o1
mov 1, %o3
bne,pt %xcc, 2f
@@ -327,7 +327,7 @@ tsb_flush:
bne,pn %xcc, 1b
nop
2: retl
- TSB_MEMBAR
+ nop
.size tsb_flush, .-tsb_flush
/* Reload MMU related context switch state at
@@ -478,7 +478,7 @@ copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
nop
retl
- TSB_MEMBAR
+ nop
.size copy_tsb, .-copy_tsb
/* Set the invalid bit in all TSB entries. */
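In the tsb.S hunks the old barriers occupied branch and retl delay slots, so
they are rewritten as nop rather than deleted: the delay slot must still hold
an instruction. The orderings themselves come for free: consecutive loads in
the tag spin loop are never reordered under TSO (hence #LoadLoad goes), and
TSB_MEMBAR (historically a #StoreStore barrier; treat that expansion as an
assumption here) is likewise redundant. A C analogue of the spin loop, with
illustrative names:

	/* Wait for a TSB entry's lock bit to clear.  Under TSO the
	 * loads of successive iterations are already ordered, so the
	 * old "membar #LoadLoad" in the branch delay slot becomes a
	 * plain nop. */
	static void wait_for_tsb_entry(volatile unsigned long *tag,
				       unsigned long lock_bit)
	{
		while (*tag & lock_bit)
			/* spin */ ;
	}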