author	David S. Miller <davem@davemloft.net>	2006-01-31 18:33:00 -0800
committer	David S. Miller <davem@sunset.davemloft.net>	2006-03-20 01:11:22 -0800
commit	4da808c352c290d3f762933d44d4ab90c2fd65f3
tree	da99326440777580a19c345a5b0d52fbf800042b
parent	4753eb2ac7022b999e5e484f1a5dc001dba22bd3
[SPARC64]: Fix bogus flush instruction usage.
Some of the trap code was still assuming that alternate global %g6 was hard coded with current_thread_info(). Let's just consistently flush at KERNBASE when we need a pipeline synchronization. That's locked into the TLB and will always work.

Signed-off-by: David S. Miller <davem@davemloft.net>
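Every hunk applies the same substitution: instead of executing flush on %g6 (which is only a valid kernel address while the alternate-global register set really holds current_thread_info(), as the old code assumed), the fixed code forms a KERNBASE address, whose translation is locked into the TLB, and flushes through that. A minimal sketch of the new sequence, lifted from the etrap.S hunk below:

	stxa	%g3, [%l4] ASI_DMMU		! write the kernel primary context register
	sethi	%hi(KERNBASE), %l4		! KERNBASE translation is locked into the TLB
	flush	%l4				! flush a known-mapped address for pipeline synchronization

Because the KERNBASE mapping is always present, this two-instruction pattern works no matter which register set or thread pointer happens to be live, which is why it replaces flush %g6 throughout the files changed here.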
-rw-r--r--	arch/sparc64/kernel/etrap.S	6
-rw-r--r--	arch/sparc64/kernel/rtrap.S	3
-rw-r--r--	arch/sparc64/kernel/winfixup.S	3
-rw-r--r--	arch/sparc64/lib/clear_page.S	4
-rw-r--r--	arch/sparc64/mm/ultra.S	31
5 files changed, 30 insertions(+), 17 deletions(-)
diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S
index 8b3b6d720ed5..db7681017299 100644
--- a/arch/sparc64/kernel/etrap.S
+++ b/arch/sparc64/kernel/etrap.S
@@ -72,7 +72,8 @@ etrap_irq:
sethi %hi(sparc64_kern_pri_context), %g2
ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3
stxa %g3, [%l4] ASI_DMMU
- flush %l6
+ sethi %hi(KERNBASE), %l4
+ flush %l4
wr %g0, ASI_AIUS, %asi
2: wrpr %g0, 0x0, %tl
mov %g4, %l4
@@ -215,7 +216,8 @@ scetrap:
sethi %hi(sparc64_kern_pri_context), %g2
ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3
stxa %g3, [%l4] ASI_DMMU
- flush %l6
+ sethi %hi(KERNBASE), %l4
+ flush %l4
mov ASI_AIUS, %l7
2: mov %g4, %l4
diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S
index 5a62ec5d531c..89794ebdcbcf 100644
--- a/arch/sparc64/kernel/rtrap.S
+++ b/arch/sparc64/kernel/rtrap.S
@@ -259,7 +259,8 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
ldx [%l1 + %lo(sparc64_kern_pri_nuc_bits)], %l1
or %l0, %l1, %l0
stxa %l0, [%l7] ASI_DMMU
- flush %g6
+ sethi %hi(KERNBASE), %l7
+ flush %l7
rdpr %wstate, %l1
rdpr %otherwin, %l2
srl %l1, 3, %l1
diff --git a/arch/sparc64/kernel/winfixup.S b/arch/sparc64/kernel/winfixup.S
index de588036df43..c0545d089c96 100644
--- a/arch/sparc64/kernel/winfixup.S
+++ b/arch/sparc64/kernel/winfixup.S
@@ -20,7 +20,8 @@ set_pcontext:
ldx [%l1 + %lo(sparc64_kern_pri_context)], %l1
mov PRIMARY_CONTEXT, %g1
stxa %l1, [%g1] ASI_DMMU
- flush %g6
+ sethi %hi(KERNBASE), %l1
+ flush %l1
retl
nop
diff --git a/arch/sparc64/lib/clear_page.S b/arch/sparc64/lib/clear_page.S
index b59884ef051d..cdc634bceba0 100644
--- a/arch/sparc64/lib/clear_page.S
+++ b/arch/sparc64/lib/clear_page.S
@@ -9,6 +9,7 @@
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/spitfire.h>
+#include <asm/head.h>
/* What we used to do was lock a TLB entry into a specific
* TLB slot, clear the page with interrupts disabled, then
@@ -66,7 +67,8 @@ clear_user_page: /* %o0=dest, %o1=vaddr */
wrpr %o4, PSTATE_IE, %pstate
stxa %o0, [%g3] ASI_DMMU
stxa %g1, [%g0] ASI_DTLB_DATA_IN
- flush %g6
+ sethi %hi(KERNBASE), %g1
+ flush %g1
wrpr %o4, 0x0, %pstate
mov 1, %o4
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
index a87394824ec2..269ed57b3e9d 100644
--- a/arch/sparc64/mm/ultra.S
+++ b/arch/sparc64/mm/ultra.S
@@ -36,9 +36,10 @@ __flush_tlb_mm: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
mov 0x50, %g3
stxa %g0, [%g3] ASI_DMMU_DEMAP
stxa %g0, [%g3] ASI_IMMU_DEMAP
+ sethi %hi(KERNBASE), %g3
+ flush %g3
retl
- flush %g6
- nop
+ nop
nop
nop
nop
@@ -72,7 +73,8 @@ __flush_tlb_pending:
brnz,pt %o1, 1b
nop
stxa %g2, [%o4] ASI_DMMU
- flush %g6
+ sethi %hi(KERNBASE), %o4
+ flush %o4
retl
wrpr %g7, 0x0, %pstate
nop
@@ -94,8 +96,10 @@ __flush_tlb_kernel_range: /* %o0=start, %o1=end */
membar #Sync
brnz,pt %o3, 1b
sub %o3, %o4, %o3
-2: retl
- flush %g6
+2: sethi %hi(KERNBASE), %o3
+ flush %o3
+ retl
+ nop
__spitfire_flush_tlb_mm_slow:
rdpr %pstate, %g1
@@ -105,7 +109,8 @@ __spitfire_flush_tlb_mm_slow:
stxa %g0, [%g3] ASI_IMMU_DEMAP
flush %g6
stxa %g2, [%o1] ASI_DMMU
- flush %g6
+ sethi %hi(KERNBASE), %o1
+ flush %o1
retl
wrpr %g1, 0, %pstate
@@ -181,7 +186,7 @@ __flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */
.previous
/* Cheetah specific versions, patched at boot time. */
-__cheetah_flush_tlb_mm: /* 18 insns */
+__cheetah_flush_tlb_mm: /* 19 insns */
rdpr %pstate, %g7
andn %g7, PSTATE_IE, %g2
wrpr %g2, 0x0, %pstate
@@ -196,12 +201,13 @@ __cheetah_flush_tlb_mm: /* 18 insns */
stxa %g0, [%g3] ASI_DMMU_DEMAP
stxa %g0, [%g3] ASI_IMMU_DEMAP
stxa %g2, [%o2] ASI_DMMU
- flush %g6
+ sethi %hi(KERNBASE), %o2
+ flush %o2
wrpr %g0, 0, %tl
retl
wrpr %g7, 0x0, %pstate
-__cheetah_flush_tlb_pending: /* 26 insns */
+__cheetah_flush_tlb_pending: /* 27 insns */
/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
rdpr %pstate, %g7
sllx %o1, 3, %o1
@@ -225,7 +231,8 @@ __cheetah_flush_tlb_pending: /* 26 insns */
brnz,pt %o1, 1b
nop
stxa %g2, [%o4] ASI_DMMU
- flush %g6
+ sethi %hi(KERNBASE), %o4
+ flush %o4
wrpr %g0, 0, %tl
retl
wrpr %g7, 0x0, %pstate
@@ -265,14 +272,14 @@ cheetah_patch_cachetlbops:
sethi %hi(__cheetah_flush_tlb_mm), %o1
or %o1, %lo(__cheetah_flush_tlb_mm), %o1
call cheetah_patch_one
- mov 18, %o2
+ mov 19, %o2
sethi %hi(__flush_tlb_pending), %o0
or %o0, %lo(__flush_tlb_pending), %o0
sethi %hi(__cheetah_flush_tlb_pending), %o1
or %o1, %lo(__cheetah_flush_tlb_pending), %o1
call cheetah_patch_one
- mov 26, %o2
+ mov 27, %o2
#ifdef DCACHE_ALIASING_POSSIBLE
sethi %hi(__flush_dcache_page), %o0