author		David S. Miller <davem@davemloft.net>	2006-01-31 18:31:20 -0800
committer	David S. Miller <davem@sunset.davemloft.net>	2006-03-20 01:11:17 -0800
commit		98c5584cfc47932c4f3ccf5eee2e0bae1447b85e (patch)
tree		c067ac8bfc081bbe0b3073374cb15708458e04ab /arch/sparc64/kernel/tsb.S
parent		09f94287f7260e03bbeab497e743691fafcc22c3 (diff)
[SPARC64]: Add infrastructure for dynamic TSB sizing.
This also cleans up tsb_context_switch(). The assembler routine is now __tsb_context_switch() and the former is an inline function that picks out the bits from the mm_struct and passes them into the assembler code as arguments.

setup_tsb_parms() computes the locked TLB entry to map the TSB. Later when we support using the physical address quad load instructions of Cheetah+ and later, we'll simply use the physical address for the TSB register value and set the map virtual and PTE both to zero.

Signed-off-by: David S. Miller <davem@davemloft.net>
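A minimal sketch of the inline wrapper described above, assuming the four values live in mm->context under names like tsb_reg_val, tsb_map_vaddr and tsb_map_pte (the field names are illustrative, not taken from this patch); the argument order follows the %o0-%o3 convention documented in the assembler comment below:

	/* Sketch only: the mm->context field names are assumed for illustration. */
	static inline void tsb_context_switch(struct mm_struct *mm)
	{
		__tsb_context_switch(__pa(mm->pgd),             /* %o0: page table physical address */
				     mm->context.tsb_reg_val,   /* %o1: TSB register value */
				     mm->context.tsb_map_vaddr, /* %o2: TSB virtual address */
				     mm->context.tsb_map_pte);  /* %o3: TSB mapping locked PTE */
	}

The locked-PTE value itself, which the KERN_HIGHBITS/KERN_LOWBITS block used to build in assembly, is sketched after the diff.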
Diffstat (limited to 'arch/sparc64/kernel/tsb.S')
-rw-r--r--	arch/sparc64/kernel/tsb.S	55
1 file changed, 21 insertions(+), 34 deletions(-)
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index 76f2c0b01f36..fe266bad0a28 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -130,48 +130,36 @@ winfix_trampoline:
* schedule() time.
*
* %o0: page table physical address
- * %o1: TSB address
+ * %o1: TSB register value
+ * %o2: TSB virtual address
+ * %o3: TSB mapping locked PTE
+ *
+ * We have to run this whole thing with interrupts
+ * disabled so that the current cpu doesn't change
+ * due to preemption.
*/
.align 32
- .globl tsb_context_switch
-tsb_context_switch:
+ .globl __tsb_context_switch
+__tsb_context_switch:
rdpr %pstate, %o5
wrpr %o5, PSTATE_IE, %pstate
- ldub [%g6 + TI_CPU], %o3
- sethi %hi(trap_block), %o4
- sllx %o3, TRAP_BLOCK_SZ_SHIFT, %o3
- or %o4, %lo(trap_block), %o4
- add %o4, %o3, %o4
- stx %o0, [%o4 + TRAP_PER_CPU_PGD_PADDR]
-
- brgez %o1, 9f
- nop
-
- /* Lock TSB into D-TLB. */
- sethi %hi(PAGE_SIZE), %o3
- and %o3, %o1, %o3
- sethi %hi(TSBMAP_BASE), %o2
- add %o2, %o3, %o2
+ ldub [%g6 + TI_CPU], %g1
+ sethi %hi(trap_block), %g2
+ sllx %g1, TRAP_BLOCK_SZ_SHIFT, %g1
+ or %g2, %lo(trap_block), %g2
+ add %g2, %g1, %g2
+ stx %o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]
- /* XXX handle PAGE_SIZE != 8K correctly... */
mov TSB_REG, %g1
- stxa %o2, [%g1] ASI_DMMU
+ stxa %o1, [%g1] ASI_DMMU
membar #Sync
- stxa %o2, [%g1] ASI_IMMU
+ stxa %o1, [%g1] ASI_IMMU
membar #Sync
-#define KERN_HIGHBITS ((_PAGE_VALID|_PAGE_SZBITS)^0xfffff80000000000)
-#define KERN_LOWBITS (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W | _PAGE_L)
- sethi %uhi(KERN_HIGHBITS), %g2
- or %g2, %ulo(KERN_HIGHBITS), %g2
- sllx %g2, 32, %g2
- or %g2, KERN_LOWBITS, %g2
-#undef KERN_HIGHBITS
-#undef KERN_LOWBITS
-
- xor %o1, %g2, %o1
+ brz %o2, 9f
+ nop
/* We use entry 61 for this locked entry. This is the spitfire
* TLB entry number, and luckily cheetah masks the value with
@@ -184,11 +172,10 @@ tsb_context_switch:
stxa %o2, [%g1] ASI_DMMU
membar #Sync
mov (61 << 3), %g1
- stxa %o1, [%g1] ASI_DTLB_DATA_ACCESS
+ stxa %o3, [%g1] ASI_DTLB_DATA_ACCESS
membar #Sync
-
9:
wrpr %o5, %pstate
retl
- mov %o2, %o0
+ nop
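As the commit message notes, building the locked-mapping PTE moves out of the assembly (the deleted KERN_HIGHBITS/KERN_LOWBITS block) and into C before __tsb_context_switch() is called. A hedged sketch of that computation, reusing only the attribute bits visible in the old code; the helper name, the TSBMAP_BASE assignment, and the mm->context field names are assumptions:

	/* Sketch only: names are illustrative, attribute bits mirror the deleted assembly. */
	static void setup_tsb_parms_sketch(struct mm_struct *mm, unsigned long tsb_vaddr)
	{
		unsigned long tsb_paddr = __pa(tsb_vaddr);

		/* Kernel virtual window used for the locked TSB mapping (assumed). */
		mm->context.tsb_map_vaddr = TSBMAP_BASE;

		/* Physical address of the TSB plus the same attribute bits the old
		 * KERN_HIGHBITS/KERN_LOWBITS macros encoded: valid, page size,
		 * cacheable, privileged, writable, locked. */
		mm->context.tsb_map_pte = tsb_paddr |
			_PAGE_VALID | _PAGE_SZBITS |
			_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W | _PAGE_L;
	}

Passing zero for both the map virtual address and the PTE, as the commit message anticipates for the Cheetah+ physical quad-load case, makes __tsb_context_switch() skip the locked-entry setup entirely: the new code branches over that block when %o2 is zero.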