Diffstat (limited to 'arch/powerpc/mm/slb.c')
-rw-r--r--  arch/powerpc/mm/slb.c | 45
1 file changed, 16 insertions(+), 29 deletions(-)
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index b0697017d0e8..a73d2d700973 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -69,20 +69,9 @@ static inline void slb_shadow_update(unsigned long ea,
smp_wmb();
}
-static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
- unsigned long entry)
+static inline void slb_shadow_clear(unsigned long entry)
{
- /*
- * Updating the shadow buffer before writing the SLB ensures
- * we don't get a stale entry here if we get preempted by PHYP
- * between these two statements.
- */
- slb_shadow_update(ea, flags, entry);
-
- asm volatile("slbmte %0,%1" :
- : "r" (mk_vsid_data(ea, flags)),
- "r" (mk_esid_data(ea, entry))
- : "memory" );
+ get_slb_shadow()->save_area[entry].esid = 0;
}
void slb_flush_and_rebolt(void)
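[Note: slb_shadow_clear() can get away with zeroing just the esid word because that is where the valid bit lives: once SLB_ESID_V is clear, PHYP ignores the slot when it re-establishes the bolted entries, and the stale vsid next to it is harmless. A minimal sketch of the save area being indexed, simplified from struct slb_shadow in include/asm-powerpc/lppaca.h (layout assumed from that header, not part of this patch):

	/* Simplified sketch of the PHYP-visible shadow of the bolted
	 * SLB entries, reached via get_slb_shadow() from the paca.
	 */
	struct slb_shadow_sketch {
		struct {
			u64 esid;	/* ESID | SLB_ESID_V | bolted slot number */
			u64 vsid;	/* VSID | SLB_VSID_* flags */
		} save_area[3];		/* one per bolted entry (SLB_NUM_BOLTED) */
	};
]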
@@ -100,11 +89,13 @@ void slb_flush_and_rebolt(void)
vflags = SLB_VSID_KERNEL | vmalloc_llp;
ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
- if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
+ if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET) {
ksp_esid_data &= ~SLB_ESID_V;
-
- /* Only third entry (stack) may change here so only resave that */
- slb_shadow_update(get_paca()->kstack, lflags, 2);
+ slb_shadow_clear(2);
+ } else {
+ /* Update stack entry; others don't change */
+ slb_shadow_update(get_paca()->kstack, lflags, 2);
+ }
/* We need to do this all in asm, so we're sure we don't touch
* the stack between the slbia and rebolting it. */
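[Note: the PAGE_OFFSET test above catches the case where the kernel stack sits in the first linear-mapping segment, which is already bolted in slot 0; re-bolting it in slot 2 would create a duplicate SLB entry, so the entry is marked invalid and its shadow slot cleared instead. The test reads directly off the ESID helper defined earlier in this file; a sketch of it for reference, in its pre-1T-segment form, on the assumption it is unchanged by this patch:

	/* Sketch of the helper the ksp_esid_data test relies on: the
	 * ESID bits of the effective address survive untouched,
	 * alongside the valid bit and the bolted slot number.
	 */
	static inline unsigned long mk_esid_data(unsigned long ea, unsigned long slot)
	{
		return (ea & ESID_MASK) | SLB_ESID_V | slot;
	}
]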
@@ -235,16 +226,12 @@ void slb_initialize(void)
vflags = SLB_VSID_KERNEL | vmalloc_llp;
/* Invalidate the entire SLB (even slot 0) & all the ERATS */
- asm volatile("isync":::"memory");
- asm volatile("slbmte %0,%0"::"r" (0) : "memory");
- asm volatile("isync; slbia; isync":::"memory");
- create_shadowed_slbe(PAGE_OFFSET, lflags, 0);
-
- create_shadowed_slbe(VMALLOC_START, vflags, 1);
-
- /* We don't bolt the stack for the time being - we're in boot,
- * so the stack is in the bolted segment. By the time it goes
- * elsewhere, we'll call _switch() which will bolt in the new
- * one. */
- asm volatile("isync":::"memory");
+ slb_shadow_update(PAGE_OFFSET, lflags, 0);
+ asm volatile("isync; slbia; sync; slbmte %0,%1; isync" ::
+ "r" (get_slb_shadow()->save_area[0].vsid),
+ "r" (get_slb_shadow()->save_area[0].esid) : "memory");
+
+ slb_shadow_update(VMALLOC_START, vflags, 1);
+
+ slb_flush_and_rebolt();
}
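[Note: the rewritten slb_initialize() folds the invalidate and the rebolt of slot 0 into one asm block, so nothing runs on a stale SLB between the slbia and the slbmte, then reuses slb_flush_and_rebolt() to install the remaining bolted entries. As a reading aid, a hypothetical helper (not in the patch) that captures the pattern the combined asm uses for slot 0: the hardware SLB is loaded only from values already published in the shadow, so the two cannot disagree from PHYP's point of view.

	/* Hypothetical helper, not part of the patch: load one bolted
	 * SLB slot from the values already written to the shadow
	 * buffer, mirroring what the combined asm above does for
	 * slot 0.  Writing the shadow first (slb_shadow_update()) and
	 * sourcing slbmte from it keeps the SLB and shadow in sync.
	 */
	static inline void slb_load_from_shadow(unsigned long entry)
	{
		struct slb_shadow *s = get_slb_shadow();

		asm volatile("slbmte %0,%1"
			     :: "r" (s->save_area[entry].vsid),
				"r" (s->save_area[entry].esid)
			     : "memory");
	}
]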