Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r-- | arch/powerpc/mm/slb_low.S | 10 ++++++++++
1 file changed, 10 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 4c48b487698c..0b48ce40d351 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -179,6 +179,16 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 	b	slb_finish_load
 
 8:	/* invalid EA */
+	/*
+	 * It's possible the bad EA is too large to fit in the SLB cache, which
+	 * would mean we'd fail to invalidate it on context switch. So mark the
+	 * SLB cache as full so we force a full flush. We also set cr7+eq to
+	 * mark the address as a kernel address, so slb_finish_load() skips
+	 * trying to insert it into the SLB cache.
+	 */
+	li	r9,SLB_CACHE_ENTRIES + 1
+	sth	r9,PACASLBCACHEPTR(r13)
+	crset	4*cr7+eq
 	li	r10,0			/* BAD_VSID */
 	li	r9,0			/* BAD_VSID */
 	li	r11,SLB_VSID_USER	/* flags don't much matter */
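
Marking the SLB cache as full works because the context-switch path only walks the per-CPU SLB cache when the stored entry count is within bounds; once the count exceeds SLB_CACHE_ENTRIES it no longer trusts the cache and flushes the whole SLB instead. The C sketch below is a simplified, hypothetical model of that decision, not the kernel's actual switch_slb() from arch/powerpc/mm/slb.c; the invalidate_one_segment() and flush_and_rebolt_slb() helpers are stand-ins for the real slbie/slbia-based code.

/*
 * Simplified model of how the SLB cache is consumed at context switch.
 * Illustrative sketch only: the helpers below are hypothetical stand-ins
 * for the kernel's slbie loop and full flush-and-rebolt path.
 */
#include <stdio.h>

#define SLB_CACHE_ENTRIES	8	/* assumed to match the kernel constant */

struct paca_like {
	unsigned short slb_cache_ptr;		/* PACASLBCACHEPTR in the asm */
	unsigned long slb_cache[SLB_CACHE_ENTRIES];
};

static void invalidate_one_segment(unsigned long esid)	/* stand-in for slbie */
{
	printf("slbie segment 0x%lx\n", esid);
}

static void flush_and_rebolt_slb(void)			/* stand-in for a full flush */
{
	printf("full SLB flush and rebolt\n");
}

static void switch_slb_model(struct paca_like *paca)
{
	unsigned short offset = paca->slb_cache_ptr;

	if (offset <= SLB_CACHE_ENTRIES) {
		/* Normal case: invalidate only the cached user segments. */
		for (unsigned short i = 0; i < offset; i++)
			invalidate_one_segment(paca->slb_cache[i]);
	} else {
		/*
		 * Cache overflowed, or was deliberately marked full (as the
		 * patch does with SLB_CACHE_ENTRIES + 1): we no longer know
		 * every entry, so flush everything.
		 */
		flush_and_rebolt_slb();
	}
	paca->slb_cache_ptr = 0;
}

int main(void)
{
	struct paca_like paca = { .slb_cache_ptr = SLB_CACHE_ENTRIES + 1 };

	/* With the cache marked full, the model takes the full-flush path. */
	switch_slb_model(&paca);
	return 0;
}

In this model, the bogus EA never needs to fit in the cache: by setting slb_cache_ptr past SLB_CACHE_ENTRIES, the patch guarantees the full-flush branch runs at the next context switch, so the stale entry cannot survive.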