author      David Daney <david.daney@cavium.com>    2014-10-20 15:34:23 -0700
committer   Zefan Li <lizefan@huawei.com>           2015-02-02 17:04:56 +0800
commit      e930516632e7bde53d1abc5154e9245c4b0bdb09 (patch)
tree        26c6243c0bf873eb4dd22bfb327c9353e931de59 /arch
parent      b58f0431a070f39b0e140255a26e87b3ad4557a0 (diff)
MIPS: tlbex: Properly fix HUGE TLB Refill exception handler
commit 9e0f162a36914937a937358fcb45e0609ef2bfc4 upstream.

In commit 8393c524a25609 (MIPS: tlbex: Fix a missing statement for
HUGETLB), the TLB Refill handler was fixed so that non-OCTEON targets
would work properly with huge pages. The change was incorrect in that
it broke the OCTEON case.

The problem is shown here:

xxx0:  df7a0000  ld     k0,0(k1)
.
.
.
xxxc0: df610000  ld     at,0(k1)
xxxc4: 335a0ff0  andi   k0,k0,0xff0
xxxc8: e825ffcd  bbit1  at,0x5,0x0
xxxcc: 003ad82d  daddu  k1,at,k0
.
.
.

In the non-OCTEON case there is a destructive test for the huge PTE
bit, and then at 0xc0, $k0 is reloaded (that is what the
8393c524a25609 patch added).

In the OCTEON case, we modify k1 in the branch delay slot, but we
never need k0 again, so the new load is not needed; but since k1 is
modified, if we do the load, we load from a garbage location and then
take a nested TLB Refill, which is seen in userspace as either SIGBUS
or SIGSEGV (depending on the garbage).

The real fix is to do this reloading only when it is needed, and never
where it is harmful.

Signed-off-by: David Daney <david.daney@cavium.com>
Cc: Huacai Chen <chenhc@lemote.com>
Cc: Fuxin Zhang <zhangfx@lemote.com>
Cc: Zhangjin Wu <wuzhangjin@gmail.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/8151/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: Zefan Li <lizefan@huawei.com>
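The sketch below is a minimal, self-contained C model (not the kernel's uasm API) of the decision the patch encodes: compute a need_reload_pte flag where the shape of the handler is chosen, and consult it where the reload would be emitted. All names, register numbers, and the printed "instructions" are illustrative stand-ins; the real logic lives in build_fast_tlb_refill_handler() and build_r4000_tlb_refill_handler() in arch/mips/mm/tlbex.c.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical plan structure modelled on struct mips_huge_tlb_info. */
struct huge_tlb_plan {
	int huge_pte;		/* register expected to hold the huge PTE */
	bool need_reload_pte;	/* reload the PTE at tlb_huge_update? */
};

static struct huge_tlb_plan plan_refill(bool scratch_fast_path)
{
	struct huge_tlb_plan plan;

	if (scratch_fast_path) {
		/*
		 * OCTEON-style fast path: the PTE is still live in a
		 * register and k1 has already been advanced in the branch
		 * delay slot, so reloading through k1 would read garbage.
		 */
		plan.huge_pte = 1;	/* stand-in for the scratch register */
		plan.need_reload_pte = false;
	} else {
		/*
		 * Classic path: testing the huge bit destroyed the PTE in
		 * $k0, so it must be reloaded before building the entries.
		 */
		plan.huge_pte = 26;	/* stand-in for $k0 */
		plan.need_reload_pte = true;
	}
	return plan;
}

static void emit_huge_update(const struct huge_tlb_plan *plan)
{
	/* Mirrors the patched hunk: emit the reload only when it is needed. */
	if (plan->need_reload_pte)
		printf("\tld\t$%d, 0($k1)\t# reload huge PTE\n", plan->huge_pte);
	printf("\t... build entries using $%d ...\n", plan->huge_pte);
}

int main(void)
{
	const struct huge_tlb_plan octeon = plan_refill(true);
	const struct huge_tlb_plan classic = plan_refill(false);

	puts("scratch (OCTEON-style) handler:");
	emit_huge_update(&octeon);
	puts("classic handler:");
	emit_huge_update(&classic);
	return 0;
}

The point of the flag is that only the code laying out each fast path knows whether k1 has already been advanced and whether the PTE survives, while the shared code at the tlb_huge_update label is the only place that can emit the reload; the flag carries that knowledge from one to the other.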
Diffstat (limited to 'arch')
-rw-r--r--   arch/mips/mm/tlbex.c   6
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index f5abdfa2f8a1..6d64efe77026 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1041,6 +1041,7 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
 struct mips_huge_tlb_info {
 	int huge_pte;
 	int restore_scratch;
+	bool need_reload_pte;
 };
 
 static struct mips_huge_tlb_info __cpuinit
@@ -1055,6 +1056,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
 
 	rv.huge_pte = scratch;
 	rv.restore_scratch = 0;
+	rv.need_reload_pte = false;
 
 	if (check_for_high_segbits) {
 		UASM_i_MFC0(p, tmp, C0_BADVADDR);
@@ -1247,6 +1249,7 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
 	} else {
 		htlb_info.huge_pte = K0;
 		htlb_info.restore_scratch = 0;
+		htlb_info.need_reload_pte = true;
 		vmalloc_mode = refill_noscratch;
 		/*
 		 * create the plain linear handler
@@ -1283,7 +1286,8 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
 	}
 #ifdef CONFIG_HUGETLB_PAGE
 	uasm_l_tlb_huge_update(&l, p);
-	UASM_i_LW(&p, K0, 0, K1);
+	if (htlb_info.need_reload_pte)
+		UASM_i_LW(&p, htlb_info.huge_pte, 0, K1);
 	build_huge_update_entries(&p, htlb_info.huge_pte, K1);
 	build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
 				   htlb_info.restore_scratch);