author		Nitin Garg <nitin.garg@nxp.com>		2019-12-04 19:03:59 -0600
committer	Anson Huang <Anson.Huang@nxp.com>	2019-12-11 13:58:36 +0800
commit		7ae82f64e9648cca0e3c9f59a678b80138d251e1 (patch)
tree		fe3137eccf0bb05a5a74927dc0303381caef3e05
parent		faee6309301eb0e0866e7e3a0df3f7747f38bbd5 (diff)
LF-363 arm64: kernel: TKT340553 Errata workaround update for i.MX8QM
As per the latest i.MX8QM SoC errata, the TKT340553 workaround needs to be
updated to unconditionally downgrade TLB operations and instruction cache
maintenance.

Signed-off-by: Nitin Garg <nitin.garg@nxp.com>
Signed-off-by: Anson Huang <Anson.Huang@nxp.com>
Acked-by: Peng Fan <peng.fan@nxp.com>
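The change follows a single pattern across all four files: earlier revisions of this workaround downgraded a targeted invalidation to a full one only when the address or ASID fell inside the erratum's affected range (the removed ASID(mm) >> 12, uaddr >> 36, addr >> 24, and addr & mask tests below); this update makes the downgrade unconditional whenever the flag is set. A minimal sketch of that pattern, assuming the TKT340553_SW_WORKAROUND flag and the kernel's __tlbi()/__TLBI_VADDR()/ASID() helpers; an illustration, not the patch itself:

	/*
	 * Sketch: with the erratum flag set, a targeted per-ASID invalidate
	 * is replaced by a full inner-shareable "invalidate all"
	 * (vmalle1is), trading TLB performance for correctness.
	 */
	static inline void flush_tlb_mm_sketch(struct mm_struct *mm)
	{
		unsigned long asid = __TLBI_VADDR(0, ASID(mm));

		dsb(ishst);			/* order prior PTE updates */
		if (TKT340553_SW_WORKAROUND) {
			__tlbi(vmalle1is);	/* full invalidate, unconditionally */
		} else {
			__tlbi(aside1is, asid);	/* targeted per-ASID invalidate */
			__tlbi_user(aside1is, asid);
		}
		dsb(ish);			/* wait for completion */
	}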
-rw-r--r--	arch/arm64/include/asm/tlbflush.h	21
-rw-r--r--	arch/arm64/kernel/cpufeature.c		13
-rw-r--r--	arch/arm64/kernel/traps.c		28
-rw-r--r--	arch/arm64/kvm/hyp/tlb.c		10
4 files changed, 59 insertions(+), 13 deletions(-)
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 2220f4f92ff9..e774f7cfad2c 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -151,9 +151,9 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 	unsigned long asid = __TLBI_VADDR(0, ASID(mm));
 
 	dsb(ishst);
-	if (TKT340553_SW_WORKAROUND && ASID(mm) >> 12) {
+	if (TKT340553_SW_WORKAROUND) {
 		__tlbi(vmalle1is);
-	} else {
+	} else {
 		__tlbi(aside1is, asid);
 		__tlbi_user(aside1is, asid);
 	}
@@ -166,8 +166,8 @@ static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
 	unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
 
 	dsb(ishst);
-	if (TKT340553_SW_WORKAROUND && (uaddr >> 36 || (ASID(vma->vm_mm) >> 12))) {
-		__tlbi(vmalle1is);
+	if (TKT340553_SW_WORKAROUND) {
+		__tlbi(vmalle1is);
 	} else {
 		__tlbi(vale1is, addr);
 		__tlbi_user(vale1is, addr);
@@ -212,9 +212,9 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 
 	dsb(ishst);
 	for (addr = start; addr < end; addr += stride) {
-		if (TKT340553_SW_WORKAROUND && (addr & mask || (ASID(vma->vm_mm) >> 12))) {
-			__tlbi(vmalle1is);
-		} else if (last_level) {
+		if (TKT340553_SW_WORKAROUND) {
+			__tlbi(vmalle1is);
+		} else if (last_level) {
 			__tlbi(vale1is, addr);
 			__tlbi_user(vale1is, addr);
 		} else {
@@ -222,6 +222,7 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 			__tlbi_user(vae1is, addr);
 		}
 	}
+
 	dsb(ish);
 }
@@ -249,8 +250,8 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 
 	dsb(ishst);
 	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
-		if (TKT340553_SW_WORKAROUND && addr >> 24)
-			__tlbi(vmalle1is);
+		if (TKT340553_SW_WORKAROUND)
+			__tlbi(vmalle1is);
 		else
 			__tlbi(vaale1is, addr);
 	}
@@ -267,7 +268,7 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
 	unsigned long addr = __TLBI_VADDR(kaddr, 0);
 
 	dsb(ishst);
-	if (TKT340553_SW_WORKAROUND && addr >> 24)
+	if (TKT340553_SW_WORKAROUND)
 		__tlbi(vmalle1is);
 	else
 		__tlbi(vaae1is, addr);
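A note on the encodings above: TLBI operands carry VA[55:12] in bits [43:0] and the ASID in bits [63:48], which is why flush_tlb_kernel_range() steps in units of 1 << (PAGE_SHIFT - 12) of encoded address, and why the removed gates compared shifted operands (addr >> 24, uaddr >> 36) against the erratum's affected span. A self-contained sketch mirroring __TLBI_VADDR() from this header:

	/* TLBI VA operand: bits [43:0] hold VA[55:12], bits [63:48] the ASID. */
	static inline unsigned long tlbi_vaddr_sketch(unsigned long va,
						      unsigned long asid)
	{
		unsigned long ta = (va >> 12) & ((1UL << 44) - 1);

		return ta | (asid << 48);	/* matches __TLBI_VADDR(va, asid) */
	}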
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 80f459ad0190..0d3cf394d9c2 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -565,9 +565,22 @@ init_cpu_hwcaps_indirect_list_from_array(const struct arm64_cpu_capabilities *caps)
 	}
 }
 
+extern bool TKT340553_SW_WORKAROUND;
 static void __init init_cpu_hwcaps_indirect_list(void)
 {
 	init_cpu_hwcaps_indirect_list_from_array(arm64_features);
+#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
+#if defined(CONFIG_ARM64_ERRATUM_826319) || \
+    defined(CONFIG_ARM64_ERRATUM_827319) || \
+    defined(CONFIG_ARM64_ERRATUM_824069)
+	if (TKT340553_SW_WORKAROUND) {
+		struct midr_range *midr_range_list =
+			(struct midr_range *)(arm64_errata[0].midr_range_list);
+
+		midr_range_list[0].rv_max = MIDR_CPU_VAR_REV(0, 4);
+	}
+#endif
+#endif
 	init_cpu_hwcaps_indirect_list_from_array(arm64_errata);
 }
 
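The hunk above reaches into arm64_errata[0], which with CONFIG_ARM64_WORKAROUND_CLEAN_CACHE enabled is the Cortex-A53 clean-cache entry guarded by the same errata options (826319/827319/824069), and raises its rv_max so that workaround also matches the i.MX8QM's A53 revisions. A simplified illustration of how a midr_range bounds that match; the field layout follows arch/arm64/include/asm/cputype.h, and the hard-coded [0] index assumes the errata table's ordering:

	#include <stdbool.h>
	#include <stdint.h>

	/* Simplified midr_range: a CPU matches when its (variant, revision)
	 * value falls between rv_min and rv_max for a given model. */
	struct midr_range_sketch {
		uint32_t model;
		uint32_t rv_min;
		uint32_t rv_max;
	};

	/* Variant lives at MIDR bits [23:20], revision at bits [3:0]. */
	#define MIDR_CPU_VAR_REV_SKETCH(var, rev)	(((var) << 20) | (rev))

	static bool midr_rv_in_range(uint32_t rv, const struct midr_range_sketch *r)
	{
		return rv >= r->rv_min && rv <= r->rv_max;
	}

	/* Raising rv_max to MIDR_CPU_VAR_REV(0, 4) widens the window to
	 * variant 0, revisions r0p0..r0p4. */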
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 34739e80211b..3661d78e93fe 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -426,6 +426,29 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 		uaccess_ttbr0_disable();			\
 	}
 
+#define __user_cache_maint_ivau(insn, address, res)		\
+	do {							\
+		if (address >= user_addr_max()) {		\
+			res = -EFAULT;				\
+		} else {					\
+			uaccess_ttbr0_enable();			\
+			asm volatile (				\
+				"1:	" insn "\n"		\
+				"	mov	%w0, #0\n"	\
+				"2:\n"				\
+				"	.pushsection .fixup,\"ax\"\n"	\
+				"	.align	2\n"		\
+				"3:	mov	%w0, %w2\n"	\
+				"	b	2b\n"		\
+				"	.popsection\n"		\
+				_ASM_EXTABLE(1b, 3b)		\
+				: "=r" (res)			\
+				: "r" (address), "i" (-EFAULT));	\
+			uaccess_ttbr0_disable();		\
+		}						\
+	} while (0)
+
+extern bool TKT340553_SW_WORKAROUND;
 static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
 {
 	unsigned long address;
@@ -452,7 +475,10 @@ static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
 		__user_cache_maint("dc civac", address, ret);
 		break;
 	case ESR_ELx_SYS64_ISS_CRM_IC_IVAU:	/* IC IVAU */
-		__user_cache_maint("ic ivau", address, ret);
+		if (TKT340553_SW_WORKAROUND)
+			__user_cache_maint_ivau("ic ialluis", address, ret);
+		else
+			__user_cache_maint("ic ivau", address, ret);
 		break;
 	default:
 		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
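The usual source of trapped EL0 "ic ivau" instructions handled above is userspace flushing freshly written code, e.g. via GCC/Clang's __builtin___clear_cache(), which emits the "dc cvau" / "ic ivau" sequence on AArch64. A hypothetical userspace test of that path (the trap only fires when EL0 cache maintenance is disabled, i.e. SCTLR_EL1.UCI is clear):

	#include <string.h>
	#include <sys/mman.h>

	int main(void)
	{
		unsigned int insn = 0xd65f03c0;	/* AArch64 "ret" encoding */
		void *page = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
				  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (page == MAP_FAILED)
			return 1;
		memcpy(page, &insn, sizeof(insn));
		/* Flush D-cache / invalidate I-cache for the new code; each
		 * trapped "ic ivau" lands in user_cache_maint_handler(). */
		__builtin___clear_cache((char *)page, (char *)page + sizeof(insn));
		((void (*)(void))page)();	/* execute the new "ret" */
		return 0;
	}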
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index eb0efc5557f3..6a109156681c 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -16,6 +16,8 @@ struct tlb_inv_context {
 	u64		sctlr;
 };
 
+extern bool TKT340553_SW_WORKAROUND;
+
 static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
 						 struct tlb_inv_context *cxt)
 {
@@ -126,8 +128,12 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 	 * Instead, we invalidate Stage-2 for this IPA, and the
 	 * whole of Stage-1. Weep...
 	 */
-	ipa >>= 12;
-	__tlbi(ipas2e1is, ipa);
+	if (TKT340553_SW_WORKAROUND) {
+		__tlbi(vmalls12e1is);
+	} else {
+		ipa >>= 12;
+		__tlbi(ipas2e1is, ipa);
+	}
 
 	/*
 	 * We have to ensure completion of the invalidation at Stage-2,