author		Jason Liu <jason.hui.liu@nxp.com>	2017-06-16 06:55:33 +0800
committer	Jason Liu <jason.hui.liu@nxp.com>	2019-02-12 10:27:28 +0800
commit		c9eb1788558f07dfda0c15b684f79aedb4bfa623 (patch)
tree		28813ffb85998ec1906f757bcd9db72b2c63179e /arch/arm64/include
parent		4c998a8ec68aa49a7dab3ca1224045b1d37ec944 (diff)
MLK-16005-2 arm64: tlb: add the SW workaround for i.MX8QM TKT340553
On i.MX8QM TO1.0 there is an issue: the bus width between A53-CCI-A72 is
limited to 36 bits. For TLB maintenance through DVM messages over the AR
channel, some bits are forced (truncated) to zero as follows:

    ASID[15:12] is forced to 0
    VA[48:45]   is forced to 0
    VA[44:41]   is forced to 0
    VA[39:36]   is forced to 0

As a result, TLB maintenance across the clusters does not work as
expected, because some VA and ASID bits get truncated and forced to zero.
The SW workaround is: use vmalle1is if the VA is larger than 36 bits or
ASID[15:12] is not zero; otherwise, take the original TLB maintenance
path.

Signed-off-by: Jason Liu <jason.hui.liu@nxp.com>
Reviewed-by: Anson Huang <anson.huang@nxp.com>
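The decision logic is the same in every flush helper changed below: fall
back to a full vmalle1is whenever the VA or ASID would lose bits over the
36-bit interconnect. A minimal standalone sketch of that predicate,
assuming TKT340553_SW_WORKAROUND resolves to a runtime check for affected
TO1.0 silicon (its actual definition lives in soc/imx8/soc.h, which is
not part of this diff); the helper name here is hypothetical:

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Hypothetical illustration only, not from the patch: returns true
     * when a per-VA/per-ASID TLBI would be truncated by the 36-bit
     * A53-CCI-A72 bus, so a global vmalle1is must be issued instead.
     */
    static bool tkt340553_needs_global_flush(uint64_t va, uint16_t asid)
    {
        /* VA[48:36] would be forced to zero in the DVM message */
        if (va >> 36)
            return true;
        /* ASID[15:12] would be forced to zero in the DVM message */
        if (asid >> 12)
            return true;
        return false;
    }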
Diffstat (limited to 'arch/arm64/include')
-rw-r--r--	arch/arm64/include/asm/tlbflush.h | 41
1 file changed, 32 insertions(+), 9 deletions(-)
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 9e82dd79c7db..8c72efcb638e 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -24,6 +24,7 @@
#include <linux/sched.h>
#include <asm/cputype.h>
#include <asm/mmu.h>
+#include <soc/imx8/soc.h>
/*
* Raw TLBI operations.
@@ -120,8 +121,12 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
unsigned long asid = ASID(mm) << 48;
dsb(ishst);
- __tlbi(aside1is, asid);
- __tlbi_user(aside1is, asid);
+ if (TKT340553_SW_WORKAROUND && ASID(mm) >> 11) {
+ __tlbi(vmalle1is);
+ } else {
+ __tlbi(aside1is, asid);
+ __tlbi_user(aside1is, asid);
+ }
dsb(ish);
}
@@ -131,8 +136,12 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr = uaddr >> 12 | (ASID(vma->vm_mm) << 48);
dsb(ishst);
- __tlbi(vale1is, addr);
- __tlbi_user(vale1is, addr);
+ if (TKT340553_SW_WORKAROUND && (uaddr >> 36 || (ASID(vma->vm_mm) >> 12))) {
+ __tlbi(vmalle1is);
+ } else {
+ __tlbi(vale1is, addr);
+ __tlbi_user(vale1is, addr);
+ }
dsb(ish);
}
@@ -148,6 +157,7 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
{
unsigned long asid = ASID(vma->vm_mm) << 48;
unsigned long addr;
+ unsigned long mask = (1 << 20) - 1;
if ((end - start) > MAX_TLB_RANGE) {
flush_tlb_mm(vma->vm_mm);
@@ -156,10 +166,14 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
start = asid | (start >> 12);
end = asid | (end >> 12);
+ mask <<= 24;
+
dsb(ishst);
for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
- if (last_level) {
+ if (TKT340553_SW_WORKAROUND && (addr & mask || (ASID(vma->vm_mm) >> 12))) {
+ __tlbi(vmalle1is);
+ } else if (last_level) {
__tlbi(vale1is, addr);
__tlbi_user(vale1is, addr);
} else {
@@ -189,8 +203,12 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
end >>= 12;
dsb(ishst);
- for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
- __tlbi(vaae1is, addr);
+ for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
+ if (TKT340553_SW_WORKAROUND && addr >> 24)
+ __tlbi(vmalle1is);
+ else
+ __tlbi(vaae1is, addr);
+ }
dsb(ish);
isb();
}
@@ -204,8 +222,13 @@ static inline void __flush_tlb_pgtable(struct mm_struct *mm,
{
unsigned long addr = uaddr >> 12 | (ASID(mm) << 48);
- __tlbi(vae1is, addr);
- __tlbi_user(vae1is, addr);
+ if (TKT340553_SW_WORKAROUND && (uaddr >> 36 || (ASID(mm) >> 12))) {
+ __tlbi(vmalle1is);
+ } else {
+ __tlbi(vae1is, addr);
+ __tlbi_user(vae1is, addr);
+ }
+
dsb(ish);
}