summary refs log tree commit diff
path: root/arch/arm64/include
diff options
context:
space:
mode:
authorNitin Garg <nitin.garg@nxp.com>2021-05-06 11:55:22 -0500
committerDenys Drozdov <denys.drozdov@toradex.com>2021-07-13 14:41:45 +0300
commitb93083071e9fdaf6ccc0be7c35c04a23c17662cd (patch)
treec3ea65c044901a57803f3c3c302ad4a51a39d97f /arch/arm64/include
parentedfc37d93d8da6f99923297d82a88fef751bf635 (diff)
MLK-23277: 8qm: Fix SW workaround for i.MX8QM TKT340553
The current workaround loops uselessly over the address range issuing a __tlbi(vmalle1is), which is harmful to system performance and buggy: the instruction flushes the entire TLB, so there is no benefit in issuing it more than once. Also fix missing barriers.

Signed-off-by: Nitin Garg <nitin.garg@nxp.com>
Signed-off-by: Marouen Ghodhbane <marouen.ghodhbane@nxp.com>
Reviewed-by: Jason Liu <jason.hui.liu@nxp.com>
(cherry picked from commit 5799755f37dd7bc826dfe8a3cac12871a7946a1a)
Diffstat (limited to 'arch/arm64/include')
-rw-r--r--arch/arm64/include/asm/tlbflush.h35
1 file changed, 22 insertions, 13 deletions
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 6042814301ed..e3660cce3e96 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -156,12 +156,15 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
dsb(ishst);
if (TKT340553_SW_WORKAROUND) {
+ /* Flush the entire TLB */
__tlbi(vmalle1is);
+ dsb(ish);
+ isb();
} else {
__tlbi(aside1is, asid);
__tlbi_user(aside1is, asid);
+ dsb(ish);
}
- dsb(ish);
}
static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
@@ -171,7 +174,10 @@ static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
dsb(ishst);
if (TKT340553_SW_WORKAROUND) {
+ /* Flush the entire TLB */
__tlbi(vmalle1is);
+ dsb(ish);
+ isb();
} else {
__tlbi(vale1is, addr);
__tlbi_user(vale1is, addr);
@@ -197,7 +203,6 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
{
unsigned long asid = ASID(vma->vm_mm);
unsigned long addr;
- unsigned long mask = (1 << 20) - 1;
start = round_down(start, stride);
end = round_up(end, stride);
@@ -212,13 +217,19 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
start = __TLBI_VADDR(start, asid);
end = __TLBI_VADDR(end, asid);
- mask <<= 24;
dsb(ishst);
+
+ if (TKT340553_SW_WORKAROUND) {
+ /* Flush the entire TLB and exit */
+ __tlbi(vmalle1is);
+ dsb(ish);
+ isb();
+ return;
+ }
+
for (addr = start; addr < end; addr += stride) {
- if (TKT340553_SW_WORKAROUND) {
- __tlbi(vmalle1is);
- } else if (last_level) {
+ if (last_level) {
__tlbi(vale1is, addr);
__tlbi_user(vale1is, addr);
} else {
@@ -244,7 +255,8 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
{
unsigned long addr;
- if ((end - start) > (MAX_TLBI_OPS * PAGE_SIZE)) {
+ if (((end - start) > (MAX_TLBI_OPS * PAGE_SIZE))
+ || (TKT340553_SW_WORKAROUND)) {
flush_tlb_all();
return;
}
@@ -253,12 +265,8 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
end = __TLBI_VADDR(end, 0);
dsb(ishst);
- for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
- if (TKT340553_SW_WORKAROUND)
- __tlbi(vmalle1is);
- else
- __tlbi(vaale1is, addr);
- }
+ for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
+ __tlbi(vaale1is, addr);
dsb(ish);
isb();
}
@@ -273,6 +281,7 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
dsb(ishst);
if (TKT340553_SW_WORKAROUND)
+ /* Flush the entire TLB */
__tlbi(vmalle1is);
else
__tlbi(vaae1is, addr);