summaryrefslogtreecommitdiff
path: root/arch/arm64/include/asm
diff options
context:
space:
mode:
authorCatalin Marinas <catalin.marinas@arm.com>2026-04-07 11:28:41 +0100
committerCatalin Marinas <catalin.marinas@arm.com>2026-04-10 19:46:14 +0100
commit6bfbf574a39139da11af9fdf6e8d56fe1989cd3e (patch)
tree8510b7275b7dbe26ed33ceb4927b1b3447b77e37 /arch/arm64/include/asm
parent1f318b96cc84d7c2ab792fcc0bfd42a7ca890681 (diff)
arm64: tlb: Introduce __tlbi_sync_s1ish_{kernel,batch}() for TLB maintenance
Add __tlbi_sync_s1ish_kernel() similar to __tlbi_sync_s1ish() and use it for kernel TLB maintenance. Also use this function in flush_tlb_all() which is only used in relation to kernel mappings. Subsequent patches can differentiate between workarounds that apply to user only or both user and kernel. A subsequent patch will add mm_struct to __tlbi_sync_s1ish(). Since arch_tlbbatch_flush() is not specific to an mm, add a corresponding __tlbi_sync_s1ish_batch() helper. Acked-by: Mark Rutland <mark.rutland@arm.com> Cc: Will Deacon <will@kernel.org> Reviewed-by: Will Deacon <will@kernel.org> Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Diffstat (limited to 'arch/arm64/include/asm')
-rw-r--r--arch/arm64/include/asm/tlbflush.h20
1 file changed, 16 insertions, 4 deletions
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 1416e652612b..f41eebf00990 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -191,6 +191,18 @@ static inline void __tlbi_sync_s1ish(void)
__repeat_tlbi_sync(vale1is, 0);
}
+static inline void __tlbi_sync_s1ish_batch(void)
+{
+ dsb(ish);
+ __repeat_tlbi_sync(vale1is, 0);
+}
+
+static inline void __tlbi_sync_s1ish_kernel(void)
+{
+ dsb(ish);
+ __repeat_tlbi_sync(vale1is, 0);
+}
+
/*
* Complete broadcast TLB maintenance issued by hyp code which invalidates
* stage 1 translation information in any translation regime.
@@ -299,7 +311,7 @@ static inline void flush_tlb_all(void)
{
dsb(ishst);
__tlbi(vmalle1is);
- __tlbi_sync_s1ish();
+ __tlbi_sync_s1ish_kernel();
isb();
}
@@ -385,7 +397,7 @@ static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
*/
static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
- __tlbi_sync_s1ish();
+ __tlbi_sync_s1ish_batch();
}
/*
@@ -568,7 +580,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
dsb(ishst);
__flush_tlb_range_op(vaale1is, start, pages, stride, 0,
TLBI_TTL_UNKNOWN, false, lpa2_is_enabled());
- __tlbi_sync_s1ish();
+ __tlbi_sync_s1ish_kernel();
isb();
}
@@ -582,7 +594,7 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
dsb(ishst);
__tlbi(vaae1is, addr);
- __tlbi_sync_s1ish();
+ __tlbi_sync_s1ish_kernel();
isb();
}