author    Catalin Marinas <catalin.marinas@arm.com>  2026-04-20 13:12:35 +0100
committer Catalin Marinas <catalin.marinas@arm.com>  2026-04-20 13:12:35 +0100
commit    858fbd7248bd84b2899fb2c29bc7bc2634296edf (patch)
tree      879b4b3a05bc4c6f452212373d5d03a10a25a755 /arch/arm64/include/asm
parent    818f644ec6cbe00a3cddc767b6316e2f527ae865 (diff)
parent    0baba94a9779c13c857f6efc55807e6a45b1d4e4 (diff)
Merge branch 'for-next/c1-pro-erratum-4193714' into for-next/core
* for-next/c1-pro-erratum-4193714:
  : Work around C1-Pro erratum 4193714 (CVE-2026-0995)
  arm64: errata: Work around early CME DVMSync acknowledgement
  arm64: cputype: Add C1-Pro definitions
  arm64: tlb: Pass the corresponding mm to __tlbi_sync_s1ish()
  arm64: tlb: Introduce __tlbi_sync_s1ish_{kernel,batch}() for TLB maintenance
Diffstat (limited to 'arch/arm64/include/asm')
-rw-r--r--  arch/arm64/include/asm/cpucaps.h  |  2
-rw-r--r--  arch/arm64/include/asm/cputype.h  |  2
-rw-r--r--  arch/arm64/include/asm/fpsimd.h   | 21
-rw-r--r--  arch/arm64/include/asm/tlbbatch.h | 10
-rw-r--r--  arch/arm64/include/asm/tlbflush.h | 94
5 files changed, 120 insertions(+), 9 deletions(-)
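
For orientation before the per-file hunks: the workaround adds an SME
"DVMSync" synchronisation at the points where broadcast TLB maintenance
completes. A rough sketch of the resulting call flow, reconstructed from
the hunks below (the function names are from this diff; the flow summary
itself is editorial):

    flush_tlb_mm(mm) / __do_flush_tlb_range(vma, ...)
      -> __tlbi_sync_s1ish(mm)
           -> dsb(ish); __repeat_tlbi_sync(vale1is, 0)
           -> sme_dvmsync(mm)                  /* sync affected CPUs now */

    arch_tlbbatch_add_pending(batch, mm, ...)
      -> sme_dvmsync_add_pending(batch, mm)    /* record mm's CPUs */
    arch_tlbbatch_flush(batch)
      -> __tlbi_sync_s1ish_batch(batch)
           -> sme_dvmsync_batch(batch)         /* sync recorded CPUs once */

    flush_tlb_all() / flush_tlb_kernel_range(...)
      -> __tlbi_sync_s1ish_kernel()            /* no SME DVMSync issued */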
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 6e3da333442e..d0d3cdd5763c 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -64,6 +64,8 @@ cpucap_is_possible(const unsigned int cap)
return IS_ENABLED(CONFIG_ARM64_WORKAROUND_REPEAT_TLBI);
case ARM64_WORKAROUND_SPECULATIVE_SSBS:
return IS_ENABLED(CONFIG_ARM64_ERRATUM_3194386);
+ case ARM64_WORKAROUND_4193714:
+ return IS_ENABLED(CONFIG_ARM64_ERRATUM_4193714);
case ARM64_MPAM:
/*
* KVM MPAM support doesn't rely on the host kernel supporting MPAM.
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 08860d482e60..7b518e81dd15 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -98,6 +98,7 @@
#define ARM_CPU_PART_CORTEX_A725 0xD87
#define ARM_CPU_PART_CORTEX_A720AE 0xD89
#define ARM_CPU_PART_NEOVERSE_N3 0xD8E
+#define ARM_CPU_PART_C1_PRO 0xD8B
#define APM_CPU_PART_XGENE 0x000
#define APM_CPU_VAR_POTENZA 0x00
@@ -189,6 +190,7 @@
#define MIDR_CORTEX_A725 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A725)
#define MIDR_CORTEX_A720AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A720AE)
#define MIDR_NEOVERSE_N3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N3)
+#define MIDR_C1_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_C1_PRO)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
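
For context, MIDR_C1_PRO would typically be consumed by an erratum entry
in arch/arm64/kernel/cpu_errata.c, which is outside this diffstat. A
minimal sketch of what such an entry might look like, assuming all
C1-Pro revisions are affected (the revision range is an assumption):

	#ifdef CONFIG_ARM64_ERRATUM_4193714
	{
		/* C1-Pro erratum 4193714: early CME DVMSync acknowledgement */
		.desc = "ARM erratum 4193714",
		.capability = ARM64_WORKAROUND_4193714,
		/* Assumption: all revisions affected */
		ERRATA_MIDR_ALL_VERSIONS(MIDR_C1_PRO),
	},
	#endif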
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index 1d2e33559bd5..d9d00b45ab11 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -428,6 +428,24 @@ static inline size_t sme_state_size(struct task_struct const *task)
return __sme_state_size(task_get_sme_vl(task));
}
+void sme_enable_dvmsync(void);
+void sme_set_active(void);
+void sme_clear_active(void);
+
+static inline void sme_enter_from_user_mode(void)
+{
+ if (alternative_has_cap_unlikely(ARM64_WORKAROUND_4193714) &&
+ test_thread_flag(TIF_SME))
+ sme_clear_active();
+}
+
+static inline void sme_exit_to_user_mode(void)
+{
+ if (alternative_has_cap_unlikely(ARM64_WORKAROUND_4193714) &&
+ test_thread_flag(TIF_SME))
+ sme_set_active();
+}
+
#else
static inline void sme_user_disable(void) { BUILD_BUG(); }
@@ -456,6 +474,9 @@ static inline size_t sme_state_size(struct task_struct const *task)
return 0;
}
+static inline void sme_enter_from_user_mode(void) { }
+static inline void sme_exit_to_user_mode(void) { }
+
#endif /* ! CONFIG_ARM64_SME */
/* For use by EFI runtime services calls only */
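
The enter/exit hooks above only take effect if the entry code calls
them. A hedged sketch of the wiring, assuming the call sites sit in the
user-mode entry/exit paths of arch/arm64/kernel/entry-common.c (that
file is outside this diffstat, and the wrapper names below are
illustrative, not the real entry-common.c helpers):

	/* entry-common.c wiring (sketch, not part of this diff) */
	static __always_inline void example_enter_from_user_mode(struct pt_regs *regs)
	{
		enter_from_user_mode(regs);
		/* Assumption: quiesce SME tracking while in the kernel */
		sme_enter_from_user_mode();
	}

	static __always_inline void example_exit_to_user_mode(struct pt_regs *regs)
	{
		/* Assumption: mark SME active again before returning to EL0 */
		sme_exit_to_user_mode();
		exit_to_user_mode(regs);
	}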
diff --git a/arch/arm64/include/asm/tlbbatch.h b/arch/arm64/include/asm/tlbbatch.h
index fedb0b87b8db..6297631532e5 100644
--- a/arch/arm64/include/asm/tlbbatch.h
+++ b/arch/arm64/include/asm/tlbbatch.h
@@ -2,11 +2,17 @@
#ifndef _ARCH_ARM64_TLBBATCH_H
#define _ARCH_ARM64_TLBBATCH_H
+#include <linux/cpumask.h>
+
struct arch_tlbflush_unmap_batch {
+#ifdef CONFIG_ARM64_ERRATUM_4193714
/*
- * For arm64, HW can do tlb shootdown, so we don't
- * need to record cpumask for sending IPI
+ * Track CPUs that need SME DVMSync on completion of this batch.
+ * Otherwise, the arm64 HW can do the TLB shootdown itself, so we
+ * don't need to record a cpumask for sending IPIs.
*/
+ cpumask_var_t cpumask;
+#endif
};
#endif /* _ARCH_ARM64_TLBBATCH_H */
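
Since cpumask_var_t changes representation with CONFIG_CPUMASK_OFFSTACK,
a brief reminder of the allocate-and-guard pattern the new field relies
on (the helper name below is illustrative; the same pattern appears in
the tlbflush.h hunks that follow):

	static bool example_batch_record(struct arch_tlbflush_unmap_batch *batch,
					 const struct cpumask *src)
	{
		/*
		 * With CONFIG_CPUMASK_OFFSTACK=y the mask is a pointer that
		 * starts out NULL and must be allocated; with OFFSTACK=n it
		 * is an embedded bitmap, cpumask_available() is always true
		 * and zalloc_cpumask_var() always succeeds.
		 */
		if (!cpumask_available(batch->cpumask) &&
		    !zalloc_cpumask_var(&batch->cpumask, GFP_ATOMIC))
			return false;	/* caller must fall back to an IPI */

		cpumask_or(batch->cpumask, batch->cpumask, src);
		return true;
	}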
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 47fa4d39a461..c0bf5b398041 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -80,6 +80,71 @@ static inline unsigned long get_trans_granule(void)
}
}
+#ifdef CONFIG_ARM64_ERRATUM_4193714
+
+void sme_do_dvmsync(const struct cpumask *mask);
+
+static inline void sme_dvmsync(struct mm_struct *mm)
+{
+ if (!alternative_has_cap_unlikely(ARM64_WORKAROUND_4193714))
+ return;
+
+ sme_do_dvmsync(mm_cpumask(mm));
+}
+
+static inline void sme_dvmsync_add_pending(struct arch_tlbflush_unmap_batch *batch,
+ struct mm_struct *mm)
+{
+ if (!alternative_has_cap_unlikely(ARM64_WORKAROUND_4193714))
+ return;
+
+ /*
+ * Order the mm_cpumask() read after the hardware DVMSync.
+ */
+ dsb(ish);
+ if (cpumask_empty(mm_cpumask(mm)))
+ return;
+
+ /*
+ * Allocate the batch cpumask on first use. Fall back to an immediate
+ * IPI for this mm in case of failure.
+ */
+ if (!cpumask_available(batch->cpumask) &&
+ !zalloc_cpumask_var(&batch->cpumask, GFP_ATOMIC)) {
+ sme_do_dvmsync(mm_cpumask(mm));
+ return;
+ }
+
+ cpumask_or(batch->cpumask, batch->cpumask, mm_cpumask(mm));
+}
+
+static inline void sme_dvmsync_batch(struct arch_tlbflush_unmap_batch *batch)
+{
+ if (!alternative_has_cap_unlikely(ARM64_WORKAROUND_4193714))
+ return;
+
+ if (!cpumask_available(batch->cpumask))
+ return;
+
+ sme_do_dvmsync(batch->cpumask);
+ cpumask_clear(batch->cpumask);
+}
+
+#else
+
+static inline void sme_dvmsync(struct mm_struct *mm)
+{
+}
+static inline void sme_dvmsync_add_pending(struct arch_tlbflush_unmap_batch *batch,
+ struct mm_struct *mm)
+{
+}
+static inline void sme_dvmsync_batch(struct arch_tlbflush_unmap_batch *batch)
+{
+}
+
+#endif /* CONFIG_ARM64_ERRATUM_4193714 */
+
/*
* Level-based TLBI operations.
*
@@ -213,7 +278,21 @@ do { \
* Complete broadcast TLB maintenance issued by the host which invalidates
* stage 1 information in the host's own translation regime.
*/
-static inline void __tlbi_sync_s1ish(void)
+static inline void __tlbi_sync_s1ish(struct mm_struct *mm)
+{
+ dsb(ish);
+ __repeat_tlbi_sync(vale1is, 0);
+ sme_dvmsync(mm);
+}
+
+static inline void __tlbi_sync_s1ish_batch(struct arch_tlbflush_unmap_batch *batch)
+{
+ dsb(ish);
+ __repeat_tlbi_sync(vale1is, 0);
+ sme_dvmsync_batch(batch);
+}
+
+static inline void __tlbi_sync_s1ish_kernel(void)
{
dsb(ish);
__repeat_tlbi_sync(vale1is, 0);
@@ -322,7 +401,7 @@ static inline void flush_tlb_all(void)
{
dsb(ishst);
__tlbi(vmalle1is);
- __tlbi_sync_s1ish();
+ __tlbi_sync_s1ish_kernel();
isb();
}
@@ -334,7 +413,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
asid = __TLBI_VADDR(0, ASID(mm));
__tlbi(aside1is, asid);
__tlbi_user(aside1is, asid);
- __tlbi_sync_s1ish();
+ __tlbi_sync_s1ish(mm);
mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
}
@@ -355,7 +434,7 @@ static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
*/
static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
- __tlbi_sync_s1ish();
+ __tlbi_sync_s1ish_batch(batch);
}
/*
@@ -557,7 +636,7 @@ static __always_inline void __do_flush_tlb_range(struct vm_area_struct *vma,
if (!(flags & TLBF_NOSYNC)) {
if (!(flags & TLBF_NOBROADCAST))
- __tlbi_sync_s1ish();
+ __tlbi_sync_s1ish(mm);
else
dsb(nsh);
}
@@ -618,7 +697,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
dsb(ishst);
__flush_s1_tlb_range_op(vaale1is, start, pages, stride, 0,
TLBI_TTL_UNKNOWN);
- __tlbi_sync_s1ish();
+ __tlbi_sync_s1ish_kernel();
isb();
}
@@ -632,7 +711,7 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
dsb(ishst);
__tlbi(vaae1is, addr);
- __tlbi_sync_s1ish();
+ __tlbi_sync_s1ish_kernel();
isb();
}
@@ -643,6 +722,7 @@ static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *b
__flush_tlb_range(&vma, start, end, PAGE_SIZE, 3,
TLBF_NOWALKCACHE | TLBF_NOSYNC);
+ sme_dvmsync_add_pending(batch, mm);
}
static inline bool __pte_flags_need_flush(ptdesc_t oldval, ptdesc_t newval)
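
Finally, sme_do_dvmsync() is only declared in these headers; its body
lands elsewhere in the series. A hypothetical sketch of an IPI-based
shape for it, assuming the per-CPU work is a simple synchronising
callback (the callback body is a placeholder, not the real workaround):

	/* sketch only; the real implementation is not in this diff */
	static void sme_dvmsync_cpu(void *unused)
	{
		/* placeholder: per-CPU DVMSync resynchronisation work */
	}

	void sme_do_dvmsync(const struct cpumask *mask)
	{
		/*
		 * Run the callback on every CPU in @mask and wait, so the
		 * caller knows the deferred DVMSync has been acknowledged
		 * before broadcast TLB maintenance is treated as complete.
		 */
		on_each_cpu_mask(mask, sme_dvmsync_cpu, NULL, true);
	}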