summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorWill Deacon <will@kernel.org>2026-01-29 12:05:19 +0000
committerWill Deacon <will@kernel.org>2026-01-29 12:05:19 +0000
commit86941154bc8398d617b7a5c141fb545cad9ac4fc (patch)
tree50c403b89bd53fb290b49e5e1c86dcce4d37b255
parent3e42a4f5e0a1d9d0f0ac3a7a9d1a889a2a830f12 (diff)
parent1f3b950492db411e6c30ee0076b61ef2694c100a (diff)
Merge branch 'for-next/cpufeature' into for-next/core
* for-next/cpufeature: arm64: poe: fix stale POR_EL0 values for ptrace arm64: mte: Set TCMA1 whenever MTE is present in the kernel arm64: Add support for FEAT_{LS64, LS64_V} KVM: arm64: Enable FEAT_{LS64, LS64_V} in the supported guest arm64: Provide basic EL2 setup for FEAT_{LS64, LS64_V} usage at EL0/1 KVM: arm64: Handle DABT caused by LS64* instructions on unsupported memory KVM: arm64: Add documentation for KVM_EXIT_ARM_LDST64B KVM: arm64: Add exit to userspace on {LD,ST}64B* outside of memslots arm64: Unconditionally enable PAN support arm64: Unconditionally enable LSE support arm64: Add support for TSV110 Spectre-BHB mitigation
-rw-r--r--Documentation/arch/arm64/booting.rst12
-rw-r--r--Documentation/arch/arm64/elf_hwcaps.rst7
-rw-r--r--Documentation/virt/kvm/api.rst43
-rw-r--r--arch/arm64/Kconfig33
-rw-r--r--arch/arm64/include/asm/cpucaps.h2
-rw-r--r--arch/arm64/include/asm/el2_setup.h12
-rw-r--r--arch/arm64/include/asm/esr.h8
-rw-r--r--arch/arm64/include/asm/hwcap.h1
-rw-r--r--arch/arm64/include/asm/insn.h23
-rw-r--r--arch/arm64/include/asm/kvm_emulate.h7
-rw-r--r--arch/arm64/include/asm/lse.h9
-rw-r--r--arch/arm64/include/asm/uaccess.h6
-rw-r--r--arch/arm64/include/uapi/asm/hwcap.h1
-rw-r--r--arch/arm64/kernel/cpufeature.c34
-rw-r--r--arch/arm64/kernel/cpuinfo.c1
-rw-r--r--arch/arm64/kernel/proton-pack.c1
-rw-r--r--arch/arm64/kernel/ptrace.c3
-rw-r--r--arch/arm64/kvm/at.c7
-rw-r--r--arch/arm64/kvm/hyp/entry.S2
-rw-r--r--arch/arm64/kvm/inject_fault.c34
-rw-r--r--arch/arm64/kvm/mmio.c27
-rw-r--r--arch/arm64/kvm/mmu.c14
-rw-r--r--arch/arm64/lib/insn.c2
-rw-r--r--arch/arm64/mm/proc.S10
-rw-r--r--arch/arm64/net/bpf_jit_comp.c7
-rw-r--r--arch/arm64/tools/cpucaps2
-rw-r--r--include/uapi/linux/kvm.h3
27 files changed, 201 insertions, 110 deletions
diff --git a/Documentation/arch/arm64/booting.rst b/Documentation/arch/arm64/booting.rst
index 26efca09aef3..13ef311dace8 100644
--- a/Documentation/arch/arm64/booting.rst
+++ b/Documentation/arch/arm64/booting.rst
@@ -556,6 +556,18 @@ Before jumping into the kernel, the following conditions must be met:
- MDCR_EL3.TPM (bit 6) must be initialized to 0b0
+ For CPUs with support for 64-byte loads and stores without status (FEAT_LS64):
+
+ - If the kernel is entered at EL1 and EL2 is present:
+
+ - HCRX_EL2.EnALS (bit 1) must be initialised to 0b1.
+
+ For CPUs with support for 64-byte stores with status (FEAT_LS64_V):
+
+ - If the kernel is entered at EL1 and EL2 is present:
+
+ - HCRX_EL2.EnASR (bit 2) must be initialised to 0b1.
+
The requirements described above for CPU mode, caches, MMUs, architected
timers, coherency and system registers apply to all CPUs. All CPUs must
enter the kernel in the same exception level. Where the values documented
diff --git a/Documentation/arch/arm64/elf_hwcaps.rst b/Documentation/arch/arm64/elf_hwcaps.rst
index a15df4956849..97315ae6c0da 100644
--- a/Documentation/arch/arm64/elf_hwcaps.rst
+++ b/Documentation/arch/arm64/elf_hwcaps.rst
@@ -444,6 +444,13 @@ HWCAP3_MTE_STORE_ONLY
HWCAP3_LSFE
Functionality implied by ID_AA64ISAR3_EL1.LSFE == 0b0001
+HWCAP3_LS64
+ Functionality implied by ID_AA64ISAR1_EL1.LS64 == 0b0001. Note that
+ the ld64b/st64b instructions require support from the CPU, the system
+ and the target (device) memory location; HWCAP3_LS64 only indicates
+ CPU support. Userspace should only use ld64b/st64b on supported target
+ (device) memory locations, otherwise fall back to non-atomic alternatives.
+
4. Unused AT_HWCAP bits
-----------------------
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index 01a3abef8abb..bfa0ab343081 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -1303,12 +1303,13 @@ userspace, for example because of missing instruction syndrome decode
information or because there is no device mapped at the accessed IPA, then
userspace can ask the kernel to inject an external abort using the address
from the exiting fault on the VCPU. It is a programming error to set
-ext_dabt_pending after an exit which was not either KVM_EXIT_MMIO or
-KVM_EXIT_ARM_NISV. This feature is only available if the system supports
-KVM_CAP_ARM_INJECT_EXT_DABT. This is a helper which provides commonality in
-how userspace reports accesses for the above cases to guests, across different
-userspace implementations. Nevertheless, userspace can still emulate all Arm
-exceptions by manipulating individual registers using the KVM_SET_ONE_REG API.
+ext_dabt_pending after an exit which was not either KVM_EXIT_MMIO,
+KVM_EXIT_ARM_NISV, or KVM_EXIT_ARM_LDST64B. This feature is only available if
+the system supports KVM_CAP_ARM_INJECT_EXT_DABT. This is a helper which
+provides commonality in how userspace reports accesses for the above cases to
+guests, across different userspace implementations. Nevertheless, userspace
+can still emulate all Arm exceptions by manipulating individual registers
+using the KVM_SET_ONE_REG API.
See KVM_GET_VCPU_EVENTS for the data structure.
@@ -7050,12 +7051,14 @@ in send_page or recv a buffer to recv_page).
::
- /* KVM_EXIT_ARM_NISV */
+ /* KVM_EXIT_ARM_NISV / KVM_EXIT_ARM_LDST64B */
struct {
__u64 esr_iss;
__u64 fault_ipa;
} arm_nisv;
+- KVM_EXIT_ARM_NISV:
+
Used on arm64 systems. If a guest accesses memory not in a memslot,
KVM will typically return to userspace and ask it to do MMIO emulation on its
behalf. However, for certain classes of instructions, no instruction decode
@@ -7089,6 +7092,32 @@ Note that although KVM_CAP_ARM_NISV_TO_USER will be reported if
queried outside of a protected VM context, the feature will not be
exposed if queried on a protected VM file descriptor.
+- KVM_EXIT_ARM_LDST64B:
+
+Used on arm64 systems. When a guest executes an LD64B, ST64B, ST64BV, or
+ST64BV0 instruction outside of a memslot, KVM will return to userspace with
+KVM_EXIT_ARM_LDST64B, exposing the relevant ESR_EL2 information and the
+faulting IPA, similarly to KVM_EXIT_ARM_NISV.
+
+Userspace is supposed to fully emulate the instructions, which includes:
+
+ - fetch of the operands for a store, including ACCDATA_EL1 in the case
+ of a ST64BV0 instruction
+ - deal with the endianness if the guest is big-endian
+ - emulate the access, including the delivery of an exception if the
+ access didn't succeed
+ - provide a return value in the case of ST64BV/ST64BV0
+ - return the data in the case of a load
+ - increment PC if the instruction was successfully executed
+
+Note that there is no expectation of performance for this emulation, as it
+involves a large number of interactions with the guest state. It is, however,
+expected that the instruction's semantics are preserved, especially the
+single-copy atomicity property of the 64 byte access.
+
+This exit reason must be handled if userspace sets ID_AA64ISAR1_EL1.LS64 to a
+non-zero value, indicating that FEAT_LS64* is enabled.
+
::
/* KVM_EXIT_X86_RDMSR / KVM_EXIT_X86_WRMSR */
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 93173f0a09c7..fcfb62ec4bae 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1680,7 +1680,6 @@ config MITIGATE_SPECTRE_BRANCH_HISTORY
config ARM64_SW_TTBR0_PAN
bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
depends on !KCSAN
- select ARM64_PAN
help
Enabling this option prevents the kernel from accessing
user-space memory directly by pointing TTBR0_EL1 to a reserved
@@ -1859,36 +1858,6 @@ config ARM64_HW_AFDBM
to work on pre-ARMv8.1 hardware and the performance impact is
minimal. If unsure, say Y.
-config ARM64_PAN
- bool "Enable support for Privileged Access Never (PAN)"
- default y
- help
- Privileged Access Never (PAN; part of the ARMv8.1 Extensions)
- prevents the kernel or hypervisor from accessing user-space (EL0)
- memory directly.
-
- Choosing this option will cause any unprotected (not using
- copy_to_user et al) memory access to fail with a permission fault.
-
- The feature is detected at runtime, and will remain as a 'nop'
- instruction if the cpu does not implement the feature.
-
-config ARM64_LSE_ATOMICS
- bool
- default ARM64_USE_LSE_ATOMICS
-
-config ARM64_USE_LSE_ATOMICS
- bool "Atomic instructions"
- default y
- help
- As part of the Large System Extensions, ARMv8.1 introduces new
- atomic instructions that are designed specifically to scale in
- very large systems.
-
- Say Y here to make use of these instructions for the in-kernel
- atomic routines. This incurs a small overhead on CPUs that do
- not support these instructions.
-
endmenu # "ARMv8.1 architectural features"
menu "ARMv8.2 architectural features"
@@ -2125,7 +2094,6 @@ config ARM64_MTE
depends on ARM64_AS_HAS_MTE && ARM64_TAGGED_ADDR_ABI
depends on AS_HAS_ARMV8_5
# Required for tag checking in the uaccess routines
- select ARM64_PAN
select ARCH_HAS_SUBPAGE_FAULTS
select ARCH_USES_HIGH_VMA_FLAGS
select ARCH_USES_PG_ARCH_2
@@ -2157,7 +2125,6 @@ menu "ARMv8.7 architectural features"
config ARM64_EPAN
bool "Enable support for Enhanced Privileged Access Never (EPAN)"
default y
- depends on ARM64_PAN
help
Enhanced Privileged Access Never (EPAN) allows Privileged
Access Never to be used with Execute-only mappings.
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 2c8029472ad4..177c691914f8 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -19,8 +19,6 @@ cpucap_is_possible(const unsigned int cap)
"cap must be < ARM64_NCAPS");
switch (cap) {
- case ARM64_HAS_PAN:
- return IS_ENABLED(CONFIG_ARM64_PAN);
case ARM64_HAS_EPAN:
return IS_ENABLED(CONFIG_ARM64_EPAN);
case ARM64_SVE:
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index cacd20df1786..9393220de069 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -83,9 +83,19 @@
/* Enable GCS if supported */
mrs_s x1, SYS_ID_AA64PFR1_EL1
ubfx x1, x1, #ID_AA64PFR1_EL1_GCS_SHIFT, #4
- cbz x1, .Lset_hcrx_\@
+ cbz x1, .Lskip_gcs_hcrx_\@
orr x0, x0, #HCRX_EL2_GCSEn
+.Lskip_gcs_hcrx_\@:
+ /* Enable LS64, LS64_V if supported */
+ mrs_s x1, SYS_ID_AA64ISAR1_EL1
+ ubfx x1, x1, #ID_AA64ISAR1_EL1_LS64_SHIFT, #4
+ cbz x1, .Lset_hcrx_\@
+ orr x0, x0, #HCRX_EL2_EnALS
+ cmp x1, #ID_AA64ISAR1_EL1_LS64_LS64_V
+ b.lt .Lset_hcrx_\@
+ orr x0, x0, #HCRX_EL2_EnASR
+
.Lset_hcrx_\@:
msr_s SYS_HCRX_EL2, x0
.Lskip_hcrx_\@:
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 4975a92cbd17..7e86d400864e 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -124,6 +124,7 @@
#define ESR_ELx_FSC_SEA_TTW(n) (0x14 + (n))
#define ESR_ELx_FSC_SECC (0x18)
#define ESR_ELx_FSC_SECC_TTW(n) (0x1c + (n))
+#define ESR_ELx_FSC_EXCL_ATOMIC (0x35)
#define ESR_ELx_FSC_ADDRSZ (0x00)
/*
@@ -488,6 +489,13 @@ static inline bool esr_fsc_is_access_flag_fault(unsigned long esr)
(esr == ESR_ELx_FSC_ACCESS_L(0));
}
+static inline bool esr_fsc_is_excl_atomic_fault(unsigned long esr)
+{
+ esr = esr & ESR_ELx_FSC;
+
+ return esr == ESR_ELx_FSC_EXCL_ATOMIC;
+}
+
static inline bool esr_fsc_is_addr_sz_fault(unsigned long esr)
{
esr &= ESR_ELx_FSC;
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 1f63814ae6c4..72ea4bda79f3 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -179,6 +179,7 @@
#define KERNEL_HWCAP_MTE_FAR __khwcap3_feature(MTE_FAR)
#define KERNEL_HWCAP_MTE_STORE_ONLY __khwcap3_feature(MTE_STORE_ONLY)
#define KERNEL_HWCAP_LSFE __khwcap3_feature(LSFE)
+#define KERNEL_HWCAP_LS64 __khwcap3_feature(LS64)
/*
* This yields a mask that user programs can use to figure out what
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index e1d30ba99d01..f463a654a2bb 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -671,7 +671,6 @@ u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
enum aarch64_insn_register Rn,
enum aarch64_insn_register Rd,
u8 lsb);
-#ifdef CONFIG_ARM64_LSE_ATOMICS
u32 aarch64_insn_gen_atomic_ld_op(enum aarch64_insn_register result,
enum aarch64_insn_register address,
enum aarch64_insn_register value,
@@ -683,28 +682,6 @@ u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
enum aarch64_insn_register value,
enum aarch64_insn_size_type size,
enum aarch64_insn_mem_order_type order);
-#else
-static inline
-u32 aarch64_insn_gen_atomic_ld_op(enum aarch64_insn_register result,
- enum aarch64_insn_register address,
- enum aarch64_insn_register value,
- enum aarch64_insn_size_type size,
- enum aarch64_insn_mem_atomic_op op,
- enum aarch64_insn_mem_order_type order)
-{
- return AARCH64_BREAK_FAULT;
-}
-
-static inline
-u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
- enum aarch64_insn_register address,
- enum aarch64_insn_register value,
- enum aarch64_insn_size_type size,
- enum aarch64_insn_mem_order_type order)
-{
- return AARCH64_BREAK_FAULT;
-}
-#endif
u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type);
u32 aarch64_insn_gen_dsb(enum aarch64_insn_mb_type type);
u32 aarch64_insn_gen_mrs(enum aarch64_insn_register result,
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index c9eab316398e..29291e25ecfd 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -47,6 +47,7 @@ void kvm_skip_instr32(struct kvm_vcpu *vcpu);
void kvm_inject_undefined(struct kvm_vcpu *vcpu);
int kvm_inject_serror_esr(struct kvm_vcpu *vcpu, u64 esr);
int kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr);
+int kvm_inject_dabt_excl_atomic(struct kvm_vcpu *vcpu, u64 addr);
void kvm_inject_size_fault(struct kvm_vcpu *vcpu);
static inline int kvm_inject_sea_dabt(struct kvm_vcpu *vcpu, u64 addr)
@@ -694,6 +695,12 @@ static inline void vcpu_set_hcrx(struct kvm_vcpu *vcpu)
if (kvm_has_sctlr2(kvm))
vcpu->arch.hcrx_el2 |= HCRX_EL2_SCTLR2En;
+
+ if (kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64))
+ vcpu->arch.hcrx_el2 |= HCRX_EL2_EnALS;
+
+ if (kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_V))
+ vcpu->arch.hcrx_el2 |= HCRX_EL2_EnASR;
}
}
#endif /* __ARM64_KVM_EMULATE_H__ */
diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
index 3129a5819d0e..1e77c45bb0a8 100644
--- a/arch/arm64/include/asm/lse.h
+++ b/arch/arm64/include/asm/lse.h
@@ -4,8 +4,6 @@
#include <asm/atomic_ll_sc.h>
-#ifdef CONFIG_ARM64_LSE_ATOMICS
-
#define __LSE_PREAMBLE ".arch_extension lse\n"
#include <linux/compiler_types.h>
@@ -27,11 +25,4 @@
#define ARM64_LSE_ATOMIC_INSN(llsc, lse) \
ALTERNATIVE(llsc, __LSE_PREAMBLE lse, ARM64_HAS_LSE_ATOMICS)
-#else /* CONFIG_ARM64_LSE_ATOMICS */
-
-#define __lse_ll_sc_body(op, ...) __ll_sc_##op(__VA_ARGS__)
-
-#define ARM64_LSE_ATOMIC_INSN(llsc, lse) llsc
-
-#endif /* CONFIG_ARM64_LSE_ATOMICS */
#endif /* __ASM_LSE_H */
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 6490930deef8..9810106a3f66 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -124,14 +124,12 @@ static inline bool uaccess_ttbr0_enable(void)
static inline void __uaccess_disable_hw_pan(void)
{
- asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,
- CONFIG_ARM64_PAN));
+ asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN));
}
static inline void __uaccess_enable_hw_pan(void)
{
- asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
- CONFIG_ARM64_PAN));
+ asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN));
}
static inline void uaccess_disable_privileged(void)
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index 575564ecdb0b..06f83ca8de56 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -146,5 +146,6 @@
#define HWCAP3_MTE_FAR (1UL << 0)
#define HWCAP3_MTE_STORE_ONLY (1UL << 1)
#define HWCAP3_LSFE (1UL << 2)
+#define HWCAP3_LS64 (1UL << 3)
#endif /* _UAPI__ASM_HWCAP_H */
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index c840a93b9ef9..c688d2225c94 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -240,6 +240,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
};
static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_LS64_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_XS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_I8MM_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_DGH_SHIFT, 4, 0),
@@ -2164,7 +2165,6 @@ static bool has_bbml2_noabort(const struct arm64_cpu_capabilities *caps, int sco
return cpu_supports_bbml2_noabort();
}
-#ifdef CONFIG_ARM64_PAN
static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
{
/*
@@ -2176,7 +2176,6 @@ static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPAN, 0);
set_pstate_pan(1);
}
-#endif /* CONFIG_ARM64_PAN */
#ifdef CONFIG_ARM64_RAS_EXTN
static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
@@ -2260,6 +2259,16 @@ static void cpu_enable_e0pd(struct arm64_cpu_capabilities const *cap)
}
#endif /* CONFIG_ARM64_E0PD */
+static void cpu_enable_ls64(struct arm64_cpu_capabilities const *cap)
+{
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_EnALS, SCTLR_EL1_EnALS);
+}
+
+static void cpu_enable_ls64_v(struct arm64_cpu_capabilities const *cap)
+{
+	sysreg_clear_set(sctlr_el1, SCTLR_EL1_EnASR, SCTLR_EL1_EnASR);
+}
+
#ifdef CONFIG_ARM64_PSEUDO_NMI
static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry,
int scope)
@@ -2541,7 +2550,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = has_cpuid_feature,
ARM64_CPUID_FIELDS(ID_AA64MMFR0_EL1, ECV, CNTPOFF)
},
-#ifdef CONFIG_ARM64_PAN
{
.desc = "Privileged Access Never",
.capability = ARM64_HAS_PAN,
@@ -2550,7 +2558,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.cpu_enable = cpu_enable_pan,
ARM64_CPUID_FIELDS(ID_AA64MMFR1_EL1, PAN, IMP)
},
-#endif /* CONFIG_ARM64_PAN */
#ifdef CONFIG_ARM64_EPAN
{
.desc = "Enhanced Privileged Access Never",
@@ -2560,7 +2567,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
ARM64_CPUID_FIELDS(ID_AA64MMFR1_EL1, PAN, PAN3)
},
#endif /* CONFIG_ARM64_EPAN */
-#ifdef CONFIG_ARM64_LSE_ATOMICS
{
.desc = "LSE atomic instructions",
.capability = ARM64_HAS_LSE_ATOMICS,
@@ -2568,7 +2574,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = has_cpuid_feature,
ARM64_CPUID_FIELDS(ID_AA64ISAR0_EL1, ATOMIC, IMP)
},
-#endif /* CONFIG_ARM64_LSE_ATOMICS */
{
.desc = "Virtualization Host Extensions",
.capability = ARM64_HAS_VIRT_HOST_EXTN,
@@ -3148,6 +3153,22 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = has_cpuid_feature,
ARM64_CPUID_FIELDS(ID_AA64MMFR1_EL1, XNX, IMP)
},
+ {
+ .desc = "LS64",
+ .capability = ARM64_HAS_LS64,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ .cpu_enable = cpu_enable_ls64,
+ ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, LS64, LS64)
+ },
+ {
+ .desc = "LS64_V",
+ .capability = ARM64_HAS_LS64_V,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ .cpu_enable = cpu_enable_ls64_v,
+ ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, LS64, LS64_V)
+ },
{},
};
@@ -3267,6 +3288,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(ID_AA64ISAR1_EL1, BF16, EBF16, CAP_HWCAP, KERNEL_HWCAP_EBF16),
HWCAP_CAP(ID_AA64ISAR1_EL1, DGH, IMP, CAP_HWCAP, KERNEL_HWCAP_DGH),
HWCAP_CAP(ID_AA64ISAR1_EL1, I8MM, IMP, CAP_HWCAP, KERNEL_HWCAP_I8MM),
+ HWCAP_CAP(ID_AA64ISAR1_EL1, LS64, LS64, CAP_HWCAP, KERNEL_HWCAP_LS64),
HWCAP_CAP(ID_AA64ISAR2_EL1, LUT, IMP, CAP_HWCAP, KERNEL_HWCAP_LUT),
HWCAP_CAP(ID_AA64ISAR3_EL1, FAMINMAX, IMP, CAP_HWCAP, KERNEL_HWCAP_FAMINMAX),
HWCAP_CAP(ID_AA64ISAR3_EL1, LSFE, IMP, CAP_HWCAP, KERNEL_HWCAP_LSFE),
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index c44e6d94f5de..6149bc91251d 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -81,6 +81,7 @@ static const char *const hwcap_str[] = {
[KERNEL_HWCAP_PACA] = "paca",
[KERNEL_HWCAP_PACG] = "pacg",
[KERNEL_HWCAP_GCS] = "gcs",
+ [KERNEL_HWCAP_LS64] = "ls64",
[KERNEL_HWCAP_DCPODP] = "dcpodp",
[KERNEL_HWCAP_SVE2] = "sve2",
[KERNEL_HWCAP_SVEAES] = "sveaes",
diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
index 80a580e019c5..b3801f532b10 100644
--- a/arch/arm64/kernel/proton-pack.c
+++ b/arch/arm64/kernel/proton-pack.c
@@ -887,6 +887,7 @@ static u8 spectre_bhb_loop_affected(void)
MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
+ MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
{},
};
static const struct midr_range spectre_bhb_k24_list[] = {
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index b9bdd83fbbca..8a14b86cd066 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -1486,6 +1486,9 @@ static int poe_get(struct task_struct *target,
if (!system_supports_poe())
return -EINVAL;
+ if (target == current)
+ current->thread.por_el0 = read_sysreg_s(SYS_POR_EL0);
+
return membuf_write(&to, &target->thread.por_el0,
sizeof(target->thread.por_el0));
}
diff --git a/arch/arm64/kvm/at.c b/arch/arm64/kvm/at.c
index 53bf70126f81..6cbcec041a9d 100644
--- a/arch/arm64/kvm/at.c
+++ b/arch/arm64/kvm/at.c
@@ -1700,7 +1700,6 @@ int __kvm_find_s1_desc_level(struct kvm_vcpu *vcpu, u64 va, u64 ipa, int *level)
}
}
-#ifdef CONFIG_ARM64_LSE_ATOMICS
static int __lse_swap_desc(u64 __user *ptep, u64 old, u64 new)
{
u64 tmp = old;
@@ -1725,12 +1724,6 @@ static int __lse_swap_desc(u64 __user *ptep, u64 old, u64 new)
return ret;
}
-#else
-static int __lse_swap_desc(u64 __user *ptep, u64 old, u64 new)
-{
- return -EINVAL;
-}
-#endif
static int __llsc_swap_desc(u64 __user *ptep, u64 old, u64 new)
{
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index 9f4e8d68ab50..11a10d8f5beb 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -126,7 +126,7 @@ SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
add x1, x1, #VCPU_CONTEXT
- ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
+ ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN)
// Store the guest regs x2 and x3
stp x2, x3, [x1, #CPU_XREG_OFFSET(2)]
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index dfcd66c65517..6cc7ad84d7d8 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -253,6 +253,40 @@ int kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr)
return 1;
}
+static int kvm_inject_nested_excl_atomic(struct kvm_vcpu *vcpu, u64 addr)
+{
+ u64 esr = FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_DABT_LOW) |
+ FIELD_PREP(ESR_ELx_FSC, ESR_ELx_FSC_EXCL_ATOMIC) |
+ ESR_ELx_IL;
+
+ vcpu_write_sys_reg(vcpu, addr, FAR_EL2);
+ return kvm_inject_nested_sync(vcpu, esr);
+}
+
+/**
+ * kvm_inject_dabt_excl_atomic - inject a data abort for unsupported exclusive
+ * or atomic access
+ * @vcpu: The VCPU to receive the data abort
+ * @addr: The address to report in the DFAR
+ *
+ * It is assumed that this code is called from the VCPU thread and that the
+ * VCPU therefore is not currently executing guest code.
+ */
+int kvm_inject_dabt_excl_atomic(struct kvm_vcpu *vcpu, u64 addr)
+{
+ u64 esr;
+
+ if (is_nested_ctxt(vcpu) && (vcpu_read_sys_reg(vcpu, HCR_EL2) & HCR_VM))
+ return kvm_inject_nested_excl_atomic(vcpu, addr);
+
+ __kvm_inject_sea(vcpu, false, addr);
+ esr = vcpu_read_sys_reg(vcpu, exception_esr_elx(vcpu));
+ esr &= ~ESR_ELx_FSC;
+ esr |= ESR_ELx_FSC_EXCL_ATOMIC;
+ vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));
+ return 1;
+}
+
void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
{
unsigned long addr, esr;
diff --git a/arch/arm64/kvm/mmio.c b/arch/arm64/kvm/mmio.c
index 54f9358c9e0e..e2285ed8c91d 100644
--- a/arch/arm64/kvm/mmio.c
+++ b/arch/arm64/kvm/mmio.c
@@ -159,6 +159,9 @@ int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
bool is_write;
int len;
u8 data_buf[8];
+ u64 esr;
+
+ esr = kvm_vcpu_get_esr(vcpu);
/*
* No valid syndrome? Ask userspace for help if it has
@@ -168,7 +171,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
* though, so directly deliver an exception to the guest.
*/
if (!kvm_vcpu_dabt_isvalid(vcpu)) {
- trace_kvm_mmio_nisv(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu),
+ trace_kvm_mmio_nisv(*vcpu_pc(vcpu), esr,
kvm_vcpu_get_hfar(vcpu), fault_ipa);
if (vcpu_is_protected(vcpu))
@@ -186,6 +189,28 @@ int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
}
/*
+ * When (DFSC == 0b00xxxx || DFSC == 0b10101x) && DFSC != 0b0000xx
+ * ESR_EL2[12:11] describe the Load/Store Type. This allows us to
+ * punt the LD64B/ST64B/ST64BV/ST64BV0 instructions to userspace,
+ * which will have to provide a full emulation of these 4
+ * instructions. No, we don't expect this to be fast.
+ *
+ * We rely on traps being set if the corresponding features are not
+ * enabled, so if we get here, userspace has promised us to handle
+ * it already.
+ */
+ switch (kvm_vcpu_trap_get_fault(vcpu)) {
+ case 0b000100 ... 0b001111:
+ case 0b101010 ... 0b101011:
+ if (FIELD_GET(GENMASK(12, 11), esr)) {
+ run->exit_reason = KVM_EXIT_ARM_LDST64B;
+ run->arm_nisv.esr_iss = esr & ~(u64)ESR_ELx_FSC;
+ run->arm_nisv.fault_ipa = fault_ipa;
+ return 0;
+ }
+ }
+
+ /*
* Prepare MMIO operation. First decode the syndrome data we get
* from the CPU. Then try if some in-kernel emulation feels
* responsible, otherwise let user space do its magic.
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 48d7c372a4cd..edc348431d71 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1845,6 +1845,17 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return ret;
}
+ /*
+ * Guest performs atomic/exclusive operations on memory with unsupported
+ * attributes (e.g. ld64b/st64b on normal memory when no FEAT_LS64WB)
+ * and trigger the exception here. Since the memslot is valid, inject
+ * the fault back to the guest.
+ */
+ if (esr_fsc_is_excl_atomic_fault(kvm_vcpu_get_esr(vcpu))) {
+ kvm_inject_dabt_excl_atomic(vcpu, kvm_vcpu_get_hfar(vcpu));
+ return 1;
+ }
+
if (nested)
adjust_nested_fault_perms(nested, &prot, &writable);
@@ -2082,7 +2093,8 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
/* Check the stage-2 fault is trans. fault or write fault */
if (!esr_fsc_is_translation_fault(esr) &&
!esr_fsc_is_permission_fault(esr) &&
- !esr_fsc_is_access_flag_fault(esr)) {
+ !esr_fsc_is_access_flag_fault(esr) &&
+ !esr_fsc_is_excl_atomic_fault(esr)) {
kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
kvm_vcpu_trap_get_class(vcpu),
(unsigned long)kvm_vcpu_trap_get_fault(vcpu),
diff --git a/arch/arm64/lib/insn.c b/arch/arm64/lib/insn.c
index 4e298baddc2e..cc5b40917d0d 100644
--- a/arch/arm64/lib/insn.c
+++ b/arch/arm64/lib/insn.c
@@ -611,7 +611,6 @@ u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
state);
}
-#ifdef CONFIG_ARM64_LSE_ATOMICS
static u32 aarch64_insn_encode_ldst_order(enum aarch64_insn_mem_order_type type,
u32 insn)
{
@@ -755,7 +754,6 @@ u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
value);
}
-#endif
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
enum aarch64_insn_register src,
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 01e868116448..680ff21d79f1 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -48,14 +48,14 @@
#define TCR_KASAN_SW_FLAGS 0
#endif
-#ifdef CONFIG_KASAN_HW_TAGS
-#define TCR_MTE_FLAGS TCR_EL1_TCMA1 | TCR_EL1_TBI1 | TCR_EL1_TBID1
-#elif defined(CONFIG_ARM64_MTE)
+#ifdef CONFIG_ARM64_MTE
/*
* The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on
- * TBI being enabled at EL1.
+ * TBI being enabled at EL1. TCMA1 is needed to treat accesses with the
+ * match-all tag (0xF) as Tag Unchecked, irrespective of the SCTLR_EL1.TCF
+ * setting.
*/
-#define TCR_MTE_FLAGS TCR_EL1_TBI1 | TCR_EL1_TBID1
+#define TCR_MTE_FLAGS TCR_EL1_TCMA1 | TCR_EL1_TBI1 | TCR_EL1_TBID1
#else
#define TCR_MTE_FLAGS 0
#endif
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 74dd29816f36..008612aa4131 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -776,7 +776,6 @@ static int emit_atomic_ld_st(const struct bpf_insn *insn, struct jit_ctx *ctx)
return 0;
}
-#ifdef CONFIG_ARM64_LSE_ATOMICS
static int emit_lse_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
const u8 code = insn->code;
@@ -843,12 +842,6 @@ static int emit_lse_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
return 0;
}
-#else
-static inline int emit_lse_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
-{
- return -EINVAL;
-}
-#endif
static int emit_ll_sc_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index 0fac75f01534..50a0c695f340 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -46,6 +46,8 @@ HAS_HCX
HAS_LDAPR
HAS_LPA2
HAS_LSE_ATOMICS
+HAS_LS64
+HAS_LS64_V
HAS_MOPS
HAS_NESTED_VIRT
HAS_BBML2_NOABORT
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index dddb781b0507..88cca0e22ece 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -180,6 +180,7 @@ struct kvm_xen_exit {
#define KVM_EXIT_MEMORY_FAULT 39
#define KVM_EXIT_TDX 40
#define KVM_EXIT_ARM_SEA 41
+#define KVM_EXIT_ARM_LDST64B 42
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
@@ -402,7 +403,7 @@ struct kvm_run {
} eoi;
/* KVM_EXIT_HYPERV */
struct kvm_hyperv_exit hyperv;
- /* KVM_EXIT_ARM_NISV */
+ /* KVM_EXIT_ARM_NISV / KVM_EXIT_ARM_LDST64B */
struct {
__u64 esr_iss;
__u64 fault_ipa;