Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/cpufeature.c |  59
-rw-r--r--  arch/arm64/kernel/fpsimd.c     | 130
-rw-r--r--  arch/arm64/kernel/hyp-stub.S   |   5
-rw-r--r--  arch/arm64/kernel/image-vars.h |   1
-rw-r--r--  arch/arm64/kernel/process.c    |   1
-rw-r--r--  arch/arm64/kernel/topology.c   | 101
6 files changed, 86 insertions, 211 deletions
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 42b182cfa404..c840a93b9ef9 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -2304,6 +2304,49 @@ static bool has_gic_prio_relaxed_sync(const struct arm64_cpu_capabilities *entry
}
#endif
+static bool can_trap_icv_dir_el1(const struct arm64_cpu_capabilities *entry,
+ int scope)
+{
+ static const struct midr_range has_vgic_v3[] = {
+ MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM),
+ MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM),
+ MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_PRO),
+ MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_PRO),
+ MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_MAX),
+ MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_MAX),
+ MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD),
+ MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE),
+ MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_PRO),
+ MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_PRO),
+ MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_MAX),
+ MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_MAX),
+ {},
+ };
+ struct arm_smccc_res res = {};
+
+ BUILD_BUG_ON(ARM64_HAS_ICH_HCR_EL2_TDIR <= ARM64_HAS_GICV3_CPUIF);
+ BUILD_BUG_ON(ARM64_HAS_ICH_HCR_EL2_TDIR <= ARM64_HAS_GICV5_LEGACY);
+ if (!this_cpu_has_cap(ARM64_HAS_GICV3_CPUIF) &&
+ !is_midr_in_range_list(has_vgic_v3))
+ return false;
+
+ if (!is_hyp_mode_available())
+ return false;
+
+ if (this_cpu_has_cap(ARM64_HAS_GICV5_LEGACY))
+ return true;
+
+ if (is_kernel_in_hyp_mode())
+ res.a1 = read_sysreg_s(SYS_ICH_VTR_EL2);
+ else
+ arm_smccc_1_1_hvc(HVC_GET_ICH_VTR_EL2, &res);
+
+ if (res.a0 == HVC_STUB_ERR)
+ return false;
+
+ return res.a1 & ICH_VTR_EL2_TDS;
+}
+
#ifdef CONFIG_ARM64_BTI
static void bti_enable(const struct arm64_cpu_capabilities *__unused)
{
@@ -2815,6 +2858,15 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = has_gic_prio_relaxed_sync,
},
#endif
+ {
+ /*
+ * Depends on having GICv3
+ */
+ .desc = "ICV_DIR_EL1 trapping",
+ .capability = ARM64_HAS_ICH_HCR_EL2_TDIR,
+ .type = ARM64_CPUCAP_EARLY_LOCAL_CPU_FEATURE,
+ .matches = can_trap_icv_dir_el1,
+ },
#ifdef CONFIG_ARM64_E0PD
{
.desc = "E0PD",
@@ -3089,6 +3141,13 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.capability = ARM64_HAS_GICV5_LEGACY,
.matches = test_has_gicv5_legacy,
},
+ {
+ .desc = "XNX",
+ .capability = ARM64_HAS_XNX,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ ARM64_CPUID_FIELDS(ID_AA64MMFR1_EL1, XNX, IMP)
+ },
{},
};
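For context, once the two capabilities added above (ARM64_HAS_ICH_HCR_EL2_TDIR and ARM64_HAS_XNX) have been finalised, other arm64 code can query them through the generic cpufeature helpers. A minimal sketch, assuming the standard cpus_have_final_cap() API; the function name vgic_v3_can_trap_dir() is hypothetical and not part of this patch:

    /* Illustrative only: querying the new capability after CPU features
     * have been finalised. Not part of this diff. */
    #include <asm/cpufeature.h>

    static bool vgic_v3_can_trap_dir(void)
    {
            /* True when ICH_HCR_EL2.TDIR-based trapping of ICV_DIR_EL1 is usable. */
            return cpus_have_final_cap(ARM64_HAS_ICH_HCR_EL2_TDIR);
    }
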
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index c154f72634e0..9de1d8a604cb 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -180,13 +180,6 @@ static inline void set_sve_default_vl(int val)
set_default_vl(ARM64_VEC_SVE, val);
}
-static u8 *efi_sve_state;
-
-#else /* ! CONFIG_ARM64_SVE */
-
-/* Dummy declaration for code that will be optimised out: */
-extern u8 *efi_sve_state;
-
#endif /* ! CONFIG_ARM64_SVE */
#ifdef CONFIG_ARM64_SME
@@ -1095,36 +1088,6 @@ int vec_verify_vq_map(enum vec_type type)
return 0;
}
-static void __init sve_efi_setup(void)
-{
- int max_vl = 0;
- int i;
-
- if (!IS_ENABLED(CONFIG_EFI))
- return;
-
- for (i = 0; i < ARRAY_SIZE(vl_info); i++)
- max_vl = max(vl_info[i].max_vl, max_vl);
-
- /*
- * alloc_percpu() warns and prints a backtrace if this goes wrong.
- * This is evidence of a crippled system and we are returning void,
- * so no attempt is made to handle this situation here.
- */
- if (!sve_vl_valid(max_vl))
- goto fail;
-
- efi_sve_state = kmalloc(SVE_SIG_REGS_SIZE(sve_vq_from_vl(max_vl)),
- GFP_KERNEL);
- if (!efi_sve_state)
- goto fail;
-
- return;
-
-fail:
- panic("Cannot allocate memory for EFI SVE save/restore");
-}
-
void cpu_enable_sve(const struct arm64_cpu_capabilities *__always_unused p)
{
write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_ZEN_EL1EN, CPACR_EL1);
@@ -1185,8 +1148,6 @@ void __init sve_setup(void)
if (sve_max_virtualisable_vl() < sve_max_vl())
pr_warn("%s: unvirtualisable vector lengths present\n",
info->name);
-
- sve_efi_setup();
}
/*
@@ -1947,9 +1908,6 @@ EXPORT_SYMBOL_GPL(kernel_neon_end);
#ifdef CONFIG_EFI
static struct user_fpsimd_state efi_fpsimd_state;
-static bool efi_fpsimd_state_used;
-static bool efi_sve_state_used;
-static bool efi_sm_state;
/*
* EFI runtime services support functions
@@ -1976,43 +1934,26 @@ void __efi_fpsimd_begin(void)
if (may_use_simd()) {
kernel_neon_begin(&efi_fpsimd_state);
} else {
- WARN_ON(preemptible());
-
/*
- * If !efi_sve_state, SVE can't be in use yet and doesn't need
- * preserving:
+ * We are running in hardirq or NMI context, and the only
+ * legitimate case where this might happen is when EFI pstore
+ * is attempting to record the system's dying gasps into EFI
+ * variables. This could be due to an oops, a panic or a call
+ * to emergency_restart(), and in none of those cases, we can
+ * expect the current task to ever return to user space again,
+ * or for the kernel to resume any normal execution, for that
+ * matter (an oops in hardirq context triggers a panic too).
+ *
+ * Therefore, there is no point in attempting to preserve any
+ * SVE/SME state here. On the off chance that we might have
+ * ended up here for a different reason inadvertently, kill the
+ * task and preserve/restore the base FP/SIMD state, which
+ * might belong to kernel mode FP/SIMD.
*/
- if (system_supports_sve() && efi_sve_state != NULL) {
- bool ffr = true;
- u64 svcr;
-
- efi_sve_state_used = true;
-
- if (system_supports_sme()) {
- svcr = read_sysreg_s(SYS_SVCR);
-
- efi_sm_state = svcr & SVCR_SM_MASK;
-
- /*
- * Unless we have FA64 FFR does not
- * exist in streaming mode.
- */
- if (!system_supports_fa64())
- ffr = !(svcr & SVCR_SM_MASK);
- }
-
- sve_save_state(efi_sve_state + sve_ffr_offset(sve_max_vl()),
- &efi_fpsimd_state.fpsr, ffr);
-
- if (system_supports_sme())
- sysreg_clear_set_s(SYS_SVCR,
- SVCR_SM_MASK, 0);
-
- } else {
- fpsimd_save_state(&efi_fpsimd_state);
- }
-
- efi_fpsimd_state_used = true;
+ pr_warn_ratelimited("Calling EFI runtime from %s context\n",
+ in_nmi() ? "NMI" : "hardirq");
+ force_signal_inject(SIGKILL, SI_KERNEL, 0, 0);
+ fpsimd_save_state(&efi_fpsimd_state);
}
}
@@ -2024,41 +1965,10 @@ void __efi_fpsimd_end(void)
if (!system_supports_fpsimd())
return;
- if (!efi_fpsimd_state_used) {
+ if (may_use_simd()) {
kernel_neon_end(&efi_fpsimd_state);
} else {
- if (system_supports_sve() && efi_sve_state_used) {
- bool ffr = true;
-
- /*
- * Restore streaming mode; EFI calls are
- * normal function calls so should not return in
- * streaming mode.
- */
- if (system_supports_sme()) {
- if (efi_sm_state) {
- sysreg_clear_set_s(SYS_SVCR,
- 0,
- SVCR_SM_MASK);
-
- /*
- * Unless we have FA64 FFR does not
- * exist in streaming mode.
- */
- if (!system_supports_fa64())
- ffr = false;
- }
- }
-
- sve_load_state(efi_sve_state + sve_ffr_offset(sve_max_vl()),
- &efi_fpsimd_state.fpsr, ffr);
-
- efi_sve_state_used = false;
- } else {
- fpsimd_load_state(&efi_fpsimd_state);
- }
-
- efi_fpsimd_state_used = false;
+ fpsimd_load_state(&efi_fpsimd_state);
}
}
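The changed __efi_fpsimd_begin()/__efi_fpsimd_end() behaviour only matters to code that brackets EFI runtime service calls with this pair. A minimal sketch of such a caller, assuming a hypothetical do_efi_runtime_call(); the real call sites live in the arm64 EFI glue and are not part of this diff:

    /* Illustrative only: the begin/end pair brackets each EFI runtime
     * service invocation; do_efi_runtime_call() is a stand-in name. */
    #include <linux/efi.h>
    #include <asm/efi.h>

    static efi_status_t efi_call_example(void)
    {
            efi_status_t status;

            __efi_fpsimd_begin();            /* save FP/SIMD or enter kernel-mode NEON */
            status = do_efi_runtime_call();  /* hypothetical runtime service call */
            __efi_fpsimd_end();              /* restore the saved FP/SIMD state */

            return status;
    }
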
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index 36e2d26b54f5..085bc9972f6b 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -54,6 +54,11 @@ SYM_CODE_START_LOCAL(elx_sync)
1: cmp x0, #HVC_FINALISE_EL2
b.eq __finalise_el2
+ cmp x0, #HVC_GET_ICH_VTR_EL2
+ b.ne 2f
+ mrs_s x1, SYS_ICH_VTR_EL2
+ b 9f
+
2: cmp x0, #HVC_SOFT_RESTART
b.ne 3f
mov x0, x2
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index 5369763606e7..85bc629270bd 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -91,6 +91,7 @@ KVM_NVHE_ALIAS(spectre_bhb_patch_loop_mitigation_enable);
KVM_NVHE_ALIAS(spectre_bhb_patch_wa3);
KVM_NVHE_ALIAS(spectre_bhb_patch_clearbhb);
KVM_NVHE_ALIAS(alt_cb_patch_nops);
+KVM_NVHE_ALIAS(kvm_compute_ich_hcr_trap_bits);
/* Global kernel state accessed by nVHE hyp code. */
KVM_NVHE_ALIAS(kvm_vgic_global_state);
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index fba7ca102a8c..489554931231 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -292,6 +292,7 @@ static void flush_gcs(void)
current->thread.gcs_base = 0;
current->thread.gcs_size = 0;
current->thread.gcs_el0_mode = 0;
+ current->thread.gcs_el0_locked = 0;
write_sysreg_s(GCSCRE0_EL1_nTR, SYS_GCSCRE0_EL1);
write_sysreg_s(0, SYS_GCSPR_EL0);
}
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 5d07ee85bdae..5d24dc53799b 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -25,107 +25,6 @@
#include <asm/cputype.h>
#include <asm/topology.h>
-#ifdef CONFIG_ACPI
-static bool __init acpi_cpu_is_threaded(int cpu)
-{
- int is_threaded = acpi_pptt_cpu_is_thread(cpu);
-
- /*
- * if the PPTT doesn't have thread information, assume a homogeneous
- * machine and return the current CPU's thread state.
- */
- if (is_threaded < 0)
- is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;
-
- return !!is_threaded;
-}
-
-struct cpu_smt_info {
- unsigned int thread_num;
- int core_id;
-};
-
-/*
- * Propagate the topology information of the processor_topology_node tree to the
- * cpu_topology array.
- */
-int __init parse_acpi_topology(void)
-{
- unsigned int max_smt_thread_num = 1;
- struct cpu_smt_info *entry;
- struct xarray hetero_cpu;
- unsigned long hetero_id;
- int cpu, topology_id;
-
- if (acpi_disabled)
- return 0;
-
- xa_init(&hetero_cpu);
-
- for_each_possible_cpu(cpu) {
- topology_id = find_acpi_cpu_topology(cpu, 0);
- if (topology_id < 0)
- return topology_id;
-
- if (acpi_cpu_is_threaded(cpu)) {
- cpu_topology[cpu].thread_id = topology_id;
- topology_id = find_acpi_cpu_topology(cpu, 1);
- cpu_topology[cpu].core_id = topology_id;
-
- /*
- * In the PPTT, CPUs below a node with the 'identical
- * implementation' flag have the same number of threads.
- * Count the number of threads for only one CPU (i.e.
- * one core_id) among those with the same hetero_id.
- * See the comment of find_acpi_cpu_topology_hetero_id()
- * for more details.
- *
- * One entry is created for each node having:
- * - the 'identical implementation' flag
- * - its parent not having the flag
- */
- hetero_id = find_acpi_cpu_topology_hetero_id(cpu);
- entry = xa_load(&hetero_cpu, hetero_id);
- if (!entry) {
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- WARN_ON_ONCE(!entry);
-
- if (entry) {
- entry->core_id = topology_id;
- entry->thread_num = 1;
- xa_store(&hetero_cpu, hetero_id,
- entry, GFP_KERNEL);
- }
- } else if (entry->core_id == topology_id) {
- entry->thread_num++;
- }
- } else {
- cpu_topology[cpu].thread_id = -1;
- cpu_topology[cpu].core_id = topology_id;
- }
- topology_id = find_acpi_cpu_topology_cluster(cpu);
- cpu_topology[cpu].cluster_id = topology_id;
- topology_id = find_acpi_cpu_topology_package(cpu);
- cpu_topology[cpu].package_id = topology_id;
- }
-
- /*
- * This is a short loop since the number of XArray elements is the
- * number of heterogeneous CPU clusters. On a homogeneous system
- * there's only one entry in the XArray.
- */
- xa_for_each(&hetero_cpu, hetero_id, entry) {
- max_smt_thread_num = max(max_smt_thread_num, entry->thread_num);
- xa_erase(&hetero_cpu, hetero_id);
- kfree(entry);
- }
-
- cpu_smt_set_num_threads(max_smt_thread_num, max_smt_thread_num);
- xa_destroy(&hetero_cpu);
- return 0;
-}
-#endif
-
#ifdef CONFIG_ARM64_AMU_EXTN
#define read_corecnt() read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0)
#define read_constcnt() read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0)