author		Takashi Iwai <tiwai@suse.de>	2026-05-06 16:10:00 +0200
committer	Takashi Iwai <tiwai@suse.de>	2026-05-06 16:10:00 +0200
commit		06bc7ff0a1e0f2b0102e1314e3527a7ec0997851 (patch)
tree		fc0fe1da457a988a463e8c883940e445873a28c4 /arch
parent		2bcbb163162789d3488562073dbb99d9bd71a762 (diff)
parent		5776bcdf4dccac8edc1160482792b512da5c08b4 (diff)
Merge tag 'asoc-fix-v7.1-rc2' of https://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-linus
ASoC: Fixes for v7.1

Another batch of fixes, plus a couple of quirks (mostly AMD ones, as has
been the case recently). All driver changes, including fixes for the KUnit
tests for the Cirrus drivers that could cause memory corruption.
Diffstat (limited to 'arch')
-rw-r--r--arch/arm64/include/asm/irqflags.h14
-rw-r--r--arch/arm64/include/asm/kernel-pgtable.h7
-rw-r--r--arch/arm64/include/asm/kvm_asm.h28
-rw-r--r--arch/arm64/include/asm/kvm_host.h5
-rw-r--r--arch/arm64/kernel/pi/patch-scs.c4
-rw-r--r--arch/arm64/kernel/signal.c54
-rw-r--r--arch/arm64/kvm/arm.c4
-rw-r--r--arch/arm64/kvm/config.c23
-rw-r--r--arch/arm64/kvm/hyp/nvhe/hyp-main.c30
-rw-r--r--arch/arm64/kvm/hyp/nvhe/pkvm.c38
-rw-r--r--arch/arm64/kvm/hyp/nvhe/setup.c6
-rw-r--r--arch/arm64/kvm/vgic/vgic-mmio-v2.c2
-rw-r--r--arch/arm64/kvm/vgic/vgic-mmio-v3.c2
-rw-r--r--arch/s390/kernel/debug.c8
-rw-r--r--arch/s390/mm/fault.c2
-rw-r--r--arch/sh/include/asm/setup.h2
16 files changed, 154 insertions, 75 deletions
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index d4d7451c2c12..a8cb5a5c93b7 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -40,7 +40,7 @@ static __always_inline void __pmr_local_irq_enable(void)
barrier();
}
-static inline void arch_local_irq_enable(void)
+static __always_inline void arch_local_irq_enable(void)
{
if (system_uses_irq_prio_masking()) {
__pmr_local_irq_enable();
@@ -68,7 +68,7 @@ static __always_inline void __pmr_local_irq_disable(void)
barrier();
}
-static inline void arch_local_irq_disable(void)
+static __always_inline void arch_local_irq_disable(void)
{
if (system_uses_irq_prio_masking()) {
__pmr_local_irq_disable();
@@ -90,7 +90,7 @@ static __always_inline unsigned long __pmr_local_save_flags(void)
/*
* Save the current interrupt enable state.
*/
-static inline unsigned long arch_local_save_flags(void)
+static __always_inline unsigned long arch_local_save_flags(void)
{
if (system_uses_irq_prio_masking()) {
return __pmr_local_save_flags();
@@ -109,7 +109,7 @@ static __always_inline bool __pmr_irqs_disabled_flags(unsigned long flags)
return flags != GIC_PRIO_IRQON;
}
-static inline bool arch_irqs_disabled_flags(unsigned long flags)
+static __always_inline bool arch_irqs_disabled_flags(unsigned long flags)
{
if (system_uses_irq_prio_masking()) {
return __pmr_irqs_disabled_flags(flags);
@@ -128,7 +128,7 @@ static __always_inline bool __pmr_irqs_disabled(void)
return __pmr_irqs_disabled_flags(__pmr_local_save_flags());
}
-static inline bool arch_irqs_disabled(void)
+static __always_inline bool arch_irqs_disabled(void)
{
if (system_uses_irq_prio_masking()) {
return __pmr_irqs_disabled();
@@ -160,7 +160,7 @@ static __always_inline unsigned long __pmr_local_irq_save(void)
return flags;
}
-static inline unsigned long arch_local_irq_save(void)
+static __always_inline unsigned long arch_local_irq_save(void)
{
if (system_uses_irq_prio_masking()) {
return __pmr_local_irq_save();
@@ -187,7 +187,7 @@ static __always_inline void __pmr_local_irq_restore(unsigned long flags)
/*
* restore saved IRQ state
*/
-static inline void arch_local_irq_restore(unsigned long flags)
+static __always_inline void arch_local_irq_restore(unsigned long flags)
{
if (system_uses_irq_prio_masking()) {
__pmr_local_irq_restore(flags);
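The inline-to-__always_inline conversions above close a gap: plain inline is only a hint, so the compiler may still emit out-of-line copies of these IRQ helpers, which (presumably the motivation here) can then be instrumented or traced in contexts where that is unsafe. A standalone sketch of the distinction, with my_always_inline standing in for the kernel's __always_inline from compiler_types.h:

#define my_always_inline inline __attribute__((__always_inline__))

static inline int hint_only(int x)		/* compiler may emit a real call */
{
	return x + 1;
}

static my_always_inline int forced(int x)	/* always expanded in place */
{
	return x + 1;
}

int demo(int x)
{
	return hint_only(x) + forced(x);
}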
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 74a4f738c5f5..229ee7976f69 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -68,7 +68,12 @@
#define KERNEL_SEGMENT_COUNT 5
#if SWAPPER_BLOCK_SIZE > SEGMENT_ALIGN
-#define EARLY_SEGMENT_EXTRA_PAGES (KERNEL_SEGMENT_COUNT + 1)
+/*
+ * KERNEL_SEGMENT_COUNT counts the permanent kernel VMAs. The early mapping
+ * has one additional split, [_text, _stext). Reserve one more page for the
+ * SWAPPER_BLOCK_SIZE-unaligned boundaries.
+ */
+#define EARLY_SEGMENT_EXTRA_PAGES (KERNEL_SEGMENT_COUNT + 2)
/*
* The initial ID map consists of the kernel image, mapped as two separate
* segments, and may appear misaligned wrt the swapper block size. This means
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 37414440cee7..043495f7fc78 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -50,6 +50,9 @@
#include <linux/mm.h>
+#define MARKER(m) \
+ m, __after_##m = m - 1
+
enum __kvm_host_smccc_func {
/* Hypercalls that are unavailable once pKVM has finalised. */
/* __KVM_HOST_SMCCC_FUNC___kvm_hyp_init */
@@ -59,8 +62,10 @@ enum __kvm_host_smccc_func {
__KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs,
__KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs,
__KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config,
+
+ MARKER(__KVM_HOST_SMCCC_FUNC_MIN_PKVM),
+
__KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize,
- __KVM_HOST_SMCCC_FUNC_MIN_PKVM = __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize,
/* Hypercalls that are always available and common to [nh]VHE/pKVM. */
__KVM_HOST_SMCCC_FUNC___kvm_adjust_pc,
@@ -72,11 +77,20 @@ enum __kvm_host_smccc_func {
__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_range,
__KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
__KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
+ __KVM_HOST_SMCCC_FUNC___tracing_load,
+ __KVM_HOST_SMCCC_FUNC___tracing_unload,
+ __KVM_HOST_SMCCC_FUNC___tracing_enable,
+ __KVM_HOST_SMCCC_FUNC___tracing_swap_reader,
+ __KVM_HOST_SMCCC_FUNC___tracing_update_clock,
+ __KVM_HOST_SMCCC_FUNC___tracing_reset,
+ __KVM_HOST_SMCCC_FUNC___tracing_enable_event,
+ __KVM_HOST_SMCCC_FUNC___tracing_write_event,
__KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs,
__KVM_HOST_SMCCC_FUNC___vgic_v3_restore_vmcr_aprs,
__KVM_HOST_SMCCC_FUNC___vgic_v5_save_apr,
__KVM_HOST_SMCCC_FUNC___vgic_v5_restore_vmcr_apr,
- __KVM_HOST_SMCCC_FUNC_MAX_NO_PKVM = __KVM_HOST_SMCCC_FUNC___vgic_v5_restore_vmcr_apr,
+
+ MARKER(__KVM_HOST_SMCCC_FUNC_PKVM_ONLY),
/* Hypercalls that are available only when pKVM has finalised. */
__KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp,
@@ -100,14 +114,8 @@ enum __kvm_host_smccc_func {
__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_load,
__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_put,
__KVM_HOST_SMCCC_FUNC___pkvm_tlb_flush_vmid,
- __KVM_HOST_SMCCC_FUNC___tracing_load,
- __KVM_HOST_SMCCC_FUNC___tracing_unload,
- __KVM_HOST_SMCCC_FUNC___tracing_enable,
- __KVM_HOST_SMCCC_FUNC___tracing_swap_reader,
- __KVM_HOST_SMCCC_FUNC___tracing_update_clock,
- __KVM_HOST_SMCCC_FUNC___tracing_reset,
- __KVM_HOST_SMCCC_FUNC___tracing_enable_event,
- __KVM_HOST_SMCCC_FUNC___tracing_write_event,
+
+ MARKER(__KVM_HOST_SMCCC_FUNC_MAX)
};
#define DECLARE_KVM_VHE_SYM(sym) extern char sym[]
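The relocated MARKER() macro defines a named enumerator and then immediately rewinds the implicit counter, so a marker labels a position in the enum without consuming a slot. A standalone sketch of the numbering it produces:

#define MARKER(m) \
	m, __after_##m = m - 1

enum demo {
	A,		/* 0 */
	MARKER(MIN),	/* MIN = 1; counter rewound to 0 */
	B,		/* 1, same value as MIN: first entry of the range */
	C,		/* 2 */
	MARKER(MAX)	/* MAX = 3 == number of real entries */
};

That is what lets hyp-main.c below assert ARRAY_SIZE(host_hcall) == __KVM_HOST_SMCCC_FUNC_MAX and switch to half-open range checks (id >= min, id < max) instead of aliasing each marker to a neighbouring entry.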
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 851f6171751c..65eead8362e0 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -450,9 +450,6 @@ struct kvm_vcpu_fault_info {
r = __VNCR_START__ + ((VNCR_ ## r) / 8), \
__after_##r = __MAX__(__before_##r - 1, r)
-#define MARKER(m) \
- m, __after_##m = m - 1
-
enum vcpu_sysreg {
__INVALID_SYSREG__, /* 0 is reserved as an invalid value */
MPIDR_EL1, /* MultiProcessor Affinity Register */
@@ -1548,7 +1545,7 @@ static inline bool __vcpu_has_feature(const struct kvm_arch *ka, int feature)
#define kvm_vcpu_has_feature(k, f) __vcpu_has_feature(&(k)->arch, (f))
#define vcpu_has_feature(v, f) __vcpu_has_feature(&(v)->kvm->arch, (f))
-#define kvm_vcpu_initialized(v) vcpu_get_flag(vcpu, VCPU_INITIALIZED)
+#define kvm_vcpu_initialized(v) vcpu_get_flag(v, VCPU_INITIALIZED)
int kvm_trng_call(struct kvm_vcpu *vcpu);
#ifdef CONFIG_KVM
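The kvm_vcpu_initialized() change fixes a macro-hygiene bug: the body referenced vcpu rather than the parameter v, so the macro compiled only where a variable named vcpu was in scope, and silently inspected that variable instead of its argument. Reduced to a standalone sketch with hypothetical names:

struct vcpu_sketch { int initialized; };

#define is_init_buggy(v)	((vcpu)->initialized)	/* captures caller's 'vcpu' */
#define is_init_fixed(v)	((v)->initialized)	/* uses the argument */

int demo(struct vcpu_sketch *vcpu, struct vcpu_sketch *other)
{
	/* Both compile here, but the buggy form tests 'vcpu', not 'other'. */
	return is_init_buggy(other) == is_init_fixed(other);
}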
diff --git a/arch/arm64/kernel/pi/patch-scs.c b/arch/arm64/kernel/pi/patch-scs.c
index dac568e4a54f..3944ad899021 100644
--- a/arch/arm64/kernel/pi/patch-scs.c
+++ b/arch/arm64/kernel/pi/patch-scs.c
@@ -196,9 +196,9 @@ static int scs_handle_fde_frame(const struct eh_frame *frame,
loc += *opcode++ * code_alignment_factor;
loc += (*opcode++ << 8) * code_alignment_factor;
loc += (*opcode++ << 16) * code_alignment_factor;
- loc += (*opcode++ << 24) * code_alignment_factor;
+ loc += ((u64)*opcode++ << 24) * code_alignment_factor;
size -= 4;
- break;
+ break;
case DW_CFA_def_cfa:
case DW_CFA_offset_extended:
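The patch-scs.c fix addresses integer promotion in the 4-byte location-advance decoding: *opcode is a byte, so *opcode << 24 is evaluated as a 32-bit int, and a byte of 0x80 or above lands in the sign bit and sign-extends when the result widens to the 64-bit loc. A standalone sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t b = 0x80;
	uint64_t loc = 0;

	/* b promotes to int; 0x80 << 24 overflows into the sign bit and
	 * widens to 0xffffffff80000000. */
	uint64_t bad = loc + (b << 24);

	/* Widen first, then shift: 0x0000000080000000 as intended. */
	uint64_t good = loc + ((uint64_t)b << 24);

	printf("%llx vs %llx\n",
	       (unsigned long long)bad, (unsigned long long)good);
	return 0;
}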
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 08ffc5a5aea4..38e6fa204c17 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -67,6 +67,9 @@ struct rt_sigframe_user_layout {
unsigned long end_offset;
};
+#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
+#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)
+
/*
* Holds any EL0-controlled state that influences unprivileged memory accesses.
* This includes both accesses done in userspace and uaccess done in the kernel.
@@ -74,13 +77,35 @@ struct rt_sigframe_user_layout {
* This state needs to be carefully managed to ensure that it doesn't cause
* uaccess to fail when setting up the signal frame, and the signal handler
* itself also expects a well-defined state when entered.
+ *
+ * The struct should be zero-initialised. Its members should only be accessed
+ * via the accessors below. __valid_fields tracks which of the fields are valid
+ * (have been set to some value).
*/
struct user_access_state {
- u64 por_el0;
+ unsigned int __valid_fields;
+ u64 __por_el0;
};
-#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
-#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)
+#define UA_STATE_HAS_POR_EL0 BIT(0)
+
+static void set_ua_state_por_el0(struct user_access_state *ua_state,
+ u64 por_el0)
+{
+ ua_state->__por_el0 = por_el0;
+ ua_state->__valid_fields |= UA_STATE_HAS_POR_EL0;
+}
+
+static int get_ua_state_por_el0(const struct user_access_state *ua_state,
+ u64 *por_el0)
+{
+ if (ua_state->__valid_fields & UA_STATE_HAS_POR_EL0) {
+ *por_el0 = ua_state->__por_el0;
+ return 0;
+ }
+
+ return -ENOENT;
+}
/*
* Save the user access state into ua_state and reset it to disable any
@@ -94,7 +119,7 @@ static void save_reset_user_access_state(struct user_access_state *ua_state)
for (int pkey = 0; pkey < arch_max_pkey(); pkey++)
por_enable_all |= POR_ELx_PERM_PREP(pkey, POE_RWX);
- ua_state->por_el0 = read_sysreg_s(SYS_POR_EL0);
+ set_ua_state_por_el0(ua_state, read_sysreg_s(SYS_POR_EL0));
write_sysreg_s(por_enable_all, SYS_POR_EL0);
/*
* No ISB required as we can tolerate spurious Overlay faults -
@@ -122,8 +147,10 @@ static void set_handler_user_access_state(void)
*/
static void restore_user_access_state(const struct user_access_state *ua_state)
{
- if (system_supports_poe())
- write_sysreg_s(ua_state->por_el0, SYS_POR_EL0);
+ u64 por_el0;
+
+ if (get_ua_state_por_el0(ua_state, &por_el0) == 0)
+ write_sysreg_s(por_el0, SYS_POR_EL0);
}
static void init_user_layout(struct rt_sigframe_user_layout *user)
@@ -333,11 +360,16 @@ static int restore_fpmr_context(struct user_ctxs *user)
static int preserve_poe_context(struct poe_context __user *ctx,
const struct user_access_state *ua_state)
{
- int err = 0;
+ int err;
+ u64 por_el0;
+
+ err = get_ua_state_por_el0(ua_state, &por_el0);
+ if (WARN_ON_ONCE(err))
+ return err;
__put_user_error(POE_MAGIC, &ctx->head.magic, err);
__put_user_error(sizeof(*ctx), &ctx->head.size, err);
- __put_user_error(ua_state->por_el0, &ctx->por_el0, err);
+ __put_user_error(por_el0, &ctx->por_el0, err);
return err;
}
@@ -353,7 +385,7 @@ static int restore_poe_context(struct user_ctxs *user,
__get_user_error(por_el0, &(user->poe->por_el0), err);
if (!err)
- ua_state->por_el0 = por_el0;
+ set_ua_state_por_el0(ua_state, por_el0);
return err;
}
@@ -1095,7 +1127,7 @@ SYSCALL_DEFINE0(rt_sigreturn)
{
struct pt_regs *regs = current_pt_regs();
struct rt_sigframe __user *frame;
- struct user_access_state ua_state;
+ struct user_access_state ua_state = {};
/* Always make any pending restarted system calls return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
@@ -1507,7 +1539,7 @@ static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
{
struct rt_sigframe_user_layout user;
struct rt_sigframe __user *frame;
- struct user_access_state ua_state;
+ struct user_access_state ua_state = {};
int err = 0;
fpsimd_save_and_flush_current_state();
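The reworked user_access_state makes "nothing recorded" explicit: the struct starts zeroed (hence the two = {} initialisations in rt_sigreturn() and setup_rt_frame()), and the getter fails with -ENOENT until the setter has run, instead of every consumer re-deriving validity from system_supports_poe(). A minimal usage sketch (hypothetical demo function, not kernel code):

static void ua_state_demo(void)
{
	struct user_access_state ua_state = {};	/* __valid_fields == 0 */
	u64 por_el0;

	/* Not recorded yet: the getter fails instead of handing back
	 * whatever happened to be on the stack. */
	WARN_ON(get_ua_state_por_el0(&ua_state, &por_el0) != -ENOENT);

	set_ua_state_por_el0(&ua_state, 0x1234);

	/* Recorded: succeeds, and por_el0 == 0x1234. */
	WARN_ON(get_ua_state_por_el0(&ua_state, &por_el0) != 0);
}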
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 176cbe8baad3..8bb2c7422cc8 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -824,6 +824,10 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF | HCR_VSE);
+ irq_lines |= (!irqchip_in_kernel(v->kvm) &&
+ (kvm_timer_should_notify_user(v) ||
+ kvm_pmu_should_notify_user(v)));
+
return ((irq_lines || kvm_vgic_vcpu_pending_irq(v))
&& !kvm_arm_vcpu_stopped(v) && !v->arch.pause);
}
diff --git a/arch/arm64/kvm/config.c b/arch/arm64/kvm/config.c
index f35b8dddd7c1..0622162b089e 100644
--- a/arch/arm64/kvm/config.c
+++ b/arch/arm64/kvm/config.c
@@ -131,7 +131,6 @@ struct reg_feat_map_desc {
}
#define FEAT_SPE ID_AA64DFR0_EL1, PMSVer, IMP
-#define FEAT_SPE_FnE ID_AA64DFR0_EL1, PMSVer, V1P2
#define FEAT_BRBE ID_AA64DFR0_EL1, BRBE, IMP
#define FEAT_TRC_SR ID_AA64DFR0_EL1, TraceVer, IMP
#define FEAT_PMUv3 ID_AA64DFR0_EL1, PMUVer, IMP
@@ -192,7 +191,7 @@ struct reg_feat_map_desc {
#define FEAT_SRMASK ID_AA64MMFR4_EL1, SRMASK, IMP
#define FEAT_PoPS ID_AA64MMFR4_EL1, PoPS, IMP
#define FEAT_PFAR ID_AA64PFR1_EL1, PFAR, IMP
-#define FEAT_Debugv8p9 ID_AA64DFR0_EL1, PMUVer, V3P9
+#define FEAT_Debugv8p9 ID_AA64DFR0_EL1, DebugVer, V8P9
#define FEAT_PMUv3_SS ID_AA64DFR0_EL1, PMSS, IMP
#define FEAT_SEBEP ID_AA64DFR0_EL1, SEBEP, IMP
#define FEAT_EBEP ID_AA64DFR1_EL1, EBEP, IMP
@@ -283,7 +282,7 @@ static bool feat_anerr(struct kvm *kvm)
static bool feat_sme_smps(struct kvm *kvm)
{
/*
- * Revists this if KVM ever supports SME -- this really should
+ * Revisit this if KVM ever supports SME -- this really should
* look at the guest's view of SMIDR_EL1. Funnily enough, this
* is not captured in the JSON file, but only as a note in the
* ARM ARM.
@@ -295,17 +294,27 @@ static bool feat_sme_smps(struct kvm *kvm)
static bool feat_spe_fds(struct kvm *kvm)
{
/*
- * Revists this if KVM ever supports SPE -- this really should
+ * Revisit this if KVM ever supports SPE -- this really should
* look at the guest's view of PMSIDR_EL1.
*/
return (kvm_has_feat(kvm, FEAT_SPEv1p4) &&
(read_sysreg_s(SYS_PMSIDR_EL1) & PMSIDR_EL1_FDS));
}
+static bool feat_spe_fne(struct kvm *kvm)
+{
+ /*
+ * Revisit this if KVM ever supports SPE -- this really should
+ * look at the guest's view of PMSIDR_EL1.
+ */
+ return (kvm_has_feat(kvm, FEAT_SPEv1p2) &&
+ (read_sysreg_s(SYS_PMSIDR_EL1) & PMSIDR_EL1_FnE));
+}
+
static bool feat_trbe_mpam(struct kvm *kvm)
{
/*
- * Revists this if KVM ever supports both MPAM and TRBE --
+ * Revisit this if KVM ever supports both MPAM and TRBE --
* this really should look at the guest's view of TRBIDR_EL1.
*/
return (kvm_has_feat(kvm, FEAT_TRBE) &&
@@ -537,7 +546,7 @@ static const struct reg_bits_to_feat_map hdfgrtr_feat_map[] = {
HDFGRTR_EL2_PMBPTR_EL1 |
HDFGRTR_EL2_PMBLIMITR_EL1,
FEAT_SPE),
- NEEDS_FEAT(HDFGRTR_EL2_nPMSNEVFR_EL1, FEAT_SPE_FnE),
+ NEEDS_FEAT(HDFGRTR_EL2_nPMSNEVFR_EL1, feat_spe_fne),
NEEDS_FEAT(HDFGRTR_EL2_nBRBDATA |
HDFGRTR_EL2_nBRBCTL |
HDFGRTR_EL2_nBRBIDR,
@@ -605,7 +614,7 @@ static const struct reg_bits_to_feat_map hdfgwtr_feat_map[] = {
HDFGWTR_EL2_PMBPTR_EL1 |
HDFGWTR_EL2_PMBLIMITR_EL1,
FEAT_SPE),
- NEEDS_FEAT(HDFGWTR_EL2_nPMSNEVFR_EL1, FEAT_SPE_FnE),
+ NEEDS_FEAT(HDFGWTR_EL2_nPMSNEVFR_EL1, feat_spe_fne),
NEEDS_FEAT(HDFGWTR_EL2_nBRBDATA |
HDFGWTR_EL2_nBRBCTL,
FEAT_BRBE),
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 73f2e0221e70..06db299c37a8 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -709,6 +709,14 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__kvm_tlb_flush_vmid_range),
HANDLE_FUNC(__kvm_flush_cpu_context),
HANDLE_FUNC(__kvm_timer_set_cntvoff),
+ HANDLE_FUNC(__tracing_load),
+ HANDLE_FUNC(__tracing_unload),
+ HANDLE_FUNC(__tracing_enable),
+ HANDLE_FUNC(__tracing_swap_reader),
+ HANDLE_FUNC(__tracing_update_clock),
+ HANDLE_FUNC(__tracing_reset),
+ HANDLE_FUNC(__tracing_enable_event),
+ HANDLE_FUNC(__tracing_write_event),
HANDLE_FUNC(__vgic_v3_save_aprs),
HANDLE_FUNC(__vgic_v3_restore_vmcr_aprs),
HANDLE_FUNC(__vgic_v5_save_apr),
@@ -735,22 +743,16 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__pkvm_vcpu_load),
HANDLE_FUNC(__pkvm_vcpu_put),
HANDLE_FUNC(__pkvm_tlb_flush_vmid),
- HANDLE_FUNC(__tracing_load),
- HANDLE_FUNC(__tracing_unload),
- HANDLE_FUNC(__tracing_enable),
- HANDLE_FUNC(__tracing_swap_reader),
- HANDLE_FUNC(__tracing_update_clock),
- HANDLE_FUNC(__tracing_reset),
- HANDLE_FUNC(__tracing_enable_event),
- HANDLE_FUNC(__tracing_write_event),
};
static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(unsigned long, id, host_ctxt, 0);
- unsigned long hcall_min = 0, hcall_max = -1;
+ unsigned long hcall_min = 0, hcall_max = __KVM_HOST_SMCCC_FUNC_MAX;
hcall_t hfn;
+ BUILD_BUG_ON(ARRAY_SIZE(host_hcall) != __KVM_HOST_SMCCC_FUNC_MAX);
+
/*
* If pKVM has been initialised then reject any calls to the
* early "privileged" hypercalls. Note that we cannot reject
@@ -763,16 +765,14 @@ static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
if (static_branch_unlikely(&kvm_protected_mode_initialized)) {
hcall_min = __KVM_HOST_SMCCC_FUNC_MIN_PKVM;
} else {
- hcall_max = __KVM_HOST_SMCCC_FUNC_MAX_NO_PKVM;
+ hcall_max = __KVM_HOST_SMCCC_FUNC_PKVM_ONLY;
}
id &= ~ARM_SMCCC_CALL_HINTS;
id -= KVM_HOST_SMCCC_ID(0);
- if (unlikely(id < hcall_min || id > hcall_max ||
- id >= ARRAY_SIZE(host_hcall))) {
+ if (unlikely(id < hcall_min || id >= hcall_max))
goto inval;
- }
hfn = host_hcall[id];
if (unlikely(!hfn))
@@ -805,6 +805,10 @@ static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
}
func_id &= ~ARM_SMCCC_CALL_HINTS;
+ if (upper_32_bits(func_id)) {
+ cpu_reg(host_ctxt, 0) = SMCCC_RET_NOT_SUPPORTED;
+ goto exit_skip_instr;
+ }
handled = kvm_host_psci_handler(host_ctxt, func_id);
if (!handled)
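The new upper-bits check in handle_host_smc() enforces that SMCCC function IDs are 32-bit quantities: anything wider is malformed and is now answered with SMCCC_RET_NOT_SUPPORTED before PSCI or hypercall dispatch gets a chance to misinterpret it. For reference, a sketch matching the kernel's helper definition:

/* upper_32_bits() as defined in the kernel headers: the double shift
 * stays well-defined even when 'n' is itself only 32 bits wide, where
 * a single '>> 32' would be undefined behaviour. */
#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))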
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index 7ed96d64d611..e7496eb85628 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -266,7 +266,8 @@ struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
if (hyp_vm->kvm.created_vcpus <= vcpu_idx)
goto unlock;
- hyp_vcpu = hyp_vm->vcpus[vcpu_idx];
+ /* Pairs with smp_store_release() in register_hyp_vcpu(). */
+ hyp_vcpu = smp_load_acquire(&hyp_vm->vcpus[vcpu_idx]);
if (!hyp_vcpu)
goto unlock;
@@ -860,12 +861,30 @@ err_unpin_kvm:
* the page-aligned size of 'struct pkvm_hyp_vcpu'.
* Return 0 on success, negative error code on failure.
*/
+static int register_hyp_vcpu(struct pkvm_hyp_vm *hyp_vm,
+ struct pkvm_hyp_vcpu *hyp_vcpu)
+{
+ unsigned int idx = hyp_vcpu->vcpu.vcpu_idx;
+
+ if (idx >= hyp_vm->kvm.created_vcpus)
+ return -EINVAL;
+
+ if (hyp_vm->vcpus[idx])
+ return -EINVAL;
+
+ /*
+ * Ensure the hyp_vcpu is initialised before publishing it to
+ * the vCPU-load path via 'hyp_vm->vcpus[]'.
+ */
+ smp_store_release(&hyp_vm->vcpus[idx], hyp_vcpu);
+ return 0;
+}
+
int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
unsigned long vcpu_hva)
{
struct pkvm_hyp_vcpu *hyp_vcpu;
struct pkvm_hyp_vm *hyp_vm;
- unsigned int idx;
int ret;
hyp_vcpu = map_donated_memory(vcpu_hva, sizeof(*hyp_vcpu));
@@ -884,18 +903,11 @@ int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
if (ret)
goto unlock;
- idx = hyp_vcpu->vcpu.vcpu_idx;
- if (idx >= hyp_vm->kvm.created_vcpus) {
- ret = -EINVAL;
- goto unlock;
- }
-
- if (hyp_vm->vcpus[idx]) {
- ret = -EINVAL;
- goto unlock;
+ ret = register_hyp_vcpu(hyp_vm, hyp_vcpu);
+ if (ret) {
+ unpin_host_vcpu(host_vcpu);
+ unpin_host_sve_state(hyp_vcpu);
}
-
- hyp_vm->vcpus[idx] = hyp_vcpu;
unlock:
hyp_spin_unlock(&vm_table_lock);
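The new smp_load_acquire()/smp_store_release() pairing is the standard publish pattern: the release orders all of the producer's initialising stores before the pointer store, and the acquire guarantees that a reader who observes the pointer also observes the initialised contents. Reduced to a sketch with hypothetical types:

#include <asm/barrier.h>

struct thing {
	int state;
};

static struct thing *slots[16];

/* Producer: fully initialise, then publish. The release keeps the store
 * to ->state from being reordered after the pointer store. */
static void publish(unsigned int idx, struct thing *t)
{
	t->state = 1;
	smp_store_release(&slots[idx], t);
}

/* Consumer: pairs with the release above. A non-NULL result implies
 * ->state is visible as 1. */
static struct thing *lookup(unsigned int idx)
{
	return smp_load_acquire(&slots[idx]);
}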
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
index d8e5b563fd3d..d461981616d9 100644
--- a/arch/arm64/kvm/hyp/nvhe/setup.c
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -312,15 +312,15 @@ void __noreturn __pkvm_init_finalise(void)
};
pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops;
- ret = fix_host_ownership();
+ ret = fix_hyp_pgtable_refcnt();
if (ret)
goto out;
- ret = fix_hyp_pgtable_refcnt();
+ ret = hyp_create_fixmap();
if (ret)
goto out;
- ret = hyp_create_fixmap();
+ ret = fix_host_ownership();
if (ret)
goto out;
diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v2.c b/arch/arm64/kvm/vgic/vgic-mmio-v2.c
index 406845b3117c..0643e333db35 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio-v2.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio-v2.c
@@ -91,7 +91,7 @@ static int vgic_mmio_uaccess_write_v2_misc(struct kvm_vcpu *vcpu,
* migration from old kernels to new kernels with legacy
* userspace.
*/
- reg = FIELD_GET(GICD_IIDR_REVISION_MASK, reg);
+ reg = FIELD_GET(GICD_IIDR_REVISION_MASK, val);
switch (reg) {
case KVM_VGIC_IMP_REV_2:
case KVM_VGIC_IMP_REV_3:
diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
index 89edb84d1ac6..5913a20d8301 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
@@ -194,7 +194,7 @@ static int vgic_mmio_uaccess_write_v3_misc(struct kvm_vcpu *vcpu,
if ((reg ^ val) & ~GICD_IIDR_REVISION_MASK)
return -EINVAL;
- reg = FIELD_GET(GICD_IIDR_REVISION_MASK, reg);
+ reg = FIELD_GET(GICD_IIDR_REVISION_MASK, val);
switch (reg) {
case KVM_VGIC_IMP_REV_2:
case KVM_VGIC_IMP_REV_3:
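Both vgic hunks fix the same one-character bug: the revision was extracted from reg (the current register value, already consumed by the comparison above) instead of val (the value userspace is writing), so the switch always saw the existing revision. FIELD_GET() masks out and right-justifies the field selected by its mask; a standalone sketch with a hypothetical field layout:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define REV_MASK_SKETCH	GENMASK(15, 12)	/* hypothetical 4-bit revision field */

static u32 requested_rev(u32 written_val)
{
	/* Decode what userspace wrote, not what the register already held:
	 * e.g. written_val == 0x3000 yields 3. */
	return FIELD_GET(REV_MASK_SKETCH, written_val);
}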
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 31430e9bcfdd..7650f2adb5cf 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -1414,6 +1414,9 @@ static inline char *debug_get_user_string(const char __user *user_buf,
{
char *buffer;
+ if (!user_len)
+ return ERR_PTR(-EINVAL);
+
buffer = memdup_user_nul(user_buf, user_len);
if (IS_ERR(buffer))
return buffer;
@@ -1584,6 +1587,11 @@ static int debug_input_flush_fn(debug_info_t *id, struct debug_view *view,
char input_buf[1];
int rc = user_len;
+ if (!user_len) {
+ rc = -EINVAL;
+ goto out;
+ }
+
if (user_len > 0x10000)
user_len = 0x10000;
if (*offset != 0) {
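Both s390 debug hunks reject zero-length writes up front. memdup_user_nul(buf, 0) itself succeeds, returning a one-byte empty string, so the hazard is downstream: mainline's debug_get_user_string() strips a trailing newline by indexing buffer[user_len - 1], which for user_len == 0 reads one byte before the allocation. A sketch of that hazard, assuming the helper keeps the mainline shape:

	buffer = memdup_user_nul(user_buf, user_len);	/* "" when user_len == 0 */
	if (IS_ERR(buffer))
		return buffer;
	/* Trailing-newline strip: with user_len == 0 this is buffer[-1],
	 * an out-of-bounds read -- hence the new early -EINVAL. */
	if (buffer[user_len - 1] == '\n')
		buffer[user_len - 1] = 0;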
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 191cc53caead..028aeb9c48d6 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -438,7 +438,7 @@ void do_secure_storage_access(struct pt_regs *regs)
panic("Unexpected PGM 0x3d with TEID bit 61=0");
}
if (is_kernel_fault(regs)) {
- folio = phys_to_folio(addr);
+ folio = virt_to_folio((void *)addr);
if (unlikely(!folio_try_get(folio)))
return;
rc = uv_convert_from_secure(folio_to_phys(folio));
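The s390 fault fix corrects an address-space mix-up: for a kernel-mode fault, addr is a kernel virtual address, and virt_to_folio() performs the virtual-to-physical translation before the folio lookup, whereas phys_to_folio() trusts its argument to already be physical. Sketched under the assumption that the helper mirrors mainline's definition:

#include <linux/mm.h>

static inline struct folio *virt_to_folio_sketch(const void *v)
{
	/* virt_to_page() does the __pa() step the buggy call skipped. */
	return page_folio(virt_to_page(v));
}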
diff --git a/arch/sh/include/asm/setup.h b/arch/sh/include/asm/setup.h
index 63c9efc06348..8488f76b48b4 100644
--- a/arch/sh/include/asm/setup.h
+++ b/arch/sh/include/asm/setup.h
@@ -7,7 +7,7 @@
/*
* This is set up by the setup-routine at boot-time
*/
-extern unsigned char *boot_params_page;
+extern unsigned char boot_params_page[];
#define PARAM boot_params_page
#define MOUNT_ROOT_RDONLY (*(unsigned long *) (PARAM+0x000))
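The sh change fixes the classic array/pointer extern mismatch: the symbol is defined as an array, so the old pointer declaration made PARAM dereference the first bytes of the parameter page as though they were a pointer value. Standalone sketch with a hypothetical symbol name:

/* In the defining translation unit: */
unsigned char params[4096];

/* Elsewhere -- WRONG: this would declare a pointer *object* at the
 * symbol's address, so reads of 'params' load array contents as an
 * address:
 *
 *	extern unsigned char *params;
 *
 * RIGHT: matches the definition; 'params' decays to the array address. */
extern unsigned char params[];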