From 2f2d46959808e9b039ecb241ff13d50be2d6e231 Mon Sep 17 00:00:00 2001
From: David Woodhouse
Date: Sat, 19 Oct 2024 18:15:42 +0100
Subject: firmware/psci: Add definitions for PSCI v1.3 specification

The v1.3 PSCI spec (https://developer.arm.com/documentation/den0022)
adds the SYSTEM_OFF2 function. Add definitions for it and its
hibernation type parameter.

Signed-off-by: David Woodhouse
Reviewed-by: Miguel Luis
Link: https://lore.kernel.org/r/20241019172459.2241939-2-dwmw2@infradead.org
Signed-off-by: Oliver Upton
---
 include/uapi/linux/psci.h | 5 +++++
 1 file changed, 5 insertions(+)

(limited to 'include')

diff --git a/include/uapi/linux/psci.h b/include/uapi/linux/psci.h
index 42a40ad3fb62..81759ff385e6 100644
--- a/include/uapi/linux/psci.h
+++ b/include/uapi/linux/psci.h
@@ -59,6 +59,7 @@
 #define PSCI_1_1_FN_SYSTEM_RESET2		PSCI_0_2_FN(18)
 #define PSCI_1_1_FN_MEM_PROTECT			PSCI_0_2_FN(19)
 #define PSCI_1_1_FN_MEM_PROTECT_CHECK_RANGE	PSCI_0_2_FN(20)
+#define PSCI_1_3_FN_SYSTEM_OFF2			PSCI_0_2_FN(21)
 
 #define PSCI_1_0_FN64_CPU_DEFAULT_SUSPEND	PSCI_0_2_FN64(12)
 #define PSCI_1_0_FN64_NODE_HW_STATE		PSCI_0_2_FN64(13)
@@ -68,6 +69,7 @@
 
 #define PSCI_1_1_FN64_SYSTEM_RESET2		PSCI_0_2_FN64(18)
 #define PSCI_1_1_FN64_MEM_PROTECT_CHECK_RANGE	PSCI_0_2_FN64(20)
+#define PSCI_1_3_FN64_SYSTEM_OFF2		PSCI_0_2_FN64(21)
 
 /* PSCI v0.2 power state encoding for CPU_SUSPEND function */
 #define PSCI_0_2_POWER_STATE_ID_MASK		0xffff
@@ -100,6 +102,9 @@
 #define PSCI_1_1_RESET_TYPE_SYSTEM_WARM_RESET	0
 #define PSCI_1_1_RESET_TYPE_VENDOR_START	0x80000000U
 
+/* PSCI v1.3 hibernate type for SYSTEM_OFF2 */
+#define PSCI_1_3_OFF_TYPE_HIBERNATE_OFF		BIT(0)
+
 /* PSCI version decoding (independent of PSCI version) */
 #define PSCI_VERSION_MAJOR_SHIFT		16
 #define PSCI_VERSION_MINOR_MASK			\
--
cgit v1.2.3

From 8be82d536a9f8d9974cf746d235c1f1c24dd31ed Mon Sep 17 00:00:00 2001
From: David Woodhouse
Date: Sat, 19 Oct 2024 18:15:44 +0100
Subject: KVM: arm64: Add support for PSCI v1.2 and v1.3

As with PSCI v1.1 in commit 512865d83fd9 ("KVM: arm64: Bump guest PSCI
version to 1.1"), expose v1.3 to the guest by default. The SYSTEM_OFF2
call which is exposed by doing so is compatible for userspace because
it's just a new flag in the event that KVM raises, in precisely the
same way that SYSTEM_RESET2 was compatible when v1.1 was enabled by
default.
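For illustration only, a guest could request a hibernation-style
power-off through the new definitions roughly as follows. This is a
minimal sketch built on the generic SMCCC invocation helper; the
function name psci_system_off2_hibernate() is invented for the example
and error handling is omitted.

  #include <linux/arm-smccc.h>
  #include <uapi/linux/psci.h>

  /*
   * Sketch only: request a power-off that preserves a hibernation
   * image. On success the call does not return; on failure the PSCI
   * error code is left in res.a0.
   */
  static void psci_system_off2_hibernate(void)
  {
          struct arm_smccc_res res;

          arm_smccc_1_1_invoke(PSCI_1_3_FN64_SYSTEM_OFF2,
                               PSCI_1_3_OFF_TYPE_HIBERNATE_OFF, 0, &res);
  }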
Signed-off-by: David Woodhouse
Reviewed-by: Miguel Luis
Link: https://lore.kernel.org/r/20241019172459.2241939-4-dwmw2@infradead.org
Signed-off-by: Oliver Upton
---
 include/kvm/arm_psci.h | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'include')

diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h
index e8fb624013d1..cbaec804eb83 100644
--- a/include/kvm/arm_psci.h
+++ b/include/kvm/arm_psci.h
@@ -14,8 +14,10 @@
 #define KVM_ARM_PSCI_0_2	PSCI_VERSION(0, 2)
 #define KVM_ARM_PSCI_1_0	PSCI_VERSION(1, 0)
 #define KVM_ARM_PSCI_1_1	PSCI_VERSION(1, 1)
+#define KVM_ARM_PSCI_1_2	PSCI_VERSION(1, 2)
+#define KVM_ARM_PSCI_1_3	PSCI_VERSION(1, 3)
 
-#define KVM_ARM_PSCI_LATEST	KVM_ARM_PSCI_1_1
+#define KVM_ARM_PSCI_LATEST	KVM_ARM_PSCI_1_3
 
 static inline int kvm_psci_version(struct kvm_vcpu *vcpu)
 {
--
cgit v1.2.3

From 164b5e20cdf6038f1b38867d2f6252ec6f10c356 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Wed, 23 Oct 2024 15:53:14 +0100
Subject: KVM: arm64: nv: Handle CNTHCTL_EL2 specially

Accessing CNTHCTL_EL2 is fraught with danger if running with
HCR_EL2.E2H=1: half of the bits are held in CNTKCTL_EL1, and thus can
be changed behind our back, while the rest lives in the CNTHCTL_EL2
shadow copy that is memory-based.

Yes, this is a lot of fun!

Make sure that we merge the two on read access, while we can write to
CNTKCTL_EL1 in a more straightforward manner.

Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20241023145345.1613824-7-maz@kernel.org
Signed-off-by: Oliver Upton
---
 include/kvm/arm_arch_timer.h | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'include')

diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index c819c5d16613..fd650a8789b9 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -147,6 +147,9 @@ u64 timer_get_cval(struct arch_timer_context *ctxt);
 void kvm_timer_cpu_up(void);
 void kvm_timer_cpu_down(void);
 
+/* CNTKCTL_EL1 valid bits as of DDI0487J.a */
+#define CNTKCTL_VALID_BITS	(BIT(17) | GENMASK_ULL(9, 0))
+
 static inline bool has_cntpoff(void)
 {
 	return (has_vhe() && cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF));
--
cgit v1.2.3

From 336afe0c832d6eb985d0e9dbc5a70929594e58d9 Mon Sep 17 00:00:00 2001
From: Oliver Upton
Date: Fri, 25 Oct 2024 18:23:45 +0000
Subject: KVM: arm64: nv: Describe trap behaviour of MDCR_EL2.HPMN

MDCR_EL2.HPMN splits the PMU event counters into two ranges: the first
range is accessible from all ELs, and the second range is accessible
only to EL2/3. Supposing the guest hypervisor allows direct access to
the PMU counters from the L2, KVM needs to locally handle those
accesses.

Add a new complex trap configuration for HPMN that checks if the
counter index is accessible to the current context. As written, the
architecture suggests HPMN only causes PMEVCNTR<n>_EL0 to trap, though
intuition (and the pseudocode) suggest that the trap applies to
PMEVTYPER<n>_EL0 as well.
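Conceptually, the accessibility check boils down to comparing the
counter index against HPMN. The sketch below is illustrative only: the
placement of HPMN in MDCR_EL2[4:0] and the use of index 31 for the
cycle counter are architectural, but the macro and function names are
invented here and this is not the in-tree helper.

  #include <linux/bits.h>
  #include <linux/types.h>

  #define HPMN_MASK		GENMASK(4, 0)	/* MDCR_EL2.HPMN, bits [4:0] */
  #define CYCLE_COUNTER_IDX	31		/* PMCCNTR_EL0 in the counter bitmap */

  /*
   * Sketch only: event counters at or above HPMN are reserved for
   * EL2/EL3; the cycle counter is never carved out by HPMN.
   */
  static bool counter_is_hyp_owned(u64 mdcr_el2, unsigned int idx)
  {
          unsigned int hpmn = mdcr_el2 & HPMN_MASK;

          if (idx == CYCLE_COUNTER_IDX)
                  return false;

          return idx >= hpmn;
  }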
Reviewed-by: Marc Zyngier
Link: https://lore.kernel.org/r/20241025182354.3364124-11-oliver.upton@linux.dev
Signed-off-by: Oliver Upton
---
 include/kvm/arm_pmu.h | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'include')

diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index e08aeec5d936..feb5d1d35f0f 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -96,6 +96,7 @@ int kvm_arm_set_default_pmu(struct kvm *kvm);
 u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm);
 
 u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu);
+bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx);
 #else
 struct kvm_pmu {
 };
@@ -187,6 +188,11 @@ static inline u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+static inline bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
+{
+	return false;
+}
+
 #endif
 
 #endif
--
cgit v1.2.3

From a3034dab74fc12d6c0a589e31af9fafc436a4a0e Mon Sep 17 00:00:00 2001
From: Oliver Upton
Date: Fri, 25 Oct 2024 18:23:47 +0000
Subject: KVM: arm64: Rename kvm_pmu_valid_counter_mask()

Nested PMU support requires dynamically changing the visible range of
PMU counters based on the exception level and value of MDCR_EL2.HPMN.
At the same time, the PMU emulation code needs to know the absolute
number of implemented counters, regardless of context.

Rename the existing helper to make it obvious that it returns the
number of implemented counters and not anything else.

Reviewed-by: Marc Zyngier
Link: https://lore.kernel.org/r/20241025182354.3364124-13-oliver.upton@linux.dev
Signed-off-by: Oliver Upton
---
 include/kvm/arm_pmu.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'include')

diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index feb5d1d35f0f..019ace2bb914 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -47,7 +47,7 @@ static __always_inline bool kvm_arm_support_pmu_v3(void)
 #define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
 u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
 void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
-u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
+u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu);
 u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
 void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
 void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
@@ -114,7 +114,7 @@ static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
 }
 static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
 					     u64 select_idx, u64 val) {}
-static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
+static inline u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
 {
 	return 0;
 }
--
cgit v1.2.3

From 9a1c58cfefb06974a804174f127de3fedc779394 Mon Sep 17 00:00:00 2001
From: Oliver Upton
Date: Fri, 25 Oct 2024 18:23:48 +0000
Subject: KVM: arm64: nv: Adjust range of accessible PMCs according to HPMN

The value of MDCR_EL2.HPMN controls the number of event counters made
visible to EL0 and EL1. This means it is possible for the guest
hypervisor to allow direct access to event counters to the L2.

Rework KVM's PMU register emulation to take the effects of HPMN into
account when handling a trap. For bitmask-style registers, writes only
affect accessible registers.
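The intended derivation can be sketched as follows. This is
illustrative only: the helper name and parameters are invented for the
example, and the in-tree code pulls the number of implemented counters
and HPMN out of PMCR_EL0.N and MDCR_EL2 rather than taking them as
arguments.

  #include <linux/bits.h>
  #include <linux/types.h>

  /*
   * Sketch only: mask of counters a given context may access. With N
   * implemented event counters, HPMN reserves indices [HPMN, N) for
   * EL2/EL3; bit 31 stands for the cycle counter, which HPMN never
   * hides.
   */
  static u64 accessible_counter_mask(unsigned int nr_counters,
                                     unsigned int hpmn, bool owns_hyp_range)
  {
          unsigned int limit = owns_hyp_range ? nr_counters : hpmn;
          u64 mask = BIT_ULL(31);

          if (limit)
                  mask |= GENMASK_ULL(limit - 1, 0);

          return mask;
  }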
Reviewed-by: Marc Zyngier
Link: https://lore.kernel.org/r/20241025182354.3364124-14-oliver.upton@linux.dev
Signed-off-by: Oliver Upton
---
 include/kvm/arm_pmu.h | 5 +++++
 1 file changed, 5 insertions(+)

(limited to 'include')

diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 019ace2bb914..76244f0bd47a 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -48,6 +48,7 @@ static __always_inline bool kvm_arm_support_pmu_v3(void)
 u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
 void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
 u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu);
+u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu);
 u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
 void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
 void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
@@ -118,6 +119,10 @@ static inline u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
 {
 	return 0;
 }
+static inline u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
 static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
--
cgit v1.2.3

From ae323e035801def145776dddf46c01ca1b90d21d Mon Sep 17 00:00:00 2001
From: Oliver Upton
Date: Fri, 25 Oct 2024 18:25:59 +0000
Subject: KVM: arm64: nv: Reprogram PMU events affected by nested transition

Start reprogramming PMU events at nested boundaries now that everything
is in place to handle the EL2 event filter. Only repaint events where
the filter differs between EL1 and EL2 as a slight optimization.

PMU now 'works' for nested VMs, albeit slow.

Reviewed-by: Marc Zyngier
Link: https://lore.kernel.org/r/20241025182559.3364829-1-oliver.upton@linux.dev
Signed-off-by: Oliver Upton
---
 include/kvm/arm_pmu.h | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'include')

diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 76244f0bd47a..e61dd7dd2286 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -98,6 +98,7 @@ u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm);
 
 u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu);
 bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx);
+void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu);
 #else
 struct kvm_pmu {
 };
@@ -198,6 +199,8 @@ static inline bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int id
 {
 	return false;
 }
+static inline void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu) {}
+
 #endif
 
 #endif
--
cgit v1.2.3
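To illustrate the shape of the reprogramming described in the last
patch above: the sketch below is a toy model, not the in-tree
kvm_pmu_nested_transition() implementation; the structure and the
reprogram callback are invented so the example stands alone.

  #include <linux/bitops.h>
  #include <linux/types.h>

  /* Toy model: per-counter event filter state as seen from each EL. */
  struct toy_pmu_state {
          unsigned long active;          /* bitmap of in-use counters */
          u8 filter_el1[32];             /* filter applied for the EL1 view */
          u8 filter_el2[32];             /* filter applied for the EL2 view */
  };

  /*
   * Sketch only: on a nested transition, reprogram just the counters
   * whose filter differs between the EL1 and EL2 views; counters with
   * identical filters are left alone as an optimization.
   */
  static void toy_pmu_nested_transition(struct toy_pmu_state *pmu,
                                        void (*reprogram)(unsigned int idx))
  {
          unsigned int i;

          for_each_set_bit(i, &pmu->active, 32) {
                  if (pmu->filter_el1[i] == pmu->filter_el2[i])
                          continue;

                  reprogram(i);
          }
  }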