From de0c51370b7dca71825c5e3e4099ebc353a0df1a Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Fri, 6 Feb 2026 09:27:46 +0800 Subject: LoongArch: KVM: Add more CPUCFG mask bits With new CPU cores there are more features supported which are indicated in CPUCFG2 bits 24:30 and CPUCFG3 bits 17:23. The KVM hypervisor cannot enable or disable (most of) these features and there is no KVM exception when instructions of these features are executed in guest mode. Here add more CPUCFG mask support with LA664 CPU type. Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen --- arch/loongarch/kvm/vcpu.c | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 656b954c1134..2ef7bb0ad047 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -652,6 +652,8 @@ static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val) static int _kvm_get_cpucfg_mask(int id, u64 *v) { + unsigned int config; + if (id < 0 || id >= KVM_MAX_CPUCFG_REGS) return -EINVAL; @@ -684,9 +686,17 @@ static int _kvm_get_cpucfg_mask(int id, u64 *v) if (cpu_has_ptw) *v |= CPUCFG2_PTW; + config = read_cpucfg(LOONGARCH_CPUCFG2); + *v |= config & (CPUCFG2_FRECIPE | CPUCFG2_DIV32 | CPUCFG2_LAM_BH); + *v |= config & (CPUCFG2_LAMCAS | CPUCFG2_LLACQ_SCREL | CPUCFG2_SCQ); return 0; case LOONGARCH_CPUCFG3: - *v = GENMASK(16, 0); + *v = GENMASK(23, 0); + + /* VM does not support memory order and SFB setting */ + config = read_cpucfg(LOONGARCH_CPUCFG3); + *v &= config & ~(CPUCFG3_SFB); + *v &= config & ~(CPUCFG3_ALDORDER_CAP | CPUCFG3_ASTORDER_CAP | CPUCFG3_SLDORDER_CAP); return 0; case LOONGARCH_CPUCFG4: case LOONGARCH_CPUCFG5: @@ -717,6 +727,7 @@ static int _kvm_get_cpucfg_mask(int id, u64 *v) static int kvm_check_cpucfg(int id, u64 val) { int ret; + u32 host; u64 mask = 0; ret = _kvm_get_cpucfg_mask(id, &mask); @@ -746,9 +757,16 @@ static int kvm_check_cpucfg(int id, u64 val) /* LASX architecturally implies 
LSX and FP but val does not satisfy that */ return -EINVAL; return 0; + case LOONGARCH_CPUCFG3: + host = read_cpucfg(LOONGARCH_CPUCFG3); + if ((val & CPUCFG3_RVAMAX) > (host & CPUCFG3_RVAMAX)) + return -EINVAL; + if ((val & CPUCFG3_SPW_LVL) > (host & CPUCFG3_SPW_LVL)) + return -EINVAL; + return 0; case LOONGARCH_CPUCFG6: if (val & CPUCFG6_PMP) { - u32 host = read_cpucfg(LOONGARCH_CPUCFG6); + host = read_cpucfg(LOONGARCH_CPUCFG6); if ((val & CPUCFG6_PMBITS) != (host & CPUCFG6_PMBITS)) return -EINVAL; if ((val & CPUCFG6_PMNUM) > (host & CPUCFG6_PMNUM)) -- cgit v1.2.3 From 82db90bf461b0cfbb9457a2e721a781c14512e37 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Fri, 6 Feb 2026 09:27:46 +0800 Subject: LoongArch: KVM: Move feature detection in kvm_vm_init_features() VM feature detection is scattered across kvm_vm_init_features() and kvm_vm_feature_has_attr(). Here move all the feature detection into function kvm_vm_init_features(), and leave only feature checking in function kvm_vm_feature_has_attr(). 
Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen --- arch/loongarch/kvm/vm.c | 36 +++++++++++++++--------------------- 1 file changed, 15 insertions(+), 21 deletions(-) diff --git a/arch/loongarch/kvm/vm.c b/arch/loongarch/kvm/vm.c index 194ccbcdc3b3..d3ff6d6966f8 100644 --- a/arch/loongarch/kvm/vm.c +++ b/arch/loongarch/kvm/vm.c @@ -29,6 +29,21 @@ static void kvm_vm_init_features(struct kvm *kvm) { unsigned long val; + if (cpu_has_lsx) + kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_LSX); + if (cpu_has_lasx) + kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_LASX); + if (cpu_has_lbt_x86) + kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_X86BT); + if (cpu_has_lbt_arm) + kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_ARMBT); + if (cpu_has_lbt_mips) + kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_MIPSBT); + if (cpu_has_ptw) + kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PTW); + if (cpu_has_msgint) + kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_MSGINT); + val = read_csr_gcfg(); if (val & CSR_GCFG_GPMP) kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PMU); @@ -131,33 +146,12 @@ static int kvm_vm_feature_has_attr(struct kvm *kvm, struct kvm_device_attr *attr { switch (attr->attr) { case KVM_LOONGARCH_VM_FEAT_LSX: - if (cpu_has_lsx) - return 0; - return -ENXIO; case KVM_LOONGARCH_VM_FEAT_LASX: - if (cpu_has_lasx) - return 0; - return -ENXIO; case KVM_LOONGARCH_VM_FEAT_X86BT: - if (cpu_has_lbt_x86) - return 0; - return -ENXIO; case KVM_LOONGARCH_VM_FEAT_ARMBT: - if (cpu_has_lbt_arm) - return 0; - return -ENXIO; case KVM_LOONGARCH_VM_FEAT_MIPSBT: - if (cpu_has_lbt_mips) - return 0; - return -ENXIO; case KVM_LOONGARCH_VM_FEAT_PTW: - if (cpu_has_ptw) - return 0; - return -ENXIO; case KVM_LOONGARCH_VM_FEAT_MSGINT: - if (cpu_has_msgint) - return 0; - return -ENXIO; case KVM_LOONGARCH_VM_FEAT_PMU: case KVM_LOONGARCH_VM_FEAT_PV_IPI: case KVM_LOONGARCH_VM_FEAT_PV_STEALTIME: -- cgit v1.2.3 From 31966edb9a5d70b6a842da3ac7e5602df10bb4c2 
Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Fri, 6 Feb 2026 09:27:46 +0800 Subject: LoongArch: KVM: Add msgint registers in kvm_init_gcsr_flag() Add flag HW_GCSR with msgint registers in function kvm_init_gcsr_flag(). Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen --- arch/loongarch/kvm/main.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c index 80ea63d465b8..f3211fc447fd 100644 --- a/arch/loongarch/kvm/main.c +++ b/arch/loongarch/kvm/main.c @@ -192,6 +192,13 @@ static void kvm_init_gcsr_flag(void) set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR2); set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL3); set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR3); + + if (cpu_has_msgint) { + set_gcsr_hw_flag(LOONGARCH_CSR_ISR0); + set_gcsr_hw_flag(LOONGARCH_CSR_ISR1); + set_gcsr_hw_flag(LOONGARCH_CSR_ISR2); + set_gcsr_hw_flag(LOONGARCH_CSR_ISR3); + } } static void kvm_update_vpid(struct kvm_vcpu *vcpu, int cpu) -- cgit v1.2.3 From c2f94dafe197961f266fef8946d39df66a9750f4 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Fri, 6 Feb 2026 09:27:46 +0800 Subject: LoongArch: KVM: Check VM msgint feature during interrupt handling During message interrupt handling and relative CSR registers saving and restore, it is better to check VM msgint feature rather than host msgint feature, because VM may disable this feature even if host supports this. 
Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen --- arch/loongarch/include/asm/kvm_host.h | 5 +++++ arch/loongarch/kvm/interrupt.c | 4 ++-- arch/loongarch/kvm/vcpu.c | 6 ++++-- 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index e4fe5b8e8149..bced2d607849 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -265,6 +265,11 @@ static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, unsigned csr->csrs[reg] = val; } +static inline bool kvm_guest_has_msgint(struct kvm_vcpu_arch *arch) +{ + return arch->cpucfg[1] & CPUCFG1_MSGINT; +} + static inline bool kvm_guest_has_fpu(struct kvm_vcpu_arch *arch) { return arch->cpucfg[2] & CPUCFG2_FP; diff --git a/arch/loongarch/kvm/interrupt.c b/arch/loongarch/kvm/interrupt.c index a6d42d399a59..fb704f4c8ac5 100644 --- a/arch/loongarch/kvm/interrupt.c +++ b/arch/loongarch/kvm/interrupt.c @@ -32,7 +32,7 @@ static int kvm_irq_deliver(struct kvm_vcpu *vcpu, unsigned int priority) if (priority < EXCCODE_INT_NUM) irq = priority_to_irq[priority]; - if (cpu_has_msgint && (priority == INT_AVEC)) { + if (kvm_guest_has_msgint(&vcpu->arch) && (priority == INT_AVEC)) { set_gcsr_estat(irq); return 1; } @@ -64,7 +64,7 @@ static int kvm_irq_clear(struct kvm_vcpu *vcpu, unsigned int priority) if (priority < EXCCODE_INT_NUM) irq = priority_to_irq[priority]; - if (cpu_has_msgint && (priority == INT_AVEC)) { + if (kvm_guest_has_msgint(&vcpu->arch) && (priority == INT_AVEC)) { clear_gcsr_estat(irq); return 1; } diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 2ef7bb0ad047..948a3d809aac 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -1679,7 +1679,8 @@ static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2); kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3); kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL); 
- if (cpu_has_msgint) { + + if (kvm_guest_has_msgint(&vcpu->arch)) { kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR0); kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR1); kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR2); @@ -1774,7 +1775,8 @@ static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu) kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1); kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2); kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3); - if (cpu_has_msgint) { + + if (kvm_guest_has_msgint(&vcpu->arch)) { kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR0); kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR1); kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR2); -- cgit v1.2.3 From c5cb12b81a0bddbff0f963662f18747b6d633592 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Fri, 6 Feb 2026 09:27:46 +0800 Subject: LoongArch: KVM: Handle LOONGARCH_CSR_IPR during vCPU context switch Register LOONGARCH_CSR_IPR is the interrupt priority setting for nested interrupt handling. Though the LoongArch Linux AVEC driver does not use this register, the KVM hypervisor needs to save and restore it during vCPU context switch, because the Linux AVEC driver may use this register in the future, or other OSes may use it. 
Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen --- arch/loongarch/include/asm/loongarch.h | 1 + arch/loongarch/kvm/main.c | 1 + arch/loongarch/kvm/vcpu.c | 2 ++ 3 files changed, 4 insertions(+) diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h index 553c4dc7a156..2a6bc99177d8 100644 --- a/arch/loongarch/include/asm/loongarch.h +++ b/arch/loongarch/include/asm/loongarch.h @@ -690,6 +690,7 @@ #define LOONGARCH_CSR_ISR3 0xa3 #define LOONGARCH_CSR_IRR 0xa4 +#define LOONGARCH_CSR_IPR 0xa5 #define LOONGARCH_CSR_PRID 0xc0 diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c index f3211fc447fd..d1c5156e02d8 100644 --- a/arch/loongarch/kvm/main.c +++ b/arch/loongarch/kvm/main.c @@ -194,6 +194,7 @@ static void kvm_init_gcsr_flag(void) set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR3); if (cpu_has_msgint) { + set_gcsr_hw_flag(LOONGARCH_CSR_IPR); set_gcsr_hw_flag(LOONGARCH_CSR_ISR0); set_gcsr_hw_flag(LOONGARCH_CSR_ISR1); set_gcsr_hw_flag(LOONGARCH_CSR_ISR2); diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 948a3d809aac..fd3a2e60c670 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -1681,6 +1681,7 @@ static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL); if (kvm_guest_has_msgint(&vcpu->arch)) { + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_IPR); kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR0); kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR1); kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR2); @@ -1777,6 +1778,7 @@ static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu) kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3); if (kvm_guest_has_msgint(&vcpu->arch)) { + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_IPR); kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR0); kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR1); kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR2); -- cgit v1.2.3 From 89b5dc53971328934fcb6a68bf75e4b76fc59ef0 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Fri, 6 Feb 2026 
09:27:47 +0800 Subject: LoongArch: KVM: Move LSX capability check in exception handler Like FPU exception handler, check LSX capability in the LSX exception handler rather than function kvm_own_lsx(). Since LSX capability in the function kvm_guest_has_lsx() implies FPU capability, only checking kvm_guest_has_lsx() is OK here. Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen --- arch/loongarch/kvm/exit.c | 4 +++- arch/loongarch/kvm/vcpu.c | 3 --- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index cb493980d874..76eec3f24953 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -792,8 +792,10 @@ static long kvm_save_notify(struct kvm_vcpu *vcpu) */ static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu, int ecode) { - if (kvm_own_lsx(vcpu)) + if (!kvm_guest_has_lsx(&vcpu->arch)) kvm_queue_exception(vcpu, EXCCODE_INE, 0); + else + kvm_own_lsx(vcpu); return RESUME_GUEST; } diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index fd3a2e60c670..0fa3981ec7ed 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -1376,9 +1376,6 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu) /* Enable LSX and restore context */ int kvm_own_lsx(struct kvm_vcpu *vcpu) { - if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch)) - return -EINVAL; - preempt_disable(); /* Enable LSX for guest */ -- cgit v1.2.3 From 37da26e0e8396def86ff4ca70ece0a5f5619dbd4 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Fri, 6 Feb 2026 09:27:47 +0800 Subject: LoongArch: KVM: Move LASX capability check in exception handler Like FPU exception handler, check LASX capability in the LASX exception handler rather than function kvm_own_lasx(). Since LASX capability in the function kvm_guest_has_lasx() implies FPU and LSX capability, only checking kvm_guest_has_lasx() is OK here. 
Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen --- arch/loongarch/kvm/exit.c | 4 +++- arch/loongarch/kvm/vcpu.c | 3 --- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index 76eec3f24953..74b427287e96 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -810,8 +810,10 @@ static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu, int ecode) */ static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu, int ecode) { - if (kvm_own_lasx(vcpu)) + if (!kvm_guest_has_lasx(&vcpu->arch)) kvm_queue_exception(vcpu, EXCCODE_INE, 0); + else + kvm_own_lasx(vcpu); return RESUME_GUEST; } diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 0fa3981ec7ed..53d720a15617 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -1409,9 +1409,6 @@ int kvm_own_lsx(struct kvm_vcpu *vcpu) /* Enable LASX and restore context */ int kvm_own_lasx(struct kvm_vcpu *vcpu) { - if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcpu->arch)) - return -EINVAL; - preempt_disable(); kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr); -- cgit v1.2.3 From b1388a9598fbe77b193cbb8368eed46e982212c5 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Fri, 6 Feb 2026 09:27:47 +0800 Subject: LoongArch: KVM: Move LBT capability check in exception handler Like FPU exception handler, check LBT capability in the LBT exception handler rather than function kvm_own_lbt(). 
Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen --- arch/loongarch/kvm/exit.c | 4 +++- arch/loongarch/kvm/vcpu.c | 3 --- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index 74b427287e96..65ec10a7245a 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -820,8 +820,10 @@ static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu, int ecode) static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu, int ecode) { - if (kvm_own_lbt(vcpu)) + if (!kvm_guest_has_lbt(&vcpu->arch)) kvm_queue_exception(vcpu, EXCCODE_INE, 0); + else + kvm_own_lbt(vcpu); return RESUME_GUEST; } diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 53d720a15617..07c427a5e156 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -1304,9 +1304,6 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) #ifdef CONFIG_CPU_HAS_LBT int kvm_own_lbt(struct kvm_vcpu *vcpu) { - if (!kvm_guest_has_lbt(&vcpu->arch)) - return -EINVAL; - preempt_disable(); if (!(vcpu->arch.aux_inuse & KVM_LARCH_LBT)) { set_csr_euen(CSR_EUEN_LBTEN); -- cgit v1.2.3 From 382c38c9ec94fe87ffd6c9b456fa8ce67943cf1b Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Fri, 6 Feb 2026 09:28:00 +0800 Subject: LoongArch: KVM: Add FPU/LBT delay load support FPU/LBT are lazy enabled with KVM hypervisor. After FPU/LBT enabled and loaded, vCPU can be preempted and FPU/LBT will be lost again, there will be unnecessary FPU/LBT exceptions, load and store stuff. Here delay the FPU/LBT load until the guest entry. 
Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen --- arch/loongarch/include/asm/kvm_host.h | 2 ++ arch/loongarch/kvm/exit.c | 21 ++++++++++++++------- arch/loongarch/kvm/vcpu.c | 33 +++++++++++++++++++++------------ 3 files changed, 37 insertions(+), 19 deletions(-) diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index bced2d607849..4a7e816fae84 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -37,6 +37,7 @@ #define KVM_REQ_TLB_FLUSH_GPA KVM_ARCH_REQ(0) #define KVM_REQ_STEAL_UPDATE KVM_ARCH_REQ(1) #define KVM_REQ_PMU KVM_ARCH_REQ(2) +#define KVM_REQ_AUX_LOAD KVM_ARCH_REQ(3) #define KVM_GUESTDBG_SW_BP_MASK \ (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP) @@ -200,6 +201,7 @@ struct kvm_vcpu_arch { /* Which auxiliary state is loaded (KVM_LARCH_*) */ unsigned int aux_inuse; + unsigned int aux_ldtype; /* FPU state */ struct loongarch_fpu fpu FPU_ALIGN; diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index 65ec10a7245a..da0ad89f2eb7 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -754,7 +754,8 @@ static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu, int ecode) return RESUME_HOST; } - kvm_own_fpu(vcpu); + vcpu->arch.aux_ldtype = KVM_LARCH_FPU; + kvm_make_request(KVM_REQ_AUX_LOAD, vcpu); return RESUME_GUEST; } @@ -794,8 +795,10 @@ static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu, int ecode) { if (!kvm_guest_has_lsx(&vcpu->arch)) kvm_queue_exception(vcpu, EXCCODE_INE, 0); - else - kvm_own_lsx(vcpu); + else { + vcpu->arch.aux_ldtype = KVM_LARCH_LSX; + kvm_make_request(KVM_REQ_AUX_LOAD, vcpu); + } return RESUME_GUEST; } @@ -812,8 +815,10 @@ static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu, int ecode) { if (!kvm_guest_has_lasx(&vcpu->arch)) kvm_queue_exception(vcpu, EXCCODE_INE, 0); - else - kvm_own_lasx(vcpu); + else { + vcpu->arch.aux_ldtype = KVM_LARCH_LASX; + kvm_make_request(KVM_REQ_AUX_LOAD, vcpu); + } return 
RESUME_GUEST; } @@ -822,8 +827,10 @@ static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu, int ecode) { if (!kvm_guest_has_lbt(&vcpu->arch)) kvm_queue_exception(vcpu, EXCCODE_INE, 0); - else - kvm_own_lbt(vcpu); + else { + vcpu->arch.aux_ldtype = KVM_LARCH_LBT; + kvm_make_request(KVM_REQ_AUX_LOAD, vcpu); + } return RESUME_GUEST; } diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 07c427a5e156..4f0d10f52b99 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -232,6 +232,27 @@ static void kvm_late_check_requests(struct kvm_vcpu *vcpu) kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa); vcpu->arch.flush_gpa = INVALID_GPA; } + + if (kvm_check_request(KVM_REQ_AUX_LOAD, vcpu)) { + switch (vcpu->arch.aux_ldtype) { + case KVM_LARCH_FPU: + kvm_own_fpu(vcpu); + break; + case KVM_LARCH_LSX: + kvm_own_lsx(vcpu); + break; + case KVM_LARCH_LASX: + kvm_own_lasx(vcpu); + break; + case KVM_LARCH_LBT: + kvm_own_lbt(vcpu); + break; + default: + break; + } + + vcpu->arch.aux_ldtype = 0; + } } /* @@ -1304,13 +1325,11 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) #ifdef CONFIG_CPU_HAS_LBT int kvm_own_lbt(struct kvm_vcpu *vcpu) { - preempt_disable(); if (!(vcpu->arch.aux_inuse & KVM_LARCH_LBT)) { set_csr_euen(CSR_EUEN_LBTEN); _restore_lbt(&vcpu->arch.lbt); vcpu->arch.aux_inuse |= KVM_LARCH_LBT; } - preempt_enable(); return 0; } @@ -1353,8 +1372,6 @@ static inline void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu) { } /* Enable FPU and restore context */ void kvm_own_fpu(struct kvm_vcpu *vcpu) { - preempt_disable(); - /* * Enable FPU for guest * Set FR and FRE according to guest context @@ -1365,16 +1382,12 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu) kvm_restore_fpu(&vcpu->arch.fpu); vcpu->arch.aux_inuse |= KVM_LARCH_FPU; trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU); - - preempt_enable(); } #ifdef CONFIG_CPU_HAS_LSX /* Enable LSX and restore context */ int kvm_own_lsx(struct kvm_vcpu *vcpu) { - 
preempt_disable(); - /* Enable LSX for guest */ kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr); set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN); @@ -1396,7 +1409,6 @@ int kvm_own_lsx(struct kvm_vcpu *vcpu) trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX); vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU; - preempt_enable(); return 0; } @@ -1406,8 +1418,6 @@ int kvm_own_lsx(struct kvm_vcpu *vcpu) /* Enable LASX and restore context */ int kvm_own_lasx(struct kvm_vcpu *vcpu) { - preempt_disable(); - kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr); set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN); switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) { @@ -1429,7 +1439,6 @@ int kvm_own_lasx(struct kvm_vcpu *vcpu) trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX); vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU; - preempt_enable(); return 0; } -- cgit v1.2.3 From 2faec60a4858bd7fe1bc1419963c6bf030e85706 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Fri, 6 Feb 2026 09:28:00 +0800 Subject: LoongArch: KVM: Set default return value in KVM IO bus ops When in-kernel irqchip is enabled, its register area is registered in the KVM IO bus list with API kvm_io_bus_register_dev(). In MMIO/IOCSR register access emulation, kvm_io_bus_read()/kvm_io_bus_write() is called firstly. If it returns 0, it means that the in-kernel irqchip handles the emulation already, else it returns to user-mode VMM and lets VMM emulate the register access. Once in-kernel irqchip is enabled, it should return 0 if the address is within range of the registered KVM IO bus. It should not return to user-mode VMM since VMM does not know how to handle it, and irqchip is handled in kernel already. Here set default return value with 0 in KVM IO bus operations. 
Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen --- arch/loongarch/kvm/intc/eiointc.c | 43 ++++++++++++++++----------------------- arch/loongarch/kvm/intc/ipi.c | 26 ++++++++--------------- arch/loongarch/kvm/intc/pch_pic.c | 31 +++++++++++++--------------- 3 files changed, 39 insertions(+), 61 deletions(-) diff --git a/arch/loongarch/kvm/intc/eiointc.c b/arch/loongarch/kvm/intc/eiointc.c index dfaf6ccfdd8b..e498a3f1e136 100644 --- a/arch/loongarch/kvm/intc/eiointc.c +++ b/arch/loongarch/kvm/intc/eiointc.c @@ -119,7 +119,7 @@ void eiointc_set_irq(struct loongarch_eiointc *s, int irq, int level) static int loongarch_eiointc_read(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s, gpa_t addr, unsigned long *val) { - int index, ret = 0; + int index; u64 data = 0; gpa_t offset; @@ -150,40 +150,36 @@ static int loongarch_eiointc_read(struct kvm_vcpu *vcpu, struct loongarch_eioint data = s->coremap[index]; break; default: - ret = -EINVAL; break; } *val = data; - return ret; + return 0; } static int kvm_eiointc_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, gpa_t addr, int len, void *val) { - int ret = -EINVAL; unsigned long flags, data, offset; struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc; if (!eiointc) { kvm_err("%s: eiointc irqchip not valid!\n", __func__); - return -EINVAL; + return 0; } if (addr & (len - 1)) { kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len); - return -EINVAL; + return 0; } offset = addr & 0x7; addr -= offset; vcpu->stat.eiointc_read_exits++; spin_lock_irqsave(&eiointc->lock, flags); - ret = loongarch_eiointc_read(vcpu, eiointc, addr, &data); + loongarch_eiointc_read(vcpu, eiointc, addr, &data); spin_unlock_irqrestore(&eiointc->lock, flags); - if (ret) - return ret; data = data >> (offset * 8); switch (len) { @@ -208,7 +204,7 @@ static int loongarch_eiointc_write(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s, gpa_t addr, u64 value, u64 field_mask) { - int index, irq, ret = 0; + int index, irq; 
u8 cpu; u64 data, old, mask; gpa_t offset; @@ -287,29 +283,27 @@ static int loongarch_eiointc_write(struct kvm_vcpu *vcpu, eiointc_update_sw_coremap(s, index * 8, data, sizeof(data), true); break; default: - ret = -EINVAL; break; } - return ret; + return 0; } static int kvm_eiointc_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, gpa_t addr, int len, const void *val) { - int ret = -EINVAL; unsigned long flags, value; struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc; if (!eiointc) { kvm_err("%s: eiointc irqchip not valid!\n", __func__); - return -EINVAL; + return 0; } if (addr & (len - 1)) { kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len); - return -EINVAL; + return 0; } vcpu->stat.eiointc_write_exits++; @@ -317,24 +311,24 @@ static int kvm_eiointc_write(struct kvm_vcpu *vcpu, switch (len) { case 1: value = *(unsigned char *)val; - ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, 0xFF); + loongarch_eiointc_write(vcpu, eiointc, addr, value, 0xFF); break; case 2: value = *(unsigned short *)val; - ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, USHRT_MAX); + loongarch_eiointc_write(vcpu, eiointc, addr, value, USHRT_MAX); break; case 4: value = *(unsigned int *)val; - ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, UINT_MAX); + loongarch_eiointc_write(vcpu, eiointc, addr, value, UINT_MAX); break; default: value = *(unsigned long *)val; - ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, ULONG_MAX); + loongarch_eiointc_write(vcpu, eiointc, addr, value, ULONG_MAX); break; } spin_unlock_irqrestore(&eiointc->lock, flags); - return ret; + return 0; } static const struct kvm_io_device_ops kvm_eiointc_ops = { @@ -352,7 +346,7 @@ static int kvm_eiointc_virt_read(struct kvm_vcpu *vcpu, if (!eiointc) { kvm_err("%s: eiointc irqchip not valid!\n", __func__); - return -EINVAL; + return 0; } addr -= EIOINTC_VIRT_BASE; @@ -376,28 +370,25 @@ static int kvm_eiointc_virt_write(struct kvm_vcpu *vcpu, struct 
kvm_io_device *dev, gpa_t addr, int len, const void *val) { - int ret = 0; unsigned long flags; u32 value = *(u32 *)val; struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc; if (!eiointc) { kvm_err("%s: eiointc irqchip not valid!\n", __func__); - return -EINVAL; + return 0; } addr -= EIOINTC_VIRT_BASE; spin_lock_irqsave(&eiointc->lock, flags); switch (addr) { case EIOINTC_VIRT_FEATURES: - ret = -EPERM; break; case EIOINTC_VIRT_CONFIG: /* * eiointc features can only be set at disabled status */ if ((eiointc->status & BIT(EIOINTC_ENABLE)) && value) { - ret = -EPERM; break; } eiointc->status = value & eiointc->features; @@ -407,7 +398,7 @@ static int kvm_eiointc_virt_write(struct kvm_vcpu *vcpu, } spin_unlock_irqrestore(&eiointc->lock, flags); - return ret; + return 0; } static const struct kvm_io_device_ops kvm_eiointc_virt_ops = { diff --git a/arch/loongarch/kvm/intc/ipi.c b/arch/loongarch/kvm/intc/ipi.c index 1058c13dba7f..6a044a74c095 100644 --- a/arch/loongarch/kvm/intc/ipi.c +++ b/arch/loongarch/kvm/intc/ipi.c @@ -111,7 +111,7 @@ static int mail_send(struct kvm *kvm, uint64_t data) vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu); if (unlikely(vcpu == NULL)) { kvm_err("%s: invalid target cpu: %d\n", __func__, cpu); - return -EINVAL; + return 0; } mailbox = ((data & 0xffffffff) >> 2) & 0x7; offset = IOCSR_IPI_BUF_20 + mailbox * 4; @@ -145,7 +145,7 @@ static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data) srcu_read_unlock(&vcpu->kvm->srcu, idx); if (unlikely(ret)) { kvm_err("%s: : read data from addr %llx failed\n", __func__, addr); - return ret; + return 0; } /* Construct the mask by scanning the bit 27-30 */ for (i = 0; i < 4; i++) { @@ -162,7 +162,7 @@ static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data) if (unlikely(ret)) kvm_err("%s: : write data to addr %llx failed\n", __func__, addr); - return ret; + return 0; } static int any_send(struct kvm *kvm, uint64_t data) @@ -174,7 +174,7 @@ static int any_send(struct kvm 
*kvm, uint64_t data) vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu); if (unlikely(vcpu == NULL)) { kvm_err("%s: invalid target cpu: %d\n", __func__, cpu); - return -EINVAL; + return 0; } offset = data & 0xffff; @@ -183,7 +183,6 @@ static int any_send(struct kvm *kvm, uint64_t data) static int loongarch_ipi_readl(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *val) { - int ret = 0; uint32_t offset; uint64_t res = 0; @@ -202,33 +201,27 @@ static int loongarch_ipi_readl(struct kvm_vcpu *vcpu, gpa_t addr, int len, void spin_unlock(&vcpu->arch.ipi_state.lock); break; case IOCSR_IPI_SET: - res = 0; - break; case IOCSR_IPI_CLEAR: - res = 0; break; case IOCSR_IPI_BUF_20 ... IOCSR_IPI_BUF_38 + 7: if (offset + len > IOCSR_IPI_BUF_38 + 8) { kvm_err("%s: invalid offset or len: offset = %d, len = %d\n", __func__, offset, len); - ret = -EINVAL; break; } res = read_mailbox(vcpu, offset, len); break; default: kvm_err("%s: unknown addr: %llx\n", __func__, addr); - ret = -EINVAL; break; } *(uint64_t *)val = res; - return ret; + return 0; } static int loongarch_ipi_writel(struct kvm_vcpu *vcpu, gpa_t addr, int len, const void *val) { - int ret = 0; uint64_t data; uint32_t offset; @@ -239,7 +232,6 @@ static int loongarch_ipi_writel(struct kvm_vcpu *vcpu, gpa_t addr, int len, cons switch (offset) { case IOCSR_IPI_STATUS: - ret = -EINVAL; break; case IOCSR_IPI_EN: spin_lock(&vcpu->arch.ipi_state.lock); @@ -257,7 +249,6 @@ static int loongarch_ipi_writel(struct kvm_vcpu *vcpu, gpa_t addr, int len, cons if (offset + len > IOCSR_IPI_BUF_38 + 8) { kvm_err("%s: invalid offset or len: offset = %d, len = %d\n", __func__, offset, len); - ret = -EINVAL; break; } write_mailbox(vcpu, offset, data, len); @@ -266,18 +257,17 @@ static int loongarch_ipi_writel(struct kvm_vcpu *vcpu, gpa_t addr, int len, cons ipi_send(vcpu->kvm, data); break; case IOCSR_MAIL_SEND: - ret = mail_send(vcpu->kvm, data); + mail_send(vcpu->kvm, data); break; case IOCSR_ANY_SEND: - ret = any_send(vcpu->kvm, data); + 
any_send(vcpu->kvm, data); break; default: kvm_err("%s: unknown addr: %llx\n", __func__, addr); - ret = -EINVAL; break; } - return ret; + return 0; } static int kvm_ipi_read(struct kvm_vcpu *vcpu, diff --git a/arch/loongarch/kvm/intc/pch_pic.c b/arch/loongarch/kvm/intc/pch_pic.c index 4addb34bf432..a175f52fcf7f 100644 --- a/arch/loongarch/kvm/intc/pch_pic.c +++ b/arch/loongarch/kvm/intc/pch_pic.c @@ -74,7 +74,7 @@ void pch_msi_set_irq(struct kvm *kvm, int irq, int level) static int loongarch_pch_pic_read(struct loongarch_pch_pic *s, gpa_t addr, int len, void *val) { - int ret = 0, offset; + int offset; u64 data = 0; void *ptemp; @@ -121,34 +121,32 @@ static int loongarch_pch_pic_read(struct loongarch_pch_pic *s, gpa_t addr, int l data = s->isr; break; default: - ret = -EINVAL; + break; } spin_unlock(&s->lock); - if (ret == 0) { - offset = (addr - s->pch_pic_base) & 7; - data = data >> (offset * 8); - memcpy(val, &data, len); - } + offset = (addr - s->pch_pic_base) & 7; + data = data >> (offset * 8); + memcpy(val, &data, len); - return ret; + return 0; } static int kvm_pch_pic_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, gpa_t addr, int len, void *val) { - int ret; + int ret = 0; struct loongarch_pch_pic *s = vcpu->kvm->arch.pch_pic; if (!s) { kvm_err("%s: pch pic irqchip not valid!\n", __func__); - return -EINVAL; + return ret; } if (addr & (len - 1)) { kvm_err("%s: pch pic not aligned addr %llx len %d\n", __func__, addr, len); - return -EINVAL; + return ret; } /* statistics of pch pic reading */ @@ -161,7 +159,7 @@ static int kvm_pch_pic_read(struct kvm_vcpu *vcpu, static int loongarch_pch_pic_write(struct loongarch_pch_pic *s, gpa_t addr, int len, const void *val) { - int ret = 0, offset; + int offset; u64 old, data, mask; void *ptemp; @@ -226,29 +224,28 @@ static int loongarch_pch_pic_write(struct loongarch_pch_pic *s, gpa_t addr, case PCH_PIC_ROUTE_ENTRY_START ... 
PCH_PIC_ROUTE_ENTRY_END: break; default: - ret = -EINVAL; break; } spin_unlock(&s->lock); - return ret; + return 0; } static int kvm_pch_pic_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, gpa_t addr, int len, const void *val) { - int ret; + int ret = 0; struct loongarch_pch_pic *s = vcpu->kvm->arch.pch_pic; if (!s) { kvm_err("%s: pch pic irqchip not valid!\n", __func__); - return -EINVAL; + return ret; } if (addr & (len - 1)) { kvm_err("%s: pch pic not aligned addr %llx len %d\n", __func__, addr, len); - return -EINVAL; + return ret; } /* statistics of pch pic writing */ -- cgit v1.2.3 From 9b486cdd032a90032c6b567ea723595205ca2626 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Fri, 6 Feb 2026 09:28:01 +0800 Subject: LoongArch: KVM: Add paravirt preempt feature in hypervisor side Feature KVM_FEATURE_PREEMPT is added to show whether vCPU is preempted or not. It is to help guest OS scheduling or lock checking etc. Here add KVM_FEATURE_PREEMPT feature and use one byte as preempted flag in the steal time structure. 
Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen --- arch/loongarch/include/asm/kvm_host.h | 2 ++ arch/loongarch/include/asm/kvm_para.h | 4 ++- arch/loongarch/include/uapi/asm/kvm.h | 1 + arch/loongarch/include/uapi/asm/kvm_para.h | 1 + arch/loongarch/kvm/vcpu.c | 53 +++++++++++++++++++++++++++++- arch/loongarch/kvm/vm.c | 3 ++ 6 files changed, 62 insertions(+), 2 deletions(-) diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index 4a7e816fae84..19eb5e5c3984 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -165,6 +165,7 @@ enum emulation_result { #define LOONGARCH_PV_FEAT_UPDATED BIT_ULL(63) #define LOONGARCH_PV_FEAT_MASK (BIT(KVM_FEATURE_IPI) | \ + BIT(KVM_FEATURE_PREEMPT) | \ BIT(KVM_FEATURE_STEAL_TIME) | \ BIT(KVM_FEATURE_USER_HCALL) | \ BIT(KVM_FEATURE_VIRT_EXTIOI)) @@ -254,6 +255,7 @@ struct kvm_vcpu_arch { u64 guest_addr; u64 last_steal; struct gfn_to_hva_cache cache; + u8 preempted; } st; }; diff --git a/arch/loongarch/include/asm/kvm_para.h b/arch/loongarch/include/asm/kvm_para.h index 3e4b397f423f..fb17ba0fa101 100644 --- a/arch/loongarch/include/asm/kvm_para.h +++ b/arch/loongarch/include/asm/kvm_para.h @@ -37,8 +37,10 @@ struct kvm_steal_time { __u64 steal; __u32 version; __u32 flags; - __u32 pad[12]; + __u8 preempted; + __u8 pad[47]; }; +#define KVM_VCPU_PREEMPTED (1 << 0) /* * Hypercall interface for KVM hypervisor diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h index de6c3f18e40a..419647aacdf3 100644 --- a/arch/loongarch/include/uapi/asm/kvm.h +++ b/arch/loongarch/include/uapi/asm/kvm.h @@ -105,6 +105,7 @@ struct kvm_fpu { #define KVM_LOONGARCH_VM_FEAT_PV_STEALTIME 7 #define KVM_LOONGARCH_VM_FEAT_PTW 8 #define KVM_LOONGARCH_VM_FEAT_MSGINT 9 +#define KVM_LOONGARCH_VM_FEAT_PV_PREEMPT 10 /* Device Control API on vcpu fd */ #define KVM_LOONGARCH_VCPU_CPUCFG 0 diff --git a/arch/loongarch/include/uapi/asm/kvm_para.h 
b/arch/loongarch/include/uapi/asm/kvm_para.h index 76d802ef01ce..d28cbcadd276 100644 --- a/arch/loongarch/include/uapi/asm/kvm_para.h +++ b/arch/loongarch/include/uapi/asm/kvm_para.h @@ -15,6 +15,7 @@ #define CPUCFG_KVM_FEATURE (CPUCFG_KVM_BASE + 4) #define KVM_FEATURE_IPI 1 #define KVM_FEATURE_STEAL_TIME 2 +#define KVM_FEATURE_PREEMPT 3 /* BIT 24 - 31 are features configurable by user space vmm */ #define KVM_FEATURE_VIRT_EXTIOI 24 #define KVM_FEATURE_USER_HCALL 25 diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 4f0d10f52b99..550c0d05666a 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -181,6 +181,11 @@ static void kvm_update_stolen_time(struct kvm_vcpu *vcpu) } st = (struct kvm_steal_time __user *)ghc->hva; + if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_PREEMPT)) { + unsafe_put_user(0, &st->preempted, out); + vcpu->arch.st.preempted = 0; + } + unsafe_get_user(version, &st->version, out); if (version & 1) version += 1; /* first time write, random junk */ @@ -1795,11 +1800,57 @@ out: return 0; } +static void kvm_vcpu_set_pv_preempted(struct kvm_vcpu *vcpu) +{ + gpa_t gpa; + struct gfn_to_hva_cache *ghc; + struct kvm_memslots *slots; + struct kvm_steal_time __user *st; + + gpa = vcpu->arch.st.guest_addr; + if (!(gpa & KVM_STEAL_PHYS_VALID)) + return; + + /* vCPU may be preempted for many times */ + if (vcpu->arch.st.preempted) + return; + + /* This happens on process exit */ + if (unlikely(current->mm != vcpu->kvm->mm)) + return; + + gpa &= KVM_STEAL_PHYS_MASK; + ghc = &vcpu->arch.st.cache; + slots = kvm_memslots(vcpu->kvm); + if (slots->generation != ghc->generation || gpa != ghc->gpa) { + if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) { + ghc->gpa = INVALID_GPA; + return; + } + } + + st = (struct kvm_steal_time __user *)ghc->hva; + unsafe_put_user(KVM_VCPU_PREEMPTED, &st->preempted, out); + vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED; +out: + mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, 
gpa_to_gfn(ghc->gpa)); +} + void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { - int cpu; + int cpu, idx; unsigned long flags; + if (vcpu->preempted && kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_PREEMPT)) { + /* + * Take the srcu lock as memslots will be accessed to check + * the gfn cache generation against the memslots generation. + */ + idx = srcu_read_lock(&vcpu->kvm->srcu); + kvm_vcpu_set_pv_preempted(vcpu); + srcu_read_unlock(&vcpu->kvm->srcu, idx); + } + local_irq_save(flags); cpu = smp_processor_id(); vcpu->arch.last_sched_cpu = cpu; diff --git a/arch/loongarch/kvm/vm.c b/arch/loongarch/kvm/vm.c index d3ff6d6966f8..9681ade890c6 100644 --- a/arch/loongarch/kvm/vm.c +++ b/arch/loongarch/kvm/vm.c @@ -52,7 +52,9 @@ static void kvm_vm_init_features(struct kvm *kvm) kvm->arch.pv_features = BIT(KVM_FEATURE_IPI); kvm->arch.kvm_features = BIT(KVM_LOONGARCH_VM_FEAT_PV_IPI); if (kvm_pvtime_supported()) { + kvm->arch.pv_features |= BIT(KVM_FEATURE_PREEMPT); kvm->arch.pv_features |= BIT(KVM_FEATURE_STEAL_TIME); + kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PV_PREEMPT); kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PV_STEALTIME); } } @@ -154,6 +156,7 @@ static int kvm_vm_feature_has_attr(struct kvm *kvm, struct kvm_device_attr *attr case KVM_LOONGARCH_VM_FEAT_MSGINT: case KVM_LOONGARCH_VM_FEAT_PMU: case KVM_LOONGARCH_VM_FEAT_PV_IPI: + case KVM_LOONGARCH_VM_FEAT_PV_PREEMPT: case KVM_LOONGARCH_VM_FEAT_PV_STEALTIME: if (kvm_vm_support(&kvm->arch, attr->attr)) return 0; -- cgit v1.2.3 From 872d277a1e2f8a8b83dc9f21151af9cb64e8f1ce Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Fri, 6 Feb 2026 09:28:01 +0800 Subject: LoongArch: KVM: Add paravirt vcpu_is_preempted() support in guest side Function vcpu_is_preempted() is used to check whether vCPU is preempted or not. Here add the implementation with vcpu_is_preempted() when option CONFIG_PARAVIRT is enabled. 
Acked-by: Juergen Gross Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen --- arch/loongarch/include/asm/qspinlock.h | 4 ++++ arch/loongarch/kernel/paravirt.c | 21 ++++++++++++++++++++- 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/arch/loongarch/include/asm/qspinlock.h b/arch/loongarch/include/asm/qspinlock.h index e76d3aa1e1eb..66244801db67 100644 --- a/arch/loongarch/include/asm/qspinlock.h +++ b/arch/loongarch/include/asm/qspinlock.h @@ -34,6 +34,10 @@ __retry: return true; } +#define vcpu_is_preempted vcpu_is_preempted + +bool vcpu_is_preempted(int cpu); + #endif /* CONFIG_PARAVIRT */ #include diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c index b1b51f920b23..7f1561906e10 100644 --- a/arch/loongarch/kernel/paravirt.c +++ b/arch/loongarch/kernel/paravirt.c @@ -12,6 +12,7 @@ static int has_steal_clock; struct static_key paravirt_steal_enabled; struct static_key paravirt_steal_rq_enabled; static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64); +static DEFINE_STATIC_KEY_FALSE(virt_preempt_key); DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key); static u64 native_steal_clock(int cpu) @@ -267,6 +268,18 @@ static int pv_time_cpu_down_prepare(unsigned int cpu) return 0; } + +bool vcpu_is_preempted(int cpu) +{ + struct kvm_steal_time *src; + + if (!static_branch_unlikely(&virt_preempt_key)) + return false; + + src = &per_cpu(steal_time, cpu); + return !!(src->preempted & KVM_VCPU_PREEMPTED); +} +EXPORT_SYMBOL(vcpu_is_preempted); #endif static void pv_cpu_reboot(void *unused) @@ -308,6 +321,9 @@ int __init pv_time_init(void) pr_err("Failed to install cpu hotplug callbacks\n"); return r; } + + if (kvm_para_has_feature(KVM_FEATURE_PREEMPT)) + static_branch_enable(&virt_preempt_key); #endif static_call_update(pv_steal_clock, paravt_steal_clock); @@ -318,7 +334,10 @@ int __init pv_time_init(void) static_key_slow_inc(¶virt_steal_rq_enabled); #endif - pr_info("Using paravirt steal-time\n"); + if 
(static_key_enabled(&virt_preempt_key)) + pr_info("Using paravirt steal-time with preempt enabled\n"); + else + pr_info("Using paravirt steal-time with preempt disabled\n"); return 0; } -- cgit v1.2.3 From 2d94a3f7088b69ae25e27fb98d7f1ef572c843f9 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Fri, 6 Feb 2026 09:28:01 +0800 Subject: KVM: LoongArch: selftests: Add steal time test case LoongArch KVM supports steal time accounting now, here add steal time test case on LoongArch. Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen --- tools/testing/selftests/kvm/Makefile.kvm | 1 + tools/testing/selftests/kvm/steal_time.c | 96 ++++++++++++++++++++++++++++++++ 2 files changed, 97 insertions(+) diff --git a/tools/testing/selftests/kvm/Makefile.kvm b/tools/testing/selftests/kvm/Makefile.kvm index ba5c2b643efa..a18c00f1a4fa 100644 --- a/tools/testing/selftests/kvm/Makefile.kvm +++ b/tools/testing/selftests/kvm/Makefile.kvm @@ -228,6 +228,7 @@ TEST_GEN_PROGS_loongarch += kvm_page_table_test TEST_GEN_PROGS_loongarch += memslot_modification_stress_test TEST_GEN_PROGS_loongarch += memslot_perf_test TEST_GEN_PROGS_loongarch += set_memory_region_test +TEST_GEN_PROGS_loongarch += steal_time SPLIT_TESTS += arch_timer SPLIT_TESTS += get-reg-list diff --git a/tools/testing/selftests/kvm/steal_time.c b/tools/testing/selftests/kvm/steal_time.c index 8edc1fca345b..7be8adfe5dd3 100644 --- a/tools/testing/selftests/kvm/steal_time.c +++ b/tools/testing/selftests/kvm/steal_time.c @@ -301,6 +301,102 @@ static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx) pr_info("\n"); } +#elif defined(__loongarch__) + +/* steal_time must have 64-byte alignment */ +#define STEAL_TIME_SIZE ((sizeof(struct kvm_steal_time) + 63) & ~63) +#define KVM_STEAL_PHYS_VALID BIT_ULL(0) + +struct kvm_steal_time { + __u64 steal; + __u32 version; + __u32 flags; + __u8 preempted; + __u8 pad[47]; +}; + +static void check_status(struct kvm_steal_time *st) +{ + GUEST_ASSERT(!(READ_ONCE(st->version) & 1)); + 
GUEST_ASSERT_EQ(READ_ONCE(st->flags), 0); + GUEST_ASSERT_EQ(READ_ONCE(st->preempted), 0); +} + +static void guest_code(int cpu) +{ + uint32_t version; + struct kvm_steal_time *st = st_gva[cpu]; + + memset(st, 0, sizeof(*st)); + GUEST_SYNC(0); + + check_status(st); + WRITE_ONCE(guest_stolen_time[cpu], st->steal); + version = READ_ONCE(st->version); + check_status(st); + GUEST_SYNC(1); + + check_status(st); + GUEST_ASSERT(version < READ_ONCE(st->version)); + WRITE_ONCE(guest_stolen_time[cpu], st->steal); + check_status(st); + GUEST_DONE(); +} + +static bool is_steal_time_supported(struct kvm_vcpu *vcpu) +{ + int err; + uint64_t val; + struct kvm_device_attr attr = { + .group = KVM_LOONGARCH_VCPU_CPUCFG, + .attr = CPUCFG_KVM_FEATURE, + .addr = (uint64_t)&val, + }; + + err = __vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &attr); + if (err) + return false; + + err = __vcpu_ioctl(vcpu, KVM_GET_DEVICE_ATTR, &attr); + if (err) + return false; + + return val & BIT(KVM_FEATURE_STEAL_TIME); +} + +static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i) +{ + int err; + uint64_t st_gpa; + struct kvm_vm *vm = vcpu->vm; + struct kvm_device_attr attr = { + .group = KVM_LOONGARCH_VCPU_PVTIME_CTRL, + .attr = KVM_LOONGARCH_VCPU_PVTIME_GPA, + .addr = (uint64_t)&st_gpa, + }; + + /* ST_GPA_BASE is identity mapped */ + st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE); + sync_global_to_guest(vm, st_gva[i]); + + err = __vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &attr); + TEST_ASSERT(err == 0, "No PV stealtime Feature"); + + st_gpa = (unsigned long)st_gva[i] | KVM_STEAL_PHYS_VALID; + err = __vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &attr); + TEST_ASSERT(err == 0, "Fail to set PV stealtime GPA"); +} + +static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx) +{ + struct kvm_steal_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]); + + ksft_print_msg("VCPU%d:\n", vcpu_idx); + ksft_print_msg(" steal: %lld\n", st->steal); + ksft_print_msg(" flags: %d\n", st->flags); + ksft_print_msg(" 
version: %d\n", st->version); + ksft_print_msg(" preempted: %d\n", st->preempted); +} #endif static void *do_steal_time(void *arg) -- cgit v1.2.3