author		Naohiko Shimizu <naohiko.shimizu@gmail.com>	2026-01-04 22:59:37 +0900
committer	Paul Walmsley <pjw@kernel.org>			2026-01-14 17:43:07 -0700
commit		75870639bf5d1c447ddba4d738ff72771a69f2a1 (patch)
tree		24fb652566302368c006b4fbec9f5e58b2e1409c
parent		eaa9bb1d39d59e7c17b06cec12622b7c586ab629 (diff)
riscv: kvm: Fix vstimecmp update hazard on RV32
On RV32, updating the 64-bit stimecmp (or vstimecmp) CSR requires two
separate 32-bit writes, so a race exists if the timer can fire between
them. The RISC-V Privileged Specification (e.g. Section 3.2.1 for
mtimecmp) recommends a 3-step sequence to avoid spurious interrupts
when updating 64-bit comparison registers on 32-bit systems:

1. Set the low-order bits (stimecmp) to all ones (ULONG_MAX).
2. Set the high-order bits (stimecmph) to the desired value.
3. Set the low-order bits (stimecmp) to the desired value.

The current implementation writes the low word first without guarding
the intermediate state, which can leave a transient 64-bit value that
the hardware evaluates as already "expired", resulting in spurious
timer interrupts.

Adopt the spec-recommended 3-step sequence so that the intermediate
64-bit comparand is never smaller than either the old or the new
value.

Fixes: 8f5cb44b1bae ("RISC-V: KVM: Support sstc extension")
Signed-off-by: Naohiko Shimizu <naohiko.shimizu@gmail.com>
Reviewed-by: Anup Patel <anup@brainfault.org>
Link: https://patch.msgid.link/20260104135938.524-3-naohiko.shimizu@gmail.com
Signed-off-by: Paul Walmsley <pjw@kernel.org>
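For illustration, here is a minimal C sketch of the same 3-step sequence
applied to a memory-mapped mtimecmp register, as the spec frames it for
M-mode. The MTIMECMP_LO/MTIMECMP_HI addresses and the mtimecmp_write64()
helper are assumptions for this sketch (a typical CLINT layout), not the
CSR-based path touched by this patch:

  #include <stdint.h>

  /* Assumed CLINT layout for this sketch: hart 0 mtimecmp at 0x02004000. */
  #define MTIMECMP_LO ((volatile uint32_t *)0x02004000u)
  #define MTIMECMP_HI ((volatile uint32_t *)0x02004004u)

  static void mtimecmp_write64(uint64_t ncycles)
  {
          /* Step 1: low word to all ones; the intermediate value is no
           * smaller than the old comparand, so it cannot newly match. */
          *MTIMECMP_LO = 0xFFFFFFFFu;

          /* Step 2: high word to its final value; the intermediate value
           * is now no smaller than the new comparand either. */
          *MTIMECMP_HI = (uint32_t)(ncycles >> 32);

          /* Step 3: low word to its final value. */
          *MTIMECMP_LO = (uint32_t)ncycles;
  }

The diff below applies the same ordering to the vstimecmp/vstimecmph CSR
pair via ncsr_write().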
-rw-r--r--	arch/riscv/kvm/vcpu_timer.c	6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/arch/riscv/kvm/vcpu_timer.c b/arch/riscv/kvm/vcpu_timer.c
index 85a7262115e1..f36247e4c783 100644
--- a/arch/riscv/kvm/vcpu_timer.c
+++ b/arch/riscv/kvm/vcpu_timer.c
@@ -72,8 +72,9 @@ static int kvm_riscv_vcpu_timer_cancel(struct kvm_vcpu_timer *t)
 static int kvm_riscv_vcpu_update_vstimecmp(struct kvm_vcpu *vcpu, u64 ncycles)
 {
 #if defined(CONFIG_32BIT)
-	ncsr_write(CSR_VSTIMECMP, ncycles & 0xFFFFFFFF);
+	ncsr_write(CSR_VSTIMECMP, ULONG_MAX);
 	ncsr_write(CSR_VSTIMECMPH, ncycles >> 32);
+	ncsr_write(CSR_VSTIMECMP, (u32)ncycles);
 #else
 	ncsr_write(CSR_VSTIMECMP, ncycles);
 #endif
@@ -307,8 +308,9 @@ void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
 		return;

 #if defined(CONFIG_32BIT)
-	ncsr_write(CSR_VSTIMECMP, (u32)t->next_cycles);
+	ncsr_write(CSR_VSTIMECMP, ULONG_MAX);
 	ncsr_write(CSR_VSTIMECMPH, (u32)(t->next_cycles >> 32));
+	ncsr_write(CSR_VSTIMECMP, (u32)(t->next_cycles));
 #else
 	ncsr_write(CSR_VSTIMECMP, t->next_cycles);
 #endif