path: root/arch/x86/kvm/svm/sev.c
author    Paolo Bonzini <pbonzini@redhat.com>  2025-08-27 04:18:01 -0400
committer Paolo Bonzini <pbonzini@redhat.com>  2025-08-27 04:25:40 -0400
commit    0dc4a751507177af24a2529d57dbe4a37e45b30c (patch)
tree      bf3b8585d8c309ff40fedd3dc299d3c34a207e0c /arch/x86/kvm/svm/sev.c
parent    1b237f190eb3d36f52dffe07a40b5eb210280e00 (diff)
parent    dce1b33ed7430c7189b8cc1567498f9e6bf12731 (diff)
Merge tag 'kvm-x86-fixes-6.17-rc7' of https://github.com/kvm-x86/linux into HEAD
* Use array_index_nospec() to sanitize the target vCPU ID when handling PV
  IPIs and yields, as the ID is guest-controlled (a sketch of this pattern
  follows below).

* Drop a superfluous cpumask_empty() check when reclaiming SEV memory, as
  the common case, by far, is that at least one CPU will have entered the
  VM, and wbnoinvd_on_cpus_mask() will naturally handle the rare case where
  the set of have_run_cpus is empty.

* Rename the is_signed_type() macro in kselftest_harness.h to
  is_signed_var() to fix a collision with linux/overflow.h. The collision
  generates compiler warnings due to the two macros having different
  implementations.
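For context on the first item: array_index_nospec() from <linux/nospec.h>
clamps an index to a given bound even under speculative execution, so a
mispredicted bounds check cannot be used to read out of bounds. The sketch
below is illustrative only, not the actual patch; the helper name
pv_target_vcpu() and its surrounding code are hypothetical.

    #include <linux/kvm_host.h>
    #include <linux/nospec.h>

    /*
     * Hypothetical sketch: 'idx' comes from the guest, so clamp it before
     * using it to index the vCPU array. array_index_nospec() guarantees the
     * value stays within [0, nr) even on a mispredicted bounds check.
     */
    static struct kvm_vcpu *pv_target_vcpu(struct kvm *kvm, unsigned long idx)
    {
            unsigned int nr = atomic_read(&kvm->online_vcpus);

            if (idx >= nr)
                    return NULL;

            idx = array_index_nospec(idx, nr);
            return kvm_get_vcpu(kvm, idx);
    }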
Diffstat (limited to 'arch/x86/kvm/svm/sev.c')
-rw-r--r--  arch/x86/kvm/svm/sev.c  10
1 file changed, 3 insertions, 7 deletions
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 2fbdebf79fbb..0635bd71c10e 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -719,13 +719,6 @@ static void sev_clflush_pages(struct page *pages[], unsigned long npages)
static void sev_writeback_caches(struct kvm *kvm)
{
/*
- * Note, the caller is responsible for ensuring correctness if the mask
- * can be modified, e.g. if a CPU could be doing VMRUN.
- */
- if (cpumask_empty(to_kvm_sev_info(kvm)->have_run_cpus))
- return;
-
- /*
* Ensure that all dirty guest tagged cache entries are written back
* before releasing the pages back to the system for use. CLFLUSH will
* not do this without SME_COHERENT, and flushing many cache lines
@@ -739,6 +732,9 @@ static void sev_writeback_caches(struct kvm *kvm)
* serializing multiple calls and having responding CPUs (to the IPI)
* mark themselves as still running if they are running (or about to
* run) a vCPU for the VM.
+ *
+ * Note, the caller is responsible for ensuring correctness if the mask
+ * can be modified, e.g. if a CPU could be doing VMRUN.
*/
wbnoinvd_on_cpus_mask(to_kvm_sev_info(kvm)->have_run_cpus);
}
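For readability, the function after this merge reads roughly as follows.
This is reconstructed from the hunks above; the unchanged middle of the
block comment, which falls outside the diff context, is marked with an
ellipsis.

    static void sev_writeback_caches(struct kvm *kvm)
    {
            /*
             * Ensure that all dirty guest tagged cache entries are written back
             * before releasing the pages back to the system for use. CLFLUSH will
             * not do this without SME_COHERENT, and flushing many cache lines
             * ...
             * serializing multiple calls and having responding CPUs (to the IPI)
             * mark themselves as still running if they are running (or about to
             * run) a vCPU for the VM.
             *
             * Note, the caller is responsible for ensuring correctness if the mask
             * can be modified, e.g. if a CPU could be doing VMRUN.
             */
            wbnoinvd_on_cpus_mask(to_kvm_sev_info(kvm)->have_run_cpus);
    }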