-rw-r--r--   arch/x86/kvm/svm/sev.c   | 94
-rw-r--r--   arch/x86/kvm/vmx/tdx.c   |  2
-rw-r--r--   include/linux/kvm_host.h |  2
-rw-r--r--   virt/kvm/guest_memfd.c   | 30
4 files changed, 56 insertions(+), 72 deletions(-)
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 261d9ef8631b..a70bd3f19e29 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -2267,67 +2267,53 @@ struct sev_gmem_populate_args {
 	int fw_error;
 };
 
-static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn_start, kvm_pfn_t pfn,
-				  void __user *src, int order, void *opaque)
+static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
+				  void __user *src, void *opaque)
 {
 	struct sev_gmem_populate_args *sev_populate_args = opaque;
+	struct sev_data_snp_launch_update fw_args = {0};
 	struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
-	int n_private = 0, ret, i;
-	int npages = (1 << order);
-	gfn_t gfn;
+	bool assigned = false;
+	int level;
+	int ret;
 
 	if (WARN_ON_ONCE(sev_populate_args->type != KVM_SEV_SNP_PAGE_TYPE_ZERO && !src))
 		return -EINVAL;
 
-	for (gfn = gfn_start, i = 0; gfn < gfn_start + npages; gfn++, i++) {
-		struct sev_data_snp_launch_update fw_args = {0};
-		bool assigned = false;
-		int level;
-
-		ret = snp_lookup_rmpentry((u64)pfn + i, &assigned, &level);
-		if (ret || assigned) {
-			pr_debug("%s: Failed to ensure GFN 0x%llx RMP entry is initial shared state, ret: %d assigned: %d\n",
-				 __func__, gfn, ret, assigned);
-			ret = ret ? -EINVAL : -EEXIST;
-			goto err;
-		}
+	ret = snp_lookup_rmpentry((u64)pfn, &assigned, &level);
+	if (ret || assigned) {
+		pr_debug("%s: Failed to ensure GFN 0x%llx RMP entry is initial shared state, ret: %d assigned: %d\n",
+			 __func__, gfn, ret, assigned);
+		ret = ret ? -EINVAL : -EEXIST;
+		goto out;
+	}
 
-		if (src) {
-			void *vaddr = kmap_local_pfn(pfn + i);
+	if (src) {
+		void *vaddr = kmap_local_pfn(pfn);
 
-			if (copy_from_user(vaddr, src + i * PAGE_SIZE, PAGE_SIZE)) {
-				kunmap_local(vaddr);
-				ret = -EFAULT;
-				goto err;
-			}
+		if (copy_from_user(vaddr, src, PAGE_SIZE)) {
 			kunmap_local(vaddr);
+			ret = -EFAULT;
+			goto out;
 		}
-
-		ret = rmp_make_private(pfn + i, gfn << PAGE_SHIFT, PG_LEVEL_4K,
-				       sev_get_asid(kvm), true);
-		if (ret)
-			goto err;
-
-		n_private++;
-
-		fw_args.gctx_paddr = __psp_pa(sev->snp_context);
-		fw_args.address = __sme_set(pfn_to_hpa(pfn + i));
-		fw_args.page_size = PG_LEVEL_TO_RMP(PG_LEVEL_4K);
-		fw_args.page_type = sev_populate_args->type;
-
-		ret = __sev_issue_cmd(sev_populate_args->sev_fd, SEV_CMD_SNP_LAUNCH_UPDATE,
-				      &fw_args, &sev_populate_args->fw_error);
-		if (ret)
-			goto fw_err;
+		kunmap_local(vaddr);
 	}
 
-	return 0;
+	ret = rmp_make_private(pfn, gfn << PAGE_SHIFT, PG_LEVEL_4K,
+			       sev_get_asid(kvm), true);
+	if (ret)
+		goto out;
+
+	fw_args.gctx_paddr = __psp_pa(sev->snp_context);
+	fw_args.address = __sme_set(pfn_to_hpa(pfn));
+	fw_args.page_size = PG_LEVEL_TO_RMP(PG_LEVEL_4K);
+	fw_args.page_type = sev_populate_args->type;
 
-fw_err:
+	ret = __sev_issue_cmd(sev_populate_args->sev_fd, SEV_CMD_SNP_LAUNCH_UPDATE,
+			      &fw_args, &sev_populate_args->fw_error);
 	/*
 	 * If the firmware command failed handle the reclaim and cleanup of that
-	 * PFN specially vs. prior pages which can be cleaned up below without
-	 * needing to reclaim in advance.
+	 * PFN before reporting an error.
 	 *
 	 * Additionally, when invalid CPUID function entries are detected,
 	 * firmware writes the expected values into the page and leaves it
@@ -2337,26 +2323,20 @@ fw_err:
 	 * information to provide information on which CPUID leaves/fields
 	 * failed CPUID validation.
 	 */
-	if (!snp_page_reclaim(kvm, pfn + i) &&
+	if (ret && !snp_page_reclaim(kvm, pfn) &&
 	    sev_populate_args->type == KVM_SEV_SNP_PAGE_TYPE_CPUID &&
 	    sev_populate_args->fw_error == SEV_RET_INVALID_PARAM) {
-		void *vaddr = kmap_local_pfn(pfn + i);
+		void *vaddr = kmap_local_pfn(pfn);
 
-		if (copy_to_user(src + i * PAGE_SIZE, vaddr, PAGE_SIZE))
+		if (copy_to_user(src, vaddr, PAGE_SIZE))
 			pr_debug("Failed to write CPUID page back to userspace\n");
 
 		kunmap_local(vaddr);
 	}
 
-	/* pfn + i is hypervisor-owned now, so skip below cleanup for it. */
-	n_private--;
-
-err:
-	pr_debug("%s: exiting with error ret %d (fw_error %d), restoring %d gmem PFNs to shared.\n",
-		 __func__, ret, sev_populate_args->fw_error, n_private);
-	for (i = 0; i < n_private; i++)
-		kvm_rmp_make_shared(kvm, pfn + i, PG_LEVEL_4K);
-
+out:
+	pr_debug("%s: exiting with return code %d (fw_error %d)\n",
+		 __func__, ret, sev_populate_args->fw_error);
 	return ret;
 }
 
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 2d7a4d52ccfb..4fb042ce8ed1 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -3118,7 +3118,7 @@ struct tdx_gmem_post_populate_arg {
 };
 
 static int tdx_gmem_post_populate(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
-				  void __user *src, int order, void *_arg)
+				  void __user *src, void *_arg)
 {
 	struct tdx_gmem_post_populate_arg *arg = _arg;
 	struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index d93f75b05ae2..1d0cee72e560 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -2581,7 +2581,7 @@ int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_ord
  * Returns the number of pages that were populated.
  */
 typedef int (*kvm_gmem_populate_cb)(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
-				    void __user *src, int order, void *opaque);
+				    void __user *src, void *opaque);
 
 long kvm_gmem_populate(struct kvm *kvm, gfn_t gfn, void __user *src, long npages,
 		       kvm_gmem_populate_cb post_populate, void *opaque);
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index fdaea3422c30..24eb33c7948d 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -151,6 +151,15 @@ static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
 			    mapping_gfp_mask(inode->i_mapping), policy);
 	mpol_cond_put(policy);
 
+	/*
+	 * External interfaces like kvm_gmem_get_pfn() support dealing
+	 * with hugepages to a degree, but internally, guest_memfd currently
+	 * assumes that all folios are order-0 and handling would need
+	 * to be updated for anything otherwise (e.g. page-clearing
+	 * operations).
+	 */
+	WARN_ON_ONCE(!IS_ERR(folio) && folio_order(folio));
+
 	return folio;
 }
 
@@ -829,7 +838,7 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
 	struct kvm_memory_slot *slot;
 	void __user *p;
 
-	int ret = 0, max_order;
+	int ret = 0;
 	long i;
 
 	lockdep_assert_held(&kvm->slots_lock);
@@ -848,7 +857,7 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
 	filemap_invalidate_lock(file->f_mapping);
 
 	npages = min_t(ulong, slot->npages - (start_gfn - slot->base_gfn), npages);
-	for (i = 0; i < npages; i += (1 << max_order)) {
+	for (i = 0; i < npages; i++) {
 		struct folio *folio;
 		gfn_t gfn = start_gfn + i;
 		pgoff_t index = kvm_gmem_get_index(slot, gfn);
@@ -860,7 +869,7 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
 			break;
 		}
 
-		folio = __kvm_gmem_get_pfn(file, slot, index, &pfn, &is_prepared, &max_order);
+		folio = __kvm_gmem_get_pfn(file, slot, index, &pfn, &is_prepared, NULL);
 		if (IS_ERR(folio)) {
 			ret = PTR_ERR(folio);
 			break;
@@ -874,20 +883,15 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
 		}
 
 		folio_unlock(folio);
-		WARN_ON(!IS_ALIGNED(gfn, 1 << max_order) ||
-			(npages - i) < (1 << max_order));
 
 		ret = -EINVAL;
-		while (!kvm_range_has_memory_attributes(kvm, gfn, gfn + (1 << max_order),
-							KVM_MEMORY_ATTRIBUTE_PRIVATE,
-							KVM_MEMORY_ATTRIBUTE_PRIVATE)) {
-			if (!max_order)
-				goto put_folio_and_exit;
-			max_order--;
-		}
+		if (!kvm_range_has_memory_attributes(kvm, gfn, gfn + 1,
+						     KVM_MEMORY_ATTRIBUTE_PRIVATE,
+						     KVM_MEMORY_ATTRIBUTE_PRIVATE))
+			goto put_folio_and_exit;
 
 		p = src ? src + i * PAGE_SIZE : NULL;
-		ret = post_populate(kvm, gfn, pfn, p, max_order, opaque);
+		ret = post_populate(kvm, gfn, pfn, p, opaque);
 		if (!ret)
 			kvm_gmem_mark_prepared(folio);
 
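For illustration, below is a minimal sketch of a post-populate callback written against the new single-page kvm_gmem_populate_cb signature. The function name demo_gmem_post_populate and its body are hypothetical and not part of this patch; the kmap_local_pfn()/copy_from_user() pattern simply mirrors the SEV-SNP callback above.

	/*
	 * Hypothetical example, not from this patch: with the order
	 * parameter gone, @gfn, @pfn, and @src all describe exactly one
	 * 4K page, so the callback needs no per-page loop, alignment
	 * checks, or partial-cleanup bookkeeping.
	 */
	static int demo_gmem_post_populate(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
					   void __user *src, void *opaque)
	{
		void *vaddr;
		int ret = 0;

		/* A zero-page style callback may legitimately see src == NULL. */
		if (!src)
			return 0;

		/* Copy one page of initial contents from userspace into the folio. */
		vaddr = kmap_local_pfn(pfn);
		if (copy_from_user(vaddr, src, PAGE_SIZE))
			ret = -EFAULT;
		kunmap_local(vaddr);

		return ret;
	}

The caller side is unchanged apart from the callback type: kvm_gmem_populate(kvm, gfn, src, npages, demo_gmem_post_populate, NULL) now invokes the callback once per 4K page rather than once per max_order-sized chunk, which is what lets the SEV-SNP and TDX implementations drop their internal loops and error-unwind paths.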
