Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/uapi/asm/kvm.h |  3
-rw-r--r--  arch/x86/kvm/x86.c              | 37
-rw-r--r--  arch/x86/kvm/x86.h              | 10
3 files changed, 46 insertions, 4 deletions
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index aae1033c8afa..467116186e71 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -437,6 +437,9 @@ struct kvm_xcrs {
 #define KVM_X86_REG_KVM(index) \
 	KVM_X86_REG_ID(KVM_X86_REG_TYPE_KVM, index)
 
+/* KVM-defined registers starting from 0 */
+#define KVM_REG_GUEST_SSP	0
+
 #define KVM_SYNC_X86_REGS      (1UL << 0)
 #define KVM_SYNC_X86_SREGS     (1UL << 1)
 #define KVM_SYNC_X86_EVENTS    (1UL << 2)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5f23d2d2731d..d85bb723f25a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6016,9 +6016,27 @@ struct kvm_x86_reg_id {
 	__u8 x86;
 };
 
-static int kvm_translate_kvm_reg(struct kvm_x86_reg_id *reg)
+static int kvm_translate_kvm_reg(struct kvm_vcpu *vcpu,
+				 struct kvm_x86_reg_id *reg)
 {
-	return -EINVAL;
+	switch (reg->index) {
+	case KVM_REG_GUEST_SSP:
+		/*
+		 * FIXME: If host-initiated accesses are ever exempted from
+		 * ignore_msrs (in kvm_do_msr_access()), drop this manual check
+		 * and rely on KVM's standard checks to reject accesses to regs
+		 * that don't exist.
+		 */
+		if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK))
+			return -EINVAL;
+
+		reg->type = KVM_X86_REG_TYPE_MSR;
+		reg->index = MSR_KVM_INTERNAL_GUEST_SSP;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
 }
 
 static int kvm_get_one_msr(struct kvm_vcpu *vcpu, u32 msr, u64 __user *user_val)
@@ -6067,7 +6085,7 @@ static int kvm_get_set_one_reg(struct kvm_vcpu *vcpu, unsigned int ioctl,
 		return -EINVAL;
 
 	if (reg->type == KVM_X86_REG_TYPE_KVM) {
-		r = kvm_translate_kvm_reg(reg);
+		r = kvm_translate_kvm_reg(vcpu, reg);
 		if (r)
 			return r;
 	}
@@ -6098,11 +6116,22 @@ static int kvm_get_set_one_reg(struct kvm_vcpu *vcpu, unsigned int ioctl,
 static int kvm_get_reg_list(struct kvm_vcpu *vcpu,
 			    struct kvm_reg_list __user *user_list)
 {
-	u64 nr_regs = 0;
+	u64 nr_regs = guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK) ? 1 : 0;
+	u64 user_nr_regs;
+
+	if (get_user(user_nr_regs, &user_list->n))
+		return -EFAULT;
 
 	if (put_user(nr_regs, &user_list->n))
 		return -EFAULT;
 
+	if (user_nr_regs < nr_regs)
+		return -E2BIG;
+
+	if (nr_regs &&
+	    put_user(KVM_X86_REG_KVM(KVM_REG_GUEST_SSP), &user_list->reg[0]))
+		return -EFAULT;
+
 	return 0;
 }
 
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 786e36fcd0fb..a7c9c72fca93 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -101,6 +101,16 @@ do { \
 #define KVM_SVM_DEFAULT_PLE_WINDOW_MAX	USHRT_MAX
 #define KVM_SVM_DEFAULT_PLE_WINDOW	3000
 
+/*
+ * KVM's internal, non-ABI indices for synthetic MSRs.  The values themselves
+ * are arbitrary and have no meaning, the only requirement is that they don't
+ * conflict with "real" MSRs that KVM supports.  Use values at the upper end
+ * of KVM's reserved paravirtual MSR range to minimize churn, i.e. these values
+ * will be usable until KVM exhausts its supply of paravirtual MSR indices.
+ */
+
+#define MSR_KVM_INTERNAL_GUEST_SSP	0x4b564dff
+
 static inline unsigned int __grow_ple_window(unsigned int val,
 		unsigned int base, unsigned int modifier, unsigned int max)
 {
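
Not part of the patch: a minimal userspace sketch of how the new register could be exercised through the generic ONE_REG interface, assuming kernel headers that include this series (for KVM_X86_REG_KVM() and KVM_REG_GUEST_SSP), a vCPU with SHSTK exposed in guest CPUID, and an already-created "vcpu_fd"; the read_guest_ssp() helper and its error handling are illustrative only.

/*
 * Illustrative sketch: enumerate KVM-defined registers via KVM_GET_REG_LIST,
 * then read the guest shadow-stack pointer via KVM_GET_ONE_REG.
 */
#include <linux/kvm.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>

static uint64_t read_guest_ssp(int vcpu_fd)
{
	struct kvm_one_reg one_reg;
	struct kvm_reg_list *list;
	uint64_t ssp = 0, i;

	/*
	 * Per kvm_get_reg_list() above: userspace passes in the capacity of
	 * reg[], KVM writes back the real count, and the ioctl fails with
	 * E2BIG if the capacity is too small.
	 */
	list = calloc(1, sizeof(*list) + 8 * sizeof(list->reg[0]));
	if (!list)
		exit(1);
	list->n = 8;
	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0) {
		perror("KVM_GET_REG_LIST");
		exit(1);
	}
	for (i = 0; i < list->n; i++)
		printf("reg[%llu] = 0x%llx\n", (unsigned long long)i,
		       (unsigned long long)list->reg[i]);
	free(list);

	/* KVM_X86_REG_KVM() builds the 64-bit ONE_REG ID for a KVM-defined reg. */
	one_reg.id = KVM_X86_REG_KVM(KVM_REG_GUEST_SSP);
	one_reg.addr = (uintptr_t)&ssp;
	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &one_reg) < 0) {
		perror("KVM_GET_ONE_REG");
		exit(1);
	}
	return ssp;
}

The capacity-in/count-out handshake in kvm_get_reg_list() (read n, write back the real count, then fail with -E2BIG if the caller's array is too small) follows the KVM_GET_REG_LIST protocol already used on other architectures, so existing ONE_REG enumeration loops should work unchanged.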