author | Linus Torvalds <torvalds@linux-foundation.org> | 2020-04-02 15:13:15 -0700 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2020-04-02 15:13:15 -0700 |
commit | 8c1b724ddb218f221612d4c649bc9c7819d8d7a6 (patch) | |
tree | 0e226f4156b554eec2690adb8f30ba54b15b68cc /tools/testing/selftests/kvm/steal_time.c | |
parent | f14a9532ee30c68a56ff502c382860f674cc180c (diff) | |
parent | 514ccc194971d0649e4e7ec8a9b3a6e33561d7bf (diff) | |
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm updates from Paolo Bonzini:
"ARM:
- GICv4.1 support
- 32-bit host removal
PPC:
- secure (encrypted) VMs under the Protected Execution Framework
  ultravisor
s390:
- allow disabling GISA (hardware interrupt injection) and protected
VMs/ultravisor support.
x86:
- New dirty bitmap flag that sets all bits in the bitmap when dirty
  page logging is enabled; this is faster because it doesn't require
  bulk modification of the page tables (a minimal usage sketch
  follows this message).
- Initial work on making nested SVM event injection more similar to
VMX, and less buggy.
- Various cleanups to MMU code (though the big ones and related
  optimizations were delayed to 5.8). Instead of using "cr3" in
  function names, which occasionally means eptp, KVM has now
  standardized on "pgd", like the rest of the kernel.
- A large refactoring of CPUID features, which now use an array that
parallels the core x86_features.
- Some removal of pointer chasing from kvm_x86_ops, which will also
be switched to static calls as soon as they are available.
- New Tigerlake CPUID features.
- More bugfixes, optimizations and cleanups.
Generic:
- selftests: cleanups, new MMU notifier stress test, steal-time test
- CSV output for kvm_stat"
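
The dirty bitmap flag mentioned in the x86 notes above is opted into per VM
through KVM_ENABLE_CAP. A minimal sketch, assuming a VM file descriptor
already created via /dev/kvm (the helper name is ours for illustration; the
capability and flag constants are the ones this merge adds to the uapi
headers):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Sketch: enable manual dirty-log protection with the new
 * "initially all set" behavior, so turning on dirty page logging
 * no longer requires bulk write-protection of the page tables.
 */
static int enable_dirty_log_initially_set(int vm_fd)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2,
		.args[0] = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
			   KVM_DIRTY_LOG_INITIALLY_SET,
	};

	/* Returns 0 on success, -1 with errno set otherwise. */
	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}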
* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (277 commits)
x86/kvm: fix a missing-prototypes "vmread_error"
KVM: x86: Fix BUILD_BUG() in __cpuid_entry_get_reg() w/ CONFIG_UBSAN=y
KVM: VMX: Add a trampoline to fix VMREAD error handling
KVM: SVM: Annotate svm_x86_ops as __initdata
KVM: VMX: Annotate vmx_x86_ops as __initdata
KVM: x86: Drop __exit from kvm_x86_ops' hardware_unsetup()
KVM: x86: Copy kvm_x86_ops by value to eliminate layer of indirection
KVM: x86: Set kvm_x86_ops only after ->hardware_setup() completes
KVM: VMX: Configure runtime hooks using vmx_x86_ops
KVM: VMX: Move hardware_setup() definition below vmx_x86_ops
KVM: x86: Move init-only kvm_x86_ops to separate struct
KVM: Pass kvm_init()'s opaque param to additional arch funcs
s390/gmap: return proper error code on ksm unsharing
KVM: selftests: Fix cosmetic copy-paste error in vm_mem_region_move()
KVM: Fix out of range accesses to memslots
KVM: X86: Micro-optimize IPI fastpath delay
KVM: X86: Delay read msr data iff writes ICR MSR
KVM: PPC: Book3S HV: Add a capability for enabling secure guests
KVM: arm64: GICv4.1: Expose HW-based SGIs in debugfs
KVM: arm64: GICv4.1: Allow non-trapping WFI when using HW SGIs
...
Diffstat (limited to 'tools/testing/selftests/kvm/steal_time.c')
-rw-r--r-- | tools/testing/selftests/kvm/steal_time.c | 352 |
1 files changed, 352 insertions, 0 deletions
diff --git a/tools/testing/selftests/kvm/steal_time.c b/tools/testing/selftests/kvm/steal_time.c
new file mode 100644
index 000000000000..fcc840088c91
--- /dev/null
+++ b/tools/testing/selftests/kvm/steal_time.c
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * steal/stolen time test
+ *
+ * Copyright (C) 2020, Red Hat, Inc.
+ */
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <time.h>
+#include <sched.h>
+#include <pthread.h>
+#include <linux/kernel.h>
+#include <sys/syscall.h>
+#include <asm/kvm.h>
+#include <asm/kvm_para.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+
+#define NR_VCPUS		4
+#define ST_GPA_BASE		(1 << 30)
+#define MIN_RUN_DELAY_NS	200000UL
+
+static void *st_gva[NR_VCPUS];
+static uint64_t guest_stolen_time[NR_VCPUS];
+
+#if defined(__x86_64__)
+
+/* steal_time must have 64-byte alignment */
+#define STEAL_TIME_SIZE		((sizeof(struct kvm_steal_time) + 63) & ~63)
+
+static void check_status(struct kvm_steal_time *st)
+{
+	GUEST_ASSERT(!(READ_ONCE(st->version) & 1));
+	GUEST_ASSERT(READ_ONCE(st->flags) == 0);
+	GUEST_ASSERT(READ_ONCE(st->preempted) == 0);
+}
+
+static void guest_code(int cpu)
+{
+	struct kvm_steal_time *st = st_gva[cpu];
+	uint32_t version;
+
+	GUEST_ASSERT(rdmsr(MSR_KVM_STEAL_TIME) == ((uint64_t)st_gva[cpu] | KVM_MSR_ENABLED));
+
+	memset(st, 0, sizeof(*st));
+	GUEST_SYNC(0);
+
+	check_status(st);
+	WRITE_ONCE(guest_stolen_time[cpu], st->steal);
+	version = READ_ONCE(st->version);
+	check_status(st);
+	GUEST_SYNC(1);
+
+	check_status(st);
+	GUEST_ASSERT(version < READ_ONCE(st->version));
+	WRITE_ONCE(guest_stolen_time[cpu], st->steal);
+	check_status(st);
+	GUEST_DONE();
+}
+
+static void steal_time_init(struct kvm_vm *vm)
+{
+	int i;
+
+	if (!(kvm_get_supported_cpuid_entry(KVM_CPUID_FEATURES)->eax &
+	      KVM_FEATURE_STEAL_TIME)) {
+		print_skip("steal-time not supported");
+		exit(KSFT_SKIP);
+	}
+
+	for (i = 0; i < NR_VCPUS; ++i) {
+		int ret;
+
+		vcpu_set_cpuid(vm, i, kvm_get_supported_cpuid());
+
+		/* ST_GPA_BASE is identity mapped */
+		st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
+		sync_global_to_guest(vm, st_gva[i]);
+
+		ret = _vcpu_set_msr(vm, i, MSR_KVM_STEAL_TIME,
+				    (ulong)st_gva[i] | KVM_STEAL_RESERVED_MASK);
+		TEST_ASSERT(ret == 0, "Bad GPA didn't fail");
+
+		vcpu_set_msr(vm, i, MSR_KVM_STEAL_TIME, (ulong)st_gva[i] | KVM_MSR_ENABLED);
+	}
+}
+
+static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpuid)
+{
+	struct kvm_steal_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpuid]);
+	int i;
+
+	pr_info("VCPU%d:\n", vcpuid);
+	pr_info("    steal:     %lld\n", st->steal);
+	pr_info("    version:   %d\n", st->version);
+	pr_info("    flags:     %d\n", st->flags);
+	pr_info("    preempted: %d\n", st->preempted);
+	pr_info("    u8_pad:    ");
+	for (i = 0; i < 3; ++i)
+		pr_info("%d", st->u8_pad[i]);
+	pr_info("\n    pad:       ");
+	for (i = 0; i < 11; ++i)
+		pr_info("%d", st->pad[i]);
+	pr_info("\n");
+}
+
+#elif defined(__aarch64__)
+
+/* PV_TIME_ST must have 64-byte alignment */
+#define STEAL_TIME_SIZE		((sizeof(struct st_time) + 63) & ~63)
+
+#define SMCCC_ARCH_FEATURES	0x80000001
+#define PV_TIME_FEATURES	0xc5000020
+#define PV_TIME_ST		0xc5000021
+
+struct st_time {
+	uint32_t rev;
+	uint32_t attr;
+	uint64_t st_time;
+};
+
+static int64_t smccc(uint32_t func, uint32_t arg)
+{
+	unsigned long ret;
+
+	asm volatile(
+		"mov	x0, %1\n"
+		"mov	x1, %2\n"
+		"hvc	#0\n"
+		"mov	%0, x0\n"
+	: "=r" (ret) : "r" (func), "r" (arg) :
+	  "x0", "x1", "x2", "x3");
+
+	return ret;
+}
+
+static void check_status(struct st_time *st)
+{
+	GUEST_ASSERT(READ_ONCE(st->rev) == 0);
+	GUEST_ASSERT(READ_ONCE(st->attr) == 0);
+}
+
+static void guest_code(int cpu)
+{
+	struct st_time *st;
+	int64_t status;
+
+	status = smccc(SMCCC_ARCH_FEATURES, PV_TIME_FEATURES);
+	GUEST_ASSERT(status == 0);
+	status = smccc(PV_TIME_FEATURES, PV_TIME_FEATURES);
+	GUEST_ASSERT(status == 0);
+	status = smccc(PV_TIME_FEATURES, PV_TIME_ST);
+	GUEST_ASSERT(status == 0);
+
+	status = smccc(PV_TIME_ST, 0);
+	GUEST_ASSERT(status != -1);
+	GUEST_ASSERT(status == (ulong)st_gva[cpu]);
+
+	st = (struct st_time *)status;
+	GUEST_SYNC(0);
+
+	check_status(st);
+	WRITE_ONCE(guest_stolen_time[cpu], st->st_time);
+	GUEST_SYNC(1);
+
+	check_status(st);
+	WRITE_ONCE(guest_stolen_time[cpu], st->st_time);
+	GUEST_DONE();
+}
+
+static void steal_time_init(struct kvm_vm *vm)
+{
+	struct kvm_device_attr dev = {
+		.group = KVM_ARM_VCPU_PVTIME_CTRL,
+		.attr = KVM_ARM_VCPU_PVTIME_IPA,
+	};
+	int i, ret;
+
+	ret = _vcpu_ioctl(vm, 0, KVM_HAS_DEVICE_ATTR, &dev);
+	if (ret != 0 && errno == ENXIO) {
+		print_skip("steal-time not supported");
+		exit(KSFT_SKIP);
+	}
+
+	for (i = 0; i < NR_VCPUS; ++i) {
+		uint64_t st_ipa;
+
+		vcpu_ioctl(vm, i, KVM_HAS_DEVICE_ATTR, &dev);
+
+		dev.addr = (uint64_t)&st_ipa;
+
+		/* ST_GPA_BASE is identity mapped */
+		st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
+		sync_global_to_guest(vm, st_gva[i]);
+
+		st_ipa = (ulong)st_gva[i] | 1;
+		ret = _vcpu_ioctl(vm, i, KVM_SET_DEVICE_ATTR, &dev);
+		TEST_ASSERT(ret == -1 && errno == EINVAL, "Bad IPA didn't report EINVAL");
+
+		st_ipa = (ulong)st_gva[i];
+		vcpu_ioctl(vm, i, KVM_SET_DEVICE_ATTR, &dev);
+
+		ret = _vcpu_ioctl(vm, i, KVM_SET_DEVICE_ATTR, &dev);
+		TEST_ASSERT(ret == -1 && errno == EEXIST, "Set IPA twice without EEXIST");
+	}
+}
+
+static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpuid)
+{
+	struct st_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpuid]);
+
+	pr_info("VCPU%d:\n", vcpuid);
+	pr_info("    rev:     %d\n", st->rev);
+	pr_info("    attr:    %d\n", st->attr);
+	pr_info("    st_time: %ld\n", st->st_time);
+}
+
+#endif
+
+static long get_run_delay(void)
+{
+	char path[64];
+	long val[2];
+	FILE *fp;
+
+	sprintf(path, "/proc/%ld/schedstat", syscall(SYS_gettid));
+	fp = fopen(path, "r");
+	fscanf(fp, "%ld %ld ", &val[0], &val[1]);
+	fclose(fp);
+
+	return val[1];
+}
+
+static void *do_steal_time(void *arg)
+{
+	struct timespec ts, stop;
+
+	clock_gettime(CLOCK_MONOTONIC, &ts);
+	stop = timespec_add_ns(ts, MIN_RUN_DELAY_NS);
+
+	while (1) {
+		clock_gettime(CLOCK_MONOTONIC, &ts);
+		if (timespec_to_ns(timespec_sub(ts, stop)) >= 0)
+			break;
+	}
+
+	return NULL;
+}
+
+static void run_vcpu(struct kvm_vm *vm, uint32_t vcpuid)
+{
+	struct ucall uc;
+
+	vcpu_args_set(vm, vcpuid, 1, vcpuid);
+
+	vcpu_ioctl(vm, vcpuid, KVM_RUN, NULL);
+
+	switch (get_ucall(vm, vcpuid, &uc)) {
+	case UCALL_SYNC:
+	case UCALL_DONE:
+		break;
+	case UCALL_ABORT:
+		TEST_ASSERT(false, "%s at %s:%ld", (const char *)uc.args[0],
+			    __FILE__, uc.args[1]);
+	default:
+		TEST_ASSERT(false, "Unexpected exit: %s",
+			    exit_reason_str(vcpu_state(vm, vcpuid)->exit_reason));
+	}
+}
+
+int main(int ac, char **av)
+{
+	struct kvm_vm *vm;
+	pthread_attr_t attr;
+	pthread_t thread;
+	cpu_set_t cpuset;
+	unsigned int gpages;
+	long stolen_time;
+	long run_delay;
+	bool verbose;
+	int i;
+
+	verbose = ac > 1 && (!strncmp(av[1], "-v", 3) || !strncmp(av[1], "--verbose", 10));
+
+	/* Set CPU affinity so we can force preemption of the VCPU */
+	CPU_ZERO(&cpuset);
+	CPU_SET(0, &cpuset);
+	pthread_attr_init(&attr);
+	pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpuset);
+	pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
+
+	/* Create a one VCPU guest and an identity mapped memslot for the steal time structure */
+	vm = vm_create_default(0, 0, guest_code);
+	gpages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, STEAL_TIME_SIZE * NR_VCPUS);
+	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, ST_GPA_BASE, 1, gpages, 0);
+	virt_map(vm, ST_GPA_BASE, ST_GPA_BASE, gpages, 0);
+	ucall_init(vm, NULL);
+
+	/* Add the rest of the VCPUs */
+	for (i = 1; i < NR_VCPUS; ++i)
+		vm_vcpu_add_default(vm, i, guest_code);
+
+	steal_time_init(vm);
+
+	/* Run test on each VCPU */
+	for (i = 0; i < NR_VCPUS; ++i) {
+		/* First VCPU run initializes steal-time */
+		run_vcpu(vm, i);
+
+		/* Second VCPU run, expect guest stolen time to be <= run_delay */
+		run_vcpu(vm, i);
+		sync_global_from_guest(vm, guest_stolen_time[i]);
+		stolen_time = guest_stolen_time[i];
+		run_delay = get_run_delay();
+		TEST_ASSERT(stolen_time <= run_delay,
+			    "Expected stolen time <= %ld, got %ld",
+			    run_delay, stolen_time);
+
+		/* Steal time from the VCPU. The steal time thread has the same
+		 * CPU affinity as the VCPUs.
+		 */
+		run_delay = get_run_delay();
+		pthread_create(&thread, &attr, do_steal_time, NULL);
+		do
+			pthread_yield();
+		while (get_run_delay() - run_delay < MIN_RUN_DELAY_NS);
+		pthread_join(thread, NULL);
+		run_delay = get_run_delay() - run_delay;
+		TEST_ASSERT(run_delay >= MIN_RUN_DELAY_NS,
+			    "Expected run_delay >= %ld, got %ld",
+			    MIN_RUN_DELAY_NS, run_delay);
+
+		/* Run VCPU again to confirm stolen time is consistent with run_delay */
+		run_vcpu(vm, i);
+		sync_global_from_guest(vm, guest_stolen_time[i]);
+		stolen_time = guest_stolen_time[i] - stolen_time;
+		TEST_ASSERT(stolen_time >= run_delay,
+			    "Expected stolen time >= %ld, got %ld",
+			    run_delay, stolen_time);
+
+		if (verbose) {
+			pr_info("VCPU%d: total-stolen-time=%ld test-stolen-time=%ld", i,
+				guest_stolen_time[i], stolen_time);
+			if (stolen_time == run_delay)
+				pr_info(" (BONUS: guest test-stolen-time even exactly matches test-run_delay)");
+			pr_info("\n");
+			steal_time_dump(vm, i);
+		}
+	}
+
+	return 0;
+}
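
A note on what the x86 check_status() above is asserting: struct
kvm_steal_time publishes its fields under a seqcount-like "version" field,
odd while the hypervisor is updating the record and even when it is stable,
which is also why guest_code() expects "version" to have advanced between the
two sync points. A guest-side read loop would look roughly like the sketch
below (the helper name and the barrier choice are illustrative, not part of
the patch):

#include <stdint.h>
#include <asm/kvm_para.h>	/* struct kvm_steal_time (x86 uapi) */

/*
 * Illustrative reader: retry while 'version' is odd (an update is in
 * progress) or changes across the read (the snapshot was torn).
 */
static uint64_t read_steal_time(const volatile struct kvm_steal_time *st)
{
	uint32_t version;
	uint64_t steal;

	do {
		version = st->version;
		__sync_synchronize();	/* order version read before payload */
		steal = st->steal;
		__sync_synchronize();	/* order payload before the re-check */
	} while ((version & 1) || version != st->version);

	return steal;
}

The test itself builds along with the other KVM selftests (e.g.
make -C tools/testing/selftests/kvm) and takes -v/--verbose to print the
per-VCPU steal_time_dump() output after each iteration.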