From 025eed7b3519be30cc2310711137ab4ff827fbe3 Mon Sep 17 00:00:00 2001
From: Ben Gardon
Date: Thu, 23 Jan 2020 10:04:27 -0800
Subject: KVM: selftests: Create a demand paging test

While userfaultfd, KVM's demand paging implementation, is not specific
to KVM, having a benchmark for its performance will be useful for
guiding performance improvements to KVM. As a first step towards
creating a userfaultfd demand paging test, create a simple memory
access test, based on dirty_log_test.

Reviewed-by: Oliver Upton
Signed-off-by: Ben Gardon
Signed-off-by: Paolo Bonzini
---
 tools/testing/selftests/kvm/demand_paging_test.c | 283 +++++++++++++++++++++++
 1 file changed, 283 insertions(+)
 create mode 100644 tools/testing/selftests/kvm/demand_paging_test.c
(limited to 'tools/testing/selftests/kvm/demand_paging_test.c')

diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c
new file mode 100644
index 000000000000..e3d49172e2c3
--- /dev/null
+++ b/tools/testing/selftests/kvm/demand_paging_test.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KVM demand paging test
+ * Adapted from dirty_log_test.c
+ *
+ * Copyright (C) 2018, Red Hat, Inc.
+ * Copyright (C) 2019, Google, Inc.
+ */
+
+#define _GNU_SOURCE /* for program_invocation_name */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <time.h>
+#include <pthread.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+
+#define VCPU_ID 1
+
+/* The memory slot index demand page */
+#define TEST_MEM_SLOT_INDEX 1
+
+/* Default guest test virtual memory offset */
+#define DEFAULT_GUEST_TEST_MEM 0xc0000000
+
+/*
+ * Guest/Host shared variables. Ensure addr_gva2hva() and/or
+ * sync_global_to/from_guest() are used when accessing from
+ * the host. READ/WRITE_ONCE() should also be used with anything
+ * that may change.
+ */
+static uint64_t host_page_size;
+static uint64_t guest_page_size;
+static uint64_t guest_num_pages;
+
+/*
+ * Guest physical memory offset of the testing memory slot.
+ * This will be set to the topmost valid physical address minus
+ * the test memory size.
+ */
+static uint64_t guest_test_phys_mem;
+
+/*
+ * Guest virtual memory offset of the testing memory slot.
+ * Must not conflict with identity mapped test code.
+ */
+static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
+
+/*
+ * Continuously write to the first 8 bytes of each page in the demand paging
+ * memory region.
+ */ +static void guest_code(void) +{ + int i; + + for (i = 0; i < guest_num_pages; i++) { + uint64_t addr = guest_test_virt_mem; + + addr += i * guest_page_size; + addr &= ~(host_page_size - 1); + *(uint64_t *)addr = 0x0123456789ABCDEF; + } + + GUEST_SYNC(1); +} + +/* Points to the test VM memory region on which we are doing demand paging */ +static void *host_test_mem; +static uint64_t host_num_pages; + +static void *vcpu_worker(void *data) +{ + int ret; + struct kvm_vm *vm = data; + struct kvm_run *run; + + run = vcpu_state(vm, VCPU_ID); + + /* Let the guest access its memory */ + ret = _vcpu_run(vm, VCPU_ID); + TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret); + if (get_ucall(vm, VCPU_ID, NULL) != UCALL_SYNC) { + TEST_ASSERT(false, + "Invalid guest sync status: exit_reason=%s\n", + exit_reason_str(run->exit_reason)); + } + + return NULL; +} + +static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid, + uint64_t extra_mem_pages, void *guest_code) +{ + struct kvm_vm *vm; + uint64_t extra_pg_pages = extra_mem_pages / 512 * 2; + + vm = _vm_create(mode, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR); + kvm_vm_elf_load(vm, program_invocation_name, 0, 0); +#ifdef __x86_64__ + vm_create_irqchip(vm); +#endif + vm_vcpu_add_default(vm, vcpuid, guest_code); + return vm; +} + +#define GUEST_MEM_SHIFT 30 /* 1G */ +#define PAGE_SHIFT_4K 12 + +static void run_test(enum vm_guest_mode mode) +{ + pthread_t vcpu_thread; + struct kvm_vm *vm; + + /* + * We reserve page table for 2 times of extra dirty mem which + * will definitely cover the original (1G+) test range. Here + * we do the calculation with 4K page size which is the + * smallest so the page number will be enough for all archs + * (e.g., 64K page size guest will need even less memory for + * page tables). + */ + vm = create_vm(mode, VCPU_ID, + 2ul << (GUEST_MEM_SHIFT - PAGE_SHIFT_4K), + guest_code); + + guest_page_size = vm_get_page_size(vm); + /* + * A little more than 1G of guest page sized pages. Cover the + * case where the size is not aligned to 64 pages. 
+ */ + guest_num_pages = (1ul << (GUEST_MEM_SHIFT - + vm_get_page_shift(vm))) + 16; +#ifdef __s390x__ + /* Round up to multiple of 1M (segment size) */ + guest_num_pages = (guest_num_pages + 0xff) & ~0xffUL; +#endif + + host_page_size = getpagesize(); + host_num_pages = (guest_num_pages * guest_page_size) / host_page_size + + !!((guest_num_pages * guest_page_size) % + host_page_size); + + guest_test_phys_mem = (vm_get_max_gfn(vm) - guest_num_pages) * + guest_page_size; + guest_test_phys_mem &= ~(host_page_size - 1); + +#ifdef __s390x__ + /* Align to 1M (segment size) */ + guest_test_phys_mem &= ~((1 << 20) - 1); +#endif + + DEBUG("guest physical test memory offset: 0x%lx\n", + guest_test_phys_mem); + + + /* Add an extra memory slot for testing demand paging */ + vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, + guest_test_phys_mem, + TEST_MEM_SLOT_INDEX, + guest_num_pages, 0); + + /* Do mapping for the demand paging memory slot */ + virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, + guest_num_pages * guest_page_size, 0); + + /* Cache the HVA pointer of the region */ + host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem); + +#ifdef __x86_64__ + vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); +#endif +#ifdef __aarch64__ + ucall_init(vm, NULL); +#endif + + /* Export the shared variables to the guest */ + sync_global_to_guest(vm, host_page_size); + sync_global_to_guest(vm, guest_page_size); + sync_global_to_guest(vm, guest_test_virt_mem); + sync_global_to_guest(vm, guest_num_pages); + + pthread_create(&vcpu_thread, NULL, vcpu_worker, vm); + + /* Wait for the vcpu thread to quit */ + pthread_join(vcpu_thread, NULL); + + ucall_uninit(vm); + kvm_vm_free(vm); +} + +struct guest_mode { + bool supported; + bool enabled; +}; +static struct guest_mode guest_modes[NUM_VM_MODES]; + +#define guest_mode_init(mode, supported, enabled) ({ \ + guest_modes[mode] = (struct guest_mode){ supported, enabled }; \ +}) + +static void help(char *name) +{ + int i; + + puts(""); + printf("usage: %s [-h] [-m mode]\n", name); + printf(" -m: specify the guest mode ID to test\n" + " (default: test all supported modes)\n" + " This option may be used multiple times.\n" + " Guest mode IDs:\n"); + for (i = 0; i < NUM_VM_MODES; ++i) { + printf(" %d: %s%s\n", i, vm_guest_mode_string(i), + guest_modes[i].supported ? 
" (supported)" : ""); + } + puts(""); + exit(0); +} + +int main(int argc, char *argv[]) +{ + bool mode_selected = false; + unsigned int mode; + int opt, i; + +#ifdef __x86_64__ + guest_mode_init(VM_MODE_PXXV48_4K, true, true); +#endif +#ifdef __aarch64__ + guest_mode_init(VM_MODE_P40V48_4K, true, true); + guest_mode_init(VM_MODE_P40V48_64K, true, true); + { + unsigned int limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE); + + if (limit >= 52) + guest_mode_init(VM_MODE_P52V48_64K, true, true); + if (limit >= 48) { + guest_mode_init(VM_MODE_P48V48_4K, true, true); + guest_mode_init(VM_MODE_P48V48_64K, true, true); + } + } +#endif +#ifdef __s390x__ + guest_mode_init(VM_MODE_P40V48_4K, true, true); +#endif + + while ((opt = getopt(argc, argv, "hm:")) != -1) { + switch (opt) { + case 'm': + if (!mode_selected) { + for (i = 0; i < NUM_VM_MODES; ++i) + guest_modes[i].enabled = false; + mode_selected = true; + } + mode = strtoul(optarg, NULL, 10); + TEST_ASSERT(mode < NUM_VM_MODES, + "Guest mode ID %d too big", mode); + guest_modes[mode].enabled = true; + break; + case 'h': + default: + help(argv[0]); + break; + } + } + + for (i = 0; i < NUM_VM_MODES; ++i) { + if (!guest_modes[i].enabled) + continue; + TEST_ASSERT(guest_modes[i].supported, + "Guest mode ID %d (%s) not supported.", + i, vm_guest_mode_string(i)); + run_test(i); + } + + return 0; +} -- cgit v1.2.3 From 4f72180eb4da9ce0bad2f284e81875bb15ecfbb7 Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Thu, 20 Feb 2020 18:09:12 +0100 Subject: KVM: selftests: Add demand paging content to the demand paging test The demand paging test is currently a simple page access test which, while potentially useful, doesn't add much versus the existing dirty logging test. To improve the demand paging test, add a basic userfaultfd demand paging implementation. Signed-off-by: Ben Gardon Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/demand_paging_test.c | 210 ++++++++++++++++++++++- 1 file changed, 206 insertions(+), 4 deletions(-) (limited to 'tools/testing/selftests/kvm/demand_paging_test.c') diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c index e3d49172e2c3..6be9793789f1 100644 --- a/tools/testing/selftests/kvm/demand_paging_test.c +++ b/tools/testing/selftests/kvm/demand_paging_test.c @@ -11,16 +11,21 @@ #include #include +#include #include +#include #include +#include #include #include #include +#include #include "test_util.h" #include "kvm_util.h" #include "processor.h" +#ifdef __NR_userfaultfd #define VCPU_ID 1 /* The memory slot index demand page */ @@ -39,6 +44,8 @@ static uint64_t host_page_size; static uint64_t guest_page_size; static uint64_t guest_num_pages; +static char *guest_data_prototype; + /* * Guest physical memory offset of the testing memory slot. 
* This will be set to the topmost valid physical address minus @@ -110,13 +117,169 @@ static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid, return vm; } +static int handle_uffd_page_request(int uffd, uint64_t addr) +{ + pid_t tid; + struct uffdio_copy copy; + int r; + + tid = syscall(__NR_gettid); + + copy.src = (uint64_t)guest_data_prototype; + copy.dst = addr; + copy.len = host_page_size; + copy.mode = 0; + + r = ioctl(uffd, UFFDIO_COPY, ©); + if (r == -1) { + DEBUG("Failed Paged in 0x%lx from thread %d with errno: %d\n", + addr, tid, errno); + return r; + } + + return 0; +} + +bool quit_uffd_thread; + +struct uffd_handler_args { + int uffd; + int pipefd; +}; + +static void *uffd_handler_thread_fn(void *arg) +{ + struct uffd_handler_args *uffd_args = (struct uffd_handler_args *)arg; + int uffd = uffd_args->uffd; + int pipefd = uffd_args->pipefd; + int64_t pages = 0; + + while (!quit_uffd_thread) { + struct uffd_msg msg; + struct pollfd pollfd[2]; + char tmp_chr; + int r; + uint64_t addr; + + pollfd[0].fd = uffd; + pollfd[0].events = POLLIN; + pollfd[1].fd = pipefd; + pollfd[1].events = POLLIN; + + r = poll(pollfd, 2, -1); + switch (r) { + case -1: + DEBUG("poll err"); + continue; + case 0: + continue; + case 1: + break; + default: + DEBUG("Polling uffd returned %d", r); + return NULL; + } + + if (pollfd[0].revents & POLLERR) { + DEBUG("uffd revents has POLLERR"); + return NULL; + } + + if (pollfd[1].revents & POLLIN) { + r = read(pollfd[1].fd, &tmp_chr, 1); + TEST_ASSERT(r == 1, + "Error reading pipefd in UFFD thread\n"); + return NULL; + } + + if (!pollfd[0].revents & POLLIN) + continue; + + r = read(uffd, &msg, sizeof(msg)); + if (r == -1) { + if (errno == EAGAIN) + continue; + DEBUG("Read of uffd gor errno %d", errno); + return NULL; + } + + if (r != sizeof(msg)) { + DEBUG("Read on uffd returned unexpected size: %d bytes", + r); + return NULL; + } + + if (!(msg.event & UFFD_EVENT_PAGEFAULT)) + continue; + + addr = msg.arg.pagefault.address; + r = handle_uffd_page_request(uffd, addr); + if (r < 0) + return NULL; + pages++; + } + + return NULL; +} + +static int setup_demand_paging(struct kvm_vm *vm, + pthread_t *uffd_handler_thread, int pipefd) +{ + int uffd; + struct uffdio_api uffdio_api; + struct uffdio_register uffdio_register; + struct uffd_handler_args uffd_args; + + guest_data_prototype = malloc(host_page_size); + TEST_ASSERT(guest_data_prototype, + "Failed to allocate buffer for guest data pattern"); + memset(guest_data_prototype, 0xAB, host_page_size); + + uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK); + if (uffd == -1) { + DEBUG("uffd creation failed\n"); + return -1; + } + + uffdio_api.api = UFFD_API; + uffdio_api.features = 0; + if (ioctl(uffd, UFFDIO_API, &uffdio_api) == -1) { + DEBUG("ioctl uffdio_api failed\n"); + return -1; + } + + uffdio_register.range.start = (uint64_t)host_test_mem; + uffdio_register.range.len = host_num_pages * host_page_size; + uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING; + if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register) == -1) { + DEBUG("ioctl uffdio_register failed\n"); + return -1; + } + + if ((uffdio_register.ioctls & UFFD_API_RANGE_IOCTLS) != + UFFD_API_RANGE_IOCTLS) { + DEBUG("unexpected userfaultfd ioctl set\n"); + return -1; + } + + uffd_args.uffd = uffd; + uffd_args.pipefd = pipefd; + pthread_create(uffd_handler_thread, NULL, uffd_handler_thread_fn, + &uffd_args); + + return 0; +} + #define GUEST_MEM_SHIFT 30 /* 1G */ #define PAGE_SHIFT_4K 12 -static void run_test(enum vm_guest_mode mode) +static 
void run_test(enum vm_guest_mode mode, bool use_uffd) { pthread_t vcpu_thread; + pthread_t uffd_handler_thread; + int pipefd[2]; struct kvm_vm *vm; + int r; /* * We reserve page table for 2 times of extra dirty mem which @@ -173,6 +336,16 @@ static void run_test(enum vm_guest_mode mode) /* Cache the HVA pointer of the region */ host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem); + if (use_uffd) { + /* Set up user fault fd to handle demand paging requests. */ + r = pipe2(pipefd, O_CLOEXEC | O_NONBLOCK); + TEST_ASSERT(!r, "Failed to set up pipefd"); + + r = setup_demand_paging(vm, &uffd_handler_thread, pipefd[0]); + if (r < 0) + exit(-r); + } + #ifdef __x86_64__ vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); #endif @@ -191,8 +364,20 @@ static void run_test(enum vm_guest_mode mode) /* Wait for the vcpu thread to quit */ pthread_join(vcpu_thread, NULL); + if (use_uffd) { + char c; + + /* Tell the user fault fd handler thread to quit */ + r = write(pipefd[1], &c, 1); + TEST_ASSERT(r == 1, "Unable to write to pipefd"); + + pthread_join(uffd_handler_thread, NULL); + } + ucall_uninit(vm); kvm_vm_free(vm); + + free(guest_data_prototype); } struct guest_mode { @@ -210,7 +395,7 @@ static void help(char *name) int i; puts(""); - printf("usage: %s [-h] [-m mode]\n", name); + printf("usage: %s [-h] [-m mode] [-u]\n", name); printf(" -m: specify the guest mode ID to test\n" " (default: test all supported modes)\n" " This option may be used multiple times.\n" @@ -219,6 +404,7 @@ static void help(char *name) printf(" %d: %s%s\n", i, vm_guest_mode_string(i), guest_modes[i].supported ? " (supported)" : ""); } + printf(" -u: Use User Fault FD to handle vCPU page faults.\n"); puts(""); exit(0); } @@ -228,6 +414,7 @@ int main(int argc, char *argv[]) bool mode_selected = false; unsigned int mode; int opt, i; + bool use_uffd = false; #ifdef __x86_64__ guest_mode_init(VM_MODE_PXXV48_4K, true, true); @@ -250,7 +437,7 @@ int main(int argc, char *argv[]) guest_mode_init(VM_MODE_P40V48_4K, true, true); #endif - while ((opt = getopt(argc, argv, "hm:")) != -1) { + while ((opt = getopt(argc, argv, "hm:u")) != -1) { switch (opt) { case 'm': if (!mode_selected) { @@ -263,6 +450,9 @@ int main(int argc, char *argv[]) "Guest mode ID %d too big", mode); guest_modes[mode].enabled = true; break; + case 'u': + use_uffd = true; + break; case 'h': default: help(argv[0]); @@ -276,8 +466,20 @@ int main(int argc, char *argv[]) TEST_ASSERT(guest_modes[i].supported, "Guest mode ID %d (%s) not supported.", i, vm_guest_mode_string(i)); - run_test(i); + run_test(i, use_uffd); } return 0; } + +#else /* __NR_userfaultfd */ + +#warning "missing __NR_userfaultfd definition" + +int main(void) +{ + printf("skip: Skipping userfaultfd test (missing __NR_userfaultfd)\n"); + return KSFT_SKIP; +} + +#endif /* __NR_userfaultfd */ -- cgit v1.2.3 From 0119cb365c93621535187c7527486c3b378a622d Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Thu, 20 Feb 2020 18:09:59 +0100 Subject: KVM: selftests: Add configurable demand paging delay When running the demand paging test with the -u option, the User Fault FD handler essentially adds an arbitrary delay to page fault resolution. To enable better simulation of a real demand paging scenario, add a configurable delay to the UFFD handler. 
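
[Illustration, not part of this patch: a minimal standalone userfaultfd
sketch of the mechanism the -u and -d options drive. A handler thread
sleeps before resolving a missing-page fault with UFFDIO_COPY, so the
faulting access stalls for at least the configured delay. PAGE_SIZE and
DELAY_USEC are assumptions of the sketch, error handling is pared down,
and it builds with: gcc -pthread uffd_delay_sketch.c]

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <poll.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

#define PAGE_SIZE	4096	/* assumption: 4 KiB pages */
#define DELAY_USEC	500	/* stand-in for the -d argument */

static char source_page[PAGE_SIZE];	/* data to "demand page" in */

static void *handler(void *arg)
{
	int uffd = *(int *)arg;
	struct pollfd pfd = { .fd = uffd, .events = POLLIN };
	struct uffd_msg msg;
	struct uffdio_copy copy;

	/* Wait for exactly one missing-page fault notification. */
	poll(&pfd, 1, -1);
	if (read(uffd, &msg, sizeof(msg)) != sizeof(msg) ||
	    msg.event != UFFD_EVENT_PAGEFAULT)
		return NULL;

	usleep(DELAY_USEC);	/* the artificial demand paging delay */

	copy.dst = msg.arg.pagefault.address & ~(PAGE_SIZE - 1);
	copy.src = (unsigned long)source_page;
	copy.len = PAGE_SIZE;
	copy.mode = 0;
	ioctl(uffd, UFFDIO_COPY, &copy);	/* unblocks the faulter */
	return NULL;
}

int main(void)
{
	struct uffdio_api api = { .api = UFFD_API };
	struct uffdio_register reg;
	struct timespec t0, t1;
	pthread_t thread;
	char *mem;
	int uffd;

	uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api))
		exit(1);

	mem = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (mem == MAP_FAILED)
		exit(1);

	reg.range.start = (unsigned long)mem;
	reg.range.len = PAGE_SIZE;
	reg.mode = UFFDIO_REGISTER_MODE_MISSING;
	if (ioctl(uffd, UFFDIO_REGISTER, &reg))
		exit(1);

	pthread_create(&thread, NULL, handler, &uffd);

	clock_gettime(CLOCK_MONOTONIC, &t0);
	mem[0] = 1;	/* faults; blocked until the handler's UFFDIO_COPY */
	clock_gettime(CLOCK_MONOTONIC, &t1);

	printf("fault latency: %ld usec\n",
	       (t1.tv_sec - t0.tv_sec) * 1000000L +
	       (t1.tv_nsec - t0.tv_nsec) / 1000);
	pthread_join(thread, NULL);
	return 0;
}
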
Reviewed-by: Peter Xu Signed-off-by: Ben Gardon Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/demand_paging_test.c | 32 ++++++++++++++++++------ 1 file changed, 25 insertions(+), 7 deletions(-) (limited to 'tools/testing/selftests/kvm/demand_paging_test.c') diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c index 6be9793789f1..ab302e1f5230 100644 --- a/tools/testing/selftests/kvm/demand_paging_test.c +++ b/tools/testing/selftests/kvm/demand_paging_test.c @@ -145,6 +145,7 @@ bool quit_uffd_thread; struct uffd_handler_args { int uffd; int pipefd; + useconds_t delay; }; static void *uffd_handler_thread_fn(void *arg) @@ -152,6 +153,7 @@ static void *uffd_handler_thread_fn(void *arg) struct uffd_handler_args *uffd_args = (struct uffd_handler_args *)arg; int uffd = uffd_args->uffd; int pipefd = uffd_args->pipefd; + useconds_t delay = uffd_args->delay; int64_t pages = 0; while (!quit_uffd_thread) { @@ -212,6 +214,8 @@ static void *uffd_handler_thread_fn(void *arg) if (!(msg.event & UFFD_EVENT_PAGEFAULT)) continue; + if (delay) + usleep(delay); addr = msg.arg.pagefault.address; r = handle_uffd_page_request(uffd, addr); if (r < 0) @@ -223,7 +227,8 @@ static void *uffd_handler_thread_fn(void *arg) } static int setup_demand_paging(struct kvm_vm *vm, - pthread_t *uffd_handler_thread, int pipefd) + pthread_t *uffd_handler_thread, int pipefd, + useconds_t uffd_delay) { int uffd; struct uffdio_api uffdio_api; @@ -264,6 +269,7 @@ static int setup_demand_paging(struct kvm_vm *vm, uffd_args.uffd = uffd; uffd_args.pipefd = pipefd; + uffd_args.delay = uffd_delay; pthread_create(uffd_handler_thread, NULL, uffd_handler_thread_fn, &uffd_args); @@ -273,7 +279,8 @@ static int setup_demand_paging(struct kvm_vm *vm, #define GUEST_MEM_SHIFT 30 /* 1G */ #define PAGE_SHIFT_4K 12 -static void run_test(enum vm_guest_mode mode, bool use_uffd) +static void run_test(enum vm_guest_mode mode, bool use_uffd, + useconds_t uffd_delay) { pthread_t vcpu_thread; pthread_t uffd_handler_thread; @@ -341,7 +348,8 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd) r = pipe2(pipefd, O_CLOEXEC | O_NONBLOCK); TEST_ASSERT(!r, "Failed to set up pipefd"); - r = setup_demand_paging(vm, &uffd_handler_thread, pipefd[0]); + r = setup_demand_paging(vm, &uffd_handler_thread, pipefd[0], + uffd_delay); if (r < 0) exit(-r); } @@ -395,7 +403,7 @@ static void help(char *name) int i; puts(""); - printf("usage: %s [-h] [-m mode] [-u]\n", name); + printf("usage: %s [-h] [-m mode] [-u] [-d uffd_delay_usec]\n", name); printf(" -m: specify the guest mode ID to test\n" " (default: test all supported modes)\n" " This option may be used multiple times.\n" @@ -404,7 +412,11 @@ static void help(char *name) printf(" %d: %s%s\n", i, vm_guest_mode_string(i), guest_modes[i].supported ? " (supported)" : ""); } - printf(" -u: Use User Fault FD to handle vCPU page faults.\n"); + printf(" -u: use User Fault FD to handle vCPU page\n" + " faults.\n"); + printf(" -d: add a delay in usec to the User Fault\n" + " FD handler to simulate demand paging\n" + " overheads. 
Ignored without -u.\n"); puts(""); exit(0); } @@ -415,6 +427,7 @@ int main(int argc, char *argv[]) unsigned int mode; int opt, i; bool use_uffd = false; + useconds_t uffd_delay = 0; #ifdef __x86_64__ guest_mode_init(VM_MODE_PXXV48_4K, true, true); @@ -437,7 +450,7 @@ int main(int argc, char *argv[]) guest_mode_init(VM_MODE_P40V48_4K, true, true); #endif - while ((opt = getopt(argc, argv, "hm:u")) != -1) { + while ((opt = getopt(argc, argv, "hm:ud:")) != -1) { switch (opt) { case 'm': if (!mode_selected) { @@ -453,6 +466,11 @@ int main(int argc, char *argv[]) case 'u': use_uffd = true; break; + case 'd': + uffd_delay = strtoul(optarg, NULL, 0); + TEST_ASSERT(uffd_delay >= 0, + "A negative UFFD delay is not supported."); + break; case 'h': default: help(argv[0]); @@ -466,7 +484,7 @@ int main(int argc, char *argv[]) TEST_ASSERT(guest_modes[i].supported, "Guest mode ID %d (%s) not supported.", i, vm_guest_mode_string(i)); - run_test(i, use_uffd); + run_test(i, use_uffd, uffd_delay); } return 0; -- cgit v1.2.3 From af99e1ad7e708d1a1a4e4c1bb10a2b851974fc04 Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Thu, 23 Jan 2020 10:04:30 -0800 Subject: KVM: selftests: Add memory size parameter to the demand paging test Add an argument to allow the demand paging test to work on larger and smaller guest sizes. Signed-off-by: Ben Gardon [Rewrote parse_size() to simplify and provide user more flexibility as to how sizes are input. Also fixed size overflow assert.] Signed-off-by: Andrew Jones Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/demand_paging_test.c | 57 +++++++++++++++--------- 1 file changed, 36 insertions(+), 21 deletions(-) (limited to 'tools/testing/selftests/kvm/demand_paging_test.c') diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c index ab302e1f5230..c1880b3e3041 100644 --- a/tools/testing/selftests/kvm/demand_paging_test.c +++ b/tools/testing/selftests/kvm/demand_paging_test.c @@ -34,6 +34,8 @@ /* Default guest test virtual memory offset */ #define DEFAULT_GUEST_TEST_MEM 0xc0000000 +#define DEFAULT_GUEST_TEST_MEM_SIZE (1 << 30) /* 1G */ + /* * Guest/Host shared variables. Ensure addr_gva2hva() and/or * sync_global_to/from_guest() are used when accessing from @@ -276,11 +278,10 @@ static int setup_demand_paging(struct kvm_vm *vm, return 0; } -#define GUEST_MEM_SHIFT 30 /* 1G */ #define PAGE_SHIFT_4K 12 static void run_test(enum vm_guest_mode mode, bool use_uffd, - useconds_t uffd_delay) + useconds_t uffd_delay, uint64_t guest_memory_bytes) { pthread_t vcpu_thread; pthread_t uffd_handler_thread; @@ -289,33 +290,40 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd, int r; /* - * We reserve page table for 2 times of extra dirty mem which - * will definitely cover the original (1G+) test range. Here - * we do the calculation with 4K page size which is the - * smallest so the page number will be enough for all archs - * (e.g., 64K page size guest will need even less memory for - * page tables). + * We reserve page table for twice the ammount of memory we intend + * to use in the test region for demand paging. Here we do the + * calculation with 4K page size which is the smallest so the page + * number will be enough for all archs. (e.g., 64K page size guest + * will need even less memory for page tables). 
*/ vm = create_vm(mode, VCPU_ID, - 2ul << (GUEST_MEM_SHIFT - PAGE_SHIFT_4K), + (2 * guest_memory_bytes) >> PAGE_SHIFT_4K, guest_code); guest_page_size = vm_get_page_size(vm); - /* - * A little more than 1G of guest page sized pages. Cover the - * case where the size is not aligned to 64 pages. - */ - guest_num_pages = (1ul << (GUEST_MEM_SHIFT - - vm_get_page_shift(vm))) + 16; + + TEST_ASSERT(guest_memory_bytes % guest_page_size == 0, + "Guest memory size is not guest page size aligned."); + + guest_num_pages = guest_memory_bytes / guest_page_size; + #ifdef __s390x__ /* Round up to multiple of 1M (segment size) */ guest_num_pages = (guest_num_pages + 0xff) & ~0xffUL; #endif + /* + * If there should be more memory in the guest test region than there + * can be pages in the guest, it will definitely cause problems. + */ + TEST_ASSERT(guest_num_pages < vm_get_max_gfn(vm), + "Requested more guest memory than address space allows.\n" + " guest pages: %lx max gfn: %lx\n", + guest_num_pages, vm_get_max_gfn(vm)); host_page_size = getpagesize(); - host_num_pages = (guest_num_pages * guest_page_size) / host_page_size + - !!((guest_num_pages * guest_page_size) % - host_page_size); + TEST_ASSERT(guest_memory_bytes % host_page_size == 0, + "Guest memory size is not host page size aligned."); + host_num_pages = guest_memory_bytes / host_page_size; guest_test_phys_mem = (vm_get_max_gfn(vm) - guest_num_pages) * guest_page_size; @@ -403,7 +411,8 @@ static void help(char *name) int i; puts(""); - printf("usage: %s [-h] [-m mode] [-u] [-d uffd_delay_usec]\n", name); + printf("usage: %s [-h] [-m mode] [-u] [-d uffd_delay_usec]\n" + " [-b memory]\n", name); printf(" -m: specify the guest mode ID to test\n" " (default: test all supported modes)\n" " This option may be used multiple times.\n" @@ -417,6 +426,8 @@ static void help(char *name) printf(" -d: add a delay in usec to the User Fault\n" " FD handler to simulate demand paging\n" " overheads. Ignored without -u.\n"); + printf(" -b: specify the size of the memory region which should be\n" + " demand paged. e.g. 10M or 3G. Default: 1G\n"); puts(""); exit(0); } @@ -424,6 +435,7 @@ static void help(char *name) int main(int argc, char *argv[]) { bool mode_selected = false; + uint64_t guest_memory_bytes = DEFAULT_GUEST_TEST_MEM_SIZE; unsigned int mode; int opt, i; bool use_uffd = false; @@ -450,7 +462,7 @@ int main(int argc, char *argv[]) guest_mode_init(VM_MODE_P40V48_4K, true, true); #endif - while ((opt = getopt(argc, argv, "hm:ud:")) != -1) { + while ((opt = getopt(argc, argv, "hm:ud:b:")) != -1) { switch (opt) { case 'm': if (!mode_selected) { @@ -471,6 +483,9 @@ int main(int argc, char *argv[]) TEST_ASSERT(uffd_delay >= 0, "A negative UFFD delay is not supported."); break; + case 'b': + guest_memory_bytes = parse_size(optarg); + break; case 'h': default: help(argv[0]); @@ -484,7 +499,7 @@ int main(int argc, char *argv[]) TEST_ASSERT(guest_modes[i].supported, "Guest mode ID %d (%s) not supported.", i, vm_guest_mode_string(i)); - run_test(i, use_uffd, uffd_delay); + run_test(i, use_uffd, uffd_delay, guest_memory_bytes); } return 0; -- cgit v1.2.3 From 56a4210f4e4ed9c8ebec87d212453be8f6f8750f Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Thu, 23 Jan 2020 10:04:31 -0800 Subject: KVM: selftests: Pass args to vCPU in global vCPU args struct In preparation for supporting multiple vCPUs in the demand paging test, pass arguments to the vCPU in a consolidated global struct instead of syncing multiple globals. 
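
[Background, not part of this patch: syncing works because the guest and
host both map the test binary's globals, just at different addresses.
Conceptually, sync_global_to_guest() resolves the global's guest virtual
address to a host pointer and copies the host copy over it. A simplified
sketch follows; the real macro in the selftests' kvm_util.h may differ
in detail.]

/* Sketch: translate the global's guest address to a host-visible
 * pointer, then overwrite the guest's copy with the host's.
 */
#define sync_global_to_guest(vm, g) ({				\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	memcpy(_p, &(g), sizeof(g));				\
})

[Consolidating the per-vCPU parameters into one struct means one such
copy covers them all, instead of one sync per scalar global.]
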
Signed-off-by: Ben Gardon Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/demand_paging_test.c | 38 +++++++++++++++++------- 1 file changed, 27 insertions(+), 11 deletions(-) (limited to 'tools/testing/selftests/kvm/demand_paging_test.c') diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c index c1880b3e3041..aa39d065b3f2 100644 --- a/tools/testing/selftests/kvm/demand_paging_test.c +++ b/tools/testing/selftests/kvm/demand_paging_test.c @@ -44,7 +44,6 @@ */ static uint64_t host_page_size; static uint64_t guest_page_size; -static uint64_t guest_num_pages; static char *guest_data_prototype; @@ -61,18 +60,30 @@ static uint64_t guest_test_phys_mem; */ static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM; +struct vcpu_args { + uint64_t gva; + uint64_t pages; + + /* Only used by the host userspace part of the vCPU thread */ + int vcpu_id; + struct kvm_vm *vm; +}; + +static struct vcpu_args vcpu_args; + /* * Continuously write to the first 8 bytes of each page in the demand paging * memory region. */ static void guest_code(void) { + uint64_t gva = vcpu_args.gva; + uint64_t pages = vcpu_args.pages; int i; - for (i = 0; i < guest_num_pages; i++) { - uint64_t addr = guest_test_virt_mem; + for (i = 0; i < pages; i++) { + uint64_t addr = gva + (i * guest_page_size); - addr += i * guest_page_size; addr &= ~(host_page_size - 1); *(uint64_t *)addr = 0x0123456789ABCDEF; } @@ -87,15 +98,16 @@ static uint64_t host_num_pages; static void *vcpu_worker(void *data) { int ret; - struct kvm_vm *vm = data; + struct kvm_vm *vm = vcpu_args.vm; + int vcpu_id = vcpu_args.vcpu_id; struct kvm_run *run; - run = vcpu_state(vm, VCPU_ID); + run = vcpu_state(vm, vcpu_id); /* Let the guest access its memory */ - ret = _vcpu_run(vm, VCPU_ID); + ret = _vcpu_run(vm, vcpu_id); TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret); - if (get_ucall(vm, VCPU_ID, NULL) != UCALL_SYNC) { + if (get_ucall(vm, vcpu_id, NULL) != UCALL_SYNC) { TEST_ASSERT(false, "Invalid guest sync status: exit_reason=%s\n", exit_reason_str(run->exit_reason)); @@ -287,6 +299,7 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd, pthread_t uffd_handler_thread; int pipefd[2]; struct kvm_vm *vm; + uint64_t guest_num_pages; int r; /* @@ -372,10 +385,13 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd, /* Export the shared variables to the guest */ sync_global_to_guest(vm, host_page_size); sync_global_to_guest(vm, guest_page_size); - sync_global_to_guest(vm, guest_test_virt_mem); - sync_global_to_guest(vm, guest_num_pages); - pthread_create(&vcpu_thread, NULL, vcpu_worker, vm); + vcpu_args.vm = vm; + vcpu_args.vcpu_id = VCPU_ID; + vcpu_args.gva = guest_test_virt_mem; + vcpu_args.pages = guest_num_pages; + sync_global_to_guest(vm, vcpu_args); + pthread_create(&vcpu_thread, NULL, vcpu_worker, &vcpu_args); /* Wait for the vcpu thread to quit */ pthread_join(vcpu_thread, NULL); -- cgit v1.2.3 From 018494e6d8234c420e4f7236f502993df5584812 Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Thu, 23 Jan 2020 10:04:33 -0800 Subject: KVM: selftests: Support multiple vCPUs in demand paging test Most VMs have multiple vCPUs, the concurrent execution of which has a substantial impact on demand paging performance. Add an option to create multiple vCPUs to each access disjoint regions of memory. Signed-off-by: Ben Gardon [guest_code() can't return, use GUEST_ASSERT(). Ensure the number of guests pages is compatible with the host.] 
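
[Background, not part of this patch: the partitioning is plain
arithmetic over the single test slot; each vCPU id selects a disjoint,
equally sized slice. Variable names below mirror the patch.]

/* vCPU i's slice: [base + i * size, base + (i + 1) * size). The same
 * offset is applied to the physical and virtual bases, so the guest
 * mapping stays a simple linear relation.
 */
vm_paddr_t vcpu_gpa   = guest_test_phys_mem + (vcpu_id * vcpu_memory_bytes);
uint64_t   vcpu_gva   = guest_test_virt_mem + (vcpu_id * vcpu_memory_bytes);
uint64_t   vcpu_pages = vcpu_memory_bytes / guest_page_size;

[With -u, each slice also gets its own userfaultfd and handler thread,
so one vCPU's faults are never queued behind another's.]
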
Signed-off-by: Andrew Jones Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/demand_paging_test.c | 253 +++++++++++++++-------- 1 file changed, 171 insertions(+), 82 deletions(-) (limited to 'tools/testing/selftests/kvm/demand_paging_test.c') diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c index aa39d065b3f2..c516cece2368 100644 --- a/tools/testing/selftests/kvm/demand_paging_test.c +++ b/tools/testing/selftests/kvm/demand_paging_test.c @@ -26,7 +26,6 @@ #include "processor.h" #ifdef __NR_userfaultfd -#define VCPU_ID 1 /* The memory slot index demand page */ #define TEST_MEM_SLOT_INDEX 1 @@ -36,6 +35,14 @@ #define DEFAULT_GUEST_TEST_MEM_SIZE (1 << 30) /* 1G */ +#ifdef PRINT_PER_VCPU_UPDATES +#define PER_VCPU_DEBUG(...) DEBUG(__VA_ARGS__) +#else +#define PER_VCPU_DEBUG(...) +#endif + +#define MAX_VCPUS 512 + /* * Guest/Host shared variables. Ensure addr_gva2hva() and/or * sync_global_to/from_guest() are used when accessing from @@ -69,18 +76,24 @@ struct vcpu_args { struct kvm_vm *vm; }; -static struct vcpu_args vcpu_args; +static struct vcpu_args vcpu_args[MAX_VCPUS]; /* * Continuously write to the first 8 bytes of each page in the demand paging * memory region. */ -static void guest_code(void) +static void guest_code(uint32_t vcpu_id) { - uint64_t gva = vcpu_args.gva; - uint64_t pages = vcpu_args.pages; + uint64_t gva; + uint64_t pages; int i; + /* Make sure vCPU args data structure is not corrupt. */ + GUEST_ASSERT(vcpu_args[vcpu_id].vcpu_id == vcpu_id); + + gva = vcpu_args[vcpu_id].gva; + pages = vcpu_args[vcpu_id].pages; + for (i = 0; i < pages; i++) { uint64_t addr = gva + (i * guest_page_size); @@ -91,17 +104,15 @@ static void guest_code(void) GUEST_SYNC(1); } -/* Points to the test VM memory region on which we are doing demand paging */ -static void *host_test_mem; -static uint64_t host_num_pages; - static void *vcpu_worker(void *data) { int ret; - struct kvm_vm *vm = vcpu_args.vm; - int vcpu_id = vcpu_args.vcpu_id; + struct vcpu_args *args = (struct vcpu_args *)data; + struct kvm_vm *vm = args->vm; + int vcpu_id = args->vcpu_id; struct kvm_run *run; + vcpu_args_set(vm, vcpu_id, 1, vcpu_id); run = vcpu_state(vm, vcpu_id); /* Let the guest access its memory */ @@ -116,18 +127,34 @@ static void *vcpu_worker(void *data) return NULL; } -static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid, - uint64_t extra_mem_pages, void *guest_code) +#define PAGE_SHIFT_4K 12 +#define PTES_PER_4K_PT 512 + +static struct kvm_vm *create_vm(enum vm_guest_mode mode, int vcpus, + uint64_t vcpu_memory_bytes) { struct kvm_vm *vm; - uint64_t extra_pg_pages = extra_mem_pages / 512 * 2; + uint64_t pages = DEFAULT_GUEST_PHY_PAGES; + + /* Account for a few pages per-vCPU for stacks */ + pages += DEFAULT_STACK_PGS * vcpus; + + /* + * Reserve twice the ammount of memory needed to map the test region and + * the page table / stacks region, at 4k, for page tables. Do the + * calculation with 4K page size: the smallest of all archs. (e.g., 64K + * page size guest will need even less memory for page tables). 
+ */ + pages += (2 * pages) / PTES_PER_4K_PT; + pages += ((2 * vcpus * vcpu_memory_bytes) >> PAGE_SHIFT_4K) / + PTES_PER_4K_PT; + pages = vm_adjust_num_guest_pages(mode, pages); - vm = _vm_create(mode, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR); + vm = _vm_create(mode, pages, O_RDWR); kvm_vm_elf_load(vm, program_invocation_name, 0, 0); #ifdef __x86_64__ vm_create_irqchip(vm); #endif - vm_vcpu_add_default(vm, vcpuid, guest_code); return vm; } @@ -242,17 +269,13 @@ static void *uffd_handler_thread_fn(void *arg) static int setup_demand_paging(struct kvm_vm *vm, pthread_t *uffd_handler_thread, int pipefd, - useconds_t uffd_delay) + useconds_t uffd_delay, + struct uffd_handler_args *uffd_args, + void *hva, uint64_t len) { int uffd; struct uffdio_api uffdio_api; struct uffdio_register uffdio_register; - struct uffd_handler_args uffd_args; - - guest_data_prototype = malloc(host_page_size); - TEST_ASSERT(guest_data_prototype, - "Failed to allocate buffer for guest data pattern"); - memset(guest_data_prototype, 0xAB, host_page_size); uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK); if (uffd == -1) { @@ -267,8 +290,8 @@ static int setup_demand_paging(struct kvm_vm *vm, return -1; } - uffdio_register.range.start = (uint64_t)host_test_mem; - uffdio_register.range.len = host_num_pages * host_page_size; + uffdio_register.range.start = (uint64_t)hva; + uffdio_register.range.len = len; uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING; if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register) == -1) { DEBUG("ioctl uffdio_register failed\n"); @@ -281,44 +304,40 @@ static int setup_demand_paging(struct kvm_vm *vm, return -1; } - uffd_args.uffd = uffd; - uffd_args.pipefd = pipefd; - uffd_args.delay = uffd_delay; + uffd_args->uffd = uffd; + uffd_args->pipefd = pipefd; + uffd_args->delay = uffd_delay; pthread_create(uffd_handler_thread, NULL, uffd_handler_thread_fn, - &uffd_args); + uffd_args); + + PER_VCPU_DEBUG("Created uffd thread for HVA range [%p, %p)\n", + hva, hva + len); return 0; } -#define PAGE_SHIFT_4K 12 - static void run_test(enum vm_guest_mode mode, bool use_uffd, - useconds_t uffd_delay, uint64_t guest_memory_bytes) + useconds_t uffd_delay, int vcpus, + uint64_t vcpu_memory_bytes) { - pthread_t vcpu_thread; - pthread_t uffd_handler_thread; - int pipefd[2]; + pthread_t *vcpu_threads; + pthread_t *uffd_handler_threads = NULL; + struct uffd_handler_args *uffd_args = NULL; + int *pipefds = NULL; struct kvm_vm *vm; uint64_t guest_num_pages; + int vcpu_id; int r; - /* - * We reserve page table for twice the ammount of memory we intend - * to use in the test region for demand paging. Here we do the - * calculation with 4K page size which is the smallest so the page - * number will be enough for all archs. (e.g., 64K page size guest - * will need even less memory for page tables). 
- */ - vm = create_vm(mode, VCPU_ID, - (2 * guest_memory_bytes) >> PAGE_SHIFT_4K, - guest_code); + vm = create_vm(mode, vcpus, vcpu_memory_bytes); guest_page_size = vm_get_page_size(vm); - TEST_ASSERT(guest_memory_bytes % guest_page_size == 0, + TEST_ASSERT(vcpu_memory_bytes % guest_page_size == 0, "Guest memory size is not guest page size aligned."); - guest_num_pages = guest_memory_bytes / guest_page_size; + guest_num_pages = (vcpus * vcpu_memory_bytes) / guest_page_size; + guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages); #ifdef __s390x__ /* Round up to multiple of 1M (segment size) */ @@ -330,13 +349,13 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd, */ TEST_ASSERT(guest_num_pages < vm_get_max_gfn(vm), "Requested more guest memory than address space allows.\n" - " guest pages: %lx max gfn: %lx\n", - guest_num_pages, vm_get_max_gfn(vm)); + " guest pages: %lx max gfn: %lx vcpus: %d wss: %lx]\n", + guest_num_pages, vm_get_max_gfn(vm), vcpus, + vcpu_memory_bytes); host_page_size = getpagesize(); - TEST_ASSERT(guest_memory_bytes % host_page_size == 0, + TEST_ASSERT(vcpu_memory_bytes % host_page_size == 0, "Guest memory size is not host page size aligned."); - host_num_pages = guest_memory_bytes / host_page_size; guest_test_phys_mem = (vm_get_max_gfn(vm) - guest_num_pages) * guest_page_size; @@ -361,55 +380,114 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd, virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages * guest_page_size, 0); - /* Cache the HVA pointer of the region */ - host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem); + ucall_init(vm, NULL); + + guest_data_prototype = malloc(host_page_size); + TEST_ASSERT(guest_data_prototype, + "Failed to allocate buffer for guest data pattern"); + memset(guest_data_prototype, 0xAB, host_page_size); + + vcpu_threads = malloc(vcpus * sizeof(*vcpu_threads)); + TEST_ASSERT(vcpu_threads, "Memory allocation failed"); if (use_uffd) { - /* Set up user fault fd to handle demand paging requests. */ - r = pipe2(pipefd, O_CLOEXEC | O_NONBLOCK); - TEST_ASSERT(!r, "Failed to set up pipefd"); + uffd_handler_threads = + malloc(vcpus * sizeof(*uffd_handler_threads)); + TEST_ASSERT(uffd_handler_threads, "Memory allocation failed"); - r = setup_demand_paging(vm, &uffd_handler_thread, pipefd[0], - uffd_delay); - if (r < 0) - exit(-r); + uffd_args = malloc(vcpus * sizeof(*uffd_args)); + TEST_ASSERT(uffd_args, "Memory allocation failed"); + + pipefds = malloc(sizeof(int) * vcpus * 2); + TEST_ASSERT(pipefds, "Unable to allocate memory for pipefd"); } + for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) { + vm_paddr_t vcpu_gpa; + void *vcpu_hva; + + vm_vcpu_add_default(vm, vcpu_id, guest_code); + + vcpu_gpa = guest_test_phys_mem + (vcpu_id * vcpu_memory_bytes); + PER_VCPU_DEBUG("Added VCPU %d with test mem gpa [%lx, %lx)\n", + vcpu_id, vcpu_gpa, vcpu_gpa + vcpu_memory_bytes); + + /* Cache the HVA pointer of the region */ + vcpu_hva = addr_gpa2hva(vm, vcpu_gpa); + + if (use_uffd) { + /* + * Set up user fault fd to handle demand paging + * requests. 
+ */ + r = pipe2(&pipefds[vcpu_id * 2], + O_CLOEXEC | O_NONBLOCK); + TEST_ASSERT(!r, "Failed to set up pipefd"); + + r = setup_demand_paging(vm, + &uffd_handler_threads[vcpu_id], + pipefds[vcpu_id * 2], + uffd_delay, &uffd_args[vcpu_id], + vcpu_hva, vcpu_memory_bytes); + if (r < 0) + exit(-r); + } + #ifdef __x86_64__ - vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); -#endif -#ifdef __aarch64__ - ucall_init(vm, NULL); + vcpu_set_cpuid(vm, vcpu_id, kvm_get_supported_cpuid()); #endif + vcpu_args[vcpu_id].vm = vm; + vcpu_args[vcpu_id].vcpu_id = vcpu_id; + vcpu_args[vcpu_id].gva = guest_test_virt_mem + + (vcpu_id * vcpu_memory_bytes); + vcpu_args[vcpu_id].pages = vcpu_memory_bytes / guest_page_size; + } + /* Export the shared variables to the guest */ sync_global_to_guest(vm, host_page_size); sync_global_to_guest(vm, guest_page_size); - - vcpu_args.vm = vm; - vcpu_args.vcpu_id = VCPU_ID; - vcpu_args.gva = guest_test_virt_mem; - vcpu_args.pages = guest_num_pages; sync_global_to_guest(vm, vcpu_args); - pthread_create(&vcpu_thread, NULL, vcpu_worker, &vcpu_args); - /* Wait for the vcpu thread to quit */ - pthread_join(vcpu_thread, NULL); + DEBUG("Finished creating vCPUs and starting uffd threads\n"); + + for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) { + pthread_create(&vcpu_threads[vcpu_id], NULL, vcpu_worker, + &vcpu_args[vcpu_id]); + } + + DEBUG("Started all vCPUs\n"); + + /* Wait for the vcpu threads to quit */ + for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) { + pthread_join(vcpu_threads[vcpu_id], NULL); + PER_VCPU_DEBUG("Joined thread for vCPU %d\n", vcpu_id); + } + + DEBUG("All vCPU threads joined\n"); if (use_uffd) { char c; - /* Tell the user fault fd handler thread to quit */ - r = write(pipefd[1], &c, 1); - TEST_ASSERT(r == 1, "Unable to write to pipefd"); + /* Tell the user fault fd handler threads to quit */ + for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) { + r = write(pipefds[vcpu_id * 2 + 1], &c, 1); + TEST_ASSERT(r == 1, "Unable to write to pipefd"); - pthread_join(uffd_handler_thread, NULL); + pthread_join(uffd_handler_threads[vcpu_id], NULL); + } } ucall_uninit(vm); kvm_vm_free(vm); free(guest_data_prototype); + free(vcpu_threads); + if (use_uffd) { + free(uffd_handler_threads); + free(uffd_args); + free(pipefds); + } } struct guest_mode { @@ -428,7 +506,7 @@ static void help(char *name) puts(""); printf("usage: %s [-h] [-m mode] [-u] [-d uffd_delay_usec]\n" - " [-b memory]\n", name); + " [-b memory] [-v vcpus]\n", name); printf(" -m: specify the guest mode ID to test\n" " (default: test all supported modes)\n" " This option may be used multiple times.\n" @@ -443,7 +521,9 @@ static void help(char *name) " FD handler to simulate demand paging\n" " overheads. Ignored without -u.\n"); printf(" -b: specify the size of the memory region which should be\n" - " demand paged. e.g. 10M or 3G. Default: 1G\n"); + " demand paged by each vCPU. e.g. 
10M or 3G.\n" + " Default: 1G\n"); + printf(" -v: specify the number of vCPUs to run.\n"); puts(""); exit(0); } @@ -451,7 +531,8 @@ static void help(char *name) int main(int argc, char *argv[]) { bool mode_selected = false; - uint64_t guest_memory_bytes = DEFAULT_GUEST_TEST_MEM_SIZE; + uint64_t vcpu_memory_bytes = DEFAULT_GUEST_TEST_MEM_SIZE; + int vcpus = 1; unsigned int mode; int opt, i; bool use_uffd = false; @@ -478,7 +559,7 @@ int main(int argc, char *argv[]) guest_mode_init(VM_MODE_P40V48_4K, true, true); #endif - while ((opt = getopt(argc, argv, "hm:ud:b:")) != -1) { + while ((opt = getopt(argc, argv, "hm:ud:b:v:")) != -1) { switch (opt) { case 'm': if (!mode_selected) { @@ -500,7 +581,15 @@ int main(int argc, char *argv[]) "A negative UFFD delay is not supported."); break; case 'b': - guest_memory_bytes = parse_size(optarg); + vcpu_memory_bytes = parse_size(optarg); + break; + case 'v': + vcpus = atoi(optarg); + TEST_ASSERT(vcpus > 0, + "Must have a positive number of vCPUs"); + TEST_ASSERT(vcpus <= MAX_VCPUS, + "This test does not currently support\n" + "more than %d vCPUs.", MAX_VCPUS); break; case 'h': default: @@ -515,7 +604,7 @@ int main(int argc, char *argv[]) TEST_ASSERT(guest_modes[i].supported, "Guest mode ID %d (%s) not supported.", i, vm_guest_mode_string(i)); - run_test(i, use_uffd, uffd_delay, guest_memory_bytes); + run_test(i, use_uffd, uffd_delay, vcpus, vcpu_memory_bytes); } return 0; -- cgit v1.2.3 From f09205b99832f353088b7c82778b3f8175627620 Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Thu, 23 Jan 2020 10:04:34 -0800 Subject: KVM: selftests: Time guest demand paging In order to quantify demand paging performance, time guest execution during demand paging. Signed-off-by: Ben Gardon [Move timespec-diff to test_util.h] Signed-off-by: Andrew Jones Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/demand_paging_test.c | 50 +++++++++++++++++++++++- 1 file changed, 49 insertions(+), 1 deletion(-) (limited to 'tools/testing/selftests/kvm/demand_paging_test.c') diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c index c516cece2368..8cdb8871e4d8 100644 --- a/tools/testing/selftests/kvm/demand_paging_test.c +++ b/tools/testing/selftests/kvm/demand_paging_test.c @@ -35,6 +35,12 @@ #define DEFAULT_GUEST_TEST_MEM_SIZE (1 << 30) /* 1G */ +#ifdef PRINT_PER_PAGE_UPDATES +#define PER_PAGE_DEBUG(...) DEBUG(__VA_ARGS__) +#else +#define PER_PAGE_DEBUG(...) +#endif + #ifdef PRINT_PER_VCPU_UPDATES #define PER_VCPU_DEBUG(...) 
DEBUG(__VA_ARGS__) #else @@ -111,10 +117,14 @@ static void *vcpu_worker(void *data) struct kvm_vm *vm = args->vm; int vcpu_id = args->vcpu_id; struct kvm_run *run; + struct timespec start; + struct timespec end; vcpu_args_set(vm, vcpu_id, 1, vcpu_id); run = vcpu_state(vm, vcpu_id); + clock_gettime(CLOCK_MONOTONIC, &start); + /* Let the guest access its memory */ ret = _vcpu_run(vm, vcpu_id); TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret); @@ -124,6 +134,11 @@ static void *vcpu_worker(void *data) exit_reason_str(run->exit_reason)); } + clock_gettime(CLOCK_MONOTONIC, &end); + PER_VCPU_DEBUG("vCPU %d execution time: %lld.%.9lds\n", vcpu_id, + (long long)(timespec_diff(start, end).tv_sec), + timespec_diff(start, end).tv_nsec); + return NULL; } @@ -161,6 +176,8 @@ static struct kvm_vm *create_vm(enum vm_guest_mode mode, int vcpus, static int handle_uffd_page_request(int uffd, uint64_t addr) { pid_t tid; + struct timespec start; + struct timespec end; struct uffdio_copy copy; int r; @@ -171,6 +188,8 @@ static int handle_uffd_page_request(int uffd, uint64_t addr) copy.len = host_page_size; copy.mode = 0; + clock_gettime(CLOCK_MONOTONIC, &start); + r = ioctl(uffd, UFFDIO_COPY, ©); if (r == -1) { DEBUG("Failed Paged in 0x%lx from thread %d with errno: %d\n", @@ -178,6 +197,13 @@ static int handle_uffd_page_request(int uffd, uint64_t addr) return r; } + clock_gettime(CLOCK_MONOTONIC, &end); + + PER_PAGE_DEBUG("UFFDIO_COPY %d \t%lld ns\n", tid, + (long long)timespec_to_ns(timespec_diff(start, end))); + PER_PAGE_DEBUG("Paged in %ld bytes at 0x%lx from thread %d\n", + host_page_size, addr, tid); + return 0; } @@ -196,7 +222,10 @@ static void *uffd_handler_thread_fn(void *arg) int pipefd = uffd_args->pipefd; useconds_t delay = uffd_args->delay; int64_t pages = 0; + struct timespec start; + struct timespec end; + clock_gettime(CLOCK_MONOTONIC, &start); while (!quit_uffd_thread) { struct uffd_msg msg; struct pollfd pollfd[2]; @@ -264,6 +293,13 @@ static void *uffd_handler_thread_fn(void *arg) pages++; } + clock_gettime(CLOCK_MONOTONIC, &end); + PER_VCPU_DEBUG("userfaulted %ld pages over %lld.%.9lds. 
(%f/sec)\n", + pages, (long long)(timespec_diff(start, end).tv_sec), + timespec_diff(start, end).tv_nsec, pages / + ((double)timespec_diff(start, end).tv_sec + + (double)timespec_diff(start, end).tv_nsec / 100000000.0)); + return NULL; } @@ -328,6 +364,8 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd, uint64_t guest_num_pages; int vcpu_id; int r; + struct timespec start; + struct timespec end; vm = create_vm(mode, vcpus, vcpu_memory_bytes); @@ -369,7 +407,6 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd, DEBUG("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem); - /* Add an extra memory slot for testing demand paging */ vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, guest_test_phys_mem, @@ -451,6 +488,8 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd, DEBUG("Finished creating vCPUs and starting uffd threads\n"); + clock_gettime(CLOCK_MONOTONIC, &start); + for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) { pthread_create(&vcpu_threads[vcpu_id], NULL, vcpu_worker, &vcpu_args[vcpu_id]); @@ -466,6 +505,8 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd, DEBUG("All vCPU threads joined\n"); + clock_gettime(CLOCK_MONOTONIC, &end); + if (use_uffd) { char c; @@ -478,6 +519,13 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd, } } + DEBUG("Total guest execution time: %lld.%.9lds\n", + (long long)(timespec_diff(start, end).tv_sec), + timespec_diff(start, end).tv_nsec); + DEBUG("Overall demand paging rate: %f pgs/sec\n", + guest_num_pages / ((double)timespec_diff(start, end).tv_sec + + (double)timespec_diff(start, end).tv_nsec / 100000000.0)); + ucall_uninit(vm); kvm_vm_free(vm); -- cgit v1.2.3 From 3439d886e4d9b79b6b226e70c08d312bd31acbd4 Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Fri, 14 Feb 2020 15:59:16 +0100 Subject: KVM: selftests: Rework debug message printing There were a few problems with the way we output "debug" messages. The first is that we used DEBUG() which is defined when NDEBUG is not defined, but NDEBUG will never be defined for kselftests because it relies too much on assert(). The next is that most of the DEBUG() messages were actually "info" messages, which users may want to turn off if they just want a silent test that either completes or asserts. Finally, a debug message output from a library function, and thus for all tests, was annoying when its information wasn't interesting for a test. Rework these messages so debug messages only output when DEBUG is defined and info messages output unless QUIET is defined. Also name the functions pr_debug and pr_info and make sure that when they're disabled we eat all the inputs. The later avoids unused variable warnings when the variables were only defined for the purpose of printing. Signed-off-by: Andrew Jones Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/demand_paging_test.c | 54 ++++++++++++------------ 1 file changed, 27 insertions(+), 27 deletions(-) (limited to 'tools/testing/selftests/kvm/demand_paging_test.c') diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c index 8cdb8871e4d8..c1e326d3ed7f 100644 --- a/tools/testing/selftests/kvm/demand_paging_test.c +++ b/tools/testing/selftests/kvm/demand_paging_test.c @@ -36,15 +36,15 @@ #define DEFAULT_GUEST_TEST_MEM_SIZE (1 << 30) /* 1G */ #ifdef PRINT_PER_PAGE_UPDATES -#define PER_PAGE_DEBUG(...) DEBUG(__VA_ARGS__) +#define PER_PAGE_DEBUG(...) printf(__VA_ARGS__) #else -#define PER_PAGE_DEBUG(...) 
+#define PER_PAGE_DEBUG(...) _no_printf(__VA_ARGS__) #endif #ifdef PRINT_PER_VCPU_UPDATES -#define PER_VCPU_DEBUG(...) DEBUG(__VA_ARGS__) +#define PER_VCPU_DEBUG(...) printf(__VA_ARGS__) #else -#define PER_VCPU_DEBUG(...) +#define PER_VCPU_DEBUG(...) _no_printf(__VA_ARGS__) #endif #define MAX_VCPUS 512 @@ -165,6 +165,8 @@ static struct kvm_vm *create_vm(enum vm_guest_mode mode, int vcpus, PTES_PER_4K_PT; pages = vm_adjust_num_guest_pages(mode, pages); + pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode)); + vm = _vm_create(mode, pages, O_RDWR); kvm_vm_elf_load(vm, program_invocation_name, 0, 0); #ifdef __x86_64__ @@ -192,8 +194,8 @@ static int handle_uffd_page_request(int uffd, uint64_t addr) r = ioctl(uffd, UFFDIO_COPY, ©); if (r == -1) { - DEBUG("Failed Paged in 0x%lx from thread %d with errno: %d\n", - addr, tid, errno); + pr_info("Failed Paged in 0x%lx from thread %d with errno: %d\n", + addr, tid, errno); return r; } @@ -241,19 +243,19 @@ static void *uffd_handler_thread_fn(void *arg) r = poll(pollfd, 2, -1); switch (r) { case -1: - DEBUG("poll err"); + pr_info("poll err"); continue; case 0: continue; case 1: break; default: - DEBUG("Polling uffd returned %d", r); + pr_info("Polling uffd returned %d", r); return NULL; } if (pollfd[0].revents & POLLERR) { - DEBUG("uffd revents has POLLERR"); + pr_info("uffd revents has POLLERR"); return NULL; } @@ -271,13 +273,12 @@ static void *uffd_handler_thread_fn(void *arg) if (r == -1) { if (errno == EAGAIN) continue; - DEBUG("Read of uffd gor errno %d", errno); + pr_info("Read of uffd gor errno %d", errno); return NULL; } if (r != sizeof(msg)) { - DEBUG("Read on uffd returned unexpected size: %d bytes", - r); + pr_info("Read on uffd returned unexpected size: %d bytes", r); return NULL; } @@ -315,14 +316,14 @@ static int setup_demand_paging(struct kvm_vm *vm, uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK); if (uffd == -1) { - DEBUG("uffd creation failed\n"); + pr_info("uffd creation failed\n"); return -1; } uffdio_api.api = UFFD_API; uffdio_api.features = 0; if (ioctl(uffd, UFFDIO_API, &uffdio_api) == -1) { - DEBUG("ioctl uffdio_api failed\n"); + pr_info("ioctl uffdio_api failed\n"); return -1; } @@ -330,13 +331,13 @@ static int setup_demand_paging(struct kvm_vm *vm, uffdio_register.range.len = len; uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING; if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register) == -1) { - DEBUG("ioctl uffdio_register failed\n"); + pr_info("ioctl uffdio_register failed\n"); return -1; } if ((uffdio_register.ioctls & UFFD_API_RANGE_IOCTLS) != UFFD_API_RANGE_IOCTLS) { - DEBUG("unexpected userfaultfd ioctl set\n"); + pr_info("unexpected userfaultfd ioctl set\n"); return -1; } @@ -404,8 +405,7 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd, guest_test_phys_mem &= ~((1 << 20) - 1); #endif - DEBUG("guest physical test memory offset: 0x%lx\n", - guest_test_phys_mem); + pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem); /* Add an extra memory slot for testing demand paging */ vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, @@ -486,7 +486,7 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd, sync_global_to_guest(vm, guest_page_size); sync_global_to_guest(vm, vcpu_args); - DEBUG("Finished creating vCPUs and starting uffd threads\n"); + pr_info("Finished creating vCPUs and starting uffd threads\n"); clock_gettime(CLOCK_MONOTONIC, &start); @@ -495,7 +495,7 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd, &vcpu_args[vcpu_id]); } - 
DEBUG("Started all vCPUs\n"); + pr_info("Started all vCPUs\n"); /* Wait for the vcpu threads to quit */ for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) { @@ -503,7 +503,7 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd, PER_VCPU_DEBUG("Joined thread for vCPU %d\n", vcpu_id); } - DEBUG("All vCPU threads joined\n"); + pr_info("All vCPU threads joined\n"); clock_gettime(CLOCK_MONOTONIC, &end); @@ -519,12 +519,12 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd, } } - DEBUG("Total guest execution time: %lld.%.9lds\n", - (long long)(timespec_diff(start, end).tv_sec), - timespec_diff(start, end).tv_nsec); - DEBUG("Overall demand paging rate: %f pgs/sec\n", - guest_num_pages / ((double)timespec_diff(start, end).tv_sec + - (double)timespec_diff(start, end).tv_nsec / 100000000.0)); + pr_info("Total guest execution time: %lld.%.9lds\n", + (long long)(timespec_diff(start, end).tv_sec), + timespec_diff(start, end).tv_nsec); + pr_info("Overall demand paging rate: %f pgs/sec\n", + guest_num_pages / ((double)timespec_diff(start, end).tv_sec + + (double)timespec_diff(start, end).tv_nsec / 100000000.0)); ucall_uninit(vm); kvm_vm_free(vm); -- cgit v1.2.3 From 331b4de9a7e780f9648ced959c08f4d593aa2e7b Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Thu, 12 Mar 2020 11:40:55 +0100 Subject: KVM: selftests: s390x: Provide additional num-guest-pages adjustment s390 requires 1M aligned guest sizes. Embedding the rounding in vm_adjust_num_guest_pages() allows us to remove it from a few other places. Signed-off-by: Andrew Jones Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/demand_paging_test.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'tools/testing/selftests/kvm/demand_paging_test.c') diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c index c1e326d3ed7f..ae086c5dc118 100644 --- a/tools/testing/selftests/kvm/demand_paging_test.c +++ b/tools/testing/selftests/kvm/demand_paging_test.c @@ -378,10 +378,6 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd, guest_num_pages = (vcpus * vcpu_memory_bytes) / guest_page_size; guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages); -#ifdef __s390x__ - /* Round up to multiple of 1M (segment size) */ - guest_num_pages = (guest_num_pages + 0xff) & ~0xffUL; -#endif /* * If there should be more memory in the guest test region than there * can be pages in the guest, it will definitely cause problems. -- cgit v1.2.3 From d9eaf19ecc12668caf280f3d8e24b22ff5ba716b Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Tue, 10 Mar 2020 10:15:55 +0100 Subject: KVM: selftests: Enable printf format warnings for TEST_ASSERT Use the format attribute to enable printf format warnings, and then fix them all. 
Signed-off-by: Andrew Jones Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/demand_paging_test.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools/testing/selftests/kvm/demand_paging_test.c') diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c index ae086c5dc118..8d99b6d78f89 100644 --- a/tools/testing/selftests/kvm/demand_paging_test.c +++ b/tools/testing/selftests/kvm/demand_paging_test.c @@ -384,7 +384,7 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd, */ TEST_ASSERT(guest_num_pages < vm_get_max_gfn(vm), "Requested more guest memory than address space allows.\n" - " guest pages: %lx max gfn: %lx vcpus: %d wss: %lx]\n", + " guest pages: %lx max gfn: %x vcpus: %d wss: %lx]\n", guest_num_pages, vm_get_max_gfn(vm), vcpus, vcpu_memory_bytes); -- cgit v1.2.3 From d0aac3320d1f15ae2113ddf210945c3686951330 Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Tue, 10 Mar 2020 10:15:56 +0100 Subject: KVM: selftests: Use consistent message for test skipping Signed-off-by: Andrew Jones Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/demand_paging_test.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'tools/testing/selftests/kvm/demand_paging_test.c') diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c index 8d99b6d78f89..c4fc96bd064b 100644 --- a/tools/testing/selftests/kvm/demand_paging_test.c +++ b/tools/testing/selftests/kvm/demand_paging_test.c @@ -660,8 +660,8 @@ int main(int argc, char *argv[]) int main(void) { - printf("skip: Skipping userfaultfd test (missing __NR_userfaultfd)\n"); - return KSFT_SKIP; + print_skip("__NR_userfaultfd must be present for userfaultfd test"); + return KSFT_SKIP; } #endif /* __NR_userfaultfd */ -- cgit v1.2.3 From beca54702dc694970dd9727dde59cf5f56c4dbd8 Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Fri, 13 Mar 2020 16:56:43 +0100 Subject: KVM: selftests: virt_map should take npages, not size Also correct the comment and prototype for vm_create_default(), as it takes a number of pages, not a size. Signed-off-by: Andrew Jones Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/demand_paging_test.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'tools/testing/selftests/kvm/demand_paging_test.c') diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c index c4fc96bd064b..d82f7bc060c3 100644 --- a/tools/testing/selftests/kvm/demand_paging_test.c +++ b/tools/testing/selftests/kvm/demand_paging_test.c @@ -410,8 +410,7 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd, guest_num_pages, 0); /* Do mapping for the demand paging memory slot */ - virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, - guest_num_pages * guest_page_size, 0); + virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages, 0); ucall_init(vm, NULL); -- cgit v1.2.3 From bfcaa84975fa0c75deca3e997533aaa35ffed12b Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Mon, 16 Mar 2020 18:37:03 +0100 Subject: KVM: selftests: Rework timespec functions and usage The steal_time test's timespec stop condition was wrong and should have used the timespec functions instead to avoid being wrong, but timespec_diff had a strange interface. Rework all the timespec API and its use. 
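
[Background, not part of this patch: the reworked helpers funnel all
arithmetic through nanoseconds, with timespec_sub(a, b) computing a - b.
The sketch below is consistent with the usage in the diff; the exact
bodies in test_util may differ, and timespec_ns() is local to the
sketch.]

#include <stdint.h>
#include <time.h>

int64_t timespec_to_ns(struct timespec ts)
{
	return (int64_t)ts.tv_nsec + 1000000000LL * (int64_t)ts.tv_sec;
}

static struct timespec timespec_ns(int64_t ns)	/* sketch-local */
{
	return (struct timespec){
		.tv_sec  = ns / 1000000000LL,
		.tv_nsec = ns % 1000000000LL,
	};
}

struct timespec timespec_sub(struct timespec ts1, struct timespec ts2)
{
	return timespec_ns(timespec_to_ns(ts1) - timespec_to_ns(ts2));
}
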
Signed-off-by: Andrew Jones Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/demand_paging_test.c | 37 ++++++++++-------------- 1 file changed, 16 insertions(+), 21 deletions(-) (limited to 'tools/testing/selftests/kvm/demand_paging_test.c') diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c index d82f7bc060c3..360cd3ea4cd6 100644 --- a/tools/testing/selftests/kvm/demand_paging_test.c +++ b/tools/testing/selftests/kvm/demand_paging_test.c @@ -117,8 +117,7 @@ static void *vcpu_worker(void *data) struct kvm_vm *vm = args->vm; int vcpu_id = args->vcpu_id; struct kvm_run *run; - struct timespec start; - struct timespec end; + struct timespec start, end, ts_diff; vcpu_args_set(vm, vcpu_id, 1, vcpu_id); run = vcpu_state(vm, vcpu_id); @@ -135,9 +134,9 @@ static void *vcpu_worker(void *data) } clock_gettime(CLOCK_MONOTONIC, &end); - PER_VCPU_DEBUG("vCPU %d execution time: %lld.%.9lds\n", vcpu_id, - (long long)(timespec_diff(start, end).tv_sec), - timespec_diff(start, end).tv_nsec); + ts_diff = timespec_sub(end, start); + PER_VCPU_DEBUG("vCPU %d execution time: %ld.%.9lds\n", vcpu_id, + ts_diff.tv_sec, ts_diff.tv_nsec); return NULL; } @@ -201,8 +200,8 @@ static int handle_uffd_page_request(int uffd, uint64_t addr) clock_gettime(CLOCK_MONOTONIC, &end); - PER_PAGE_DEBUG("UFFDIO_COPY %d \t%lld ns\n", tid, - (long long)timespec_to_ns(timespec_diff(start, end))); + PER_PAGE_DEBUG("UFFDIO_COPY %d \t%ld ns\n", tid, + timespec_to_ns(timespec_sub(end, start))); PER_PAGE_DEBUG("Paged in %ld bytes at 0x%lx from thread %d\n", host_page_size, addr, tid); @@ -224,8 +223,7 @@ static void *uffd_handler_thread_fn(void *arg) int pipefd = uffd_args->pipefd; useconds_t delay = uffd_args->delay; int64_t pages = 0; - struct timespec start; - struct timespec end; + struct timespec start, end, ts_diff; clock_gettime(CLOCK_MONOTONIC, &start); while (!quit_uffd_thread) { @@ -295,11 +293,10 @@ static void *uffd_handler_thread_fn(void *arg) } clock_gettime(CLOCK_MONOTONIC, &end); - PER_VCPU_DEBUG("userfaulted %ld pages over %lld.%.9lds. (%f/sec)\n", - pages, (long long)(timespec_diff(start, end).tv_sec), - timespec_diff(start, end).tv_nsec, pages / - ((double)timespec_diff(start, end).tv_sec + - (double)timespec_diff(start, end).tv_nsec / 100000000.0)); + ts_diff = timespec_sub(end, start); + PER_VCPU_DEBUG("userfaulted %ld pages over %ld.%.9lds. 
(%f/sec)\n", + pages, ts_diff.tv_sec, ts_diff.tv_nsec, + pages / ((double)ts_diff.tv_sec + (double)ts_diff.tv_nsec / 100000000.0)); return NULL; } @@ -360,13 +357,12 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd, pthread_t *vcpu_threads; pthread_t *uffd_handler_threads = NULL; struct uffd_handler_args *uffd_args = NULL; + struct timespec start, end, ts_diff; int *pipefds = NULL; struct kvm_vm *vm; uint64_t guest_num_pages; int vcpu_id; int r; - struct timespec start; - struct timespec end; vm = create_vm(mode, vcpus, vcpu_memory_bytes); @@ -514,12 +510,11 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd, } } - pr_info("Total guest execution time: %lld.%.9lds\n", - (long long)(timespec_diff(start, end).tv_sec), - timespec_diff(start, end).tv_nsec); + ts_diff = timespec_sub(end, start); + pr_info("Total guest execution time: %ld.%.9lds\n", + ts_diff.tv_sec, ts_diff.tv_nsec); pr_info("Overall demand paging rate: %f pgs/sec\n", - guest_num_pages / ((double)timespec_diff(start, end).tv_sec + - (double)timespec_diff(start, end).tv_nsec / 100000000.0)); + guest_num_pages / ((double)ts_diff.tv_sec + (double)ts_diff.tv_nsec / 100000000.0)); ucall_uninit(vm); kvm_vm_free(vm); -- cgit v1.2.3
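
[A note on the rate computations above: the 100000000.0 divisor in the
pages-per-second expressions has eight zeros (1e8), while a second is
1e9 nanoseconds, so the fractional term comes out 10x too large. Routing
the math through timespec_to_ns() from the reworked API, as in this
sketch (pages_per_sec() is hypothetical, not from the series), leaves no
split seconds/nanoseconds arithmetic to mistype.]

#define NSEC_PER_SEC	1000000000.0

/* Hypothetical helper: one time unit, one division. */
static double pages_per_sec(int64_t pages, struct timespec start,
			    struct timespec end)
{
	int64_t ns = timespec_to_ns(timespec_sub(end, start));

	return pages / (ns / NSEC_PER_SEC);
}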