diff options
Diffstat (limited to 'tools/testing')
198 files changed, 2853 insertions, 2838 deletions
diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c index feedd5ab7058..0607913a3022 100644 --- a/tools/testing/radix-tree/maple.c +++ b/tools/testing/radix-tree/maple.c @@ -2,7 +2,7 @@ /* * maple_tree.c: Userspace testing for maple tree test-suite * Copyright (c) 2018-2022 Oracle Corporation - * Author: Liam R. Howlett <Liam.Howlett@Oracle.com> + * Author: Liam R. Howlett <liam@infradead.org> * * Any tests that require internal knowledge of the tree or threads and other * difficult to handle in kernel tests. diff --git a/tools/testing/selftests/arm64/gcs/gcs-util.h b/tools/testing/selftests/arm64/gcs/gcs-util.h index c99a6b39ac14..7a81bb07ed4b 100644 --- a/tools/testing/selftests/arm64/gcs/gcs-util.h +++ b/tools/testing/selftests/arm64/gcs/gcs-util.h @@ -18,12 +18,6 @@ #ifndef NT_ARM_GCS #define NT_ARM_GCS 0x410 - -struct user_gcs { - __u64 features_enabled; - __u64 features_locked; - __u64 gcspr_el0; -}; #endif /* Shadow Stack/Guarded Control Stack interface */ diff --git a/tools/testing/selftests/arm64/gcs/libc-gcs.c b/tools/testing/selftests/arm64/gcs/libc-gcs.c index 17b2fabfec38..72e82bfbecc9 100644 --- a/tools/testing/selftests/arm64/gcs/libc-gcs.c +++ b/tools/testing/selftests/arm64/gcs/libc-gcs.c @@ -16,6 +16,7 @@ #include <asm/hwcap.h> #include <asm/mman.h> +#include <asm/ptrace.h> #include <linux/compiler.h> diff --git a/tools/testing/selftests/drivers/net/README.rst b/tools/testing/selftests/drivers/net/README.rst index c8588436c224..c6bed9a985bc 100644 --- a/tools/testing/selftests/drivers/net/README.rst +++ b/tools/testing/selftests/drivers/net/README.rst @@ -211,8 +211,8 @@ Avoid libraries and frameworks Test files should be relatively self contained. The libraries should only include very core or non-trivial code. -It may be tempting to "factor out" the common code, but fight that urge. -Library code increases the barrier of entry, and complexity in general. 
+It may be tempting to "factor out" the common code to lib/py/, but fight that +urge. Library code increases the barrier of entry, and complexity in general. Avoid mixing test code and boilerplate ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -290,6 +290,12 @@ or:: def test(cfg, mode, protocol): pass +Linters +~~~~~~~ + +We expect clean ``ruff check`` and ``pylint --disable=R``. +The code should be clean, avoid disabling pylint warnings explicitly! + Running tests CI-style ====================== diff --git a/tools/testing/selftests/kselftest.h b/tools/testing/selftests/kselftest.h index 6d809f08ab7b..60838b61a2da 100644 --- a/tools/testing/selftests/kselftest.h +++ b/tools/testing/selftests/kselftest.h @@ -450,7 +450,7 @@ static inline __noreturn __printf(1, 2) void ksft_exit_skip(const char *msg, ... */ if (ksft_plan || ksft_test_num()) { ksft_cnt.ksft_xskip++; - printf("ok %u # SKIP ", 1 + ksft_test_num()); + printf("ok %u # SKIP ", ksft_test_num()); } else { printf("1..0 # SKIP "); } diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h index 75fb016cd190..cfdce9cd252e 100644 --- a/tools/testing/selftests/kselftest_harness.h +++ b/tools/testing/selftests/kselftest_harness.h @@ -76,7 +76,7 @@ static inline void __kselftest_memset_safe(void *s, int c, size_t n) memset(s, c, n); } -#define KSELFTEST_PRIO_TEST_F 20000 +#define KSELFTEST_PRIO_TEST 20000 #define KSELFTEST_PRIO_XFAIL 20001 #define TEST_TIMEOUT_DEFAULT 30 @@ -194,7 +194,7 @@ static inline void __kselftest_memset_safe(void *s, int c, size_t n) .fixture = &_fixture_global, \ .termsig = _signal, \ .timeout = TEST_TIMEOUT_DEFAULT, }; \ - static void __attribute__((constructor)) _register_##test_name(void) \ + static void __attribute__((constructor(KSELFTEST_PRIO_TEST))) _register_##test_name(void) \ { \ __register_test(&_##test_name##_object); \ } \ @@ -238,7 +238,7 @@ static inline void __kselftest_memset_safe(void *s, int c, size_t n) 
FIXTURE_VARIANT(fixture_name); \ static struct __fixture_metadata _##fixture_name##_fixture_object = \ { .name = #fixture_name, }; \ - static void __attribute__((constructor)) \ + static void __attribute__((constructor(KSELFTEST_PRIO_TEST))) \ _register_##fixture_name##_data(void) \ { \ __register_fixture(&_##fixture_name##_fixture_object); \ @@ -364,7 +364,7 @@ static inline void __kselftest_memset_safe(void *s, int c, size_t n) _##fixture_name##_##variant_name##_object = \ { .name = #variant_name, \ .data = &_##fixture_name##_##variant_name##_variant}; \ - static void __attribute__((constructor)) \ + static void __attribute__((constructor(KSELFTEST_PRIO_TEST))) \ _register_##fixture_name##_##variant_name(void) \ { \ __register_fixture_variant(&_##fixture_name##_fixture_object, \ @@ -468,7 +468,7 @@ static inline void __kselftest_memset_safe(void *s, int c, size_t n) fixture_name##_teardown(_metadata, self, variant); \ } \ static struct __test_metadata *_##fixture_name##_##test_name##_object; \ - static void __attribute__((constructor(KSELFTEST_PRIO_TEST_F))) \ + static void __attribute__((constructor(KSELFTEST_PRIO_TEST))) \ _register_##fixture_name##_##test_name(void) \ { \ struct __test_metadata *object = mmap(NULL, sizeof(*object), \ @@ -1323,7 +1323,7 @@ static int test_harness_run(int argc, char **argv) return KSFT_FAIL; } -static void __attribute__((constructor)) __constructor_order_first(void) +static void __attribute__((constructor(KSELFTEST_PRIO_TEST))) __constructor_order_first(void) { __constructor_order_forward = true; } diff --git a/tools/testing/selftests/kvm/access_tracking_perf_test.c b/tools/testing/selftests/kvm/access_tracking_perf_test.c index b058f27b2141..e5bbdb5bbdc3 100644 --- a/tools/testing/selftests/kvm/access_tracking_perf_test.c +++ b/tools/testing/selftests/kvm/access_tracking_perf_test.c @@ -101,15 +101,15 @@ struct test_params { enum vm_mem_backing_src_type backing_src; /* The amount of memory to allocate for each vCPU. 
*/ - uint64_t vcpu_memory_bytes; + u64 vcpu_memory_bytes; /* The number of vCPUs to create in the VM. */ int nr_vcpus; }; -static uint64_t pread_uint64(int fd, const char *filename, uint64_t index) +static u64 pread_u64(int fd, const char *filename, u64 index) { - uint64_t value; + u64 value; off_t offset = index * sizeof(value); TEST_ASSERT(pread(fd, &value, sizeof(value), offset) == sizeof(value), @@ -123,13 +123,13 @@ static uint64_t pread_uint64(int fd, const char *filename, uint64_t index) #define PAGEMAP_PRESENT (1ULL << 63) #define PAGEMAP_PFN_MASK ((1ULL << 55) - 1) -static uint64_t lookup_pfn(int pagemap_fd, struct kvm_vm *vm, uint64_t gva) +static u64 lookup_pfn(int pagemap_fd, struct kvm_vm *vm, gva_t gva) { - uint64_t hva = (uint64_t) addr_gva2hva(vm, gva); - uint64_t entry; - uint64_t pfn; + u64 hva = (u64)addr_gva2hva(vm, gva); + u64 entry; + u64 pfn; - entry = pread_uint64(pagemap_fd, "pagemap", hva / getpagesize()); + entry = pread_u64(pagemap_fd, "pagemap", hva / getpagesize()); if (!(entry & PAGEMAP_PRESENT)) return 0; @@ -139,16 +139,16 @@ static uint64_t lookup_pfn(int pagemap_fd, struct kvm_vm *vm, uint64_t gva) return pfn; } -static bool is_page_idle(int page_idle_fd, uint64_t pfn) +static bool is_page_idle(int page_idle_fd, u64 pfn) { - uint64_t bits = pread_uint64(page_idle_fd, "page_idle", pfn / 64); + u64 bits = pread_u64(page_idle_fd, "page_idle", pfn / 64); return !!((bits >> (pfn % 64)) & 1); } -static void mark_page_idle(int page_idle_fd, uint64_t pfn) +static void mark_page_idle(int page_idle_fd, u64 pfn) { - uint64_t bits = 1ULL << (pfn % 64); + u64 bits = 1ULL << (pfn % 64); TEST_ASSERT(pwrite(page_idle_fd, &bits, 8, 8 * (pfn / 64)) == 8, "Set page_idle bits for PFN 0x%" PRIx64, pfn); @@ -174,11 +174,11 @@ static void pageidle_mark_vcpu_memory_idle(struct kvm_vm *vm, struct memstress_vcpu_args *vcpu_args) { int vcpu_idx = vcpu_args->vcpu_idx; - uint64_t base_gva = vcpu_args->gva; - uint64_t pages = vcpu_args->pages; - uint64_t page; 
- uint64_t still_idle = 0; - uint64_t no_pfn = 0; + gva_t base_gva = vcpu_args->gva; + u64 pages = vcpu_args->pages; + u64 page; + u64 still_idle = 0; + u64 no_pfn = 0; int page_idle_fd; int pagemap_fd; @@ -193,8 +193,8 @@ static void pageidle_mark_vcpu_memory_idle(struct kvm_vm *vm, TEST_ASSERT(pagemap_fd > 0, "Failed to open pagemap."); for (page = 0; page < pages; page++) { - uint64_t gva = base_gva + page * memstress_args.guest_page_size; - uint64_t pfn = lookup_pfn(pagemap_fd, vm, gva); + gva_t gva = base_gva + page * memstress_args.guest_page_size; + u64 pfn = lookup_pfn(pagemap_fd, vm, gva); if (!pfn) { no_pfn++; @@ -297,10 +297,10 @@ static void lru_gen_mark_memory_idle(struct kvm_vm *vm) lru_gen_last_gen = new_gen; } -static void assert_ucall(struct kvm_vcpu *vcpu, uint64_t expected_ucall) +static void assert_ucall(struct kvm_vcpu *vcpu, u64 expected_ucall) { struct ucall uc; - uint64_t actual_ucall = get_ucall(vcpu, &uc); + u64 actual_ucall = get_ucall(vcpu, &uc); TEST_ASSERT(expected_ucall == actual_ucall, "Guest exited unexpectedly (expected ucall %" PRIu64 @@ -417,7 +417,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) */ test_pages = params->nr_vcpus * params->vcpu_memory_bytes / max(memstress_args.guest_page_size, - (uint64_t)getpagesize()); + (u64)getpagesize()); memstress_start_vcpu_threads(nr_vcpus, vcpu_thread_main); diff --git a/tools/testing/selftests/kvm/arch_timer.c b/tools/testing/selftests/kvm/arch_timer.c index cf8fb67104f1..90c475a61b22 100644 --- a/tools/testing/selftests/kvm/arch_timer.c +++ b/tools/testing/selftests/kvm/arch_timer.c @@ -78,9 +78,9 @@ static void *test_vcpu_run(void *arg) return NULL; } -static uint32_t test_get_pcpu(void) +static u32 test_get_pcpu(void) { - uint32_t pcpu; + u32 pcpu; unsigned int nproc_conf; cpu_set_t online_cpuset; @@ -98,7 +98,7 @@ static uint32_t test_get_pcpu(void) static int test_migrate_vcpu(unsigned int vcpu_idx) { int ret; - uint32_t new_pcpu = test_get_pcpu(); + u32 new_pcpu = 
test_get_pcpu(); pr_debug("Migrating vCPU: %u to pCPU: %u\n", vcpu_idx, new_pcpu); diff --git a/tools/testing/selftests/kvm/arm64/aarch32_id_regs.c b/tools/testing/selftests/kvm/arm64/aarch32_id_regs.c index 713005b6f508..8a019cbaf4c4 100644 --- a/tools/testing/selftests/kvm/arm64/aarch32_id_regs.c +++ b/tools/testing/selftests/kvm/arm64/aarch32_id_regs.c @@ -66,7 +66,7 @@ static void test_guest_raz(struct kvm_vcpu *vcpu) } } -static uint64_t raz_wi_reg_ids[] = { +static u64 raz_wi_reg_ids[] = { KVM_ARM64_SYS_REG(SYS_ID_PFR0_EL1), KVM_ARM64_SYS_REG(SYS_ID_PFR1_EL1), KVM_ARM64_SYS_REG(SYS_ID_DFR0_EL1), @@ -94,8 +94,8 @@ static void test_user_raz_wi(struct kvm_vcpu *vcpu) int i; for (i = 0; i < ARRAY_SIZE(raz_wi_reg_ids); i++) { - uint64_t reg_id = raz_wi_reg_ids[i]; - uint64_t val; + u64 reg_id = raz_wi_reg_ids[i]; + u64 val; val = vcpu_get_reg(vcpu, reg_id); TEST_ASSERT_EQ(val, 0); @@ -111,7 +111,7 @@ static void test_user_raz_wi(struct kvm_vcpu *vcpu) } } -static uint64_t raz_invariant_reg_ids[] = { +static u64 raz_invariant_reg_ids[] = { KVM_ARM64_SYS_REG(SYS_ID_AFR0_EL1), KVM_ARM64_SYS_REG(sys_reg(3, 0, 0, 3, 3)), KVM_ARM64_SYS_REG(SYS_ID_DFR1_EL1), @@ -123,8 +123,8 @@ static void test_user_raz_invariant(struct kvm_vcpu *vcpu) int i, r; for (i = 0; i < ARRAY_SIZE(raz_invariant_reg_ids); i++) { - uint64_t reg_id = raz_invariant_reg_ids[i]; - uint64_t val; + u64 reg_id = raz_invariant_reg_ids[i]; + u64 val; val = vcpu_get_reg(vcpu, reg_id); TEST_ASSERT_EQ(val, 0); @@ -142,7 +142,7 @@ static void test_user_raz_invariant(struct kvm_vcpu *vcpu) static bool vcpu_aarch64_only(struct kvm_vcpu *vcpu) { - uint64_t val, el0; + u64 val, el0; val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1)); diff --git a/tools/testing/selftests/kvm/arm64/arch_timer.c b/tools/testing/selftests/kvm/arm64/arch_timer.c index d592a4515399..5fa5c0ec2b3e 100644 --- a/tools/testing/selftests/kvm/arm64/arch_timer.c +++ b/tools/testing/selftests/kvm/arm64/arch_timer.c @@ -56,7 +56,7 @@ 
static void guest_validate_irq(unsigned int intid, struct test_vcpu_shared_data *shared_data) { enum guest_stage stage = shared_data->guest_stage; - uint64_t xcnt = 0, xcnt_diff_us, cval = 0; + u64 xcnt = 0, xcnt_diff_us, cval = 0; unsigned long xctl = 0; unsigned int timer_irq = 0; unsigned int accessor; @@ -105,7 +105,7 @@ static void guest_validate_irq(unsigned int intid, static void guest_irq_handler(struct ex_regs *regs) { unsigned int intid = gic_get_and_ack_irq(); - uint32_t cpu = guest_get_vcpuid(); + u32 cpu = guest_get_vcpuid(); struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu]; guest_validate_irq(intid, shared_data); @@ -116,7 +116,7 @@ static void guest_irq_handler(struct ex_regs *regs) static void guest_run_stage(struct test_vcpu_shared_data *shared_data, enum guest_stage stage) { - uint32_t irq_iter, config_iter; + u32 irq_iter, config_iter; shared_data->guest_stage = stage; shared_data->nr_iter = 0; @@ -140,7 +140,7 @@ static void guest_run_stage(struct test_vcpu_shared_data *shared_data, static void guest_code(void) { - uint32_t cpu = guest_get_vcpuid(); + u32 cpu = guest_get_vcpuid(); struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu]; local_irq_disable(); diff --git a/tools/testing/selftests/kvm/arm64/arch_timer_edge_cases.c b/tools/testing/selftests/kvm/arm64/arch_timer_edge_cases.c index 993c9e38e729..f7625eb711d6 100644 --- a/tools/testing/selftests/kvm/arm64/arch_timer_edge_cases.c +++ b/tools/testing/selftests/kvm/arm64/arch_timer_edge_cases.c @@ -23,25 +23,25 @@ #include "vgic.h" /* Depends on counter width. */ -static uint64_t CVAL_MAX; +static u64 CVAL_MAX; /* tval is a signed 32-bit int. */ -static const int32_t TVAL_MAX = INT32_MAX; -static const int32_t TVAL_MIN = INT32_MIN; +static const s32 TVAL_MAX = INT32_MAX; +static const s32 TVAL_MIN = INT32_MIN; /* After how much time we say there is no IRQ. 
*/ -static const uint32_t TIMEOUT_NO_IRQ_US = 50000; +static const u32 TIMEOUT_NO_IRQ_US = 50000; /* Counter value to use as the starting one for most tests. Set to CVAL_MAX/2 */ -static uint64_t DEF_CNT; +static u64 DEF_CNT; /* Number of runs. */ -static const uint32_t NR_TEST_ITERS_DEF = 5; +static const u32 NR_TEST_ITERS_DEF = 5; /* Default wait test time in ms. */ -static const uint32_t WAIT_TEST_MS = 10; +static const u32 WAIT_TEST_MS = 10; /* Default "long" wait test time in ms. */ -static const uint32_t LONG_WAIT_TEST_MS = 100; +static const u32 LONG_WAIT_TEST_MS = 100; /* Shared with IRQ handler. */ struct test_vcpu_shared_data { @@ -53,9 +53,9 @@ struct test_args { /* Virtual or physical timer and counter tests. */ enum arch_timer timer; /* Delay used for most timer tests. */ - uint64_t wait_ms; + u64 wait_ms; /* Delay used in the test_long_timer_delays test. */ - uint64_t long_wait_ms; + u64 long_wait_ms; /* Number of iterations. */ int iterations; /* Whether to test the physical timer. 
*/ @@ -82,12 +82,12 @@ enum sync_cmd { NO_USERSPACE_CMD, }; -typedef void (*sleep_method_t)(enum arch_timer timer, uint64_t usec); +typedef void (*sleep_method_t)(enum arch_timer timer, u64 usec); -static void sleep_poll(enum arch_timer timer, uint64_t usec); -static void sleep_sched_poll(enum arch_timer timer, uint64_t usec); -static void sleep_in_userspace(enum arch_timer timer, uint64_t usec); -static void sleep_migrate(enum arch_timer timer, uint64_t usec); +static void sleep_poll(enum arch_timer timer, u64 usec); +static void sleep_sched_poll(enum arch_timer timer, u64 usec); +static void sleep_in_userspace(enum arch_timer timer, u64 usec); +static void sleep_migrate(enum arch_timer timer, u64 usec); sleep_method_t sleep_method[] = { sleep_poll, @@ -115,14 +115,14 @@ enum timer_view { TIMER_TVAL, }; -static void assert_irqs_handled(uint32_t n) +static void assert_irqs_handled(u32 n) { int h = atomic_read(&shared_data.handled); __GUEST_ASSERT(h == n, "Handled %d IRQS but expected %d", h, n); } -static void userspace_cmd(uint64_t cmd) +static void userspace_cmd(u64 cmd) { GUEST_SYNC_ARGS(cmd, 0, 0, 0, 0); } @@ -132,12 +132,12 @@ static void userspace_migrate_vcpu(void) userspace_cmd(USERSPACE_MIGRATE_SELF); } -static void userspace_sleep(uint64_t usecs) +static void userspace_sleep(u64 usecs) { GUEST_SYNC_ARGS(USERSPACE_USLEEP, usecs, 0, 0, 0); } -static void set_counter(enum arch_timer timer, uint64_t counter) +static void set_counter(enum arch_timer timer, u64 counter) { GUEST_SYNC_ARGS(SET_COUNTER_VALUE, counter, timer, 0, 0); } @@ -146,8 +146,8 @@ static void guest_irq_handler(struct ex_regs *regs) { unsigned int intid = gic_get_and_ack_irq(); enum arch_timer timer; - uint64_t cnt, cval; - uint32_t ctl; + u64 cnt, cval; + u32 ctl; bool timer_condition, istatus; if (intid == IAR_SPURIOUS) { @@ -178,8 +178,8 @@ out: gic_set_eoi(intid); } -static void set_cval_irq(enum arch_timer timer, uint64_t cval_cycles, - uint32_t ctl) +static void set_cval_irq(enum 
arch_timer timer, u64 cval_cycles, + u32 ctl) { atomic_set(&shared_data.handled, 0); atomic_set(&shared_data.spurious, 0); @@ -187,8 +187,8 @@ static void set_cval_irq(enum arch_timer timer, uint64_t cval_cycles, timer_set_ctl(timer, ctl); } -static void set_tval_irq(enum arch_timer timer, uint64_t tval_cycles, - uint32_t ctl) +static void set_tval_irq(enum arch_timer timer, u64 tval_cycles, + u32 ctl) { atomic_set(&shared_data.handled, 0); atomic_set(&shared_data.spurious, 0); @@ -196,7 +196,7 @@ static void set_tval_irq(enum arch_timer timer, uint64_t tval_cycles, timer_set_ctl(timer, ctl); } -static void set_xval_irq(enum arch_timer timer, uint64_t xval, uint32_t ctl, +static void set_xval_irq(enum arch_timer timer, u64 xval, u32 ctl, enum timer_view tv) { switch (tv) { @@ -275,13 +275,13 @@ static void wait_migrate_poll_for_irq(void) * Sleep for usec microseconds by polling in the guest or in * userspace (e.g. userspace_cmd=USERSPACE_SCHEDULE). */ -static void guest_poll(enum arch_timer test_timer, uint64_t usec, +static void guest_poll(enum arch_timer test_timer, u64 usec, enum sync_cmd usp_cmd) { - uint64_t cycles = usec_to_cycles(usec); + u64 cycles = usec_to_cycles(usec); /* Whichever timer we are testing with, sleep with the other. 
*/ enum arch_timer sleep_timer = 1 - test_timer; - uint64_t start = timer_get_cntct(sleep_timer); + u64 start = timer_get_cntct(sleep_timer); while ((timer_get_cntct(sleep_timer) - start) < cycles) { if (usp_cmd == NO_USERSPACE_CMD) @@ -291,22 +291,22 @@ static void guest_poll(enum arch_timer test_timer, uint64_t usec, } } -static void sleep_poll(enum arch_timer timer, uint64_t usec) +static void sleep_poll(enum arch_timer timer, u64 usec) { guest_poll(timer, usec, NO_USERSPACE_CMD); } -static void sleep_sched_poll(enum arch_timer timer, uint64_t usec) +static void sleep_sched_poll(enum arch_timer timer, u64 usec) { guest_poll(timer, usec, USERSPACE_SCHED_YIELD); } -static void sleep_migrate(enum arch_timer timer, uint64_t usec) +static void sleep_migrate(enum arch_timer timer, u64 usec) { guest_poll(timer, usec, USERSPACE_MIGRATE_SELF); } -static void sleep_in_userspace(enum arch_timer timer, uint64_t usec) +static void sleep_in_userspace(enum arch_timer timer, u64 usec) { userspace_sleep(usec); } @@ -315,15 +315,15 @@ static void sleep_in_userspace(enum arch_timer timer, uint64_t usec) * Reset the timer state to some nice values like the counter not being close * to the edge, and the control register masked and disabled. */ -static void reset_timer_state(enum arch_timer timer, uint64_t cnt) +static void reset_timer_state(enum arch_timer timer, u64 cnt) { set_counter(timer, cnt); timer_set_ctl(timer, CTL_IMASK); } -static void test_timer_xval(enum arch_timer timer, uint64_t xval, +static void test_timer_xval(enum arch_timer timer, u64 xval, enum timer_view tv, irq_wait_method_t wm, bool reset_state, - uint64_t reset_cnt) + u64 reset_cnt) { local_irq_disable(); @@ -348,23 +348,23 @@ static void test_timer_xval(enum arch_timer timer, uint64_t xval, * the "runner", like: tools/testing/selftests/kselftest/runner.sh. 
*/ -static void test_timer_cval(enum arch_timer timer, uint64_t cval, +static void test_timer_cval(enum arch_timer timer, u64 cval, irq_wait_method_t wm, bool reset_state, - uint64_t reset_cnt) + u64 reset_cnt) { test_timer_xval(timer, cval, TIMER_CVAL, wm, reset_state, reset_cnt); } -static void test_timer_tval(enum arch_timer timer, int32_t tval, +static void test_timer_tval(enum arch_timer timer, s32 tval, irq_wait_method_t wm, bool reset_state, - uint64_t reset_cnt) + u64 reset_cnt) { - test_timer_xval(timer, (uint64_t) tval, TIMER_TVAL, wm, reset_state, + test_timer_xval(timer, (u64)tval, TIMER_TVAL, wm, reset_state, reset_cnt); } -static void test_xval_check_no_irq(enum arch_timer timer, uint64_t xval, - uint64_t usec, enum timer_view timer_view, +static void test_xval_check_no_irq(enum arch_timer timer, u64 xval, + u64 usec, enum timer_view timer_view, sleep_method_t guest_sleep) { local_irq_disable(); @@ -379,17 +379,17 @@ static void test_xval_check_no_irq(enum arch_timer timer, uint64_t xval, assert_irqs_handled(0); } -static void test_cval_no_irq(enum arch_timer timer, uint64_t cval, - uint64_t usec, sleep_method_t wm) +static void test_cval_no_irq(enum arch_timer timer, u64 cval, + u64 usec, sleep_method_t wm) { test_xval_check_no_irq(timer, cval, usec, TIMER_CVAL, wm); } -static void test_tval_no_irq(enum arch_timer timer, int32_t tval, uint64_t usec, +static void test_tval_no_irq(enum arch_timer timer, s32 tval, u64 usec, sleep_method_t wm) { - /* tval will be cast to an int32_t in test_xval_check_no_irq */ - test_xval_check_no_irq(timer, (uint64_t) tval, usec, TIMER_TVAL, wm); + /* tval will be cast to an s32 in test_xval_check_no_irq */ + test_xval_check_no_irq(timer, (u64)tval, usec, TIMER_TVAL, wm); } /* Test masking/unmasking a timer using the timer mask (not the IRQ mask). */ @@ -463,7 +463,7 @@ static void test_timers_fired_multiple_times(enum arch_timer timer) * timeout for the wait: we use the wfi instruction. 
*/ static void test_reprogramming_timer(enum arch_timer timer, irq_wait_method_t wm, - int32_t delta_1_ms, int32_t delta_2_ms) + s32 delta_1_ms, s32 delta_2_ms) { local_irq_disable(); reset_timer_state(timer, DEF_CNT); @@ -488,7 +488,7 @@ static void test_reprogramming_timer(enum arch_timer timer, irq_wait_method_t wm static void test_reprogram_timers(enum arch_timer timer) { int i; - uint64_t base_wait = test_args.wait_ms; + u64 base_wait = test_args.wait_ms; for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) { /* @@ -504,8 +504,8 @@ static void test_reprogram_timers(enum arch_timer timer) static void test_basic_functionality(enum arch_timer timer) { - int32_t tval = (int32_t) msec_to_cycles(test_args.wait_ms); - uint64_t cval = DEF_CNT + msec_to_cycles(test_args.wait_ms); + s32 tval = (s32)msec_to_cycles(test_args.wait_ms); + u64 cval = DEF_CNT + msec_to_cycles(test_args.wait_ms); int i; for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) { @@ -593,7 +593,7 @@ static void test_set_cnt_after_tval_max(enum arch_timer timer, irq_wait_method_t reset_timer_state(timer, DEF_CNT); set_cval_irq(timer, - (uint64_t) TVAL_MAX + + (u64)TVAL_MAX + msec_to_cycles(test_args.wait_ms) / 2, CTL_ENABLE); set_counter(timer, TVAL_MAX); @@ -608,7 +608,7 @@ static void test_set_cnt_after_tval_max(enum arch_timer timer, irq_wait_method_t /* Test timers set for: cval = now + TVAL_MAX + wait_ms / 2 */ static void test_timers_above_tval_max(enum arch_timer timer) { - uint64_t cval; + u64 cval; int i; /* @@ -638,8 +638,8 @@ static void test_timers_above_tval_max(enum arch_timer timer) * sets the counter to cnt_1, the [c|t]val, the counter to cnt_2, and * then waits for an IRQ. 
*/ -static void test_set_cnt_after_xval(enum arch_timer timer, uint64_t cnt_1, - uint64_t xval, uint64_t cnt_2, +static void test_set_cnt_after_xval(enum arch_timer timer, u64 cnt_1, + u64 xval, u64 cnt_2, irq_wait_method_t wm, enum timer_view tv) { local_irq_disable(); @@ -662,8 +662,8 @@ static void test_set_cnt_after_xval(enum arch_timer timer, uint64_t cnt_1, * then waits for an IRQ. */ static void test_set_cnt_after_xval_no_irq(enum arch_timer timer, - uint64_t cnt_1, uint64_t xval, - uint64_t cnt_2, + u64 cnt_1, u64 xval, + u64 cnt_2, sleep_method_t guest_sleep, enum timer_view tv) { @@ -684,31 +684,31 @@ static void test_set_cnt_after_xval_no_irq(enum arch_timer timer, timer_set_ctl(timer, CTL_IMASK); } -static void test_set_cnt_after_tval(enum arch_timer timer, uint64_t cnt_1, - int32_t tval, uint64_t cnt_2, +static void test_set_cnt_after_tval(enum arch_timer timer, u64 cnt_1, + s32 tval, u64 cnt_2, irq_wait_method_t wm) { test_set_cnt_after_xval(timer, cnt_1, tval, cnt_2, wm, TIMER_TVAL); } -static void test_set_cnt_after_cval(enum arch_timer timer, uint64_t cnt_1, - uint64_t cval, uint64_t cnt_2, +static void test_set_cnt_after_cval(enum arch_timer timer, u64 cnt_1, + u64 cval, u64 cnt_2, irq_wait_method_t wm) { test_set_cnt_after_xval(timer, cnt_1, cval, cnt_2, wm, TIMER_CVAL); } static void test_set_cnt_after_tval_no_irq(enum arch_timer timer, - uint64_t cnt_1, int32_t tval, - uint64_t cnt_2, sleep_method_t wm) + u64 cnt_1, s32 tval, + u64 cnt_2, sleep_method_t wm) { test_set_cnt_after_xval_no_irq(timer, cnt_1, tval, cnt_2, wm, TIMER_TVAL); } static void test_set_cnt_after_cval_no_irq(enum arch_timer timer, - uint64_t cnt_1, uint64_t cval, - uint64_t cnt_2, sleep_method_t wm) + u64 cnt_1, u64 cval, + u64 cnt_2, sleep_method_t wm) { test_set_cnt_after_xval_no_irq(timer, cnt_1, cval, cnt_2, wm, TIMER_CVAL); @@ -718,7 +718,7 @@ static void test_set_cnt_after_cval_no_irq(enum arch_timer timer, static void test_move_counters_ahead_of_timers(enum arch_timer 
timer) { int i; - int32_t tval; + s32 tval; for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) { irq_wait_method_t wm = irq_wait_method[i]; @@ -730,8 +730,7 @@ static void test_move_counters_ahead_of_timers(enum arch_timer timer) test_set_cnt_after_tval(timer, 0, -1, DEF_CNT + 1, wm); test_set_cnt_after_tval(timer, 0, -1, TVAL_MAX, wm); tval = TVAL_MAX; - test_set_cnt_after_tval(timer, 0, tval, (uint64_t) tval + 1, - wm); + test_set_cnt_after_tval(timer, 0, tval, (u64)tval + 1, wm); } } @@ -754,8 +753,8 @@ static void test_move_counters_behind_timers(enum arch_timer timer) static void test_timers_in_the_past(enum arch_timer timer) { - int32_t tval = -1 * (int32_t) msec_to_cycles(test_args.wait_ms); - uint64_t cval; + s32 tval = -1 * (s32)msec_to_cycles(test_args.wait_ms); + u64 cval; int i; for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) { @@ -790,8 +789,8 @@ static void test_timers_in_the_past(enum arch_timer timer) static void test_long_timer_delays(enum arch_timer timer) { - int32_t tval = (int32_t) msec_to_cycles(test_args.long_wait_ms); - uint64_t cval = DEF_CNT + msec_to_cycles(test_args.long_wait_ms); + s32 tval = (s32)msec_to_cycles(test_args.long_wait_ms); + u64 cval = DEF_CNT + msec_to_cycles(test_args.long_wait_ms); int i; for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) { @@ -846,11 +845,11 @@ static void guest_code(enum arch_timer timer) static cpu_set_t default_cpuset; -static uint32_t next_pcpu(void) +static u32 next_pcpu(void) { - uint32_t max = get_nprocs(); - uint32_t cur = sched_getcpu(); - uint32_t next = cur; + u32 max = get_nprocs(); + u32 cur = sched_getcpu(); + u32 next = cur; cpu_set_t cpuset = default_cpuset; TEST_ASSERT(max > 1, "Need at least two physical cpus"); @@ -862,7 +861,7 @@ static uint32_t next_pcpu(void) return next; } -static void kvm_set_cntxct(struct kvm_vcpu *vcpu, uint64_t cnt, +static void kvm_set_cntxct(struct kvm_vcpu *vcpu, u64 cnt, enum arch_timer timer) { if (timer == PHYSICAL) @@ -874,7 +873,7 @@ static void 
kvm_set_cntxct(struct kvm_vcpu *vcpu, uint64_t cnt, static void handle_sync(struct kvm_vcpu *vcpu, struct ucall *uc) { enum sync_cmd cmd = uc->args[1]; - uint64_t val = uc->args[2]; + u64 val = uc->args[2]; enum arch_timer timer = uc->args[3]; switch (cmd) { @@ -1018,8 +1017,8 @@ static bool parse_args(int argc, char *argv[]) static void set_counter_defaults(void) { - const uint64_t MIN_ROLLOVER_SECS = 40ULL * 365 * 24 * 3600; - uint64_t freq = read_sysreg(CNTFRQ_EL0); + const u64 MIN_ROLLOVER_SECS = 40ULL * 365 * 24 * 3600; + u64 freq = read_sysreg(CNTFRQ_EL0); int width = ilog2(MIN_ROLLOVER_SECS * freq); width = clamp(width, 56, 64); diff --git a/tools/testing/selftests/kvm/arm64/debug-exceptions.c b/tools/testing/selftests/kvm/arm64/debug-exceptions.c index 1d431de8729c..3eb4b1b6682d 100644 --- a/tools/testing/selftests/kvm/arm64/debug-exceptions.c +++ b/tools/testing/selftests/kvm/arm64/debug-exceptions.c @@ -31,14 +31,14 @@ extern unsigned char sw_bp, sw_bp2, hw_bp, hw_bp2, bp_svc, bp_brk, hw_wp, ss_start, hw_bp_ctx; extern unsigned char iter_ss_begin, iter_ss_end; -static volatile uint64_t sw_bp_addr, hw_bp_addr; -static volatile uint64_t wp_addr, wp_data_addr; -static volatile uint64_t svc_addr; -static volatile uint64_t ss_addr[4], ss_idx; -#define PC(v) ((uint64_t)&(v)) +static volatile u64 sw_bp_addr, hw_bp_addr; +static volatile u64 wp_addr, wp_data_addr; +static volatile u64 svc_addr; +static volatile u64 ss_addr[4], ss_idx; +#define PC(v) ((u64)&(v)) #define GEN_DEBUG_WRITE_REG(reg_name) \ -static void write_##reg_name(int num, uint64_t val) \ +static void write_##reg_name(int num, u64 val) \ { \ switch (num) { \ case 0: \ @@ -102,8 +102,8 @@ GEN_DEBUG_WRITE_REG(dbgwvr) static void reset_debug_state(void) { - uint8_t brps, wrps, i; - uint64_t dfr0; + u8 brps, wrps, i; + u64 dfr0; asm volatile("msr daifset, #8"); @@ -140,7 +140,7 @@ static void enable_os_lock(void) static void enable_monitor_debug_exceptions(void) { - uint64_t mdscr; + u64 mdscr; asm 
volatile("msr daifclr, #8"); @@ -149,9 +149,9 @@ static void enable_monitor_debug_exceptions(void) isb(); } -static void install_wp(uint8_t wpn, uint64_t addr) +static void install_wp(u8 wpn, u64 addr) { - uint32_t wcr; + u32 wcr; wcr = DBGWCR_LEN8 | DBGWCR_RD | DBGWCR_WR | DBGWCR_EL1 | DBGWCR_E; write_dbgwcr(wpn, wcr); @@ -162,9 +162,9 @@ static void install_wp(uint8_t wpn, uint64_t addr) enable_monitor_debug_exceptions(); } -static void install_hw_bp(uint8_t bpn, uint64_t addr) +static void install_hw_bp(u8 bpn, u64 addr) { - uint32_t bcr; + u32 bcr; bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E; write_dbgbcr(bpn, bcr); @@ -174,11 +174,10 @@ static void install_hw_bp(uint8_t bpn, uint64_t addr) enable_monitor_debug_exceptions(); } -static void install_wp_ctx(uint8_t addr_wp, uint8_t ctx_bp, uint64_t addr, - uint64_t ctx) +static void install_wp_ctx(u8 addr_wp, u8 ctx_bp, u64 addr, u64 ctx) { - uint32_t wcr; - uint64_t ctx_bcr; + u32 wcr; + u64 ctx_bcr; /* Setup a context-aware breakpoint for Linked Context ID Match */ ctx_bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E | @@ -188,7 +187,7 @@ static void install_wp_ctx(uint8_t addr_wp, uint8_t ctx_bp, uint64_t addr, /* Setup a linked watchpoint (linked to the context-aware breakpoint) */ wcr = DBGWCR_LEN8 | DBGWCR_RD | DBGWCR_WR | DBGWCR_EL1 | DBGWCR_E | - DBGWCR_WT_LINK | ((uint32_t)ctx_bp << DBGWCR_LBN_SHIFT); + DBGWCR_WT_LINK | ((u32)ctx_bp << DBGWCR_LBN_SHIFT); write_dbgwcr(addr_wp, wcr); write_dbgwvr(addr_wp, addr); isb(); @@ -196,10 +195,9 @@ static void install_wp_ctx(uint8_t addr_wp, uint8_t ctx_bp, uint64_t addr, enable_monitor_debug_exceptions(); } -void install_hw_bp_ctx(uint8_t addr_bp, uint8_t ctx_bp, uint64_t addr, - uint64_t ctx) +void install_hw_bp_ctx(u8 addr_bp, u8 ctx_bp, u64 addr, u64 ctx) { - uint32_t addr_bcr, ctx_bcr; + u32 addr_bcr, ctx_bcr; /* Setup a context-aware breakpoint for Linked Context ID Match */ ctx_bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E | @@ 
-213,7 +211,7 @@ void install_hw_bp_ctx(uint8_t addr_bp, uint8_t ctx_bp, uint64_t addr, */ addr_bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E | DBGBCR_BT_ADDR_LINK_CTX | - ((uint32_t)ctx_bp << DBGBCR_LBN_SHIFT); + ((u32)ctx_bp << DBGBCR_LBN_SHIFT); write_dbgbcr(addr_bp, addr_bcr); write_dbgbvr(addr_bp, addr); isb(); @@ -223,7 +221,7 @@ void install_hw_bp_ctx(uint8_t addr_bp, uint8_t ctx_bp, uint64_t addr, static void install_ss(void) { - uint64_t mdscr; + u64 mdscr; asm volatile("msr daifclr, #8"); @@ -234,9 +232,9 @@ static void install_ss(void) static volatile char write_data; -static void guest_code(uint8_t bpn, uint8_t wpn, uint8_t ctx_bpn) +static void guest_code(u8 bpn, u8 wpn, u8 ctx_bpn) { - uint64_t ctx = 0xabcdef; /* a random context number */ + u64 ctx = 0xabcdef; /* a random context number */ /* Software-breakpoint */ reset_debug_state(); @@ -377,8 +375,8 @@ static void guest_svc_handler(struct ex_regs *regs) static void guest_code_ss(int test_cnt) { - uint64_t i; - uint64_t bvr, wvr, w_bvr, w_wvr; + u64 i; + u64 bvr, wvr, w_bvr, w_wvr; for (i = 0; i < test_cnt; i++) { /* Bits [1:0] of dbg{b,w}vr are RES0 */ @@ -416,12 +414,12 @@ static void guest_code_ss(int test_cnt) GUEST_DONE(); } -static int debug_version(uint64_t id_aa64dfr0) +static int debug_version(u64 id_aa64dfr0) { return FIELD_GET(ID_AA64DFR0_EL1_DebugVer, id_aa64dfr0); } -static void test_guest_debug_exceptions(uint8_t bpn, uint8_t wpn, uint8_t ctx_bpn) +static void test_guest_debug_exceptions(u8 bpn, u8 wpn, u8 ctx_bpn) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; @@ -468,8 +466,8 @@ void test_single_step_from_userspace(int test_cnt) struct kvm_vm *vm; struct ucall uc; struct kvm_run *run; - uint64_t pc, cmd; - uint64_t test_pc = 0; + u64 pc, cmd; + u64 test_pc = 0; bool ss_enable = false; struct kvm_guest_debug debug = {}; @@ -506,7 +504,7 @@ void test_single_step_from_userspace(int test_cnt) "Unexpected pc 0x%lx (expected 0x%lx)", pc, test_pc); - if ((pc + 4) == 
(uint64_t)&iter_ss_end) { + if ((pc + 4) == (u64)&iter_ss_end) { test_pc = 0; debug.control = KVM_GUESTDBG_ENABLE; ss_enable = false; @@ -519,8 +517,8 @@ void test_single_step_from_userspace(int test_cnt) * iter_ss_end, the pc for the next KVM_EXIT_DEBUG should * be the current pc + 4. */ - if ((pc >= (uint64_t)&iter_ss_begin) && - (pc < (uint64_t)&iter_ss_end)) + if ((pc >= (u64)&iter_ss_begin) && + (pc < (u64)&iter_ss_end)) test_pc = pc + 4; else test_pc = 0; @@ -533,9 +531,9 @@ void test_single_step_from_userspace(int test_cnt) * Run debug testing using the various breakpoint#, watchpoint# and * context-aware breakpoint# with the given ID_AA64DFR0_EL1 configuration. */ -void test_guest_debug_exceptions_all(uint64_t aa64dfr0) +void test_guest_debug_exceptions_all(u64 aa64dfr0) { - uint8_t brp_num, wrp_num, ctx_brp_num, normal_brp_num, ctx_brp_base; + u8 brp_num, wrp_num, ctx_brp_num, normal_brp_num, ctx_brp_base; int b, w, c; /* Number of breakpoints */ @@ -580,7 +578,7 @@ int main(int argc, char *argv[]) struct kvm_vm *vm; int opt; int ss_iteration = 10000; - uint64_t aa64dfr0; + u64 aa64dfr0; vm = vm_create_with_one_vcpu(&vcpu, guest_code); aa64dfr0 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1)); diff --git a/tools/testing/selftests/kvm/arm64/hypercalls.c b/tools/testing/selftests/kvm/arm64/hypercalls.c index bf038a0371f4..5d96cdf382c4 100644 --- a/tools/testing/selftests/kvm/arm64/hypercalls.c +++ b/tools/testing/selftests/kvm/arm64/hypercalls.c @@ -29,9 +29,9 @@ #define KVM_REG_ARM_VENDOR_HYP_BMAP_2_RESET_VAL 0 struct kvm_fw_reg_info { - uint64_t reg; /* Register definition */ - uint64_t max_feat_bit; /* Bit that represents the upper limit of the feature-map */ - uint64_t reset_val; /* Reset value for the register */ + u64 reg; /* Register definition */ + u64 max_feat_bit; /* Bit that represents the upper limit of the feature-map */ + u64 reset_val; /* Reset value for the register */ }; #define FW_REG_INFO(r) \ @@ -59,8 +59,8 @@ enum test_stage 
{ static int stage = TEST_STAGE_REG_IFACE; struct test_hvc_info { - uint32_t func_id; - uint64_t arg1; + u32 func_id; + u64 arg1; }; #define TEST_HVC_INFO(f, a1) \ @@ -152,9 +152,9 @@ static void guest_code(void) } struct st_time { - uint32_t rev; - uint32_t attr; - uint64_t st_time; + u32 rev; + u32 attr; + u64 st_time; }; #define STEAL_TIME_SIZE ((sizeof(struct st_time) + 63) & ~63) @@ -162,7 +162,7 @@ struct st_time { static void steal_time_init(struct kvm_vcpu *vcpu) { - uint64_t st_ipa = (ulong)ST_GPA_BASE; + u64 st_ipa = (ulong)ST_GPA_BASE; unsigned int gpages; gpages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, STEAL_TIME_SIZE); @@ -174,13 +174,13 @@ static void steal_time_init(struct kvm_vcpu *vcpu) static void test_fw_regs_before_vm_start(struct kvm_vcpu *vcpu) { - uint64_t val; + u64 val; unsigned int i; int ret; for (i = 0; i < ARRAY_SIZE(fw_reg_info); i++) { const struct kvm_fw_reg_info *reg_info = &fw_reg_info[i]; - uint64_t set_val; + u64 set_val; /* First 'read' should be the reset value for the reg */ val = vcpu_get_reg(vcpu, reg_info->reg); @@ -229,7 +229,7 @@ static void test_fw_regs_before_vm_start(struct kvm_vcpu *vcpu) static void test_fw_regs_after_vm_start(struct kvm_vcpu *vcpu) { - uint64_t val; + u64 val; unsigned int i; int ret; diff --git a/tools/testing/selftests/kvm/arm64/idreg-idst.c b/tools/testing/selftests/kvm/arm64/idreg-idst.c index 9ca9f125abdb..a3e84701d814 100644 --- a/tools/testing/selftests/kvm/arm64/idreg-idst.c +++ b/tools/testing/selftests/kvm/arm64/idreg-idst.c @@ -13,7 +13,7 @@ static volatile bool sys64, undef; #define __check_sr_read(r) \ ({ \ - uint64_t val; \ + u64 val; \ \ sys64 = false; \ undef = false; \ @@ -101,7 +101,7 @@ int main(int argc, char *argv[]) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; - uint64_t mmfr2; + u64 mmfr2; test_disable_default_vgic(); diff --git a/tools/testing/selftests/kvm/arm64/no-vgic.c b/tools/testing/selftests/kvm/arm64/no-vgic.c index b14686ef17d1..25b2e3222f68 100644 --- 
a/tools/testing/selftests/kvm/arm64/no-vgic.c +++ b/tools/testing/selftests/kvm/arm64/no-vgic.c @@ -15,7 +15,7 @@ static volatile bool handled; #define __check_sr_read(r) \ ({ \ - uint64_t val; \ + u64 val; \ \ handled = false; \ dsb(sy); \ @@ -33,7 +33,7 @@ static volatile bool handled; #define __check_gicv5_gicr_op(r) \ ({ \ - uint64_t val; \ + u64 val; \ \ handled = false; \ dsb(sy); \ @@ -82,7 +82,7 @@ static volatile bool handled; static void guest_code_gicv3(void) { - uint64_t val; + u64 val; /* * Check that we advertise that ID_AA64PFR0_EL1.GIC == 0, having @@ -262,7 +262,7 @@ int main(int argc, char *argv[]) struct kvm_vcpu *vcpu; struct kvm_vm *vm; bool has_v3, has_v5; - uint64_t pfr; + u64 pfr; test_disable_default_vgic(); diff --git a/tools/testing/selftests/kvm/arm64/page_fault_test.c b/tools/testing/selftests/kvm/arm64/page_fault_test.c index 4ccbd389d133..6bb3d82906b2 100644 --- a/tools/testing/selftests/kvm/arm64/page_fault_test.c +++ b/tools/testing/selftests/kvm/arm64/page_fault_test.c @@ -23,7 +23,7 @@ #define TEST_PTE_GVA 0xb0000000 #define TEST_DATA 0x0123456789ABCDEF -static uint64_t *guest_test_memory = (uint64_t *)TEST_GVA; +static u64 *guest_test_memory = (u64 *)TEST_GVA; #define CMD_NONE (0) #define CMD_SKIP_TEST (1ULL << 1) @@ -48,7 +48,7 @@ static struct event_cnt { struct test_desc { const char *name; - uint64_t mem_mark_cmd; + u64 mem_mark_cmd; /* Skip the test if any prepare function returns false */ bool (*guest_prepare[PREPARE_FN_NR])(void); void (*guest_test)(void); @@ -59,8 +59,8 @@ struct test_desc { void (*iabt_handler)(struct ex_regs *regs); void (*mmio_handler)(struct kvm_vm *vm, struct kvm_run *run); void (*fail_vcpu_run_handler)(int ret); - uint32_t pt_memslot_flags; - uint32_t data_memslot_flags; + u32 pt_memslot_flags; + u32 data_memslot_flags; bool skip; struct event_cnt expected_events; }; @@ -70,9 +70,9 @@ struct test_params { struct test_desc *test_desc; }; -static inline void flush_tlb_page(uint64_t vaddr) +static 
inline void flush_tlb_page(gva_t gva) { - uint64_t page = vaddr >> 12; + gva_t page = gva >> 12; dsb(ishst); asm volatile("tlbi vaae1is, %0" :: "r" (page)); @@ -82,7 +82,7 @@ static inline void flush_tlb_page(uint64_t vaddr) static void guest_write64(void) { - uint64_t val; + u64 val; WRITE_ONCE(*guest_test_memory, TEST_DATA); val = READ_ONCE(*guest_test_memory); @@ -92,8 +92,8 @@ static void guest_write64(void) /* Check the system for atomic instructions. */ static bool guest_check_lse(void) { - uint64_t isar0 = read_sysreg(id_aa64isar0_el1); - uint64_t atomic; + u64 isar0 = read_sysreg(id_aa64isar0_el1); + u64 atomic; atomic = FIELD_GET(ID_AA64ISAR0_EL1_ATOMIC, isar0); return atomic >= 2; @@ -101,8 +101,8 @@ static bool guest_check_lse(void) static bool guest_check_dc_zva(void) { - uint64_t dczid = read_sysreg(dczid_el0); - uint64_t dzp = FIELD_GET(DCZID_EL0_DZP, dczid); + u64 dczid = read_sysreg(dczid_el0); + u64 dzp = FIELD_GET(DCZID_EL0_DZP, dczid); return dzp == 0; } @@ -110,7 +110,7 @@ static bool guest_check_dc_zva(void) /* Compare and swap instruction. 
*/ static void guest_cas(void) { - uint64_t val; + u64 val; GUEST_ASSERT(guest_check_lse()); asm volatile(".arch_extension lse\n" @@ -122,7 +122,7 @@ static void guest_cas(void) static void guest_read64(void) { - uint64_t val; + u64 val; val = READ_ONCE(*guest_test_memory); GUEST_ASSERT_EQ(val, 0); @@ -131,7 +131,7 @@ static void guest_read64(void) /* Address translation instruction */ static void guest_at(void) { - uint64_t par; + u64 par; asm volatile("at s1e1r, %0" :: "r" (guest_test_memory)); isb(); @@ -148,7 +148,7 @@ static void guest_at(void) */ static void guest_dc_zva(void) { - uint16_t val; + u16 val; asm volatile("dc zva, %0" :: "r" (guest_test_memory)); dsb(ish); @@ -164,8 +164,8 @@ static void guest_dc_zva(void) */ static void guest_ld_preidx(void) { - uint64_t val; - uint64_t addr = TEST_GVA - 8; + u64 val; + u64 addr = TEST_GVA - 8; /* * This ends up accessing "TEST_GVA + 8 - 8", where "TEST_GVA - 8" is @@ -179,8 +179,8 @@ static void guest_ld_preidx(void) static void guest_st_preidx(void) { - uint64_t val = TEST_DATA; - uint64_t addr = TEST_GVA - 8; + u64 val = TEST_DATA; + u64 addr = TEST_GVA - 8; asm volatile("str %0, [%1, #8]!" : "+r" (val), "+r" (addr)); @@ -191,8 +191,8 @@ static void guest_st_preidx(void) static bool guest_set_ha(void) { - uint64_t mmfr1 = read_sysreg(id_aa64mmfr1_el1); - uint64_t hadbs, tcr; + u64 mmfr1 = read_sysreg(id_aa64mmfr1_el1); + u64 hadbs, tcr; /* Skip if HA is not supported. 
*/ hadbs = FIELD_GET(ID_AA64MMFR1_EL1_HAFDBS, mmfr1); @@ -208,7 +208,7 @@ static bool guest_set_ha(void) static bool guest_clear_pte_af(void) { - *((uint64_t *)TEST_PTE_GVA) &= ~PTE_AF; + *((u64 *)TEST_PTE_GVA) &= ~PTE_AF; flush_tlb_page(TEST_GVA); return true; @@ -217,7 +217,7 @@ static bool guest_clear_pte_af(void) static void guest_check_pte_af(void) { dsb(ish); - GUEST_ASSERT_EQ(*((uint64_t *)TEST_PTE_GVA) & PTE_AF, PTE_AF); + GUEST_ASSERT_EQ(*((u64 *)TEST_PTE_GVA) & PTE_AF, PTE_AF); } static void guest_check_write_in_dirty_log(void) @@ -302,26 +302,26 @@ static void no_iabt_handler(struct ex_regs *regs) static struct uffd_args { char *copy; void *hva; - uint64_t paging_size; + u64 paging_size; } pt_args, data_args; /* Returns true to continue the test, and false if it should be skipped. */ static int uffd_generic_handler(int uffd_mode, int uffd, struct uffd_msg *msg, struct uffd_args *args) { - uint64_t addr = msg->arg.pagefault.address; - uint64_t flags = msg->arg.pagefault.flags; + u64 addr = msg->arg.pagefault.address; + u64 flags = msg->arg.pagefault.flags; struct uffdio_copy copy; int ret; TEST_ASSERT(uffd_mode == UFFDIO_REGISTER_MODE_MISSING, "The only expected UFFD mode is MISSING"); - TEST_ASSERT_EQ(addr, (uint64_t)args->hva); + TEST_ASSERT_EQ(addr, (u64)args->hva); pr_debug("uffd fault: addr=%p write=%d\n", (void *)addr, !!(flags & UFFD_PAGEFAULT_FLAG_WRITE)); - copy.src = (uint64_t)args->copy; + copy.src = (u64)args->copy; copy.dst = addr; copy.len = args->paging_size; copy.mode = 0; @@ -407,7 +407,7 @@ static bool punch_hole_in_backing_store(struct kvm_vm *vm, struct userspace_mem_region *region) { void *hva = (void *)region->region.userspace_addr; - uint64_t paging_size = region->region.memory_size; + u64 paging_size = region->region.memory_size; int ret, fd = region->fd; if (fd != -1) { @@ -438,7 +438,7 @@ static void mmio_on_test_gpa_handler(struct kvm_vm *vm, struct kvm_run *run) static void mmio_no_handler(struct kvm_vm *vm, struct kvm_run 
*run) { - uint64_t data; + u64 data; memcpy(&data, run->mmio.data, sizeof(data)); pr_debug("addr=%lld len=%d w=%d data=%lx\n", @@ -449,11 +449,11 @@ static void mmio_no_handler(struct kvm_vm *vm, struct kvm_run *run) static bool check_write_in_dirty_log(struct kvm_vm *vm, struct userspace_mem_region *region, - uint64_t host_pg_nr) + u64 host_pg_nr) { unsigned long *bmap; bool first_page_dirty; - uint64_t size = region->region.memory_size; + u64 size = region->region.memory_size; /* getpage_size() is not always equal to vm->page_size */ bmap = bitmap_zalloc(size / getpagesize()); @@ -468,7 +468,7 @@ static bool handle_cmd(struct kvm_vm *vm, int cmd) { struct userspace_mem_region *data_region, *pt_region; bool continue_test = true; - uint64_t pte_gpa, pte_pg; + u64 pte_gpa, pte_pg; data_region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA); pt_region = vm_get_mem_region(vm, MEM_REGION_PT); @@ -510,7 +510,7 @@ void fail_vcpu_run_mmio_no_syndrome_handler(int ret) events.fail_vcpu_runs += 1; } -typedef uint32_t aarch64_insn_t; +typedef u32 aarch64_insn_t; extern aarch64_insn_t __exec_test[2]; noinline void __return_0x77(void) @@ -525,7 +525,7 @@ noinline void __return_0x77(void) */ static void load_exec_code_for_test(struct kvm_vm *vm) { - uint64_t *code; + u64 *code; struct userspace_mem_region *region; void *hva; @@ -552,7 +552,7 @@ static void setup_abort_handlers(struct kvm_vm *vm, struct kvm_vcpu *vcpu, static void setup_gva_maps(struct kvm_vm *vm) { struct userspace_mem_region *region; - uint64_t pte_gpa; + u64 pte_gpa; region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA); /* Map TEST_GVA first. This will install a new PTE. 
*/ @@ -574,12 +574,12 @@ enum pf_test_memslots { */ static void setup_memslots(struct kvm_vm *vm, struct test_params *p) { - uint64_t backing_src_pagesz = get_backing_src_pagesz(p->src_type); - uint64_t guest_page_size = vm->page_size; - uint64_t max_gfn = vm_compute_max_gfn(vm); + u64 backing_src_pagesz = get_backing_src_pagesz(p->src_type); + u64 guest_page_size = vm->page_size; + u64 max_gfn = vm_compute_max_gfn(vm); /* Enough for 2M of code when using 4K guest pages. */ - uint64_t code_npages = 512; - uint64_t pt_size, data_size, data_gpa; + u64 code_npages = 512; + u64 pt_size, data_size, data_gpa; /* * This test requires 1 pgd, 2 pud, 4 pmd, and 6 pte pages when using diff --git a/tools/testing/selftests/kvm/arm64/psci_test.c b/tools/testing/selftests/kvm/arm64/psci_test.c index 98e49f710aef..e775faf20868 100644 --- a/tools/testing/selftests/kvm/arm64/psci_test.c +++ b/tools/testing/selftests/kvm/arm64/psci_test.c @@ -22,8 +22,7 @@ #define CPU_ON_ENTRY_ADDR 0xfeedf00dul #define CPU_ON_CONTEXT_ID 0xdeadc0deul -static uint64_t psci_cpu_on(uint64_t target_cpu, uint64_t entry_addr, - uint64_t context_id) +static u64 psci_cpu_on(u64 target_cpu, u64 entry_addr, u64 context_id) { struct arm_smccc_res res; @@ -33,8 +32,7 @@ static uint64_t psci_cpu_on(uint64_t target_cpu, uint64_t entry_addr, return res.a0; } -static uint64_t psci_affinity_info(uint64_t target_affinity, - uint64_t lowest_affinity_level) +static u64 psci_affinity_info(u64 target_affinity, u64 lowest_affinity_level) { struct arm_smccc_res res; @@ -44,7 +42,7 @@ static uint64_t psci_affinity_info(uint64_t target_affinity, return res.a0; } -static uint64_t psci_system_suspend(uint64_t entry_addr, uint64_t context_id) +static u64 psci_system_suspend(u64 entry_addr, u64 context_id) { struct arm_smccc_res res; @@ -54,7 +52,7 @@ static uint64_t psci_system_suspend(uint64_t entry_addr, uint64_t context_id) return res.a0; } -static uint64_t psci_system_off2(uint64_t type, uint64_t cookie) +static u64 
psci_system_off2(u64 type, u64 cookie) { struct arm_smccc_res res; @@ -63,7 +61,7 @@ static uint64_t psci_system_off2(uint64_t type, uint64_t cookie) return res.a0; } -static uint64_t psci_features(uint32_t func_id) +static u64 psci_features(u32 func_id) { struct arm_smccc_res res; @@ -110,7 +108,7 @@ static void enter_guest(struct kvm_vcpu *vcpu) static void assert_vcpu_reset(struct kvm_vcpu *vcpu) { - uint64_t obs_pc, obs_x0; + u64 obs_pc, obs_x0; obs_pc = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc)); obs_x0 = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.regs[0])); @@ -123,9 +121,9 @@ static void assert_vcpu_reset(struct kvm_vcpu *vcpu) obs_x0, CPU_ON_CONTEXT_ID); } -static void guest_test_cpu_on(uint64_t target_cpu) +static void guest_test_cpu_on(u64 target_cpu) { - uint64_t target_state; + u64 target_state; GUEST_ASSERT(!psci_cpu_on(target_cpu, CPU_ON_ENTRY_ADDR, CPU_ON_CONTEXT_ID)); @@ -142,7 +140,7 @@ static void guest_test_cpu_on(uint64_t target_cpu) static void host_test_cpu_on(void) { struct kvm_vcpu *source, *target; - uint64_t target_mpidr; + u64 target_mpidr; struct kvm_vm *vm; struct ucall uc; @@ -166,7 +164,7 @@ static void host_test_cpu_on(void) static void guest_test_system_suspend(void) { - uint64_t ret; + u64 ret; /* assert that SYSTEM_SUSPEND is discoverable */ GUEST_ASSERT(!psci_features(PSCI_1_0_FN_SYSTEM_SUSPEND)); @@ -200,7 +198,7 @@ static void host_test_system_suspend(void) static void guest_test_system_off2(void) { - uint64_t ret; + u64 ret; /* assert that SYSTEM_OFF2 is discoverable */ GUEST_ASSERT(psci_features(PSCI_1_3_FN_SYSTEM_OFF2) & @@ -238,7 +236,7 @@ static void host_test_system_off2(void) { struct kvm_vcpu *source, *target; struct kvm_mp_state mps; - uint64_t psci_version = 0; + u64 psci_version = 0; int nr_shutdowns = 0; struct kvm_run *run; struct ucall uc; diff --git a/tools/testing/selftests/kvm/arm64/sea_to_user.c b/tools/testing/selftests/kvm/arm64/sea_to_user.c index 573dd790aeb8..e96d8982c28b 100644 --- 
a/tools/testing/selftests/kvm/arm64/sea_to_user.c +++ b/tools/testing/selftests/kvm/arm64/sea_to_user.c @@ -51,18 +51,16 @@ #define EINJ_OFFSET 0x01234badUL #define EINJ_GVA ((START_GVA) + (EINJ_OFFSET)) -static vm_paddr_t einj_gpa; +static gpa_t einj_gpa; static void *einj_hva; -static uint64_t einj_hpa; +static u64 einj_hpa; static bool far_invalid; -static uint64_t translate_to_host_paddr(unsigned long vaddr) +static u64 translate_hva_to_hpa(unsigned long hva) { - uint64_t pinfo; - int64_t offset = vaddr / getpagesize() * sizeof(pinfo); + u64 pinfo; + s64 offset = hva / getpagesize() * sizeof(pinfo); int fd; - uint64_t page_addr; - uint64_t paddr; fd = open("/proc/self/pagemap", O_RDONLY); if (fd < 0) @@ -77,12 +75,11 @@ static uint64_t translate_to_host_paddr(unsigned long vaddr) if ((pinfo & PAGE_PRESENT) == 0) ksft_exit_fail_perror("Page not present"); - page_addr = (pinfo & PAGE_PHYSICAL) << MIN_PAGE_SHIFT; - paddr = page_addr + (vaddr & (getpagesize() - 1)); - return paddr; + return ((pinfo & PAGE_PHYSICAL) << MIN_PAGE_SHIFT) + + (hva & (getpagesize() - 1)); } -static void write_einj_entry(const char *einj_path, uint64_t val) +static void write_einj_entry(const char *einj_path, u64 val) { char cmd[256] = {0}; FILE *cmdfile = NULL; @@ -96,7 +93,7 @@ static void write_einj_entry(const char *einj_path, uint64_t val) ksft_exit_fail_perror("Failed to write EINJ entry"); } -static void inject_uer(uint64_t paddr) +static void inject_uer(u64 hpa) { if (access("/sys/firmware/acpi/tables/EINJ", R_OK) == -1) ksft_test_result_skip("EINJ table no available in firmware"); @@ -106,7 +103,7 @@ static void inject_uer(uint64_t paddr) write_einj_entry(EINJ_ETYPE, ERROR_TYPE_MEMORY_UER); write_einj_entry(EINJ_FLAGS, MASK_MEMORY_UER); - write_einj_entry(EINJ_ADDR, paddr); + write_einj_entry(EINJ_ADDR, hpa); write_einj_entry(EINJ_MASK, ~0x0UL); write_einj_entry(EINJ_NOTRIGGER, 1); write_einj_entry(EINJ_DOIT, 1); @@ -145,10 +142,10 @@ static void setup_sigbus_handler(void) static 
void guest_code(void) { - uint64_t guest_data; + u64 guest_data; /* Consumes error will cause a SEA. */ - guest_data = *(uint64_t *)EINJ_GVA; + guest_data = *(u64 *)EINJ_GVA; GUEST_FAIL("Poison not protected by SEA: gva=%#lx, guest_data=%#lx\n", EINJ_GVA, guest_data); @@ -253,8 +250,8 @@ static struct kvm_vm *vm_create_with_sea_handler(struct kvm_vcpu **vcpu) size_t backing_page_size; size_t guest_page_size; size_t alignment; - uint64_t num_guest_pages; - vm_paddr_t start_gpa; + u64 num_guest_pages; + gpa_t start_gpa; enum vm_mem_backing_src_type src_type = VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB; struct kvm_vm *vm; @@ -278,7 +275,7 @@ static struct kvm_vm *vm_create_with_sea_handler(struct kvm_vcpu **vcpu) vm_userspace_mem_region_add( /*vm=*/vm, /*src_type=*/src_type, - /*guest_paddr=*/start_gpa, + /*gpa=*/start_gpa, /*slot=*/1, /*npages=*/num_guest_pages, /*flags=*/0); @@ -292,18 +289,18 @@ static struct kvm_vm *vm_create_with_sea_handler(struct kvm_vcpu **vcpu) static void vm_inject_memory_uer(struct kvm_vm *vm) { - uint64_t guest_data; + u64 guest_data; einj_gpa = addr_gva2gpa(vm, EINJ_GVA); einj_hva = addr_gva2hva(vm, EINJ_GVA); /* Populate certain data before injecting UER. 
*/ - *(uint64_t *)einj_hva = 0xBAADCAFE; - guest_data = *(uint64_t *)einj_hva; + *(u64 *)einj_hva = 0xBAADCAFE; + guest_data = *(u64 *)einj_hva; ksft_print_msg("Before EINJect: data=%#lx\n", guest_data); - einj_hpa = translate_to_host_paddr((unsigned long)einj_hva); + einj_hpa = translate_hva_to_hpa((unsigned long)einj_hva); ksft_print_msg("EINJ_GVA=%#lx, einj_gpa=%#lx, einj_hva=%p, einj_hpa=%#lx\n", EINJ_GVA, einj_gpa, einj_hva, einj_hpa); diff --git a/tools/testing/selftests/kvm/arm64/set_id_regs.c b/tools/testing/selftests/kvm/arm64/set_id_regs.c index 3a7e5fe9ae7a..7429a1055df5 100644 --- a/tools/testing/selftests/kvm/arm64/set_id_regs.c +++ b/tools/testing/selftests/kvm/arm64/set_id_regs.c @@ -30,20 +30,20 @@ struct reg_ftr_bits { char *name; bool sign; enum ftr_type type; - uint8_t shift; - uint64_t mask; + u8 shift; + u64 mask; /* * For FTR_EXACT, safe_val is used as the exact safe value. * For FTR_LOWER_SAFE, safe_val is used as the minimal safe value. */ - int64_t safe_val; + s64 safe_val; /* Allowed to be changed by the host after run */ bool mutable; }; struct test_feature_reg { - uint32_t reg; + u32 reg; const struct reg_ftr_bits *ftr_bits; }; @@ -275,9 +275,9 @@ static void guest_code(void) } /* Return a safe value to a given ftr_bits an ftr value */ -uint64_t get_safe_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr) +u64 get_safe_value(const struct reg_ftr_bits *ftr_bits, u64 ftr) { - uint64_t ftr_max = ftr_bits->mask >> ftr_bits->shift; + u64 ftr_max = ftr_bits->mask >> ftr_bits->shift; TEST_ASSERT(ftr_max > 1, "This test doesn't support single bit features"); @@ -329,16 +329,16 @@ uint64_t get_safe_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr) } /* Return an invalid value to a given ftr_bits an ftr value */ -uint64_t get_invalid_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr) +u64 get_invalid_value(const struct reg_ftr_bits *ftr_bits, u64 ftr) { - uint64_t ftr_max = ftr_bits->mask >> ftr_bits->shift; + u64 ftr_max = 
ftr_bits->mask >> ftr_bits->shift; TEST_ASSERT(ftr_max > 1, "This test doesn't support single bit features"); if (ftr_bits->sign == FTR_UNSIGNED) { switch (ftr_bits->type) { case FTR_EXACT: - ftr = max((uint64_t)ftr_bits->safe_val + 1, ftr + 1); + ftr = max((u64)ftr_bits->safe_val + 1, ftr + 1); break; case FTR_LOWER_SAFE: ftr++; @@ -358,7 +358,7 @@ uint64_t get_invalid_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr) } else if (ftr != ftr_max) { switch (ftr_bits->type) { case FTR_EXACT: - ftr = max((uint64_t)ftr_bits->safe_val + 1, ftr + 1); + ftr = max((u64)ftr_bits->safe_val + 1, ftr + 1); break; case FTR_LOWER_SAFE: ftr++; @@ -382,12 +382,12 @@ uint64_t get_invalid_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr) return ftr; } -static uint64_t test_reg_set_success(struct kvm_vcpu *vcpu, uint64_t reg, - const struct reg_ftr_bits *ftr_bits) +static u64 test_reg_set_success(struct kvm_vcpu *vcpu, u64 reg, + const struct reg_ftr_bits *ftr_bits) { - uint8_t shift = ftr_bits->shift; - uint64_t mask = ftr_bits->mask; - uint64_t val, new_val, ftr; + u8 shift = ftr_bits->shift; + u64 mask = ftr_bits->mask; + u64 val, new_val, ftr; val = vcpu_get_reg(vcpu, reg); ftr = (val & mask) >> shift; @@ -405,12 +405,12 @@ static uint64_t test_reg_set_success(struct kvm_vcpu *vcpu, uint64_t reg, return new_val; } -static void test_reg_set_fail(struct kvm_vcpu *vcpu, uint64_t reg, +static void test_reg_set_fail(struct kvm_vcpu *vcpu, u64 reg, const struct reg_ftr_bits *ftr_bits) { - uint8_t shift = ftr_bits->shift; - uint64_t mask = ftr_bits->mask; - uint64_t val, old_val, ftr; + u8 shift = ftr_bits->shift; + u64 mask = ftr_bits->mask; + u64 val, old_val, ftr; int r; val = vcpu_get_reg(vcpu, reg); @@ -431,7 +431,7 @@ static void test_reg_set_fail(struct kvm_vcpu *vcpu, uint64_t reg, TEST_ASSERT_EQ(val, old_val); } -static uint64_t test_reg_vals[KVM_ARM_FEATURE_ID_RANGE_SIZE]; +static u64 test_reg_vals[KVM_ARM_FEATURE_ID_RANGE_SIZE]; #define 
encoding_to_range_idx(encoding) \ KVM_ARM_FEATURE_ID_RANGE_IDX(sys_reg_Op0(encoding), sys_reg_Op1(encoding), \ @@ -441,7 +441,7 @@ static uint64_t test_reg_vals[KVM_ARM_FEATURE_ID_RANGE_SIZE]; static void test_vm_ftr_id_regs(struct kvm_vcpu *vcpu, bool aarch64_only) { - uint64_t masks[KVM_ARM_FEATURE_ID_RANGE_SIZE]; + u64 masks[KVM_ARM_FEATURE_ID_RANGE_SIZE]; struct reg_mask_range range = { .addr = (__u64)masks, }; @@ -458,8 +458,8 @@ static void test_vm_ftr_id_regs(struct kvm_vcpu *vcpu, bool aarch64_only) for (int i = 0; i < ARRAY_SIZE(test_regs); i++) { const struct reg_ftr_bits *ftr_bits = test_regs[i].ftr_bits; - uint32_t reg_id = test_regs[i].reg; - uint64_t reg = KVM_ARM64_SYS_REG(reg_id); + u32 reg_id = test_regs[i].reg; + u64 reg = KVM_ARM64_SYS_REG(reg_id); int idx; /* Get the index to masks array for the idreg */ @@ -489,11 +489,11 @@ static void test_vm_ftr_id_regs(struct kvm_vcpu *vcpu, bool aarch64_only) #define MPAM_IDREG_TEST 6 static void test_user_set_mpam_reg(struct kvm_vcpu *vcpu) { - uint64_t masks[KVM_ARM_FEATURE_ID_RANGE_SIZE]; + u64 masks[KVM_ARM_FEATURE_ID_RANGE_SIZE]; struct reg_mask_range range = { .addr = (__u64)masks, }; - uint64_t val; + u64 val; int idx, err; /* @@ -584,13 +584,13 @@ static void test_user_set_mpam_reg(struct kvm_vcpu *vcpu) #define MTE_IDREG_TEST 1 static void test_user_set_mte_reg(struct kvm_vcpu *vcpu) { - uint64_t masks[KVM_ARM_FEATURE_ID_RANGE_SIZE]; + u64 masks[KVM_ARM_FEATURE_ID_RANGE_SIZE]; struct reg_mask_range range = { .addr = (__u64)masks, }; - uint64_t val; - uint64_t mte; - uint64_t mte_frac; + u64 val; + u64 mte; + u64 mte_frac; int idx, err; val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1)); @@ -644,7 +644,7 @@ static void test_user_set_mte_reg(struct kvm_vcpu *vcpu) ksft_test_result_pass("ID_AA64PFR1_EL1.MTE_frac no longer 0xF\n"); } -static uint64_t reset_mutable_bits(uint32_t id, uint64_t val) +static u64 reset_mutable_bits(u32 id, u64 val) { struct test_feature_reg *reg = NULL; @@ 
-674,7 +674,7 @@ static void test_guest_reg_read(struct kvm_vcpu *vcpu) struct ucall uc; while (!done) { - uint64_t val; + u64 val; vcpu_run(vcpu); @@ -707,7 +707,7 @@ static void test_guest_reg_read(struct kvm_vcpu *vcpu) static void test_clidr(struct kvm_vcpu *vcpu) { - uint64_t clidr; + u64 clidr; int level; clidr = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CLIDR_EL1)); @@ -772,10 +772,10 @@ static void test_vcpu_non_ftr_id_regs(struct kvm_vcpu *vcpu) ksft_test_result_pass("%s\n", __func__); } -static void test_assert_id_reg_unchanged(struct kvm_vcpu *vcpu, uint32_t encoding) +static void test_assert_id_reg_unchanged(struct kvm_vcpu *vcpu, u32 encoding) { size_t idx = encoding_to_range_idx(encoding); - uint64_t observed; + u64 observed; observed = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(encoding)); TEST_ASSERT_EQ(reset_mutable_bits(encoding, test_reg_vals[idx]), @@ -808,7 +808,7 @@ int main(void) struct kvm_vcpu *vcpu; struct kvm_vm *vm; bool aarch64_only; - uint64_t val, el0; + u64 val, el0; int test_cnt, i, j; TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES)); diff --git a/tools/testing/selftests/kvm/arm64/smccc_filter.c b/tools/testing/selftests/kvm/arm64/smccc_filter.c index 1763b9d45400..21e41880261b 100644 --- a/tools/testing/selftests/kvm/arm64/smccc_filter.c +++ b/tools/testing/selftests/kvm/arm64/smccc_filter.c @@ -37,7 +37,7 @@ static bool test_runs_at_el2(void) for (conduit = test_runs_at_el2() ? 
SMC_INSN : HVC_INSN; \ conduit <= SMC_INSN; conduit++) -static void guest_main(uint32_t func_id, enum smccc_conduit conduit) +static void guest_main(u32 func_id, enum smccc_conduit conduit) { struct arm_smccc_res res; @@ -49,7 +49,7 @@ static void guest_main(uint32_t func_id, enum smccc_conduit conduit) GUEST_SYNC(res.a0); } -static int __set_smccc_filter(struct kvm_vm *vm, uint32_t start, uint32_t nr_functions, +static int __set_smccc_filter(struct kvm_vm *vm, u32 start, u32 nr_functions, enum kvm_smccc_filter_action action) { struct kvm_smccc_filter filter = { @@ -62,7 +62,7 @@ static int __set_smccc_filter(struct kvm_vm *vm, uint32_t start, uint32_t nr_fun KVM_ARM_VM_SMCCC_FILTER, &filter); } -static void set_smccc_filter(struct kvm_vm *vm, uint32_t start, uint32_t nr_functions, +static void set_smccc_filter(struct kvm_vm *vm, u32 start, u32 nr_functions, enum kvm_smccc_filter_action action) { int ret = __set_smccc_filter(vm, start, nr_functions, action); @@ -112,7 +112,7 @@ static void test_filter_reserved_range(void) { struct kvm_vcpu *vcpu; struct kvm_vm *vm = setup_vm(&vcpu); - uint32_t smc64_fn; + u32 smc64_fn; int r; r = __set_smccc_filter(vm, ARM_SMCCC_ARCH_WORKAROUND_1, @@ -217,7 +217,7 @@ static void test_filter_denied(void) } } -static void expect_call_fwd_to_user(struct kvm_vcpu *vcpu, uint32_t func_id, +static void expect_call_fwd_to_user(struct kvm_vcpu *vcpu, u32 func_id, enum smccc_conduit conduit) { struct kvm_run *run = vcpu->run; diff --git a/tools/testing/selftests/kvm/arm64/vgic_init.c b/tools/testing/selftests/kvm/arm64/vgic_init.c index 8d6d3a4ae4db..47e34b43afb2 100644 --- a/tools/testing/selftests/kvm/arm64/vgic_init.c +++ b/tools/testing/selftests/kvm/arm64/vgic_init.c @@ -19,7 +19,7 @@ #define NR_VCPUS 4 -#define REG_OFFSET(vcpu, offset) (((uint64_t)vcpu << 32) | offset) +#define REG_OFFSET(vcpu, offset) (((u64)vcpu << 32) | offset) #define VGIC_DEV_IS_V2(_d) ((_d) == KVM_DEV_TYPE_ARM_VGIC_V2) #define VGIC_DEV_IS_V3(_d) ((_d) == 
KVM_DEV_TYPE_ARM_VGIC_V3) @@ -27,10 +27,10 @@ struct vm_gic { struct kvm_vm *vm; int gic_fd; - uint32_t gic_dev_type; + u32 gic_dev_type; }; -static uint64_t max_phys_size; +static u64 max_phys_size; /* * Helpers to access a redistributor register and verify the ioctl() failed or @@ -39,17 +39,17 @@ static uint64_t max_phys_size; static void v3_redist_reg_get_errno(int gicv3_fd, int vcpu, int offset, int want, const char *msg) { - uint32_t ignored_val; + u32 ignored_val; int ret = __kvm_device_attr_get(gicv3_fd, KVM_DEV_ARM_VGIC_GRP_REDIST_REGS, REG_OFFSET(vcpu, offset), &ignored_val); TEST_ASSERT(ret && errno == want, "%s; want errno = %d", msg, want); } -static void v3_redist_reg_get(int gicv3_fd, int vcpu, int offset, uint32_t want, +static void v3_redist_reg_get(int gicv3_fd, int vcpu, int offset, u32 want, const char *msg) { - uint32_t val; + u32 val; kvm_device_attr_get(gicv3_fd, KVM_DEV_ARM_VGIC_GRP_REDIST_REGS, REG_OFFSET(vcpu, offset), &val); @@ -71,8 +71,8 @@ static int run_vcpu(struct kvm_vcpu *vcpu) return __vcpu_run(vcpu) ? 
-errno : 0; } -static struct vm_gic vm_gic_create_with_vcpus(uint32_t gic_dev_type, - uint32_t nr_vcpus, +static struct vm_gic vm_gic_create_with_vcpus(u32 gic_dev_type, + u32 nr_vcpus, struct kvm_vcpu *vcpus[]) { struct vm_gic v; @@ -84,7 +84,7 @@ static struct vm_gic vm_gic_create_with_vcpus(uint32_t gic_dev_type, return v; } -static struct vm_gic vm_gic_create_barebones(uint32_t gic_dev_type) +static struct vm_gic vm_gic_create_barebones(u32 gic_dev_type) { struct vm_gic v; @@ -103,9 +103,9 @@ static void vm_gic_destroy(struct vm_gic *v) } struct vgic_region_attr { - uint64_t attr; - uint64_t size; - uint64_t alignment; + u64 attr; + u64 size; + u64 alignment; }; struct vgic_region_attr gic_v3_dist_region = { @@ -143,7 +143,7 @@ struct vgic_region_attr gic_v2_cpu_region = { static void subtest_dist_rdist(struct vm_gic *v) { int ret; - uint64_t addr; + u64 addr; struct vgic_region_attr rdist; /* CPU interface in GICv2*/ struct vgic_region_attr dist; @@ -223,7 +223,7 @@ static void subtest_dist_rdist(struct vm_gic *v) /* Test the new REDIST region API */ static void subtest_v3_redist_regions(struct vm_gic *v) { - uint64_t addr, expected_addr; + u64 addr, expected_addr; int ret; ret = __kvm_has_device_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, @@ -332,7 +332,7 @@ static void subtest_v3_redist_regions(struct vm_gic *v) * VGIC KVM device is created and initialized before the secondary CPUs * get created */ -static void test_vgic_then_vcpus(uint32_t gic_dev_type) +static void test_vgic_then_vcpus(u32 gic_dev_type) { struct kvm_vcpu *vcpus[NR_VCPUS]; struct vm_gic v; @@ -353,7 +353,7 @@ static void test_vgic_then_vcpus(uint32_t gic_dev_type) } /* All the VCPUs are created before the VGIC KVM device gets initialized */ -static void test_vcpus_then_vgic(uint32_t gic_dev_type) +static void test_vcpus_then_vgic(u32 gic_dev_type) { struct kvm_vcpu *vcpus[NR_VCPUS]; struct vm_gic v; @@ -408,7 +408,7 @@ static void test_v3_new_redist_regions(void) struct kvm_vcpu 
*vcpus[NR_VCPUS]; void *dummy = NULL; struct vm_gic v; - uint64_t addr; + u64 addr; int ret; v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, NR_VCPUS, vcpus); @@ -460,7 +460,7 @@ static void test_v3_new_redist_regions(void) static void test_v3_typer_accesses(void) { struct vm_gic v; - uint64_t addr; + u64 addr; int ret, i; v.vm = vm_create(NR_VCPUS); @@ -518,7 +518,7 @@ static void test_v3_typer_accesses(void) } static struct vm_gic vm_gic_v3_create_with_vcpuids(int nr_vcpus, - uint32_t vcpuids[]) + u32 vcpuids[]) { struct vm_gic v; int i; @@ -544,9 +544,9 @@ static struct vm_gic vm_gic_v3_create_with_vcpuids(int nr_vcpus, */ static void test_v3_last_bit_redist_regions(void) { - uint32_t vcpuids[] = { 0, 3, 5, 4, 1, 2 }; + u32 vcpuids[] = { 0, 3, 5, 4, 1, 2 }; struct vm_gic v; - uint64_t addr; + u64 addr; v = vm_gic_v3_create_with_vcpuids(ARRAY_SIZE(vcpuids), vcpuids); @@ -578,9 +578,9 @@ static void test_v3_last_bit_redist_regions(void) /* Test last bit with legacy region */ static void test_v3_last_bit_single_rdist(void) { - uint32_t vcpuids[] = { 0, 3, 5, 4, 1, 2 }; + u32 vcpuids[] = { 0, 3, 5, 4, 1, 2 }; struct vm_gic v; - uint64_t addr; + u64 addr; v = vm_gic_v3_create_with_vcpuids(ARRAY_SIZE(vcpuids), vcpuids); @@ -606,7 +606,7 @@ static void test_v3_redist_ipa_range_check_at_vcpu_run(void) struct kvm_vcpu *vcpus[NR_VCPUS]; struct vm_gic v; int ret, i; - uint64_t addr; + u64 addr; v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, 1, vcpus); @@ -638,7 +638,7 @@ static void test_v3_its_region(void) { struct kvm_vcpu *vcpus[NR_VCPUS]; struct vm_gic v; - uint64_t addr; + u64 addr; int its_fd, ret; v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, NR_VCPUS, vcpus); @@ -717,11 +717,11 @@ static void test_v3_nassgicap(void) /* * Returns 0 if it's possible to create GIC device of a given type (V2 or V3). 
*/ -int test_kvm_device(uint32_t gic_dev_type) +int test_kvm_device(u32 gic_dev_type) { struct kvm_vcpu *vcpus[NR_VCPUS]; struct vm_gic v; - uint32_t other; + u32 other; int ret; v.vm = vm_create_with_vcpus(NR_VCPUS, guest_code, vcpus); @@ -968,7 +968,7 @@ static void test_v3_sysregs(void) kvm_vm_free(vm); } -void run_tests(uint32_t gic_dev_type) +void run_tests(u32 gic_dev_type) { test_vcpus_then_vgic(gic_dev_type); test_vgic_then_vcpus(gic_dev_type); diff --git a/tools/testing/selftests/kvm/arm64/vgic_irq.c b/tools/testing/selftests/kvm/arm64/vgic_irq.c index 2fb2c7939fe9..5e231998617e 100644 --- a/tools/testing/selftests/kvm/arm64/vgic_irq.c +++ b/tools/testing/selftests/kvm/arm64/vgic_irq.c @@ -24,12 +24,12 @@ * function. */ struct test_args { - uint32_t nr_irqs; /* number of KVM supported IRQs. */ + u32 nr_irqs; /* number of KVM supported IRQs. */ bool eoi_split; /* 1 is eoir+dir, 0 is eoir only */ bool level_sensitive; /* 1 is level, 0 is edge */ int kvm_max_routes; /* output of KVM_CAP_IRQ_ROUTING */ bool kvm_supports_irqfd; /* output of KVM_CAP_IRQFD */ - uint32_t shared_data; + u32 shared_data; }; /* @@ -64,15 +64,15 @@ typedef enum { struct kvm_inject_args { kvm_inject_cmd cmd; - uint32_t first_intid; - uint32_t num; + u32 first_intid; + u32 num; int level; bool expect_failure; }; /* Used on the guest side to perform the hypercall. */ -static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid, - uint32_t num, int level, bool expect_failure); +static void kvm_inject_call(kvm_inject_cmd cmd, u32 first_intid, + u32 num, int level, bool expect_failure); /* Used on the host side to get the hypercall info. */ static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc, @@ -133,8 +133,8 @@ static struct kvm_inject_desc set_active_fns[] = { for_each_supported_inject_fn((args), (t), (f)) /* Shared between the guest main thread and the IRQ handlers. 
*/ -volatile uint64_t irq_handled; -volatile uint32_t irqnr_received[MAX_SPI + 1]; +volatile u64 irq_handled; +volatile u32 irqnr_received[MAX_SPI + 1]; static void reset_stats(void) { @@ -145,25 +145,25 @@ static void reset_stats(void) irqnr_received[i] = 0; } -static uint64_t gic_read_ap1r0(void) +static u64 gic_read_ap1r0(void) { - uint64_t reg = read_sysreg_s(SYS_ICC_AP1R0_EL1); + u64 reg = read_sysreg_s(SYS_ICC_AP1R0_EL1); dsb(sy); return reg; } -static void gic_write_ap1r0(uint64_t val) +static void gic_write_ap1r0(u64 val) { write_sysreg_s(val, SYS_ICC_AP1R0_EL1); isb(); } -static void guest_set_irq_line(uint32_t intid, uint32_t level); +static void guest_set_irq_line(u32 intid, u32 level); static void guest_irq_generic_handler(bool eoi_split, bool level_sensitive) { - uint32_t intid = gic_get_and_ack_irq(); + u32 intid = gic_get_and_ack_irq(); if (intid == IAR_SPURIOUS) return; @@ -189,8 +189,8 @@ static void guest_irq_generic_handler(bool eoi_split, bool level_sensitive) GUEST_ASSERT(!gic_irq_get_pending(intid)); } -static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid, - uint32_t num, int level, bool expect_failure) +static void kvm_inject_call(kvm_inject_cmd cmd, u32 first_intid, + u32 num, int level, bool expect_failure) { struct kvm_inject_args args = { .cmd = cmd, @@ -204,7 +204,7 @@ static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid, #define GUEST_ASSERT_IAR_EMPTY() \ do { \ - uint32_t _intid; \ + u32 _intid; \ _intid = gic_get_and_ack_irq(); \ GUEST_ASSERT(_intid == IAR_SPURIOUS); \ } while (0) @@ -237,13 +237,13 @@ static void reset_priorities(struct test_args *args) gic_set_priority(i, IRQ_DEFAULT_PRIO_REG); } -static void guest_set_irq_line(uint32_t intid, uint32_t level) +static void guest_set_irq_line(u32 intid, u32 level) { kvm_inject_call(KVM_SET_IRQ_LINE, intid, 1, level, false); } static void test_inject_fail(struct test_args *args, - uint32_t intid, kvm_inject_cmd cmd) + u32 intid, kvm_inject_cmd cmd) { 
reset_stats(); @@ -255,10 +255,10 @@ static void test_inject_fail(struct test_args *args, } static void guest_inject(struct test_args *args, - uint32_t first_intid, uint32_t num, - kvm_inject_cmd cmd) + u32 first_intid, u32 num, + kvm_inject_cmd cmd) { - uint32_t i; + u32 i; reset_stats(); @@ -292,10 +292,10 @@ static void guest_inject(struct test_args *args, * deactivated yet. */ static void guest_restore_active(struct test_args *args, - uint32_t first_intid, uint32_t num, - kvm_inject_cmd cmd) + u32 first_intid, u32 num, + kvm_inject_cmd cmd) { - uint32_t prio, intid, ap1r; + u32 prio, intid, ap1r; int i; /* @@ -342,9 +342,9 @@ static void guest_restore_active(struct test_args *args, * This function should only be used in test_inject_preemption (with IRQs * masked). */ -static uint32_t wait_for_and_activate_irq(void) +static u32 wait_for_and_activate_irq(void) { - uint32_t intid; + u32 intid; do { asm volatile("wfi" : : : "memory"); @@ -360,11 +360,11 @@ static uint32_t wait_for_and_activate_irq(void) * interrupts for the whole test. 
*/ static void test_inject_preemption(struct test_args *args, - uint32_t first_intid, int num, + u32 first_intid, int num, const unsigned long *exclude, kvm_inject_cmd cmd) { - uint32_t intid, prio, step = KVM_PRIO_STEPS; + u32 intid, prio, step = KVM_PRIO_STEPS; int i; /* Set the priorities of the first (KVM_NUM_PRIOS - 1) IRQs @@ -379,7 +379,7 @@ static void test_inject_preemption(struct test_args *args, local_irq_disable(); for (i = 0; i < num; i++) { - uint32_t tmp; + u32 tmp; intid = i + first_intid; if (exclude && test_bit(i, exclude)) @@ -431,7 +431,7 @@ static void test_inject_preemption(struct test_args *args, static void test_injection(struct test_args *args, struct kvm_inject_desc *f) { - uint32_t nr_irqs = args->nr_irqs; + u32 nr_irqs = args->nr_irqs; if (f->sgi) { guest_inject(args, MIN_SGI, 1, f->cmd); @@ -451,7 +451,7 @@ static void test_injection(struct test_args *args, struct kvm_inject_desc *f) static void test_injection_failure(struct test_args *args, struct kvm_inject_desc *f) { - uint32_t bad_intid[] = { args->nr_irqs, 1020, 1024, 1120, 5120, ~0U, }; + u32 bad_intid[] = { args->nr_irqs, 1020, 1024, 1120, 5120, ~0U, }; int i; for (i = 0; i < ARRAY_SIZE(bad_intid); i++) @@ -490,7 +490,7 @@ static void test_restore_active(struct test_args *args, struct kvm_inject_desc * static void guest_code(struct test_args *args) { - uint32_t i, nr_irqs = args->nr_irqs; + u32 i, nr_irqs = args->nr_irqs; bool level_sensitive = args->level_sensitive; struct kvm_inject_desc *f, *inject_fns; @@ -529,8 +529,8 @@ static void guest_code(struct test_args *args) GUEST_DONE(); } -static void kvm_irq_line_check(struct kvm_vm *vm, uint32_t intid, int level, - struct test_args *test_args, bool expect_failure) +static void kvm_irq_line_check(struct kvm_vm *vm, u32 intid, int level, + struct test_args *test_args, bool expect_failure) { int ret; @@ -548,8 +548,8 @@ static void kvm_irq_line_check(struct kvm_vm *vm, uint32_t intid, int level, } } -void 
kvm_irq_set_level_info_check(int gic_fd, uint32_t intid, int level, - bool expect_failure) +void kvm_irq_set_level_info_check(int gic_fd, u32 intid, int level, + bool expect_failure) { if (!expect_failure) { kvm_irq_set_level_info(gic_fd, intid, level); @@ -573,17 +573,18 @@ void kvm_irq_set_level_info_check(int gic_fd, uint32_t intid, int level, } static void kvm_set_gsi_routing_irqchip_check(struct kvm_vm *vm, - uint32_t intid, uint32_t num, uint32_t kvm_max_routes, - bool expect_failure) + u32 intid, u32 num, + u32 kvm_max_routes, + bool expect_failure) { struct kvm_irq_routing *routing; int ret; - uint64_t i; + u64 i; assert(num <= kvm_max_routes && kvm_max_routes <= KVM_MAX_IRQ_ROUTES); routing = kvm_gsi_routing_create(); - for (i = intid; i < (uint64_t)intid + num; i++) + for (i = intid; i < (u64)intid + num; i++) kvm_gsi_routing_irqchip_add(routing, i - MIN_SPI, i - MIN_SPI); if (!expect_failure) { @@ -591,7 +592,7 @@ static void kvm_set_gsi_routing_irqchip_check(struct kvm_vm *vm, } else { ret = _kvm_gsi_routing_write(vm, routing); /* The kernel only checks e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS */ - if (((uint64_t)intid + num - 1 - MIN_SPI) >= KVM_IRQCHIP_NUM_PINS) + if (((u64)intid + num - 1 - MIN_SPI) >= KVM_IRQCHIP_NUM_PINS) TEST_ASSERT(ret != 0 && errno == EINVAL, "Bad intid %u did not cause KVM_SET_GSI_ROUTING " "error: rc: %i errno: %i", intid, ret, errno); @@ -602,7 +603,7 @@ static void kvm_set_gsi_routing_irqchip_check(struct kvm_vm *vm, } } -static void kvm_irq_write_ispendr_check(int gic_fd, uint32_t intid, +static void kvm_irq_write_ispendr_check(int gic_fd, u32 intid, struct kvm_vcpu *vcpu, bool expect_failure) { @@ -618,13 +619,13 @@ static void kvm_irq_write_ispendr_check(int gic_fd, uint32_t intid, } static void kvm_routing_and_irqfd_check(struct kvm_vm *vm, - uint32_t intid, uint32_t num, uint32_t kvm_max_routes, - bool expect_failure) + u32 intid, u32 num, u32 kvm_max_routes, + bool expect_failure) { int fd[MAX_SPI]; - uint64_t val; + u64 
val; int ret, f; - uint64_t i; + u64 i; /* * There is no way to try injecting an SGI or PPI as the interface @@ -643,29 +644,29 @@ static void kvm_routing_and_irqfd_check(struct kvm_vm *vm, * that no actual interrupt was injected for those cases. */ - for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) + for (f = 0, i = intid; i < (u64)intid + num; i++, f++) fd[f] = kvm_new_eventfd(); - for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) { - assert(i <= (uint64_t)UINT_MAX); + for (f = 0, i = intid; i < (u64)intid + num; i++, f++) { + assert(i <= (u64)UINT_MAX); kvm_assign_irqfd(vm, i - MIN_SPI, fd[f]); } - for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) { + for (f = 0, i = intid; i < (u64)intid + num; i++, f++) { val = 1; - ret = write(fd[f], &val, sizeof(uint64_t)); - TEST_ASSERT(ret == sizeof(uint64_t), + ret = write(fd[f], &val, sizeof(u64)); + TEST_ASSERT(ret == sizeof(u64), __KVM_SYSCALL_ERROR("write()", ret)); } - for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) + for (f = 0, i = intid; i < (u64)intid + num; i++, f++) kvm_close(fd[f]); } /* handles the valid case: intid=0xffffffff num=1 */ #define for_each_intid(first, num, tmp, i) \ for ((tmp) = (i) = (first); \ - (tmp) < (uint64_t)(first) + (uint64_t)(num); \ + (tmp) < (u64)(first) + (u64)(num); \ (tmp)++, (i)++) static void run_guest_cmd(struct kvm_vcpu *vcpu, int gic_fd, @@ -673,13 +674,13 @@ static void run_guest_cmd(struct kvm_vcpu *vcpu, int gic_fd, struct test_args *test_args) { kvm_inject_cmd cmd = inject_args->cmd; - uint32_t intid = inject_args->first_intid; - uint32_t num = inject_args->num; + u32 intid = inject_args->first_intid; + u32 num = inject_args->num; int level = inject_args->level; bool expect_failure = inject_args->expect_failure; struct kvm_vm *vm = vcpu->vm; - uint64_t tmp; - uint32_t i; + u64 tmp; + u32 i; /* handles the valid case: intid=0xffffffff num=1 */ assert(intid < UINT_MAX - num || num == 1); @@ -731,7 +732,7 @@ static void 
kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc, struct kvm_inject_args *args) { struct kvm_inject_args *kvm_args_hva; - vm_vaddr_t kvm_args_gva; + gva_t kvm_args_gva; kvm_args_gva = uc->args[1]; kvm_args_hva = (struct kvm_inject_args *)addr_gva2hva(vm, kvm_args_gva); @@ -745,14 +746,14 @@ static void print_args(struct test_args *args) args->eoi_split); } -static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split) +static void test_vgic(u32 nr_irqs, bool level_sensitive, bool eoi_split) { struct ucall uc; int gic_fd; struct kvm_vcpu *vcpu; struct kvm_vm *vm; struct kvm_inject_args inject_args; - vm_vaddr_t args_gva; + gva_t args_gva; struct test_args args = { .nr_irqs = nr_irqs, @@ -770,7 +771,7 @@ static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split) vcpu_init_descriptor_tables(vcpu); /* Setup the guest args page (so it gets the args). */ - args_gva = vm_vaddr_alloc_page(vm); + args_gva = vm_alloc_page(vm); memcpy(addr_gva2hva(vm, args_gva), &args, sizeof(args)); vcpu_args_set(vcpu, 1, args_gva); @@ -810,7 +811,7 @@ static void guest_code_asym_dir(struct test_args *args, int cpuid) gic_set_priority_mask(CPU_PRIO_MASK); if (cpuid == 0) { - uint32_t intid; + u32 intid; local_irq_disable(); @@ -848,7 +849,7 @@ static void guest_code_asym_dir(struct test_args *args, int cpuid) static void guest_code_group_en(struct test_args *args, int cpuid) { - uint32_t intid; + u32 intid; gic_init(GIC_V3, 2); @@ -896,7 +897,7 @@ static void guest_code_group_en(struct test_args *args, int cpuid) static void guest_code_timer_spi(struct test_args *args, int cpuid) { - uint32_t intid; + u32 intid; u64 val; gic_init(GIC_V3, 2); @@ -986,7 +987,7 @@ static void test_vgic_two_cpus(void *gcode) struct kvm_vcpu *vcpus[2]; struct test_args args = {}; struct kvm_vm *vm; - vm_vaddr_t args_gva; + gva_t args_gva; int gic_fd, ret; vm = vm_create_with_vcpus(2, gcode, vcpus); @@ -996,7 +997,7 @@ static void test_vgic_two_cpus(void *gcode) 
vcpu_init_descriptor_tables(vcpus[1]); /* Setup the guest args page (so it gets the args). */ - args_gva = vm_vaddr_alloc_page(vm); + args_gva = vm_alloc_page(vm); memcpy(addr_gva2hva(vm, args_gva), &args, sizeof(args)); vcpu_args_set(vcpus[0], 2, args_gva, 0); vcpu_args_set(vcpus[1], 2, args_gva, 1); @@ -1033,7 +1034,7 @@ static void help(const char *name) int main(int argc, char **argv) { - uint32_t nr_irqs = 64; + u32 nr_irqs = 64; bool default_args = true; bool level_sensitive = false; int opt; diff --git a/tools/testing/selftests/kvm/arm64/vgic_lpi_stress.c b/tools/testing/selftests/kvm/arm64/vgic_lpi_stress.c index e857a605f577..d64d434d3f06 100644 --- a/tools/testing/selftests/kvm/arm64/vgic_lpi_stress.c +++ b/tools/testing/selftests/kvm/arm64/vgic_lpi_stress.c @@ -23,7 +23,7 @@ #define GIC_LPI_OFFSET 8192 static size_t nr_iterations = 1000; -static vm_paddr_t gpa_base; +static gpa_t gpa_base; static struct kvm_vm *vm; static struct kvm_vcpu **vcpus; @@ -35,14 +35,14 @@ static struct test_data { u32 nr_devices; u32 nr_event_ids; - vm_paddr_t device_table; - vm_paddr_t collection_table; - vm_paddr_t cmdq_base; + gpa_t device_table; + gpa_t collection_table; + gpa_t cmdq_base; void *cmdq_base_va; - vm_paddr_t itt_tables; + gpa_t itt_tables; - vm_paddr_t lpi_prop_table; - vm_paddr_t lpi_pend_tables; + gpa_t lpi_prop_table; + gpa_t lpi_pend_tables; } test_data = { .nr_cpus = 1, .nr_devices = 1, @@ -73,7 +73,7 @@ static void guest_setup_its_mappings(void) /* Round-robin the LPIs to all of the vCPUs in the VM */ coll_id = 0; for (device_id = 0; device_id < nr_devices; device_id++) { - vm_paddr_t itt_base = test_data.itt_tables + (device_id * SZ_64K); + gpa_t itt_base = test_data.itt_tables + (device_id * SZ_64K); its_send_mapd_cmd(test_data.cmdq_base_va, device_id, itt_base, SZ_64K, true); @@ -188,7 +188,7 @@ static void setup_test_data(void) size_t pages_per_64k = vm_calc_num_guest_pages(vm->mode, SZ_64K); u32 nr_devices = test_data.nr_devices; u32 nr_cpus = 
test_data.nr_cpus; - vm_paddr_t cmdq_base; + gpa_t cmdq_base; test_data.device_table = vm_phy_pages_alloc(vm, pages_per_64k, gpa_base, @@ -224,7 +224,7 @@ static void setup_gic(void) static void signal_lpi(u32 device_id, u32 event_id) { - vm_paddr_t db_addr = GITS_BASE_GPA + GITS_TRANSLATER; + gpa_t db_addr = GITS_BASE_GPA + GITS_TRANSLATER; struct kvm_msi msi = { .address_lo = db_addr, diff --git a/tools/testing/selftests/kvm/arm64/vgic_v5.c b/tools/testing/selftests/kvm/arm64/vgic_v5.c index 3ce6cf37a629..d785b660d847 100644 --- a/tools/testing/selftests/kvm/arm64/vgic_v5.c +++ b/tools/testing/selftests/kvm/arm64/vgic_v5.c @@ -17,10 +17,10 @@ struct vm_gic { struct kvm_vm *vm; int gic_fd; - uint32_t gic_dev_type; + u32 gic_dev_type; }; -static uint64_t max_phys_size; +static u64 max_phys_size; #define GUEST_CMD_IRQ_CDIA 10 #define GUEST_CMD_IRQ_DIEOI 11 @@ -96,7 +96,7 @@ static void vm_gic_destroy(struct vm_gic *v) kvm_vm_free(v->vm); } -static void test_vgic_v5_ppis(uint32_t gic_dev_type) +static void test_vgic_v5_ppis(u32 gic_dev_type) { struct kvm_vcpu *vcpus[NR_VCPUS]; struct ucall uc; @@ -173,7 +173,7 @@ done: /* * Returns 0 if it's possible to create GIC device of a given type (V5). 
*/ -int test_kvm_device(uint32_t gic_dev_type) +int test_kvm_device(u32 gic_dev_type) { struct kvm_vcpu *vcpus[NR_VCPUS]; struct vm_gic v; @@ -199,7 +199,7 @@ int test_kvm_device(uint32_t gic_dev_type) return 0; } -void run_tests(uint32_t gic_dev_type) +void run_tests(u32 gic_dev_type) { pr_info("Test VGICv5 PPIs\n"); test_vgic_v5_ppis(gic_dev_type); diff --git a/tools/testing/selftests/kvm/arm64/vpmu_counter_access.c b/tools/testing/selftests/kvm/arm64/vpmu_counter_access.c index ae36325c022f..22223395969e 100644 --- a/tools/testing/selftests/kvm/arm64/vpmu_counter_access.c +++ b/tools/testing/selftests/kvm/arm64/vpmu_counter_access.c @@ -33,20 +33,20 @@ struct vpmu_vm { static struct vpmu_vm vpmu_vm; struct pmreg_sets { - uint64_t set_reg_id; - uint64_t clr_reg_id; + u64 set_reg_id; + u64 clr_reg_id; }; #define PMREG_SET(set, clr) {.set_reg_id = set, .clr_reg_id = clr} -static uint64_t get_pmcr_n(uint64_t pmcr) +static u64 get_pmcr_n(u64 pmcr) { return FIELD_GET(ARMV8_PMU_PMCR_N, pmcr); } -static uint64_t get_counters_mask(uint64_t n) +static u64 get_counters_mask(u64 n) { - uint64_t mask = BIT(ARMV8_PMU_CYCLE_IDX); + u64 mask = BIT(ARMV8_PMU_CYCLE_IDX); if (n) mask |= GENMASK(n - 1, 0); @@ -89,7 +89,7 @@ static inline void write_sel_evtyper(int sel, unsigned long val) static void pmu_disable_reset(void) { - uint64_t pmcr = read_sysreg(pmcr_el0); + u64 pmcr = read_sysreg(pmcr_el0); /* Reset all counters, disabling them */ pmcr &= ~ARMV8_PMU_PMCR_E; @@ -169,7 +169,7 @@ struct pmc_accessor pmc_accessors[] = { #define GUEST_ASSERT_BITMAP_REG(regname, mask, set_expected) \ { \ - uint64_t _tval = read_sysreg(regname); \ + u64 _tval = read_sysreg(regname); \ \ if (set_expected) \ __GUEST_ASSERT((_tval & mask), \ @@ -185,7 +185,7 @@ struct pmc_accessor pmc_accessors[] = { * Check if @mask bits in {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers * are set or cleared as specified in @set_expected. 
*/ -static void check_bitmap_pmu_regs(uint64_t mask, bool set_expected) +static void check_bitmap_pmu_regs(u64 mask, bool set_expected) { GUEST_ASSERT_BITMAP_REG(pmcntenset_el0, mask, set_expected); GUEST_ASSERT_BITMAP_REG(pmcntenclr_el0, mask, set_expected); @@ -207,7 +207,7 @@ static void check_bitmap_pmu_regs(uint64_t mask, bool set_expected) */ static void test_bitmap_pmu_regs(int pmc_idx, bool set_op) { - uint64_t pmcr_n, test_bit = BIT(pmc_idx); + u64 pmcr_n, test_bit = BIT(pmc_idx); bool set_expected = false; if (set_op) { @@ -232,7 +232,7 @@ static void test_bitmap_pmu_regs(int pmc_idx, bool set_op) */ static void test_access_pmc_regs(struct pmc_accessor *acc, int pmc_idx) { - uint64_t write_data, read_data; + u64 write_data, read_data; /* Disable all PMCs and reset all PMCs to zero. */ pmu_disable_reset(); @@ -287,11 +287,11 @@ static void test_access_pmc_regs(struct pmc_accessor *acc, int pmc_idx) } #define INVALID_EC (-1ul) -uint64_t expected_ec = INVALID_EC; +u64 expected_ec = INVALID_EC; static void guest_sync_handler(struct ex_regs *regs) { - uint64_t esr, ec; + u64 esr, ec; esr = read_sysreg(esr_el1); ec = ESR_ELx_EC(esr); @@ -351,9 +351,9 @@ static void test_access_invalid_pmc_regs(struct pmc_accessor *acc, int pmc_idx) * if reading/writing PMU registers for implemented or unimplemented * counters works as expected. 
*/ -static void guest_code(uint64_t expected_pmcr_n) +static void guest_code(u64 expected_pmcr_n) { - uint64_t pmcr, pmcr_n, unimp_mask; + u64 pmcr, pmcr_n, unimp_mask; int i, pmc; __GUEST_ASSERT(expected_pmcr_n <= ARMV8_PMU_MAX_GENERAL_COUNTERS, @@ -402,12 +402,12 @@ static void guest_code(uint64_t expected_pmcr_n) static void create_vpmu_vm(void *guest_code) { struct kvm_vcpu_init init; - uint8_t pmuver, ec; - uint64_t dfr0, irq = 23; + u8 pmuver, ec; + u64 dfr0, irq = 23; struct kvm_device_attr irq_attr = { .group = KVM_ARM_VCPU_PMU_V3_CTRL, .attr = KVM_ARM_VCPU_PMU_V3_IRQ, - .addr = (uint64_t)&irq, + .addr = (u64)&irq, }; /* The test creates the vpmu_vm multiple times. Ensure a clean state */ @@ -443,7 +443,7 @@ static void destroy_vpmu_vm(void) kvm_vm_free(vpmu_vm.vm); } -static void run_vcpu(struct kvm_vcpu *vcpu, uint64_t pmcr_n) +static void run_vcpu(struct kvm_vcpu *vcpu, u64 pmcr_n) { struct ucall uc; @@ -489,9 +489,9 @@ static void test_create_vpmu_vm_with_nr_counters(unsigned int nr_counters, bool * Create a guest with one vCPU, set the PMCR_EL0.N for the vCPU to @pmcr_n, * and run the test. */ -static void run_access_test(uint64_t pmcr_n) +static void run_access_test(u64 pmcr_n) { - uint64_t sp; + u64 sp; struct kvm_vcpu *vcpu; struct kvm_vcpu_init init; @@ -514,7 +514,7 @@ static void run_access_test(uint64_t pmcr_n) aarch64_vcpu_setup(vcpu, &init); vcpu_init_descriptor_tables(vcpu); vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_SP_EL1), sp); - vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code); + vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (u64)guest_code); run_vcpu(vcpu, pmcr_n); @@ -531,12 +531,12 @@ static struct pmreg_sets validity_check_reg_sets[] = { * Create a VM, and check if KVM handles the userspace accesses of * the PMU register sets in @validity_check_reg_sets[] correctly. 
*/ -static void run_pmregs_validity_test(uint64_t pmcr_n) +static void run_pmregs_validity_test(u64 pmcr_n) { int i; struct kvm_vcpu *vcpu; - uint64_t set_reg_id, clr_reg_id, reg_val; - uint64_t valid_counters_mask, max_counters_mask; + u64 set_reg_id, clr_reg_id, reg_val; + u64 valid_counters_mask, max_counters_mask; test_create_vpmu_vm_with_nr_counters(pmcr_n, false); vcpu = vpmu_vm.vcpu; @@ -588,7 +588,7 @@ static void run_pmregs_validity_test(uint64_t pmcr_n) * the vCPU to @pmcr_n, which is larger than the host value. * The attempt should fail as @pmcr_n is too big to set for the vCPU. */ -static void run_error_test(uint64_t pmcr_n) +static void run_error_test(u64 pmcr_n) { pr_debug("Error test with pmcr_n %lu (larger than the host)\n", pmcr_n); @@ -600,9 +600,9 @@ static void run_error_test(uint64_t pmcr_n) * Return the default number of implemented PMU event counters excluding * the cycle counter (i.e. PMCR_EL0.N value) for the guest. */ -static uint64_t get_pmcr_n_limit(void) +static u64 get_pmcr_n_limit(void) { - uint64_t pmcr; + u64 pmcr; create_vpmu_vm(guest_code); pmcr = vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0)); @@ -624,7 +624,7 @@ static bool kvm_supports_nr_counters_attr(void) int main(void) { - uint64_t i, pmcr_n; + u64 i, pmcr_n; TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_PMU_V3)); TEST_REQUIRE(kvm_supports_vgic_v3()); diff --git a/tools/testing/selftests/kvm/coalesced_io_test.c b/tools/testing/selftests/kvm/coalesced_io_test.c index 60cb25454899..df4ed5e3877c 100644 --- a/tools/testing/selftests/kvm/coalesced_io_test.c +++ b/tools/testing/selftests/kvm/coalesced_io_test.c @@ -14,16 +14,16 @@ struct kvm_coalesced_io { struct kvm_coalesced_mmio_ring *ring; - uint32_t ring_size; - uint64_t mmio_gpa; - uint64_t *mmio; + u32 ring_size; + u64 mmio_gpa; + u64 *mmio; /* * x86-only, but define pio_port for all architectures to minimize the * amount of #ifdeffery and complexity, without having to sacrifice * verbose error messages. 
*/ - uint8_t pio_port; + u8 pio_port; }; static struct kvm_coalesced_io kvm_builtin_io_ring; @@ -70,13 +70,13 @@ static void guest_code(struct kvm_coalesced_io *io) static void vcpu_run_and_verify_io_exit(struct kvm_vcpu *vcpu, struct kvm_coalesced_io *io, - uint32_t ring_start, - uint32_t expected_exit) + u32 ring_start, + u32 expected_exit) { const bool want_pio = expected_exit == KVM_EXIT_IO; struct kvm_coalesced_mmio_ring *ring = io->ring; struct kvm_run *run = vcpu->run; - uint32_t pio_value; + u32 pio_value; WRITE_ONCE(ring->first, ring_start); WRITE_ONCE(ring->last, ring_start); @@ -88,13 +88,13 @@ static void vcpu_run_and_verify_io_exit(struct kvm_vcpu *vcpu, * data_offset is garbage, e.g. an MMIO gpa. */ if (run->exit_reason == KVM_EXIT_IO) - pio_value = *(uint32_t *)((void *)run + run->io.data_offset); + pio_value = *(u32 *)((void *)run + run->io.data_offset); else pio_value = 0; TEST_ASSERT((!want_pio && (run->exit_reason == KVM_EXIT_MMIO && run->mmio.is_write && run->mmio.phys_addr == io->mmio_gpa && run->mmio.len == 8 && - *(uint64_t *)run->mmio.data == io->mmio_gpa + io->ring_size - 1)) || + *(u64 *)run->mmio.data == io->mmio_gpa + io->ring_size - 1)) || (want_pio && (run->exit_reason == KVM_EXIT_IO && run->io.port == io->pio_port && run->io.direction == KVM_EXIT_IO_OUT && run->io.count == 1 && pio_value == io->pio_port + io->ring_size - 1)), @@ -105,14 +105,14 @@ static void vcpu_run_and_verify_io_exit(struct kvm_vcpu *vcpu, want_pio ? (unsigned long long)io->pio_port : io->mmio_gpa, (want_pio ? io->pio_port : io->mmio_gpa) + io->ring_size - 1, run->exit_reason, run->exit_reason == KVM_EXIT_MMIO ? "MMIO" : run->exit_reason == KVM_EXIT_IO ? 
"PIO" : "other", - run->mmio.phys_addr, run->mmio.is_write, run->mmio.len, *(uint64_t *)run->mmio.data, + run->mmio.phys_addr, run->mmio.is_write, run->mmio.len, *(u64 *)run->mmio.data, run->io.port, run->io.direction, run->io.size, run->io.count, pio_value); } static void vcpu_run_and_verify_coalesced_io(struct kvm_vcpu *vcpu, struct kvm_coalesced_io *io, - uint32_t ring_start, - uint32_t expected_exit) + u32 ring_start, + u32 expected_exit) { struct kvm_coalesced_mmio_ring *ring = io->ring; int i; @@ -124,18 +124,18 @@ static void vcpu_run_and_verify_coalesced_io(struct kvm_vcpu *vcpu, ring->first, ring->last, io->ring_size, ring_start); for (i = 0; i < io->ring_size - 1; i++) { - uint32_t idx = (ring->first + i) % io->ring_size; + u32 idx = (ring->first + i) % io->ring_size; struct kvm_coalesced_mmio *entry = &ring->coalesced_mmio[idx]; #ifdef __x86_64__ if (i & 1) TEST_ASSERT(entry->phys_addr == io->pio_port && entry->len == 4 && entry->pio && - *(uint32_t *)entry->data == io->pio_port + i, + *(u32 *)entry->data == io->pio_port + i, "Wanted 4-byte port I/O 0x%x = 0x%x in entry %u, got %u-byte %s 0x%llx = 0x%x", io->pio_port, io->pio_port + i, i, entry->len, entry->pio ? "PIO" : "MMIO", - entry->phys_addr, *(uint32_t *)entry->data); + entry->phys_addr, *(u32 *)entry->data); else #endif TEST_ASSERT(entry->phys_addr == io->mmio_gpa && @@ -143,12 +143,12 @@ static void vcpu_run_and_verify_coalesced_io(struct kvm_vcpu *vcpu, "Wanted 8-byte MMIO to 0x%lx = %lx in entry %u, got %u-byte %s 0x%llx = 0x%lx", io->mmio_gpa, io->mmio_gpa + i, i, entry->len, entry->pio ? "PIO" : "MMIO", - entry->phys_addr, *(uint64_t *)entry->data); + entry->phys_addr, *(u64 *)entry->data); } } static void test_coalesced_io(struct kvm_vcpu *vcpu, - struct kvm_coalesced_io *io, uint32_t ring_start) + struct kvm_coalesced_io *io, u32 ring_start) { struct kvm_coalesced_mmio_ring *ring = io->ring; @@ -219,11 +219,11 @@ int main(int argc, char *argv[]) * the MMIO GPA identity mapped in the guest. 
*/ .mmio_gpa = 4ull * SZ_1G, - .mmio = (uint64_t *)(4ull * SZ_1G), + .mmio = (u64 *)(4ull * SZ_1G), .pio_port = 0x80, }; - virt_map(vm, (uint64_t)kvm_builtin_io_ring.mmio, kvm_builtin_io_ring.mmio_gpa, 1); + virt_map(vm, (u64)kvm_builtin_io_ring.mmio, kvm_builtin_io_ring.mmio_gpa, 1); sync_global_to_guest(vm, kvm_builtin_io_ring); vcpu_args_set(vcpu, 1, &kvm_builtin_io_ring); diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c index 0202b78f8680..302c4923d093 100644 --- a/tools/testing/selftests/kvm/demand_paging_test.c +++ b/tools/testing/selftests/kvm/demand_paging_test.c @@ -24,7 +24,7 @@ #ifdef __NR_userfaultfd static int nr_vcpus = 1; -static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE; +static u64 guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE; static size_t demand_paging_size; static char *guest_data_prototype; @@ -58,7 +58,7 @@ static int handle_uffd_page_request(int uffd_mode, int uffd, struct uffd_msg *msg) { pid_t tid = syscall(__NR_gettid); - uint64_t addr = msg->arg.pagefault.address; + u64 addr = msg->arg.pagefault.address; struct timespec start; struct timespec ts_diff; int r; @@ -68,7 +68,7 @@ static int handle_uffd_page_request(int uffd_mode, int uffd, if (uffd_mode == UFFDIO_REGISTER_MODE_MISSING) { struct uffdio_copy copy; - copy.src = (uint64_t)guest_data_prototype; + copy.src = (u64)guest_data_prototype; copy.dst = addr; copy.len = demand_paging_size; copy.mode = 0; @@ -138,7 +138,7 @@ struct test_params { bool partition_vcpu_memory_access; }; -static void prefault_mem(void *alias, uint64_t len) +static void prefault_mem(void *alias, u64 len) { size_t p; @@ -154,7 +154,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) struct memstress_vcpu_args *vcpu_args; struct test_params *p = arg; struct uffd_desc **uffd_descs = NULL; - uint64_t uffd_region_size; + u64 uffd_region_size; struct timespec start; struct timespec ts_diff; double vcpu_paging_rate; diff 
--git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c index 0a1ea1d1e2d8..ef779fa91827 100644 --- a/tools/testing/selftests/kvm/dirty_log_perf_test.c +++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c @@ -24,7 +24,7 @@ #define TEST_HOST_LOOP_N 2UL static int nr_vcpus = 1; -static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE; +static u64 guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE; static bool run_vcpus_while_disabling_dirty_logging; /* Host variables */ @@ -37,7 +37,7 @@ static void vcpu_worker(struct memstress_vcpu_args *vcpu_args) { struct kvm_vcpu *vcpu = vcpu_args->vcpu; int vcpu_idx = vcpu_args->vcpu_idx; - uint64_t pages_count = 0; + u64 pages_count = 0; struct kvm_run *run; struct timespec start; struct timespec ts_diff; @@ -93,11 +93,11 @@ static void vcpu_worker(struct memstress_vcpu_args *vcpu_args) struct test_params { unsigned long iterations; - uint64_t phys_offset; + u64 phys_offset; bool partition_vcpu_memory_access; enum vm_mem_backing_src_type backing_src; int slots; - uint32_t write_percent; + u32 write_percent; bool random_access; }; @@ -106,9 +106,9 @@ static void run_test(enum vm_guest_mode mode, void *arg) struct test_params *p = arg; struct kvm_vm *vm; unsigned long **bitmaps; - uint64_t guest_num_pages; - uint64_t host_num_pages; - uint64_t pages_per_slot; + u64 guest_num_pages; + u64 host_num_pages; + u64 pages_per_slot; struct timespec start; struct timespec ts_diff; struct timespec get_dirty_log_total = (struct timespec){0}; diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c index 7627b328f18a..12446a4b6e8d 100644 --- a/tools/testing/selftests/kvm/dirty_log_test.c +++ b/tools/testing/selftests/kvm/dirty_log_test.c @@ -74,11 +74,11 @@ * the host. READ/WRITE_ONCE() should also be used with anything * that may change. 
*/ -static uint64_t host_page_size; -static uint64_t guest_page_size; -static uint64_t guest_num_pages; -static uint64_t iteration; -static uint64_t nr_writes; +static u64 host_page_size; +static u64 guest_page_size; +static u64 guest_num_pages; +static u64 iteration; +static u64 nr_writes; static bool vcpu_stop; /* @@ -86,13 +86,13 @@ static bool vcpu_stop; * This will be set to the topmost valid physical address minus * the test memory size. */ -static uint64_t guest_test_phys_mem; +static u64 guest_test_phys_mem; /* * Guest virtual memory offset of the testing memory slot. * Must not conflict with identity mapped test code. */ -static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM; +static u64 guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM; /* * Continuously write to the first 8 bytes of a random pages within @@ -100,10 +100,10 @@ static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM; */ static void guest_code(void) { - uint64_t addr; + u64 addr; #ifdef __s390x__ - uint64_t i; + u64 i; /* * On s390x, all pages of a 1M segment are initially marked as dirty @@ -113,7 +113,7 @@ static void guest_code(void) */ for (i = 0; i < guest_num_pages; i++) { addr = guest_test_virt_mem + i * guest_page_size; - vcpu_arch_put_guest(*(uint64_t *)addr, READ_ONCE(iteration)); + vcpu_arch_put_guest(*(u64 *)addr, READ_ONCE(iteration)); nr_writes++; } #endif @@ -125,7 +125,7 @@ static void guest_code(void) * guest_page_size; addr = align_down(addr, host_page_size); - vcpu_arch_put_guest(*(uint64_t *)addr, READ_ONCE(iteration)); + vcpu_arch_put_guest(*(u64 *)addr, READ_ONCE(iteration)); nr_writes++; } @@ -138,11 +138,11 @@ static bool host_quit; /* Points to the test VM memory region on which we track dirty logs */ static void *host_test_mem; -static uint64_t host_num_pages; +static u64 host_num_pages; /* For statistics only */ -static uint64_t host_dirty_count; -static uint64_t host_clear_count; +static u64 host_dirty_count; +static u64 host_clear_count; /* Whether dirty 
ring reset is requested, or finished */ static sem_t sem_vcpu_stop; @@ -169,7 +169,7 @@ static bool dirty_ring_vcpu_ring_full; * dirty gfn we've collected, so that if a mismatch of data found later in the * verifying process, we let it pass. */ -static uint64_t dirty_ring_last_page = -1ULL; +static u64 dirty_ring_last_page = -1ULL; /* * In addition to the above, it is possible (especially if this @@ -213,7 +213,7 @@ static uint64_t dirty_ring_last_page = -1ULL; * and also don't fail when it is reported in the next iteration, together with * an outdated iteration count. */ -static uint64_t dirty_ring_prev_iteration_last_page; +static u64 dirty_ring_prev_iteration_last_page; enum log_mode_t { /* Only use KVM_GET_DIRTY_LOG for logging */ @@ -236,7 +236,7 @@ static enum log_mode_t host_log_mode_option = LOG_MODE_ALL; /* Logging mode for current run */ static enum log_mode_t host_log_mode; static pthread_t vcpu_thread; -static uint32_t test_dirty_ring_count = TEST_DIRTY_RING_COUNT; +static u32 test_dirty_ring_count = TEST_DIRTY_RING_COUNT; static bool clear_log_supported(void) { @@ -255,15 +255,15 @@ static void clear_log_create_vm_done(struct kvm_vm *vm) } static void dirty_log_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot, - void *bitmap, uint32_t num_pages, - uint32_t *unused) + void *bitmap, u32 num_pages, + u32 *unused) { kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap); } static void clear_log_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot, - void *bitmap, uint32_t num_pages, - uint32_t *unused) + void *bitmap, u32 num_pages, + u32 *unused) { kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap); kvm_vm_clear_dirty_log(vcpu->vm, slot, bitmap, 0, num_pages); @@ -297,8 +297,8 @@ static bool dirty_ring_supported(void) static void dirty_ring_create_vm_done(struct kvm_vm *vm) { - uint64_t pages; - uint32_t limit; + u64 pages; + u32 limit; /* * We rely on vcpu exit due to full dirty ring state. 
Adjust @@ -333,12 +333,12 @@ static inline void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn) smp_store_release(&gfn->flags, KVM_DIRTY_GFN_F_RESET); } -static uint32_t dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns, - int slot, void *bitmap, - uint32_t num_pages, uint32_t *fetch_index) +static u32 dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns, + int slot, void *bitmap, + u32 num_pages, u32 *fetch_index) { struct kvm_dirty_gfn *cur; - uint32_t count = 0; + u32 count = 0; while (true) { cur = &dirty_gfns[*fetch_index % test_dirty_ring_count]; @@ -359,10 +359,10 @@ static uint32_t dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns, } static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot, - void *bitmap, uint32_t num_pages, - uint32_t *ring_buf_idx) + void *bitmap, u32 num_pages, + u32 *ring_buf_idx) { - uint32_t count, cleared; + u32 count, cleared; /* Only have one vcpu */ count = dirty_ring_collect_one(vcpu_map_dirty_ring(vcpu), @@ -404,8 +404,8 @@ struct log_mode { void (*create_vm_done)(struct kvm_vm *vm); /* Hook to collect the dirty pages into the bitmap provided */ void (*collect_dirty_pages) (struct kvm_vcpu *vcpu, int slot, - void *bitmap, uint32_t num_pages, - uint32_t *ring_buf_idx); + void *bitmap, u32 num_pages, + u32 *ring_buf_idx); /* Hook to call when after each vcpu run */ void (*after_vcpu_run)(struct kvm_vcpu *vcpu); } log_modes[LOG_MODE_NUM] = { @@ -459,8 +459,8 @@ static void log_mode_create_vm_done(struct kvm_vm *vm) } static void log_mode_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot, - void *bitmap, uint32_t num_pages, - uint32_t *ring_buf_idx) + void *bitmap, u32 num_pages, + u32 *ring_buf_idx) { struct log_mode *mode = &log_modes[host_log_mode]; @@ -494,11 +494,11 @@ static void *vcpu_worker(void *data) static void vm_dirty_log_verify(enum vm_guest_mode mode, unsigned long **bmap) { - uint64_t page, nr_dirty_pages = 0, nr_clean_pages = 0; - uint64_t step = vm_num_host_pages(mode, 1); + 
u64 page, nr_dirty_pages = 0, nr_clean_pages = 0; + u64 step = vm_num_host_pages(mode, 1); for (page = 0; page < host_num_pages; page += step) { - uint64_t val = *(uint64_t *)(host_test_mem + page * host_page_size); + u64 val = *(u64 *)(host_test_mem + page * host_page_size); bool bmap0_dirty = __test_and_clear_bit_le(page, bmap[0]); /* @@ -575,7 +575,7 @@ static void vm_dirty_log_verify(enum vm_guest_mode mode, unsigned long **bmap) } static struct kvm_vm *create_vm(enum vm_guest_mode mode, struct kvm_vcpu **vcpu, - uint64_t extra_mem_pages, void *guest_code) + u64 extra_mem_pages, void *guest_code) { struct kvm_vm *vm; @@ -592,7 +592,7 @@ static struct kvm_vm *create_vm(enum vm_guest_mode mode, struct kvm_vcpu **vcpu, struct test_params { unsigned long iterations; unsigned long interval; - uint64_t phys_offset; + u64 phys_offset; }; static void run_test(enum vm_guest_mode mode, void *arg) @@ -601,7 +601,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) struct kvm_vcpu *vcpu; struct kvm_vm *vm; unsigned long *bmap[2]; - uint32_t ring_buf_idx = 0; + u32 ring_buf_idx = 0; int sem_val; if (!log_mode_supported()) { @@ -667,7 +667,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages); /* Cache the HVA pointer of the region */ - host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem); + host_test_mem = addr_gpa2hva(vm, (gpa_t)guest_test_phys_mem); /* Export the shared variables to the guest */ sync_global_to_guest(vm, host_page_size); diff --git a/tools/testing/selftests/kvm/get-reg-list.c b/tools/testing/selftests/kvm/get-reg-list.c index f4644c9d2d3b..216f10644c1a 100644 --- a/tools/testing/selftests/kvm/get-reg-list.c +++ b/tools/testing/selftests/kvm/get-reg-list.c @@ -216,7 +216,7 @@ static void run_test(struct vcpu_reg_list *c) * since we don't know the capabilities of any new registers. 
*/ for_each_present_blessed_reg(i) { - uint8_t addr[2048 / 8]; + u8 addr[2048 / 8]; struct kvm_one_reg reg = { .id = reg_list->reg[i], .addr = (__u64)&addr, diff --git a/tools/testing/selftests/kvm/guest_memfd_test.c b/tools/testing/selftests/kvm/guest_memfd_test.c index ec7644aae999..d6528c6f5e03 100644 --- a/tools/testing/selftests/kvm/guest_memfd_test.c +++ b/tools/testing/selftests/kvm/guest_memfd_test.c @@ -171,7 +171,7 @@ static void test_numa_allocation(int fd, size_t total_size) kvm_munmap(mem, total_size); } -static void test_collapse(int fd, uint64_t flags) +static void test_collapse(int fd, u64 flags) { const size_t pmd_size = get_trans_hugepagesz(); void *reserved_addr; @@ -346,7 +346,7 @@ static void test_invalid_punch_hole(int fd, size_t total_size) } static void test_create_guest_memfd_invalid_sizes(struct kvm_vm *vm, - uint64_t guest_memfd_flags) + u64 guest_memfd_flags) { size_t size; int fd; @@ -389,8 +389,8 @@ static void test_create_guest_memfd_multiple(struct kvm_vm *vm) static void test_guest_memfd_flags(struct kvm_vm *vm) { - uint64_t valid_flags = vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS); - uint64_t flag; + u64 valid_flags = vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS); + u64 flag; int fd; for (flag = BIT(0); flag; flag <<= 1) { @@ -419,7 +419,7 @@ do { \ #define gmem_test(__test, __vm, __flags) \ __gmem_test(__test, __vm, __flags, page_size * 4) -static void __test_guest_memfd(struct kvm_vm *vm, uint64_t flags) +static void __test_guest_memfd(struct kvm_vm *vm, u64 flags) { test_create_guest_memfd_multiple(vm); test_create_guest_memfd_invalid_sizes(vm, flags); @@ -452,7 +452,7 @@ static void __test_guest_memfd(struct kvm_vm *vm, uint64_t flags) static void test_guest_memfd(unsigned long vm_type) { struct kvm_vm *vm = vm_create_barebones_type(vm_type); - uint64_t flags; + u64 flags; test_guest_memfd_flags(vm); @@ -470,7 +470,7 @@ static void test_guest_memfd(unsigned long vm_type) kvm_vm_free(vm); } -static void guest_code(uint8_t *mem, 
uint64_t size) +static void guest_code(u8 *mem, u64 size) { size_t i; @@ -489,12 +489,12 @@ static void test_guest_memfd_guest(void) * the guest's code, stack, and page tables, and low memory contains * the PCI hole and other MMIO regions that need to be avoided. */ - const uint64_t gpa = SZ_4G; + const gpa_t gpa = SZ_4G; const int slot = 1; struct kvm_vcpu *vcpu; struct kvm_vm *vm; - uint8_t *mem; + u8 *mem; size_t size; int fd, i; diff --git a/tools/testing/selftests/kvm/guest_print_test.c b/tools/testing/selftests/kvm/guest_print_test.c index bcf582852db9..79d3fc326e91 100644 --- a/tools/testing/selftests/kvm/guest_print_test.c +++ b/tools/testing/selftests/kvm/guest_print_test.c @@ -16,22 +16,22 @@ #include "ucall_common.h" struct guest_vals { - uint64_t a; - uint64_t b; - uint64_t type; + u64 a; + u64 b; + u64 type; }; static struct guest_vals vals; /* GUEST_PRINTF()/GUEST_ASSERT_FMT() does not support float or double. */ #define TYPE_LIST \ -TYPE(test_type_i64, I64, "%ld", int64_t) \ -TYPE(test_type_u64, U64u, "%lu", uint64_t) \ -TYPE(test_type_x64, U64x, "0x%lx", uint64_t) \ -TYPE(test_type_X64, U64X, "0x%lX", uint64_t) \ -TYPE(test_type_u32, U32u, "%u", uint32_t) \ -TYPE(test_type_x32, U32x, "0x%x", uint32_t) \ -TYPE(test_type_X32, U32X, "0x%X", uint32_t) \ +TYPE(test_type_i64, I64, "%ld", s64) \ +TYPE(test_type_u64, U64u, "%lu", u64) \ +TYPE(test_type_x64, U64x, "0x%lx", u64) \ +TYPE(test_type_X64, U64X, "0x%lX", u64) \ +TYPE(test_type_u32, U32u, "%u", u32) \ +TYPE(test_type_x32, U32x, "0x%x", u32) \ +TYPE(test_type_X32, U32X, "0x%X", u32) \ TYPE(test_type_int, INT, "%d", int) \ TYPE(test_type_char, CHAR, "%c", char) \ TYPE(test_type_str, STR, "'%s'", const char *) \ @@ -56,7 +56,7 @@ static void fn(struct kvm_vcpu *vcpu, T a, T b) \ \ snprintf(expected_printf, UCALL_BUFFER_LEN, PRINTF_FMT_##ext, a, b); \ snprintf(expected_assert, UCALL_BUFFER_LEN, ASSERT_FMT_##ext, a, b); \ - vals = (struct guest_vals){ (uint64_t)a, (uint64_t)b, TYPE_##ext }; \ + vals = 
(struct guest_vals){ (u64)a, (u64)b, TYPE_##ext }; \ sync_global_to_guest(vcpu->vm, vals); \ run_test(vcpu, expected_printf, expected_assert); \ } diff --git a/tools/testing/selftests/kvm/hardware_disable_test.c b/tools/testing/selftests/kvm/hardware_disable_test.c index 94bd6ed24cf3..3147f5c97e94 100644 --- a/tools/testing/selftests/kvm/hardware_disable_test.c +++ b/tools/testing/selftests/kvm/hardware_disable_test.c @@ -80,7 +80,7 @@ static inline void check_join(pthread_t thread, void **retval) TEST_ASSERT(r == 0, "%s: failed to join thread", __func__); } -static void run_test(uint32_t run) +static void run_test(u32 run) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; @@ -88,7 +88,7 @@ static void run_test(uint32_t run) pthread_t threads[VCPU_NUM]; pthread_t throw_away; void *b; - uint32_t i, j; + u32 i, j; CPU_ZERO(&cpu_set); for (i = 0; i < VCPU_NUM; i++) @@ -149,7 +149,7 @@ void wait_for_child_setup(pid_t pid) int main(int argc, char **argv) { - uint32_t i; + u32 i; int s, r; pid_t pid; diff --git a/tools/testing/selftests/kvm/include/arm64/arch_timer.h b/tools/testing/selftests/kvm/include/arm64/arch_timer.h index e2c4e9f0010f..a5836d4ab7ee 100644 --- a/tools/testing/selftests/kvm/include/arm64/arch_timer.h +++ b/tools/testing/selftests/kvm/include/arm64/arch_timer.h @@ -18,20 +18,20 @@ enum arch_timer { #define CTL_ISTATUS (1 << 2) #define msec_to_cycles(msec) \ - (timer_get_cntfrq() * (uint64_t)(msec) / 1000) + (timer_get_cntfrq() * (u64)(msec) / 1000) #define usec_to_cycles(usec) \ - (timer_get_cntfrq() * (uint64_t)(usec) / 1000000) + (timer_get_cntfrq() * (u64)(usec) / 1000000) #define cycles_to_usec(cycles) \ - ((uint64_t)(cycles) * 1000000 / timer_get_cntfrq()) + ((u64)(cycles) * 1000000 / timer_get_cntfrq()) -static inline uint32_t timer_get_cntfrq(void) +static inline u32 timer_get_cntfrq(void) { return read_sysreg(cntfrq_el0); } -static inline uint64_t timer_get_cntct(enum arch_timer timer) +static inline u64 timer_get_cntct(enum arch_timer timer) { 
isb(); @@ -48,7 +48,7 @@ static inline uint64_t timer_get_cntct(enum arch_timer timer) return 0; } -static inline void timer_set_cval(enum arch_timer timer, uint64_t cval) +static inline void timer_set_cval(enum arch_timer timer, u64 cval) { switch (timer) { case VIRTUAL: @@ -64,7 +64,7 @@ static inline void timer_set_cval(enum arch_timer timer, uint64_t cval) isb(); } -static inline uint64_t timer_get_cval(enum arch_timer timer) +static inline u64 timer_get_cval(enum arch_timer timer) { switch (timer) { case VIRTUAL: @@ -79,7 +79,7 @@ static inline uint64_t timer_get_cval(enum arch_timer timer) return 0; } -static inline void timer_set_tval(enum arch_timer timer, int32_t tval) +static inline void timer_set_tval(enum arch_timer timer, s32 tval) { switch (timer) { case VIRTUAL: @@ -95,7 +95,7 @@ static inline void timer_set_tval(enum arch_timer timer, int32_t tval) isb(); } -static inline int32_t timer_get_tval(enum arch_timer timer) +static inline s32 timer_get_tval(enum arch_timer timer) { isb(); switch (timer) { @@ -111,7 +111,7 @@ static inline int32_t timer_get_tval(enum arch_timer timer) return 0; } -static inline void timer_set_ctl(enum arch_timer timer, uint32_t ctl) +static inline void timer_set_ctl(enum arch_timer timer, u32 ctl) { switch (timer) { case VIRTUAL: @@ -127,7 +127,7 @@ static inline void timer_set_ctl(enum arch_timer timer, uint32_t ctl) isb(); } -static inline uint32_t timer_get_ctl(enum arch_timer timer) +static inline u32 timer_get_ctl(enum arch_timer timer) { switch (timer) { case VIRTUAL: @@ -142,15 +142,15 @@ static inline uint32_t timer_get_ctl(enum arch_timer timer) return 0; } -static inline void timer_set_next_cval_ms(enum arch_timer timer, uint32_t msec) +static inline void timer_set_next_cval_ms(enum arch_timer timer, u32 msec) { - uint64_t now_ct = timer_get_cntct(timer); - uint64_t next_ct = now_ct + msec_to_cycles(msec); + u64 now_ct = timer_get_cntct(timer); + u64 next_ct = now_ct + msec_to_cycles(msec); timer_set_cval(timer, 
next_ct); } -static inline void timer_set_next_tval_ms(enum arch_timer timer, uint32_t msec) +static inline void timer_set_next_tval_ms(enum arch_timer timer, u32 msec) { timer_set_tval(timer, msec_to_cycles(msec)); } diff --git a/tools/testing/selftests/kvm/include/arm64/delay.h b/tools/testing/selftests/kvm/include/arm64/delay.h index 329e4f5079ea..6a5d4634af2c 100644 --- a/tools/testing/selftests/kvm/include/arm64/delay.h +++ b/tools/testing/selftests/kvm/include/arm64/delay.h @@ -8,10 +8,10 @@ #include "arch_timer.h" -static inline void __delay(uint64_t cycles) +static inline void __delay(u64 cycles) { enum arch_timer timer = VIRTUAL; - uint64_t start = timer_get_cntct(timer); + u64 start = timer_get_cntct(timer); while ((timer_get_cntct(timer) - start) < cycles) cpu_relax(); diff --git a/tools/testing/selftests/kvm/include/arm64/gic.h b/tools/testing/selftests/kvm/include/arm64/gic.h index cc7a7f34ed37..615745093c98 100644 --- a/tools/testing/selftests/kvm/include/arm64/gic.h +++ b/tools/testing/selftests/kvm/include/arm64/gic.h @@ -48,8 +48,8 @@ void gic_set_dir(unsigned int intid); * split is true, EOI drops the priority and deactivates the interrupt. 
*/ void gic_set_eoi_split(bool split); -void gic_set_priority_mask(uint64_t mask); -void gic_set_priority(uint32_t intid, uint32_t prio); +void gic_set_priority_mask(u64 mask); +void gic_set_priority(u32 intid, u32 prio); void gic_irq_set_active(unsigned int intid); void gic_irq_clear_active(unsigned int intid); bool gic_irq_get_active(unsigned int intid); @@ -59,7 +59,7 @@ bool gic_irq_get_pending(unsigned int intid); void gic_irq_set_config(unsigned int intid, bool is_edge); void gic_irq_set_group(unsigned int intid, bool group); -void gic_rdist_enable_lpis(vm_paddr_t cfg_table, size_t cfg_table_size, - vm_paddr_t pend_table); +void gic_rdist_enable_lpis(gpa_t cfg_table, size_t cfg_table_size, + gpa_t pend_table); #endif /* SELFTEST_KVM_GIC_H */ diff --git a/tools/testing/selftests/kvm/include/arm64/gic_v3_its.h b/tools/testing/selftests/kvm/include/arm64/gic_v3_its.h index 58feef3eb386..a43a407e2d5c 100644 --- a/tools/testing/selftests/kvm/include/arm64/gic_v3_its.h +++ b/tools/testing/selftests/kvm/include/arm64/gic_v3_its.h @@ -5,11 +5,10 @@ #include <linux/sizes.h> -void its_init(vm_paddr_t coll_tbl, size_t coll_tbl_sz, - vm_paddr_t device_tbl, size_t device_tbl_sz, - vm_paddr_t cmdq, size_t cmdq_size); +void its_init(gpa_t coll_tbl, size_t coll_tbl_sz, gpa_t device_tbl, + size_t device_tbl_sz, gpa_t cmdq, size_t cmdq_size); -void its_send_mapd_cmd(void *cmdq_base, u32 device_id, vm_paddr_t itt_base, +void its_send_mapd_cmd(void *cmdq_base, u32 device_id, gpa_t itt_base, size_t itt_size, bool valid); void its_send_mapc_cmd(void *cmdq_base, u32 vcpu_id, u32 collection_id, bool valid); void its_send_mapti_cmd(void *cmdq_base, u32 device_id, u32 event_id, diff --git a/tools/testing/selftests/kvm/include/arm64/processor.h b/tools/testing/selftests/kvm/include/arm64/processor.h index ac97a1c436fc..b8a902ba8573 100644 --- a/tools/testing/selftests/kvm/include/arm64/processor.h +++ b/tools/testing/selftests/kvm/include/arm64/processor.h @@ -128,7 +128,7 @@ #define 
PTE_ADDR_51_50_LPA2_SHIFT 8 void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init); -struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, +struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, u32 vcpu_id, struct kvm_vcpu_init *init, void *guest_code); struct ex_regs { @@ -167,8 +167,8 @@ enum { (v) == VECTOR_SYNC_LOWER_64 || \ (v) == VECTOR_SYNC_LOWER_32) -void aarch64_get_supported_page_sizes(uint32_t ipa, uint32_t *ipa4k, - uint32_t *ipa16k, uint32_t *ipa64k); +void aarch64_get_supported_page_sizes(u32 ipa, u32 *ipa4k, + u32 *ipa16k, u32 *ipa64k); void vm_init_descriptor_tables(struct kvm_vm *vm); void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu); @@ -179,8 +179,8 @@ void vm_install_exception_handler(struct kvm_vm *vm, void vm_install_sync_handler(struct kvm_vm *vm, int vector, int ec, handler_fn handler); -uint64_t *virt_get_pte_hva_at_level(struct kvm_vm *vm, vm_vaddr_t gva, int level); -uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva); +u64 *virt_get_pte_hva_at_level(struct kvm_vm *vm, gva_t gva, int level); +u64 *virt_get_pte_hva(struct kvm_vm *vm, gva_t gva); static inline void cpu_relax(void) { @@ -287,9 +287,9 @@ struct arm_smccc_res { * @res: pointer to write the return values from registers x0-x3 * */ -void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1, - uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5, - uint64_t arg6, struct arm_smccc_res *res); +void smccc_hvc(u32 function_id, u64 arg0, u64 arg1, + u64 arg2, u64 arg3, u64 arg4, u64 arg5, + u64 arg6, struct arm_smccc_res *res); /** * smccc_smc - Invoke a SMCCC function using the smc conduit @@ -298,9 +298,9 @@ void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1, * @res: pointer to write the return values from registers x0-x3 * */ -void smccc_smc(uint32_t function_id, uint64_t arg0, uint64_t arg1, - uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5, - uint64_t arg6, struct arm_smccc_res *res); +void 
smccc_smc(u32 function_id, u64 arg0, u64 arg1, + u64 arg2, u64 arg3, u64 arg4, u64 arg5, + u64 arg6, struct arm_smccc_res *res); /* Execute a Wait For Interrupt instruction. */ void wfi(void); diff --git a/tools/testing/selftests/kvm/include/arm64/ucall.h b/tools/testing/selftests/kvm/include/arm64/ucall.h index 4ec801f37f00..2210d3d94c40 100644 --- a/tools/testing/selftests/kvm/include/arm64/ucall.h +++ b/tools/testing/selftests/kvm/include/arm64/ucall.h @@ -10,9 +10,9 @@ * ucall_exit_mmio_addr holds per-VM values (global data is duplicated by each * VM), it must not be accessed from host code. */ -extern vm_vaddr_t *ucall_exit_mmio_addr; +extern gva_t *ucall_exit_mmio_addr; -static inline void ucall_arch_do_ucall(vm_vaddr_t uc) +static inline void ucall_arch_do_ucall(gva_t uc) { WRITE_ONCE(*ucall_exit_mmio_addr, uc); } diff --git a/tools/testing/selftests/kvm/include/arm64/vgic.h b/tools/testing/selftests/kvm/include/arm64/vgic.h index 688beccc9436..1f8b04373987 100644 --- a/tools/testing/selftests/kvm/include/arm64/vgic.h +++ b/tools/testing/selftests/kvm/include/arm64/vgic.h @@ -11,27 +11,27 @@ #include "kvm_util.h" #define REDIST_REGION_ATTR_ADDR(count, base, flags, index) \ - (((uint64_t)(count) << 52) | \ - ((uint64_t)((base) >> 16) << 16) | \ - ((uint64_t)(flags) << 12) | \ + (((u64)(count) << 52) | \ + ((u64)((base) >> 16) << 16) | \ + ((u64)(flags) << 12) | \ index) bool kvm_supports_vgic_v3(void); -int __vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs); +int __vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, u32 nr_irqs); void __vgic_v3_init(int fd); -int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs); +int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, u32 nr_irqs); #define VGIC_MAX_RESERVED 1023 -void kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level); -int _kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level); +void kvm_irq_set_level_info(int gic_fd, u32 intid, int 
level); +int _kvm_irq_set_level_info(int gic_fd, u32 intid, int level); -void kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level); -int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level); +void kvm_arm_irq_line(struct kvm_vm *vm, u32 intid, int level); +int _kvm_arm_irq_line(struct kvm_vm *vm, u32 intid, int level); /* The vcpu arg only applies to private interrupts. */ -void kvm_irq_write_ispendr(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu); -void kvm_irq_write_isactiver(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu); +void kvm_irq_write_ispendr(int gic_fd, u32 intid, struct kvm_vcpu *vcpu); +void kvm_irq_write_isactiver(int gic_fd, u32 intid, struct kvm_vcpu *vcpu); #define KVM_IRQCHIP_NUM_PINS (1020 - 32) diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h index f861242b4ae8..2ecaaa0e9965 100644 --- a/tools/testing/selftests/kvm/include/kvm_util.h +++ b/tools/testing/selftests/kvm/include/kvm_util.h @@ -58,7 +58,7 @@ struct kvm_binary_stats { struct kvm_vcpu { struct list_head list; - uint32_t id; + u32 id; int fd; struct kvm_vm *vm; struct kvm_run *run; @@ -70,8 +70,8 @@ struct kvm_vcpu { #endif struct kvm_binary_stats stats; struct kvm_dirty_gfn *dirty_gfns; - uint32_t fetch_index; - uint32_t dirty_gfns_count; + u32 fetch_index; + u32 dirty_gfns_count; }; struct userspace_mem_regions { @@ -90,7 +90,7 @@ enum kvm_mem_region_type { struct kvm_mmu { bool pgd_created; - uint64_t pgd; + u64 pgd; int pgtable_levels; struct kvm_mmu_arch arch; @@ -105,16 +105,16 @@ struct kvm_vm { unsigned int page_shift; unsigned int pa_bits; unsigned int va_bits; - uint64_t max_gfn; + u64 max_gfn; struct list_head vcpus; struct userspace_mem_regions regions; struct sparsebit *vpages_valid; struct sparsebit *vpages_mapped; bool has_irqchip; - vm_paddr_t ucall_mmio_addr; - vm_vaddr_t handlers; - uint32_t dirty_ring_size; - uint64_t gpa_tag_mask; + gpa_t ucall_mmio_addr; + gva_t handlers; + u32 
dirty_ring_size; + gpa_t gpa_tag_mask; /* * "mmu" is the guest's stage-1, with a short name because the vast @@ -132,7 +132,7 @@ struct kvm_vm { * allocators, e.g., lib/elf uses the memslots[MEM_REGION_CODE] * memslot. */ - uint32_t memslots[NR_MEM_REGIONS]; + u32 memslots[NR_MEM_REGIONS]; }; struct vcpu_reg_sublist { @@ -164,7 +164,7 @@ struct vcpu_reg_list { else struct userspace_mem_region * -memslot2region(struct kvm_vm *vm, uint32_t memslot); +memslot2region(struct kvm_vm *vm, u32 memslot); static inline struct userspace_mem_region *vm_get_mem_region(struct kvm_vm *vm, enum kvm_mem_region_type type) @@ -213,13 +213,13 @@ enum vm_guest_mode { }; struct vm_shape { - uint32_t type; - uint8_t mode; - uint8_t pad0; - uint16_t pad1; + u32 type; + u8 mode; + u8 pad0; + u16 pad1; }; -kvm_static_assert(sizeof(struct vm_shape) == sizeof(uint64_t)); +kvm_static_assert(sizeof(struct vm_shape) == sizeof(u64)); #define VM_TYPE_DEFAULT 0 @@ -404,21 +404,22 @@ static inline int vm_check_cap(struct kvm_vm *vm, long cap) return ret; } -static inline int __vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0) +static inline int __vm_enable_cap(struct kvm_vm *vm, u32 cap, u64 arg0) { struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } }; return __vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap); } -static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0) + +static inline void vm_enable_cap(struct kvm_vm *vm, u32 cap, u64 arg0) { struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } }; vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap); } -static inline void vm_set_memory_attributes(struct kvm_vm *vm, uint64_t gpa, - uint64_t size, uint64_t attributes) +static inline void vm_set_memory_attributes(struct kvm_vm *vm, gpa_t gpa, + u64 size, u64 attributes) { struct kvm_memory_attributes attr = { .attributes = attributes, @@ -438,35 +439,35 @@ static inline void vm_set_memory_attributes(struct kvm_vm *vm, uint64_t gpa, } -static inline void 
vm_mem_set_private(struct kvm_vm *vm, uint64_t gpa, - uint64_t size) +static inline void vm_mem_set_private(struct kvm_vm *vm, gpa_t gpa, + u64 size) { vm_set_memory_attributes(vm, gpa, size, KVM_MEMORY_ATTRIBUTE_PRIVATE); } -static inline void vm_mem_set_shared(struct kvm_vm *vm, uint64_t gpa, - uint64_t size) +static inline void vm_mem_set_shared(struct kvm_vm *vm, gpa_t gpa, + u64 size) { vm_set_memory_attributes(vm, gpa, size, 0); } -void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t gpa, uint64_t size, +void vm_guest_mem_fallocate(struct kvm_vm *vm, gpa_t gpa, u64 size, bool punch_hole); -static inline void vm_guest_mem_punch_hole(struct kvm_vm *vm, uint64_t gpa, - uint64_t size) +static inline void vm_guest_mem_punch_hole(struct kvm_vm *vm, gpa_t gpa, + u64 size) { vm_guest_mem_fallocate(vm, gpa, size, true); } -static inline void vm_guest_mem_allocate(struct kvm_vm *vm, uint64_t gpa, - uint64_t size) +static inline void vm_guest_mem_allocate(struct kvm_vm *vm, gpa_t gpa, + u64 size) { vm_guest_mem_fallocate(vm, gpa, size, false); } -void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size); -const char *vm_guest_mode_string(uint32_t i); +void vm_enable_dirty_ring(struct kvm_vm *vm, u32 ring_size); +const char *vm_guest_mode_string(u32 i); void kvm_vm_free(struct kvm_vm *vmp); void kvm_vm_restart(struct kvm_vm *vmp); @@ -474,7 +475,7 @@ void kvm_vm_release(struct kvm_vm *vmp); void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename); int kvm_memfd_alloc(size_t size, bool hugepages); -void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent); +void vm_dump(FILE *stream, struct kvm_vm *vm, u8 indent); static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log) { @@ -484,7 +485,7 @@ static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log) } static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log, - uint64_t first_page, uint32_t num_pages) + u64 first_page, u32 num_pages) { 
struct kvm_clear_dirty_log args = { .dirty_bitmap = log, @@ -496,14 +497,14 @@ static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log vm_ioctl(vm, KVM_CLEAR_DIRTY_LOG, &args); } -static inline uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm) +static inline u32 kvm_vm_reset_dirty_ring(struct kvm_vm *vm) { return __vm_ioctl(vm, KVM_RESET_DIRTY_RINGS, NULL); } static inline void kvm_vm_register_coalesced_io(struct kvm_vm *vm, - uint64_t address, - uint64_t size, bool pio) + u64 address, + u64 size, bool pio) { struct kvm_coalesced_mmio_zone zone = { .addr = address, @@ -515,8 +516,8 @@ static inline void kvm_vm_register_coalesced_io(struct kvm_vm *vm, } static inline void kvm_vm_unregister_coalesced_io(struct kvm_vm *vm, - uint64_t address, - uint64_t size, bool pio) + u64 address, + u64 size, bool pio) { struct kvm_coalesced_mmio_zone zone = { .addr = address, @@ -535,8 +536,8 @@ static inline int vm_get_stats_fd(struct kvm_vm *vm) return fd; } -static inline int __kvm_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd, - uint32_t flags) +static inline int __kvm_irqfd(struct kvm_vm *vm, u32 gsi, int eventfd, + u32 flags) { struct kvm_irqfd irqfd = { .fd = eventfd, @@ -548,20 +549,19 @@ static inline int __kvm_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd, return __vm_ioctl(vm, KVM_IRQFD, &irqfd); } -static inline void kvm_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd, - uint32_t flags) +static inline void kvm_irqfd(struct kvm_vm *vm, u32 gsi, int eventfd, u32 flags) { int ret = __kvm_irqfd(vm, gsi, eventfd, flags); TEST_ASSERT_VM_VCPU_IOCTL(!ret, KVM_IRQFD, ret, vm); } -static inline void kvm_assign_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd) +static inline void kvm_assign_irqfd(struct kvm_vm *vm, u32 gsi, int eventfd) { kvm_irqfd(vm, gsi, eventfd, 0); } -static inline void kvm_deassign_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd) +static inline void kvm_deassign_irqfd(struct kvm_vm *vm, u32 gsi, int eventfd) { 
kvm_irqfd(vm, gsi, eventfd, KVM_IRQFD_FLAG_DEASSIGN); } @@ -610,15 +610,15 @@ static inline struct kvm_stats_desc *get_stats_descriptor(struct kvm_stats_desc } void read_stat_data(int stats_fd, struct kvm_stats_header *header, - struct kvm_stats_desc *desc, uint64_t *data, + struct kvm_stats_desc *desc, u64 *data, size_t max_elements); void kvm_get_stat(struct kvm_binary_stats *stats, const char *name, - uint64_t *data, size_t max_elements); + u64 *data, size_t max_elements); #define __get_stat(stats, stat) \ ({ \ - uint64_t data; \ + u64 data; \ \ kvm_get_stat(stats, #stat, &data, 1); \ data; \ @@ -664,8 +664,8 @@ static inline bool is_smt_on(void) void vm_create_irqchip(struct kvm_vm *vm); -static inline int __vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size, - uint64_t flags) +static inline int __vm_create_guest_memfd(struct kvm_vm *vm, u64 size, + u64 flags) { struct kvm_create_guest_memfd guest_memfd = { .size = size, @@ -675,8 +675,8 @@ static inline int __vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size, return __vm_ioctl(vm, KVM_CREATE_GUEST_MEMFD, &guest_memfd); } -static inline int vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size, - uint64_t flags) +static inline int vm_create_guest_memfd(struct kvm_vm *vm, u64 size, + u64 flags) { int fd = __vm_create_guest_memfd(vm, size, flags); @@ -684,24 +684,23 @@ static inline int vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size, return fd; } -void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags, - uint64_t gpa, uint64_t size, void *hva); -int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags, - uint64_t gpa, uint64_t size, void *hva); -void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags, - uint64_t gpa, uint64_t size, void *hva, - uint32_t guest_memfd, uint64_t guest_memfd_offset); -int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags, - uint64_t gpa, uint64_t size, void *hva, - 
uint32_t guest_memfd, uint64_t guest_memfd_offset); +void vm_set_user_memory_region(struct kvm_vm *vm, u32 slot, u32 flags, + gpa_t gpa, u64 size, void *hva); +int __vm_set_user_memory_region(struct kvm_vm *vm, u32 slot, u32 flags, + gpa_t gpa, u64 size, void *hva); +void vm_set_user_memory_region2(struct kvm_vm *vm, u32 slot, u32 flags, + gpa_t gpa, u64 size, void *hva, + u32 guest_memfd, u64 guest_memfd_offset); +int __vm_set_user_memory_region2(struct kvm_vm *vm, u32 slot, u32 flags, + gpa_t gpa, u64 size, void *hva, + u32 guest_memfd, u64 guest_memfd_offset); void vm_userspace_mem_region_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type, - uint64_t gpa, uint32_t slot, uint64_t npages, - uint32_t flags); + gpa_t gpa, u32 slot, u64 npages, u32 flags); void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type, - uint64_t gpa, uint32_t slot, uint64_t npages, uint32_t flags, - int guest_memfd_fd, uint64_t guest_memfd_offset); + gpa_t gpa, u32 slot, u64 npages, u32 flags, + int guest_memfd_fd, u64 guest_memfd_offset); #ifndef vm_arch_has_protected_memory static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm) @@ -710,36 +709,34 @@ static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm) } #endif -void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags); -void vm_mem_region_reload(struct kvm_vm *vm, uint32_t slot); -void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa); -void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot); -struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id); -void vm_populate_vaddr_bitmap(struct kvm_vm *vm); -vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min); -vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min); -vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, - enum kvm_mem_region_type type); -vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, 
size_t sz, - vm_vaddr_t vaddr_min, - enum kvm_mem_region_type type); -vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages); -vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm, - enum kvm_mem_region_type type); -vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm); - -void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, +void vm_mem_region_set_flags(struct kvm_vm *vm, u32 slot, u32 flags); +void vm_mem_region_reload(struct kvm_vm *vm, u32 slot); +void vm_mem_region_move(struct kvm_vm *vm, u32 slot, u64 new_gpa); +void vm_mem_region_delete(struct kvm_vm *vm, u32 slot); +struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, u32 vcpu_id); +void vm_populate_gva_bitmap(struct kvm_vm *vm); +gva_t vm_unused_gva_gap(struct kvm_vm *vm, size_t sz, gva_t min_gva); +gva_t vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva); +gva_t __vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva, + enum kvm_mem_region_type type); +gva_t vm_alloc_shared(struct kvm_vm *vm, size_t sz, gva_t min_gva, + enum kvm_mem_region_type type); +gva_t vm_alloc_pages(struct kvm_vm *vm, int nr_pages); +gva_t __vm_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type); +gva_t vm_alloc_page(struct kvm_vm *vm); + +void virt_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa, unsigned int npages); -void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa); -void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva); -vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva); -void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa); +void *addr_gpa2hva(struct kvm_vm *vm, gpa_t gpa); +void *addr_gva2hva(struct kvm_vm *vm, gva_t gva); +gpa_t addr_hva2gpa(struct kvm_vm *vm, void *hva); +void *addr_gpa2alias(struct kvm_vm *vm, gpa_t gpa); #ifndef vcpu_arch_put_guest #define vcpu_arch_put_guest(mem, val) do { (mem) = (val); } while (0) #endif -static inline vm_paddr_t vm_untag_gpa(struct kvm_vm *vm, vm_paddr_t gpa) +static inline gpa_t vm_untag_gpa(struct kvm_vm *vm, gpa_t gpa) { return gpa & 
~vm->gpa_tag_mask; } @@ -755,8 +752,8 @@ static inline int __vcpu_run(struct kvm_vcpu *vcpu) void vcpu_run_complete_io(struct kvm_vcpu *vcpu); struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu); -static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, uint32_t cap, - uint64_t arg0) +static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, u32 cap, + u64 arg0) { struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } }; @@ -811,31 +808,34 @@ static inline void vcpu_fpu_set(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) vcpu_ioctl(vcpu, KVM_SET_FPU, fpu); } -static inline int __vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr) +static inline int __vcpu_get_reg(struct kvm_vcpu *vcpu, u64 id, void *addr) { - struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr }; + struct kvm_one_reg reg = { .id = id, .addr = (u64)addr }; return __vcpu_ioctl(vcpu, KVM_GET_ONE_REG, ®); } -static inline int __vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val) + +static inline int __vcpu_set_reg(struct kvm_vcpu *vcpu, u64 id, u64 val) { - struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val }; + struct kvm_one_reg reg = { .id = id, .addr = (u64)&val }; return __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, ®); } -static inline uint64_t vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id) + +static inline u64 vcpu_get_reg(struct kvm_vcpu *vcpu, u64 id) { - uint64_t val; - struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val }; + u64 val; + struct kvm_one_reg reg = { .id = id, .addr = (u64)&val }; TEST_ASSERT(KVM_REG_SIZE(id) <= sizeof(val), "Reg %lx too big", id); vcpu_ioctl(vcpu, KVM_GET_ONE_REG, ®); return val; } -static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val) + +static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u64 id, u64 val) { - struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val }; + struct kvm_one_reg reg = { .id = id, .addr = (u64)&val }; TEST_ASSERT(KVM_REG_SIZE(id) <= sizeof(val), 
"Reg %lx too big", id); @@ -880,75 +880,75 @@ static inline int vcpu_get_stats_fd(struct kvm_vcpu *vcpu) return fd; } -int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr); +int __kvm_has_device_attr(int dev_fd, u32 group, u64 attr); -static inline void kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr) +static inline void kvm_has_device_attr(int dev_fd, u32 group, u64 attr) { int ret = __kvm_has_device_attr(dev_fd, group, attr); TEST_ASSERT(!ret, "KVM_HAS_DEVICE_ATTR failed, rc: %i errno: %i", ret, errno); } -int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val); +int __kvm_device_attr_get(int dev_fd, u32 group, u64 attr, void *val); -static inline void kvm_device_attr_get(int dev_fd, uint32_t group, - uint64_t attr, void *val) +static inline void kvm_device_attr_get(int dev_fd, u32 group, + u64 attr, void *val) { int ret = __kvm_device_attr_get(dev_fd, group, attr, val); TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_GET_DEVICE_ATTR, ret)); } -int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val); +int __kvm_device_attr_set(int dev_fd, u32 group, u64 attr, void *val); -static inline void kvm_device_attr_set(int dev_fd, uint32_t group, - uint64_t attr, void *val) +static inline void kvm_device_attr_set(int dev_fd, u32 group, + u64 attr, void *val) { int ret = __kvm_device_attr_set(dev_fd, group, attr, val); TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_DEVICE_ATTR, ret)); } -static inline int __vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group, - uint64_t attr) +static inline int __vcpu_has_device_attr(struct kvm_vcpu *vcpu, u32 group, + u64 attr) { return __kvm_has_device_attr(vcpu->fd, group, attr); } -static inline void vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group, - uint64_t attr) +static inline void vcpu_has_device_attr(struct kvm_vcpu *vcpu, u32 group, + u64 attr) { kvm_has_device_attr(vcpu->fd, group, attr); } -static inline int __vcpu_device_attr_get(struct kvm_vcpu 
*vcpu, uint32_t group, - uint64_t attr, void *val) +static inline int __vcpu_device_attr_get(struct kvm_vcpu *vcpu, u32 group, + u64 attr, void *val) { return __kvm_device_attr_get(vcpu->fd, group, attr, val); } -static inline void vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group, - uint64_t attr, void *val) +static inline void vcpu_device_attr_get(struct kvm_vcpu *vcpu, u32 group, + u64 attr, void *val) { kvm_device_attr_get(vcpu->fd, group, attr, val); } -static inline int __vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group, - uint64_t attr, void *val) +static inline int __vcpu_device_attr_set(struct kvm_vcpu *vcpu, u32 group, + u64 attr, void *val) { return __kvm_device_attr_set(vcpu->fd, group, attr, val); } -static inline void vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group, - uint64_t attr, void *val) +static inline void vcpu_device_attr_set(struct kvm_vcpu *vcpu, u32 group, + u64 attr, void *val) { kvm_device_attr_set(vcpu->fd, group, attr, val); } -int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type); -int __kvm_create_device(struct kvm_vm *vm, uint64_t type); +int __kvm_test_create_device(struct kvm_vm *vm, u64 type); +int __kvm_create_device(struct kvm_vm *vm, u64 type); -static inline int kvm_create_device(struct kvm_vm *vm, uint64_t type) +static inline int kvm_create_device(struct kvm_vm *vm, u64 type) { int fd = __kvm_create_device(vm, type); @@ -964,7 +964,7 @@ void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu); * Input Args: * vcpu - vCPU * num - number of arguments - * ... - arguments, each of type uint64_t + * ... - arguments, each of type u64 * * Output Args: None * @@ -972,40 +972,38 @@ void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu); * * Sets the first @num input parameters for the function at @vcpu's entry point, * per the C calling convention of the architecture, to the values given as - * variable args. Each of the variable args is expected to be of type uint64_t. + * variable args. 
Each of the variable args is expected to be of type u64. * The maximum @num can be is specific to the architecture. */ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...); -void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level); -int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level); +void kvm_irq_line(struct kvm_vm *vm, u32 irq, int level); +int _kvm_irq_line(struct kvm_vm *vm, u32 irq, int level); #define KVM_MAX_IRQ_ROUTES 4096 struct kvm_irq_routing *kvm_gsi_routing_create(void); void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing, - uint32_t gsi, uint32_t pin); + u32 gsi, u32 pin); int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing); void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing); const char *exit_reason_str(unsigned int exit_reason); -vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, - uint32_t memslot); -vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, - vm_paddr_t paddr_min, uint32_t memslot, - bool protected); -vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm); +gpa_t vm_phy_page_alloc(struct kvm_vm *vm, gpa_t min_gpa, u32 memslot); +gpa_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, gpa_t min_gpa, + u32 memslot, bool protected); +gpa_t vm_alloc_page_table(struct kvm_vm *vm); -static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, - vm_paddr_t paddr_min, uint32_t memslot) +static inline gpa_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, + gpa_t min_gpa, u32 memslot) { /* * By default, allocate memory as protected for VMs that support * protected memory, as the majority of memory for such VMs is * protected, i.e. using shared memory is effectively opt-in. 
*/ - return __vm_phy_pages_alloc(vm, num, paddr_min, memslot, + return __vm_phy_pages_alloc(vm, num, min_gpa, memslot, vm_arch_has_protected_memory(vm)); } @@ -1016,8 +1014,8 @@ static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, * calculate the amount of memory needed for per-vCPU data, e.g. stacks. */ struct kvm_vm *____vm_create(struct vm_shape shape); -struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus, - uint64_t nr_extra_pages); +struct kvm_vm *__vm_create(struct vm_shape shape, u32 nr_runnable_vcpus, + u64 nr_extra_pages); static inline struct kvm_vm *vm_create_barebones(void) { @@ -1034,16 +1032,16 @@ static inline struct kvm_vm *vm_create_barebones_type(unsigned long type) return ____vm_create(shape); } -static inline struct kvm_vm *vm_create(uint32_t nr_runnable_vcpus) +static inline struct kvm_vm *vm_create(u32 nr_runnable_vcpus) { return __vm_create(VM_SHAPE_DEFAULT, nr_runnable_vcpus, 0); } -struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus, - uint64_t extra_mem_pages, +struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, u32 nr_vcpus, + u64 extra_mem_pages, void *guest_code, struct kvm_vcpu *vcpus[]); -static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus, +static inline struct kvm_vm *vm_create_with_vcpus(u32 nr_vcpus, void *guest_code, struct kvm_vcpu *vcpus[]) { @@ -1054,7 +1052,7 @@ static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus, struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape, struct kvm_vcpu **vcpu, - uint64_t extra_mem_pages, + u64 extra_mem_pages, void *guest_code); /* @@ -1062,7 +1060,7 @@ struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape, * additional pages of guest memory. Returns the VM and vCPU (via out param). 
*/ static inline struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu, - uint64_t extra_mem_pages, + u64 extra_mem_pages, void *guest_code) { return __vm_create_shape_with_one_vcpu(VM_SHAPE_DEFAULT, vcpu, @@ -1084,7 +1082,7 @@ static inline struct kvm_vm *vm_create_shape_with_one_vcpu(struct vm_shape shape struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm); -void kvm_set_files_rlimit(uint32_t nr_vcpus); +void kvm_set_files_rlimit(u32 nr_vcpus); int __pin_task_to_cpu(pthread_t task, int cpu); @@ -1115,7 +1113,7 @@ static inline int pin_self_to_any_cpu(void) } void kvm_print_vcpu_pinning_help(void); -void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[], +void kvm_parse_vcpu_pinning(const char *pcpus_string, u32 vcpu_to_pcpu[], int nr_vcpus); unsigned long vm_compute_max_gfn(struct kvm_vm *vm); @@ -1131,12 +1129,12 @@ vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages) } #define sync_global_to_guest(vm, g) ({ \ - typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \ + typeof(g) *_p = addr_gva2hva(vm, (gva_t)&(g)); \ memcpy(_p, &(g), sizeof(g)); \ }) #define sync_global_from_guest(vm, g) ({ \ - typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \ + typeof(g) *_p = addr_gva2hva(vm, (gva_t)&(g)); \ memcpy(&(g), _p, sizeof(g)); \ }) @@ -1147,7 +1145,7 @@ vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages) * undesirable to change the host's copy of the global. 
*/ #define write_guest_global(vm, g, val) ({ \ - typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \ + typeof(g) *_p = addr_gva2hva(vm, (gva_t)&(g)); \ typeof(g) _val = val; \ \ memcpy(_p, &(_val), sizeof(g)); \ @@ -1156,10 +1154,10 @@ vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages) void assert_on_unhandled_exception(struct kvm_vcpu *vcpu); void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, - uint8_t indent); + u8 indent); static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu, - uint8_t indent) + u8 indent) { vcpu_arch_dump(stream, vcpu, indent); } @@ -1171,10 +1169,10 @@ static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu, * vm - Virtual Machine * vcpu_id - The id of the VCPU to add to the VM. */ -struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id); +struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id); void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code); -static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, +static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, u32 vcpu_id, void *guest_code) { struct kvm_vcpu *vcpu = vm_arch_vcpu_add(vm, vcpu_id); @@ -1185,10 +1183,10 @@ static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, } /* Re-create a vCPU after restarting a VM, e.g. for state save/restore tests. 
*/ -struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id); +struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, u32 vcpu_id); static inline struct kvm_vcpu *vm_vcpu_recreate(struct kvm_vm *vm, - uint32_t vcpu_id) + u32 vcpu_id) { return vm_arch_vcpu_recreate(vm, vcpu_id); } @@ -1203,27 +1201,15 @@ static inline void virt_pgd_alloc(struct kvm_vm *vm) } /* - * VM Virtual Page Map - * - * Input Args: - * vm - Virtual Machine - * vaddr - VM Virtual Address - * paddr - VM Physical Address - * memslot - Memory region slot for new virtual translation tables - * - * Output Args: None - * - * Return: None - * * Within @vm, creates a virtual translation for the page starting - * at @vaddr to the page starting at @paddr. + * at @gva to the page starting at @gpa. */ -void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr); +void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa); -static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) +static inline void virt_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa) { - virt_arch_pg_map(vm, vaddr, paddr); - sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift); + virt_arch_pg_map(vm, gva, gpa); + sparsebit_set(vm->vpages_mapped, gva >> vm->page_shift); } @@ -1242,9 +1228,9 @@ static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr * Returns the VM physical address of the translated VM virtual * address given by @gva. */ -vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva); +gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva); -static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) +static inline gpa_t addr_gva2gpa(struct kvm_vm *vm, gva_t gva) { return addr_arch_gva2gpa(vm, gva); } @@ -1264,9 +1250,9 @@ static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) * Dumps to the FILE stream given by @stream, the contents of all the * virtual translation tables for the VM given by @vm. 
*/ -void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent); +void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent); -static inline void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) +static inline void virt_dump(FILE *stream, struct kvm_vm *vm, u8 indent) { virt_arch_dump(stream, vm, indent); } @@ -1277,7 +1263,7 @@ static inline int __vm_disable_nx_huge_pages(struct kvm_vm *vm) return __vm_enable_cap(vm, KVM_CAP_VM_DISABLE_NX_HUGE_PAGES, 0); } -static inline uint64_t vm_page_align(struct kvm_vm *vm, uint64_t v) +static inline u64 vm_page_align(struct kvm_vm *vm, u64 v) { return (v + vm->page_size - 1) & ~(vm->page_size - 1); } @@ -1293,9 +1279,9 @@ void kvm_arch_vm_post_create(struct kvm_vm *vm, unsigned int nr_vcpus); void kvm_arch_vm_finalize_vcpus(struct kvm_vm *vm); void kvm_arch_vm_release(struct kvm_vm *vm); -bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr); +bool vm_is_gpa_protected(struct kvm_vm *vm, gpa_t gpa); -uint32_t guest_get_vcpuid(void); +u32 guest_get_vcpuid(void); bool kvm_arch_has_default_irqchip(void); diff --git a/tools/testing/selftests/kvm/include/kvm_util_types.h b/tools/testing/selftests/kvm/include/kvm_util_types.h index 0366e9bce7f9..ed0087e31674 100644 --- a/tools/testing/selftests/kvm/include/kvm_util_types.h +++ b/tools/testing/selftests/kvm/include/kvm_util_types.h @@ -2,6 +2,8 @@ #ifndef SELFTEST_KVM_UTIL_TYPES_H #define SELFTEST_KVM_UTIL_TYPES_H +#include <linux/types.h> + /* * Provide a version of static_assert() that is guaranteed to have an optional * message param. _GNU_SOURCE is defined for all KVM selftests, _GNU_SOURCE @@ -14,9 +16,9 @@ #define __kvm_static_assert(expr, msg, ...) _Static_assert(expr, msg) #define kvm_static_assert(expr, ...) 
__kvm_static_assert(expr, ##__VA_ARGS__, #expr) -typedef uint64_t vm_paddr_t; /* Virtual Machine (Guest) physical address */ -typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */ +typedef u64 gpa_t; /* Virtual Machine (Guest) physical address */ +typedef u64 gva_t; /* Virtual Machine (Guest) virtual address */ -#define INVALID_GPA (~(uint64_t)0) +#define INVALID_GPA (~(u64)0) #endif /* SELFTEST_KVM_UTIL_TYPES_H */ diff --git a/tools/testing/selftests/kvm/include/loongarch/arch_timer.h b/tools/testing/selftests/kvm/include/loongarch/arch_timer.h index 2ed106b32c81..3888aeeb3524 100644 --- a/tools/testing/selftests/kvm/include/loongarch/arch_timer.h +++ b/tools/testing/selftests/kvm/include/loongarch/arch_timer.h @@ -70,9 +70,9 @@ static inline void timer_set_next_cmp_ms(unsigned int msec, bool period) csr_write(val, LOONGARCH_CSR_TCFG); } -static inline void __delay(uint64_t cycles) +static inline void __delay(u64 cycles) { - uint64_t start = timer_get_cycles(); + u64 start = timer_get_cycles(); while ((timer_get_cycles() - start) < cycles) cpu_relax(); diff --git a/tools/testing/selftests/kvm/include/loongarch/ucall.h b/tools/testing/selftests/kvm/include/loongarch/ucall.h index 4ec801f37f00..2210d3d94c40 100644 --- a/tools/testing/selftests/kvm/include/loongarch/ucall.h +++ b/tools/testing/selftests/kvm/include/loongarch/ucall.h @@ -10,9 +10,9 @@ * ucall_exit_mmio_addr holds per-VM values (global data is duplicated by each * VM), it must not be accessed from host code. 
*/ -extern vm_vaddr_t *ucall_exit_mmio_addr; +extern gva_t *ucall_exit_mmio_addr; -static inline void ucall_arch_do_ucall(vm_vaddr_t uc) +static inline void ucall_arch_do_ucall(gva_t uc) { WRITE_ONCE(*ucall_exit_mmio_addr, uc); } diff --git a/tools/testing/selftests/kvm/include/memstress.h b/tools/testing/selftests/kvm/include/memstress.h index 9071eb6dea60..0d1d6230cc05 100644 --- a/tools/testing/selftests/kvm/include/memstress.h +++ b/tools/testing/selftests/kvm/include/memstress.h @@ -20,9 +20,9 @@ #define MEMSTRESS_MEM_SLOT_INDEX 1 struct memstress_vcpu_args { - uint64_t gpa; - uint64_t gva; - uint64_t pages; + gpa_t gpa; + gva_t gva; + u64 pages; /* Only used by the host userspace part of the vCPU thread */ struct kvm_vcpu *vcpu; @@ -32,11 +32,11 @@ struct memstress_vcpu_args { struct memstress_args { struct kvm_vm *vm; /* The starting address and size of the guest test region. */ - uint64_t gpa; - uint64_t size; - uint64_t guest_page_size; - uint32_t random_seed; - uint32_t write_percent; + gpa_t gpa; + u64 size; + u64 guest_page_size; + u32 random_seed; + u32 write_percent; /* Run vCPUs in L2 instead of L1, if the architecture supports it. */ bool nested; @@ -45,7 +45,7 @@ struct memstress_args { /* True if all vCPUs are pinned to pCPUs */ bool pin_vcpus; /* The vCPU=>pCPU pinning map. Only valid if pin_vcpus is true. */ - uint32_t vcpu_to_pcpu[KVM_MAX_VCPUS]; + u32 vcpu_to_pcpu[KVM_MAX_VCPUS]; /* Test is done, stop running vCPUs. 
*/ bool stop_vcpus; @@ -56,27 +56,27 @@ struct memstress_args { extern struct memstress_args memstress_args; struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus, - uint64_t vcpu_memory_bytes, int slots, + u64 vcpu_memory_bytes, int slots, enum vm_mem_backing_src_type backing_src, bool partition_vcpu_memory_access); void memstress_destroy_vm(struct kvm_vm *vm); -void memstress_set_write_percent(struct kvm_vm *vm, uint32_t write_percent); +void memstress_set_write_percent(struct kvm_vm *vm, u32 write_percent); void memstress_set_random_access(struct kvm_vm *vm, bool random_access); void memstress_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct memstress_vcpu_args *)); void memstress_join_vcpu_threads(int vcpus); -void memstress_guest_code(uint32_t vcpu_id); +void memstress_guest_code(u32 vcpu_id); -uint64_t memstress_nested_pages(int nr_vcpus); +u64 memstress_nested_pages(int nr_vcpus); void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[]); void memstress_enable_dirty_logging(struct kvm_vm *vm, int slots); void memstress_disable_dirty_logging(struct kvm_vm *vm, int slots); void memstress_get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots); void memstress_clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], - int slots, uint64_t pages_per_slot); -unsigned long **memstress_alloc_bitmaps(int slots, uint64_t pages_per_slot); + int slots, u64 pages_per_slot); +unsigned long **memstress_alloc_bitmaps(int slots, u64 pages_per_slot); void memstress_free_bitmaps(unsigned long *bitmaps[], int slots); #endif /* SELFTEST_KVM_MEMSTRESS_H */ diff --git a/tools/testing/selftests/kvm/include/riscv/arch_timer.h b/tools/testing/selftests/kvm/include/riscv/arch_timer.h index 225d81dad064..28ffc014da2a 100644 --- a/tools/testing/selftests/kvm/include/riscv/arch_timer.h +++ b/tools/testing/selftests/kvm/include/riscv/arch_timer.h @@ -14,25 +14,25 @@ static unsigned long timer_freq; #define 
msec_to_cycles(msec) \ - ((timer_freq) * (uint64_t)(msec) / 1000) + ((timer_freq) * (u64)(msec) / 1000) #define usec_to_cycles(usec) \ - ((timer_freq) * (uint64_t)(usec) / 1000000) + ((timer_freq) * (u64)(usec) / 1000000) #define cycles_to_usec(cycles) \ - ((uint64_t)(cycles) * 1000000 / (timer_freq)) + ((u64)(cycles) * 1000000 / (timer_freq)) -static inline uint64_t timer_get_cycles(void) +static inline u64 timer_get_cycles(void) { return csr_read(CSR_TIME); } -static inline void timer_set_cmp(uint64_t cval) +static inline void timer_set_cmp(u64 cval) { csr_write(CSR_STIMECMP, cval); } -static inline uint64_t timer_get_cmp(void) +static inline u64 timer_get_cmp(void) { return csr_read(CSR_STIMECMP); } @@ -47,17 +47,17 @@ static inline void timer_irq_disable(void) csr_clear(CSR_SIE, IE_TIE); } -static inline void timer_set_next_cmp_ms(uint32_t msec) +static inline void timer_set_next_cmp_ms(u32 msec) { - uint64_t now_ct = timer_get_cycles(); - uint64_t next_ct = now_ct + msec_to_cycles(msec); + u64 now_ct = timer_get_cycles(); + u64 next_ct = now_ct + msec_to_cycles(msec); timer_set_cmp(next_ct); } -static inline void __delay(uint64_t cycles) +static inline void __delay(u64 cycles) { - uint64_t start = timer_get_cycles(); + u64 start = timer_get_cycles(); while ((timer_get_cycles() - start) < cycles) cpu_relax(); diff --git a/tools/testing/selftests/kvm/include/riscv/processor.h b/tools/testing/selftests/kvm/include/riscv/processor.h index 4dade8c4d18e..e3acf2ae9881 100644 --- a/tools/testing/selftests/kvm/include/riscv/processor.h +++ b/tools/testing/selftests/kvm/include/riscv/processor.h @@ -25,8 +25,7 @@ #define GET_RM(insn) (((insn) & INSN_MASK_FUNCT3) >> INSN_SHIFT_FUNCT3) #define GET_CSR_NUM(insn) (((insn) & INSN_CSR_MASK) >> INSN_CSR_SHIFT) -static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t subtype, - uint64_t idx, uint64_t size) +static inline u64 __kvm_reg_id(u64 type, u64 subtype, u64 idx, u64 size) { return KVM_REG_RISCV | type | subtype | idx 
| size; } @@ -62,14 +61,14 @@ static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t subtype, KVM_REG_RISCV_SBI_SINGLE, \ idx, KVM_REG_SIZE_ULONG) -bool __vcpu_has_ext(struct kvm_vcpu *vcpu, uint64_t ext); +bool __vcpu_has_ext(struct kvm_vcpu *vcpu, u64 ext); -static inline bool __vcpu_has_isa_ext(struct kvm_vcpu *vcpu, uint64_t isa_ext) +static inline bool __vcpu_has_isa_ext(struct kvm_vcpu *vcpu, u64 isa_ext) { return __vcpu_has_ext(vcpu, RISCV_ISA_EXT_REG(isa_ext)); } -static inline bool __vcpu_has_sbi_ext(struct kvm_vcpu *vcpu, uint64_t sbi_ext) +static inline bool __vcpu_has_sbi_ext(struct kvm_vcpu *vcpu, u64 sbi_ext) { return __vcpu_has_ext(vcpu, RISCV_SBI_EXT_REG(sbi_ext)); } diff --git a/tools/testing/selftests/kvm/include/riscv/ucall.h b/tools/testing/selftests/kvm/include/riscv/ucall.h index a695ae36f3e0..2de7c6a36096 100644 --- a/tools/testing/selftests/kvm/include/riscv/ucall.h +++ b/tools/testing/selftests/kvm/include/riscv/ucall.h @@ -7,11 +7,11 @@ #define UCALL_EXIT_REASON KVM_EXIT_RISCV_SBI -static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) +static inline void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa) { } -static inline void ucall_arch_do_ucall(vm_vaddr_t uc) +static inline void ucall_arch_do_ucall(gva_t uc) { sbi_ecall(KVM_RISCV_SELFTESTS_SBI_EXT, KVM_RISCV_SELFTESTS_SBI_UCALL, diff --git a/tools/testing/selftests/kvm/include/s390/diag318_test_handler.h b/tools/testing/selftests/kvm/include/s390/diag318_test_handler.h index b0ed71302722..6deaf18fec22 100644 --- a/tools/testing/selftests/kvm/include/s390/diag318_test_handler.h +++ b/tools/testing/selftests/kvm/include/s390/diag318_test_handler.h @@ -8,6 +8,6 @@ #ifndef SELFTEST_KVM_DIAG318_TEST_HANDLER #define SELFTEST_KVM_DIAG318_TEST_HANDLER -uint64_t get_diag318_info(void); +u64 get_diag318_info(void); #endif diff --git a/tools/testing/selftests/kvm/include/s390/facility.h b/tools/testing/selftests/kvm/include/s390/facility.h index 00a1ced6538b..41a265742666 
100644 --- a/tools/testing/selftests/kvm/include/s390/facility.h +++ b/tools/testing/selftests/kvm/include/s390/facility.h @@ -16,7 +16,7 @@ /* alt_stfle_fac_list[16] + stfle_fac_list[16] */ #define NB_STFL_DOUBLEWORDS 32 -extern uint64_t stfl_doublewords[NB_STFL_DOUBLEWORDS]; +extern u64 stfl_doublewords[NB_STFL_DOUBLEWORDS]; extern bool stfle_flag; static inline bool test_bit_inv(unsigned long nr, const unsigned long *ptr) @@ -24,7 +24,7 @@ static inline bool test_bit_inv(unsigned long nr, const unsigned long *ptr) return test_bit(nr ^ (BITS_PER_LONG - 1), ptr); } -static inline void stfle(uint64_t *fac, unsigned int nb_doublewords) +static inline void stfle(u64 *fac, unsigned int nb_doublewords) { register unsigned long r0 asm("0") = nb_doublewords - 1; diff --git a/tools/testing/selftests/kvm/include/s390/ucall.h b/tools/testing/selftests/kvm/include/s390/ucall.h index 8035a872a351..3907d629304f 100644 --- a/tools/testing/selftests/kvm/include/s390/ucall.h +++ b/tools/testing/selftests/kvm/include/s390/ucall.h @@ -6,11 +6,11 @@ #define UCALL_EXIT_REASON KVM_EXIT_S390_SIEIC -static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) +static inline void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa) { } -static inline void ucall_arch_do_ucall(vm_vaddr_t uc) +static inline void ucall_arch_do_ucall(gva_t uc) { /* Exit via DIAGNOSE 0x501 (normally used for breakpoints) */ asm volatile ("diag 0,%0,0x501" : : "a"(uc) : "memory"); diff --git a/tools/testing/selftests/kvm/include/sparsebit.h b/tools/testing/selftests/kvm/include/sparsebit.h index bc760761e1a3..e027e5790946 100644 --- a/tools/testing/selftests/kvm/include/sparsebit.h +++ b/tools/testing/selftests/kvm/include/sparsebit.h @@ -6,7 +6,7 @@ * * Header file that describes API to the sparsebit library. * This library provides a memory efficient means of storing - * the settings of bits indexed via a uint64_t. Memory usage + * the settings of bits indexed via a u64. 
Memory usage * is reasonable, significantly less than (2^64 / 8) bytes, as * long as bits that are mostly set or mostly cleared are close * to each other. This library is efficient in memory usage @@ -25,8 +25,8 @@ extern "C" { #endif struct sparsebit; -typedef uint64_t sparsebit_idx_t; -typedef uint64_t sparsebit_num_t; +typedef u64 sparsebit_idx_t; +typedef u64 sparsebit_num_t; struct sparsebit *sparsebit_alloc(void); void sparsebit_free(struct sparsebit **sbitp); diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h index b4872ba8ed12..d9b433b834f1 100644 --- a/tools/testing/selftests/kvm/include/test_util.h +++ b/tools/testing/selftests/kvm/include/test_util.h @@ -22,6 +22,8 @@ #include <sys/mman.h> #include "kselftest.h" +#include <linux/types.h> + #define msecs_to_usecs(msec) ((msec) * 1000ULL) static inline __printf(1, 2) int _no_printf(const char *format, ...) { return 0; } @@ -99,25 +101,25 @@ do { \ size_t parse_size(const char *size); -int64_t timespec_to_ns(struct timespec ts); -struct timespec timespec_add_ns(struct timespec ts, int64_t ns); +s64 timespec_to_ns(struct timespec ts); +struct timespec timespec_add_ns(struct timespec ts, s64 ns); struct timespec timespec_add(struct timespec ts1, struct timespec ts2); struct timespec timespec_sub(struct timespec ts1, struct timespec ts2); struct timespec timespec_elapsed(struct timespec start); struct timespec timespec_div(struct timespec ts, int divisor); struct guest_random_state { - uint32_t seed; + u32 seed; }; -extern uint32_t guest_random_seed; +extern u32 guest_random_seed; extern struct guest_random_state guest_rng; -struct guest_random_state new_guest_random_state(uint32_t seed); -uint32_t guest_random_u32(struct guest_random_state *state); +struct guest_random_state new_guest_random_state(u32 seed); +u32 guest_random_u32(struct guest_random_state *state); static inline bool __guest_random_bool(struct guest_random_state *state, - uint8_t 
percent) + u8 percent) { return (guest_random_u32(state) % 100) < percent; } @@ -127,9 +129,9 @@ static inline bool guest_random_bool(struct guest_random_state *state) return __guest_random_bool(state, 50); } -static inline uint64_t guest_random_u64(struct guest_random_state *state) +static inline u64 guest_random_u64(struct guest_random_state *state) { - return ((uint64_t)guest_random_u32(state) << 32) | guest_random_u32(state); + return ((u64)guest_random_u32(state) << 32) | guest_random_u32(state); } enum vm_mem_backing_src_type { @@ -158,7 +160,7 @@ enum vm_mem_backing_src_type { struct vm_mem_backing_src_alias { const char *name; - uint32_t flag; + u32 flag; }; #define MIN_RUN_DELAY_NS 200000UL @@ -166,9 +168,9 @@ struct vm_mem_backing_src_alias { bool thp_configured(void); size_t get_trans_hugepagesz(void); size_t get_def_hugetlb_pagesz(void); -const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i); -size_t get_backing_src_pagesz(uint32_t i); -bool is_backing_src_hugetlb(uint32_t i); +const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(u32 i); +size_t get_backing_src_pagesz(u32 i); +bool is_backing_src_hugetlb(u32 i); void backing_src_help(const char *flag); enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name); long get_run_delay(void); @@ -189,18 +191,18 @@ static inline bool backing_src_can_be_huge(enum vm_mem_backing_src_type t) } /* Aligns x up to the next multiple of size. Size must be a power of 2. 
*/ -static inline uint64_t align_up(uint64_t x, uint64_t size) +static inline u64 align_up(u64 x, u64 size) { - uint64_t mask = size - 1; + u64 mask = size - 1; TEST_ASSERT(size != 0 && !(size & (size - 1)), "size not a power of 2: %lu", size); return ((x + mask) & ~mask); } -static inline uint64_t align_down(uint64_t x, uint64_t size) +static inline u64 align_down(u64 x, u64 size) { - uint64_t x_aligned_up = align_up(x, size); + u64 x_aligned_up = align_up(x, size); if (x == x_aligned_up) return x; @@ -215,7 +217,7 @@ static inline void *align_ptr_up(void *x, size_t size) int atoi_paranoid(const char *num_str); -static inline uint32_t atoi_positive(const char *name, const char *num_str) +static inline u32 atoi_positive(const char *name, const char *num_str) { int num = atoi_paranoid(num_str); @@ -223,7 +225,7 @@ static inline uint32_t atoi_positive(const char *name, const char *num_str) return num; } -static inline uint32_t atoi_non_negative(const char *name, const char *num_str) +static inline u32 atoi_non_negative(const char *name, const char *num_str) { int num = atoi_paranoid(num_str); diff --git a/tools/testing/selftests/kvm/include/timer_test.h b/tools/testing/selftests/kvm/include/timer_test.h index 9b6edaafe6d4..b7d5d2c84701 100644 --- a/tools/testing/selftests/kvm/include/timer_test.h +++ b/tools/testing/selftests/kvm/include/timer_test.h @@ -18,21 +18,21 @@ /* Timer test cmdline parameters */ struct test_args { - uint32_t nr_vcpus; - uint32_t nr_iter; - uint32_t timer_period_ms; - uint32_t migration_freq_ms; - uint32_t timer_err_margin_us; + u32 nr_vcpus; + u32 nr_iter; + u32 timer_period_ms; + u32 migration_freq_ms; + u32 timer_err_margin_us; /* Members of struct kvm_arm_counter_offset */ - uint64_t counter_offset; - uint64_t reserved; + u64 counter_offset; + u64 reserved; }; /* Shared variables between host and guest */ struct test_vcpu_shared_data { - uint32_t nr_iter; + u32 nr_iter; int guest_stage; - uint64_t xcnt; + u64 xcnt; }; extern struct 
test_args test_args; diff --git a/tools/testing/selftests/kvm/include/ucall_common.h b/tools/testing/selftests/kvm/include/ucall_common.h index d9d6581b8d4f..cbdcb0a50c4f 100644 --- a/tools/testing/selftests/kvm/include/ucall_common.h +++ b/tools/testing/selftests/kvm/include/ucall_common.h @@ -21,26 +21,26 @@ enum { #define UCALL_BUFFER_LEN 1024 struct ucall { - uint64_t cmd; - uint64_t args[UCALL_MAX_ARGS]; + u64 cmd; + u64 args[UCALL_MAX_ARGS]; char buffer[UCALL_BUFFER_LEN]; /* Host virtual address of this struct. */ struct ucall *hva; }; -void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa); -void ucall_arch_do_ucall(vm_vaddr_t uc); +void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa); +void ucall_arch_do_ucall(gva_t uc); void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu); -void ucall(uint64_t cmd, int nargs, ...); -__printf(2, 3) void ucall_fmt(uint64_t cmd, const char *fmt, ...); -__printf(5, 6) void ucall_assert(uint64_t cmd, const char *exp, +void ucall(u64 cmd, int nargs, ...); +__printf(2, 3) void ucall_fmt(u64 cmd, const char *fmt, ...); +__printf(5, 6) void ucall_assert(u64 cmd, const char *exp, const char *file, unsigned int line, const char *fmt, ...); -uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc); -void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa); -int ucall_nr_pages_required(uint64_t page_size); +u64 get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc); +void ucall_init(struct kvm_vm *vm, gpa_t mmio_gpa); +int ucall_nr_pages_required(u64 page_size); /* * Perform userspace call without any associated data. This bare call avoids @@ -48,7 +48,7 @@ int ucall_nr_pages_required(uint64_t page_size); * the full ucall() are problematic and/or unwanted. Note, this will come out * as UCALL_NONE on the backend. 
*/ -#define GUEST_UCALL_NONE() ucall_arch_do_ucall((vm_vaddr_t)NULL) +#define GUEST_UCALL_NONE() ucall_arch_do_ucall((gva_t)NULL) #define GUEST_SYNC_ARGS(stage, arg1, arg2, arg3, arg4) \ ucall(UCALL_SYNC, 6, "hello", stage, arg1, arg2, arg3, arg4) diff --git a/tools/testing/selftests/kvm/include/userfaultfd_util.h b/tools/testing/selftests/kvm/include/userfaultfd_util.h index 60f7f9d435dc..0bc1dc16600e 100644 --- a/tools/testing/selftests/kvm/include/userfaultfd_util.h +++ b/tools/testing/selftests/kvm/include/userfaultfd_util.h @@ -25,7 +25,7 @@ struct uffd_reader_args { struct uffd_desc { int uffd; - uint64_t num_readers; + u64 num_readers; /* Holds the write ends of the pipes for killing the readers. */ int *pipefds; pthread_t *readers; @@ -33,8 +33,8 @@ struct uffd_desc { }; struct uffd_desc *uffd_setup_demand_paging(int uffd_mode, useconds_t delay, - void *hva, uint64_t len, - uint64_t num_readers, + void *hva, u64 len, + u64 num_readers, uffd_handler_t handler); void uffd_stop_demand_paging(struct uffd_desc *uffd); diff --git a/tools/testing/selftests/kvm/include/x86/apic.h b/tools/testing/selftests/kvm/include/x86/apic.h index 5ca6bacbd70e..31887bdc3d6c 100644 --- a/tools/testing/selftests/kvm/include/x86/apic.h +++ b/tools/testing/selftests/kvm/include/x86/apic.h @@ -79,42 +79,42 @@ void apic_disable(void); void xapic_enable(void); void x2apic_enable(void); -static inline uint32_t get_bsp_flag(void) +static inline u32 get_bsp_flag(void) { return rdmsr(MSR_IA32_APICBASE) & MSR_IA32_APICBASE_BSP; } -static inline uint32_t xapic_read_reg(unsigned int reg) +static inline u32 xapic_read_reg(unsigned int reg) { - return ((volatile uint32_t *)APIC_DEFAULT_GPA)[reg >> 2]; + return ((volatile u32 *)APIC_DEFAULT_GPA)[reg >> 2]; } -static inline void xapic_write_reg(unsigned int reg, uint32_t val) +static inline void xapic_write_reg(unsigned int reg, u32 val) { - ((volatile uint32_t *)APIC_DEFAULT_GPA)[reg >> 2] = val; + ((volatile u32 *)APIC_DEFAULT_GPA)[reg >> 2] = 
val; } -static inline uint64_t x2apic_read_reg(unsigned int reg) +static inline u64 x2apic_read_reg(unsigned int reg) { return rdmsr(APIC_BASE_MSR + (reg >> 4)); } -static inline uint8_t x2apic_write_reg_safe(unsigned int reg, uint64_t value) +static inline u8 x2apic_write_reg_safe(unsigned int reg, u64 value) { return wrmsr_safe(APIC_BASE_MSR + (reg >> 4), value); } -static inline void x2apic_write_reg(unsigned int reg, uint64_t value) +static inline void x2apic_write_reg(unsigned int reg, u64 value) { - uint8_t fault = x2apic_write_reg_safe(reg, value); + u8 fault = x2apic_write_reg_safe(reg, value); __GUEST_ASSERT(!fault, "Unexpected fault 0x%x on WRMSR(%x) = %lx\n", fault, APIC_BASE_MSR + (reg >> 4), value); } -static inline void x2apic_write_reg_fault(unsigned int reg, uint64_t value) +static inline void x2apic_write_reg_fault(unsigned int reg, u64 value) { - uint8_t fault = x2apic_write_reg_safe(reg, value); + u8 fault = x2apic_write_reg_safe(reg, value); __GUEST_ASSERT(fault == GP_VECTOR, "Wanted #GP on WRMSR(%x) = %lx, got 0x%x\n", diff --git a/tools/testing/selftests/kvm/include/x86/evmcs.h b/tools/testing/selftests/kvm/include/x86/evmcs.h index 5a74bb30e2f8..be79bda024bf 100644 --- a/tools/testing/selftests/kvm/include/x86/evmcs.h +++ b/tools/testing/selftests/kvm/include/x86/evmcs.h @@ -10,9 +10,9 @@ #include "hyperv.h" #include "vmx.h" -#define u16 uint16_t -#define u32 uint32_t -#define u64 uint64_t +#define u16 u16 +#define u32 u32 +#define u64 u64 #define EVMCS_VERSION 1 @@ -245,7 +245,7 @@ static inline void evmcs_enable(void) enable_evmcs = true; } -static inline int evmcs_vmptrld(uint64_t vmcs_pa, void *vmcs) +static inline int evmcs_vmptrld(u64 vmcs_pa, void *vmcs) { current_vp_assist->current_nested_vmcs = vmcs_pa; current_vp_assist->enlighten_vmentry = 1; @@ -265,7 +265,7 @@ static inline bool load_evmcs(struct hyperv_test_pages *hv) return true; } -static inline int evmcs_vmptrst(uint64_t *value) +static inline int evmcs_vmptrst(u64 *value) { 
*value = current_vp_assist->current_nested_vmcs & ~HV_X64_MSR_VP_ASSIST_PAGE_ENABLE; @@ -273,7 +273,7 @@ static inline int evmcs_vmptrst(uint64_t *value) return 0; } -static inline int evmcs_vmread(uint64_t encoding, uint64_t *value) +static inline int evmcs_vmread(u64 encoding, u64 *value) { switch (encoding) { case GUEST_RIP: @@ -672,7 +672,7 @@ static inline int evmcs_vmread(uint64_t encoding, uint64_t *value) return 0; } -static inline int evmcs_vmwrite(uint64_t encoding, uint64_t value) +static inline int evmcs_vmwrite(u64 encoding, u64 value) { switch (encoding) { case GUEST_RIP: @@ -1226,9 +1226,9 @@ static inline int evmcs_vmlaunch(void) "pop %%rbp;" : [ret]"=&a"(ret) : [host_rsp]"r" - ((uint64_t)&current_evmcs->host_rsp), + ((u64)&current_evmcs->host_rsp), [host_rip]"r" - ((uint64_t)&current_evmcs->host_rip) + ((u64)&current_evmcs->host_rip) : "memory", "cc", "rbx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"); return ret; @@ -1265,9 +1265,9 @@ static inline int evmcs_vmresume(void) "pop %%rbp;" : [ret]"=&a"(ret) : [host_rsp]"r" - ((uint64_t)&current_evmcs->host_rsp), + ((u64)&current_evmcs->host_rsp), [host_rip]"r" - ((uint64_t)&current_evmcs->host_rip) + ((u64)&current_evmcs->host_rip) : "memory", "cc", "rbx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"); return ret; diff --git a/tools/testing/selftests/kvm/include/x86/hyperv.h b/tools/testing/selftests/kvm/include/x86/hyperv.h index f13e532be240..78003f5a22f3 100644 --- a/tools/testing/selftests/kvm/include/x86/hyperv.h +++ b/tools/testing/selftests/kvm/include/x86/hyperv.h @@ -254,12 +254,12 @@ * Issue a Hyper-V hypercall. Returns exception vector raised or 0, 'hv_status' * is set to the hypercall status (if no exception occurred). 
*/ -static inline uint8_t __hyperv_hypercall(u64 control, vm_vaddr_t input_address, - vm_vaddr_t output_address, - uint64_t *hv_status) +static inline u8 __hyperv_hypercall(u64 control, gva_t input_address, + gva_t output_address, + u64 *hv_status) { - uint64_t error_code; - uint8_t vector; + u64 error_code; + u8 vector; /* Note both the hypercall and the "asm safe" clobber r9-r11. */ asm volatile("mov %[output_address], %%r8\n\t" @@ -274,11 +274,11 @@ static inline uint8_t __hyperv_hypercall(u64 control, vm_vaddr_t input_address, } /* Issue a Hyper-V hypercall and assert that it succeeded. */ -static inline void hyperv_hypercall(u64 control, vm_vaddr_t input_address, - vm_vaddr_t output_address) +static inline void hyperv_hypercall(u64 control, gva_t input_address, + gva_t output_address) { - uint64_t hv_status; - uint8_t vector; + u64 hv_status; + u8 vector; vector = __hyperv_hypercall(control, input_address, output_address, &hv_status); @@ -327,27 +327,27 @@ struct hv_vp_assist_page { extern struct hv_vp_assist_page *current_vp_assist; -int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist); +int enable_vp_assist(u64 vp_assist_pa, void *vp_assist); struct hyperv_test_pages { /* VP assist page */ void *vp_assist_hva; - uint64_t vp_assist_gpa; + u64 vp_assist_gpa; void *vp_assist; /* Partition assist page */ void *partition_assist_hva; - uint64_t partition_assist_gpa; + u64 partition_assist_gpa; void *partition_assist; /* Enlightened VMCS */ void *enlightened_vmcs_hva; - uint64_t enlightened_vmcs_gpa; + u64 enlightened_vmcs_gpa; void *enlightened_vmcs; }; struct hyperv_test_pages *vcpu_alloc_hyperv_test_pages(struct kvm_vm *vm, - vm_vaddr_t *p_hv_pages_gva); + gva_t *p_hv_pages_gva); /* HV_X64_MSR_TSC_INVARIANT_CONTROL bits */ #define HV_INVARIANT_TSC_EXPOSED BIT_ULL(0) diff --git a/tools/testing/selftests/kvm/include/x86/kvm_util_arch.h b/tools/testing/selftests/kvm/include/x86/kvm_util_arch.h index be35d26bb320..c33ab6e04171 100644 --- 
a/tools/testing/selftests/kvm/include/x86/kvm_util_arch.h +++ b/tools/testing/selftests/kvm/include/x86/kvm_util_arch.h @@ -11,19 +11,19 @@ extern bool is_forced_emulation_enabled; struct pte_masks { - uint64_t present; - uint64_t writable; - uint64_t user; - uint64_t readable; - uint64_t executable; - uint64_t accessed; - uint64_t dirty; - uint64_t huge; - uint64_t nx; - uint64_t c; - uint64_t s; + u64 present; + u64 writable; + u64 user; + u64 readable; + u64 executable; + u64 accessed; + u64 dirty; + u64 huge; + u64 nx; + u64 c; + u64 s; - uint64_t always_set; + u64 always_set; }; struct kvm_mmu_arch { @@ -33,12 +33,12 @@ struct kvm_mmu_arch { struct kvm_mmu; struct kvm_vm_arch { - vm_vaddr_t gdt; - vm_vaddr_t tss; - vm_vaddr_t idt; + gva_t gdt; + gva_t tss; + gva_t idt; - uint64_t c_bit; - uint64_t s_bit; + u64 c_bit; + u64 s_bit; int sev_fd; bool is_pt_protected; }; @@ -62,7 +62,7 @@ do { \ : "+m" (mem) \ : "r" (val) : "memory"); \ } else { \ - uint64_t __old = READ_ONCE(mem); \ + u64 __old = READ_ONCE(mem); \ \ __asm__ __volatile__(KVM_FEP LOCK_PREFIX "cmpxchg %[new], %[ptr]" \ : [ptr] "+m" (mem), [old] "+a" (__old) \ diff --git a/tools/testing/selftests/kvm/include/x86/pmu.h b/tools/testing/selftests/kvm/include/x86/pmu.h index 72575eadb63a..98537cc8840d 100644 --- a/tools/testing/selftests/kvm/include/x86/pmu.h +++ b/tools/testing/selftests/kvm/include/x86/pmu.h @@ -6,8 +6,8 @@ #define SELFTEST_KVM_PMU_H #include <stdbool.h> -#include <stdint.h> +#include <linux/types.h> #include <linux/bits.h> #define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300 @@ -104,14 +104,15 @@ enum amd_pmu_zen_events { NR_AMD_ZEN_EVENTS, }; -extern const uint64_t intel_pmu_arch_events[]; -extern const uint64_t amd_pmu_zen_events[]; +extern const u64 intel_pmu_arch_events[]; +extern const u64 amd_pmu_zen_events[]; enum pmu_errata { INSTRUCTIONS_RETIRED_OVERCOUNT, BRANCHES_RETIRED_OVERCOUNT, }; -extern uint64_t pmu_errata_mask; + +extern u64 pmu_errata_mask; void kvm_init_pmu_errata(void); 
diff --git a/tools/testing/selftests/kvm/include/x86/processor.h b/tools/testing/selftests/kvm/include/x86/processor.h index d8634a760a60..77f576ee7789 100644 --- a/tools/testing/selftests/kvm/include/x86/processor.h +++ b/tools/testing/selftests/kvm/include/x86/processor.h @@ -23,7 +23,7 @@ extern bool host_cpu_is_intel; extern bool host_cpu_is_amd; extern bool host_cpu_is_hygon; extern bool host_cpu_is_amd_compatible; -extern uint64_t guest_tsc_khz; +extern u64 guest_tsc_khz; #ifndef MAX_NR_CPUID_ENTRIES #define MAX_NR_CPUID_ENTRIES 100 @@ -399,17 +399,17 @@ struct gpr64_regs { }; struct desc64 { - uint16_t limit0; - uint16_t base0; + u16 limit0; + u16 base0; unsigned base1:8, type:4, s:1, dpl:2, p:1; unsigned limit1:4, avl:1, l:1, db:1, g:1, base2:8; - uint32_t base3; - uint32_t zero1; + u32 base3; + u32 zero1; } __attribute__((packed)); struct desc_ptr { - uint16_t size; - uint64_t address; + u16 size; + u64 address; } __attribute__((packed)); struct kvm_x86_state { @@ -427,18 +427,18 @@ struct kvm_x86_state { struct kvm_msrs msrs; }; -static inline uint64_t get_desc64_base(const struct desc64 *desc) +static inline u64 get_desc64_base(const struct desc64 *desc) { - return (uint64_t)desc->base3 << 32 | - (uint64_t)desc->base2 << 24 | - (uint64_t)desc->base1 << 16 | - (uint64_t)desc->base0; + return (u64)desc->base3 << 32 | + (u64)desc->base2 << 24 | + (u64)desc->base1 << 16 | + (u64)desc->base0; } -static inline uint64_t rdtsc(void) +static inline u64 rdtsc(void) { - uint32_t eax, edx; - uint64_t tsc_val; + u32 eax, edx; + u64 tsc_val; /* * The lfence is to wait (on Intel CPUs) until all previous * instructions have been executed. 
If software requires RDTSC to be @@ -446,39 +446,39 @@ static inline uint64_t rdtsc(void) * execute LFENCE immediately after RDTSC */ __asm__ __volatile__("lfence; rdtsc; lfence" : "=a"(eax), "=d"(edx)); - tsc_val = ((uint64_t)edx) << 32 | eax; + tsc_val = ((u64)edx) << 32 | eax; return tsc_val; } -static inline uint64_t rdtscp(uint32_t *aux) +static inline u64 rdtscp(u32 *aux) { - uint32_t eax, edx; + u32 eax, edx; __asm__ __volatile__("rdtscp" : "=a"(eax), "=d"(edx), "=c"(*aux)); - return ((uint64_t)edx) << 32 | eax; + return ((u64)edx) << 32 | eax; } -static inline uint64_t rdmsr(uint32_t msr) +static inline u64 rdmsr(u32 msr) { - uint32_t a, d; + u32 a, d; __asm__ __volatile__("rdmsr" : "=a"(a), "=d"(d) : "c"(msr) : "memory"); - return a | ((uint64_t) d << 32); + return a | ((u64)d << 32); } -static inline void wrmsr(uint32_t msr, uint64_t value) +static inline void wrmsr(u32 msr, u64 value) { - uint32_t a = value; - uint32_t d = value >> 32; + u32 a = value; + u32 d = value >> 32; __asm__ __volatile__("wrmsr" :: "a"(a), "d"(d), "c"(msr) : "memory"); } -static inline uint16_t inw(uint16_t port) +static inline u16 inw(u16 port) { - uint16_t tmp; + u16 tmp; __asm__ __volatile__("in %%dx, %%ax" : /* output */ "=a" (tmp) @@ -487,120 +487,120 @@ static inline uint16_t inw(uint16_t port) return tmp; } -static inline uint16_t get_es(void) +static inline u16 get_es(void) { - uint16_t es; + u16 es; __asm__ __volatile__("mov %%es, %[es]" : /* output */ [es]"=rm"(es)); return es; } -static inline uint16_t get_cs(void) +static inline u16 get_cs(void) { - uint16_t cs; + u16 cs; __asm__ __volatile__("mov %%cs, %[cs]" : /* output */ [cs]"=rm"(cs)); return cs; } -static inline uint16_t get_ss(void) +static inline u16 get_ss(void) { - uint16_t ss; + u16 ss; __asm__ __volatile__("mov %%ss, %[ss]" : /* output */ [ss]"=rm"(ss)); return ss; } -static inline uint16_t get_ds(void) +static inline u16 get_ds(void) { - uint16_t ds; + u16 ds; __asm__ __volatile__("mov %%ds, %[ds]" : /* 
output */ [ds]"=rm"(ds)); return ds; } -static inline uint16_t get_fs(void) +static inline u16 get_fs(void) { - uint16_t fs; + u16 fs; __asm__ __volatile__("mov %%fs, %[fs]" : /* output */ [fs]"=rm"(fs)); return fs; } -static inline uint16_t get_gs(void) +static inline u16 get_gs(void) { - uint16_t gs; + u16 gs; __asm__ __volatile__("mov %%gs, %[gs]" : /* output */ [gs]"=rm"(gs)); return gs; } -static inline uint16_t get_tr(void) +static inline u16 get_tr(void) { - uint16_t tr; + u16 tr; __asm__ __volatile__("str %[tr]" : /* output */ [tr]"=rm"(tr)); return tr; } -static inline uint64_t get_cr0(void) +static inline u64 get_cr0(void) { - uint64_t cr0; + u64 cr0; __asm__ __volatile__("mov %%cr0, %[cr0]" : /* output */ [cr0]"=r"(cr0)); return cr0; } -static inline void set_cr0(uint64_t val) +static inline void set_cr0(u64 val) { __asm__ __volatile__("mov %0, %%cr0" : : "r" (val) : "memory"); } -static inline uint64_t get_cr3(void) +static inline u64 get_cr3(void) { - uint64_t cr3; + u64 cr3; __asm__ __volatile__("mov %%cr3, %[cr3]" : /* output */ [cr3]"=r"(cr3)); return cr3; } -static inline void set_cr3(uint64_t val) +static inline void set_cr3(u64 val) { __asm__ __volatile__("mov %0, %%cr3" : : "r" (val) : "memory"); } -static inline uint64_t get_cr4(void) +static inline u64 get_cr4(void) { - uint64_t cr4; + u64 cr4; __asm__ __volatile__("mov %%cr4, %[cr4]" : /* output */ [cr4]"=r"(cr4)); return cr4; } -static inline void set_cr4(uint64_t val) +static inline void set_cr4(u64 val) { __asm__ __volatile__("mov %0, %%cr4" : : "r" (val) : "memory"); } -static inline uint64_t get_cr8(void) +static inline u64 get_cr8(void) { - uint64_t cr8; + u64 cr8; __asm__ __volatile__("mov %%cr8, %[cr8]" : [cr8]"=r"(cr8)); return cr8; } -static inline void set_cr8(uint64_t val) +static inline void set_cr8(u64 val) { __asm__ __volatile__("mov %0, %%cr8" : : "r" (val) : "memory"); } @@ -651,14 +651,14 @@ static inline struct desc_ptr get_idt(void) return idt; } -static inline void 
outl(uint16_t port, uint32_t value) +static inline void outl(u16 port, u32 value) { __asm__ __volatile__("outl %%eax, %%dx" : : "d"(port), "a"(value)); } -static inline void __cpuid(uint32_t function, uint32_t index, - uint32_t *eax, uint32_t *ebx, - uint32_t *ecx, uint32_t *edx) +static inline void __cpuid(u32 function, u32 index, + u32 *eax, u32 *ebx, + u32 *ecx, u32 *edx) { *eax = function; *ecx = index; @@ -672,35 +672,35 @@ static inline void __cpuid(uint32_t function, uint32_t index, : "memory"); } -static inline void cpuid(uint32_t function, - uint32_t *eax, uint32_t *ebx, - uint32_t *ecx, uint32_t *edx) +static inline void cpuid(u32 function, + u32 *eax, u32 *ebx, + u32 *ecx, u32 *edx) { return __cpuid(function, 0, eax, ebx, ecx, edx); } -static inline uint32_t this_cpu_fms(void) +static inline u32 this_cpu_fms(void) { - uint32_t eax, ebx, ecx, edx; + u32 eax, ebx, ecx, edx; cpuid(1, &eax, &ebx, &ecx, &edx); return eax; } -static inline uint32_t this_cpu_family(void) +static inline u32 this_cpu_family(void) { return x86_family(this_cpu_fms()); } -static inline uint32_t this_cpu_model(void) +static inline u32 this_cpu_model(void) { return x86_model(this_cpu_fms()); } static inline bool this_cpu_vendor_string_is(const char *vendor) { - const uint32_t *chunk = (const uint32_t *)vendor; - uint32_t eax, ebx, ecx, edx; + const u32 *chunk = (const u32 *)vendor; + u32 eax, ebx, ecx, edx; cpuid(0, &eax, &ebx, &ecx, &edx); return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]); @@ -724,10 +724,9 @@ static inline bool this_cpu_is_hygon(void) return this_cpu_vendor_string_is("HygonGenuine"); } -static inline uint32_t __this_cpu_has(uint32_t function, uint32_t index, - uint8_t reg, uint8_t lo, uint8_t hi) +static inline u32 __this_cpu_has(u32 function, u32 index, u8 reg, u8 lo, u8 hi) { - uint32_t gprs[4]; + u32 gprs[4]; __cpuid(function, index, &gprs[KVM_CPUID_EAX], &gprs[KVM_CPUID_EBX], @@ -742,7 +741,7 @@ static inline bool this_cpu_has(struct 
kvm_x86_cpu_feature feature) feature.reg, feature.bit, feature.bit); } -static inline uint32_t this_cpu_property(struct kvm_x86_cpu_property property) +static inline u32 this_cpu_property(struct kvm_x86_cpu_property property) { return __this_cpu_has(property.function, property.index, property.reg, property.lo_bit, property.hi_bit); @@ -750,7 +749,7 @@ static inline uint32_t this_cpu_property(struct kvm_x86_cpu_property property) static __always_inline bool this_cpu_has_p(struct kvm_x86_cpu_property property) { - uint32_t max_leaf; + u32 max_leaf; switch (property.function & 0xc0000000) { case 0: @@ -770,7 +769,7 @@ static __always_inline bool this_cpu_has_p(struct kvm_x86_cpu_property property) static inline bool this_pmu_has(struct kvm_x86_pmu_feature feature) { - uint32_t nr_bits; + u32 nr_bits; if (feature.f.reg == KVM_CPUID_EBX) { nr_bits = this_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH); @@ -782,13 +781,13 @@ static inline bool this_pmu_has(struct kvm_x86_pmu_feature feature) return nr_bits > feature.f.bit || this_cpu_has(feature.f); } -static __always_inline uint64_t this_cpu_supported_xcr0(void) +static __always_inline u64 this_cpu_supported_xcr0(void) { if (!this_cpu_has_p(X86_PROPERTY_SUPPORTED_XCR0_LO)) return 0; return this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_LO) | - ((uint64_t)this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32); + ((u64)this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32); } typedef u32 __attribute__((vector_size(16))) sse128_t; @@ -867,7 +866,7 @@ static inline void cpu_relax(void) static inline void udelay(unsigned long usec) { - uint64_t start, now, cycles; + u64 start, now, cycles; GUEST_ASSERT(guest_tsc_khz); cycles = guest_tsc_khz / 1000 * usec; @@ -898,8 +897,8 @@ void kvm_x86_state_cleanup(struct kvm_x86_state *state); const struct kvm_msr_list *kvm_get_msr_index_list(void); const struct kvm_msr_list *kvm_get_feature_msr_index_list(void); -bool kvm_msr_is_in_save_restore_list(uint32_t msr_index); 
-uint64_t kvm_get_feature_msr(uint64_t msr_index); +bool kvm_msr_is_in_save_restore_list(u32 msr_index); +u64 kvm_get_feature_msr(u64 msr_index); static inline void vcpu_msrs_get(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs) @@ -954,20 +953,20 @@ static inline void vcpu_xcrs_set(struct kvm_vcpu *vcpu, struct kvm_xcrs *xcrs) } const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid, - uint32_t function, uint32_t index); + u32 function, u32 index); const struct kvm_cpuid2 *kvm_get_supported_cpuid(void); -static inline uint32_t kvm_cpu_fms(void) +static inline u32 kvm_cpu_fms(void) { return get_cpuid_entry(kvm_get_supported_cpuid(), 0x1, 0)->eax; } -static inline uint32_t kvm_cpu_family(void) +static inline u32 kvm_cpu_family(void) { return x86_family(kvm_cpu_fms()); } -static inline uint32_t kvm_cpu_model(void) +static inline u32 kvm_cpu_model(void) { return x86_model(kvm_cpu_fms()); } @@ -980,17 +979,17 @@ static inline bool kvm_cpu_has(struct kvm_x86_cpu_feature feature) return kvm_cpuid_has(kvm_get_supported_cpuid(), feature); } -uint32_t kvm_cpuid_property(const struct kvm_cpuid2 *cpuid, - struct kvm_x86_cpu_property property); +u32 kvm_cpuid_property(const struct kvm_cpuid2 *cpuid, + struct kvm_x86_cpu_property property); -static inline uint32_t kvm_cpu_property(struct kvm_x86_cpu_property property) +static inline u32 kvm_cpu_property(struct kvm_x86_cpu_property property) { return kvm_cpuid_property(kvm_get_supported_cpuid(), property); } static __always_inline bool kvm_cpu_has_p(struct kvm_x86_cpu_property property) { - uint32_t max_leaf; + u32 max_leaf; switch (property.function & 0xc0000000) { case 0: @@ -1010,7 +1009,7 @@ static __always_inline bool kvm_cpu_has_p(struct kvm_x86_cpu_property property) static inline bool kvm_pmu_has(struct kvm_x86_pmu_feature feature) { - uint32_t nr_bits; + u32 nr_bits; if (feature.f.reg == KVM_CPUID_EBX) { nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH); @@ -1022,13 +1021,13 @@ static 
inline bool kvm_pmu_has(struct kvm_x86_pmu_feature feature) return nr_bits > feature.f.bit || kvm_cpu_has(feature.f); } -static __always_inline uint64_t kvm_cpu_supported_xcr0(void) +static __always_inline u64 kvm_cpu_supported_xcr0(void) { if (!kvm_cpu_has_p(X86_PROPERTY_SUPPORTED_XCR0_LO)) return 0; return kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_LO) | - ((uint64_t)kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32); + ((u64)kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32); } static inline size_t kvm_cpuid2_size(int nr_entries) @@ -1062,8 +1061,8 @@ static inline void vcpu_get_cpuid(struct kvm_vcpu *vcpu) } static inline struct kvm_cpuid_entry2 *__vcpu_get_cpuid_entry(struct kvm_vcpu *vcpu, - uint32_t function, - uint32_t index) + u32 function, + u32 index) { TEST_ASSERT(vcpu->cpuid, "Must do vcpu_init_cpuid() first (or equivalent)"); @@ -1074,7 +1073,7 @@ static inline struct kvm_cpuid_entry2 *__vcpu_get_cpuid_entry(struct kvm_vcpu *v } static inline struct kvm_cpuid_entry2 *vcpu_get_cpuid_entry(struct kvm_vcpu *vcpu, - uint32_t function) + u32 function) { return __vcpu_get_cpuid_entry(vcpu, function, 0); } @@ -1104,10 +1103,10 @@ static inline void vcpu_set_cpuid(struct kvm_vcpu *vcpu) void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu, struct kvm_x86_cpu_property property, - uint32_t value); -void vcpu_set_cpuid_maxphyaddr(struct kvm_vcpu *vcpu, uint8_t maxphyaddr); + u32 value); +void vcpu_set_cpuid_maxphyaddr(struct kvm_vcpu *vcpu, u8 maxphyaddr); -void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, uint32_t function); +void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, u32 function); static inline bool vcpu_cpuid_has(struct kvm_vcpu *vcpu, struct kvm_x86_cpu_feature feature) @@ -1135,8 +1134,8 @@ static inline void vcpu_clear_cpuid_feature(struct kvm_vcpu *vcpu, vcpu_set_or_clear_cpuid_feature(vcpu, feature, false); } -uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index); -int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t 
msr_index, uint64_t msr_value); +u64 vcpu_get_msr(struct kvm_vcpu *vcpu, u64 msr_index); +int _vcpu_set_msr(struct kvm_vcpu *vcpu, u64 msr_index, u64 msr_value); /* * Assert on an MSR access(es) and pretty print the MSR name when possible. @@ -1161,14 +1160,14 @@ do { \ * is changing, etc. This is NOT an exhaustive list! The intent is to filter * out MSRs that are not durable _and_ that a selftest wants to write. */ -static inline bool is_durable_msr(uint32_t msr) +static inline bool is_durable_msr(u32 msr) { return msr != MSR_IA32_TSC; } #define vcpu_set_msr(vcpu, msr, val) \ do { \ - uint64_t r, v = val; \ + u64 r, v = val; \ \ TEST_ASSERT_MSR(_vcpu_set_msr(vcpu, msr, v) == 1, \ "KVM_SET_MSRS failed on %s, value = 0x%lx", msr, #msr, v); \ @@ -1182,28 +1181,28 @@ void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits); void kvm_init_vm_address_properties(struct kvm_vm *vm); struct ex_regs { - uint64_t rax, rcx, rdx, rbx; - uint64_t rbp, rsi, rdi; - uint64_t r8, r9, r10, r11; - uint64_t r12, r13, r14, r15; - uint64_t vector; - uint64_t error_code; - uint64_t rip; - uint64_t cs; - uint64_t rflags; + u64 rax, rcx, rdx, rbx; + u64 rbp, rsi, rdi; + u64 r8, r9, r10, r11; + u64 r12, r13, r14, r15; + u64 vector; + u64 error_code; + u64 rip; + u64 cs; + u64 rflags; }; struct idt_entry { - uint16_t offset0; - uint16_t selector; - uint16_t ist : 3; - uint16_t : 5; - uint16_t type : 4; - uint16_t : 1; - uint16_t dpl : 2; - uint16_t p : 1; - uint16_t offset1; - uint32_t offset2; uint32_t reserved; + u16 offset0; + u16 selector; + u16 ist : 3; + u16 : 5; + u16 type : 4; + u16 : 1; + u16 dpl : 2; + u16 p : 1; + u16 offset1; + u32 offset2; u32 reserved; }; void vm_install_exception_handler(struct kvm_vm *vm, int vector, @@ -1262,8 +1261,8 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector, #define kvm_asm_safe(insn, inputs...) 
\ ({ \ - uint64_t ign_error_code; \ - uint8_t vector; \ + u64 ign_error_code; \ + u8 vector; \ \ asm volatile(KVM_ASM_SAFE(insn) \ : KVM_ASM_SAFE_OUTPUTS(vector, ign_error_code) \ @@ -1274,7 +1273,7 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector, #define kvm_asm_safe_ec(insn, error_code, inputs...) \ ({ \ - uint8_t vector; \ + u8 vector; \ \ asm volatile(KVM_ASM_SAFE(insn) \ : KVM_ASM_SAFE_OUTPUTS(vector, error_code) \ @@ -1285,8 +1284,8 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector, #define kvm_asm_safe_fep(insn, inputs...) \ ({ \ - uint64_t ign_error_code; \ - uint8_t vector; \ + u64 ign_error_code; \ + u8 vector; \ \ asm volatile(KVM_ASM_SAFE_FEP(insn) \ : KVM_ASM_SAFE_OUTPUTS(vector, ign_error_code) \ @@ -1297,7 +1296,7 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector, #define kvm_asm_safe_ec_fep(insn, error_code, inputs...) \ ({ \ - uint8_t vector; \ + u8 vector; \ \ asm volatile(KVM_ASM_SAFE_FEP(insn) \ : KVM_ASM_SAFE_OUTPUTS(vector, error_code) \ @@ -1307,11 +1306,11 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector, }) #define BUILD_READ_U64_SAFE_HELPER(insn, _fep, _FEP) \ -static inline uint8_t insn##_safe ##_fep(uint32_t idx, uint64_t *val) \ +static inline u8 insn##_safe ##_fep(u32 idx, u64 *val) \ { \ - uint64_t error_code; \ - uint8_t vector; \ - uint32_t a, d; \ + u64 error_code; \ + u8 vector; \ + u32 a, d; \ \ asm volatile(KVM_ASM_SAFE##_FEP(#insn) \ : "=a"(a), "=d"(d), \ @@ -1319,7 +1318,7 @@ static inline uint8_t insn##_safe ##_fep(uint32_t idx, uint64_t *val) \ : "c"(idx) \ : KVM_ASM_SAFE_CLOBBERS); \ \ - *val = (uint64_t)a | ((uint64_t)d << 32); \ + *val = (u64)a | ((u64)d << 32); \ return vector; \ } @@ -1335,12 +1334,12 @@ BUILD_READ_U64_SAFE_HELPERS(rdmsr) BUILD_READ_U64_SAFE_HELPERS(rdpmc) BUILD_READ_U64_SAFE_HELPERS(xgetbv) -static inline uint8_t wrmsr_safe(uint32_t msr, uint64_t val) +static inline u8 wrmsr_safe(u32 msr, u64 val) { return kvm_asm_safe("wrmsr", 
"a"(val & -1u), "d"(val >> 32), "c"(msr)); } -static inline uint8_t xsetbv_safe(uint32_t index, uint64_t value) +static inline u8 xsetbv_safe(u32 index, u64 value) { u32 eax = value; u32 edx = value >> 32; @@ -1395,23 +1394,20 @@ static inline bool kvm_is_lbrv_enabled(void) return !!get_kvm_amd_param_integer("lbrv"); } -uint64_t *vm_get_pte(struct kvm_vm *vm, uint64_t vaddr); +u64 *vm_get_pte(struct kvm_vm *vm, gva_t gva); -uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2, - uint64_t a3); -uint64_t __xen_hypercall(uint64_t nr, uint64_t a0, void *a1); -void xen_hypercall(uint64_t nr, uint64_t a0, void *a1); +u64 kvm_hypercall(u64 nr, u64 a0, u64 a1, u64 a2, u64 a3); +u64 __xen_hypercall(u64 nr, u64 a0, void *a1); +void xen_hypercall(u64 nr, u64 a0, void *a1); -static inline uint64_t __kvm_hypercall_map_gpa_range(uint64_t gpa, - uint64_t size, uint64_t flags) +static inline u64 __kvm_hypercall_map_gpa_range(gpa_t gpa, u64 size, u64 flags) { return kvm_hypercall(KVM_HC_MAP_GPA_RANGE, gpa, size >> PAGE_SHIFT, flags, 0); } -static inline void kvm_hypercall_map_gpa_range(uint64_t gpa, uint64_t size, - uint64_t flags) +static inline void kvm_hypercall_map_gpa_range(gpa_t gpa, u64 size, u64 flags) { - uint64_t ret = __kvm_hypercall_map_gpa_range(gpa, size, flags); + u64 ret = __kvm_hypercall_map_gpa_range(gpa, size, flags); GUEST_ASSERT(!ret); } @@ -1456,7 +1452,7 @@ static inline void cli(void) asm volatile ("cli"); } -void __vm_xsave_require_permission(uint64_t xfeature, const char *name); +void __vm_xsave_require_permission(u64 xfeature, const char *name); #define vm_xsave_require_permission(xfeature) \ __vm_xsave_require_permission(xfeature, #xfeature) @@ -1511,17 +1507,17 @@ enum pg_level { void tdp_mmu_init(struct kvm_vm *vm, int pgtable_levels, struct pte_masks *pte_masks); -void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, uint64_t vaddr, - uint64_t paddr, int level); -void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t 
paddr, - uint64_t nr_bytes, int level); +void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, gva_t gva, + gpa_t gpa, int level); +void virt_map_level(struct kvm_vm *vm, gva_t gva, gpa_t gpa, + u64 nr_bytes, int level); void vm_enable_tdp(struct kvm_vm *vm); bool kvm_cpu_has_tdp(void); -void tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr, uint64_t size); +void tdp_map(struct kvm_vm *vm, gpa_t l2_gpa, gpa_t gpa, u64 size); void tdp_identity_map_default_memslots(struct kvm_vm *vm); -void tdp_identity_map_1g(struct kvm_vm *vm, uint64_t addr, uint64_t size); -uint64_t *tdp_get_pte(struct kvm_vm *vm, uint64_t l2_gpa); +void tdp_identity_map_1g(struct kvm_vm *vm, u64 addr, u64 size); +u64 *tdp_get_pte(struct kvm_vm *vm, u64 l2_gpa); /* * Basic CPU control in CR0 diff --git a/tools/testing/selftests/kvm/include/x86/sev.h b/tools/testing/selftests/kvm/include/x86/sev.h index 008b4169f5e2..1af44c151d60 100644 --- a/tools/testing/selftests/kvm/include/x86/sev.h +++ b/tools/testing/selftests/kvm/include/x86/sev.h @@ -46,16 +46,16 @@ static inline bool is_sev_vm(struct kvm_vm *vm) return is_sev_es_vm(vm) || vm->type == KVM_X86_SEV_VM; } -void sev_vm_launch(struct kvm_vm *vm, uint32_t policy); -void sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement); +void sev_vm_launch(struct kvm_vm *vm, u32 policy); +void sev_vm_launch_measure(struct kvm_vm *vm, u8 *measurement); void sev_vm_launch_finish(struct kvm_vm *vm); -void snp_vm_launch_start(struct kvm_vm *vm, uint64_t policy); +void snp_vm_launch_start(struct kvm_vm *vm, u64 policy); void snp_vm_launch_update(struct kvm_vm *vm); void snp_vm_launch_finish(struct kvm_vm *vm); -struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code, +struct kvm_vm *vm_sev_create_with_one_vcpu(u32 type, void *guest_code, struct kvm_vcpu **cpu); -void vm_sev_launch(struct kvm_vm *vm, uint64_t policy, uint8_t *measurement); +void vm_sev_launch(struct kvm_vm *vm, u64 policy, u8 *measurement); 
kvm_static_assert(SEV_RET_SUCCESS == 0); @@ -85,7 +85,7 @@ static inline u64 snp_default_policy(void) unsigned long raw; \ } sev_cmd = { .c = { \ .id = (cmd), \ - .data = (uint64_t)(arg), \ + .data = (u64)(arg), \ .sev_fd = (vm)->arch.sev_fd, \ } }; \ \ @@ -120,8 +120,8 @@ static inline void sev_register_encrypted_memory(struct kvm_vm *vm, vm_ioctl(vm, KVM_MEMORY_ENCRYPT_REG_REGION, &range); } -static inline void sev_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa, - uint64_t size) +static inline void sev_launch_update_data(struct kvm_vm *vm, gpa_t gpa, + u64 size) { struct kvm_sev_launch_update_data update_data = { .uaddr = (unsigned long)addr_gpa2hva(vm, gpa), @@ -131,8 +131,8 @@ static inline void sev_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa, vm_sev_ioctl(vm, KVM_SEV_LAUNCH_UPDATE_DATA, &update_data); } -static inline void snp_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa, - uint64_t hva, uint64_t size, uint8_t type) +static inline void snp_launch_update_data(struct kvm_vm *vm, gpa_t gpa, + u64 hva, u64 size, u8 type) { struct kvm_sev_snp_launch_update update_data = { .uaddr = hva, diff --git a/tools/testing/selftests/kvm/include/x86/smm.h b/tools/testing/selftests/kvm/include/x86/smm.h index 19337c34f13e..2d1afa09819b 100644 --- a/tools/testing/selftests/kvm/include/x86/smm.h +++ b/tools/testing/selftests/kvm/include/x86/smm.h @@ -8,8 +8,7 @@ #define SMRAM_MEMSLOT ((1 << 16) | 1) #define SMRAM_PAGES (SMRAM_SIZE / PAGE_SIZE) -void setup_smram(struct kvm_vm *vm, struct kvm_vcpu *vcpu, - uint64_t smram_gpa, +void setup_smram(struct kvm_vm *vm, struct kvm_vcpu *vcpu, u64 smram_gpa, const void *smi_handler, size_t handler_size); void inject_smi(struct kvm_vcpu *vcpu); diff --git a/tools/testing/selftests/kvm/include/x86/svm_util.h b/tools/testing/selftests/kvm/include/x86/svm_util.h index 5d7c42534bc4..6c013eb838be 100644 --- a/tools/testing/selftests/kvm/include/x86/svm_util.h +++ b/tools/testing/selftests/kvm/include/x86/svm_util.h @@ 
-16,20 +16,20 @@ struct svm_test_data { /* VMCB */ struct vmcb *vmcb; /* gva */ void *vmcb_hva; - uint64_t vmcb_gpa; + u64 vmcb_gpa; /* host state-save area */ struct vmcb_save_area *save_area; /* gva */ void *save_area_hva; - uint64_t save_area_gpa; + u64 save_area_gpa; /* MSR-Bitmap */ void *msr; /* gva */ void *msr_hva; - uint64_t msr_gpa; + u64 msr_gpa; /* NPT */ - uint64_t ncr3_gpa; + u64 ncr3_gpa; }; static inline void vmmcall(void) @@ -56,9 +56,9 @@ static inline void vmmcall(void) "clgi\n" \ ) -struct svm_test_data *vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva); +struct svm_test_data *vcpu_alloc_svm(struct kvm_vm *vm, gva_t *p_svm_gva); void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp); -void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa); +void run_guest(struct vmcb *vmcb, u64 vmcb_gpa); static inline bool kvm_cpu_has_npt(void) { diff --git a/tools/testing/selftests/kvm/include/x86/ucall.h b/tools/testing/selftests/kvm/include/x86/ucall.h index d3825dcc3cd9..0e4950041e3e 100644 --- a/tools/testing/selftests/kvm/include/x86/ucall.h +++ b/tools/testing/selftests/kvm/include/x86/ucall.h @@ -6,7 +6,7 @@ #define UCALL_EXIT_REASON KVM_EXIT_IO -static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) +static inline void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa) { } diff --git a/tools/testing/selftests/kvm/include/x86/vmx.h b/tools/testing/selftests/kvm/include/x86/vmx.h index 92b918700d24..90fffaf91595 100644 --- a/tools/testing/selftests/kvm/include/x86/vmx.h +++ b/tools/testing/selftests/kvm/include/x86/vmx.h @@ -285,16 +285,16 @@ enum vmcs_field { }; struct vmx_msr_entry { - uint32_t index; - uint32_t reserved; - uint64_t value; + u32 index; + u32 reserved; + u64 value; } __attribute__ ((aligned(16))); #include "evmcs.h" -static inline int vmxon(uint64_t phys) +static inline int vmxon(u64 phys) { - uint8_t ret; + u8 ret; __asm__ __volatile__ ("vmxon %[pa]; setna %[ret]" : [ret]"=rm"(ret) 
@@ -309,9 +309,9 @@ static inline void vmxoff(void) __asm__ __volatile__("vmxoff"); } -static inline int vmclear(uint64_t vmcs_pa) +static inline int vmclear(u64 vmcs_pa) { - uint8_t ret; + u8 ret; __asm__ __volatile__ ("vmclear %[pa]; setna %[ret]" : [ret]"=rm"(ret) @@ -321,9 +321,9 @@ static inline int vmclear(uint64_t vmcs_pa) return ret; } -static inline int vmptrld(uint64_t vmcs_pa) +static inline int vmptrld(u64 vmcs_pa) { - uint8_t ret; + u8 ret; if (enable_evmcs) return -1; @@ -336,10 +336,10 @@ static inline int vmptrld(uint64_t vmcs_pa) return ret; } -static inline int vmptrst(uint64_t *value) +static inline int vmptrst(u64 *value) { - uint64_t tmp; - uint8_t ret; + u64 tmp; + u8 ret; if (enable_evmcs) return evmcs_vmptrst(value); @@ -356,9 +356,9 @@ static inline int vmptrst(uint64_t *value) * A wrapper around vmptrst that ignores errors and returns zero if the * vmptrst instruction fails. */ -static inline uint64_t vmptrstz(void) +static inline u64 vmptrstz(void) { - uint64_t value = 0; + u64 value = 0; vmptrst(&value); return value; } @@ -391,8 +391,8 @@ static inline int vmlaunch(void) "pop %%rcx;" "pop %%rbp;" : [ret]"=&a"(ret) - : [host_rsp]"r"((uint64_t)HOST_RSP), - [host_rip]"r"((uint64_t)HOST_RIP) + : [host_rsp]"r"((u64)HOST_RSP), + [host_rip]"r"((u64)HOST_RIP) : "memory", "cc", "rbx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"); return ret; @@ -426,8 +426,8 @@ static inline int vmresume(void) "pop %%rcx;" "pop %%rbp;" : [ret]"=&a"(ret) - : [host_rsp]"r"((uint64_t)HOST_RSP), - [host_rip]"r"((uint64_t)HOST_RIP) + : [host_rsp]"r"((u64)HOST_RSP), + [host_rip]"r"((u64)HOST_RIP) : "memory", "cc", "rbx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"); return ret; @@ -447,10 +447,10 @@ static inline void vmcall(void) "r10", "r11", "r12", "r13", "r14", "r15"); } -static inline int vmread(uint64_t encoding, uint64_t *value) +static inline int vmread(u64 encoding, u64 *value) { - uint64_t tmp; - uint8_t ret; + u64 tmp; + u8 ret; if 
(enable_evmcs) return evmcs_vmread(encoding, value); @@ -468,16 +468,16 @@ static inline int vmread(uint64_t encoding, uint64_t *value) * A wrapper around vmread that ignores errors and returns zero if the * vmread instruction fails. */ -static inline uint64_t vmreadz(uint64_t encoding) +static inline u64 vmreadz(u64 encoding) { - uint64_t value = 0; + u64 value = 0; vmread(encoding, &value); return value; } -static inline int vmwrite(uint64_t encoding, uint64_t value) +static inline int vmwrite(u64 encoding, u64 value) { - uint8_t ret; + u8 ret; if (enable_evmcs) return evmcs_vmwrite(encoding, value); @@ -490,41 +490,41 @@ static inline int vmwrite(uint64_t encoding, uint64_t value) return ret; } -static inline uint32_t vmcs_revision(void) +static inline u32 vmcs_revision(void) { return rdmsr(MSR_IA32_VMX_BASIC); } struct vmx_pages { void *vmxon_hva; - uint64_t vmxon_gpa; + u64 vmxon_gpa; void *vmxon; void *vmcs_hva; - uint64_t vmcs_gpa; + u64 vmcs_gpa; void *vmcs; void *msr_hva; - uint64_t msr_gpa; + u64 msr_gpa; void *msr; void *shadow_vmcs_hva; - uint64_t shadow_vmcs_gpa; + u64 shadow_vmcs_gpa; void *shadow_vmcs; void *vmread_hva; - uint64_t vmread_gpa; + u64 vmread_gpa; void *vmread; void *vmwrite_hva; - uint64_t vmwrite_gpa; + u64 vmwrite_gpa; void *vmwrite; void *apic_access_hva; - uint64_t apic_access_gpa; + u64 apic_access_gpa; void *apic_access; - uint64_t eptp_gpa; + u64 eptp_gpa; }; union vmx_basic { @@ -550,7 +550,7 @@ union vmx_ctrl_msr { }; }; -struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva); +struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, gva_t *p_vmx_gva); bool prepare_for_vmx_operation(struct vmx_pages *vmx); void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp); bool load_vmcs(struct vmx_pages *vmx); diff --git a/tools/testing/selftests/kvm/kvm_page_table_test.c b/tools/testing/selftests/kvm/kvm_page_table_test.c index c60a24a92829..fc5242fb956f 100644 --- 
a/tools/testing/selftests/kvm/kvm_page_table_test.c +++ b/tools/testing/selftests/kvm/kvm_page_table_test.c @@ -46,12 +46,12 @@ static const char * const test_stage_string[] = { struct test_args { struct kvm_vm *vm; - uint64_t guest_test_virt_mem; - uint64_t host_page_size; - uint64_t host_num_pages; - uint64_t large_page_size; - uint64_t large_num_pages; - uint64_t host_pages_per_lpage; + u64 guest_test_virt_mem; + u64 host_page_size; + u64 host_num_pages; + u64 large_page_size; + u64 large_num_pages; + u64 host_pages_per_lpage; enum vm_mem_backing_src_type src_type; struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; }; @@ -63,7 +63,7 @@ struct test_args { static enum test_stage guest_test_stage; /* Host variables */ -static uint32_t nr_vcpus = 1; +static u32 nr_vcpus = 1; static struct test_args test_args; static enum test_stage *current_stage; static bool host_quit; @@ -77,19 +77,19 @@ static sem_t test_stage_completed; * This will be set to the topmost valid physical address minus * the test memory size. */ -static uint64_t guest_test_phys_mem; +static u64 guest_test_phys_mem; /* * Guest virtual memory offset of the testing memory slot. * Must not conflict with identity mapped test code. 
*/ -static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM; +static u64 guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM; static void guest_code(bool do_write) { struct test_args *p = &test_args; enum test_stage *current_stage = &guest_test_stage; - uint64_t addr; + u64 addr; int i, j; while (true) { @@ -113,9 +113,9 @@ static void guest_code(bool do_write) case KVM_CREATE_MAPPINGS: for (i = 0; i < p->large_num_pages; i++) { if (do_write) - *(uint64_t *)addr = 0x0123456789ABCDEF; + *(u64 *)addr = 0x0123456789ABCDEF; else - READ_ONCE(*(uint64_t *)addr); + READ_ONCE(*(u64 *)addr); addr += p->large_page_size; } @@ -131,7 +131,7 @@ static void guest_code(bool do_write) case KVM_UPDATE_MAPPINGS: if (p->src_type == VM_MEM_SRC_ANONYMOUS) { for (i = 0; i < p->host_num_pages; i++) { - *(uint64_t *)addr = 0x0123456789ABCDEF; + *(u64 *)addr = 0x0123456789ABCDEF; addr += p->host_page_size; } break; @@ -142,7 +142,7 @@ static void guest_code(bool do_write) * Write to the first host page in each large * page region, and triger break of large pages. 
*/ - *(uint64_t *)addr = 0x0123456789ABCDEF; + *(u64 *)addr = 0x0123456789ABCDEF; /* * Access the middle host pages in each large @@ -152,7 +152,7 @@ static void guest_code(bool do_write) */ addr += p->large_page_size / 2; for (j = 0; j < p->host_pages_per_lpage / 2; j++) { - READ_ONCE(*(uint64_t *)addr); + READ_ONCE(*(u64 *)addr); addr += p->host_page_size; } } @@ -167,7 +167,7 @@ static void guest_code(bool do_write) */ case KVM_ADJUST_MAPPINGS: for (i = 0; i < p->host_num_pages; i++) { - READ_ONCE(*(uint64_t *)addr); + READ_ONCE(*(u64 *)addr); addr += p->host_page_size; } break; @@ -227,8 +227,8 @@ static void *vcpu_worker(void *data) } struct test_params { - uint64_t phys_offset; - uint64_t test_mem_size; + u64 phys_offset; + u64 test_mem_size; enum vm_mem_backing_src_type src_type; }; @@ -237,12 +237,12 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg) int ret; struct test_params *p = arg; enum vm_mem_backing_src_type src_type = p->src_type; - uint64_t large_page_size = get_backing_src_pagesz(src_type); - uint64_t guest_page_size = vm_guest_mode_params[mode].page_size; - uint64_t host_page_size = getpagesize(); - uint64_t test_mem_size = p->test_mem_size; - uint64_t guest_num_pages; - uint64_t alignment; + u64 large_page_size = get_backing_src_pagesz(src_type); + u64 guest_page_size = vm_guest_mode_params[mode].page_size; + u64 host_page_size = getpagesize(); + u64 test_mem_size = p->test_mem_size; + u64 guest_num_pages; + u64 alignment; void *host_test_mem; struct kvm_vm *vm; @@ -281,7 +281,7 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg) virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages); /* Cache the HVA pointer of the region */ - host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem); + host_test_mem = addr_gpa2hva(vm, (gpa_t)guest_test_phys_mem); /* Export shared structure test_args to guest */ sync_global_to_guest(vm, test_args); @@ -292,7 +292,7 @@ static 
struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg) ret = sem_init(&test_stage_completed, 0, 0); TEST_ASSERT(ret == 0, "Error in sem_init"); - current_stage = addr_gva2hva(vm, (vm_vaddr_t)(&guest_test_stage)); + current_stage = addr_gva2hva(vm, (gva_t)(&guest_test_stage)); *current_stage = NUM_TEST_STAGES; pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode)); @@ -304,7 +304,7 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg) pr_info("Guest physical test memory offset: 0x%lx\n", guest_test_phys_mem); pr_info("Host virtual test memory offset: 0x%lx\n", - (uint64_t)host_test_mem); + (u64)host_test_mem); pr_info("Number of testing vCPUs: %d\n", nr_vcpus); return vm; diff --git a/tools/testing/selftests/kvm/lib/arm64/gic.c b/tools/testing/selftests/kvm/lib/arm64/gic.c index b023868fe0b8..011dfe1dfcb3 100644 --- a/tools/testing/selftests/kvm/lib/arm64/gic.c +++ b/tools/testing/selftests/kvm/lib/arm64/gic.c @@ -50,7 +50,7 @@ static void gic_dist_init(enum gic_type type, unsigned int nr_cpus) void gic_init(enum gic_type type, unsigned int nr_cpus) { - uint32_t cpu = guest_get_vcpuid(); + u32 cpu = guest_get_vcpuid(); GUEST_ASSERT(type < GIC_TYPE_MAX); GUEST_ASSERT(nr_cpus); @@ -73,7 +73,7 @@ void gic_irq_disable(unsigned int intid) unsigned int gic_get_and_ack_irq(void) { - uint64_t irqstat; + u64 irqstat; unsigned int intid; GUEST_ASSERT(gic_common_ops); @@ -102,7 +102,7 @@ void gic_set_eoi_split(bool split) gic_common_ops->gic_set_eoi_split(split); } -void gic_set_priority_mask(uint64_t pmr) +void gic_set_priority_mask(u64 pmr) { GUEST_ASSERT(gic_common_ops); gic_common_ops->gic_set_priority_mask(pmr); diff --git a/tools/testing/selftests/kvm/lib/arm64/gic_private.h b/tools/testing/selftests/kvm/lib/arm64/gic_private.h index b6a7e30c3eb1..6d393f5c5685 100644 --- a/tools/testing/selftests/kvm/lib/arm64/gic_private.h +++ b/tools/testing/selftests/kvm/lib/arm64/gic_private.h @@ -12,20 +12,20 @@ struct 
gic_common_ops { void (*gic_cpu_init)(unsigned int cpu); void (*gic_irq_enable)(unsigned int intid); void (*gic_irq_disable)(unsigned int intid); - uint64_t (*gic_read_iar)(void); - void (*gic_write_eoir)(uint32_t irq); - void (*gic_write_dir)(uint32_t irq); + u64 (*gic_read_iar)(void); + void (*gic_write_eoir)(u32 irq); + void (*gic_write_dir)(u32 irq); void (*gic_set_eoi_split)(bool split); - void (*gic_set_priority_mask)(uint64_t mask); - void (*gic_set_priority)(uint32_t intid, uint32_t prio); - void (*gic_irq_set_active)(uint32_t intid); - void (*gic_irq_clear_active)(uint32_t intid); - bool (*gic_irq_get_active)(uint32_t intid); - void (*gic_irq_set_pending)(uint32_t intid); - void (*gic_irq_clear_pending)(uint32_t intid); - bool (*gic_irq_get_pending)(uint32_t intid); - void (*gic_irq_set_config)(uint32_t intid, bool is_edge); - void (*gic_irq_set_group)(uint32_t intid, bool group); + void (*gic_set_priority_mask)(u64 mask); + void (*gic_set_priority)(u32 intid, u32 prio); + void (*gic_irq_set_active)(u32 intid); + void (*gic_irq_clear_active)(u32 intid); + bool (*gic_irq_get_active)(u32 intid); + void (*gic_irq_set_pending)(u32 intid); + void (*gic_irq_clear_pending)(u32 intid); + bool (*gic_irq_get_pending)(u32 intid); + void (*gic_irq_set_config)(u32 intid, bool is_edge); + void (*gic_irq_set_group)(u32 intid, bool group); }; extern const struct gic_common_ops gicv3_ops; diff --git a/tools/testing/selftests/kvm/lib/arm64/gic_v3.c b/tools/testing/selftests/kvm/lib/arm64/gic_v3.c index 50754a27f493..a99a53accfe9 100644 --- a/tools/testing/selftests/kvm/lib/arm64/gic_v3.c +++ b/tools/testing/selftests/kvm/lib/arm64/gic_v3.c @@ -50,13 +50,13 @@ static void gicv3_gicd_wait_for_rwp(void) } } -static inline volatile void *gicr_base_cpu(uint32_t cpu) +static inline volatile void *gicr_base_cpu(u32 cpu) { /* Align all the redistributors sequentially */ return GICR_BASE_GVA + cpu * SZ_64K * 2; } -static void gicv3_gicr_wait_for_rwp(uint32_t cpu) +static void 
gicv3_gicr_wait_for_rwp(u32 cpu) { unsigned int count = 100000; /* 1s */ @@ -66,7 +66,7 @@ static void gicv3_gicr_wait_for_rwp(uint32_t cpu) } } -static void gicv3_wait_for_rwp(uint32_t cpu_or_dist) +static void gicv3_wait_for_rwp(u32 cpu_or_dist) { if (cpu_or_dist & DIST_BIT) gicv3_gicd_wait_for_rwp(); @@ -91,34 +91,34 @@ static enum gicv3_intid_range get_intid_range(unsigned int intid) return INVALID_RANGE; } -static uint64_t gicv3_read_iar(void) +static u64 gicv3_read_iar(void) { - uint64_t irqstat = read_sysreg_s(SYS_ICC_IAR1_EL1); + u64 irqstat = read_sysreg_s(SYS_ICC_IAR1_EL1); dsb(sy); return irqstat; } -static void gicv3_write_eoir(uint32_t irq) +static void gicv3_write_eoir(u32 irq) { write_sysreg_s(irq, SYS_ICC_EOIR1_EL1); isb(); } -static void gicv3_write_dir(uint32_t irq) +static void gicv3_write_dir(u32 irq) { write_sysreg_s(irq, SYS_ICC_DIR_EL1); isb(); } -static void gicv3_set_priority_mask(uint64_t mask) +static void gicv3_set_priority_mask(u64 mask) { write_sysreg_s(mask, SYS_ICC_PMR_EL1); } static void gicv3_set_eoi_split(bool split) { - uint32_t val; + u32 val; /* * All other fields are read-only, so no need to read CTLR first. In @@ -129,29 +129,29 @@ static void gicv3_set_eoi_split(bool split) isb(); } -uint32_t gicv3_reg_readl(uint32_t cpu_or_dist, uint64_t offset) +u32 gicv3_reg_readl(u32 cpu_or_dist, u64 offset) { volatile void *base = cpu_or_dist & DIST_BIT ? GICD_BASE_GVA : sgi_base_from_redist(gicr_base_cpu(cpu_or_dist)); return readl(base + offset); } -void gicv3_reg_writel(uint32_t cpu_or_dist, uint64_t offset, uint32_t reg_val) +void gicv3_reg_writel(u32 cpu_or_dist, u64 offset, u32 reg_val) { volatile void *base = cpu_or_dist & DIST_BIT ? 
GICD_BASE_GVA : sgi_base_from_redist(gicr_base_cpu(cpu_or_dist)); writel(reg_val, base + offset); } -uint32_t gicv3_getl_fields(uint32_t cpu_or_dist, uint64_t offset, uint32_t mask) +u32 gicv3_getl_fields(u32 cpu_or_dist, u64 offset, u32 mask) { return gicv3_reg_readl(cpu_or_dist, offset) & mask; } -void gicv3_setl_fields(uint32_t cpu_or_dist, uint64_t offset, - uint32_t mask, uint32_t reg_val) +void gicv3_setl_fields(u32 cpu_or_dist, u64 offset, + u32 mask, u32 reg_val) { - uint32_t tmp = gicv3_reg_readl(cpu_or_dist, offset) & ~mask; + u32 tmp = gicv3_reg_readl(cpu_or_dist, offset) & ~mask; tmp |= (reg_val & mask); gicv3_reg_writel(cpu_or_dist, offset, tmp); @@ -165,14 +165,14 @@ void gicv3_setl_fields(uint32_t cpu_or_dist, uint64_t offset, * map that doesn't implement it; like GICR_WAKER's offset of 0x0014 being * marked as "Reserved" in the Distributor map. */ -static void gicv3_access_reg(uint32_t intid, uint64_t offset, - uint32_t reg_bits, uint32_t bits_per_field, - bool write, uint32_t *val) +static void gicv3_access_reg(u32 intid, u64 offset, + u32 reg_bits, u32 bits_per_field, + bool write, u32 *val) { - uint32_t cpu = guest_get_vcpuid(); + u32 cpu = guest_get_vcpuid(); enum gicv3_intid_range intid_range = get_intid_range(intid); - uint32_t fields_per_reg, index, mask, shift; - uint32_t cpu_or_dist; + u32 fields_per_reg, index, mask, shift; + u32 cpu_or_dist; GUEST_ASSERT(bits_per_field <= reg_bits); GUEST_ASSERT(!write || *val < (1U << bits_per_field)); @@ -197,32 +197,32 @@ static void gicv3_access_reg(uint32_t intid, uint64_t offset, *val = gicv3_getl_fields(cpu_or_dist, offset, mask) >> shift; } -static void gicv3_write_reg(uint32_t intid, uint64_t offset, - uint32_t reg_bits, uint32_t bits_per_field, uint32_t val) +static void gicv3_write_reg(u32 intid, u64 offset, + u32 reg_bits, u32 bits_per_field, u32 val) { gicv3_access_reg(intid, offset, reg_bits, bits_per_field, true, &val); } -static uint32_t gicv3_read_reg(uint32_t intid, uint64_t offset, - 
uint32_t reg_bits, uint32_t bits_per_field) +static u32 gicv3_read_reg(u32 intid, u64 offset, + u32 reg_bits, u32 bits_per_field) { - uint32_t val; + u32 val; gicv3_access_reg(intid, offset, reg_bits, bits_per_field, false, &val); return val; } -static void gicv3_set_priority(uint32_t intid, uint32_t prio) +static void gicv3_set_priority(u32 intid, u32 prio) { gicv3_write_reg(intid, GICD_IPRIORITYR, 32, 8, prio); } /* Sets the intid to be level-sensitive or edge-triggered. */ -static void gicv3_irq_set_config(uint32_t intid, bool is_edge) +static void gicv3_irq_set_config(u32 intid, bool is_edge) { - uint32_t val; + u32 val; /* N/A for private interrupts. */ GUEST_ASSERT(get_intid_range(intid) == SPI_RANGE); @@ -230,57 +230,57 @@ static void gicv3_irq_set_config(uint32_t intid, bool is_edge) gicv3_write_reg(intid, GICD_ICFGR, 32, 2, val); } -static void gicv3_irq_enable(uint32_t intid) +static void gicv3_irq_enable(u32 intid) { bool is_spi = get_intid_range(intid) == SPI_RANGE; - uint32_t cpu = guest_get_vcpuid(); + u32 cpu = guest_get_vcpuid(); gicv3_write_reg(intid, GICD_ISENABLER, 32, 1, 1); gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu); } -static void gicv3_irq_disable(uint32_t intid) +static void gicv3_irq_disable(u32 intid) { bool is_spi = get_intid_range(intid) == SPI_RANGE; - uint32_t cpu = guest_get_vcpuid(); + u32 cpu = guest_get_vcpuid(); gicv3_write_reg(intid, GICD_ICENABLER, 32, 1, 1); gicv3_wait_for_rwp(is_spi ? 
DIST_BIT : cpu); } -static void gicv3_irq_set_active(uint32_t intid) +static void gicv3_irq_set_active(u32 intid) { gicv3_write_reg(intid, GICD_ISACTIVER, 32, 1, 1); } -static void gicv3_irq_clear_active(uint32_t intid) +static void gicv3_irq_clear_active(u32 intid) { gicv3_write_reg(intid, GICD_ICACTIVER, 32, 1, 1); } -static bool gicv3_irq_get_active(uint32_t intid) +static bool gicv3_irq_get_active(u32 intid) { return gicv3_read_reg(intid, GICD_ISACTIVER, 32, 1); } -static void gicv3_irq_set_pending(uint32_t intid) +static void gicv3_irq_set_pending(u32 intid) { gicv3_write_reg(intid, GICD_ISPENDR, 32, 1, 1); } -static void gicv3_irq_clear_pending(uint32_t intid) +static void gicv3_irq_clear_pending(u32 intid) { gicv3_write_reg(intid, GICD_ICPENDR, 32, 1, 1); } -static bool gicv3_irq_get_pending(uint32_t intid) +static bool gicv3_irq_get_pending(u32 intid) { return gicv3_read_reg(intid, GICD_ISPENDR, 32, 1); } static void gicv3_enable_redist(volatile void *redist_base) { - uint32_t val = readl(redist_base + GICR_WAKER); + u32 val = readl(redist_base + GICR_WAKER); unsigned int count = 100000; /* 1s */ val &= ~GICR_WAKER_ProcessorSleep; @@ -293,10 +293,10 @@ static void gicv3_enable_redist(volatile void *redist_base) } } -static void gicv3_set_group(uint32_t intid, bool grp) +static void gicv3_set_group(u32 intid, bool grp) { - uint32_t cpu_or_dist; - uint32_t val; + u32 cpu_or_dist; + u32 val; cpu_or_dist = (get_intid_range(intid) == SPI_RANGE) ? 
DIST_BIT : guest_get_vcpuid(); val = gicv3_reg_readl(cpu_or_dist, GICD_IGROUPR + (intid / 32) * 4); @@ -424,8 +424,8 @@ const struct gic_common_ops gicv3_ops = { .gic_irq_set_group = gicv3_set_group, }; -void gic_rdist_enable_lpis(vm_paddr_t cfg_table, size_t cfg_table_size, - vm_paddr_t pend_table) +void gic_rdist_enable_lpis(gpa_t cfg_table, size_t cfg_table_size, + gpa_t pend_table) { volatile void *rdist_base = gicr_base_cpu(guest_get_vcpuid()); diff --git a/tools/testing/selftests/kvm/lib/arm64/gic_v3_its.c b/tools/testing/selftests/kvm/lib/arm64/gic_v3_its.c index 7f9fdcf42ae6..1188b578121d 100644 --- a/tools/testing/selftests/kvm/lib/arm64/gic_v3_its.c +++ b/tools/testing/selftests/kvm/lib/arm64/gic_v3_its.c @@ -54,7 +54,7 @@ static unsigned long its_find_baser(unsigned int type) return -1; } -static void its_install_table(unsigned int type, vm_paddr_t base, size_t size) +static void its_install_table(unsigned int type, gpa_t base, size_t size) { unsigned long offset = its_find_baser(type); u64 baser; @@ -69,7 +69,7 @@ static void its_install_table(unsigned int type, vm_paddr_t base, size_t size) its_write_u64(offset, baser); } -static void its_install_cmdq(vm_paddr_t base, size_t size) +static void its_install_cmdq(gpa_t base, size_t size) { u64 cbaser; @@ -82,9 +82,8 @@ static void its_install_cmdq(vm_paddr_t base, size_t size) its_write_u64(GITS_CBASER, cbaser); } -void its_init(vm_paddr_t coll_tbl, size_t coll_tbl_sz, - vm_paddr_t device_tbl, size_t device_tbl_sz, - vm_paddr_t cmdq, size_t cmdq_size) +void its_init(gpa_t coll_tbl, size_t coll_tbl_sz, gpa_t device_tbl, + size_t device_tbl_sz, gpa_t cmdq, size_t cmdq_size) { u32 ctlr; @@ -204,7 +203,7 @@ static void its_send_cmd(void *cmdq_base, struct its_cmd_block *cmd) } } -void its_send_mapd_cmd(void *cmdq_base, u32 device_id, vm_paddr_t itt_base, +void its_send_mapd_cmd(void *cmdq_base, u32 device_id, gpa_t itt_base, size_t itt_size, bool valid) { struct its_cmd_block cmd = {}; diff --git 
a/tools/testing/selftests/kvm/lib/arm64/processor.c b/tools/testing/selftests/kvm/lib/arm64/processor.c index 43ea40edc533..01325bf4d36f 100644 --- a/tools/testing/selftests/kvm/lib/arm64/processor.c +++ b/tools/testing/selftests/kvm/lib/arm64/processor.c @@ -19,20 +19,20 @@ #define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN 0xac0000 -static vm_vaddr_t exception_handlers; +static gva_t exception_handlers; -static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva) +static u64 pgd_index(struct kvm_vm *vm, gva_t gva) { unsigned int shift = (vm->mmu.pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift; - uint64_t mask = (1UL << (vm->va_bits - shift)) - 1; + u64 mask = (1UL << (vm->va_bits - shift)) - 1; return (gva >> shift) & mask; } -static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva) +static u64 pud_index(struct kvm_vm *vm, gva_t gva) { unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift; - uint64_t mask = (1UL << (vm->page_shift - 3)) - 1; + u64 mask = (1UL << (vm->page_shift - 3)) - 1; TEST_ASSERT(vm->mmu.pgtable_levels == 4, "Mode %d does not have 4 page table levels", vm->mode); @@ -40,10 +40,10 @@ static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva) return (gva >> shift) & mask; } -static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva) +static u64 pmd_index(struct kvm_vm *vm, gva_t gva) { unsigned int shift = (vm->page_shift - 3) + vm->page_shift; - uint64_t mask = (1UL << (vm->page_shift - 3)) - 1; + u64 mask = (1UL << (vm->page_shift - 3)) - 1; TEST_ASSERT(vm->mmu.pgtable_levels >= 3, "Mode %d does not have >= 3 page table levels", vm->mode); @@ -51,9 +51,9 @@ static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva) return (gva >> shift) & mask; } -static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva) +static u64 pte_index(struct kvm_vm *vm, gva_t gva) { - uint64_t mask = (1UL << (vm->page_shift - 3)) - 1; + u64 mask = (1UL << (vm->page_shift - 3)) - 1; return (gva >> vm->page_shift) & mask; } @@ -63,9 +63,9 @@ 
static inline bool use_lpa2_pte_format(struct kvm_vm *vm) (vm->pa_bits > 48 || vm->va_bits > 48); } -static uint64_t addr_pte(struct kvm_vm *vm, uint64_t pa, uint64_t attrs) +static u64 addr_pte(struct kvm_vm *vm, u64 pa, u64 attrs) { - uint64_t pte; + u64 pte; if (use_lpa2_pte_format(vm)) { pte = pa & PTE_ADDR_MASK_LPA2(vm->page_shift); @@ -81,9 +81,9 @@ static uint64_t addr_pte(struct kvm_vm *vm, uint64_t pa, uint64_t attrs) return pte; } -static uint64_t pte_addr(struct kvm_vm *vm, uint64_t pte) +static u64 pte_addr(struct kvm_vm *vm, u64 pte) { - uint64_t pa; + u64 pa; if (use_lpa2_pte_format(vm)) { pa = pte & PTE_ADDR_MASK_LPA2(vm->page_shift); @@ -97,13 +97,13 @@ static uint64_t pte_addr(struct kvm_vm *vm, uint64_t pte) return pa; } -static uint64_t ptrs_per_pgd(struct kvm_vm *vm) +static u64 ptrs_per_pgd(struct kvm_vm *vm) { unsigned int shift = (vm->mmu.pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift; return 1 << (vm->va_bits - shift); } -static uint64_t __maybe_unused ptrs_per_pte(struct kvm_vm *vm) +static u64 __maybe_unused ptrs_per_pte(struct kvm_vm *vm) { return 1 << (vm->page_shift - 3); } @@ -121,47 +121,46 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm) vm->mmu.pgd_created = true; } -static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, - uint64_t flags) +static void _virt_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa, + u64 flags) { - uint8_t attr_idx = flags & (PTE_ATTRINDX_MASK >> PTE_ATTRINDX_SHIFT); - uint64_t pg_attr; - uint64_t *ptep; + u8 attr_idx = flags & (PTE_ATTRINDX_MASK >> PTE_ATTRINDX_SHIFT); + u64 pg_attr; + u64 *ptep; - TEST_ASSERT((vaddr % vm->page_size) == 0, + TEST_ASSERT((gva % vm->page_size) == 0, "Virtual address not on page boundary,\n" - " vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size); - TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, - (vaddr >> vm->page_shift)), - "Invalid virtual address, vaddr: 0x%lx", vaddr); - TEST_ASSERT((paddr % vm->page_size) == 0, - "Physical address not 
on page boundary,\n" - " paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size); - TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn, - "Physical address beyond beyond maximum supported,\n" - " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", - paddr, vm->max_gfn, vm->page_size); - - ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pgd_index(vm, vaddr) * 8; + " gva: 0x%lx vm->page_size: 0x%x", gva, vm->page_size); + TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)), + "Invalid virtual address, gva: 0x%lx", gva); + TEST_ASSERT((gpa % vm->page_size) == 0, + "Physical address not on page boundary,\n" + " gpa: 0x%lx vm->page_size: 0x%x", gpa, vm->page_size); + TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn, + "Physical address beyond beyond maximum supported,\n" + " gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", + gpa, vm->max_gfn, vm->page_size); + + ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pgd_index(vm, gva) * 8; if (!*ptep) *ptep = addr_pte(vm, vm_alloc_page_table(vm), PGD_TYPE_TABLE | PTE_VALID); switch (vm->mmu.pgtable_levels) { case 4: - ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8; + ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8; if (!*ptep) *ptep = addr_pte(vm, vm_alloc_page_table(vm), PUD_TYPE_TABLE | PTE_VALID); /* fall through */ case 3: - ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8; + ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8; if (!*ptep) *ptep = addr_pte(vm, vm_alloc_page_table(vm), PMD_TYPE_TABLE | PTE_VALID); /* fall through */ case 2: - ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8; + ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8; break; default: TEST_FAIL("Page table levels must be 2, 3, or 4"); @@ -171,19 +170,19 @@ static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, if (!use_lpa2_pte_format(vm)) pg_attr |= PTE_SHARED; - *ptep = addr_pte(vm, 
paddr, pg_attr); + *ptep = addr_pte(vm, gpa, pg_attr); } -void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) +void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa) { - uint64_t attr_idx = MT_NORMAL; + u64 attr_idx = MT_NORMAL; - _virt_pg_map(vm, vaddr, paddr, attr_idx); + _virt_pg_map(vm, gva, gpa, attr_idx); } -uint64_t *virt_get_pte_hva_at_level(struct kvm_vm *vm, vm_vaddr_t gva, int level) +u64 *virt_get_pte_hva_at_level(struct kvm_vm *vm, gva_t gva, int level) { - uint64_t *ptep; + u64 *ptep; if (!vm->mmu.pgd_created) goto unmapped_gva; @@ -225,23 +224,23 @@ unmapped_gva: exit(EXIT_FAILURE); } -uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva) +u64 *virt_get_pte_hva(struct kvm_vm *vm, gva_t gva) { return virt_get_pte_hva_at_level(vm, gva, 3); } -vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) +gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva) { - uint64_t *ptep = virt_get_pte_hva(vm, gva); + u64 *ptep = virt_get_pte_hva(vm, gva); return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1)); } -static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level) +static void pte_dump(FILE *stream, struct kvm_vm *vm, u8 indent, u64 page, int level) { #ifdef DEBUG static const char * const type[] = { "", "pud", "pmd", "pte" }; - uint64_t pte, *ptep; + u64 pte, *ptep; if (level == 4) return; @@ -256,10 +255,10 @@ static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t p #endif } -void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) +void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent) { int level = 4 - (vm->mmu.pgtable_levels - 1); - uint64_t pgd, *ptep; + u64 pgd, *ptep; if (!vm->mmu.pgd_created) return; @@ -298,7 +297,7 @@ void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init) { struct kvm_vcpu_init default_init = { .target = -1, }; struct kvm_vm *vm = vcpu->vm; - uint64_t sctlr_el1, tcr_el1, ttbr0_el1; + u64 
sctlr_el1, tcr_el1, ttbr0_el1; if (!init) { kvm_get_default_vcpu_target(vm, &default_init); @@ -397,9 +396,9 @@ void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init) HCR_EL2_RW | HCR_EL2_TGE | HCR_EL2_E2H); } -void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent) +void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, u8 indent) { - uint64_t pstate, pc; + u64 pstate, pc; pstate = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pstate)); pc = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc)); @@ -410,29 +409,29 @@ void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent) void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code) { - vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code); + vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (u64)guest_code); } -static struct kvm_vcpu *__aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, +static struct kvm_vcpu *__aarch64_vcpu_add(struct kvm_vm *vm, u32 vcpu_id, struct kvm_vcpu_init *init) { size_t stack_size; - uint64_t stack_vaddr; + gva_t stack_gva; struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id); stack_size = vm->page_size == 4096 ? 
DEFAULT_STACK_PGS * vm->page_size : vm->page_size; - stack_vaddr = __vm_vaddr_alloc(vm, stack_size, - DEFAULT_ARM64_GUEST_STACK_VADDR_MIN, - MEM_REGION_DATA); + stack_gva = __vm_alloc(vm, stack_size, + DEFAULT_ARM64_GUEST_STACK_VADDR_MIN, + MEM_REGION_DATA); aarch64_vcpu_setup(vcpu, init); - vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_SP_EL1), stack_vaddr + stack_size); + vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_SP_EL1), stack_gva + stack_size); return vcpu; } -struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, +struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, u32 vcpu_id, struct kvm_vcpu_init *init, void *guest_code) { struct kvm_vcpu *vcpu = __aarch64_vcpu_add(vm, vcpu_id, init); @@ -442,7 +441,7 @@ struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, return vcpu; } -struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) +struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id) { return __aarch64_vcpu_add(vm, vcpu_id, NULL); } @@ -459,13 +458,13 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...) 
for (i = 0; i < num; i++) { vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.regs[i]), - va_arg(ap, uint64_t)); + va_arg(ap, u64)); } va_end(ap); } -void kvm_exit_unexpected_exception(int vector, uint64_t ec, bool valid_ec) +void kvm_exit_unexpected_exception(int vector, u64 ec, bool valid_ec) { ucall(UCALL_UNHANDLED, 3, vector, ec, valid_ec); while (1) @@ -498,7 +497,7 @@ void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu) { extern char vectors; - vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_VBAR_EL1), (uint64_t)&vectors); + vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_VBAR_EL1), (u64)&vectors); } void route_exception(struct ex_regs *regs, int vector) @@ -536,10 +535,10 @@ unexpected_exception: void vm_init_descriptor_tables(struct kvm_vm *vm) { - vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers), - vm->page_size, MEM_REGION_DATA); + vm->handlers = __vm_alloc(vm, sizeof(struct handlers), vm->page_size, + MEM_REGION_DATA); - *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers; + *(gva_t *)addr_gva2hva(vm, (gva_t)(&exception_handlers)) = vm->handlers; } void vm_install_sync_handler(struct kvm_vm *vm, int vector, int ec, @@ -563,13 +562,13 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector, handlers->exception_handlers[vector][0] = handler; } -uint32_t guest_get_vcpuid(void) +u32 guest_get_vcpuid(void) { return read_sysreg(tpidr_el1); } -static uint32_t max_ipa_for_page_size(uint32_t vm_ipa, uint32_t gran, - uint32_t not_sup_val, uint32_t ipa52_min_val) +static u32 max_ipa_for_page_size(u32 vm_ipa, u32 gran, + u32 not_sup_val, u32 ipa52_min_val) { if (gran == not_sup_val) return 0; @@ -579,16 +578,16 @@ static uint32_t max_ipa_for_page_size(uint32_t vm_ipa, uint32_t gran, return min(vm_ipa, 48U); } -void aarch64_get_supported_page_sizes(uint32_t ipa, uint32_t *ipa4k, - uint32_t *ipa16k, uint32_t *ipa64k) +void aarch64_get_supported_page_sizes(u32 ipa, u32 *ipa4k, + u32 *ipa16k, u32 *ipa64k) { struct kvm_vcpu_init 
preferred_init; int kvm_fd, vm_fd, vcpu_fd, err; - uint64_t val; - uint32_t gran; + u64 val; + u32 gran; struct kvm_one_reg reg = { .id = KVM_ARM64_SYS_REG(SYS_ID_AA64MMFR0_EL1), - .addr = (uint64_t)&val, + .addr = (u64)&val, }; kvm_fd = open_kvm_dev_path_or_exit(); @@ -646,17 +645,17 @@ void aarch64_get_supported_page_sizes(uint32_t ipa, uint32_t *ipa4k, : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7") -void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1, - uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5, - uint64_t arg6, struct arm_smccc_res *res) +void smccc_hvc(u32 function_id, u64 arg0, u64 arg1, + u64 arg2, u64 arg3, u64 arg4, u64 arg5, + u64 arg6, struct arm_smccc_res *res) { __smccc_call(hvc, function_id, arg0, arg1, arg2, arg3, arg4, arg5, arg6, res); } -void smccc_smc(uint32_t function_id, uint64_t arg0, uint64_t arg1, - uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5, - uint64_t arg6, struct arm_smccc_res *res) +void smccc_smc(u32 function_id, u64 arg0, u64 arg1, + u64 arg2, u64 arg3, u64 arg4, u64 arg5, + u64 arg6, struct arm_smccc_res *res) { __smccc_call(smc, function_id, arg0, arg1, arg2, arg3, arg4, arg5, arg6, res); @@ -671,7 +670,7 @@ void kvm_selftest_arch_init(void) guest_modes_append_default(); } -void vm_vaddr_populate_bitmap(struct kvm_vm *vm) +void vm_populate_gva_bitmap(struct kvm_vm *vm) { /* * arm64 selftests use only TTBR0_EL1, meaning that the valid VA space diff --git a/tools/testing/selftests/kvm/lib/arm64/ucall.c b/tools/testing/selftests/kvm/lib/arm64/ucall.c index ddab0ce89d4d..e0550ad5aa75 100644 --- a/tools/testing/selftests/kvm/lib/arm64/ucall.c +++ b/tools/testing/selftests/kvm/lib/arm64/ucall.c @@ -6,17 +6,17 @@ */ #include "kvm_util.h" -vm_vaddr_t *ucall_exit_mmio_addr; +gva_t *ucall_exit_mmio_addr; -void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) +void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa) { - vm_vaddr_t mmio_gva = vm_vaddr_unused_gap(vm, vm->page_size, 
KVM_UTIL_MIN_VADDR); + gva_t mmio_gva = vm_unused_gva_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR); virt_map(vm, mmio_gva, mmio_gpa, 1); vm->ucall_mmio_addr = mmio_gpa; - write_guest_global(vm, ucall_exit_mmio_addr, (vm_vaddr_t *)mmio_gva); + write_guest_global(vm, ucall_exit_mmio_addr, (gva_t *)mmio_gva); } void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu) @@ -25,9 +25,9 @@ void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu) if (run->exit_reason == KVM_EXIT_MMIO && run->mmio.phys_addr == vcpu->vm->ucall_mmio_addr) { - TEST_ASSERT(run->mmio.is_write && run->mmio.len == sizeof(uint64_t), + TEST_ASSERT(run->mmio.is_write && run->mmio.len == sizeof(u64), "Unexpected ucall exit mmio address access"); - return (void *)(*((uint64_t *)run->mmio.data)); + return (void *)(*((u64 *)run->mmio.data)); } return NULL; diff --git a/tools/testing/selftests/kvm/lib/arm64/vgic.c b/tools/testing/selftests/kvm/lib/arm64/vgic.c index d0f7bd0984b8..4ecebf3146a2 100644 --- a/tools/testing/selftests/kvm/lib/arm64/vgic.c +++ b/tools/testing/selftests/kvm/lib/arm64/vgic.c @@ -41,10 +41,10 @@ bool kvm_supports_vgic_v3(void) * redistributor regions of the guest. Since it depends on the number of * vCPUs for the VM, it must be called after all the vCPUs have been created. 
*/ -int __vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs) +int __vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, u32 nr_irqs) { int gic_fd; - uint64_t attr; + u64 attr; unsigned int nr_gic_pages; /* Distributor setup */ @@ -77,7 +77,7 @@ void __vgic_v3_init(int fd) KVM_DEV_ARM_VGIC_CTRL_INIT, NULL); } -int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs) +int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, u32 nr_irqs) { unsigned int nr_vcpus_created = 0; struct list_head *iter; @@ -104,11 +104,11 @@ int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs) } /* should only work for level sensitive interrupts */ -int _kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level) +int _kvm_irq_set_level_info(int gic_fd, u32 intid, int level) { - uint64_t attr = 32 * (intid / 32); - uint64_t index = intid % 32; - uint64_t val; + u64 attr = 32 * (intid / 32); + u64 index = intid % 32; + u64 val; int ret; ret = __kvm_device_attr_get(gic_fd, KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO, @@ -122,16 +122,16 @@ int _kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level) return ret; } -void kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level) +void kvm_irq_set_level_info(int gic_fd, u32 intid, int level) { int ret = _kvm_irq_set_level_info(gic_fd, intid, level); TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO, ret)); } -int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level) +int _kvm_arm_irq_line(struct kvm_vm *vm, u32 intid, int level) { - uint32_t irq = intid & KVM_ARM_IRQ_NUM_MASK; + u32 irq = intid & KVM_ARM_IRQ_NUM_MASK; TEST_ASSERT(!INTID_IS_SGI(intid), "KVM_IRQ_LINE's interface itself " "doesn't allow injecting SGIs. 
There's no mask for it."); @@ -144,23 +144,23 @@ int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level) return _kvm_irq_line(vm, irq, level); } -void kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level) +void kvm_arm_irq_line(struct kvm_vm *vm, u32 intid, int level) { int ret = _kvm_arm_irq_line(vm, intid, level); TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_IRQ_LINE, ret)); } -static void vgic_poke_irq(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu, - uint64_t reg_off) +static void vgic_poke_irq(int gic_fd, u32 intid, struct kvm_vcpu *vcpu, + u64 reg_off) { - uint64_t reg = intid / 32; - uint64_t index = intid % 32; - uint64_t attr = reg_off + reg * 4; - uint64_t val; + u64 reg = intid / 32; + u64 index = intid % 32; + u64 attr = reg_off + reg * 4; + u64 val; bool intid_is_private = INTID_IS_SGI(intid) || INTID_IS_PPI(intid); - uint32_t group = intid_is_private ? KVM_DEV_ARM_VGIC_GRP_REDIST_REGS + u32 group = intid_is_private ? KVM_DEV_ARM_VGIC_GRP_REDIST_REGS : KVM_DEV_ARM_VGIC_GRP_DIST_REGS; if (intid_is_private) { @@ -183,12 +183,12 @@ static void vgic_poke_irq(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu, kvm_device_attr_set(gic_fd, group, attr, &val); } -void kvm_irq_write_ispendr(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu) +void kvm_irq_write_ispendr(int gic_fd, u32 intid, struct kvm_vcpu *vcpu) { vgic_poke_irq(gic_fd, intid, vcpu, GICD_ISPENDR); } -void kvm_irq_write_isactiver(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu) +void kvm_irq_write_isactiver(int gic_fd, u32 intid, struct kvm_vcpu *vcpu) { vgic_poke_irq(gic_fd, intid, vcpu, GICD_ISACTIVER); } diff --git a/tools/testing/selftests/kvm/lib/elf.c b/tools/testing/selftests/kvm/lib/elf.c index f34d926d9735..b689c4df4a01 100644 --- a/tools/testing/selftests/kvm/lib/elf.c +++ b/tools/testing/selftests/kvm/lib/elf.c @@ -156,21 +156,20 @@ void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename) TEST_ASSERT(phdr.p_memsz > 0, "Unexpected loadable segment " "memsize of 
0,\n" " phdr index: %u p_memsz: 0x%" PRIx64, - n1, (uint64_t) phdr.p_memsz); - vm_vaddr_t seg_vstart = align_down(phdr.p_vaddr, vm->page_size); - vm_vaddr_t seg_vend = phdr.p_vaddr + phdr.p_memsz - 1; + n1, (u64)phdr.p_memsz); + gva_t seg_vstart = align_down(phdr.p_vaddr, vm->page_size); + gva_t seg_vend = phdr.p_vaddr + phdr.p_memsz - 1; seg_vend |= vm->page_size - 1; size_t seg_size = seg_vend - seg_vstart + 1; - vm_vaddr_t vaddr = __vm_vaddr_alloc(vm, seg_size, seg_vstart, - MEM_REGION_CODE); - TEST_ASSERT(vaddr == seg_vstart, "Unable to allocate " + gva_t gva = __vm_alloc(vm, seg_size, seg_vstart, MEM_REGION_CODE); + TEST_ASSERT(gva == seg_vstart, "Unable to allocate " "virtual memory for segment at requested min addr,\n" " segment idx: %u\n" " seg_vstart: 0x%lx\n" - " vaddr: 0x%lx", - n1, seg_vstart, vaddr); - memset(addr_gva2hva(vm, vaddr), 0, seg_size); + " gva: 0x%lx", + n1, seg_vstart, gva); + memset(addr_gva2hva(vm, gva), 0, seg_size); /* TODO(lhuemill): Set permissions of each memory segment * based on the least-significant 3 bits of phdr.p_flags. 
*/ diff --git a/tools/testing/selftests/kvm/lib/guest_modes.c b/tools/testing/selftests/kvm/lib/guest_modes.c index ce3099630397..7a96c43b5704 100644 --- a/tools/testing/selftests/kvm/lib/guest_modes.c +++ b/tools/testing/selftests/kvm/lib/guest_modes.c @@ -20,7 +20,7 @@ void guest_modes_append_default(void) #ifdef __aarch64__ { unsigned int limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE); - uint32_t ipa4k, ipa16k, ipa64k; + u32 ipa4k, ipa16k, ipa64k; int i; aarch64_get_supported_page_sizes(limit, &ipa4k, &ipa16k, &ipa64k); diff --git a/tools/testing/selftests/kvm/lib/guest_sprintf.c b/tools/testing/selftests/kvm/lib/guest_sprintf.c index 74627514c4d4..7a33965349a7 100644 --- a/tools/testing/selftests/kvm/lib/guest_sprintf.c +++ b/tools/testing/selftests/kvm/lib/guest_sprintf.c @@ -35,8 +35,8 @@ static int skip_atoi(const char **s) ({ \ int __res; \ \ - __res = ((uint64_t) n) % (uint32_t) base; \ - n = ((uint64_t) n) / (uint32_t) base; \ + __res = ((u64)n) % (u32)base; \ + n = ((u64)n) / (u32)base; \ __res; \ }) @@ -119,7 +119,7 @@ int guest_vsnprintf(char *buf, int n, const char *fmt, va_list args) { char *str, *end; const char *s; - uint64_t num; + u64 num; int i, base; int len; @@ -216,7 +216,7 @@ repeat: while (--field_width > 0) APPEND_BUFFER_SAFE(str, end, ' '); APPEND_BUFFER_SAFE(str, end, - (uint8_t)va_arg(args, int)); + (u8)va_arg(args, int)); while (--field_width > 0) APPEND_BUFFER_SAFE(str, end, ' '); continue; @@ -240,7 +240,7 @@ repeat: flags |= SPECIAL | SMALL | ZEROPAD; } str = number(str, end, - (uint64_t)va_arg(args, void *), 16, + (u64)va_arg(args, void *), 16, field_width, precision, flags); continue; @@ -284,15 +284,15 @@ repeat: continue; } if (qualifier == 'l') - num = va_arg(args, uint64_t); + num = va_arg(args, u64); else if (qualifier == 'h') { - num = (uint16_t)va_arg(args, int); + num = (u16)va_arg(args, int); if (flags & SIGN) - num = (int16_t)num; + num = (s16)num; } else if (flags & SIGN) num = va_arg(args, int); else - num = 
va_arg(args, uint32_t); + num = va_arg(args, u32); str = number(str, end, num, base, field_width, precision, flags); } diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index f5e076591c64..2a76eca7029d 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -20,9 +20,9 @@ #define KVM_UTIL_MIN_PFN 2 -uint32_t guest_random_seed; +u32 guest_random_seed; struct guest_random_state guest_rng; -static uint32_t last_guest_seed; +static u32 last_guest_seed; static size_t vcpu_mmap_sz(void); @@ -165,7 +165,7 @@ unsigned int kvm_check_cap(long cap) return (unsigned int)ret; } -void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size) +void vm_enable_dirty_ring(struct kvm_vm *vm, u32 ring_size) { if (vm_check_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL)) vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL, ring_size); @@ -189,7 +189,7 @@ static void vm_open(struct kvm_vm *vm) vm->stats.fd = -1; } -const char *vm_guest_mode_string(uint32_t i) +const char *vm_guest_mode_string(u32 i) { static const char * const strings[] = { [VM_MODE_P52V48_4K] = "PA-bits:52, VA-bits:48, 4K pages", @@ -267,7 +267,7 @@ _Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) * based on the MSB of the VA. On architectures with this behavior * the VA region spans [0, 2^(va_bits - 1)), [-(2^(va_bits - 1), -1]. */ -__weak void vm_vaddr_populate_bitmap(struct kvm_vm *vm) +__weak void vm_populate_gva_bitmap(struct kvm_vm *vm) { sparsebit_set_num(vm->vpages_valid, 0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift); @@ -385,7 +385,7 @@ struct kvm_vm *____vm_create(struct vm_shape shape) /* Limit to VA-bit canonical virtual addresses. */ vm->vpages_valid = sparsebit_alloc(); - vm_vaddr_populate_bitmap(vm); + vm_populate_gva_bitmap(vm); /* Limit physical addresses to PA-bits. 
*/ vm->max_gfn = vm_compute_max_gfn(vm); @@ -396,12 +396,12 @@ struct kvm_vm *____vm_create(struct vm_shape shape) return vm; } -static uint64_t vm_nr_pages_required(enum vm_guest_mode mode, - uint32_t nr_runnable_vcpus, - uint64_t extra_mem_pages) +static u64 vm_nr_pages_required(enum vm_guest_mode mode, + u32 nr_runnable_vcpus, + u64 extra_mem_pages) { - uint64_t page_size = vm_guest_mode_params[mode].page_size; - uint64_t nr_pages; + u64 page_size = vm_guest_mode_params[mode].page_size; + u64 nr_pages; TEST_ASSERT(nr_runnable_vcpus, "Use vm_create_barebones() for VMs that _never_ have vCPUs"); @@ -435,7 +435,7 @@ static uint64_t vm_nr_pages_required(enum vm_guest_mode mode, return vm_adjust_num_guest_pages(mode, nr_pages); } -void kvm_set_files_rlimit(uint32_t nr_vcpus) +void kvm_set_files_rlimit(u32 nr_vcpus) { /* * Each vCPU will open two file descriptors: the vCPU itself and the @@ -476,10 +476,10 @@ static bool is_guest_memfd_required(struct vm_shape shape) #endif } -struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus, - uint64_t nr_extra_pages) +struct kvm_vm *__vm_create(struct vm_shape shape, u32 nr_runnable_vcpus, + u64 nr_extra_pages) { - uint64_t nr_pages = vm_nr_pages_required(shape.mode, nr_runnable_vcpus, + u64 nr_pages = vm_nr_pages_required(shape.mode, nr_runnable_vcpus, nr_extra_pages); struct userspace_mem_region *slot0; struct kvm_vm *vm; @@ -546,8 +546,8 @@ struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus, * extra_mem_pages is only used to calculate the maximum page table size, * no real memory allocation for non-slot0 memory in this function. 
*/ -struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus, - uint64_t extra_mem_pages, +struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, u32 nr_vcpus, + u64 extra_mem_pages, void *guest_code, struct kvm_vcpu *vcpus[]) { struct kvm_vm *vm; @@ -566,7 +566,7 @@ struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus, struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape, struct kvm_vcpu **vcpu, - uint64_t extra_mem_pages, + u64 extra_mem_pages, void *guest_code) { struct kvm_vcpu *vcpus[1]; @@ -614,7 +614,7 @@ void kvm_vm_restart(struct kvm_vm *vmp) } __weak struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, - uint32_t vcpu_id) + u32 vcpu_id) { return __vm_vcpu_add(vm, vcpu_id); } @@ -636,9 +636,9 @@ int __pin_task_to_cpu(pthread_t task, int cpu) return pthread_setaffinity_np(task, sizeof(cpuset), &cpuset); } -static uint32_t parse_pcpu(const char *cpu_str, const cpu_set_t *allowed_mask) +static u32 parse_pcpu(const char *cpu_str, const cpu_set_t *allowed_mask) { - uint32_t pcpu = atoi_non_negative("CPU number", cpu_str); + u32 pcpu = atoi_non_negative("CPU number", cpu_str); TEST_ASSERT(CPU_ISSET(pcpu, allowed_mask), "Not allowed to run on pCPU '%d', check cgroups?", pcpu); @@ -662,7 +662,7 @@ void kvm_print_vcpu_pinning_help(void) " (default: no pinning)\n", name, name); } -void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[], +void kvm_parse_vcpu_pinning(const char *pcpus_string, u32 vcpu_to_pcpu[], int nr_vcpus) { cpu_set_t allowed_mask; @@ -715,15 +715,15 @@ void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[], * region exists. 
*/ static struct userspace_mem_region * -userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end) +userspace_mem_region_find(struct kvm_vm *vm, u64 start, u64 end) { struct rb_node *node; for (node = vm->regions.gpa_tree.rb_node; node; ) { struct userspace_mem_region *region = container_of(node, struct userspace_mem_region, gpa_node); - uint64_t existing_start = region->region.guest_phys_addr; - uint64_t existing_end = region->region.guest_phys_addr + u64 existing_start = region->region.guest_phys_addr; + u64 existing_end = region->region.guest_phys_addr + region->region.memory_size - 1; if (start <= existing_end && end >= existing_start) return region; @@ -918,8 +918,8 @@ static void vm_userspace_mem_region_hva_insert(struct rb_root *hva_tree, } -int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags, - uint64_t gpa, uint64_t size, void *hva) +int __vm_set_user_memory_region(struct kvm_vm *vm, u32 slot, u32 flags, + gpa_t gpa, u64 size, void *hva) { struct kvm_userspace_memory_region region = { .slot = slot, @@ -932,8 +932,8 @@ int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, ®ion); } -void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags, - uint64_t gpa, uint64_t size, void *hva) +void vm_set_user_memory_region(struct kvm_vm *vm, u32 slot, u32 flags, + gpa_t gpa, u64 size, void *hva) { int ret = __vm_set_user_memory_region(vm, slot, flags, gpa, size, hva); @@ -945,9 +945,9 @@ void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags, __TEST_REQUIRE(kvm_has_cap(KVM_CAP_USER_MEMORY2), \ "KVM selftests now require KVM_SET_USER_MEMORY_REGION2 (introduced in v6.8)") -int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags, - uint64_t gpa, uint64_t size, void *hva, - uint32_t guest_memfd, uint64_t guest_memfd_offset) +int __vm_set_user_memory_region2(struct kvm_vm *vm, u32 
slot, u32 flags, + gpa_t gpa, u64 size, void *hva, + u32 guest_memfd, u64 guest_memfd_offset) { struct kvm_userspace_memory_region2 region = { .slot = slot, @@ -964,9 +964,9 @@ int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flag return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION2, ®ion); } -void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags, - uint64_t gpa, uint64_t size, void *hva, - uint32_t guest_memfd, uint64_t guest_memfd_offset) +void vm_set_user_memory_region2(struct kvm_vm *vm, u32 slot, u32 flags, + gpa_t gpa, u64 size, void *hva, + u32 guest_memfd, u64 guest_memfd_offset) { int ret = __vm_set_user_memory_region2(vm, slot, flags, gpa, size, hva, guest_memfd, guest_memfd_offset); @@ -978,8 +978,8 @@ void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags /* FIXME: This thing needs to be ripped apart and rewritten. */ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type, - uint64_t gpa, uint32_t slot, uint64_t npages, uint32_t flags, - int guest_memfd, uint64_t guest_memfd_offset) + gpa_t gpa, u32 slot, u64 npages, u32 flags, + int guest_memfd, u64 guest_memfd_offset) { int ret; struct userspace_mem_region *region; @@ -1016,8 +1016,8 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type, " requested gpa: 0x%lx npages: 0x%lx page_size: 0x%x\n" " existing gpa: 0x%lx size: 0x%lx", gpa, npages, vm->page_size, - (uint64_t) region->region.guest_phys_addr, - (uint64_t) region->region.memory_size); + (u64)region->region.guest_phys_addr, + (u64)region->region.memory_size); /* Confirm no region with the requested slot already exists. 
*/ hash_for_each_possible(vm->regions.slot_hash, region, slot_node, @@ -1027,11 +1027,11 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type, TEST_FAIL("A mem region with the requested slot " "already exists.\n" - " requested slot: %u paddr: 0x%lx npages: 0x%lx\n" - " existing slot: %u paddr: 0x%lx size: 0x%lx", + " requested slot: %u gpa: 0x%lx npages: 0x%lx\n" + " existing slot: %u gpa: 0x%lx size: 0x%lx", slot, gpa, npages, region->region.slot, - (uint64_t) region->region.guest_phys_addr, - (uint64_t) region->region.memory_size); + (u64)region->region.guest_phys_addr, + (u64)region->region.memory_size); } /* Allocate and initialize new mem region structure. */ @@ -1085,7 +1085,7 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type, if (flags & KVM_MEM_GUEST_MEMFD) { if (guest_memfd < 0) { - uint32_t guest_memfd_flags = 0; + u32 guest_memfd_flags = 0; TEST_ASSERT(!guest_memfd_offset, "Offset must be zero when creating new guest_memfd"); guest_memfd = vm_create_guest_memfd(vm, mem_size, guest_memfd_flags); @@ -1141,8 +1141,7 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type, void vm_userspace_mem_region_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type, - uint64_t gpa, uint32_t slot, uint64_t npages, - uint32_t flags) + gpa_t gpa, u32 slot, u64 npages, u32 flags) { vm_mem_add(vm, src_type, gpa, slot, npages, flags, -1, 0); } @@ -1163,7 +1162,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm, * memory slot ID). */ struct userspace_mem_region * -memslot2region(struct kvm_vm *vm, uint32_t memslot) +memslot2region(struct kvm_vm *vm, u32 memslot) { struct userspace_mem_region *region; @@ -1194,7 +1193,7 @@ memslot2region(struct kvm_vm *vm, uint32_t memslot) * Sets the flags of the memory region specified by the value of slot, * to the values given by flags. 
*/ -void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags) +void vm_mem_region_set_flags(struct kvm_vm *vm, u32 slot, u32 flags) { int ret; struct userspace_mem_region *region; @@ -1210,7 +1209,7 @@ void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags) ret, errno, slot, flags); } -void vm_mem_region_reload(struct kvm_vm *vm, uint32_t slot) +void vm_mem_region_reload(struct kvm_vm *vm, u32 slot) { struct userspace_mem_region *region = memslot2region(vm, slot); struct kvm_userspace_memory_region2 tmp = region->region; @@ -1234,7 +1233,7 @@ void vm_mem_region_reload(struct kvm_vm *vm, uint32_t slot) * * Change the gpa of a memory region. */ -void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa) +void vm_mem_region_move(struct kvm_vm *vm, u32 slot, u64 new_gpa) { struct userspace_mem_region *region; int ret; @@ -1263,7 +1262,7 @@ void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa) * * Delete a memory region. */ -void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot) +void vm_mem_region_delete(struct kvm_vm *vm, u32 slot) { struct userspace_mem_region *region = memslot2region(vm, slot); @@ -1273,18 +1272,18 @@ void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot) __vm_mem_region_delete(vm, region); } -void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t base, uint64_t size, +void vm_guest_mem_fallocate(struct kvm_vm *vm, u64 base, u64 size, bool punch_hole) { const int mode = FALLOC_FL_KEEP_SIZE | (punch_hole ? 
FALLOC_FL_PUNCH_HOLE : 0); struct userspace_mem_region *region; - uint64_t end = base + size; - uint64_t gpa, len; + u64 end = base + size; + gpa_t gpa, len; off_t fd_offset; int ret; for (gpa = base; gpa < end; gpa += len) { - uint64_t offset; + u64 offset; region = userspace_mem_region_find(vm, gpa, gpa); TEST_ASSERT(region && region->region.flags & KVM_MEM_GUEST_MEMFD, @@ -1292,7 +1291,7 @@ void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t base, uint64_t size, offset = gpa - region->region.guest_phys_addr; fd_offset = region->region.guest_memfd_offset + offset; - len = min_t(uint64_t, end - gpa, region->region.memory_size - offset); + len = min_t(u64, end - gpa, region->region.memory_size - offset); ret = fallocate(region->region.guest_memfd, mode, fd_offset, len); TEST_ASSERT(!ret, "fallocate() failed to %s at %lx (len = %lu), fd = %d, mode = %x, offset = %lx", @@ -1317,7 +1316,7 @@ static size_t vcpu_mmap_sz(void) return ret; } -static bool vcpu_exists(struct kvm_vm *vm, uint32_t vcpu_id) +static bool vcpu_exists(struct kvm_vm *vm, u32 vcpu_id) { struct kvm_vcpu *vcpu; @@ -1333,7 +1332,7 @@ static bool vcpu_exists(struct kvm_vm *vm, uint32_t vcpu_id) * Adds a virtual CPU to the VM specified by vm with the ID given by vcpu_id. * No additional vCPU setup is done. Returns the vCPU. */ -struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) +struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, u32 vcpu_id) { struct kvm_vcpu *vcpu; @@ -1367,33 +1366,18 @@ struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) } /* - * VM Virtual Address Unused Gap - * - * Input Args: - * vm - Virtual Machine - * sz - Size (bytes) - * vaddr_min - Minimum Virtual Address - * - * Output Args: None - * - * Return: - * Lowest virtual address at or above vaddr_min, with at least - * sz unused bytes. TEST_ASSERT failure if no area of at least - * size sz is available. 
- * - * Within the VM specified by vm, locates the lowest starting virtual - * address >= vaddr_min, that has at least sz unallocated bytes. A + * Within the VM specified by @vm, locates the lowest starting guest virtual + * address >= @min_gva, that has at least @sz unallocated bytes. A * TEST_ASSERT failure occurs for invalid input or no area of at least - * sz unallocated bytes >= vaddr_min is available. + * @sz unallocated bytes >= @min_gva is available. */ -vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, - vm_vaddr_t vaddr_min) +gva_t vm_unused_gva_gap(struct kvm_vm *vm, size_t sz, gva_t min_gva) { - uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift; + u64 pages = (sz + vm->page_size - 1) >> vm->page_shift; /* Determine lowest permitted virtual page index. */ - uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift; - if ((pgidx_start * vm->page_size) < vaddr_min) + u64 pgidx_start = (min_gva + vm->page_size - 1) >> vm->page_shift; + if ((pgidx_start * vm->page_size) < min_gva) goto no_va_found; /* Loop over section with enough valid virtual page indexes. 
*/ @@ -1430,7 +1414,7 @@ vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, } while (pgidx_start != 0); no_va_found: - TEST_FAIL("No vaddr of specified pages available, pages: 0x%lx", pages); + TEST_FAIL("No gva of specified pages available, pages: 0x%lx", pages); /* NOT REACHED */ return -1; @@ -1452,145 +1436,91 @@ va_found: return pgidx_start * vm->page_size; } -static vm_vaddr_t ____vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, - vm_vaddr_t vaddr_min, - enum kvm_mem_region_type type, - bool protected) +static gva_t ____vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva, + enum kvm_mem_region_type type, bool protected) { - uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0); + u64 pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0); virt_pgd_alloc(vm); - vm_paddr_t paddr = __vm_phy_pages_alloc(vm, pages, - KVM_UTIL_MIN_PFN * vm->page_size, - vm->memslots[type], protected); + gpa_t gpa = __vm_phy_pages_alloc(vm, pages, + KVM_UTIL_MIN_PFN * vm->page_size, + vm->memslots[type], protected); /* * Find an unused range of virtual page addresses of at least * pages in length. */ - vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min); + gva_t gva_start = vm_unused_gva_gap(vm, sz, min_gva); /* Map the virtual pages. 
*/ - for (vm_vaddr_t vaddr = vaddr_start; pages > 0; - pages--, vaddr += vm->page_size, paddr += vm->page_size) { + for (gva_t gva = gva_start; pages > 0; + pages--, gva += vm->page_size, gpa += vm->page_size) { - virt_pg_map(vm, vaddr, paddr); + virt_pg_map(vm, gva, gpa); } - return vaddr_start; + return gva_start; } -vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, - enum kvm_mem_region_type type) +gva_t __vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva, + enum kvm_mem_region_type type) { - return ____vm_vaddr_alloc(vm, sz, vaddr_min, type, - vm_arch_has_protected_memory(vm)); + return ____vm_alloc(vm, sz, min_gva, type, + vm_arch_has_protected_memory(vm)); } -vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz, - vm_vaddr_t vaddr_min, - enum kvm_mem_region_type type) +gva_t vm_alloc_shared(struct kvm_vm *vm, size_t sz, gva_t min_gva, + enum kvm_mem_region_type type) { - return ____vm_vaddr_alloc(vm, sz, vaddr_min, type, false); + return ____vm_alloc(vm, sz, min_gva, type, false); } /* - * VM Virtual Address Allocate - * - * Input Args: - * vm - Virtual Machine - * sz - Size in bytes - * vaddr_min - Minimum starting virtual address - * - * Output Args: None - * - * Return: - * Starting guest virtual address - * - * Allocates at least sz bytes within the virtual address space of the vm - * given by vm. The allocated bytes are mapped to a virtual address >= - * the address given by vaddr_min. Note that each allocation uses a - * a unique set of pages, with the minimum real allocation being at least - * a page. The allocated physical space comes from the TEST_DATA memory region. + * Allocates at least sz bytes within the virtual address space of the VM + * given by @vm. The allocated bytes are mapped to a virtual address >= the + * address given by @min_gva. Note that each allocation uses a a unique set + * of pages, with the minimum real allocation being at least a page. 
The + * allocated physical space comes from the TEST_DATA memory region. */ -vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min) +gva_t vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva) { - return __vm_vaddr_alloc(vm, sz, vaddr_min, MEM_REGION_TEST_DATA); + return __vm_alloc(vm, sz, min_gva, MEM_REGION_TEST_DATA); } -/* - * VM Virtual Address Allocate Pages - * - * Input Args: - * vm - Virtual Machine - * - * Output Args: None - * - * Return: - * Starting guest virtual address - * - * Allocates at least N system pages worth of bytes within the virtual address - * space of the vm. - */ -vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages) +gva_t vm_alloc_pages(struct kvm_vm *vm, int nr_pages) { - return vm_vaddr_alloc(vm, nr_pages * getpagesize(), KVM_UTIL_MIN_VADDR); + return vm_alloc(vm, nr_pages * getpagesize(), KVM_UTIL_MIN_VADDR); } -vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type) +gva_t __vm_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type) { - return __vm_vaddr_alloc(vm, getpagesize(), KVM_UTIL_MIN_VADDR, type); + return __vm_alloc(vm, getpagesize(), KVM_UTIL_MIN_VADDR, type); } -/* - * VM Virtual Address Allocate Page - * - * Input Args: - * vm - Virtual Machine - * - * Output Args: None - * - * Return: - * Starting guest virtual address - * - * Allocates at least one system page worth of bytes within the virtual address - * space of the vm. - */ -vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm) +gva_t vm_alloc_page(struct kvm_vm *vm) { - return vm_vaddr_alloc_pages(vm, 1); + return vm_alloc_pages(vm, 1); } /* - * Map a range of VM virtual address to the VM's physical address - * - * Input Args: - * vm - Virtual Machine - * vaddr - Virtuall address to map - * paddr - VM Physical Address - * npages - The number of pages to map + * Map a range of VM virtual address to the VM's physical address. 
* - * Output Args: None - * - * Return: None - * - * Within the VM given by @vm, creates a virtual translation for - * @npages starting at @vaddr to the page range starting at @paddr. + * Within the VM given by @vm, creates a virtual translation for @npages + * starting at @gva to the page range starting at @gpa. */ -void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, - unsigned int npages) +void virt_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa, unsigned int npages) { size_t page_size = vm->page_size; size_t size = npages * page_size; - TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow"); - TEST_ASSERT(paddr + size > paddr, "Paddr overflow"); + TEST_ASSERT(gva + size > gva, "Vaddr overflow"); + TEST_ASSERT(gpa + size > gpa, "Paddr overflow"); while (npages--) { - virt_pg_map(vm, vaddr, paddr); + virt_pg_map(vm, gva, gpa); - vaddr += page_size; - paddr += page_size; + gva += page_size; + gpa += page_size; } } @@ -1611,7 +1541,7 @@ void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, * address providing the memory to the vm physical address is returned. * A TEST_ASSERT failure occurs if no region containing gpa exists. */ -void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa) +void *addr_gpa2hva(struct kvm_vm *vm, gpa_t gpa) { struct userspace_mem_region *region; @@ -1644,7 +1574,7 @@ void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa) * VM physical address is returned. A TEST_ASSERT failure occurs if no * region containing hva exists. 
*/ -vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva) +gpa_t addr_hva2gpa(struct kvm_vm *vm, void *hva) { struct rb_node *node; @@ -1655,7 +1585,7 @@ vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva) if (hva >= region->host_mem) { if (hva <= (region->host_mem + region->region.memory_size - 1)) - return (vm_paddr_t)((uintptr_t) + return (gpa_t)((uintptr_t) region->region.guest_phys_addr + (hva - (uintptr_t)region->host_mem)); @@ -1687,7 +1617,7 @@ vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva) * memory without mapping said memory in the guest's address space. And, for * userfaultfd-based demand paging, to do so without triggering userfaults. */ -void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa) +void *addr_gpa2alias(struct kvm_vm *vm, gpa_t gpa) { struct userspace_mem_region *region; uintptr_t offset; @@ -1781,8 +1711,8 @@ struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu) void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu) { - uint32_t page_size = getpagesize(); - uint32_t size = vcpu->vm->dirty_ring_size; + u32 page_size = getpagesize(); + u32 size = vcpu->vm->dirty_ring_size; TEST_ASSERT(size > 0, "Should enable dirty ring first"); @@ -1811,7 +1741,7 @@ void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu) * Device Ioctl */ -int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr) +int __kvm_has_device_attr(int dev_fd, u32 group, u64 attr) { struct kvm_device_attr attribute = { .group = group, @@ -1822,7 +1752,7 @@ int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr) return ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute); } -int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type) +int __kvm_test_create_device(struct kvm_vm *vm, u64 type) { struct kvm_create_device create_dev = { .type = type, @@ -1832,7 +1762,7 @@ int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type) return __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev); } -int __kvm_create_device(struct kvm_vm *vm, uint64_t type) +int 
__kvm_create_device(struct kvm_vm *vm, u64 type) { struct kvm_create_device create_dev = { .type = type, @@ -1846,7 +1776,7 @@ int __kvm_create_device(struct kvm_vm *vm, uint64_t type) return err ? : create_dev.fd; } -int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val) +int __kvm_device_attr_get(int dev_fd, u32 group, u64 attr, void *val) { struct kvm_device_attr kvmattr = { .group = group, @@ -1858,7 +1788,7 @@ int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val) return __kvm_ioctl(dev_fd, KVM_GET_DEVICE_ATTR, &kvmattr); } -int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val) +int __kvm_device_attr_set(int dev_fd, u32 group, u64 attr, void *val) { struct kvm_device_attr kvmattr = { .group = group, @@ -1874,7 +1804,7 @@ int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val) * IRQ related functions. */ -int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level) +int _kvm_irq_line(struct kvm_vm *vm, u32 irq, int level) { struct kvm_irq_level irq_level = { .irq = irq, @@ -1884,7 +1814,7 @@ int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level) return __vm_ioctl(vm, KVM_IRQ_LINE, &irq_level); } -void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level) +void kvm_irq_line(struct kvm_vm *vm, u32 irq, int level) { int ret = _kvm_irq_line(vm, irq, level); @@ -1906,7 +1836,7 @@ struct kvm_irq_routing *kvm_gsi_routing_create(void) } void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing, - uint32_t gsi, uint32_t pin) + u32 gsi, u32 pin) { int i; @@ -1956,7 +1886,7 @@ void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing) * Dumps the current state of the VM given by vm, to the FILE stream * given by stream. 
*/ -void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) +void vm_dump(FILE *stream, struct kvm_vm *vm, u8 indent) { int ctr; struct userspace_mem_region *region; @@ -1969,8 +1899,8 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) { fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx " "host_virt: %p\n", indent + 2, "", - (uint64_t) region->region.guest_phys_addr, - (uint64_t) region->region.memory_size, + (u64)region->region.guest_phys_addr, + (u64)region->region.memory_size, region->host_mem); fprintf(stream, "%*sunused_phy_pages: ", indent + 2, ""); sparsebit_dump(stream, region->unused_phy_pages, 0); @@ -2077,7 +2007,7 @@ const char *exit_reason_str(unsigned int exit_reason) * Input Args: * vm - Virtual Machine * num - number of pages - * paddr_min - Physical address minimum + * min_gpa - Physical address minimum * memslot - Memory region to allocate page from * protected - True if the pages will be used as protected/private memory * @@ -2087,29 +2017,29 @@ const char *exit_reason_str(unsigned int exit_reason) * Starting physical address * * Within the VM specified by vm, locates a range of available physical - * pages at or above paddr_min. If found, the pages are marked as in use + * pages at or above min_gpa. If found, the pages are marked as in use * and their base address is returned. A TEST_ASSERT failure occurs if - * not enough pages are available at or above paddr_min. + * not enough pages are available at or above min_gpa. 
*/ -vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, - vm_paddr_t paddr_min, uint32_t memslot, - bool protected) +gpa_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, + gpa_t min_gpa, u32 memslot, + bool protected) { struct userspace_mem_region *region; sparsebit_idx_t pg, base; TEST_ASSERT(num > 0, "Must allocate at least one page"); - TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address " + TEST_ASSERT((min_gpa % vm->page_size) == 0, "Min physical address " "not divisible by page size.\n" - " paddr_min: 0x%lx page_size: 0x%x", - paddr_min, vm->page_size); + " min_gpa: 0x%lx page_size: 0x%x", + min_gpa, vm->page_size); region = memslot2region(vm, memslot); TEST_ASSERT(!protected || region->protected_phy_pages, "Region doesn't support protected memory"); - base = pg = paddr_min >> vm->page_shift; + base = pg = min_gpa >> vm->page_shift; do { for (; pg < base + num; ++pg) { if (!sparsebit_is_set(region->unused_phy_pages, pg)) { @@ -2121,8 +2051,8 @@ vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, if (pg == 0) { fprintf(stderr, "No guest physical page available, " - "paddr_min: 0x%lx page_size: 0x%x memslot: %u\n", - paddr_min, vm->page_size, memslot); + "min_gpa: 0x%lx page_size: 0x%x memslot: %u\n", + min_gpa, vm->page_size, memslot); fputs("---- vm dump ----\n", stderr); vm_dump(stderr, vm, 2); abort(); @@ -2137,13 +2067,12 @@ vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, return base * vm->page_size; } -vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, - uint32_t memslot) +gpa_t vm_phy_page_alloc(struct kvm_vm *vm, gpa_t min_gpa, u32 memslot) { - return vm_phy_pages_alloc(vm, 1, paddr_min, memslot); + return vm_phy_pages_alloc(vm, 1, min_gpa, memslot); } -vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm) +gpa_t vm_alloc_page_table(struct kvm_vm *vm) { return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, vm->memslots[MEM_REGION_PT]); @@ -2161,7 +2090,7 @@ vm_paddr_t 
vm_alloc_page_table(struct kvm_vm *vm) * Return: * Equivalent host virtual address */ -void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva) +void *addr_gva2hva(struct kvm_vm *vm, gva_t gva) { return addr_gpa2hva(vm, addr_gva2gpa(vm, gva)); } @@ -2259,7 +2188,7 @@ struct kvm_stats_desc *read_stats_descriptors(int stats_fd, * Read the data values of a specified stat from the binary stats interface. */ void read_stat_data(int stats_fd, struct kvm_stats_header *header, - struct kvm_stats_desc *desc, uint64_t *data, + struct kvm_stats_desc *desc, u64 *data, size_t max_elements) { size_t nr_elements = min_t(ssize_t, desc->size, max_elements); @@ -2280,7 +2209,7 @@ void read_stat_data(int stats_fd, struct kvm_stats_header *header, } void kvm_get_stat(struct kvm_binary_stats *stats, const char *name, - uint64_t *data, size_t max_elements) + u64 *data, size_t max_elements) { struct kvm_stats_desc *desc; size_t size_desc; @@ -2357,7 +2286,7 @@ void __attribute((constructor)) kvm_selftest_init(void) kvm_selftest_arch_init(); } -bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr) +bool vm_is_gpa_protected(struct kvm_vm *vm, gpa_t gpa) { sparsebit_idx_t pg = 0; struct userspace_mem_region *region; @@ -2365,10 +2294,10 @@ bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr) if (!vm_arch_has_protected_memory(vm)) return false; - region = userspace_mem_region_find(vm, paddr, paddr); - TEST_ASSERT(region, "No vm physical memory at 0x%lx", paddr); + region = userspace_mem_region_find(vm, gpa, gpa); + TEST_ASSERT(region, "No vm physical memory at 0x%lx", gpa); - pg = paddr >> vm->page_shift; + pg = gpa >> vm->page_shift; return sparsebit_is_set(region->protected_phy_pages, pg); } diff --git a/tools/testing/selftests/kvm/lib/loongarch/processor.c b/tools/testing/selftests/kvm/lib/loongarch/processor.c index ee4ad3b1d2a4..64d91fb76522 100644 --- a/tools/testing/selftests/kvm/lib/loongarch/processor.c +++ b/tools/testing/selftests/kvm/lib/loongarch/processor.c @@ 
-12,32 +12,32 @@ #define LOONGARCH_PAGE_TABLE_PHYS_MIN 0x200000 #define LOONGARCH_GUEST_STACK_VADDR_MIN 0x200000 -static vm_paddr_t invalid_pgtable[4]; -static vm_vaddr_t exception_handlers; +static gpa_t invalid_pgtable[4]; +static gva_t exception_handlers; -static uint64_t virt_pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level) +static u64 virt_pte_index(struct kvm_vm *vm, gva_t gva, int level) { unsigned int shift; - uint64_t mask; + u64 mask; shift = level * (vm->page_shift - 3) + vm->page_shift; mask = (1UL << (vm->page_shift - 3)) - 1; return (gva >> shift) & mask; } -static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry) +static u64 pte_addr(struct kvm_vm *vm, u64 entry) { return entry & ~((0x1UL << vm->page_shift) - 1); } -static uint64_t ptrs_per_pte(struct kvm_vm *vm) +static u64 ptrs_per_pte(struct kvm_vm *vm) { return 1 << (vm->page_shift - 3); } -static void virt_set_pgtable(struct kvm_vm *vm, vm_paddr_t table, vm_paddr_t child) +static void virt_set_pgtable(struct kvm_vm *vm, gpa_t table, gpa_t child) { - uint64_t *ptep; + u64 *ptep; int i, ptrs_per_pte; ptep = addr_gpa2hva(vm, table); @@ -49,7 +49,7 @@ static void virt_set_pgtable(struct kvm_vm *vm, vm_paddr_t table, vm_paddr_t chi void virt_arch_pgd_alloc(struct kvm_vm *vm) { int i; - vm_paddr_t child, table; + gpa_t child, table; if (vm->mmu.pgd_created) return; @@ -67,16 +67,16 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm) vm->mmu.pgd_created = true; } -static int virt_pte_none(uint64_t *ptep, int level) +static int virt_pte_none(u64 *ptep, int level) { return *ptep == invalid_pgtable[level]; } -static uint64_t *virt_populate_pte(struct kvm_vm *vm, vm_vaddr_t gva, int alloc) +static u64 *virt_populate_pte(struct kvm_vm *vm, gva_t gva, int alloc) { int level; - uint64_t *ptep; - vm_paddr_t child; + u64 *ptep; + gpa_t child; if (!vm->mmu.pgd_created) goto unmapped_gva; @@ -106,43 +106,42 @@ unmapped_gva: exit(EXIT_FAILURE); } -vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t 
gva) +gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva) { - uint64_t *ptep; + u64 *ptep; ptep = virt_populate_pte(vm, gva, 0); - TEST_ASSERT(*ptep != 0, "Virtual address vaddr: 0x%lx not mapped\n", gva); + TEST_ASSERT(*ptep != 0, "Virtual address gva: 0x%lx not mapped\n", gva); return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1)); } -void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) +void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa) { - uint32_t prot_bits; - uint64_t *ptep; + u32 prot_bits; + u64 *ptep; - TEST_ASSERT((vaddr % vm->page_size) == 0, + TEST_ASSERT((gva % vm->page_size) == 0, "Virtual address not on page boundary,\n" - "vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size); - TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, - (vaddr >> vm->page_shift)), - "Invalid virtual address, vaddr: 0x%lx", vaddr); - TEST_ASSERT((paddr % vm->page_size) == 0, + "gva: 0x%lx vm->page_size: 0x%x", gva, vm->page_size); + TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)), + "Invalid virtual address, gva: 0x%lx", gva); + TEST_ASSERT((gpa % vm->page_size) == 0, "Physical address not on page boundary,\n" - "paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size); - TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn, + "gpa: 0x%lx vm->page_size: 0x%x", gpa, vm->page_size); + TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn, "Physical address beyond maximum supported,\n" - "paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", - paddr, vm->max_gfn, vm->page_size); + "gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", + gpa, vm->max_gfn, vm->page_size); - ptep = virt_populate_pte(vm, vaddr, 1); + ptep = virt_populate_pte(vm, gva, 1); prot_bits = _PAGE_PRESENT | __READABLE | __WRITEABLE | _CACHE_CC | _PAGE_USER; - WRITE_ONCE(*ptep, paddr | prot_bits); + WRITE_ONCE(*ptep, gpa | prot_bits); } -static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level) +static void 
pte_dump(FILE *stream, struct kvm_vm *vm, u8 indent, u64 page, int level) { - uint64_t pte, *ptep; + u64 pte, *ptep; static const char * const type[] = { "pte", "pmd", "pud", "pgd"}; if (level < 0) @@ -158,7 +157,7 @@ static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t p } } -void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) +void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent) { int level; @@ -169,7 +168,7 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) pte_dump(stream, vm, indent, vm->mmu.pgd, level); } -void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent) +void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, u8 indent) { } @@ -206,8 +205,9 @@ void vm_init_descriptor_tables(struct kvm_vm *vm) { void *addr; - vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers), - LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA); + vm->handlers = __vm_alloc(vm, sizeof(struct handlers), + LOONGARCH_GUEST_STACK_VADDR_MIN, + MEM_REGION_DATA); addr = addr_gva2hva(vm, vm->handlers); memset(addr, 0, vm->page_size); @@ -223,7 +223,7 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector, handler_fn hand handlers->exception_handlers[vector] = handler; } -uint32_t guest_get_vcpuid(void) +u32 guest_get_vcpuid(void) { return csr_read(LOONGARCH_CSR_CPUID); } @@ -241,36 +241,36 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...) 
va_start(ap, num); for (i = 0; i < num; i++) - regs.gpr[i + 4] = va_arg(ap, uint64_t); + regs.gpr[i + 4] = va_arg(ap, u64); va_end(ap); vcpu_regs_set(vcpu, ®s); } -static void loongarch_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val) +static void loongarch_set_reg(struct kvm_vcpu *vcpu, u64 id, u64 val) { __vcpu_set_reg(vcpu, id, val); } -static void loongarch_set_cpucfg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val) +static void loongarch_set_cpucfg(struct kvm_vcpu *vcpu, u64 id, u64 val) { - uint64_t cfgid; + u64 cfgid; cfgid = KVM_REG_LOONGARCH_CPUCFG | KVM_REG_SIZE_U64 | 8 * id; __vcpu_set_reg(vcpu, cfgid, val); } -static void loongarch_get_csr(struct kvm_vcpu *vcpu, uint64_t id, void *addr) +static void loongarch_get_csr(struct kvm_vcpu *vcpu, u64 id, void *addr) { - uint64_t csrid; + u64 csrid; csrid = KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | 8 * id; __vcpu_get_reg(vcpu, csrid, addr); } -static void loongarch_set_csr(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val) +static void loongarch_set_csr(struct kvm_vcpu *vcpu, u64 id, u64 val) { - uint64_t csrid; + u64 csrid; csrid = KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | 8 * id; __vcpu_set_reg(vcpu, csrid, val); @@ -354,8 +354,8 @@ void loongarch_vcpu_setup(struct kvm_vcpu *vcpu) loongarch_set_csr(vcpu, LOONGARCH_CSR_STLBPGSIZE, PS_DEFAULT_SIZE); /* LOONGARCH_CSR_KS1 is used for exception stack */ - val = __vm_vaddr_alloc(vm, vm->page_size, - LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA); + val = __vm_alloc(vm, vm->page_size, LOONGARCH_GUEST_STACK_VADDR_MIN, + MEM_REGION_DATA); TEST_ASSERT(val != 0, "No memory for exception stack"); val = val + vm->page_size; loongarch_set_csr(vcpu, LOONGARCH_CSR_KS1, val); @@ -369,23 +369,23 @@ void loongarch_vcpu_setup(struct kvm_vcpu *vcpu) loongarch_set_csr(vcpu, LOONGARCH_CSR_TMID, vcpu->id); } -struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) +struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id) { size_t stack_size; - 
uint64_t stack_vaddr; + u64 stack_gva; struct kvm_regs regs; struct kvm_vcpu *vcpu; vcpu = __vm_vcpu_add(vm, vcpu_id); stack_size = vm->page_size; - stack_vaddr = __vm_vaddr_alloc(vm, stack_size, - LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA); - TEST_ASSERT(stack_vaddr != 0, "No memory for vm stack"); + stack_gva = __vm_alloc(vm, stack_size, + LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA); + TEST_ASSERT(stack_gva != 0, "No memory for vm stack"); loongarch_vcpu_setup(vcpu); /* Setup guest general purpose registers */ vcpu_regs_get(vcpu, ®s); - regs.gpr[3] = stack_vaddr + stack_size; + regs.gpr[3] = stack_gva + stack_size; vcpu_regs_set(vcpu, ®s); return vcpu; @@ -397,6 +397,6 @@ void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code) /* Setup guest PC register */ vcpu_regs_get(vcpu, ®s); - regs.pc = (uint64_t)guest_code; + regs.pc = (u64)guest_code; vcpu_regs_set(vcpu, ®s); } diff --git a/tools/testing/selftests/kvm/lib/loongarch/ucall.c b/tools/testing/selftests/kvm/lib/loongarch/ucall.c index fc6cbb50573f..cd49a3440ead 100644 --- a/tools/testing/selftests/kvm/lib/loongarch/ucall.c +++ b/tools/testing/selftests/kvm/lib/loongarch/ucall.c @@ -9,17 +9,17 @@ * ucall_exit_mmio_addr holds per-VM values (global data is duplicated by each * VM), it must not be accessed from host code. 
*/ -vm_vaddr_t *ucall_exit_mmio_addr; +gva_t *ucall_exit_mmio_addr; -void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) +void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa) { - vm_vaddr_t mmio_gva = vm_vaddr_unused_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR); + gva_t mmio_gva = vm_unused_gva_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR); virt_map(vm, mmio_gva, mmio_gpa, 1); vm->ucall_mmio_addr = mmio_gpa; - write_guest_global(vm, ucall_exit_mmio_addr, (vm_vaddr_t *)mmio_gva); + write_guest_global(vm, ucall_exit_mmio_addr, (gva_t *)mmio_gva); } void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu) @@ -28,10 +28,10 @@ void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu) if (run->exit_reason == KVM_EXIT_MMIO && run->mmio.phys_addr == vcpu->vm->ucall_mmio_addr) { - TEST_ASSERT(run->mmio.is_write && run->mmio.len == sizeof(uint64_t), + TEST_ASSERT(run->mmio.is_write && run->mmio.len == sizeof(u64), "Unexpected ucall exit mmio address access"); - return (void *)(*((uint64_t *)run->mmio.data)); + return (void *)(*((u64 *)run->mmio.data)); } return NULL; diff --git a/tools/testing/selftests/kvm/lib/memstress.c b/tools/testing/selftests/kvm/lib/memstress.c index 1ea735d66e15..6dcd15910a06 100644 --- a/tools/testing/selftests/kvm/lib/memstress.c +++ b/tools/testing/selftests/kvm/lib/memstress.c @@ -16,7 +16,7 @@ struct memstress_args memstress_args; * Guest virtual memory offset of the testing memory slot. * Must not conflict with identity mapped test code. */ -static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM; +static u64 guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM; struct vcpu_thread { /* The index of the vCPU. */ @@ -44,15 +44,15 @@ static struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; * Continuously write to the first 8 bytes of each page in the * specified region. 
*/ -void memstress_guest_code(uint32_t vcpu_idx) +void memstress_guest_code(u32 vcpu_idx) { struct memstress_args *args = &memstress_args; struct memstress_vcpu_args *vcpu_args = &args->vcpu_args[vcpu_idx]; struct guest_random_state rand_state; - uint64_t gva; - uint64_t pages; - uint64_t addr; - uint64_t page; + gva_t gva; + u64 pages; + u64 addr; + u64 page; int i; rand_state = new_guest_random_state(guest_random_seed + vcpu_idx); @@ -76,9 +76,9 @@ void memstress_guest_code(uint32_t vcpu_idx) addr = gva + (page * args->guest_page_size); if (__guest_random_bool(&rand_state, args->write_percent)) - *(uint64_t *)addr = 0x0123456789ABCDEF; + *(u64 *)addr = 0x0123456789ABCDEF; else - READ_ONCE(*(uint64_t *)addr); + READ_ONCE(*(u64 *)addr); } GUEST_SYNC(1); @@ -87,7 +87,7 @@ void memstress_guest_code(uint32_t vcpu_idx) void memstress_setup_vcpus(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[], - uint64_t vcpu_memory_bytes, + u64 vcpu_memory_bytes, bool partition_vcpu_memory_access) { struct memstress_args *args = &memstress_args; @@ -122,15 +122,15 @@ void memstress_setup_vcpus(struct kvm_vm *vm, int nr_vcpus, } struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus, - uint64_t vcpu_memory_bytes, int slots, + u64 vcpu_memory_bytes, int slots, enum vm_mem_backing_src_type backing_src, bool partition_vcpu_memory_access) { struct memstress_args *args = &memstress_args; struct kvm_vm *vm; - uint64_t guest_num_pages, slot0_pages = 0; - uint64_t backing_src_pagesz = get_backing_src_pagesz(backing_src); - uint64_t region_end_gfn; + u64 guest_num_pages, slot0_pages = 0; + u64 backing_src_pagesz = get_backing_src_pagesz(backing_src); + u64 region_end_gfn; int i; pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode)); @@ -202,8 +202,8 @@ struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus, /* Add extra memory slots for testing */ for (i = 0; i < slots; i++) { - uint64_t region_pages = guest_num_pages / slots; - vm_paddr_t 
region_start = args->gpa + region_pages * args->guest_page_size * i; + u64 region_pages = guest_num_pages / slots; + gpa_t region_start = args->gpa + region_pages * args->guest_page_size * i; vm_userspace_mem_region_add(vm, backing_src, region_start, MEMSTRESS_MEM_SLOT_INDEX + i, @@ -232,7 +232,7 @@ void memstress_destroy_vm(struct kvm_vm *vm) kvm_vm_free(vm); } -void memstress_set_write_percent(struct kvm_vm *vm, uint32_t write_percent) +void memstress_set_write_percent(struct kvm_vm *vm, u32 write_percent) { memstress_args.write_percent = write_percent; sync_global_to_guest(vm, memstress_args.write_percent); @@ -244,7 +244,7 @@ void memstress_set_random_access(struct kvm_vm *vm, bool random_access) sync_global_to_guest(vm, memstress_args.random_access); } -uint64_t __weak memstress_nested_pages(int nr_vcpus) +u64 __weak memstress_nested_pages(int nr_vcpus) { return 0; } @@ -349,7 +349,7 @@ void memstress_get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int sl } void memstress_clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], - int slots, uint64_t pages_per_slot) + int slots, u64 pages_per_slot) { int i; @@ -360,7 +360,7 @@ void memstress_clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], } } -unsigned long **memstress_alloc_bitmaps(int slots, uint64_t pages_per_slot) +unsigned long **memstress_alloc_bitmaps(int slots, u64 pages_per_slot) { unsigned long **bitmaps; int i; diff --git a/tools/testing/selftests/kvm/lib/riscv/processor.c b/tools/testing/selftests/kvm/lib/riscv/processor.c index 067c6b2c15b0..ded5429f3448 100644 --- a/tools/testing/selftests/kvm/lib/riscv/processor.c +++ b/tools/testing/selftests/kvm/lib/riscv/processor.c @@ -15,9 +15,9 @@ #define DEFAULT_RISCV_GUEST_STACK_VADDR_MIN 0xac0000 -static vm_vaddr_t exception_handlers; +static gva_t exception_handlers; -bool __vcpu_has_ext(struct kvm_vcpu *vcpu, uint64_t ext) +bool __vcpu_has_ext(struct kvm_vcpu *vcpu, u64 ext) { unsigned long value = 0; int ret; @@ -27,32 
+27,32 @@ bool __vcpu_has_ext(struct kvm_vcpu *vcpu, uint64_t ext) return !ret && !!value; } -static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry) +static u64 pte_addr(struct kvm_vm *vm, u64 entry) { return ((entry & PGTBL_PTE_ADDR_MASK) >> PGTBL_PTE_ADDR_SHIFT) << PGTBL_PAGE_SIZE_SHIFT; } -static uint64_t ptrs_per_pte(struct kvm_vm *vm) +static u64 ptrs_per_pte(struct kvm_vm *vm) { - return PGTBL_PAGE_SIZE / sizeof(uint64_t); + return PGTBL_PAGE_SIZE / sizeof(u64); } -static uint64_t pte_index_mask[] = { +static u64 pte_index_mask[] = { PGTBL_L0_INDEX_MASK, PGTBL_L1_INDEX_MASK, PGTBL_L2_INDEX_MASK, PGTBL_L3_INDEX_MASK, }; -static uint32_t pte_index_shift[] = { +static u32 pte_index_shift[] = { PGTBL_L0_INDEX_SHIFT, PGTBL_L1_INDEX_SHIFT, PGTBL_L2_INDEX_SHIFT, PGTBL_L3_INDEX_SHIFT, }; -static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level) +static u64 pte_index(struct kvm_vm *vm, gva_t gva, int level) { TEST_ASSERT(level > -1, "Negative page table level (%d) not possible", level); @@ -75,26 +75,25 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm) vm->mmu.pgd_created = true; } -void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) +void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa) { - uint64_t *ptep, next_ppn; + u64 *ptep, next_ppn; int level = vm->mmu.pgtable_levels - 1; - TEST_ASSERT((vaddr % vm->page_size) == 0, + TEST_ASSERT((gva % vm->page_size) == 0, "Virtual address not on page boundary,\n" - " vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size); - TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, - (vaddr >> vm->page_shift)), - "Invalid virtual address, vaddr: 0x%lx", vaddr); - TEST_ASSERT((paddr % vm->page_size) == 0, + " gva: 0x%lx vm->page_size: 0x%x", gva, vm->page_size); + TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)), + "Invalid virtual address, gva: 0x%lx", gva); + TEST_ASSERT((gpa % vm->page_size) == 0, "Physical address not on page boundary,\n" - " paddr: 0x%lx 
vm->page_size: 0x%x", paddr, vm->page_size); - TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn, + " gpa: 0x%lx vm->page_size: 0x%x", gpa, vm->page_size); + TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn, "Physical address beyond maximum supported,\n" - " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", - paddr, vm->max_gfn, vm->page_size); + " gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", + gpa, vm->max_gfn, vm->page_size); - ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pte_index(vm, vaddr, level) * 8; + ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pte_index(vm, gva, level) * 8; if (!*ptep) { next_ppn = vm_alloc_page_table(vm) >> PGTBL_PAGE_SIZE_SHIFT; *ptep = (next_ppn << PGTBL_PTE_ADDR_SHIFT) | @@ -104,7 +103,7 @@ void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) while (level > -1) { ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + - pte_index(vm, vaddr, level) * 8; + pte_index(vm, gva, level) * 8; if (!*ptep && level > 0) { next_ppn = vm_alloc_page_table(vm) >> PGTBL_PAGE_SIZE_SHIFT; @@ -114,14 +113,14 @@ void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) level--; } - paddr = paddr >> PGTBL_PAGE_SIZE_SHIFT; - *ptep = (paddr << PGTBL_PTE_ADDR_SHIFT) | + gpa = gpa >> PGTBL_PAGE_SIZE_SHIFT; + *ptep = (gpa << PGTBL_PTE_ADDR_SHIFT) | PGTBL_PTE_PERM_MASK | PGTBL_PTE_VALID_MASK; } -vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) +gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva) { - uint64_t *ptep; + u64 *ptep; int level = vm->mmu.pgtable_levels - 1; if (!vm->mmu.pgd_created) @@ -148,12 +147,12 @@ unmapped_gva: exit(1); } -static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, - uint64_t page, int level) +static void pte_dump(FILE *stream, struct kvm_vm *vm, u8 indent, + u64 page, int level) { #ifdef DEBUG static const char *const type[] = { "pte", "pmd", "pud", "p4d"}; - uint64_t pte, *ptep; + u64 pte, *ptep; if (level < 0) return; @@ -170,11 +169,11 @@ static void pte_dump(FILE 
*stream, struct kvm_vm *vm, uint8_t indent, #endif } -void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) +void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent) { struct kvm_mmu *mmu = &vm->mmu; int level = mmu->pgtable_levels - 1; - uint64_t pgd, *ptep; + u64 pgd, *ptep; if (!mmu->pgd_created) return; @@ -233,7 +232,7 @@ void riscv_vcpu_mmu_setup(struct kvm_vcpu *vcpu) vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(satp), satp); } -void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent) +void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, u8 indent) { struct kvm_riscv_core core; @@ -311,20 +310,20 @@ void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code) vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.pc), (unsigned long)guest_code); } -struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) +struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id) { int r; size_t stack_size; - unsigned long stack_vaddr; + unsigned long stack_gva; unsigned long current_gp = 0; struct kvm_mp_state mps; struct kvm_vcpu *vcpu; stack_size = vm->page_size == 4096 ? 
DEFAULT_STACK_PGS * vm->page_size : vm->page_size; - stack_vaddr = __vm_vaddr_alloc(vm, stack_size, - DEFAULT_RISCV_GUEST_STACK_VADDR_MIN, - MEM_REGION_DATA); + stack_gva = __vm_alloc(vm, stack_size, + DEFAULT_RISCV_GUEST_STACK_VADDR_MIN, + MEM_REGION_DATA); vcpu = __vm_vcpu_add(vm, vcpu_id); riscv_vcpu_mmu_setup(vcpu); @@ -344,7 +343,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.gp), current_gp); /* Setup stack pointer and program counter of guest */ - vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.sp), stack_vaddr + stack_size); + vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.sp), stack_gva + stack_size); /* Setup sscratch for guest_get_vcpuid() */ vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(sscratch), vcpu_id); @@ -358,7 +357,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...) { va_list ap; - uint64_t id = RISCV_CORE_REG(regs.a0); + u64 id = RISCV_CORE_REG(regs.a0); int i; TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n" @@ -393,7 +392,7 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...) 
id = RISCV_CORE_REG(regs.a7); break; } - vcpu_set_reg(vcpu, id, va_arg(ap, uint64_t)); + vcpu_set_reg(vcpu, id, va_arg(ap, u64)); } va_end(ap); @@ -449,10 +448,10 @@ void vcpu_init_vector_tables(struct kvm_vcpu *vcpu) void vm_init_vector_tables(struct kvm_vm *vm) { - vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers), - vm->page_size, MEM_REGION_DATA); + vm->handlers = __vm_alloc(vm, sizeof(struct handlers), vm->page_size, + MEM_REGION_DATA); - *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers; + *(gva_t *)addr_gva2hva(vm, (gva_t)(&exception_handlers)) = vm->handlers; } void vm_install_exception_handler(struct kvm_vm *vm, int vector, exception_handler_fn handler) @@ -470,7 +469,7 @@ void vm_install_interrupt_handler(struct kvm_vm *vm, exception_handler_fn handle handlers->exception_handlers[1][0] = handler; } -uint32_t guest_get_vcpuid(void) +u32 guest_get_vcpuid(void) { return csr_read(CSR_SSCRATCH); } @@ -544,10 +543,10 @@ void kvm_selftest_arch_init(void) unsigned long riscv64_get_satp_mode(void) { int kvm_fd, vm_fd, vcpu_fd, err; - uint64_t val; + u64 val; struct kvm_one_reg reg = { .id = RISCV_CONFIG_REG(satp_mode), - .addr = (uint64_t)&val, + .addr = (u64)&val, }; kvm_fd = open_kvm_dev_path_or_exit(); diff --git a/tools/testing/selftests/kvm/lib/s390/diag318_test_handler.c b/tools/testing/selftests/kvm/lib/s390/diag318_test_handler.c index 2c432fa164f1..f5480473f192 100644 --- a/tools/testing/selftests/kvm/lib/s390/diag318_test_handler.c +++ b/tools/testing/selftests/kvm/lib/s390/diag318_test_handler.c @@ -13,7 +13,7 @@ static void guest_code(void) { - uint64_t diag318_info = 0x12345678; + u64 diag318_info = 0x12345678; asm volatile ("diag %0,0,0x318\n" : : "d" (diag318_info)); } @@ -23,13 +23,13 @@ static void guest_code(void) * we create an ad-hoc VM here to handle the instruction then extract the * necessary data. It is up to the caller to decide what to do with that data. 
*/ -static uint64_t diag318_handler(void) +static u64 diag318_handler(void) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; struct kvm_run *run; - uint64_t reg; - uint64_t diag318_info; + u64 reg; + u64 diag318_info; vm = vm_create_with_one_vcpu(&vcpu, guest_code); vcpu_run(vcpu); @@ -51,9 +51,9 @@ static uint64_t diag318_handler(void) return diag318_info; } -uint64_t get_diag318_info(void) +u64 get_diag318_info(void) { - static uint64_t diag318_info; + static u64 diag318_info; static bool printed_skip; /* diff --git a/tools/testing/selftests/kvm/lib/s390/facility.c b/tools/testing/selftests/kvm/lib/s390/facility.c index d540812d911a..9a778054f07f 100644 --- a/tools/testing/selftests/kvm/lib/s390/facility.c +++ b/tools/testing/selftests/kvm/lib/s390/facility.c @@ -10,5 +10,5 @@ #include "facility.h" -uint64_t stfl_doublewords[NB_STFL_DOUBLEWORDS]; +u64 stfl_doublewords[NB_STFL_DOUBLEWORDS]; bool stfle_flag; diff --git a/tools/testing/selftests/kvm/lib/s390/processor.c b/tools/testing/selftests/kvm/lib/s390/processor.c index 6a9a660413a7..a9adb3782b35 100644 --- a/tools/testing/selftests/kvm/lib/s390/processor.c +++ b/tools/testing/selftests/kvm/lib/s390/processor.c @@ -12,7 +12,7 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm) { - vm_paddr_t paddr; + gpa_t gpa; TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x", vm->page_size); @@ -20,12 +20,12 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm) if (vm->mmu.pgd_created) return; - paddr = vm_phy_pages_alloc(vm, PAGES_PER_REGION, + gpa = vm_phy_pages_alloc(vm, PAGES_PER_REGION, KVM_GUEST_PAGE_TABLE_MIN_PADDR, vm->memslots[MEM_REGION_PT]); - memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size); + memset(addr_gpa2hva(vm, gpa), 0xff, PAGES_PER_REGION * vm->page_size); - vm->mmu.pgd = paddr; + vm->mmu.pgd = gpa; vm->mmu.pgd_created = true; } @@ -34,9 +34,9 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm) * a page table (ri == 4). 
Returns a suitable region/segment table entry * which points to the freshly allocated pages. */ -static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri) +static u64 virt_alloc_region(struct kvm_vm *vm, int ri) { - uint64_t taddr; + u64 taddr; taddr = vm_phy_pages_alloc(vm, ri < 4 ? PAGES_PER_REGION : 1, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0); @@ -47,26 +47,24 @@ static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri) | ((ri < 4 ? (PAGES_PER_REGION - 1) : 0) & REGION_ENTRY_LENGTH); } -void virt_arch_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa) +void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa) { int ri, idx; - uint64_t *entry; + u64 *entry; TEST_ASSERT((gva % vm->page_size) == 0, - "Virtual address not on page boundary,\n" - " vaddr: 0x%lx vm->page_size: 0x%x", - gva, vm->page_size); - TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, - (gva >> vm->page_shift)), - "Invalid virtual address, vaddr: 0x%lx", - gva); + "Virtual address not on page boundary,\n" + " gva: 0x%lx vm->page_size: 0x%x", + gva, vm->page_size); + TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)), + "Invalid virtual address, gva: 0x%lx", gva); TEST_ASSERT((gpa % vm->page_size) == 0, "Physical address not on page boundary,\n" - " paddr: 0x%lx vm->page_size: 0x%x", + " gpa: 0x%lx vm->page_size: 0x%x", gva, vm->page_size); TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn, "Physical address beyond beyond maximum supported,\n" - " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", + " gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", gva, vm->max_gfn, vm->page_size); /* Walk through region and segment tables */ @@ -86,10 +84,10 @@ void virt_arch_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa) entry[idx] = gpa; } -vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) +gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva) { int ri, idx; - uint64_t *entry; + u64 *entry; TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 
0x%x", vm->page_size); @@ -111,10 +109,10 @@ vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) return (entry[idx] & ~0xffful) + (gva & 0xffful); } -static void virt_dump_ptes(FILE *stream, struct kvm_vm *vm, uint8_t indent, - uint64_t ptea_start) +static void virt_dump_ptes(FILE *stream, struct kvm_vm *vm, u8 indent, + u64 ptea_start) { - uint64_t *pte, ptea; + u64 *pte, ptea; for (ptea = ptea_start; ptea < ptea_start + 0x100 * 8; ptea += 8) { pte = addr_gpa2hva(vm, ptea); @@ -125,10 +123,10 @@ static void virt_dump_ptes(FILE *stream, struct kvm_vm *vm, uint8_t indent, } } -static void virt_dump_region(FILE *stream, struct kvm_vm *vm, uint8_t indent, - uint64_t reg_tab_addr) +static void virt_dump_region(FILE *stream, struct kvm_vm *vm, u8 indent, + u64 reg_tab_addr) { - uint64_t addr, *entry; + u64 addr, *entry; for (addr = reg_tab_addr; addr < reg_tab_addr + 0x400 * 8; addr += 8) { entry = addr_gpa2hva(vm, addr); @@ -147,7 +145,7 @@ static void virt_dump_region(FILE *stream, struct kvm_vm *vm, uint8_t indent, } } -void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) +void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent) { if (!vm->mmu.pgd_created) return; @@ -160,10 +158,10 @@ void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code) vcpu->run->psw_addr = (uintptr_t)guest_code; } -struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) +struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id) { size_t stack_size = DEFAULT_STACK_PGS * getpagesize(); - uint64_t stack_vaddr; + u64 stack_gva; struct kvm_regs regs; struct kvm_sregs sregs; struct kvm_vcpu *vcpu; @@ -171,15 +169,14 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x", vm->page_size); - stack_vaddr = __vm_vaddr_alloc(vm, stack_size, - DEFAULT_GUEST_STACK_VADDR_MIN, - MEM_REGION_DATA); + stack_gva = __vm_alloc(vm, stack_size, 
DEFAULT_GUEST_STACK_VADDR_MIN, + MEM_REGION_DATA); vcpu = __vm_vcpu_add(vm, vcpu_id); /* Setup guest registers */ vcpu_regs_get(vcpu, ®s); - regs.gprs[15] = stack_vaddr + (DEFAULT_STACK_PGS * getpagesize()) - 160; + regs.gprs[15] = stack_gva + (DEFAULT_STACK_PGS * getpagesize()) - 160; vcpu_regs_set(vcpu, ®s); vcpu_sregs_get(vcpu, &sregs); @@ -206,13 +203,13 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...) vcpu_regs_get(vcpu, ®s); for (i = 0; i < num; i++) - regs.gprs[i + 2] = va_arg(ap, uint64_t); + regs.gprs[i + 2] = va_arg(ap, u64); vcpu_regs_set(vcpu, ®s); va_end(ap); } -void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent) +void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, u8 indent) { fprintf(stream, "%*spstate: psw: 0x%.16llx:0x%.16llx\n", indent, "", vcpu->run->psw_mask, vcpu->run->psw_addr); diff --git a/tools/testing/selftests/kvm/lib/sparsebit.c b/tools/testing/selftests/kvm/lib/sparsebit.c index a99188f87a38..4d845000de15 100644 --- a/tools/testing/selftests/kvm/lib/sparsebit.c +++ b/tools/testing/selftests/kvm/lib/sparsebit.c @@ -76,11 +76,11 @@ * the use of a binary-search tree, where each node contains at least * the following members: * - * typedef uint64_t sparsebit_idx_t; - * typedef uint64_t sparsebit_num_t; + * typedef u64 sparsebit_idx_t; + * typedef u64 sparsebit_num_t; * * sparsebit_idx_t idx; - * uint32_t mask; + * u32 mask; * sparsebit_num_t num_after; * * The idx member contains the bit index of the first bit described by this @@ -162,7 +162,7 @@ #define DUMP_LINE_MAX 100 /* Does not include indent amount */ -typedef uint32_t mask_t; +typedef u32 mask_t; #define MASK_BITS (sizeof(mask_t) * CHAR_BIT) struct node { @@ -2056,9 +2056,9 @@ unsigned char get8(void) return ch; } -uint64_t get64(void) +u64 get64(void) { - uint64_t x; + u64 x; x = get8(); x = (x << 8) | get8(); @@ -2074,9 +2074,9 @@ int main(void) { s = sparsebit_alloc(); for (;;) { - uint8_t op = get8() & 0xf; - uint64_t first = 
get64(); - uint64_t last = get64(); + u8 op = get8() & 0xf; + u64 first = get64(); + u64 last = get64(); operate(op, first, last); } diff --git a/tools/testing/selftests/kvm/lib/test_util.c b/tools/testing/selftests/kvm/lib/test_util.c index 8a1848586a85..bab1bd2b775b 100644 --- a/tools/testing/selftests/kvm/lib/test_util.c +++ b/tools/testing/selftests/kvm/lib/test_util.c @@ -30,15 +30,15 @@ void __attribute__((used)) expect_sigbus_handler(int signum) * Park-Miller LCG using standard constants. */ -struct guest_random_state new_guest_random_state(uint32_t seed) +struct guest_random_state new_guest_random_state(u32 seed) { struct guest_random_state s = {.seed = seed}; return s; } -uint32_t guest_random_u32(struct guest_random_state *state) +u32 guest_random_u32(struct guest_random_state *state) { - state->seed = (uint64_t)state->seed * 48271 % ((uint32_t)(1 << 31) - 1); + state->seed = (u64)state->seed * 48271 % ((u32)(1 << 31) - 1); return state->seed; } @@ -83,12 +83,12 @@ size_t parse_size(const char *size) return base << shift; } -int64_t timespec_to_ns(struct timespec ts) +s64 timespec_to_ns(struct timespec ts) { - return (int64_t)ts.tv_nsec + 1000000000LL * (int64_t)ts.tv_sec; + return (s64)ts.tv_nsec + 1000000000LL * (s64)ts.tv_sec; } -struct timespec timespec_add_ns(struct timespec ts, int64_t ns) +struct timespec timespec_add_ns(struct timespec ts, s64 ns) { struct timespec res; @@ -101,15 +101,15 @@ struct timespec timespec_add_ns(struct timespec ts, int64_t ns) struct timespec timespec_add(struct timespec ts1, struct timespec ts2) { - int64_t ns1 = timespec_to_ns(ts1); - int64_t ns2 = timespec_to_ns(ts2); + s64 ns1 = timespec_to_ns(ts1); + s64 ns2 = timespec_to_ns(ts2); return timespec_add_ns((struct timespec){0}, ns1 + ns2); } struct timespec timespec_sub(struct timespec ts1, struct timespec ts2) { - int64_t ns1 = timespec_to_ns(ts1); - int64_t ns2 = timespec_to_ns(ts2); + s64 ns1 = timespec_to_ns(ts1); + s64 ns2 = timespec_to_ns(ts2); return 
timespec_add_ns((struct timespec){0}, ns1 - ns2); } @@ -123,7 +123,7 @@ struct timespec timespec_elapsed(struct timespec start) struct timespec timespec_div(struct timespec ts, int divisor) { - int64_t ns = timespec_to_ns(ts) / divisor; + s64 ns = timespec_to_ns(ts) / divisor; return timespec_add_ns((struct timespec){0}, ns); } @@ -225,7 +225,7 @@ size_t get_def_hugetlb_pagesz(void) #define ANON_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS) #define ANON_HUGE_FLAGS (ANON_FLAGS | MAP_HUGETLB) -const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i) +const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(u32 i) { static const struct vm_mem_backing_src_alias aliases[] = { [VM_MEM_SRC_ANONYMOUS] = { @@ -317,9 +317,9 @@ const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i) #define MAP_HUGE_PAGE_SIZE(x) (1ULL << ((x >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK)) -size_t get_backing_src_pagesz(uint32_t i) +size_t get_backing_src_pagesz(u32 i) { - uint32_t flag = vm_mem_backing_src_alias(i)->flag; + u32 flag = vm_mem_backing_src_alias(i)->flag; switch (i) { case VM_MEM_SRC_ANONYMOUS: @@ -335,7 +335,7 @@ size_t get_backing_src_pagesz(uint32_t i) } } -bool is_backing_src_hugetlb(uint32_t i) +bool is_backing_src_hugetlb(u32 i) { return !!(vm_mem_backing_src_alias(i)->flag & MAP_HUGETLB); } diff --git a/tools/testing/selftests/kvm/lib/ucall_common.c b/tools/testing/selftests/kvm/lib/ucall_common.c index 42151e571953..029ce21f9f2f 100644 --- a/tools/testing/selftests/kvm/lib/ucall_common.c +++ b/tools/testing/selftests/kvm/lib/ucall_common.c @@ -14,7 +14,7 @@ struct ucall_header { struct ucall ucalls[KVM_MAX_VCPUS]; }; -int ucall_nr_pages_required(uint64_t page_size) +int ucall_nr_pages_required(u64 page_size) { return align_up(sizeof(struct ucall_header), page_size) / page_size; } @@ -25,16 +25,16 @@ int ucall_nr_pages_required(uint64_t page_size) */ static struct ucall_header *ucall_pool; -void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) 
+void ucall_init(struct kvm_vm *vm, gpa_t mmio_gpa) { struct ucall_header *hdr; struct ucall *uc; - vm_vaddr_t vaddr; + gva_t gva; int i; - vaddr = vm_vaddr_alloc_shared(vm, sizeof(*hdr), KVM_UTIL_MIN_VADDR, - MEM_REGION_DATA); - hdr = (struct ucall_header *)addr_gva2hva(vm, vaddr); + gva = vm_alloc_shared(vm, sizeof(*hdr), KVM_UTIL_MIN_VADDR, + MEM_REGION_DATA); + hdr = (struct ucall_header *)addr_gva2hva(vm, gva); memset(hdr, 0, sizeof(*hdr)); for (i = 0; i < KVM_MAX_VCPUS; ++i) { @@ -42,7 +42,7 @@ void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) uc->hva = uc; } - write_guest_global(vm, ucall_pool, (struct ucall_header *)vaddr); + write_guest_global(vm, ucall_pool, (struct ucall_header *)gva); ucall_arch_init(vm, mmio_gpa); } @@ -79,7 +79,7 @@ static void ucall_free(struct ucall *uc) clear_bit(uc - ucall_pool->ucalls, ucall_pool->in_use); } -void ucall_assert(uint64_t cmd, const char *exp, const char *file, +void ucall_assert(u64 cmd, const char *exp, const char *file, unsigned int line, const char *fmt, ...) { struct ucall *uc; @@ -88,20 +88,20 @@ void ucall_assert(uint64_t cmd, const char *exp, const char *file, uc = ucall_alloc(); uc->cmd = cmd; - WRITE_ONCE(uc->args[GUEST_ERROR_STRING], (uint64_t)(exp)); - WRITE_ONCE(uc->args[GUEST_FILE], (uint64_t)(file)); + WRITE_ONCE(uc->args[GUEST_ERROR_STRING], (u64)(exp)); + WRITE_ONCE(uc->args[GUEST_FILE], (u64)(file)); WRITE_ONCE(uc->args[GUEST_LINE], line); va_start(va, fmt); guest_vsnprintf(uc->buffer, UCALL_BUFFER_LEN, fmt, va); va_end(va); - ucall_arch_do_ucall((vm_vaddr_t)uc->hva); + ucall_arch_do_ucall((gva_t)uc->hva); ucall_free(uc); } -void ucall_fmt(uint64_t cmd, const char *fmt, ...) +void ucall_fmt(u64 cmd, const char *fmt, ...) { struct ucall *uc; va_list va; @@ -113,12 +113,12 @@ void ucall_fmt(uint64_t cmd, const char *fmt, ...) 
guest_vsnprintf(uc->buffer, UCALL_BUFFER_LEN, fmt, va); va_end(va); - ucall_arch_do_ucall((vm_vaddr_t)uc->hva); + ucall_arch_do_ucall((gva_t)uc->hva); ucall_free(uc); } -void ucall(uint64_t cmd, int nargs, ...) +void ucall(u64 cmd, int nargs, ...) { struct ucall *uc; va_list va; @@ -132,15 +132,15 @@ void ucall(uint64_t cmd, int nargs, ...) va_start(va, nargs); for (i = 0; i < nargs; ++i) - WRITE_ONCE(uc->args[i], va_arg(va, uint64_t)); + WRITE_ONCE(uc->args[i], va_arg(va, u64)); va_end(va); - ucall_arch_do_ucall((vm_vaddr_t)uc->hva); + ucall_arch_do_ucall((gva_t)uc->hva); ucall_free(uc); } -uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc) +u64 get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc) { struct ucall ucall; void *addr; diff --git a/tools/testing/selftests/kvm/lib/userfaultfd_util.c b/tools/testing/selftests/kvm/lib/userfaultfd_util.c index 5bde176cedd5..ef8d76f71f83 100644 --- a/tools/testing/selftests/kvm/lib/userfaultfd_util.c +++ b/tools/testing/selftests/kvm/lib/userfaultfd_util.c @@ -27,7 +27,7 @@ static void *uffd_handler_thread_fn(void *arg) { struct uffd_reader_args *reader_args = (struct uffd_reader_args *)arg; int uffd = reader_args->uffd; - int64_t pages = 0; + s64 pages = 0; struct timespec start; struct timespec ts_diff; struct epoll_event evt; @@ -100,8 +100,8 @@ static void *uffd_handler_thread_fn(void *arg) } struct uffd_desc *uffd_setup_demand_paging(int uffd_mode, useconds_t delay, - void *hva, uint64_t len, - uint64_t num_readers, + void *hva, u64 len, + u64 num_readers, uffd_handler_t handler) { struct uffd_desc *uffd_desc; @@ -109,7 +109,7 @@ struct uffd_desc *uffd_setup_demand_paging(int uffd_mode, useconds_t delay, int uffd; struct uffdio_api uffdio_api; struct uffdio_register uffdio_register; - uint64_t expected_ioctls = ((uint64_t) 1) << _UFFDIO_COPY; + u64 expected_ioctls = ((u64)1) << _UFFDIO_COPY; int ret, i; PER_PAGE_DEBUG("Userfaultfd %s mode, faults resolved with %s\n", @@ -132,7 +132,7 @@ struct uffd_desc 
*uffd_setup_demand_paging(int uffd_mode, useconds_t delay, /* In order to get minor faults, prefault via the alias. */ if (is_minor) - expected_ioctls = ((uint64_t) 1) << _UFFDIO_CONTINUE; + expected_ioctls = ((u64)1) << _UFFDIO_CONTINUE; uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK); TEST_ASSERT(uffd >= 0, "uffd creation failed, errno: %d", errno); @@ -141,9 +141,9 @@ struct uffd_desc *uffd_setup_demand_paging(int uffd_mode, useconds_t delay, uffdio_api.features = 0; TEST_ASSERT(ioctl(uffd, UFFDIO_API, &uffdio_api) != -1, "ioctl UFFDIO_API failed: %" PRIu64, - (uint64_t)uffdio_api.api); + (u64)uffdio_api.api); - uffdio_register.range.start = (uint64_t)hva; + uffdio_register.range.start = (u64)hva; uffdio_register.range.len = len; uffdio_register.mode = uffd_mode; TEST_ASSERT(ioctl(uffd, UFFDIO_REGISTER, &uffdio_register) != -1, diff --git a/tools/testing/selftests/kvm/lib/x86/apic.c b/tools/testing/selftests/kvm/lib/x86/apic.c index 89153a333e83..5182fd0d6a76 100644 --- a/tools/testing/selftests/kvm/lib/x86/apic.c +++ b/tools/testing/selftests/kvm/lib/x86/apic.c @@ -14,7 +14,7 @@ void apic_disable(void) void xapic_enable(void) { - uint64_t val = rdmsr(MSR_IA32_APICBASE); + u64 val = rdmsr(MSR_IA32_APICBASE); /* Per SDM: to enable xAPIC when in x2APIC must first disable APIC */ if (val & MSR_IA32_APICBASE_EXTD) { diff --git a/tools/testing/selftests/kvm/lib/x86/hyperv.c b/tools/testing/selftests/kvm/lib/x86/hyperv.c index 15bc8cd583aa..d200c5c26e2e 100644 --- a/tools/testing/selftests/kvm/lib/x86/hyperv.c +++ b/tools/testing/selftests/kvm/lib/x86/hyperv.c @@ -76,23 +76,23 @@ bool kvm_hv_cpu_has(struct kvm_x86_cpu_feature feature) } struct hyperv_test_pages *vcpu_alloc_hyperv_test_pages(struct kvm_vm *vm, - vm_vaddr_t *p_hv_pages_gva) + gva_t *p_hv_pages_gva) { - vm_vaddr_t hv_pages_gva = vm_vaddr_alloc_page(vm); + gva_t hv_pages_gva = vm_alloc_page(vm); struct hyperv_test_pages *hv = addr_gva2hva(vm, hv_pages_gva); /* Setup of a region of guest memory 
for the VP Assist page. */ - hv->vp_assist = (void *)vm_vaddr_alloc_page(vm); + hv->vp_assist = (void *)vm_alloc_page(vm); hv->vp_assist_hva = addr_gva2hva(vm, (uintptr_t)hv->vp_assist); hv->vp_assist_gpa = addr_gva2gpa(vm, (uintptr_t)hv->vp_assist); /* Setup of a region of guest memory for the partition assist page. */ - hv->partition_assist = (void *)vm_vaddr_alloc_page(vm); + hv->partition_assist = (void *)vm_alloc_page(vm); hv->partition_assist_hva = addr_gva2hva(vm, (uintptr_t)hv->partition_assist); hv->partition_assist_gpa = addr_gva2gpa(vm, (uintptr_t)hv->partition_assist); /* Setup of a region of guest memory for the enlightened VMCS. */ - hv->enlightened_vmcs = (void *)vm_vaddr_alloc_page(vm); + hv->enlightened_vmcs = (void *)vm_alloc_page(vm); hv->enlightened_vmcs_hva = addr_gva2hva(vm, (uintptr_t)hv->enlightened_vmcs); hv->enlightened_vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)hv->enlightened_vmcs); @@ -100,9 +100,9 @@ struct hyperv_test_pages *vcpu_alloc_hyperv_test_pages(struct kvm_vm *vm, return hv; } -int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist) +int enable_vp_assist(u64 vp_assist_pa, void *vp_assist) { - uint64_t val = (vp_assist_pa & HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK) | + u64 val = (vp_assist_pa & HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK) | HV_X64_MSR_VP_ASSIST_PAGE_ENABLE; wrmsr(HV_X64_MSR_VP_ASSIST_PAGE, val); diff --git a/tools/testing/selftests/kvm/lib/x86/memstress.c b/tools/testing/selftests/kvm/lib/x86/memstress.c index f53414ba7103..61cf952cd2dc 100644 --- a/tools/testing/selftests/kvm/lib/x86/memstress.c +++ b/tools/testing/selftests/kvm/lib/x86/memstress.c @@ -16,7 +16,7 @@ #include "svm_util.h" #include "vmx.h" -void memstress_l2_guest_code(uint64_t vcpu_id) +void memstress_l2_guest_code(u64 vcpu_id) { memstress_guest_code(vcpu_id); vmcall(); @@ -32,7 +32,7 @@ __asm__( #define L2_GUEST_STACK_SIZE 64 -static void l1_vmx_code(struct vmx_pages *vmx, uint64_t vcpu_id) +static void l1_vmx_code(struct vmx_pages *vmx, u64 
vcpu_id) { unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; unsigned long *rsp; @@ -51,7 +51,7 @@ static void l1_vmx_code(struct vmx_pages *vmx, uint64_t vcpu_id) GUEST_DONE(); } -static void l1_svm_code(struct svm_test_data *svm, uint64_t vcpu_id) +static void l1_svm_code(struct svm_test_data *svm, u64 vcpu_id) { unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; unsigned long *rsp; @@ -67,7 +67,7 @@ static void l1_svm_code(struct svm_test_data *svm, uint64_t vcpu_id) } -static void memstress_l1_guest_code(void *data, uint64_t vcpu_id) +static void memstress_l1_guest_code(void *data, u64 vcpu_id) { if (this_cpu_has(X86_FEATURE_VMX)) l1_vmx_code(data, vcpu_id); @@ -75,7 +75,7 @@ static void memstress_l1_guest_code(void *data, uint64_t vcpu_id) l1_svm_code(data, vcpu_id); } -uint64_t memstress_nested_pages(int nr_vcpus) +u64 memstress_nested_pages(int nr_vcpus) { /* * 513 page tables is enough to identity-map 256 TiB of L2 with 1G @@ -87,7 +87,7 @@ uint64_t memstress_nested_pages(int nr_vcpus) static void memstress_setup_ept_mappings(struct kvm_vm *vm) { - uint64_t start, end; + u64 start, end; /* * Identity map the first 4G and the test region with 1G pages so that @@ -104,7 +104,7 @@ static void memstress_setup_ept_mappings(struct kvm_vm *vm) void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[]) { struct kvm_regs regs; - vm_vaddr_t nested_gva; + gva_t nested_gva; int vcpu_id; TEST_REQUIRE(kvm_cpu_has_tdp()); diff --git a/tools/testing/selftests/kvm/lib/x86/pmu.c b/tools/testing/selftests/kvm/lib/x86/pmu.c index 34cb57d1d671..0851b74b4e46 100644 --- a/tools/testing/selftests/kvm/lib/x86/pmu.c +++ b/tools/testing/selftests/kvm/lib/x86/pmu.c @@ -11,7 +11,7 @@ #include "processor.h" #include "pmu.h" -const uint64_t intel_pmu_arch_events[] = { +const u64 intel_pmu_arch_events[] = { INTEL_ARCH_CPU_CYCLES, INTEL_ARCH_INSTRUCTIONS_RETIRED, INTEL_ARCH_REFERENCE_CYCLES, @@ -28,7 +28,7 @@ const uint64_t intel_pmu_arch_events[] = { }; 
kvm_static_assert(ARRAY_SIZE(intel_pmu_arch_events) == NR_INTEL_ARCH_EVENTS); -const uint64_t amd_pmu_zen_events[] = { +const u64 amd_pmu_zen_events[] = { AMD_ZEN_CORE_CYCLES, AMD_ZEN_INSTRUCTIONS_RETIRED, AMD_ZEN_BRANCHES_RETIRED, @@ -50,7 +50,7 @@ kvm_static_assert(ARRAY_SIZE(amd_pmu_zen_events) == NR_AMD_ZEN_EVENTS); * be overcounted on these certain instructions, but for Clearwater Forest * only "Instruction Retired" event is overcounted on these instructions. */ -static uint64_t get_pmu_errata(void) +static u64 get_pmu_errata(void) { if (!this_cpu_is_intel()) return 0; @@ -72,7 +72,7 @@ static uint64_t get_pmu_errata(void) } } -uint64_t pmu_errata_mask; +u64 pmu_errata_mask; void kvm_init_pmu_errata(void) { diff --git a/tools/testing/selftests/kvm/lib/x86/processor.c b/tools/testing/selftests/kvm/lib/x86/processor.c index 01f0f97d4430..b51467d70f6e 100644 --- a/tools/testing/selftests/kvm/lib/x86/processor.c +++ b/tools/testing/selftests/kvm/lib/x86/processor.c @@ -21,13 +21,13 @@ #define KERNEL_DS 0x10 #define KERNEL_TSS 0x18 -vm_vaddr_t exception_handlers; +gva_t exception_handlers; bool host_cpu_is_amd; bool host_cpu_is_intel; bool host_cpu_is_hygon; bool host_cpu_is_amd_compatible; bool is_forced_emulation_enabled; -uint64_t guest_tsc_khz; +u64 guest_tsc_khz; const char *ex_str(int vector) { @@ -62,7 +62,7 @@ const char *ex_str(int vector) } } -static void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent) +static void regs_dump(FILE *stream, struct kvm_regs *regs, u8 indent) { fprintf(stream, "%*srax: 0x%.16llx rbx: 0x%.16llx " "rcx: 0x%.16llx rdx: 0x%.16llx\n", @@ -86,7 +86,7 @@ static void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent) } static void segment_dump(FILE *stream, struct kvm_segment *segment, - uint8_t indent) + u8 indent) { fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.8x " "selector: 0x%.4x type: 0x%.2x\n", @@ -103,7 +103,7 @@ static void segment_dump(FILE *stream, struct kvm_segment *segment, } static void 
dtable_dump(FILE *stream, struct kvm_dtable *dtable, - uint8_t indent) + u8 indent) { fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.4x " "padding: 0x%.4x 0x%.4x 0x%.4x\n", @@ -111,7 +111,7 @@ static void dtable_dump(FILE *stream, struct kvm_dtable *dtable, dtable->padding[0], dtable->padding[1], dtable->padding[2]); } -static void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent) +static void sregs_dump(FILE *stream, struct kvm_sregs *sregs, u8 indent) { unsigned int i; @@ -207,37 +207,37 @@ void tdp_mmu_init(struct kvm_vm *vm, int pgtable_levels, } static void *virt_get_pte(struct kvm_vm *vm, struct kvm_mmu *mmu, - uint64_t *parent_pte, uint64_t vaddr, int level) + u64 *parent_pte, gva_t gva, int level) { - uint64_t pt_gpa = PTE_GET_PA(*parent_pte); - uint64_t *page_table = addr_gpa2hva(vm, pt_gpa); - int index = (vaddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu; + u64 pt_gpa = PTE_GET_PA(*parent_pte); + u64 *page_table = addr_gpa2hva(vm, pt_gpa); + int index = (gva >> PG_LEVEL_SHIFT(level)) & 0x1ffu; TEST_ASSERT((*parent_pte == mmu->pgd) || is_present_pte(mmu, parent_pte), "Parent PTE (level %d) not PRESENT for gva: 0x%08lx", - level + 1, vaddr); + level + 1, gva); return &page_table[index]; } -static uint64_t *virt_create_upper_pte(struct kvm_vm *vm, - struct kvm_mmu *mmu, - uint64_t *parent_pte, - uint64_t vaddr, - uint64_t paddr, - int current_level, - int target_level) +static u64 *virt_create_upper_pte(struct kvm_vm *vm, + struct kvm_mmu *mmu, + u64 *parent_pte, + gva_t gva, + gpa_t gpa, + int current_level, + int target_level) { - uint64_t *pte = virt_get_pte(vm, mmu, parent_pte, vaddr, current_level); + u64 *pte = virt_get_pte(vm, mmu, parent_pte, gva, current_level); - paddr = vm_untag_gpa(vm, paddr); + gpa = vm_untag_gpa(vm, gpa); if (!is_present_pte(mmu, pte)) { *pte = PTE_PRESENT_MASK(mmu) | PTE_READABLE_MASK(mmu) | PTE_WRITABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu) | PTE_ALWAYS_SET_MASK(mmu); if (current_level == target_level) - *pte |= 
PTE_HUGE_MASK(mmu) | (paddr & PHYSICAL_PAGE_MASK); + *pte |= PTE_HUGE_MASK(mmu) | (gpa & PHYSICAL_PAGE_MASK); else *pte |= vm_alloc_page_table(vm) & PHYSICAL_PAGE_MASK; } else { @@ -247,39 +247,39 @@ static uint64_t *virt_create_upper_pte(struct kvm_vm *vm, * this level. */ TEST_ASSERT(current_level != target_level, - "Cannot create hugepage at level: %u, vaddr: 0x%lx", - current_level, vaddr); + "Cannot create hugepage at level: %u, gva: 0x%lx", + current_level, gva); TEST_ASSERT(!is_huge_pte(mmu, pte), - "Cannot create page table at level: %u, vaddr: 0x%lx", - current_level, vaddr); + "Cannot create page table at level: %u, gva: 0x%lx", + current_level, gva); } return pte; } -void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, uint64_t vaddr, - uint64_t paddr, int level) +void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, gva_t gva, + gpa_t gpa, int level) { - const uint64_t pg_size = PG_LEVEL_SIZE(level); - uint64_t *pte = &mmu->pgd; + const u64 pg_size = PG_LEVEL_SIZE(level); + u64 *pte = &mmu->pgd; int current_level; TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K, "Unknown or unsupported guest mode: 0x%x", vm->mode); - TEST_ASSERT((vaddr % pg_size) == 0, + TEST_ASSERT((gva % pg_size) == 0, "Virtual address not aligned,\n" - "vaddr: 0x%lx page size: 0x%lx", vaddr, pg_size); - TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (vaddr >> vm->page_shift)), - "Invalid virtual address, vaddr: 0x%lx", vaddr); - TEST_ASSERT((paddr % pg_size) == 0, + "gva: 0x%lx page size: 0x%lx", gva, pg_size); + TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)), + "Invalid virtual address, gva: 0x%lx", gva); + TEST_ASSERT((gpa % pg_size) == 0, "Physical address not aligned,\n" - " paddr: 0x%lx page size: 0x%lx", paddr, pg_size); - TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn, + " gpa: 0x%lx page size: 0x%lx", gpa, pg_size); + TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn, "Physical address beyond maximum supported,\n" - " paddr: 0x%lx 
vm->max_gfn: 0x%lx vm->page_size: 0x%x", - paddr, vm->max_gfn, vm->page_size); - TEST_ASSERT(vm_untag_gpa(vm, paddr) == paddr, - "Unexpected bits in paddr: %lx", paddr); + " gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", + gpa, vm->max_gfn, vm->page_size); + TEST_ASSERT(vm_untag_gpa(vm, gpa) == gpa, + "Unexpected bits in gpa: %lx", gpa); TEST_ASSERT(!PTE_EXECUTABLE_MASK(mmu) || !PTE_NX_MASK(mmu), "X and NX bit masks cannot be used simultaneously"); @@ -291,40 +291,40 @@ void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, uint64_t vaddr, for (current_level = mmu->pgtable_levels; current_level > PG_LEVEL_4K; current_level--) { - pte = virt_create_upper_pte(vm, mmu, pte, vaddr, paddr, + pte = virt_create_upper_pte(vm, mmu, pte, gva, gpa, current_level, level); if (is_huge_pte(mmu, pte)) return; } /* Fill in page table entry. */ - pte = virt_get_pte(vm, mmu, pte, vaddr, PG_LEVEL_4K); + pte = virt_get_pte(vm, mmu, pte, gva, PG_LEVEL_4K); TEST_ASSERT(!is_present_pte(mmu, pte), - "PTE already present for 4k page at vaddr: 0x%lx", vaddr); + "PTE already present for 4k page at gva: 0x%lx", gva); *pte = PTE_PRESENT_MASK(mmu) | PTE_READABLE_MASK(mmu) | PTE_WRITABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu) | - PTE_ALWAYS_SET_MASK(mmu) | (paddr & PHYSICAL_PAGE_MASK); + PTE_ALWAYS_SET_MASK(mmu) | (gpa & PHYSICAL_PAGE_MASK); /* * Neither SEV nor TDX supports shared page tables, so only the final * leaf PTE needs manually set the C/S-bit. 
*/ - if (vm_is_gpa_protected(vm, paddr)) + if (vm_is_gpa_protected(vm, gpa)) *pte |= PTE_C_BIT_MASK(mmu); else *pte |= PTE_S_BIT_MASK(mmu); } -void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) +void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa) { - __virt_pg_map(vm, &vm->mmu, vaddr, paddr, PG_LEVEL_4K); + __virt_pg_map(vm, &vm->mmu, gva, gpa, PG_LEVEL_4K); } -void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, - uint64_t nr_bytes, int level) +void virt_map_level(struct kvm_vm *vm, gva_t gva, gpa_t gpa, + u64 nr_bytes, int level) { - uint64_t pg_size = PG_LEVEL_SIZE(level); - uint64_t nr_pages = nr_bytes / pg_size; + u64 pg_size = PG_LEVEL_SIZE(level); + u64 nr_pages = nr_bytes / pg_size; int i; TEST_ASSERT(nr_bytes % pg_size == 0, @@ -332,16 +332,16 @@ void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, nr_bytes, pg_size); for (i = 0; i < nr_pages; i++) { - __virt_pg_map(vm, &vm->mmu, vaddr, paddr, level); - sparsebit_set_num(vm->vpages_mapped, vaddr >> vm->page_shift, + __virt_pg_map(vm, &vm->mmu, gva, gpa, level); + sparsebit_set_num(vm->vpages_mapped, gva >> vm->page_shift, nr_bytes / PAGE_SIZE); - vaddr += pg_size; - paddr += pg_size; + gva += pg_size; + gpa += pg_size; } } -static bool vm_is_target_pte(struct kvm_mmu *mmu, uint64_t *pte, +static bool vm_is_target_pte(struct kvm_mmu *mmu, u64 *pte, int *level, int current_level) { if (is_huge_pte(mmu, pte)) { @@ -354,13 +354,13 @@ static bool vm_is_target_pte(struct kvm_mmu *mmu, uint64_t *pte, return *level == current_level; } -static uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, - struct kvm_mmu *mmu, - uint64_t vaddr, - int *level) +static u64 *__vm_get_page_table_entry(struct kvm_vm *vm, + struct kvm_mmu *mmu, + gva_t gva, + int *level) { int va_width = 12 + (mmu->pgtable_levels) * 9; - uint64_t *pte = &mmu->pgd; + u64 *pte = &mmu->pgd; int current_level; TEST_ASSERT(!vm->arch.is_pt_protected, @@ -371,49 +371,46 @@ static 
uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K, "Unknown or unsupported guest mode: 0x%x", vm->mode); - TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, - (vaddr >> vm->page_shift)), - "Invalid virtual address, vaddr: 0x%lx", - vaddr); + TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)), + "Invalid virtual address, gva: 0x%lx", gva); /* - * Check that the vaddr is a sign-extended va_width value. + * Check that the gva is a sign-extended va_width value. */ - TEST_ASSERT(vaddr == - (((int64_t)vaddr << (64 - va_width) >> (64 - va_width))), + TEST_ASSERT(gva == (((s64)gva << (64 - va_width) >> (64 - va_width))), "Canonical check failed. The virtual address is invalid."); for (current_level = mmu->pgtable_levels; current_level > PG_LEVEL_4K; current_level--) { - pte = virt_get_pte(vm, mmu, pte, vaddr, current_level); + pte = virt_get_pte(vm, mmu, pte, gva, current_level); if (vm_is_target_pte(mmu, pte, level, current_level)) return pte; } - return virt_get_pte(vm, mmu, pte, vaddr, PG_LEVEL_4K); + return virt_get_pte(vm, mmu, pte, gva, PG_LEVEL_4K); } -uint64_t *tdp_get_pte(struct kvm_vm *vm, uint64_t l2_gpa) +u64 *tdp_get_pte(struct kvm_vm *vm, u64 l2_gpa) { int level = PG_LEVEL_4K; return __vm_get_page_table_entry(vm, &vm->stage2_mmu, l2_gpa, &level); } -uint64_t *vm_get_pte(struct kvm_vm *vm, uint64_t vaddr) +u64 *vm_get_pte(struct kvm_vm *vm, gva_t gva) { int level = PG_LEVEL_4K; - return __vm_get_page_table_entry(vm, &vm->mmu, vaddr, &level); + return __vm_get_page_table_entry(vm, &vm->mmu, gva, &level); } -void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) +void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent) { struct kvm_mmu *mmu = &vm->mmu; - uint64_t *pml4e, *pml4e_start; - uint64_t *pdpe, *pdpe_start; - uint64_t *pde, *pde_start; - uint64_t *pte, *pte_start; + u64 *pml4e, *pml4e_start; + u64 *pdpe, *pdpe_start; + u64 *pde, *pde_start; + u64 *pte, *pte_start; if 
(!mmu->pgd_created) return; @@ -423,8 +420,8 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) fprintf(stream, "%*s index hvaddr gpaddr " "addr w exec dirty\n", indent, ""); - pml4e_start = (uint64_t *) addr_gpa2hva(vm, mmu->pgd); - for (uint16_t n1 = 0; n1 <= 0x1ffu; n1++) { + pml4e_start = (u64 *)addr_gpa2hva(vm, mmu->pgd); + for (u16 n1 = 0; n1 <= 0x1ffu; n1++) { pml4e = &pml4e_start[n1]; if (!is_present_pte(mmu, pml4e)) continue; @@ -436,7 +433,7 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) is_writable_pte(mmu, pml4e), is_nx_pte(mmu, pml4e)); pdpe_start = addr_gpa2hva(vm, *pml4e & PHYSICAL_PAGE_MASK); - for (uint16_t n2 = 0; n2 <= 0x1ffu; n2++) { + for (u16 n2 = 0; n2 <= 0x1ffu; n2++) { pdpe = &pdpe_start[n2]; if (!is_present_pte(mmu, pdpe)) continue; @@ -449,7 +446,7 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) is_nx_pte(mmu, pdpe)); pde_start = addr_gpa2hva(vm, *pdpe & PHYSICAL_PAGE_MASK); - for (uint16_t n3 = 0; n3 <= 0x1ffu; n3++) { + for (u16 n3 = 0; n3 <= 0x1ffu; n3++) { pde = &pde_start[n3]; if (!is_present_pte(mmu, pde)) continue; @@ -461,7 +458,7 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) is_nx_pte(mmu, pde)); pte_start = addr_gpa2hva(vm, *pde & PHYSICAL_PAGE_MASK); - for (uint16_t n4 = 0; n4 <= 0x1ffu; n4++) { + for (u16 n4 = 0; n4 <= 0x1ffu; n4++) { pte = &pte_start[n4]; if (!is_present_pte(mmu, pte)) continue; @@ -475,10 +472,10 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) is_writable_pte(mmu, pte), is_nx_pte(mmu, pte), is_dirty_pte(mmu, pte), - ((uint64_t) n1 << 27) - | ((uint64_t) n2 << 18) - | ((uint64_t) n3 << 9) - | ((uint64_t) n4)); + ((u64)n1 << 27) + | ((u64)n2 << 18) + | ((u64)n3 << 9) + | ((u64)n4)); } } } @@ -498,26 +495,24 @@ bool kvm_cpu_has_tdp(void) return kvm_cpu_has_ept() || kvm_cpu_has_npt(); } -void __tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr, - uint64_t size, int level) +void 
__tdp_map(struct kvm_vm *vm, gpa_t l2_gpa, gpa_t gpa, u64 size, int level) { size_t page_size = PG_LEVEL_SIZE(level); size_t npages = size / page_size; - TEST_ASSERT(nested_paddr + size > nested_paddr, "Vaddr overflow"); - TEST_ASSERT(paddr + size > paddr, "Paddr overflow"); + TEST_ASSERT(l2_gpa + size > l2_gpa, "L2 GPA overflow"); + TEST_ASSERT(gpa + size > gpa, "GPA overflow"); while (npages--) { - __virt_pg_map(vm, &vm->stage2_mmu, nested_paddr, paddr, level); - nested_paddr += page_size; - paddr += page_size; + __virt_pg_map(vm, &vm->stage2_mmu, l2_gpa, gpa, level); + l2_gpa += page_size; + gpa += page_size; } } -void tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr, - uint64_t size) +void tdp_map(struct kvm_vm *vm, gpa_t l2_gpa, gpa_t gpa, u64 size) { - __tdp_map(vm, nested_paddr, paddr, size, PG_LEVEL_4K); + __tdp_map(vm, l2_gpa, gpa, size, PG_LEVEL_4K); } /* Prepare an identity extended page table that maps all the @@ -525,7 +520,7 @@ void tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr, */ void tdp_identity_map_default_memslots(struct kvm_vm *vm) { - uint32_t s, memslot = 0; + u32 s, memslot = 0; sparsebit_idx_t i, last; struct userspace_mem_region *region = memslot2region(vm, memslot); @@ -540,13 +535,13 @@ void tdp_identity_map_default_memslots(struct kvm_vm *vm) if (i > last) break; - tdp_map(vm, (uint64_t)i << vm->page_shift, - (uint64_t)i << vm->page_shift, 1 << vm->page_shift); + tdp_map(vm, (u64)i << vm->page_shift, + (u64)i << vm->page_shift, 1 << vm->page_shift); } } /* Identity map a region with 1GiB Pages. 
*/ -void tdp_identity_map_1g(struct kvm_vm *vm, uint64_t addr, uint64_t size) +void tdp_identity_map_1g(struct kvm_vm *vm, u64 addr, u64 size) { __tdp_map(vm, addr, addr, size, PG_LEVEL_1G); } @@ -618,10 +613,10 @@ static void kvm_seg_set_kernel_data_64bit(struct kvm_segment *segp) segp->present = true; } -vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) +gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva) { int level = PG_LEVEL_NONE; - uint64_t *pte = __vm_get_page_table_entry(vm, &vm->mmu, gva, &level); + u64 *pte = __vm_get_page_table_entry(vm, &vm->mmu, gva, &level); TEST_ASSERT(is_present_pte(&vm->mmu, pte), "Leaf PTE not PRESENT for gva: 0x%08lx", gva); @@ -633,7 +628,7 @@ vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) return vm_untag_gpa(vm, PTE_GET_PA(*pte)) | (gva & ~HUGEPAGE_MASK(level)); } -static void kvm_seg_set_tss_64bit(vm_vaddr_t base, struct kvm_segment *segp) +static void kvm_seg_set_tss_64bit(gva_t base, struct kvm_segment *segp) { memset(segp, 0, sizeof(*segp)); segp->base = base; @@ -746,16 +741,16 @@ static void vm_init_descriptor_tables(struct kvm_vm *vm) struct kvm_segment seg; int i; - vm->arch.gdt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA); - vm->arch.idt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA); - vm->handlers = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA); - vm->arch.tss = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA); + vm->arch.gdt = __vm_alloc_page(vm, MEM_REGION_DATA); + vm->arch.idt = __vm_alloc_page(vm, MEM_REGION_DATA); + vm->handlers = __vm_alloc_page(vm, MEM_REGION_DATA); + vm->arch.tss = __vm_alloc_page(vm, MEM_REGION_DATA); /* Handlers have the same address in both address spaces.*/ for (i = 0; i < NUM_INTERRUPTS; i++) set_idt_entry(vm, i, (unsigned long)(&idt_handlers)[i], 0, KERNEL_CS); - *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers; + *(gva_t *)addr_gva2hva(vm, (gva_t)(&exception_handlers)) = vm->handlers; kvm_seg_set_kernel_code_64bit(&seg); 
kvm_seg_fill_gdt_64bit(vm, &seg); @@ -770,9 +765,9 @@ static void vm_init_descriptor_tables(struct kvm_vm *vm) void vm_install_exception_handler(struct kvm_vm *vm, int vector, void (*handler)(struct ex_regs *)) { - vm_vaddr_t *handlers = (vm_vaddr_t *)addr_gva2hva(vm, vm->handlers); + gva_t *handlers = (gva_t *)addr_gva2hva(vm, vm->handlers); - handlers[vector] = (vm_vaddr_t)handler; + handlers[vector] = (gva_t)handler; } void assert_on_unhandled_exception(struct kvm_vcpu *vcpu) @@ -821,18 +816,17 @@ void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code) vcpu_regs_set(vcpu, ®s); } -struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) +struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id) { struct kvm_mp_state mp_state; struct kvm_regs regs; - vm_vaddr_t stack_vaddr; + gva_t stack_gva; struct kvm_vcpu *vcpu; - stack_vaddr = __vm_vaddr_alloc(vm, DEFAULT_STACK_PGS * getpagesize(), - DEFAULT_GUEST_STACK_VADDR_MIN, - MEM_REGION_DATA); + stack_gva = __vm_alloc(vm, DEFAULT_STACK_PGS * getpagesize(), + DEFAULT_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA); - stack_vaddr += DEFAULT_STACK_PGS * getpagesize(); + stack_gva += DEFAULT_STACK_PGS * getpagesize(); /* * Align stack to match calling sequence requirements in section "The @@ -843,9 +837,9 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) * If this code is ever used to launch a vCPU with 32-bit entry point it * may need to subtract 4 bytes instead of 8 bytes. 
*/ - TEST_ASSERT(IS_ALIGNED(stack_vaddr, PAGE_SIZE), - "__vm_vaddr_alloc() did not provide a page-aligned address"); - stack_vaddr -= 8; + TEST_ASSERT(IS_ALIGNED(stack_gva, PAGE_SIZE), + "__vm_alloc() did not provide a page-aligned address"); + stack_gva -= 8; vcpu = __vm_vcpu_add(vm, vcpu_id); vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid()); @@ -855,7 +849,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) /* Setup guest general purpose registers */ vcpu_regs_get(vcpu, ®s); regs.rflags = regs.rflags | 0x2; - regs.rsp = stack_vaddr; + regs.rsp = stack_gva; vcpu_regs_set(vcpu, ®s); /* Setup the MP state */ @@ -872,7 +866,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) return vcpu; } -struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id) +struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, u32 vcpu_id) { struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id); @@ -907,9 +901,9 @@ const struct kvm_cpuid2 *kvm_get_supported_cpuid(void) return kvm_supported_cpuid; } -static uint32_t __kvm_cpu_has(const struct kvm_cpuid2 *cpuid, - uint32_t function, uint32_t index, - uint8_t reg, uint8_t lo, uint8_t hi) +static u32 __kvm_cpu_has(const struct kvm_cpuid2 *cpuid, + u32 function, u32 index, + u8 reg, u8 lo, u8 hi) { const struct kvm_cpuid_entry2 *entry; int i; @@ -936,14 +930,14 @@ bool kvm_cpuid_has(const struct kvm_cpuid2 *cpuid, feature.reg, feature.bit, feature.bit); } -uint32_t kvm_cpuid_property(const struct kvm_cpuid2 *cpuid, - struct kvm_x86_cpu_property property) +u32 kvm_cpuid_property(const struct kvm_cpuid2 *cpuid, + struct kvm_x86_cpu_property property) { return __kvm_cpu_has(cpuid, property.function, property.index, property.reg, property.lo_bit, property.hi_bit); } -uint64_t kvm_get_feature_msr(uint64_t msr_index) +u64 kvm_get_feature_msr(u64 msr_index) { struct { struct kvm_msrs header; @@ -962,7 +956,7 @@ uint64_t kvm_get_feature_msr(uint64_t msr_index) return buffer.entry.data; } 
-void __vm_xsave_require_permission(uint64_t xfeature, const char *name) +void __vm_xsave_require_permission(u64 xfeature, const char *name) { int kvm_fd; u64 bitmask; @@ -1019,7 +1013,7 @@ void vcpu_init_cpuid(struct kvm_vcpu *vcpu, const struct kvm_cpuid2 *cpuid) void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu, struct kvm_x86_cpu_property property, - uint32_t value) + u32 value) { struct kvm_cpuid_entry2 *entry; @@ -1034,7 +1028,7 @@ void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu, TEST_ASSERT_EQ(kvm_cpuid_property(vcpu->cpuid, property), value); } -void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, uint32_t function) +void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, u32 function) { struct kvm_cpuid_entry2 *entry = vcpu_get_cpuid_entry(vcpu, function); @@ -1063,7 +1057,7 @@ void vcpu_set_or_clear_cpuid_feature(struct kvm_vcpu *vcpu, vcpu_set_cpuid(vcpu); } -uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index) +u64 vcpu_get_msr(struct kvm_vcpu *vcpu, u64 msr_index) { struct { struct kvm_msrs header; @@ -1078,7 +1072,7 @@ uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index) return buffer.entry.data; } -int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value) +int _vcpu_set_msr(struct kvm_vcpu *vcpu, u64 msr_index, u64 msr_value) { struct { struct kvm_msrs header; @@ -1106,28 +1100,28 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...) 
vcpu_regs_get(vcpu, ®s); if (num >= 1) - regs.rdi = va_arg(ap, uint64_t); + regs.rdi = va_arg(ap, u64); if (num >= 2) - regs.rsi = va_arg(ap, uint64_t); + regs.rsi = va_arg(ap, u64); if (num >= 3) - regs.rdx = va_arg(ap, uint64_t); + regs.rdx = va_arg(ap, u64); if (num >= 4) - regs.rcx = va_arg(ap, uint64_t); + regs.rcx = va_arg(ap, u64); if (num >= 5) - regs.r8 = va_arg(ap, uint64_t); + regs.r8 = va_arg(ap, u64); if (num >= 6) - regs.r9 = va_arg(ap, uint64_t); + regs.r9 = va_arg(ap, u64); vcpu_regs_set(vcpu, ®s); va_end(ap); } -void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent) +void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, u8 indent) { struct kvm_regs regs; struct kvm_sregs sregs; @@ -1196,7 +1190,7 @@ const struct kvm_msr_list *kvm_get_feature_msr_index_list(void) return list; } -bool kvm_msr_is_in_save_restore_list(uint32_t msr_index) +bool kvm_msr_is_in_save_restore_list(u32 msr_index) { const struct kvm_msr_list *list = kvm_get_msr_index_list(); int i; @@ -1327,7 +1321,7 @@ void kvm_init_vm_address_properties(struct kvm_vm *vm) } const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid, - uint32_t function, uint32_t index) + u32 function, u32 index) { int i; @@ -1344,7 +1338,7 @@ const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid, #define X86_HYPERCALL(inputs...) 
\ ({ \ - uint64_t r; \ + u64 r; \ \ asm volatile("test %[use_vmmcall], %[use_vmmcall]\n\t" \ "jnz 1f\n\t" \ @@ -1359,18 +1353,17 @@ const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid, r; \ }) -uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2, - uint64_t a3) +u64 kvm_hypercall(u64 nr, u64 a0, u64 a1, u64 a2, u64 a3) { return X86_HYPERCALL("a"(nr), "b"(a0), "c"(a1), "d"(a2), "S"(a3)); } -uint64_t __xen_hypercall(uint64_t nr, uint64_t a0, void *a1) +u64 __xen_hypercall(u64 nr, u64 a0, void *a1) { return X86_HYPERCALL("a"(nr), "D"(a0), "S"(a1)); } -void xen_hypercall(uint64_t nr, uint64_t a0, void *a1) +void xen_hypercall(u64 nr, u64 a0, void *a1) { GUEST_ASSERT(!__xen_hypercall(nr, a0, a1)); } @@ -1379,7 +1372,7 @@ unsigned long vm_compute_max_gfn(struct kvm_vm *vm) { const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */ unsigned long ht_gfn, max_gfn, max_pfn; - uint8_t maxphyaddr, guest_maxphyaddr; + u8 maxphyaddr, guest_maxphyaddr; /* * Use "guest MAXPHYADDR" from KVM if it's available. Guest MAXPHYADDR @@ -1453,8 +1446,7 @@ bool kvm_arch_has_default_irqchip(void) return true; } -void setup_smram(struct kvm_vm *vm, struct kvm_vcpu *vcpu, - uint64_t smram_gpa, +void setup_smram(struct kvm_vm *vm, struct kvm_vcpu *vcpu, u64 smram_gpa, const void *smi_handler, size_t handler_size) { vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, smram_gpa, diff --git a/tools/testing/selftests/kvm/lib/x86/sev.c b/tools/testing/selftests/kvm/lib/x86/sev.c index c3a9838f4806..93f916903461 100644 --- a/tools/testing/selftests/kvm/lib/x86/sev.c +++ b/tools/testing/selftests/kvm/lib/x86/sev.c @@ -15,10 +15,10 @@ * expression would cause us to quit the loop. 
*/ static void encrypt_region(struct kvm_vm *vm, struct userspace_mem_region *region, - uint8_t page_type, bool private) + u8 page_type, bool private) { const struct sparsebit *protected_phy_pages = region->protected_phy_pages; - const vm_paddr_t gpa_base = region->region.guest_phys_addr; + const gpa_t gpa_base = region->region.guest_phys_addr; const sparsebit_idx_t lowest_page_in_region = gpa_base >> vm->page_shift; sparsebit_idx_t i, j; @@ -29,15 +29,15 @@ static void encrypt_region(struct kvm_vm *vm, struct userspace_mem_region *regio sev_register_encrypted_memory(vm, region); sparsebit_for_each_set_range(protected_phy_pages, i, j) { - const uint64_t size = (j - i + 1) * vm->page_size; - const uint64_t offset = (i - lowest_page_in_region) * vm->page_size; + const u64 size = (j - i + 1) * vm->page_size; + const u64 offset = (i - lowest_page_in_region) * vm->page_size; if (private) vm_mem_set_private(vm, gpa_base + offset, size); if (is_sev_snp_vm(vm)) snp_launch_update_data(vm, gpa_base + offset, - (uint64_t)addr_gpa2hva(vm, gpa_base + offset), + (u64)addr_gpa2hva(vm, gpa_base + offset), size, page_type); else sev_launch_update_data(vm, gpa_base + offset, size); @@ -79,7 +79,7 @@ void snp_vm_init(struct kvm_vm *vm) vm_sev_ioctl(vm, KVM_SEV_INIT2, &init); } -void sev_vm_launch(struct kvm_vm *vm, uint32_t policy) +void sev_vm_launch(struct kvm_vm *vm, u32 policy) { struct kvm_sev_launch_start launch_start = { .policy = policy, @@ -103,7 +103,7 @@ void sev_vm_launch(struct kvm_vm *vm, uint32_t policy) vm->arch.is_pt_protected = true; } -void sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement) +void sev_vm_launch_measure(struct kvm_vm *vm, u8 *measurement) { struct kvm_sev_launch_measure launch_measure; struct kvm_sev_guest_status guest_status; @@ -131,7 +131,7 @@ void sev_vm_launch_finish(struct kvm_vm *vm) TEST_ASSERT_EQ(status.state, SEV_GUEST_STATE_RUNNING); } -void snp_vm_launch_start(struct kvm_vm *vm, uint64_t policy) +void 
snp_vm_launch_start(struct kvm_vm *vm, u64 policy) { struct kvm_sev_snp_launch_start launch_start = { .policy = policy, @@ -158,7 +158,7 @@ void snp_vm_launch_finish(struct kvm_vm *vm) vm_sev_ioctl(vm, KVM_SEV_SNP_LAUNCH_FINISH, &launch_finish); } -struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code, +struct kvm_vm *vm_sev_create_with_one_vcpu(u32 type, void *guest_code, struct kvm_vcpu **cpu) { struct vm_shape shape = { @@ -174,7 +174,7 @@ struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code, return vm; } -void vm_sev_launch(struct kvm_vm *vm, uint64_t policy, uint8_t *measurement) +void vm_sev_launch(struct kvm_vm *vm, u64 policy, u8 *measurement) { if (is_sev_snp_vm(vm)) { vm_enable_cap(vm, KVM_CAP_EXIT_HYPERCALL, BIT(KVM_HC_MAP_GPA_RANGE)); diff --git a/tools/testing/selftests/kvm/lib/x86/svm.c b/tools/testing/selftests/kvm/lib/x86/svm.c index eb20b00112c7..3b01605ab016 100644 --- a/tools/testing/selftests/kvm/lib/x86/svm.c +++ b/tools/testing/selftests/kvm/lib/x86/svm.c @@ -28,20 +28,20 @@ u64 rflags; * Pointer to structure with the addresses of the SVM areas. 
*/ struct svm_test_data * -vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva) +vcpu_alloc_svm(struct kvm_vm *vm, gva_t *p_svm_gva) { - vm_vaddr_t svm_gva = vm_vaddr_alloc_page(vm); + gva_t svm_gva = vm_alloc_page(vm); struct svm_test_data *svm = addr_gva2hva(vm, svm_gva); - svm->vmcb = (void *)vm_vaddr_alloc_page(vm); + svm->vmcb = (void *)vm_alloc_page(vm); svm->vmcb_hva = addr_gva2hva(vm, (uintptr_t)svm->vmcb); svm->vmcb_gpa = addr_gva2gpa(vm, (uintptr_t)svm->vmcb); - svm->save_area = (void *)vm_vaddr_alloc_page(vm); + svm->save_area = (void *)vm_alloc_page(vm); svm->save_area_hva = addr_gva2hva(vm, (uintptr_t)svm->save_area); svm->save_area_gpa = addr_gva2gpa(vm, (uintptr_t)svm->save_area); - svm->msr = (void *)vm_vaddr_alloc_page(vm); + svm->msr = (void *)vm_alloc_page(vm); svm->msr_hva = addr_gva2hva(vm, (uintptr_t)svm->msr); svm->msr_gpa = addr_gva2gpa(vm, (uintptr_t)svm->msr); memset(svm->msr_hva, 0, getpagesize()); @@ -84,14 +84,14 @@ void vm_enable_npt(struct kvm_vm *vm) void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp) { struct vmcb *vmcb = svm->vmcb; - uint64_t vmcb_gpa = svm->vmcb_gpa; + u64 vmcb_gpa = svm->vmcb_gpa; struct vmcb_save_area *save = &vmcb->save; struct vmcb_control_area *ctrl = &vmcb->control; u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK; u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK; - uint64_t efer; + u64 efer; efer = rdmsr(MSR_EFER); wrmsr(MSR_EFER, efer | EFER_SVME); @@ -158,7 +158,7 @@ void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_r * for now. registers involved in LOAD/SAVE_GPR_C are eventually * unmodified so they do not need to be in the clobber list. 
*/ -void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa) +void run_guest(struct vmcb *vmcb, u64 vmcb_gpa) { asm volatile ( "vmload %[vmcb_gpa]\n\t" diff --git a/tools/testing/selftests/kvm/lib/x86/ucall.c b/tools/testing/selftests/kvm/lib/x86/ucall.c index 1265cecc7dd1..e7dd5791959b 100644 --- a/tools/testing/selftests/kvm/lib/x86/ucall.c +++ b/tools/testing/selftests/kvm/lib/x86/ucall.c @@ -6,9 +6,9 @@ */ #include "kvm_util.h" -#define UCALL_PIO_PORT ((uint16_t)0x1000) +#define UCALL_PIO_PORT ((u16)0x1000) -void ucall_arch_do_ucall(vm_vaddr_t uc) +void ucall_arch_do_ucall(gva_t uc) { /* * FIXME: Revert this hack (the entire commit that added it) once nVMX diff --git a/tools/testing/selftests/kvm/lib/x86/vmx.c b/tools/testing/selftests/kvm/lib/x86/vmx.c index c87b340362a9..67642759e4a0 100644 --- a/tools/testing/selftests/kvm/lib/x86/vmx.c +++ b/tools/testing/selftests/kvm/lib/x86/vmx.c @@ -27,7 +27,7 @@ struct hv_vp_assist_page *current_vp_assist; int vcpu_enable_evmcs(struct kvm_vcpu *vcpu) { - uint16_t evmcs_ver; + u16 evmcs_ver; vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENLIGHTENED_VMCS, (unsigned long)&evmcs_ver); @@ -79,39 +79,39 @@ void vm_enable_ept(struct kvm_vm *vm) * Pointer to structure with the addresses of the VMX areas. */ struct vmx_pages * -vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva) +vcpu_alloc_vmx(struct kvm_vm *vm, gva_t *p_vmx_gva) { - vm_vaddr_t vmx_gva = vm_vaddr_alloc_page(vm); + gva_t vmx_gva = vm_alloc_page(vm); struct vmx_pages *vmx = addr_gva2hva(vm, vmx_gva); /* Setup of a region of guest memory for the vmxon region. */ - vmx->vmxon = (void *)vm_vaddr_alloc_page(vm); + vmx->vmxon = (void *)vm_alloc_page(vm); vmx->vmxon_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmxon); vmx->vmxon_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmxon); /* Setup of a region of guest memory for a vmcs. 
*/ - vmx->vmcs = (void *)vm_vaddr_alloc_page(vm); + vmx->vmcs = (void *)vm_alloc_page(vm); vmx->vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmcs); vmx->vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmcs); /* Setup of a region of guest memory for the MSR bitmap. */ - vmx->msr = (void *)vm_vaddr_alloc_page(vm); + vmx->msr = (void *)vm_alloc_page(vm); vmx->msr_hva = addr_gva2hva(vm, (uintptr_t)vmx->msr); vmx->msr_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->msr); memset(vmx->msr_hva, 0, getpagesize()); /* Setup of a region of guest memory for the shadow VMCS. */ - vmx->shadow_vmcs = (void *)vm_vaddr_alloc_page(vm); + vmx->shadow_vmcs = (void *)vm_alloc_page(vm); vmx->shadow_vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->shadow_vmcs); vmx->shadow_vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->shadow_vmcs); /* Setup of a region of guest memory for the VMREAD and VMWRITE bitmaps. */ - vmx->vmread = (void *)vm_vaddr_alloc_page(vm); + vmx->vmread = (void *)vm_alloc_page(vm); vmx->vmread_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmread); vmx->vmread_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmread); memset(vmx->vmread_hva, 0, getpagesize()); - vmx->vmwrite = (void *)vm_vaddr_alloc_page(vm); + vmx->vmwrite = (void *)vm_alloc_page(vm); vmx->vmwrite_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmwrite); vmx->vmwrite_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmwrite); memset(vmx->vmwrite_hva, 0, getpagesize()); @@ -125,8 +125,8 @@ vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva) bool prepare_for_vmx_operation(struct vmx_pages *vmx) { - uint64_t feature_control; - uint64_t required; + u64 feature_control; + u64 required; unsigned long cr0; unsigned long cr4; @@ -160,7 +160,7 @@ bool prepare_for_vmx_operation(struct vmx_pages *vmx) wrmsr(MSR_IA32_FEAT_CTL, feature_control | required); /* Enter VMX root operation. 
*/ - *(uint32_t *)(vmx->vmxon) = vmcs_revision(); + *(u32 *)(vmx->vmxon) = vmcs_revision(); if (vmxon(vmx->vmxon_gpa)) return false; @@ -170,7 +170,7 @@ bool prepare_for_vmx_operation(struct vmx_pages *vmx) bool load_vmcs(struct vmx_pages *vmx) { /* Load a VMCS. */ - *(uint32_t *)(vmx->vmcs) = vmcs_revision(); + *(u32 *)(vmx->vmcs) = vmcs_revision(); if (vmclear(vmx->vmcs_gpa)) return false; @@ -178,14 +178,14 @@ bool load_vmcs(struct vmx_pages *vmx) return false; /* Setup shadow VMCS, do not load it yet. */ - *(uint32_t *)(vmx->shadow_vmcs) = vmcs_revision() | 0x80000000ul; + *(u32 *)(vmx->shadow_vmcs) = vmcs_revision() | 0x80000000ul; if (vmclear(vmx->shadow_vmcs_gpa)) return false; return true; } -static bool ept_vpid_cap_supported(uint64_t mask) +static bool ept_vpid_cap_supported(u64 mask) { return rdmsr(MSR_IA32_VMX_EPT_VPID_CAP) & mask; } @@ -200,7 +200,7 @@ bool ept_1g_pages_supported(void) */ static inline void init_vmcs_control_fields(struct vmx_pages *vmx) { - uint32_t sec_exec_ctl = 0; + u32 sec_exec_ctl = 0; vmwrite(VIRTUAL_PROCESSOR_ID, 0); vmwrite(POSTED_INTR_NV, 0); @@ -208,7 +208,7 @@ static inline void init_vmcs_control_fields(struct vmx_pages *vmx) vmwrite(PIN_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS)); if (vmx->eptp_gpa) { - uint64_t eptp = vmx->eptp_gpa | EPTP_WB | EPTP_PWL_4; + u64 eptp = vmx->eptp_gpa | EPTP_WB | EPTP_PWL_4; TEST_ASSERT((vmx->eptp_gpa & ~PHYSICAL_PAGE_MASK) == 0, "Illegal bits set in vmx->eptp_gpa"); @@ -259,7 +259,7 @@ static inline void init_vmcs_control_fields(struct vmx_pages *vmx) */ static inline void init_vmcs_host_state(void) { - uint32_t exit_controls = vmreadz(VM_EXIT_CONTROLS); + u32 exit_controls = vmreadz(VM_EXIT_CONTROLS); vmwrite(HOST_ES_SELECTOR, get_es()); vmwrite(HOST_CS_SELECTOR, get_cs()); @@ -358,8 +358,8 @@ static inline void init_vmcs_guest_state(void *rip, void *rsp) vmwrite(GUEST_GDTR_BASE, vmreadz(HOST_GDTR_BASE)); vmwrite(GUEST_IDTR_BASE, vmreadz(HOST_IDTR_BASE)); 
vmwrite(GUEST_DR7, 0x400); - vmwrite(GUEST_RSP, (uint64_t)rsp); - vmwrite(GUEST_RIP, (uint64_t)rip); + vmwrite(GUEST_RSP, (u64)rsp); + vmwrite(GUEST_RIP, (u64)rip); vmwrite(GUEST_RFLAGS, 2); vmwrite(GUEST_PENDING_DBG_EXCEPTIONS, 0); vmwrite(GUEST_SYSENTER_ESP, vmreadz(HOST_IA32_SYSENTER_ESP)); @@ -375,7 +375,7 @@ void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp) bool kvm_cpu_has_ept(void) { - uint64_t ctrl; + u64 ctrl; if (!kvm_cpu_has(X86_FEATURE_VMX)) return false; @@ -390,7 +390,7 @@ bool kvm_cpu_has_ept(void) void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm) { - vmx->apic_access = (void *)vm_vaddr_alloc_page(vm); + vmx->apic_access = (void *)vm_alloc_page(vm); vmx->apic_access_hva = addr_gva2hva(vm, (uintptr_t)vmx->apic_access); vmx->apic_access_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->apic_access); } diff --git a/tools/testing/selftests/kvm/loongarch/arch_timer.c b/tools/testing/selftests/kvm/loongarch/arch_timer.c index 355ecac30954..a7279ded8518 100644 --- a/tools/testing/selftests/kvm/loongarch/arch_timer.c +++ b/tools/testing/selftests/kvm/loongarch/arch_timer.c @@ -27,8 +27,8 @@ static void do_idle(void) static void guest_irq_handler(struct ex_regs *regs) { unsigned int intid; - uint32_t cpu = guest_get_vcpuid(); - uint64_t xcnt, val, cfg, xcnt_diff_us; + u32 cpu = guest_get_vcpuid(); + u64 xcnt, val, cfg, xcnt_diff_us; struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu]; intid = !!(regs->estat & BIT(INT_TI)); @@ -62,10 +62,10 @@ static void guest_irq_handler(struct ex_regs *regs) WRITE_ONCE(shared_data->nr_iter, shared_data->nr_iter + 1); } -static void guest_test_period_timer(uint32_t cpu) +static void guest_test_period_timer(u32 cpu) { - uint32_t irq_iter, config_iter; - uint64_t us; + u32 irq_iter, config_iter; + u64 us; struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu]; shared_data->nr_iter = test_args.nr_iter; @@ -86,10 +86,10 @@ static void 
guest_test_period_timer(uint32_t cpu) irq_iter); } -static void guest_test_oneshot_timer(uint32_t cpu) +static void guest_test_oneshot_timer(u32 cpu) { - uint32_t irq_iter, config_iter; - uint64_t us; + u32 irq_iter, config_iter; + u64 us; struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu]; shared_data->nr_iter = 0; @@ -112,10 +112,10 @@ static void guest_test_oneshot_timer(uint32_t cpu) } } -static void guest_test_emulate_timer(uint32_t cpu) +static void guest_test_emulate_timer(u32 cpu) { - uint32_t config_iter; - uint64_t xcnt_diff_us, us; + u32 config_iter; + u64 xcnt_diff_us, us; struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu]; local_irq_disable(); @@ -136,9 +136,9 @@ static void guest_test_emulate_timer(uint32_t cpu) local_irq_enable(); } -static void guest_time_count_test(uint32_t cpu) +static void guest_time_count_test(u32 cpu) { - uint32_t config_iter; + u32 config_iter; unsigned long start, end, prev, us; /* Assuming that test case starts to run in 1 second */ @@ -165,7 +165,7 @@ static void guest_time_count_test(uint32_t cpu) static void guest_code(void) { - uint32_t cpu = guest_get_vcpuid(); + u32 cpu = guest_get_vcpuid(); /* must run at first */ guest_time_count_test(cpu); diff --git a/tools/testing/selftests/kvm/loongarch/pmu_test.c b/tools/testing/selftests/kvm/loongarch/pmu_test.c index 88bb530e336e..ec3fefb9ea97 100644 --- a/tools/testing/selftests/kvm/loongarch/pmu_test.c +++ b/tools/testing/selftests/kvm/loongarch/pmu_test.c @@ -15,7 +15,7 @@ static int pmu_irq_count; /* Check PMU support */ static bool has_pmu_support(void) { - uint32_t cfg6; + u32 cfg6; /* Read CPUCFG6 to check PMU */ cfg6 = read_cpucfg(LOONGARCH_CPUCFG6); @@ -34,7 +34,7 @@ static bool has_pmu_support(void) /* Dump PMU capabilities */ static void dump_pmu_caps(void) { - uint32_t cfg6; + u32 cfg6; int nr_counters, counter_bits; cfg6 = read_cpucfg(LOONGARCH_CPUCFG6); @@ -51,8 +51,8 @@ static void dump_pmu_caps(void) static void 
guest_pmu_base_test(void) { int i; - uint32_t cfg6, pmnum; - uint64_t cnt[4]; + u32 cfg6, pmnum; + u64 cnt[4]; cfg6 = read_cpucfg(LOONGARCH_CPUCFG6); pmnum = (cfg6 >> 4) & 0xf; @@ -114,7 +114,7 @@ static void guest_irq_handler(struct ex_regs *regs) static void guest_pmu_interrupt_test(void) { - uint64_t cnt; + u64 cnt; csr_write(PMU_OVERFLOW - 1, LOONGARCH_CSR_PERFCNTR0); csr_write(PMU_ENVENT_ENABLED | CSR_PERFCTRL_PMIE | LOONGARCH_PMU_EVENT_CYCLES, LOONGARCH_CSR_PERFCTRL0); diff --git a/tools/testing/selftests/kvm/memslot_modification_stress_test.c b/tools/testing/selftests/kvm/memslot_modification_stress_test.c index 3cdfa3b19b85..9c7578a098c3 100644 --- a/tools/testing/selftests/kvm/memslot_modification_stress_test.c +++ b/tools/testing/selftests/kvm/memslot_modification_stress_test.c @@ -30,7 +30,7 @@ static int nr_vcpus = 1; -static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE; +static u64 guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE; static void vcpu_worker(struct memstress_vcpu_args *vcpu_args) { @@ -55,10 +55,10 @@ static void vcpu_worker(struct memstress_vcpu_args *vcpu_args) } static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay, - uint64_t nr_modifications) + u64 nr_modifications) { - uint64_t pages = max_t(int, vm->page_size, getpagesize()) / vm->page_size; - uint64_t gpa; + u64 pages = max_t(int, vm->page_size, getpagesize()) / vm->page_size; + gpa_t gpa; int i; /* @@ -78,7 +78,7 @@ static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay, struct test_params { useconds_t delay; - uint64_t nr_iterations; + u64 nr_iterations; bool partition_vcpu_memory_access; bool disable_slot_zap_quirk; }; diff --git a/tools/testing/selftests/kvm/memslot_perf_test.c b/tools/testing/selftests/kvm/memslot_perf_test.c index 5087d082c4b0..3d02db371422 100644 --- a/tools/testing/selftests/kvm/memslot_perf_test.c +++ b/tools/testing/selftests/kvm/memslot_perf_test.c @@ -85,17 +85,17 @@ struct vm_data { struct kvm_vm *vm; struct 
kvm_vcpu *vcpu; pthread_t vcpu_thread; - uint32_t nslots; - uint64_t npages; - uint64_t pages_per_slot; + u32 nslots; + u64 npages; + u64 pages_per_slot; void **hva_slots; bool mmio_ok; - uint64_t mmio_gpa_min; - uint64_t mmio_gpa_max; + u64 mmio_gpa_min; + u64 mmio_gpa_max; }; struct sync_area { - uint32_t guest_page_size; + u32 guest_page_size; atomic_bool start_flag; atomic_bool exit_flag; atomic_bool sync_flag; @@ -186,12 +186,12 @@ static void wait_for_vcpu(void) "sem_timedwait() failed: %d", errno); } -static void *vm_gpa2hva(struct vm_data *data, uint64_t gpa, uint64_t *rempages) +static void *vm_gpa2hva(struct vm_data *data, gpa_t gpa, u64 *rempages) { - uint64_t gpage, pgoffs; - uint32_t slot, slotoffs; + gpa_t gpage, pgoffs; + u32 slot, slotoffs; void *base; - uint32_t guest_page_size = data->vm->page_size; + u32 guest_page_size = data->vm->page_size; TEST_ASSERT(gpa >= MEM_GPA, "Too low gpa to translate"); TEST_ASSERT(gpa < MEM_GPA + data->npages * guest_page_size, @@ -200,11 +200,11 @@ static void *vm_gpa2hva(struct vm_data *data, uint64_t gpa, uint64_t *rempages) gpage = gpa / guest_page_size; pgoffs = gpa % guest_page_size; - slot = min(gpage / data->pages_per_slot, (uint64_t)data->nslots - 1); + slot = min(gpage / data->pages_per_slot, (u64)data->nslots - 1); slotoffs = gpage - (slot * data->pages_per_slot); if (rempages) { - uint64_t slotpages; + u64 slotpages; if (slot == data->nslots - 1) slotpages = data->npages - slot * data->pages_per_slot; @@ -217,12 +217,12 @@ static void *vm_gpa2hva(struct vm_data *data, uint64_t gpa, uint64_t *rempages) } base = data->hva_slots[slot]; - return (uint8_t *)base + slotoffs * guest_page_size + pgoffs; + return (u8 *)base + slotoffs * guest_page_size + pgoffs; } -static uint64_t vm_slot2gpa(struct vm_data *data, uint32_t slot) +static u64 vm_slot2gpa(struct vm_data *data, u32 slot) { - uint32_t guest_page_size = data->vm->page_size; + u32 guest_page_size = data->vm->page_size; TEST_ASSERT(slot < data->nslots, 
"Too high slot number"); @@ -243,8 +243,8 @@ static struct vm_data *alloc_vm(void) return data; } -static bool check_slot_pages(uint32_t host_page_size, uint32_t guest_page_size, - uint64_t pages_per_slot, uint64_t rempages) +static bool check_slot_pages(u32 host_page_size, u32 guest_page_size, + u64 pages_per_slot, u64 rempages) { if (!pages_per_slot) return false; @@ -259,11 +259,11 @@ static bool check_slot_pages(uint32_t host_page_size, uint32_t guest_page_size, } -static uint64_t get_max_slots(struct vm_data *data, uint32_t host_page_size) +static u64 get_max_slots(struct vm_data *data, u32 host_page_size) { - uint32_t guest_page_size = data->vm->page_size; - uint64_t mempages, pages_per_slot, rempages; - uint64_t slots; + u32 guest_page_size = data->vm->page_size; + u64 mempages, pages_per_slot, rempages; + u64 slots; mempages = data->npages; slots = data->nslots; @@ -281,13 +281,13 @@ static uint64_t get_max_slots(struct vm_data *data, uint32_t host_page_size) return 0; } -static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots, - void *guest_code, uint64_t mem_size, +static bool prepare_vm(struct vm_data *data, int nslots, u64 *maxslots, + void *guest_code, u64 mem_size, struct timespec *slot_runtime) { - uint64_t mempages, rempages; - uint64_t guest_addr; - uint32_t slot, host_page_size, guest_page_size; + u64 mempages, rempages; + u64 guest_addr; + u32 slot, host_page_size, guest_page_size; struct timespec tstart; struct sync_area *sync; @@ -317,7 +317,7 @@ static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots, clock_gettime(CLOCK_MONOTONIC, &tstart); for (slot = 1, guest_addr = MEM_GPA; slot <= data->nslots; slot++) { - uint64_t npages; + u64 npages; npages = data->pages_per_slot; if (slot == data->nslots) @@ -331,8 +331,8 @@ static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots, *slot_runtime = timespec_elapsed(tstart); for (slot = 1, guest_addr = MEM_GPA; slot <= data->nslots; slot++) { - 
uint64_t npages; - uint64_t gpa; + u64 npages; + gpa_t gpa; npages = data->pages_per_slot; if (slot == data->nslots) @@ -448,7 +448,7 @@ static bool guest_perform_sync(void) static void guest_code_test_memslot_move(void) { struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA; - uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size); + u32 page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size); uintptr_t base = (typeof(base))READ_ONCE(sync->move_area_ptr); GUEST_SYNC(0); @@ -460,7 +460,7 @@ static void guest_code_test_memslot_move(void) for (ptr = base; ptr < base + MEM_TEST_MOVE_SIZE; ptr += page_size) - *(uint64_t *)ptr = MEM_TEST_VAL_1; + *(u64 *)ptr = MEM_TEST_VAL_1; /* * No host sync here since the MMIO exits are so expensive @@ -477,7 +477,7 @@ static void guest_code_test_memslot_move(void) static void guest_code_test_memslot_map(void) { struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA; - uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size); + u32 page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size); GUEST_SYNC(0); @@ -489,7 +489,7 @@ static void guest_code_test_memslot_map(void) for (ptr = MEM_TEST_GPA; ptr < MEM_TEST_GPA + MEM_TEST_MAP_SIZE / 2; ptr += page_size) - *(uint64_t *)ptr = MEM_TEST_VAL_1; + *(u64 *)ptr = MEM_TEST_VAL_1; if (!guest_perform_sync()) break; @@ -497,7 +497,7 @@ static void guest_code_test_memslot_map(void) for (ptr = MEM_TEST_GPA + MEM_TEST_MAP_SIZE / 2; ptr < MEM_TEST_GPA + MEM_TEST_MAP_SIZE; ptr += page_size) - *(uint64_t *)ptr = MEM_TEST_VAL_2; + *(u64 *)ptr = MEM_TEST_VAL_2; if (!guest_perform_sync()) break; @@ -526,13 +526,13 @@ static void guest_code_test_memslot_unmap(void) * * Just access a single page to be on the safe side. 
*/ - *(uint64_t *)ptr = MEM_TEST_VAL_1; + *(u64 *)ptr = MEM_TEST_VAL_1; if (!guest_perform_sync()) break; ptr += MEM_TEST_UNMAP_SIZE / 2; - *(uint64_t *)ptr = MEM_TEST_VAL_2; + *(u64 *)ptr = MEM_TEST_VAL_2; if (!guest_perform_sync()) break; @@ -544,7 +544,7 @@ static void guest_code_test_memslot_unmap(void) static void guest_code_test_memslot_rw(void) { struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA; - uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size); + u32 page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size); GUEST_SYNC(0); @@ -555,17 +555,17 @@ static void guest_code_test_memslot_rw(void) for (ptr = MEM_TEST_GPA; ptr < MEM_TEST_GPA + MEM_TEST_SIZE; ptr += page_size) - *(uint64_t *)ptr = MEM_TEST_VAL_1; + *(u64 *)ptr = MEM_TEST_VAL_1; if (!guest_perform_sync()) break; for (ptr = MEM_TEST_GPA + page_size / 2; ptr < MEM_TEST_GPA + MEM_TEST_SIZE; ptr += page_size) { - uint64_t val = *(uint64_t *)ptr; + u64 val = *(u64 *)ptr; GUEST_ASSERT_EQ(val, MEM_TEST_VAL_2); - *(uint64_t *)ptr = 0; + *(u64 *)ptr = 0; } if (!guest_perform_sync()) @@ -577,10 +577,10 @@ static void guest_code_test_memslot_rw(void) static bool test_memslot_move_prepare(struct vm_data *data, struct sync_area *sync, - uint64_t *maxslots, bool isactive) + u64 *maxslots, bool isactive) { - uint32_t guest_page_size = data->vm->page_size; - uint64_t movesrcgpa, movetestgpa; + u32 guest_page_size = data->vm->page_size; + u64 movesrcgpa, movetestgpa; #ifdef __x86_64__ if (disable_slot_zap_quirk) @@ -590,7 +590,7 @@ static bool test_memslot_move_prepare(struct vm_data *data, movesrcgpa = vm_slot2gpa(data, data->nslots - 1); if (isactive) { - uint64_t lastpages; + u64 lastpages; vm_gpa2hva(data, movesrcgpa, &lastpages); if (lastpages * guest_page_size < MEM_TEST_MOVE_SIZE / 2) { @@ -613,21 +613,21 @@ static bool test_memslot_move_prepare(struct vm_data *data, static bool test_memslot_move_prepare_active(struct vm_data *data, struct sync_area *sync, - uint64_t *maxslots) 
+ u64 *maxslots) { return test_memslot_move_prepare(data, sync, maxslots, true); } static bool test_memslot_move_prepare_inactive(struct vm_data *data, struct sync_area *sync, - uint64_t *maxslots) + u64 *maxslots) { return test_memslot_move_prepare(data, sync, maxslots, false); } static void test_memslot_move_loop(struct vm_data *data, struct sync_area *sync) { - uint64_t movesrcgpa; + u64 movesrcgpa; movesrcgpa = vm_slot2gpa(data, data->nslots - 1); vm_mem_region_move(data->vm, data->nslots - 1 + 1, @@ -636,13 +636,13 @@ static void test_memslot_move_loop(struct vm_data *data, struct sync_area *sync) } static void test_memslot_do_unmap(struct vm_data *data, - uint64_t offsp, uint64_t count) + u64 offsp, u64 count) { - uint64_t gpa, ctr; - uint32_t guest_page_size = data->vm->page_size; + gpa_t gpa, ctr; + u32 guest_page_size = data->vm->page_size; for (gpa = MEM_TEST_GPA + offsp * guest_page_size, ctr = 0; ctr < count; ) { - uint64_t npages; + u64 npages; void *hva; int ret; @@ -661,11 +661,11 @@ static void test_memslot_do_unmap(struct vm_data *data, } static void test_memslot_map_unmap_check(struct vm_data *data, - uint64_t offsp, uint64_t valexp) + u64 offsp, u64 valexp) { - uint64_t gpa; - uint64_t *val; - uint32_t guest_page_size = data->vm->page_size; + gpa_t gpa; + u64 *val; + u32 guest_page_size = data->vm->page_size; if (!map_unmap_verify) return; @@ -680,8 +680,8 @@ static void test_memslot_map_unmap_check(struct vm_data *data, static void test_memslot_map_loop(struct vm_data *data, struct sync_area *sync) { - uint32_t guest_page_size = data->vm->page_size; - uint64_t guest_pages = MEM_TEST_MAP_SIZE / guest_page_size; + u32 guest_page_size = data->vm->page_size; + u64 guest_pages = MEM_TEST_MAP_SIZE / guest_page_size; /* * Unmap the second half of the test area while guest writes to (maps) @@ -718,11 +718,11 @@ static void test_memslot_map_loop(struct vm_data *data, struct sync_area *sync) static void test_memslot_unmap_loop_common(struct vm_data *data, 
struct sync_area *sync, - uint64_t chunk) + u64 chunk) { - uint32_t guest_page_size = data->vm->page_size; - uint64_t guest_pages = MEM_TEST_UNMAP_SIZE / guest_page_size; - uint64_t ctr; + u32 guest_page_size = data->vm->page_size; + u64 guest_pages = MEM_TEST_UNMAP_SIZE / guest_page_size; + u64 ctr; /* * Wait for the guest to finish mapping page(s) in the first half @@ -746,9 +746,9 @@ static void test_memslot_unmap_loop_common(struct vm_data *data, static void test_memslot_unmap_loop(struct vm_data *data, struct sync_area *sync) { - uint32_t host_page_size = getpagesize(); - uint32_t guest_page_size = data->vm->page_size; - uint64_t guest_chunk_pages = guest_page_size >= host_page_size ? + u32 host_page_size = getpagesize(); + u32 guest_page_size = data->vm->page_size; + u64 guest_chunk_pages = guest_page_size >= host_page_size ? 1 : host_page_size / guest_page_size; test_memslot_unmap_loop_common(data, sync, guest_chunk_pages); @@ -757,27 +757,27 @@ static void test_memslot_unmap_loop(struct vm_data *data, static void test_memslot_unmap_loop_chunked(struct vm_data *data, struct sync_area *sync) { - uint32_t guest_page_size = data->vm->page_size; - uint64_t guest_chunk_pages = MEM_TEST_UNMAP_CHUNK_SIZE / guest_page_size; + u32 guest_page_size = data->vm->page_size; + u64 guest_chunk_pages = MEM_TEST_UNMAP_CHUNK_SIZE / guest_page_size; test_memslot_unmap_loop_common(data, sync, guest_chunk_pages); } static void test_memslot_rw_loop(struct vm_data *data, struct sync_area *sync) { - uint64_t gptr; - uint32_t guest_page_size = data->vm->page_size; + u64 gptr; + u32 guest_page_size = data->vm->page_size; for (gptr = MEM_TEST_GPA + guest_page_size / 2; gptr < MEM_TEST_GPA + MEM_TEST_SIZE; gptr += guest_page_size) - *(uint64_t *)vm_gpa2hva(data, gptr, NULL) = MEM_TEST_VAL_2; + *(u64 *)vm_gpa2hva(data, gptr, NULL) = MEM_TEST_VAL_2; host_perform_sync(sync); for (gptr = MEM_TEST_GPA; gptr < MEM_TEST_GPA + MEM_TEST_SIZE; gptr += guest_page_size) { - uint64_t *vptr = 
(typeof(vptr))vm_gpa2hva(data, gptr, NULL); - uint64_t val = *vptr; + u64 *vptr = (typeof(vptr))vm_gpa2hva(data, gptr, NULL); + u64 val = *vptr; TEST_ASSERT(val == MEM_TEST_VAL_1, "Guest written values should read back correctly (is %"PRIu64" @ %"PRIx64")", @@ -790,21 +790,21 @@ static void test_memslot_rw_loop(struct vm_data *data, struct sync_area *sync) struct test_data { const char *name; - uint64_t mem_size; + u64 mem_size; void (*guest_code)(void); bool (*prepare)(struct vm_data *data, struct sync_area *sync, - uint64_t *maxslots); + u64 *maxslots); void (*loop)(struct vm_data *data, struct sync_area *sync); }; -static bool test_execute(int nslots, uint64_t *maxslots, +static bool test_execute(int nslots, u64 *maxslots, unsigned int maxtime, const struct test_data *tdata, - uint64_t *nloops, + u64 *nloops, struct timespec *slot_runtime, struct timespec *guest_runtime) { - uint64_t mem_size = tdata->mem_size ? : MEM_SIZE; + u64 mem_size = tdata->mem_size ? : MEM_SIZE; struct vm_data *data; struct sync_area *sync; struct timespec tstart; @@ -924,8 +924,8 @@ static void help(char *name, struct test_args *targs) static bool check_memory_sizes(void) { - uint32_t host_page_size = getpagesize(); - uint32_t guest_page_size = vm_guest_mode_params[VM_MODE_DEFAULT].page_size; + u32 host_page_size = getpagesize(); + u32 guest_page_size = vm_guest_mode_params[VM_MODE_DEFAULT].page_size; if (host_page_size > SZ_64K || guest_page_size > SZ_64K) { pr_info("Unsupported page size on host (0x%x) or guest (0x%x)\n", @@ -961,7 +961,7 @@ static bool check_memory_sizes(void) static bool parse_args(int argc, char *argv[], struct test_args *targs) { - uint32_t max_mem_slots; + u32 max_mem_slots; int opt; while ((opt = getopt(argc, argv, "hvdqs:f:e:l:r:")) != -1) { @@ -1040,8 +1040,8 @@ static bool parse_args(int argc, char *argv[], struct test_result { struct timespec slot_runtime, guest_runtime, iter_runtime; - int64_t slottimens, runtimens; - uint64_t nloops; + s64 slottimens, 
runtimens; + u64 nloops; }; static bool test_loop(const struct test_data *data, @@ -1049,7 +1049,7 @@ static bool test_loop(const struct test_data *data, struct test_result *rbestslottime, struct test_result *rbestruntime) { - uint64_t maxslots; + u64 maxslots; struct test_result result = {}; if (!test_execute(targs->nslots, &maxslots, targs->seconds, data, diff --git a/tools/testing/selftests/kvm/mmu_stress_test.c b/tools/testing/selftests/kvm/mmu_stress_test.c index 51c070556f3e..54d281419d31 100644 --- a/tools/testing/selftests/kvm/mmu_stress_test.c +++ b/tools/testing/selftests/kvm/mmu_stress_test.c @@ -20,19 +20,19 @@ static bool mprotect_ro_done; static bool all_vcpus_hit_ro_fault; -static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride) +static void guest_code(u64 start_gpa, u64 end_gpa, u64 stride) { - uint64_t gpa; + gpa_t gpa; int i; for (i = 0; i < 2; i++) { for (gpa = start_gpa; gpa < end_gpa; gpa += stride) - vcpu_arch_put_guest(*((volatile uint64_t *)gpa), gpa); + vcpu_arch_put_guest(*((volatile u64 *)gpa), gpa); GUEST_SYNC(i); } for (gpa = start_gpa; gpa < end_gpa; gpa += stride) - *((volatile uint64_t *)gpa); + *((volatile u64 *)gpa); GUEST_SYNC(2); /* @@ -55,7 +55,7 @@ static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride) #elif defined(__aarch64__) asm volatile("str %0, [%0]" :: "r" (gpa) : "memory"); #else - vcpu_arch_put_guest(*((volatile uint64_t *)gpa), gpa); + vcpu_arch_put_guest(*((volatile u64 *)gpa), gpa); #endif } while (!READ_ONCE(mprotect_ro_done) || !READ_ONCE(all_vcpus_hit_ro_fault)); @@ -68,7 +68,7 @@ static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride) #endif for (gpa = start_gpa; gpa < end_gpa; gpa += stride) - vcpu_arch_put_guest(*((volatile uint64_t *)gpa), gpa); + vcpu_arch_put_guest(*((volatile u64 *)gpa), gpa); GUEST_SYNC(4); GUEST_ASSERT(0); @@ -76,8 +76,8 @@ static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride) struct vcpu_info { 
struct kvm_vcpu *vcpu; - uint64_t start_gpa; - uint64_t end_gpa; + u64 start_gpa; + u64 end_gpa; }; static int nr_vcpus; @@ -203,10 +203,10 @@ static void *vcpu_worker(void *data) } static pthread_t *spawn_workers(struct kvm_vm *vm, struct kvm_vcpu **vcpus, - uint64_t start_gpa, uint64_t end_gpa) + u64 start_gpa, u64 end_gpa) { struct vcpu_info *info; - uint64_t gpa, nr_bytes; + gpa_t gpa, nr_bytes; pthread_t *threads; int i; @@ -217,7 +217,7 @@ static pthread_t *spawn_workers(struct kvm_vm *vm, struct kvm_vcpu **vcpus, TEST_ASSERT(info, "Failed to allocate vCPU gpa ranges"); nr_bytes = ((end_gpa - start_gpa) / nr_vcpus) & - ~((uint64_t)vm->page_size - 1); + ~((u64)vm->page_size - 1); TEST_ASSERT(nr_bytes, "C'mon, no way you have %d CPUs", nr_vcpus); for (i = 0, gpa = start_gpa; i < nr_vcpus; i++, gpa += nr_bytes) { @@ -278,11 +278,11 @@ int main(int argc, char *argv[]) * just below the 4gb boundary. This test could create memory at * 1gb-3gb,but it's simpler to skip straight to 4gb. */ - const uint64_t start_gpa = SZ_4G; + const u64 start_gpa = SZ_4G; const int first_slot = 1; struct timespec time_start, time_run1, time_reset, time_run2, time_ro, time_rw; - uint64_t max_gpa, gpa, slot_size, max_mem, i; + u64 max_gpa, gpa, slot_size, max_mem, i; int max_slots, slot, opt, fd; bool hugepages = false; struct kvm_vcpu **vcpus; @@ -347,7 +347,7 @@ int main(int argc, char *argv[]) /* Pre-fault the memory to avoid taking mmap_sem on guest page faults. 
*/ for (i = 0; i < slot_size; i += vm->page_size) - ((uint8_t *)mem)[i] = 0xaa; + ((u8 *)mem)[i] = 0xaa; gpa = 0; for (slot = first_slot; slot < max_slots; slot++) { diff --git a/tools/testing/selftests/kvm/pre_fault_memory_test.c b/tools/testing/selftests/kvm/pre_fault_memory_test.c index f3de0386ba7b..fcb57fd034e6 100644 --- a/tools/testing/selftests/kvm/pre_fault_memory_test.c +++ b/tools/testing/selftests/kvm/pre_fault_memory_test.c @@ -17,13 +17,13 @@ #define TEST_NPAGES (TEST_SIZE / PAGE_SIZE) #define TEST_SLOT 10 -static void guest_code(uint64_t base_gva) +static void guest_code(u64 base_gva) { - volatile uint64_t val __used; + volatile u64 val __used; int i; for (i = 0; i < TEST_NPAGES; i++) { - uint64_t *src = (uint64_t *)(base_gva + i * PAGE_SIZE); + u64 *src = (u64 *)(base_gva + i * PAGE_SIZE); val = *src; } @@ -33,8 +33,8 @@ static void guest_code(uint64_t base_gva) struct slot_worker_data { struct kvm_vm *vm; - u64 gpa; - uint32_t flags; + gpa_t gpa; + u32 flags; bool worker_ready; bool prefault_ready; bool recreate_slot; @@ -161,7 +161,7 @@ static void pre_fault_memory(struct kvm_vcpu *vcpu, u64 base_gpa, u64 offset, static void __test_pre_fault_memory(unsigned long vm_type, bool private) { - uint64_t gpa, gva, alignment, guest_page_size; + gpa_t gpa, gva, alignment, guest_page_size; const struct vm_shape shape = { .mode = VM_MODE_DEFAULT, .type = vm_type, diff --git a/tools/testing/selftests/kvm/riscv/arch_timer.c b/tools/testing/selftests/kvm/riscv/arch_timer.c index f962fefc48fa..d67c918ee310 100644 --- a/tools/testing/selftests/kvm/riscv/arch_timer.c +++ b/tools/testing/selftests/kvm/riscv/arch_timer.c @@ -17,9 +17,9 @@ static int timer_irq = IRQ_S_TIMER; static void guest_irq_handler(struct pt_regs *regs) { - uint64_t xcnt, xcnt_diff_us, cmp; + u64 xcnt, xcnt_diff_us, cmp; unsigned int intid = regs->cause & ~CAUSE_IRQ_FLAG; - uint32_t cpu = guest_get_vcpuid(); + u32 cpu = guest_get_vcpuid(); struct test_vcpu_shared_data *shared_data = 
&vcpu_shared_data[cpu]; timer_irq_disable(); @@ -40,7 +40,7 @@ static void guest_irq_handler(struct pt_regs *regs) static void guest_run(struct test_vcpu_shared_data *shared_data) { - uint32_t irq_iter, config_iter; + u32 irq_iter, config_iter; shared_data->nr_iter = 0; shared_data->guest_stage = 0; @@ -66,7 +66,7 @@ static void guest_run(struct test_vcpu_shared_data *shared_data) static void guest_code(void) { - uint32_t cpu = guest_get_vcpuid(); + u32 cpu = guest_get_vcpuid(); struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu]; timer_irq_disable(); diff --git a/tools/testing/selftests/kvm/riscv/ebreak_test.c b/tools/testing/selftests/kvm/riscv/ebreak_test.c index 739d17befb5a..3f44b045a22e 100644 --- a/tools/testing/selftests/kvm/riscv/ebreak_test.c +++ b/tools/testing/selftests/kvm/riscv/ebreak_test.c @@ -8,10 +8,10 @@ #include "kvm_util.h" #include "ucall_common.h" -#define LABEL_ADDRESS(v) ((uint64_t)&(v)) +#define LABEL_ADDRESS(v) ((u64)&(v)) extern unsigned char sw_bp_1, sw_bp_2; -static uint64_t sw_bp_addr; +static u64 sw_bp_addr; static void guest_code(void) { @@ -37,7 +37,7 @@ int main(void) { struct kvm_vm *vm; struct kvm_vcpu *vcpu; - uint64_t pc; + u64 pc; struct kvm_guest_debug debug = { .control = KVM_GUESTDBG_ENABLE, }; diff --git a/tools/testing/selftests/kvm/riscv/get-reg-list.c b/tools/testing/selftests/kvm/riscv/get-reg-list.c index 8d6b951434eb..8d6fdb5d38b8 100644 --- a/tools/testing/selftests/kvm/riscv/get-reg-list.c +++ b/tools/testing/selftests/kvm/riscv/get-reg-list.c @@ -162,7 +162,7 @@ bool check_reject_set(int err) } static int override_vector_reg_size(struct kvm_vcpu *vcpu, struct vcpu_reg_sublist *s, - uint64_t feature) + u64 feature) { unsigned long vlenb_reg = 0; int rc; @@ -197,7 +197,7 @@ void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c) { unsigned long isa_ext_state[KVM_RISCV_ISA_EXT_MAX] = { 0 }; struct vcpu_reg_sublist *s; - uint64_t feature; + u64 feature; int rc; for (int i = 0; i < 
KVM_RISCV_ISA_EXT_MAX; i++) diff --git a/tools/testing/selftests/kvm/riscv/sbi_pmu_test.c b/tools/testing/selftests/kvm/riscv/sbi_pmu_test.c index cec1621ace23..e56a3dd6a51e 100644 --- a/tools/testing/selftests/kvm/riscv/sbi_pmu_test.c +++ b/tools/testing/selftests/kvm/riscv/sbi_pmu_test.c @@ -24,7 +24,7 @@ union sbi_pmu_ctr_info ctrinfo_arr[RISCV_MAX_PMU_COUNTERS]; /* Snapshot shared memory data */ #define PMU_SNAPSHOT_GPA_BASE BIT(30) static void *snapshot_gva; -static vm_paddr_t snapshot_gpa; +static gpa_t snapshot_gpa; static int vcpu_shared_irq_count; static int counter_in_use; @@ -86,7 +86,7 @@ unsigned long pmu_csr_read_num(int csr_num) #undef switchcase_csr_read } -static inline void dummy_func_loop(uint64_t iter) +static inline void dummy_func_loop(u64 iter) { int i = 0; @@ -259,7 +259,7 @@ static inline void verify_sbi_requirement_assert(void) __GUEST_ASSERT(0, "SBI implementation version doesn't support PMU Snapshot"); } -static void snapshot_set_shmem(vm_paddr_t gpa, unsigned long flags) +static void snapshot_set_shmem(gpa_t gpa, unsigned long flags) { unsigned long lo = (unsigned long)gpa; #if __riscv_xlen == 32 @@ -610,7 +610,7 @@ static void test_vm_setup_snapshot_mem(struct kvm_vm *vm, struct kvm_vcpu *vcpu) virt_map(vm, PMU_SNAPSHOT_GPA_BASE, PMU_SNAPSHOT_GPA_BASE, 1); snapshot_gva = (void *)(PMU_SNAPSHOT_GPA_BASE); - snapshot_gpa = addr_gva2gpa(vcpu->vm, (vm_vaddr_t)snapshot_gva); + snapshot_gpa = addr_gva2gpa(vcpu->vm, (gva_t)snapshot_gva); sync_global_to_guest(vcpu->vm, snapshot_gva); sync_global_to_guest(vcpu->vm, snapshot_gpa); } diff --git a/tools/testing/selftests/kvm/s390/debug_test.c b/tools/testing/selftests/kvm/s390/debug_test.c index ad8095968601..751c61c0f056 100644 --- a/tools/testing/selftests/kvm/s390/debug_test.c +++ b/tools/testing/selftests/kvm/s390/debug_test.c @@ -17,7 +17,7 @@ asm("int_handler:\n" "j .\n"); static struct kvm_vm *test_step_int_1(struct kvm_vcpu **vcpu, void *guest_code, - size_t new_psw_off, uint64_t *new_psw) 
+ size_t new_psw_off, u64 *new_psw) { struct kvm_guest_debug debug = {}; struct kvm_regs regs; @@ -27,7 +27,7 @@ static struct kvm_vm *test_step_int_1(struct kvm_vcpu **vcpu, void *guest_code, vm = vm_create_with_one_vcpu(vcpu, guest_code); lowcore = addr_gpa2hva(vm, 0); new_psw[0] = (*vcpu)->run->psw_mask; - new_psw[1] = (uint64_t)int_handler; + new_psw[1] = (u64)int_handler; memcpy(lowcore + new_psw_off, new_psw, 16); vcpu_regs_get(*vcpu, ®s); regs.gprs[2] = -1; @@ -42,7 +42,7 @@ static struct kvm_vm *test_step_int_1(struct kvm_vcpu **vcpu, void *guest_code, static void test_step_int(void *guest_code, size_t new_psw_off) { struct kvm_vcpu *vcpu; - uint64_t new_psw[2]; + u64 new_psw[2]; struct kvm_vm *vm; vm = test_step_int_1(&vcpu, guest_code, new_psw_off, new_psw); @@ -79,7 +79,7 @@ static void test_step_pgm_diag(void) .u.pgm.code = PGM_SPECIFICATION, }; struct kvm_vcpu *vcpu; - uint64_t new_psw[2]; + u64 new_psw[2]; struct kvm_vm *vm; vm = test_step_int_1(&vcpu, test_step_pgm_diag_guest_code, diff --git a/tools/testing/selftests/kvm/s390/irq_routing.c b/tools/testing/selftests/kvm/s390/irq_routing.c index 7819a0af19a8..f3839284ac08 100644 --- a/tools/testing/selftests/kvm/s390/irq_routing.c +++ b/tools/testing/selftests/kvm/s390/irq_routing.c @@ -27,7 +27,7 @@ static void test(void) struct kvm_irq_routing *routing; struct kvm_vcpu *vcpu; struct kvm_vm *vm; - vm_paddr_t mem; + gpa_t mem; int ret; struct kvm_irq_routing_entry ue = { diff --git a/tools/testing/selftests/kvm/s390/memop.c b/tools/testing/selftests/kvm/s390/memop.c index 4374b4cd2a80..0244848621b3 100644 --- a/tools/testing/selftests/kvm/s390/memop.c +++ b/tools/testing/selftests/kvm/s390/memop.c @@ -34,7 +34,7 @@ enum mop_access_mode { struct mop_desc { uintptr_t gaddr; uintptr_t gaddr_v; - uint64_t set_flags; + u64 set_flags; unsigned int f_check : 1; unsigned int f_inject : 1; unsigned int f_key : 1; @@ -42,19 +42,19 @@ struct mop_desc { unsigned int _set_flags : 1; unsigned int _sida_offset : 1; 
unsigned int _ar : 1; - uint32_t size; + u32 size; enum mop_target target; enum mop_access_mode mode; void *buf; - uint32_t sida_offset; + u32 sida_offset; void *old; - uint8_t old_value[16]; + u8 old_value[16]; bool *cmpxchg_success; - uint8_t ar; - uint8_t key; + u8 ar; + u8 key; }; -const uint8_t NO_KEY = 0xff; +const u8 NO_KEY = 0xff; static struct kvm_s390_mem_op ksmo_from_desc(struct mop_desc *desc) { @@ -85,7 +85,7 @@ static struct kvm_s390_mem_op ksmo_from_desc(struct mop_desc *desc) ksmo.op = KVM_S390_MEMOP_ABSOLUTE_WRITE; if (desc->mode == CMPXCHG) { ksmo.op = KVM_S390_MEMOP_ABSOLUTE_CMPXCHG; - ksmo.old_addr = (uint64_t)desc->old; + ksmo.old_addr = (u64)desc->old; memcpy(desc->old_value, desc->old, desc->size); } break; @@ -230,8 +230,8 @@ static void memop_ioctl(struct test_info info, struct kvm_s390_mem_op *ksmo, #define CR0_FETCH_PROTECTION_OVERRIDE (1UL << (63 - 38)) #define CR0_STORAGE_PROTECTION_OVERRIDE (1UL << (63 - 39)) -static uint8_t __aligned(PAGE_SIZE) mem1[65536]; -static uint8_t __aligned(PAGE_SIZE) mem2[65536]; +static u8 __aligned(PAGE_SIZE) mem1[65536]; +static u8 __aligned(PAGE_SIZE) mem2[65536]; struct test_default { struct kvm_vm *kvm_vm; @@ -296,7 +296,7 @@ static void prepare_mem12(void) TEST_ASSERT(!memcmp(p1, p2, size), "Memory contents do not match!") static void default_write_read(struct test_info copy_cpu, struct test_info mop_cpu, - enum mop_target mop_target, uint32_t size, uint8_t key) + enum mop_target mop_target, u32 size, u8 key) { prepare_mem12(); CHECK_N_DO(MOP, mop_cpu, mop_target, WRITE, mem1, size, @@ -308,7 +308,7 @@ static void default_write_read(struct test_info copy_cpu, struct test_info mop_c } static void default_read(struct test_info copy_cpu, struct test_info mop_cpu, - enum mop_target mop_target, uint32_t size, uint8_t key) + enum mop_target mop_target, u32 size, u8 key) { prepare_mem12(); CHECK_N_DO(MOP, mop_cpu, mop_target, WRITE, mem1, size, GADDR_V(mem1)); @@ -318,12 +318,12 @@ static void 
default_read(struct test_info copy_cpu, struct test_info mop_cpu, ASSERT_MEM_EQ(mem1, mem2, size); } -static void default_cmpxchg(struct test_default *test, uint8_t key) +static void default_cmpxchg(struct test_default *test, u8 key) { for (int size = 1; size <= 16; size *= 2) { for (int offset = 0; offset < 16; offset += size) { - uint8_t __aligned(16) new[16] = {}; - uint8_t __aligned(16) old[16]; + u8 __aligned(16) new[16] = {}; + u8 __aligned(16) old[16]; bool succ; prepare_mem12(); @@ -400,7 +400,7 @@ static void test_copy_access_register(void) kvm_vm_free(t.kvm_vm); } -static void set_storage_key_range(void *addr, size_t len, uint8_t key) +static void set_storage_key_range(void *addr, size_t len, u8 key) { uintptr_t _addr, abs, i; int not_mapped = 0; @@ -483,13 +483,13 @@ static __uint128_t cut_to_size(int size, __uint128_t val) { switch (size) { case 1: - return (uint8_t)val; + return (u8)val; case 2: - return (uint16_t)val; + return (u16)val; case 4: - return (uint32_t)val; + return (u32)val; case 8: - return (uint64_t)val; + return (u64)val; case 16: return val; } @@ -501,10 +501,10 @@ static bool popcount_eq(__uint128_t a, __uint128_t b) { unsigned int count_a, count_b; - count_a = __builtin_popcountl((uint64_t)(a >> 64)) + - __builtin_popcountl((uint64_t)a); - count_b = __builtin_popcountl((uint64_t)(b >> 64)) + - __builtin_popcountl((uint64_t)b); + count_a = __builtin_popcountl((u64)(a >> 64)) + + __builtin_popcountl((u64)a); + count_b = __builtin_popcountl((u64)(b >> 64)) + + __builtin_popcountl((u64)b); return count_a == count_b; } @@ -553,7 +553,7 @@ static __uint128_t permutate_bits(bool guest, int i, int size, __uint128_t old) if (swap) { int i, j; __uint128_t new; - uint8_t byte0, byte1; + u8 byte0, byte1; rand = rand * 3 + 1; i = rand % size; @@ -585,28 +585,28 @@ static bool _cmpxchg(int size, void *target, __uint128_t *old_addr, __uint128_t switch (size) { case 4: { - uint32_t old = *old_addr; + u32 old = *old_addr; asm volatile ("cs 
%[old],%[new],%[address]" : [old] "+d" (old), - [address] "+Q" (*(uint32_t *)(target)) - : [new] "d" ((uint32_t)new) + [address] "+Q" (*(u32 *)(target)) + : [new] "d" ((u32)new) : "cc" ); - ret = old == (uint32_t)*old_addr; + ret = old == (u32)*old_addr; *old_addr = old; return ret; } case 8: { - uint64_t old = *old_addr; + u64 old = *old_addr; asm volatile ("csg %[old],%[new],%[address]" : [old] "+d" (old), - [address] "+Q" (*(uint64_t *)(target)) - : [new] "d" ((uint64_t)new) + [address] "+Q" (*(u64 *)(target)) + : [new] "d" ((u64)new) : "cc" ); - ret = old == (uint64_t)*old_addr; + ret = old == (u64)*old_addr; *old_addr = old; return ret; } @@ -811,10 +811,10 @@ static void test_errors_cmpxchg_key(void) static void test_termination(void) { struct test_default t = test_default_init(guest_error_key); - uint64_t prefix; - uint64_t teid; - uint64_t teid_mask = BIT(63 - 56) | BIT(63 - 60) | BIT(63 - 61); - uint64_t psw[2]; + u64 prefix; + u64 teid; + u64 teid_mask = BIT(63 - 56) | BIT(63 - 60) | BIT(63 - 61); + u64 psw[2]; HOST_SYNC(t.vcpu, STAGE_INITED); HOST_SYNC(t.vcpu, STAGE_SKEYS_SET); @@ -855,7 +855,7 @@ static void test_errors_key_storage_prot_override(void) kvm_vm_free(t.kvm_vm); } -const uint64_t last_page_addr = -PAGE_SIZE; +const u64 last_page_addr = -PAGE_SIZE; static void guest_copy_key_fetch_prot_override(void) { @@ -878,10 +878,10 @@ static void guest_copy_key_fetch_prot_override(void) static void test_copy_key_fetch_prot_override(void) { struct test_default t = test_default_init(guest_copy_key_fetch_prot_override); - vm_vaddr_t guest_0_page, guest_last_page; + gva_t guest_0_page, guest_last_page; - guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0); - guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr); + guest_0_page = vm_alloc(t.kvm_vm, PAGE_SIZE, 0); + guest_last_page = vm_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr); if (guest_0_page != 0 || guest_last_page != last_page_addr) { print_skip("did not allocate guest pages at 
required positions"); goto out; @@ -917,10 +917,10 @@ out: static void test_errors_key_fetch_prot_override_not_enabled(void) { struct test_default t = test_default_init(guest_copy_key_fetch_prot_override); - vm_vaddr_t guest_0_page, guest_last_page; + gva_t guest_0_page, guest_last_page; - guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0); - guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr); + guest_0_page = vm_alloc(t.kvm_vm, PAGE_SIZE, 0); + guest_last_page = vm_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr); if (guest_0_page != 0 || guest_last_page != last_page_addr) { print_skip("did not allocate guest pages at required positions"); goto out; @@ -938,10 +938,10 @@ out: static void test_errors_key_fetch_prot_override_enabled(void) { struct test_default t = test_default_init(guest_copy_key_fetch_prot_override); - vm_vaddr_t guest_0_page, guest_last_page; + gva_t guest_0_page, guest_last_page; - guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0); - guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr); + guest_0_page = vm_alloc(t.kvm_vm, PAGE_SIZE, 0); + guest_last_page = vm_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr); if (guest_0_page != 0 || guest_last_page != last_page_addr) { print_skip("did not allocate guest pages at required positions"); goto out; diff --git a/tools/testing/selftests/kvm/s390/resets.c b/tools/testing/selftests/kvm/s390/resets.c index b58f75b381e5..e3c7a2f148f9 100644 --- a/tools/testing/selftests/kvm/s390/resets.c +++ b/tools/testing/selftests/kvm/s390/resets.c @@ -20,7 +20,7 @@ struct kvm_s390_irq buf[ARBITRARY_NON_ZERO_VCPU_ID + LOCAL_IRQS]; -static uint8_t regs_null[512]; +static u8 regs_null[512]; static void guest_code_initial(void) { @@ -57,9 +57,9 @@ static void guest_code_initial(void) ); } -static void test_one_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t value) +static void test_one_reg(struct kvm_vcpu *vcpu, u64 id, u64 value) { - uint64_t eval_reg; + u64 eval_reg; eval_reg = 
vcpu_get_reg(vcpu, id); TEST_ASSERT(eval_reg == value, "value == 0x%lx", value); diff --git a/tools/testing/selftests/kvm/s390/shared_zeropage_test.c b/tools/testing/selftests/kvm/s390/shared_zeropage_test.c index bba0d9a6dcc8..a9e5a01200b8 100644 --- a/tools/testing/selftests/kvm/s390/shared_zeropage_test.c +++ b/tools/testing/selftests/kvm/s390/shared_zeropage_test.c @@ -13,7 +13,7 @@ #include "kselftest.h" #include "ucall_common.h" -static void set_storage_key(void *addr, uint8_t skey) +static void set_storage_key(void *addr, u8 skey) { asm volatile("sske %0,%1" : : "d" (skey), "a" (addr)); } diff --git a/tools/testing/selftests/kvm/s390/tprot.c b/tools/testing/selftests/kvm/s390/tprot.c index 12d5e1cb62e3..8054d2b178f0 100644 --- a/tools/testing/selftests/kvm/s390/tprot.c +++ b/tools/testing/selftests/kvm/s390/tprot.c @@ -14,12 +14,12 @@ #define CR0_FETCH_PROTECTION_OVERRIDE (1UL << (63 - 38)) #define CR0_STORAGE_PROTECTION_OVERRIDE (1UL << (63 - 39)) -static __aligned(PAGE_SIZE) uint8_t pages[2][PAGE_SIZE]; -static uint8_t *const page_store_prot = pages[0]; -static uint8_t *const page_fetch_prot = pages[1]; +static __aligned(PAGE_SIZE) u8 pages[2][PAGE_SIZE]; +static u8 *const page_store_prot = pages[0]; +static u8 *const page_fetch_prot = pages[1]; /* Nonzero return value indicates that address not mapped */ -static int set_storage_key(void *addr, uint8_t key) +static int set_storage_key(void *addr, u8 key) { int not_mapped = 0; @@ -44,9 +44,9 @@ enum permission { TRANSL_UNAVAIL = 3, }; -static enum permission test_protection(void *addr, uint8_t key) +static enum permission test_protection(void *addr, u8 key) { - uint64_t mask; + u64 mask; asm volatile ( "tprot %[addr], 0(%[key])\n" @@ -72,7 +72,7 @@ enum stage { struct test { enum stage stage; void *addr; - uint8_t key; + u8 key; enum permission expected; } tests[] = { /* @@ -146,7 +146,7 @@ static enum stage perform_next_stage(int *i, bool mapped_0) /* * Some fetch protection override tests require that 
page 0 * be mapped, however, when the hosts tries to map that page via - * vm_vaddr_alloc, it may happen that some other page gets mapped + * vm_alloc, it may happen that some other page gets mapped * instead. * In order to skip these tests we detect this inside the guest */ @@ -207,7 +207,7 @@ int main(int argc, char *argv[]) struct kvm_vcpu *vcpu; struct kvm_vm *vm; struct kvm_run *run; - vm_vaddr_t guest_0_page; + gva_t guest_0_page; ksft_print_header(); ksft_set_plan(STAGE_END); @@ -216,10 +216,10 @@ int main(int argc, char *argv[]) run = vcpu->run; HOST_SYNC(vcpu, STAGE_INIT_SIMPLE); - mprotect(addr_gva2hva(vm, (vm_vaddr_t)pages), PAGE_SIZE * 2, PROT_READ); + mprotect(addr_gva2hva(vm, (gva_t)pages), PAGE_SIZE * 2, PROT_READ); HOST_SYNC(vcpu, TEST_SIMPLE); - guest_0_page = vm_vaddr_alloc(vm, PAGE_SIZE, 0); + guest_0_page = vm_alloc(vm, PAGE_SIZE, 0); if (guest_0_page != 0) { /* Use NO_TAP so we don't get a PASS print */ HOST_SYNC_NO_TAP(vcpu, STAGE_INIT_FETCH_PROT_OVERRIDE); @@ -229,7 +229,7 @@ int main(int argc, char *argv[]) HOST_SYNC(vcpu, STAGE_INIT_FETCH_PROT_OVERRIDE); } if (guest_0_page == 0) - mprotect(addr_gva2hva(vm, (vm_vaddr_t)0), PAGE_SIZE, PROT_READ); + mprotect(addr_gva2hva(vm, (gva_t)0), PAGE_SIZE, PROT_READ); run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE; run->kvm_dirty_regs = KVM_SYNC_CRS; HOST_SYNC(vcpu, TEST_FETCH_PROT_OVERRIDE); diff --git a/tools/testing/selftests/kvm/s390/ucontrol_test.c b/tools/testing/selftests/kvm/s390/ucontrol_test.c index 50bc1c38225a..b8c6f37b53e0 100644 --- a/tools/testing/selftests/kvm/s390/ucontrol_test.c +++ b/tools/testing/selftests/kvm/s390/ucontrol_test.c @@ -111,7 +111,7 @@ FIXTURE(uc_kvm) uintptr_t base_hva; uintptr_t code_hva; int kvm_run_size; - vm_paddr_t pgd; + gpa_t pgd; void *vm_mem; int vcpu_fd; int kvm_fd; @@ -269,7 +269,7 @@ TEST(uc_cap_hpage) } /* calculate host virtual addr from guest physical addr */ -static void *gpa2hva(FIXTURE_DATA(uc_kvm) *self, u64 gpa) +static void 
*gpa2hva(FIXTURE_DATA(uc_kvm) *self, gpa_t gpa) { return (void *)(self->base_hva - self->base_gpa + gpa); } @@ -571,7 +571,7 @@ TEST_F(uc_kvm, uc_skey) { struct kvm_s390_sie_block *sie_block = self->sie_block; struct kvm_sync_regs *sync_regs = &self->run->s.regs; - u64 test_vaddr = VM_MEM_SIZE - (SZ_1M / 2); + u64 test_gva = VM_MEM_SIZE - (SZ_1M / 2); struct kvm_run *run = self->run; const u8 skeyvalue = 0x34; @@ -583,7 +583,7 @@ TEST_F(uc_kvm, uc_skey) /* set register content for test_skey_asm to access not mapped memory */ sync_regs->gprs[1] = skeyvalue; sync_regs->gprs[5] = self->base_gpa; - sync_regs->gprs[6] = test_vaddr; + sync_regs->gprs[6] = test_gva; run->kvm_dirty_regs |= KVM_SYNC_GPRS; /* DAT disabled + 64 bit mode */ diff --git a/tools/testing/selftests/kvm/set_memory_region_test.c b/tools/testing/selftests/kvm/set_memory_region_test.c index a398dc3a8c4b..9b919a231c93 100644 --- a/tools/testing/selftests/kvm/set_memory_region_test.c +++ b/tools/testing/selftests/kvm/set_memory_region_test.c @@ -30,19 +30,19 @@ #define MEM_REGION_GPA 0xc0000000 #define MEM_REGION_SLOT 10 -static const uint64_t MMIO_VAL = 0xbeefull; +static const u64 MMIO_VAL = 0xbeefull; -extern const uint64_t final_rip_start; -extern const uint64_t final_rip_end; +extern const u64 final_rip_start; +extern const u64 final_rip_end; static sem_t vcpu_ready; -static inline uint64_t guest_spin_on_val(uint64_t spin_val) +static inline u64 guest_spin_on_val(u64 spin_val) { - uint64_t val; + u64 val; do { - val = READ_ONCE(*((uint64_t *)MEM_REGION_GPA)); + val = READ_ONCE(*((u64 *)MEM_REGION_GPA)); } while (val == spin_val); GUEST_SYNC(0); @@ -54,7 +54,7 @@ static void *vcpu_worker(void *data) struct kvm_vcpu *vcpu = data; struct kvm_run *run = vcpu->run; struct ucall uc; - uint64_t cmd; + u64 cmd; /* * Loop until the guest is done. 
Re-enter the guest on all MMIO exits, @@ -111,8 +111,8 @@ static struct kvm_vm *spawn_vm(struct kvm_vcpu **vcpu, pthread_t *vcpu_thread, void *guest_code) { struct kvm_vm *vm; - uint64_t *hva; - uint64_t gpa; + u64 *hva; + gpa_t gpa; vm = vm_create_with_one_vcpu(vcpu, guest_code); @@ -144,7 +144,7 @@ static struct kvm_vm *spawn_vm(struct kvm_vcpu **vcpu, pthread_t *vcpu_thread, static void guest_code_move_memory_region(void) { - uint64_t val; + u64 val; GUEST_SYNC(0); @@ -180,7 +180,7 @@ static void test_move_memory_region(bool disable_slot_zap_quirk) pthread_t vcpu_thread; struct kvm_vcpu *vcpu; struct kvm_vm *vm; - uint64_t *hva; + u64 *hva; vm = spawn_vm(&vcpu, &vcpu_thread, guest_code_move_memory_region); @@ -224,7 +224,7 @@ static void test_move_memory_region(bool disable_slot_zap_quirk) static void guest_code_delete_memory_region(void) { struct desc_ptr idt; - uint64_t val; + u64 val; /* * Clobber the IDT so that a #PF due to the memory region being deleted @@ -345,8 +345,8 @@ static void test_zero_memory_regions(void) static void test_invalid_memory_region_flags(void) { - uint32_t supported_flags = KVM_MEM_LOG_DIRTY_PAGES; - const uint32_t v2_only_flags = KVM_MEM_GUEST_MEMFD; + u32 supported_flags = KVM_MEM_LOG_DIRTY_PAGES; + const u32 v2_only_flags = KVM_MEM_GUEST_MEMFD; struct kvm_vm *vm; int r, i; @@ -410,8 +410,8 @@ static void test_add_max_memory_regions(void) { int ret; struct kvm_vm *vm; - uint32_t max_mem_slots; - uint32_t slot; + u32 max_mem_slots; + u32 slot; void *mem, *mem_aligned, *mem_extra; size_t alignment = 1; @@ -434,16 +434,16 @@ static void test_add_max_memory_regions(void) for (slot = 0; slot < max_mem_slots; slot++) vm_set_user_memory_region(vm, slot, 0, - ((uint64_t)slot * MEM_REGION_SIZE), + ((u64)slot * MEM_REGION_SIZE), MEM_REGION_SIZE, - mem_aligned + (uint64_t)slot * MEM_REGION_SIZE); + mem_aligned + (u64)slot * MEM_REGION_SIZE); /* Check it cannot be added memory slots beyond the limit */ mem_extra = kvm_mmap(MEM_REGION_SIZE, 
PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1); ret = __vm_set_user_memory_region(vm, max_mem_slots, 0, - (uint64_t)max_mem_slots * MEM_REGION_SIZE, + (u64)max_mem_slots * MEM_REGION_SIZE, MEM_REGION_SIZE, mem_extra); TEST_ASSERT(ret == -1 && errno == EINVAL, "Adding one more memory slot should fail with EINVAL"); @@ -556,7 +556,7 @@ static void guest_code_mmio_during_vectoring(void) set_idt(&idt_desc); /* Generate a #GP by dereferencing a non-canonical address */ - *((uint8_t *)NONCANONICAL) = 0x1; + *((u8 *)NONCANONICAL) = 0x1; GUEST_ASSERT(0); } diff --git a/tools/testing/selftests/kvm/steal_time.c b/tools/testing/selftests/kvm/steal_time.c index efe56a10d13e..7df2bc8eec02 100644 --- a/tools/testing/selftests/kvm/steal_time.c +++ b/tools/testing/selftests/kvm/steal_time.c @@ -25,7 +25,7 @@ #define ST_GPA_BASE (1 << 30) static void *st_gva[NR_VCPUS]; -static uint64_t guest_stolen_time[NR_VCPUS]; +static u64 guest_stolen_time[NR_VCPUS]; #if defined(__x86_64__) @@ -42,9 +42,9 @@ static void check_status(struct kvm_steal_time *st) static void guest_code(int cpu) { struct kvm_steal_time *st = st_gva[cpu]; - uint32_t version; + u32 version; - GUEST_ASSERT_EQ(rdmsr(MSR_KVM_STEAL_TIME), ((uint64_t)st_gva[cpu] | KVM_MSR_ENABLED)); + GUEST_ASSERT_EQ(rdmsr(MSR_KVM_STEAL_TIME), ((u64)st_gva[cpu] | KVM_MSR_ENABLED)); memset(st, 0, sizeof(*st)); GUEST_SYNC(0); @@ -67,7 +67,7 @@ static bool is_steal_time_supported(struct kvm_vcpu *vcpu) return kvm_cpu_has(X86_FEATURE_KVM_STEAL_TIME); } -static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i) +static void steal_time_init(struct kvm_vcpu *vcpu, u32 i) { /* ST_GPA_BASE is identity mapped */ st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE); @@ -76,7 +76,7 @@ static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i) vcpu_set_msr(vcpu, MSR_KVM_STEAL_TIME, (ulong)st_gva[i] | KVM_MSR_ENABLED); } -static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx) +static void steal_time_dump(struct kvm_vm 
*vm, u32 vcpu_idx) { struct kvm_steal_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]); @@ -118,12 +118,12 @@ static void check_steal_time_uapi(void) #define PV_TIME_ST 0xc5000021 struct st_time { - uint32_t rev; - uint32_t attr; - uint64_t st_time; + u32 rev; + u32 attr; + u64 st_time; }; -static int64_t smccc(uint32_t func, uint64_t arg) +static s64 smccc(u32 func, u64 arg) { struct arm_smccc_res res; @@ -140,7 +140,7 @@ static void check_status(struct st_time *st) static void guest_code(int cpu) { struct st_time *st; - int64_t status; + s64 status; status = smccc(SMCCC_ARCH_FEATURES, PV_TIME_FEATURES); GUEST_ASSERT_EQ(status, 0); @@ -175,15 +175,15 @@ static bool is_steal_time_supported(struct kvm_vcpu *vcpu) return !__vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &dev); } -static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i) +static void steal_time_init(struct kvm_vcpu *vcpu, u32 i) { struct kvm_vm *vm = vcpu->vm; - uint64_t st_ipa; + u64 st_ipa; struct kvm_device_attr dev = { .group = KVM_ARM_VCPU_PVTIME_CTRL, .attr = KVM_ARM_VCPU_PVTIME_IPA, - .addr = (uint64_t)&st_ipa, + .addr = (u64)&st_ipa, }; /* ST_GPA_BASE is identity mapped */ @@ -194,7 +194,7 @@ static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i) vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev); } -static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx) +static void steal_time_dump(struct kvm_vm *vm, u32 vcpu_idx) { struct st_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]); @@ -208,7 +208,7 @@ static void check_steal_time_uapi(void) { struct kvm_vm *vm; struct kvm_vcpu *vcpu; - uint64_t st_ipa; + u64 st_ipa; int ret; vm = vm_create_with_one_vcpu(&vcpu, NULL); @@ -216,7 +216,7 @@ static void check_steal_time_uapi(void) struct kvm_device_attr dev = { .group = KVM_ARM_VCPU_PVTIME_CTRL, .attr = KVM_ARM_VCPU_PVTIME_IPA, - .addr = (uint64_t)&st_ipa, + .addr = (u64)&st_ipa, }; vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &dev); @@ -239,17 +239,17 @@ static void check_steal_time_uapi(void) 
/* SBI STA shmem must have 64-byte alignment */ #define STEAL_TIME_SIZE ((sizeof(struct sta_struct) + 63) & ~63) -static vm_paddr_t st_gpa[NR_VCPUS]; +static gpa_t st_gpa[NR_VCPUS]; struct sta_struct { - uint32_t sequence; - uint32_t flags; - uint64_t steal; - uint8_t preempted; - uint8_t pad[47]; + u32 sequence; + u32 flags; + u64 steal; + u8 preempted; + u8 pad[47]; } __packed; -static void sta_set_shmem(vm_paddr_t gpa, unsigned long flags) +static void sta_set_shmem(gpa_t gpa, unsigned long flags) { unsigned long lo = (unsigned long)gpa; #if __riscv_xlen == 32 @@ -272,7 +272,7 @@ static void check_status(struct sta_struct *st) static void guest_code(int cpu) { struct sta_struct *st = st_gva[cpu]; - uint32_t sequence; + u32 sequence; long out_val = 0; bool probe; @@ -297,7 +297,7 @@ static void guest_code(int cpu) static bool is_steal_time_supported(struct kvm_vcpu *vcpu) { - uint64_t id = RISCV_SBI_EXT_REG(KVM_RISCV_SBI_EXT_STA); + u64 id = RISCV_SBI_EXT_REG(KVM_RISCV_SBI_EXT_STA); unsigned long enabled = vcpu_get_reg(vcpu, id); TEST_ASSERT(enabled == 0 || enabled == 1, "Expected boolean result"); @@ -305,16 +305,16 @@ static bool is_steal_time_supported(struct kvm_vcpu *vcpu) return enabled; } -static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i) +static void steal_time_init(struct kvm_vcpu *vcpu, u32 i) { /* ST_GPA_BASE is identity mapped */ st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE); - st_gpa[i] = addr_gva2gpa(vcpu->vm, (vm_vaddr_t)st_gva[i]); + st_gpa[i] = addr_gva2gpa(vcpu->vm, (gva_t)st_gva[i]); sync_global_to_guest(vcpu->vm, st_gva[i]); sync_global_to_guest(vcpu->vm, st_gpa[i]); } -static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx) +static void steal_time_dump(struct kvm_vm *vm, u32 vcpu_idx) { struct sta_struct *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]); int i; @@ -335,7 +335,7 @@ static void check_steal_time_uapi(void) struct kvm_vm *vm; struct kvm_vcpu *vcpu; struct kvm_one_reg reg; - uint64_t shmem; + u64 
shmem; int ret; vm = vm_create_with_one_vcpu(&vcpu, NULL); @@ -345,7 +345,7 @@ static void check_steal_time_uapi(void) KVM_REG_RISCV_SBI_STATE | KVM_REG_RISCV_SBI_STA | KVM_REG_RISCV_SBI_STA_REG(shmem_lo); - reg.addr = (uint64_t)&shmem; + reg.addr = (u64)&shmem; shmem = ST_GPA_BASE + 1; ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, ®); @@ -388,7 +388,7 @@ static void check_status(struct kvm_steal_time *st) static void guest_code(int cpu) { - uint32_t version; + u32 version; struct kvm_steal_time *st = st_gva[cpu]; memset(st, 0, sizeof(*st)); @@ -410,11 +410,11 @@ static void guest_code(int cpu) static bool is_steal_time_supported(struct kvm_vcpu *vcpu) { int err; - uint64_t val; + u64 val; struct kvm_device_attr attr = { .group = KVM_LOONGARCH_VCPU_CPUCFG, .attr = CPUCFG_KVM_FEATURE, - .addr = (uint64_t)&val, + .addr = (u64)&val, }; err = __vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &attr); @@ -428,15 +428,15 @@ static bool is_steal_time_supported(struct kvm_vcpu *vcpu) return val & BIT(KVM_FEATURE_STEAL_TIME); } -static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i) +static void steal_time_init(struct kvm_vcpu *vcpu, u32 i) { int err; - uint64_t st_gpa; + u64 st_gpa; struct kvm_vm *vm = vcpu->vm; struct kvm_device_attr attr = { .group = KVM_LOONGARCH_VCPU_PVTIME_CTRL, .attr = KVM_LOONGARCH_VCPU_PVTIME_GPA, - .addr = (uint64_t)&st_gpa, + .addr = (u64)&st_gpa, }; /* ST_GPA_BASE is identity mapped */ @@ -451,7 +451,7 @@ static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i) TEST_ASSERT(err == 0, "Fail to set PV stealtime GPA"); } -static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx) +static void steal_time_dump(struct kvm_vm *vm, u32 vcpu_idx) { struct kvm_steal_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]); @@ -461,6 +461,11 @@ static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx) ksft_print_msg(" version: %d\n", st->version); ksft_print_msg(" preempted: %d\n", st->preempted); } + +static void check_steal_time_uapi(void) +{ + 
+} #endif static void *do_steal_time(void *arg) diff --git a/tools/testing/selftests/kvm/system_counter_offset_test.c b/tools/testing/selftests/kvm/system_counter_offset_test.c index 513d421a9bff..dc5e30b7b77f 100644 --- a/tools/testing/selftests/kvm/system_counter_offset_test.c +++ b/tools/testing/selftests/kvm/system_counter_offset_test.c @@ -17,7 +17,7 @@ #ifdef __x86_64__ struct test_case { - uint64_t tsc_offset; + u64 tsc_offset; }; static struct test_case test_cases[] = { @@ -39,12 +39,12 @@ static void setup_system_counter(struct kvm_vcpu *vcpu, struct test_case *test) &test->tsc_offset); } -static uint64_t guest_read_system_counter(struct test_case *test) +static u64 guest_read_system_counter(struct test_case *test) { return rdtsc(); } -static uint64_t host_read_guest_system_counter(struct test_case *test) +static u64 host_read_guest_system_counter(struct test_case *test) { return rdtsc() + test->tsc_offset; } @@ -69,9 +69,9 @@ static void guest_main(void) } } -static void handle_sync(struct ucall *uc, uint64_t start, uint64_t end) +static void handle_sync(struct ucall *uc, u64 start, u64 end) { - uint64_t obs = uc->args[2]; + u64 obs = uc->args[2]; TEST_ASSERT(start <= obs && obs <= end, "unexpected system counter value: %"PRIu64" expected range: [%"PRIu64", %"PRIu64"]", @@ -88,7 +88,7 @@ static void handle_abort(struct ucall *uc) static void enter_guest(struct kvm_vcpu *vcpu) { - uint64_t start, end; + u64 start, end; struct ucall uc; int i; diff --git a/tools/testing/selftests/kvm/x86/amx_test.c b/tools/testing/selftests/kvm/x86/amx_test.c index 37b166260ee3..4e63da2b1889 100644 --- a/tools/testing/selftests/kvm/x86/amx_test.c +++ b/tools/testing/selftests/kvm/x86/amx_test.c @@ -80,10 +80,10 @@ static inline void __tilerelease(void) asm volatile(".byte 0xc4, 0xe2, 0x78, 0x49, 0xc0" ::); } -static inline void __xsavec(struct xstate *xstate, uint64_t rfbm) +static inline void __xsavec(struct xstate *xstate, u64 rfbm) { - uint32_t rfbm_lo = rfbm; - uint32_t 
rfbm_hi = rfbm >> 32; + u32 rfbm_lo = rfbm; + u32 rfbm_hi = rfbm >> 32; asm volatile("xsavec (%%rdi)" : : "D" (xstate), "a" (rfbm_lo), "d" (rfbm_hi) @@ -236,7 +236,7 @@ int main(int argc, char *argv[]) struct kvm_x86_state *state; struct kvm_x86_state *tile_state = NULL; int xsave_restore_size; - vm_vaddr_t amx_cfg, tiledata, xstate; + gva_t amx_cfg, tiledata, xstate; struct ucall uc; int ret; @@ -263,15 +263,15 @@ int main(int argc, char *argv[]) vcpu_regs_get(vcpu, ®s1); /* amx cfg for guest_code */ - amx_cfg = vm_vaddr_alloc_page(vm); + amx_cfg = vm_alloc_page(vm); memset(addr_gva2hva(vm, amx_cfg), 0x0, getpagesize()); /* amx tiledata for guest_code */ - tiledata = vm_vaddr_alloc_pages(vm, 2); + tiledata = vm_alloc_pages(vm, 2); memset(addr_gva2hva(vm, tiledata), rand() | 1, 2 * getpagesize()); /* XSAVE state for guest_code */ - xstate = vm_vaddr_alloc_pages(vm, DIV_ROUND_UP(XSAVE_SIZE, PAGE_SIZE)); + xstate = vm_alloc_pages(vm, DIV_ROUND_UP(XSAVE_SIZE, PAGE_SIZE)); memset(addr_gva2hva(vm, xstate), 0, PAGE_SIZE * DIV_ROUND_UP(XSAVE_SIZE, PAGE_SIZE)); vcpu_args_set(vcpu, 3, amx_cfg, tiledata, xstate); diff --git a/tools/testing/selftests/kvm/x86/aperfmperf_test.c b/tools/testing/selftests/kvm/x86/aperfmperf_test.c index 8b15a13df939..c91660103137 100644 --- a/tools/testing/selftests/kvm/x86/aperfmperf_test.c +++ b/tools/testing/selftests/kvm/x86/aperfmperf_test.c @@ -35,9 +35,9 @@ static int open_dev_msr(int cpu) return open_path_or_exit(path, O_RDONLY); } -static uint64_t read_dev_msr(int msr_fd, uint32_t msr) +static u64 read_dev_msr(int msr_fd, u32 msr) { - uint64_t data; + u64 data; ssize_t rc; rc = pread(msr_fd, &data, sizeof(data), msr); @@ -107,8 +107,8 @@ static void guest_code(void *nested_test_data) static void guest_no_aperfmperf(void) { - uint64_t msr_val; - uint8_t vector; + u64 msr_val; + u8 vector; vector = rdmsr_safe(MSR_IA32_APERF, &msr_val); GUEST_ASSERT(vector == GP_VECTOR); @@ -122,8 +122,8 @@ static void guest_no_aperfmperf(void) int main(int 
argc, char *argv[]) { const bool has_nested = kvm_cpu_has(X86_FEATURE_SVM) || kvm_cpu_has(X86_FEATURE_VMX); - uint64_t host_aperf_before, host_mperf_before; - vm_vaddr_t nested_test_data_gva; + u64 host_aperf_before, host_mperf_before; + gva_t nested_test_data_gva; struct kvm_vcpu *vcpu; struct kvm_vm *vm; int msr_fd, cpu, i; @@ -166,8 +166,8 @@ int main(int argc, char *argv[]) host_mperf_before = read_dev_msr(msr_fd, MSR_IA32_MPERF); for (i = 0; i <= NUM_ITERATIONS * (1 + has_nested); i++) { - uint64_t host_aperf_after, host_mperf_after; - uint64_t guest_aperf, guest_mperf; + u64 host_aperf_after, host_mperf_after; + u64 guest_aperf, guest_mperf; struct ucall uc; vcpu_run(vcpu); diff --git a/tools/testing/selftests/kvm/x86/apic_bus_clock_test.c b/tools/testing/selftests/kvm/x86/apic_bus_clock_test.c index f8916bb34405..404f0028e110 100644 --- a/tools/testing/selftests/kvm/x86/apic_bus_clock_test.c +++ b/tools/testing/selftests/kvm/x86/apic_bus_clock_test.c @@ -19,8 +19,8 @@ * timer frequency. */ static const struct { - const uint32_t tdcr; - const uint32_t divide_count; + const u32 tdcr; + const u32 divide_count; } tdcrs[] = { {0x0, 2}, {0x1, 4}, @@ -42,12 +42,12 @@ static void apic_enable(void) xapic_enable(); } -static uint32_t apic_read_reg(unsigned int reg) +static u32 apic_read_reg(unsigned int reg) { return is_x2apic ? 
x2apic_read_reg(reg) : xapic_read_reg(reg); } -static void apic_write_reg(unsigned int reg, uint32_t val) +static void apic_write_reg(unsigned int reg, u32 val) { if (is_x2apic) x2apic_write_reg(reg, val); @@ -55,12 +55,12 @@ static void apic_write_reg(unsigned int reg, uint32_t val) xapic_write_reg(reg, val); } -static void apic_guest_code(uint64_t apic_hz, uint64_t delay_ms) +static void apic_guest_code(u64 apic_hz, u64 delay_ms) { - uint64_t tsc_hz = guest_tsc_khz * 1000; - const uint32_t tmict = ~0u; - uint64_t tsc0, tsc1, freq; - uint32_t tmcct; + u64 tsc_hz = guest_tsc_khz * 1000; + const u32 tmict = ~0u; + u64 tsc0, tsc1, freq; + u32 tmcct; int i; apic_enable(); @@ -121,7 +121,7 @@ static void test_apic_bus_clock(struct kvm_vcpu *vcpu) } } -static void run_apic_bus_clock_test(uint64_t apic_hz, uint64_t delay_ms, +static void run_apic_bus_clock_test(u64 apic_hz, u64 delay_ms, bool x2apic) { struct kvm_vcpu *vcpu; @@ -168,8 +168,8 @@ int main(int argc, char *argv[]) * Arbitrarilty default to 25MHz for the APIC bus frequency, which is * different enough from the default 1GHz to be interesting. 
*/ - uint64_t apic_hz = 25 * 1000 * 1000; - uint64_t delay_ms = 100; + u64 apic_hz = 25 * 1000 * 1000; + u64 delay_ms = 100; int opt; TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_APIC_BUS_CYCLES_NS)); diff --git a/tools/testing/selftests/kvm/x86/cpuid_test.c b/tools/testing/selftests/kvm/x86/cpuid_test.c index f9ed14996977..ef0ddd240887 100644 --- a/tools/testing/selftests/kvm/x86/cpuid_test.c +++ b/tools/testing/selftests/kvm/x86/cpuid_test.c @@ -140,10 +140,10 @@ static void run_vcpu(struct kvm_vcpu *vcpu, int stage) } } -struct kvm_cpuid2 *vcpu_alloc_cpuid(struct kvm_vm *vm, vm_vaddr_t *p_gva, struct kvm_cpuid2 *cpuid) +struct kvm_cpuid2 *vcpu_alloc_cpuid(struct kvm_vm *vm, gva_t *p_gva, struct kvm_cpuid2 *cpuid) { int size = sizeof(*cpuid) + cpuid->nent * sizeof(cpuid->entries[0]); - vm_vaddr_t gva = vm_vaddr_alloc(vm, size, KVM_UTIL_MIN_VADDR); + gva_t gva = vm_alloc(vm, size, KVM_UTIL_MIN_VADDR); struct kvm_cpuid2 *guest_cpuids = addr_gva2hva(vm, gva); memcpy(guest_cpuids, cpuid, size); @@ -217,7 +217,7 @@ static void test_get_cpuid2(struct kvm_vcpu *vcpu) int main(void) { struct kvm_vcpu *vcpu; - vm_vaddr_t cpuid_gva; + gva_t cpuid_gva; struct kvm_vm *vm; int stage; diff --git a/tools/testing/selftests/kvm/x86/debug_regs.c b/tools/testing/selftests/kvm/x86/debug_regs.c index 2d814c1d1dc4..0dfaf03cd0a0 100644 --- a/tools/testing/selftests/kvm/x86/debug_regs.c +++ b/tools/testing/selftests/kvm/x86/debug_regs.c @@ -16,7 +16,7 @@ #define IRQ_VECTOR 0xAA /* For testing data access debug BP */ -uint32_t guest_value; +u32 guest_value; extern unsigned char sw_bp, hw_bp, write_data, ss_start, bd_start; @@ -86,7 +86,7 @@ int main(void) struct kvm_run *run; struct kvm_vm *vm; struct ucall uc; - uint64_t cmd; + u64 cmd; int i; /* Instruction lengths starting at ss_start */ int ss_size[6] = { diff --git a/tools/testing/selftests/kvm/x86/dirty_log_page_splitting_test.c b/tools/testing/selftests/kvm/x86/dirty_log_page_splitting_test.c index b0d2b04a7ff2..388ba4101f97 100644 --- 
a/tools/testing/selftests/kvm/x86/dirty_log_page_splitting_test.c +++ b/tools/testing/selftests/kvm/x86/dirty_log_page_splitting_test.c @@ -23,7 +23,7 @@ #define SLOTS 2 #define ITERATIONS 2 -static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE; +static u64 guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE; static enum vm_mem_backing_src_type backing_src = VM_MEM_SRC_ANONYMOUS_HUGETLB; @@ -33,10 +33,10 @@ static int iteration; static int vcpu_last_completed_iteration[KVM_MAX_VCPUS]; struct kvm_page_stats { - uint64_t pages_4k; - uint64_t pages_2m; - uint64_t pages_1g; - uint64_t hugepages; + u64 pages_4k; + u64 pages_2m; + u64 pages_1g; + u64 hugepages; }; static void get_page_stats(struct kvm_vm *vm, struct kvm_page_stats *stats, const char *stage) @@ -89,9 +89,9 @@ static void run_test(enum vm_guest_mode mode, void *unused) { struct kvm_vm *vm; unsigned long **bitmaps; - uint64_t guest_num_pages; - uint64_t host_num_pages; - uint64_t pages_per_slot; + u64 guest_num_pages; + u64 host_num_pages; + u64 pages_per_slot; int i; struct kvm_page_stats stats_populated; struct kvm_page_stats stats_dirty_logging_enabled; diff --git a/tools/testing/selftests/kvm/x86/evmcs_smm_controls_test.c b/tools/testing/selftests/kvm/x86/evmcs_smm_controls_test.c index af7c90103396..5b3aef109cfc 100644 --- a/tools/testing/selftests/kvm/x86/evmcs_smm_controls_test.c +++ b/tools/testing/selftests/kvm/x86/evmcs_smm_controls_test.c @@ -29,13 +29,13 @@ * SMI handler: runs in real-address mode. * Reports SMRAM_STAGE via port IO, then does RSM. 
*/ -static uint8_t smi_handler[] = { +static u8 smi_handler[] = { 0xb0, SMRAM_STAGE, /* mov $SMRAM_STAGE, %al */ 0xe4, SYNC_PORT, /* in $SYNC_PORT, %al */ 0x0f, 0xaa, /* rsm */ }; -static inline void sync_with_host(uint64_t phase) +static inline void sync_with_host(u64 phase) { asm volatile("in $" XSTR(SYNC_PORT) ", %%al \n" : "+a" (phase)); @@ -73,7 +73,7 @@ static void guest_code(struct vmx_pages *vmx_pages, int main(int argc, char *argv[]) { - vm_vaddr_t vmx_pages_gva = 0, hv_pages_gva = 0; + gva_t vmx_pages_gva = 0, hv_pages_gva = 0; struct hyperv_test_pages *hv; struct hv_enlightened_vmcs *evmcs; struct kvm_vcpu *vcpu; diff --git a/tools/testing/selftests/kvm/x86/fastops_test.c b/tools/testing/selftests/kvm/x86/fastops_test.c index 8926cfe0e209..c0d30ccd8767 100644 --- a/tools/testing/selftests/kvm/x86/fastops_test.c +++ b/tools/testing/selftests/kvm/x86/fastops_test.c @@ -15,7 +15,7 @@ "pop %[flags]\n\t" #define flags_constraint(flags_val) [flags]"=r"(flags_val) -#define bt_constraint(__bt_val) [bt_val]"rm"((uint32_t)__bt_val) +#define bt_constraint(__bt_val) [bt_val]"rm"((u32)__bt_val) #define guest_execute_fastop_1(FEP, insn, __val, __flags) \ ({ \ @@ -28,17 +28,17 @@ #define guest_test_fastop_1(insn, type_t, __val) \ ({ \ type_t val = __val, ex_val = __val, input = __val; \ - uint64_t flags, ex_flags; \ + u64 flags, ex_flags; \ \ guest_execute_fastop_1("", insn, ex_val, ex_flags); \ guest_execute_fastop_1(KVM_FEP, insn, val, flags); \ \ __GUEST_ASSERT(val == ex_val, \ "Wanted 0x%lx for '%s 0x%lx', got 0x%lx", \ - (uint64_t)ex_val, insn, (uint64_t)input, (uint64_t)val); \ + (u64)ex_val, insn, (u64)input, (u64)val); \ __GUEST_ASSERT(flags == ex_flags, \ "Wanted flags 0x%lx for '%s 0x%lx', got 0x%lx", \ - ex_flags, insn, (uint64_t)input, flags); \ + ex_flags, insn, (u64)input, flags); \ }) #define guest_execute_fastop_2(FEP, insn, __input, __output, __flags) \ @@ -52,18 +52,18 @@ #define guest_test_fastop_2(insn, type_t, __val1, __val2) \ ({ \ type_t input = 
__val1, input2 = __val2, output = __val2, ex_output = __val2; \ - uint64_t flags, ex_flags; \ + u64 flags, ex_flags; \ \ guest_execute_fastop_2("", insn, input, ex_output, ex_flags); \ guest_execute_fastop_2(KVM_FEP, insn, input, output, flags); \ \ __GUEST_ASSERT(output == ex_output, \ "Wanted 0x%lx for '%s 0x%lx 0x%lx', got 0x%lx", \ - (uint64_t)ex_output, insn, (uint64_t)input, \ - (uint64_t)input2, (uint64_t)output); \ + (u64)ex_output, insn, (u64)input, \ + (u64)input2, (u64)output); \ __GUEST_ASSERT(flags == ex_flags, \ "Wanted flags 0x%lx for '%s 0x%lx, 0x%lx', got 0x%lx", \ - ex_flags, insn, (uint64_t)input, (uint64_t)input2, flags); \ + ex_flags, insn, (u64)input, (u64)input2, flags); \ }) #define guest_execute_fastop_cl(FEP, insn, __shift, __output, __flags) \ @@ -77,25 +77,25 @@ #define guest_test_fastop_cl(insn, type_t, __val1, __val2) \ ({ \ type_t output = __val2, ex_output = __val2, input = __val2; \ - uint8_t shift = __val1; \ - uint64_t flags, ex_flags; \ + u8 shift = __val1; \ + u64 flags, ex_flags; \ \ guest_execute_fastop_cl("", insn, shift, ex_output, ex_flags); \ guest_execute_fastop_cl(KVM_FEP, insn, shift, output, flags); \ \ __GUEST_ASSERT(output == ex_output, \ "Wanted 0x%lx for '%s 0x%x, 0x%lx', got 0x%lx", \ - (uint64_t)ex_output, insn, shift, (uint64_t)input, \ - (uint64_t)output); \ + (u64)ex_output, insn, shift, (u64)input, \ + (u64)output); \ __GUEST_ASSERT(flags == ex_flags, \ "Wanted flags 0x%lx for '%s 0x%x, 0x%lx', got 0x%lx", \ - ex_flags, insn, shift, (uint64_t)input, flags); \ + ex_flags, insn, shift, (u64)input, flags); \ }) #define guest_execute_fastop_div(__KVM_ASM_SAFE, insn, __a, __d, __rm, __flags) \ ({ \ - uint64_t ign_error_code; \ - uint8_t vector; \ + u64 ign_error_code; \ + u8 vector; \ \ __asm__ __volatile__(fastop(__KVM_ASM_SAFE(insn " %[denom]")) \ : "+a"(__a), "+d"(__d), flags_constraint(__flags), \ @@ -109,8 +109,8 @@ ({ \ type_t _a = __val1, _d = __val1, rm = __val2; \ type_t a = _a, d = _d, ex_a = _a, ex_d = 
_d; \ - uint64_t flags, ex_flags; \ - uint8_t v, ex_v; \ + u64 flags, ex_flags; \ + u8 v, ex_v; \ \ ex_v = guest_execute_fastop_div(KVM_ASM_SAFE, insn, ex_a, ex_d, rm, ex_flags); \ v = guest_execute_fastop_div(KVM_ASM_SAFE_FEP, insn, a, d, rm, flags); \ @@ -118,17 +118,17 @@ GUEST_ASSERT_EQ(v, ex_v); \ __GUEST_ASSERT(v == ex_v, \ "Wanted vector 0x%x for '%s 0x%lx:0x%lx/0x%lx', got 0x%x", \ - ex_v, insn, (uint64_t)_a, (uint64_t)_d, (uint64_t)rm, v); \ + ex_v, insn, (u64)_a, (u64)_d, (u64)rm, v); \ __GUEST_ASSERT(a == ex_a && d == ex_d, \ "Wanted 0x%lx:0x%lx for '%s 0x%lx:0x%lx/0x%lx', got 0x%lx:0x%lx",\ - (uint64_t)ex_a, (uint64_t)ex_d, insn, (uint64_t)_a, \ - (uint64_t)_d, (uint64_t)rm, (uint64_t)a, (uint64_t)d); \ + (u64)ex_a, (u64)ex_d, insn, (u64)_a, \ + (u64)_d, (u64)rm, (u64)a, (u64)d); \ __GUEST_ASSERT(v || ex_v || (flags == ex_flags), \ "Wanted flags 0x%lx for '%s 0x%lx:0x%lx/0x%lx', got 0x%lx", \ - ex_flags, insn, (uint64_t)_a, (uint64_t)_d, (uint64_t)rm, flags);\ + ex_flags, insn, (u64)_a, (u64)_d, (u64)rm, flags);\ }) -static const uint64_t vals[] = { +static const u64 vals[] = { 0, 1, 2, @@ -185,10 +185,10 @@ if (sizeof(type_t) != 1) { \ static void guest_code(void) { - guest_test_fastops(uint8_t, "b"); - guest_test_fastops(uint16_t, "w"); - guest_test_fastops(uint32_t, "l"); - guest_test_fastops(uint64_t, "q"); + guest_test_fastops(u8, "b"); + guest_test_fastops(u16, "w"); + guest_test_fastops(u32, "l"); + guest_test_fastops(u64, "q"); GUEST_DONE(); } diff --git a/tools/testing/selftests/kvm/x86/feature_msrs_test.c b/tools/testing/selftests/kvm/x86/feature_msrs_test.c index a72f13ae2edb..158550701771 100644 --- a/tools/testing/selftests/kvm/x86/feature_msrs_test.c +++ b/tools/testing/selftests/kvm/x86/feature_msrs_test.c @@ -12,7 +12,7 @@ #include "kvm_util.h" #include "processor.h" -static bool is_kvm_controlled_msr(uint32_t msr) +static bool is_kvm_controlled_msr(u32 msr) { return msr == MSR_IA32_VMX_CR0_FIXED1 || msr == MSR_IA32_VMX_CR4_FIXED1; } @@ 
-21,7 +21,7 @@ static bool is_kvm_controlled_msr(uint32_t msr) * For VMX MSRs with a "true" variant, KVM requires userspace to set the "true" * MSR, and doesn't allow setting the hidden version. */ -static bool is_hidden_vmx_msr(uint32_t msr) +static bool is_hidden_vmx_msr(u32 msr) { switch (msr) { case MSR_IA32_VMX_PINBASED_CTLS: @@ -34,15 +34,15 @@ static bool is_hidden_vmx_msr(uint32_t msr) } } -static bool is_quirked_msr(uint32_t msr) +static bool is_quirked_msr(u32 msr) { return msr != MSR_AMD64_DE_CFG; } -static void test_feature_msr(uint32_t msr) +static void test_feature_msr(u32 msr) { - const uint64_t supported_mask = kvm_get_feature_msr(msr); - uint64_t reset_value = is_quirked_msr(msr) ? supported_mask : 0; + const u64 supported_mask = kvm_get_feature_msr(msr); + u64 reset_value = is_quirked_msr(msr) ? supported_mask : 0; struct kvm_vcpu *vcpu; struct kvm_vm *vm; diff --git a/tools/testing/selftests/kvm/x86/fix_hypercall_test.c b/tools/testing/selftests/kvm/x86/fix_hypercall_test.c index 00b6e85735dd..753a0e730ea8 100644 --- a/tools/testing/selftests/kvm/x86/fix_hypercall_test.c +++ b/tools/testing/selftests/kvm/x86/fix_hypercall_test.c @@ -26,18 +26,18 @@ static void guest_ud_handler(struct ex_regs *regs) regs->rip += HYPERCALL_INSN_SIZE; } -static const uint8_t vmx_vmcall[HYPERCALL_INSN_SIZE] = { 0x0f, 0x01, 0xc1 }; -static const uint8_t svm_vmmcall[HYPERCALL_INSN_SIZE] = { 0x0f, 0x01, 0xd9 }; +static const u8 vmx_vmcall[HYPERCALL_INSN_SIZE] = { 0x0f, 0x01, 0xc1 }; +static const u8 svm_vmmcall[HYPERCALL_INSN_SIZE] = { 0x0f, 0x01, 0xd9 }; -extern uint8_t hypercall_insn[HYPERCALL_INSN_SIZE]; -static uint64_t do_sched_yield(uint8_t apic_id) +extern u8 hypercall_insn[HYPERCALL_INSN_SIZE]; +static u64 do_sched_yield(u8 apic_id) { - uint64_t ret; + u64 ret; asm volatile("hypercall_insn:\n\t" ".byte 0xcc,0xcc,0xcc\n\t" : "=a"(ret) - : "a"((uint64_t)KVM_HC_SCHED_YIELD), "b"((uint64_t)apic_id) + : "a"((u64)KVM_HC_SCHED_YIELD), "b"((u64)apic_id) : "memory"); 
return ret; @@ -45,9 +45,9 @@ static uint64_t do_sched_yield(uint8_t apic_id) static void guest_main(void) { - const uint8_t *native_hypercall_insn; - const uint8_t *other_hypercall_insn; - uint64_t ret; + const u8 *native_hypercall_insn; + const u8 *other_hypercall_insn; + u64 ret; if (host_cpu_is_intel) { native_hypercall_insn = vmx_vmcall; @@ -72,7 +72,7 @@ static void guest_main(void) * the "right" hypercall. */ if (quirk_disabled) { - GUEST_ASSERT(ret == (uint64_t)-EFAULT); + GUEST_ASSERT(ret == (u64)-EFAULT); GUEST_ASSERT(!memcmp(other_hypercall_insn, hypercall_insn, HYPERCALL_INSN_SIZE)); } else { diff --git a/tools/testing/selftests/kvm/x86/flds_emulation.h b/tools/testing/selftests/kvm/x86/flds_emulation.h index 37b1a9f52864..fd6b6c67199a 100644 --- a/tools/testing/selftests/kvm/x86/flds_emulation.h +++ b/tools/testing/selftests/kvm/x86/flds_emulation.h @@ -12,7 +12,7 @@ * KVM to emulate the instruction (e.g. by providing an MMIO address) to * exercise emulation failures. */ -static inline void flds(uint64_t address) +static inline void flds(u64 address) { __asm__ __volatile__(FLDS_MEM_EAX :: "a"(address)); } @@ -21,8 +21,8 @@ static inline void handle_flds_emulation_failure_exit(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; struct kvm_regs regs; - uint8_t *insn_bytes; - uint64_t flags; + u8 *insn_bytes; + u64 flags; TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_INTERNAL_ERROR); diff --git a/tools/testing/selftests/kvm/x86/hwcr_msr_test.c b/tools/testing/selftests/kvm/x86/hwcr_msr_test.c index 10b1b0ba374e..8e20a03b3329 100644 --- a/tools/testing/selftests/kvm/x86/hwcr_msr_test.c +++ b/tools/testing/selftests/kvm/x86/hwcr_msr_test.c @@ -10,11 +10,11 @@ void test_hwcr_bit(struct kvm_vcpu *vcpu, unsigned int bit) { - const uint64_t ignored = BIT_ULL(3) | BIT_ULL(6) | BIT_ULL(8); - const uint64_t valid = BIT_ULL(18) | BIT_ULL(24); - const uint64_t legal = ignored | valid; - uint64_t val = BIT_ULL(bit); - uint64_t actual; + const u64 ignored = 
BIT_ULL(3) | BIT_ULL(6) | BIT_ULL(8); + const u64 valid = BIT_ULL(18) | BIT_ULL(24); + const u64 legal = ignored | valid; + u64 val = BIT_ULL(bit); + u64 actual; int r; r = _vcpu_set_msr(vcpu, MSR_K7_HWCR, val); diff --git a/tools/testing/selftests/kvm/x86/hyperv_clock.c b/tools/testing/selftests/kvm/x86/hyperv_clock.c index e058bc676cd6..c083cea546dc 100644 --- a/tools/testing/selftests/kvm/x86/hyperv_clock.c +++ b/tools/testing/selftests/kvm/x86/hyperv_clock.c @@ -98,7 +98,7 @@ static inline void check_tsc_msr_tsc_page(struct ms_hyperv_tsc_page *tsc_page) GUEST_ASSERT(r2 >= t1 && r2 - t2 < 100000); } -static void guest_main(struct ms_hyperv_tsc_page *tsc_page, vm_paddr_t tsc_page_gpa) +static void guest_main(struct ms_hyperv_tsc_page *tsc_page, gpa_t tsc_page_gpa) { u64 tsc_scale, tsc_offset; @@ -208,7 +208,7 @@ int main(void) struct kvm_vcpu *vcpu; struct kvm_vm *vm; struct ucall uc; - vm_vaddr_t tsc_page_gva; + gva_t tsc_page_gva; int stage; TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_TIME)); @@ -218,7 +218,7 @@ int main(void) vcpu_set_hv_cpuid(vcpu); - tsc_page_gva = vm_vaddr_alloc_page(vm); + tsc_page_gva = vm_alloc_page(vm); memset(addr_gva2hva(vm, tsc_page_gva), 0x0, getpagesize()); TEST_ASSERT((addr_gva2gpa(vm, tsc_page_gva) & (getpagesize() - 1)) == 0, "TSC page has to be page aligned"); diff --git a/tools/testing/selftests/kvm/x86/hyperv_evmcs.c b/tools/testing/selftests/kvm/x86/hyperv_evmcs.c index 74cf19661309..c7fa114aee20 100644 --- a/tools/testing/selftests/kvm/x86/hyperv_evmcs.c +++ b/tools/testing/selftests/kvm/x86/hyperv_evmcs.c @@ -30,7 +30,7 @@ static void guest_nmi_handler(struct ex_regs *regs) { } -static inline void rdmsr_from_l2(uint32_t msr) +static inline void rdmsr_from_l2(u32 msr) { /* Currently, L1 doesn't preserve GPRs during vmexits. 
*/ __asm__ __volatile__ ("rdmsr" : : "c"(msr) : @@ -76,7 +76,7 @@ void l2_guest_code(void) } void guest_code(struct vmx_pages *vmx_pages, struct hyperv_test_pages *hv_pages, - vm_vaddr_t hv_hcall_page_gpa) + gpa_t hv_hcall_page_gpa) { #define L2_GUEST_STACK_SIZE 64 unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; @@ -231,8 +231,8 @@ static struct kvm_vcpu *save_restore_vm(struct kvm_vm *vm, int main(int argc, char *argv[]) { - vm_vaddr_t vmx_pages_gva = 0, hv_pages_gva = 0; - vm_vaddr_t hcall_page; + gva_t vmx_pages_gva = 0, hv_pages_gva = 0; + gva_t hcall_page; struct kvm_vcpu *vcpu; struct kvm_vm *vm; @@ -246,7 +246,7 @@ int main(int argc, char *argv[]) vm = vm_create_with_one_vcpu(&vcpu, guest_code); - hcall_page = vm_vaddr_alloc_pages(vm, 1); + hcall_page = vm_alloc_pages(vm, 1); memset(addr_gva2hva(vm, hcall_page), 0x0, getpagesize()); vcpu_set_hv_cpuid(vcpu); diff --git a/tools/testing/selftests/kvm/x86/hyperv_extended_hypercalls.c b/tools/testing/selftests/kvm/x86/hyperv_extended_hypercalls.c index 949e08e98f31..ae047db7b1be 100644 --- a/tools/testing/selftests/kvm/x86/hyperv_extended_hypercalls.c +++ b/tools/testing/selftests/kvm/x86/hyperv_extended_hypercalls.c @@ -15,19 +15,19 @@ /* Any value is fine */ #define EXT_CAPABILITIES 0xbull -static void guest_code(vm_paddr_t in_pg_gpa, vm_paddr_t out_pg_gpa, - vm_vaddr_t out_pg_gva) +static void guest_code(gpa_t in_pg_gpa, gpa_t out_pg_gpa, + gva_t out_pg_gva) { - uint64_t *output_gva; + u64 *output_gva; wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID); wrmsr(HV_X64_MSR_HYPERCALL, in_pg_gpa); - output_gva = (uint64_t *)out_pg_gva; + output_gva = (u64 *)out_pg_gva; hyperv_hypercall(HV_EXT_CALL_QUERY_CAPABILITIES, in_pg_gpa, out_pg_gpa); - /* TLFS states output will be a uint64_t value */ + /* TLFS states output will be a u64 value */ GUEST_ASSERT_EQ(*output_gva, EXT_CAPABILITIES); GUEST_DONE(); @@ -35,12 +35,12 @@ static void guest_code(vm_paddr_t in_pg_gpa, vm_paddr_t out_pg_gpa, int main(void) { - 
vm_vaddr_t hcall_out_page; - vm_vaddr_t hcall_in_page; + gva_t hcall_out_page; + gva_t hcall_in_page; struct kvm_vcpu *vcpu; struct kvm_run *run; struct kvm_vm *vm; - uint64_t *outval; + u64 *outval; struct ucall uc; TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_CPUID)); @@ -57,11 +57,11 @@ int main(void) vcpu_set_hv_cpuid(vcpu); /* Hypercall input */ - hcall_in_page = vm_vaddr_alloc_pages(vm, 1); + hcall_in_page = vm_alloc_pages(vm, 1); memset(addr_gva2hva(vm, hcall_in_page), 0x0, vm->page_size); /* Hypercall output */ - hcall_out_page = vm_vaddr_alloc_pages(vm, 1); + hcall_out_page = vm_alloc_pages(vm, 1); memset(addr_gva2hva(vm, hcall_out_page), 0x0, vm->page_size); vcpu_args_set(vcpu, 3, addr_gva2gpa(vm, hcall_in_page), diff --git a/tools/testing/selftests/kvm/x86/hyperv_features.c b/tools/testing/selftests/kvm/x86/hyperv_features.c index 130b9ce7e5dd..7347f1fe5157 100644 --- a/tools/testing/selftests/kvm/x86/hyperv_features.c +++ b/tools/testing/selftests/kvm/x86/hyperv_features.c @@ -22,27 +22,27 @@ KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EBX, 0) struct msr_data { - uint32_t idx; + u32 idx; bool fault_expected; bool write; u64 write_val; }; struct hcall_data { - uint64_t control; - uint64_t expect; + u64 control; + u64 expect; bool ud_expected; }; -static bool is_write_only_msr(uint32_t msr) +static bool is_write_only_msr(u32 msr) { return msr == HV_X64_MSR_EOI; } static void guest_msr(struct msr_data *msr) { - uint8_t vector = 0; - uint64_t msr_val = 0; + u8 vector = 0; + u64 msr_val = 0; GUEST_ASSERT(msr->idx); @@ -82,10 +82,10 @@ done: GUEST_DONE(); } -static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall) +static void guest_hcall(gpa_t pgs_gpa, struct hcall_data *hcall) { u64 res, input, output; - uint8_t vector; + u8 vector; GUEST_ASSERT_NE(hcall->control, 0); @@ -134,14 +134,14 @@ static void guest_test_msrs_access(void) struct kvm_vm *vm; struct ucall uc; int stage = 0; - vm_vaddr_t msr_gva; + gva_t msr_gva; struct msr_data *msr; 
bool has_invtsc = kvm_cpu_has(X86_FEATURE_INVTSC); while (true) { vm = vm_create_with_one_vcpu(&vcpu, guest_msr); - msr_gva = vm_vaddr_alloc_page(vm); + msr_gva = vm_alloc_page(vm); memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize()); msr = addr_gva2hva(vm, msr_gva); @@ -523,17 +523,17 @@ static void guest_test_hcalls_access(void) struct kvm_vm *vm; struct ucall uc; int stage = 0; - vm_vaddr_t hcall_page, hcall_params; + gva_t hcall_page, hcall_params; struct hcall_data *hcall; while (true) { vm = vm_create_with_one_vcpu(&vcpu, guest_hcall); /* Hypercall input/output */ - hcall_page = vm_vaddr_alloc_pages(vm, 2); + hcall_page = vm_alloc_pages(vm, 2); memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize()); - hcall_params = vm_vaddr_alloc_page(vm); + hcall_params = vm_alloc_page(vm); memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize()); hcall = addr_gva2hva(vm, hcall_params); diff --git a/tools/testing/selftests/kvm/x86/hyperv_ipi.c b/tools/testing/selftests/kvm/x86/hyperv_ipi.c index ca61836c4e32..771535f9aad3 100644 --- a/tools/testing/selftests/kvm/x86/hyperv_ipi.c +++ b/tools/testing/selftests/kvm/x86/hyperv_ipi.c @@ -18,7 +18,7 @@ #define IPI_VECTOR 0xfe -static volatile uint64_t ipis_rcvd[RECEIVER_VCPU_ID_2 + 1]; +static volatile u64 ipis_rcvd[RECEIVER_VCPU_ID_2 + 1]; struct hv_vpset { u64 format; @@ -45,13 +45,13 @@ struct hv_send_ipi_ex { struct hv_vpset vp_set; }; -static inline void hv_init(vm_vaddr_t pgs_gpa) +static inline void hv_init(gpa_t pgs_gpa) { wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID); wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa); } -static void receiver_code(void *hcall_page, vm_vaddr_t pgs_gpa) +static void receiver_code(void *hcall_page, gpa_t pgs_gpa) { u32 vcpu_id; @@ -85,7 +85,7 @@ static inline void nop_loop(void) asm volatile("nop"); } -static void sender_guest_code(void *hcall_page, vm_vaddr_t pgs_gpa) +static void sender_guest_code(void *hcall_page, gpa_t pgs_gpa) { struct hv_send_ipi *ipi = (struct hv_send_ipi 
*)hcall_page; struct hv_send_ipi_ex *ipi_ex = (struct hv_send_ipi_ex *)hcall_page; @@ -243,7 +243,7 @@ int main(int argc, char *argv[]) { struct kvm_vm *vm; struct kvm_vcpu *vcpu[3]; - vm_vaddr_t hcall_page; + gva_t hcall_page; pthread_t threads[2]; int stage = 1, r; struct ucall uc; @@ -253,7 +253,7 @@ int main(int argc, char *argv[]) vm = vm_create_with_one_vcpu(&vcpu[0], sender_guest_code); /* Hypercall input/output */ - hcall_page = vm_vaddr_alloc_pages(vm, 2); + hcall_page = vm_alloc_pages(vm, 2); memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize()); diff --git a/tools/testing/selftests/kvm/x86/hyperv_svm_test.c b/tools/testing/selftests/kvm/x86/hyperv_svm_test.c index 0ddb63229bcb..7a62f6a9d606 100644 --- a/tools/testing/selftests/kvm/x86/hyperv_svm_test.c +++ b/tools/testing/selftests/kvm/x86/hyperv_svm_test.c @@ -21,7 +21,7 @@ #define L2_GUEST_STACK_SIZE 256 /* Exit to L1 from L2 with RDMSR instruction */ -static inline void rdmsr_from_l2(uint32_t msr) +static inline void rdmsr_from_l2(u32 msr) { /* Currently, L1 doesn't preserve GPRs during vmexits. 
*/ __asm__ __volatile__ ("rdmsr" : : "c"(msr) : @@ -67,7 +67,7 @@ void l2_guest_code(void) static void __attribute__((__flatten__)) guest_code(struct svm_test_data *svm, struct hyperv_test_pages *hv_pages, - vm_vaddr_t pgs_gpa) + gpa_t pgs_gpa) { unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; struct vmcb *vmcb = svm->vmcb; @@ -149,8 +149,8 @@ static void __attribute__((__flatten__)) guest_code(struct svm_test_data *svm, int main(int argc, char *argv[]) { - vm_vaddr_t nested_gva = 0, hv_pages_gva = 0; - vm_vaddr_t hcall_page; + gva_t nested_gva = 0, hv_pages_gva = 0; + gva_t hcall_page; struct kvm_vcpu *vcpu; struct kvm_vm *vm; struct ucall uc; @@ -165,7 +165,7 @@ int main(int argc, char *argv[]) vcpu_alloc_svm(vm, &nested_gva); vcpu_alloc_hyperv_test_pages(vm, &hv_pages_gva); - hcall_page = vm_vaddr_alloc_pages(vm, 1); + hcall_page = vm_alloc_pages(vm, 1); memset(addr_gva2hva(vm, hcall_page), 0x0, getpagesize()); vcpu_args_set(vcpu, 3, nested_gva, hv_pages_gva, addr_gva2gpa(vm, hcall_page)); diff --git a/tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c b/tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c index c542cc4762b1..15ee8b7bfc11 100644 --- a/tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c +++ b/tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c @@ -61,14 +61,14 @@ struct hv_tlb_flush_ex { * - GVAs of the test pages' PTEs */ struct test_data { - vm_vaddr_t hcall_gva; - vm_paddr_t hcall_gpa; - vm_vaddr_t test_pages; - vm_vaddr_t test_pages_pte[NTEST_PAGES]; + gva_t hcall_gva; + gpa_t hcall_gpa; + gva_t test_pages; + gva_t test_pages_pte[NTEST_PAGES]; }; /* 'Worker' vCPU code checking the contents of the test page */ -static void worker_guest_code(vm_vaddr_t test_data) +static void worker_guest_code(gva_t test_data) { struct test_data *data = (struct test_data *)test_data; u32 vcpu_id = rdmsr(HV_X64_MSR_VP_INDEX); @@ -133,12 +133,12 @@ static void set_expected_val(void *addr, u64 val, int vcpu_id) * Update PTEs swapping two test pages. 
* TODO: use swap()/xchg() when these are provided. */ -static void swap_two_test_pages(vm_paddr_t pte_gva1, vm_paddr_t pte_gva2) +static void swap_two_test_pages(gpa_t pte_gva1, gpa_t pte_gva2) { - uint64_t tmp = *(uint64_t *)pte_gva1; + u64 tmp = *(u64 *)pte_gva1; - *(uint64_t *)pte_gva1 = *(uint64_t *)pte_gva2; - *(uint64_t *)pte_gva2 = tmp; + *(u64 *)pte_gva1 = *(u64 *)pte_gva2; + *(u64 *)pte_gva2 = tmp; } /* @@ -196,12 +196,12 @@ static inline void post_test(struct test_data *data, u64 exp1, u64 exp2) #define TESTVAL2 0x0202020202020202 /* Main vCPU doing the test */ -static void sender_guest_code(vm_vaddr_t test_data) +static void sender_guest_code(gva_t test_data) { struct test_data *data = (struct test_data *)test_data; struct hv_tlb_flush *flush = (struct hv_tlb_flush *)data->hcall_gva; struct hv_tlb_flush_ex *flush_ex = (struct hv_tlb_flush_ex *)data->hcall_gva; - vm_paddr_t hcall_gpa = data->hcall_gpa; + gpa_t hcall_gpa = data->hcall_gpa; int i, stage = 1; wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID); @@ -581,9 +581,9 @@ int main(int argc, char *argv[]) struct kvm_vm *vm; struct kvm_vcpu *vcpu[3]; pthread_t threads[2]; - vm_vaddr_t test_data_page, gva; - vm_paddr_t gpa; - uint64_t *pte; + gva_t test_data_page, gva; + gpa_t gpa; + u64 *pte; struct test_data *data; struct ucall uc; int stage = 1, r, i; @@ -593,11 +593,11 @@ int main(int argc, char *argv[]) vm = vm_create_with_one_vcpu(&vcpu[0], sender_guest_code); /* Test data page */ - test_data_page = vm_vaddr_alloc_page(vm); + test_data_page = vm_alloc_page(vm); data = (struct test_data *)addr_gva2hva(vm, test_data_page); /* Hypercall input/output */ - data->hcall_gva = vm_vaddr_alloc_pages(vm, 2); + data->hcall_gva = vm_alloc_pages(vm, 2); data->hcall_gpa = addr_gva2gpa(vm, data->hcall_gva); memset(addr_gva2hva(vm, data->hcall_gva), 0x0, 2 * PAGE_SIZE); @@ -606,7 +606,7 @@ int main(int argc, char *argv[]) * and the test will swap their mappings. 
The third page keeps the indication * about the current state of mappings. */ - data->test_pages = vm_vaddr_alloc_pages(vm, NTEST_PAGES + 1); + data->test_pages = vm_alloc_pages(vm, NTEST_PAGES + 1); for (i = 0; i < NTEST_PAGES; i++) memset(addr_gva2hva(vm, data->test_pages + PAGE_SIZE * i), (u8)(i + 1), PAGE_SIZE); @@ -617,7 +617,7 @@ int main(int argc, char *argv[]) * Get PTE pointers for test pages and map them inside the guest. * Use separate page for each PTE for simplicity. */ - gva = vm_vaddr_unused_gap(vm, NTEST_PAGES * PAGE_SIZE, KVM_UTIL_MIN_VADDR); + gva = vm_unused_gva_gap(vm, NTEST_PAGES * PAGE_SIZE, KVM_UTIL_MIN_VADDR); for (i = 0; i < NTEST_PAGES; i++) { pte = vm_get_pte(vm, data->test_pages + i * PAGE_SIZE); gpa = addr_hva2gpa(vm, pte); diff --git a/tools/testing/selftests/kvm/x86/kvm_buslock_test.c b/tools/testing/selftests/kvm/x86/kvm_buslock_test.c index d88500c118eb..52014a3210c8 100644 --- a/tools/testing/selftests/kvm/x86/kvm_buslock_test.c +++ b/tools/testing/selftests/kvm/x86/kvm_buslock_test.c @@ -73,7 +73,7 @@ static void guest_code(void *test_data) int main(int argc, char *argv[]) { const bool has_nested = kvm_cpu_has(X86_FEATURE_SVM) || kvm_cpu_has(X86_FEATURE_VMX); - vm_vaddr_t nested_test_data_gva; + gva_t nested_test_data_gva; struct kvm_vcpu *vcpu; struct kvm_run *run; struct kvm_vm *vm; diff --git a/tools/testing/selftests/kvm/x86/kvm_clock_test.c b/tools/testing/selftests/kvm/x86/kvm_clock_test.c index 5bc12222d87a..5ad4aeb8e373 100644 --- a/tools/testing/selftests/kvm/x86/kvm_clock_test.c +++ b/tools/testing/selftests/kvm/x86/kvm_clock_test.c @@ -17,8 +17,8 @@ #include "processor.h" struct test_case { - uint64_t kvmclock_base; - int64_t realtime_offset; + u64 kvmclock_base; + s64 realtime_offset; }; static struct test_case test_cases[] = { @@ -31,7 +31,7 @@ static struct test_case test_cases[] = { #define GUEST_SYNC_CLOCK(__stage, __val) \ GUEST_SYNC_ARGS(__stage, __val, 0, 0, 0) -static void guest_main(vm_paddr_t pvti_pa, struct 
pvclock_vcpu_time_info *pvti) +static void guest_main(gpa_t pvti_pa, struct pvclock_vcpu_time_info *pvti) { int i; @@ -52,7 +52,7 @@ static inline void assert_flags(struct kvm_clock_data *data) static void handle_sync(struct ucall *uc, struct kvm_clock_data *start, struct kvm_clock_data *end) { - uint64_t obs, exp_lo, exp_hi; + u64 obs, exp_lo, exp_hi; obs = uc->args[2]; exp_lo = start->clock; @@ -135,8 +135,8 @@ static void enter_guest(struct kvm_vcpu *vcpu) int main(void) { struct kvm_vcpu *vcpu; - vm_vaddr_t pvti_gva; - vm_paddr_t pvti_gpa; + gva_t pvti_gva; + gpa_t pvti_gpa; struct kvm_vm *vm; int flags; @@ -147,7 +147,7 @@ int main(void) vm = vm_create_with_one_vcpu(&vcpu, guest_main); - pvti_gva = vm_vaddr_alloc(vm, getpagesize(), 0x10000); + pvti_gva = vm_alloc(vm, getpagesize(), 0x10000); pvti_gpa = addr_gva2gpa(vm, pvti_gva); vcpu_args_set(vcpu, 2, pvti_gpa, pvti_gva); diff --git a/tools/testing/selftests/kvm/x86/kvm_pv_test.c b/tools/testing/selftests/kvm/x86/kvm_pv_test.c index 1b805cbdb47b..8ed5fa635021 100644 --- a/tools/testing/selftests/kvm/x86/kvm_pv_test.c +++ b/tools/testing/selftests/kvm/x86/kvm_pv_test.c @@ -13,7 +13,7 @@ #include "processor.h" struct msr_data { - uint32_t idx; + u32 idx; const char *name; }; @@ -40,8 +40,8 @@ static struct msr_data msrs_to_test[] = { static void test_msr(struct msr_data *msr) { - uint64_t ignored; - uint8_t vector; + u64 ignored; + u8 vector; PR_MSR(msr); @@ -53,7 +53,7 @@ static void test_msr(struct msr_data *msr) } struct hcall_data { - uint64_t nr; + u64 nr; const char *name; }; @@ -73,7 +73,7 @@ static struct hcall_data hcalls_to_test[] = { static void test_hcall(struct hcall_data *hc) { - uint64_t r; + u64 r; PR_HCALL(hc); r = kvm_hypercall(hc->nr, 0, 0, 0, 0); diff --git a/tools/testing/selftests/kvm/x86/monitor_mwait_test.c b/tools/testing/selftests/kvm/x86/monitor_mwait_test.c index e45c028d2a7e..9c156cf7db0e 100644 --- a/tools/testing/selftests/kvm/x86/monitor_mwait_test.c +++ 
b/tools/testing/selftests/kvm/x86/monitor_mwait_test.c @@ -67,7 +67,7 @@ static void guest_monitor_wait(void *arg) int main(int argc, char *argv[]) { - uint64_t disabled_quirks; + u64 disabled_quirks; struct kvm_vcpu *vcpu; struct kvm_vm *vm; struct ucall uc; diff --git a/tools/testing/selftests/kvm/x86/nested_close_kvm_test.c b/tools/testing/selftests/kvm/x86/nested_close_kvm_test.c index f001cb836bfa..761fec293408 100644 --- a/tools/testing/selftests/kvm/x86/nested_close_kvm_test.c +++ b/tools/testing/selftests/kvm/x86/nested_close_kvm_test.c @@ -67,7 +67,7 @@ static void l1_guest_code(void *data) int main(int argc, char *argv[]) { - vm_vaddr_t guest_gva; + gva_t guest_gva; struct kvm_vcpu *vcpu; struct kvm_vm *vm; diff --git a/tools/testing/selftests/kvm/x86/nested_dirty_log_test.c b/tools/testing/selftests/kvm/x86/nested_dirty_log_test.c index 619229bbd693..0e67cce83570 100644 --- a/tools/testing/selftests/kvm/x86/nested_dirty_log_test.c +++ b/tools/testing/selftests/kvm/x86/nested_dirty_log_test.c @@ -47,10 +47,10 @@ #define TEST_SYNC_WRITE_FAULT BIT(1) #define TEST_SYNC_NO_FAULT BIT(2) -static void l2_guest_code(vm_vaddr_t base) +static void l2_guest_code(gva_t base) { - vm_vaddr_t page0 = TEST_GUEST_ADDR(base, 0); - vm_vaddr_t page1 = TEST_GUEST_ADDR(base, 1); + gva_t page0 = TEST_GUEST_ADDR(base, 0); + gva_t page1 = TEST_GUEST_ADDR(base, 1); READ_ONCE(*(u64 *)page0); GUEST_SYNC(page0 | TEST_SYNC_READ_FAULT); @@ -143,7 +143,7 @@ static void l1_guest_code(void *data) static void test_handle_ucall_sync(struct kvm_vm *vm, u64 arg, unsigned long *bmap) { - vm_vaddr_t gva = arg & ~(PAGE_SIZE - 1); + gva_t gva = arg & ~(PAGE_SIZE - 1); int page_nr, i; /* @@ -198,7 +198,7 @@ static void test_handle_ucall_sync(struct kvm_vm *vm, u64 arg, static void test_dirty_log(bool nested_tdp) { - vm_vaddr_t nested_gva = 0; + gva_t nested_gva = 0; unsigned long *bmap; struct kvm_vcpu *vcpu; struct kvm_vm *vm; diff --git a/tools/testing/selftests/kvm/x86/nested_emulation_test.c 
b/tools/testing/selftests/kvm/x86/nested_emulation_test.c index abc824dba04f..fb7dcbe53ac7 100644 --- a/tools/testing/selftests/kvm/x86/nested_emulation_test.c +++ b/tools/testing/selftests/kvm/x86/nested_emulation_test.c @@ -13,8 +13,8 @@ enum { struct emulated_instruction { const char name[32]; - uint8_t opcode[15]; - uint32_t exit_reason[NR_VIRTUALIZATION_FLAVORS]; + u8 opcode[15]; + u32 exit_reason[NR_VIRTUALIZATION_FLAVORS]; }; static struct emulated_instruction instructions[] = { @@ -32,13 +32,13 @@ static struct emulated_instruction instructions[] = { }, }; -static uint8_t kvm_fep[] = { 0x0f, 0x0b, 0x6b, 0x76, 0x6d }; /* ud2 ; .ascii "kvm" */ -static uint8_t l2_guest_code[sizeof(kvm_fep) + 15]; -static uint8_t *l2_instruction = &l2_guest_code[sizeof(kvm_fep)]; +static u8 kvm_fep[] = { 0x0f, 0x0b, 0x6b, 0x76, 0x6d }; /* ud2 ; .ascii "kvm" */ +static u8 l2_guest_code[sizeof(kvm_fep) + 15]; +static u8 *l2_instruction = &l2_guest_code[sizeof(kvm_fep)]; -static uint32_t get_instruction_length(struct emulated_instruction *insn) +static u32 get_instruction_length(struct emulated_instruction *insn) { - uint32_t i; + u32 i; for (i = 0; i < ARRAY_SIZE(insn->opcode) && insn->opcode[i]; i++) ; @@ -81,8 +81,8 @@ static void guest_code(void *test_data) for (i = 0; i < ARRAY_SIZE(instructions); i++) { struct emulated_instruction *insn = &instructions[i]; - uint32_t insn_len = get_instruction_length(insn); - uint32_t exit_insn_len; + u32 insn_len = get_instruction_length(insn); + u32 exit_insn_len; u32 exit_reason; /* @@ -122,7 +122,7 @@ static void guest_code(void *test_data) int main(int argc, char *argv[]) { - vm_vaddr_t nested_test_data_gva; + gva_t nested_test_data_gva; struct kvm_vcpu *vcpu; struct kvm_vm *vm; diff --git a/tools/testing/selftests/kvm/x86/nested_exceptions_test.c b/tools/testing/selftests/kvm/x86/nested_exceptions_test.c index 3641a42934ac..186e980aa8ee 100644 --- a/tools/testing/selftests/kvm/x86/nested_exceptions_test.c +++ 
b/tools/testing/selftests/kvm/x86/nested_exceptions_test.c @@ -72,7 +72,7 @@ static void l2_ss_injected_tf_test(void) } static void svm_run_l2(struct svm_test_data *svm, void *l2_code, int vector, - uint32_t error_code) + u32 error_code) { struct vmcb *vmcb = svm->vmcb; struct vmcb_control_area *ctrl = &vmcb->control; @@ -111,7 +111,7 @@ static void l1_svm_code(struct svm_test_data *svm) GUEST_DONE(); } -static void vmx_run_l2(void *l2_code, int vector, uint32_t error_code) +static void vmx_run_l2(void *l2_code, int vector, u32 error_code) { GUEST_ASSERT(!vmwrite(GUEST_RIP, (u64)l2_code)); @@ -216,7 +216,7 @@ static void queue_ss_exception(struct kvm_vcpu *vcpu, bool inject) */ int main(int argc, char *argv[]) { - vm_vaddr_t nested_test_data_gva; + gva_t nested_test_data_gva; struct kvm_vcpu_events events; struct kvm_vcpu *vcpu; struct kvm_vm *vm; diff --git a/tools/testing/selftests/kvm/x86/nested_invalid_cr3_test.c b/tools/testing/selftests/kvm/x86/nested_invalid_cr3_test.c index a6b6da9cf7fe..11fd2467d823 100644 --- a/tools/testing/selftests/kvm/x86/nested_invalid_cr3_test.c +++ b/tools/testing/selftests/kvm/x86/nested_invalid_cr3_test.c @@ -78,7 +78,7 @@ int main(int argc, char *argv[]) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; - vm_vaddr_t guest_gva = 0; + gva_t guest_gva = 0; TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX) || kvm_cpu_has(X86_FEATURE_SVM)); diff --git a/tools/testing/selftests/kvm/x86/nested_set_state_test.c b/tools/testing/selftests/kvm/x86/nested_set_state_test.c index 0f2102b43629..831380732671 100644 --- a/tools/testing/selftests/kvm/x86/nested_set_state_test.c +++ b/tools/testing/selftests/kvm/x86/nested_set_state_test.c @@ -250,14 +250,14 @@ void test_vmx_nested_state(struct kvm_vcpu *vcpu) static void vcpu_efer_enable_svm(struct kvm_vcpu *vcpu) { - uint64_t old_efer = vcpu_get_msr(vcpu, MSR_EFER); + u64 old_efer = vcpu_get_msr(vcpu, MSR_EFER); vcpu_set_msr(vcpu, MSR_EFER, old_efer | EFER_SVME); } static void vcpu_efer_disable_svm(struct 
kvm_vcpu *vcpu) { - uint64_t old_efer = vcpu_get_msr(vcpu, MSR_EFER); + u64 old_efer = vcpu_get_msr(vcpu, MSR_EFER); vcpu_set_msr(vcpu, MSR_EFER, old_efer & ~EFER_SVME); } diff --git a/tools/testing/selftests/kvm/x86/nested_tsc_adjust_test.c b/tools/testing/selftests/kvm/x86/nested_tsc_adjust_test.c index 2839f650e5c9..f0e4adac4751 100644 --- a/tools/testing/selftests/kvm/x86/nested_tsc_adjust_test.c +++ b/tools/testing/selftests/kvm/x86/nested_tsc_adjust_test.c @@ -53,9 +53,9 @@ enum { /* The virtual machine object. */ static struct kvm_vm *vm; -static void check_ia32_tsc_adjust(int64_t max) +static void check_ia32_tsc_adjust(s64 max) { - int64_t adjust; + s64 adjust; adjust = rdmsr(MSR_IA32_TSC_ADJUST); GUEST_SYNC(adjust); @@ -64,7 +64,7 @@ static void check_ia32_tsc_adjust(int64_t max) static void l2_guest_code(void) { - uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE; + u64 l1_tsc = rdtsc() - TSC_OFFSET_VALUE; wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE); check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE); @@ -88,7 +88,7 @@ static void l1_guest_code(void *data) */ if (this_cpu_has(X86_FEATURE_VMX)) { struct vmx_pages *vmx_pages = data; - uint32_t control; + u32 control; GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages)); GUEST_ASSERT(load_vmcs(vmx_pages)); @@ -117,7 +117,7 @@ static void l1_guest_code(void *data) GUEST_DONE(); } -static void report(int64_t val) +static void report(s64 val) { pr_info("IA32_TSC_ADJUST is %ld (%lld * TSC_ADJUST_VALUE + %lld).\n", val, val / TSC_ADJUST_VALUE, val % TSC_ADJUST_VALUE); @@ -125,7 +125,7 @@ static void report(int64_t val) int main(int argc, char *argv[]) { - vm_vaddr_t nested_gva; + gva_t nested_gva; struct kvm_vcpu *vcpu; TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX) || diff --git a/tools/testing/selftests/kvm/x86/nested_tsc_scaling_test.c b/tools/testing/selftests/kvm/x86/nested_tsc_scaling_test.c index 4260c9e4f489..190e93af20a1 100644 --- a/tools/testing/selftests/kvm/x86/nested_tsc_scaling_test.c +++ 
b/tools/testing/selftests/kvm/x86/nested_tsc_scaling_test.c @@ -19,7 +19,7 @@ /* L2 is scaled up (from L1's perspective) by this factor */ #define L2_SCALE_FACTOR 4ULL -#define TSC_OFFSET_L2 ((uint64_t) -33125236320908) +#define TSC_OFFSET_L2 ((u64)-33125236320908) #define TSC_MULTIPLIER_L2 (L2_SCALE_FACTOR << 48) #define L2_GUEST_STACK_SIZE 64 @@ -35,9 +35,9 @@ enum { USLEEP, UCHECK_L1, UCHECK_L2 }; * measurements, a difference of 1% between the actual and the expected value * is tolerated. */ -static void compare_tsc_freq(uint64_t actual, uint64_t expected) +static void compare_tsc_freq(u64 actual, u64 expected) { - uint64_t tolerance, thresh_low, thresh_high; + u64 tolerance, thresh_low, thresh_high; tolerance = expected / 100; thresh_low = expected - tolerance; @@ -55,7 +55,7 @@ static void compare_tsc_freq(uint64_t actual, uint64_t expected) static void check_tsc_freq(int level) { - uint64_t tsc_start, tsc_end, tsc_freq; + u64 tsc_start, tsc_end, tsc_freq; /* * Reading the TSC twice with about a second's difference should give @@ -106,7 +106,7 @@ static void l1_svm_code(struct svm_test_data *svm) static void l1_vmx_code(struct vmx_pages *vmx_pages) { unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; - uint32_t control; + u32 control; /* check that L1's frequency looks alright before launching L2 */ check_tsc_freq(UCHECK_L1); @@ -152,14 +152,14 @@ int main(int argc, char *argv[]) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; - vm_vaddr_t guest_gva = 0; - - uint64_t tsc_start, tsc_end; - uint64_t tsc_khz; - uint64_t l1_scale_factor; - uint64_t l0_tsc_freq = 0; - uint64_t l1_tsc_freq = 0; - uint64_t l2_tsc_freq = 0; + gva_t guest_gva = 0; + + u64 tsc_start, tsc_end; + u64 tsc_khz; + u64 l1_scale_factor; + u64 l0_tsc_freq = 0; + u64 l1_tsc_freq = 0; + u64 l2_tsc_freq = 0; TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX) || kvm_cpu_has(X86_FEATURE_SVM)); diff --git a/tools/testing/selftests/kvm/x86/nested_vmsave_vmload_test.c 
b/tools/testing/selftests/kvm/x86/nested_vmsave_vmload_test.c index 71717118d692..85d3f4cc76f3 100644 --- a/tools/testing/selftests/kvm/x86/nested_vmsave_vmload_test.c +++ b/tools/testing/selftests/kvm/x86/nested_vmsave_vmload_test.c @@ -128,7 +128,7 @@ static void l1_guest_code(struct svm_test_data *svm) int main(int argc, char *argv[]) { - vm_vaddr_t nested_gva = 0; + gva_t nested_gva = 0; struct vmcb *test_vmcb[2]; struct kvm_vcpu *vcpu; struct kvm_vm *vm; diff --git a/tools/testing/selftests/kvm/x86/nx_huge_pages_test.c b/tools/testing/selftests/kvm/x86/nx_huge_pages_test.c index c0d84827f736..70950067b989 100644 --- a/tools/testing/selftests/kvm/x86/nx_huge_pages_test.c +++ b/tools/testing/selftests/kvm/x86/nx_huge_pages_test.c @@ -32,7 +32,7 @@ #define RETURN_OPCODE 0xC3 /* Call the specified memory address. */ -static void guest_do_CALL(uint64_t target) +static void guest_do_CALL(u64 target) { ((void (*)(void)) target)(); } @@ -46,14 +46,14 @@ static void guest_do_CALL(uint64_t target) */ void guest_code(void) { - uint64_t hpage_1 = HPAGE_GVA; - uint64_t hpage_2 = hpage_1 + (PAGE_SIZE * 512); - uint64_t hpage_3 = hpage_2 + (PAGE_SIZE * 512); + u64 hpage_1 = HPAGE_GVA; + u64 hpage_2 = hpage_1 + (PAGE_SIZE * 512); + u64 hpage_3 = hpage_2 + (PAGE_SIZE * 512); - READ_ONCE(*(uint64_t *)hpage_1); + READ_ONCE(*(u64 *)hpage_1); GUEST_SYNC(1); - READ_ONCE(*(uint64_t *)hpage_2); + READ_ONCE(*(u64 *)hpage_2); GUEST_SYNC(2); guest_do_CALL(hpage_1); @@ -62,10 +62,10 @@ void guest_code(void) guest_do_CALL(hpage_3); GUEST_SYNC(4); - READ_ONCE(*(uint64_t *)hpage_1); + READ_ONCE(*(u64 *)hpage_1); GUEST_SYNC(5); - READ_ONCE(*(uint64_t *)hpage_3); + READ_ONCE(*(u64 *)hpage_3); GUEST_SYNC(6); } @@ -107,7 +107,7 @@ void run_test(int reclaim_period_ms, bool disable_nx_huge_pages, { struct kvm_vcpu *vcpu; struct kvm_vm *vm; - uint64_t nr_bytes; + u64 nr_bytes; void *hva; int r; diff --git a/tools/testing/selftests/kvm/x86/platform_info_test.c 
b/tools/testing/selftests/kvm/x86/platform_info_test.c index 9cbf283ebc55..80bb07e6531c 100644 --- a/tools/testing/selftests/kvm/x86/platform_info_test.c +++ b/tools/testing/selftests/kvm/x86/platform_info_test.c @@ -23,8 +23,8 @@ static void guest_code(void) { - uint64_t msr_platform_info; - uint8_t vector; + u64 msr_platform_info; + u8 vector; GUEST_SYNC(true); msr_platform_info = rdmsr(MSR_PLATFORM_INFO); @@ -42,7 +42,7 @@ int main(int argc, char *argv[]) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; - uint64_t msr_platform_info; + u64 msr_platform_info; struct ucall uc; TEST_REQUIRE(kvm_has_cap(KVM_CAP_MSR_PLATFORM_INFO)); diff --git a/tools/testing/selftests/kvm/x86/pmu_counters_test.c b/tools/testing/selftests/kvm/x86/pmu_counters_test.c index 3eaa216b96c0..dc6afac3aa91 100644 --- a/tools/testing/selftests/kvm/x86/pmu_counters_test.c +++ b/tools/testing/selftests/kvm/x86/pmu_counters_test.c @@ -30,9 +30,9 @@ #define NUM_INSNS_RETIRED (NUM_LOOPS * NUM_INSNS_PER_LOOP + NUM_EXTRA_INSNS) /* Track which architectural events are supported by hardware. */ -static uint32_t hardware_pmu_arch_events; +static u32 hardware_pmu_arch_events; -static uint8_t kvm_pmu_version; +static u8 kvm_pmu_version; static bool kvm_has_perf_caps; #define X86_PMU_FEATURE_NULL \ @@ -57,7 +57,7 @@ struct kvm_intel_pmu_event { * kvm_x86_pmu_feature use syntax that's only valid in function scope, and the * compiler often thinks the feature definitions aren't compile-time constants. 
*/ -static struct kvm_intel_pmu_event intel_event_to_feature(uint8_t idx) +static struct kvm_intel_pmu_event intel_event_to_feature(u8 idx) { const struct kvm_intel_pmu_event __intel_event_to_feature[] = { [INTEL_ARCH_CPU_CYCLES_INDEX] = { X86_PMU_FEATURE_CPU_CYCLES, X86_PMU_FEATURE_CPU_CYCLES_FIXED }, @@ -89,8 +89,8 @@ static struct kvm_intel_pmu_event intel_event_to_feature(uint8_t idx) static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu, void *guest_code, - uint8_t pmu_version, - uint64_t perf_capabilities) + u8 pmu_version, + u64 perf_capabilities) { struct kvm_vm *vm; @@ -132,7 +132,7 @@ static void run_vcpu(struct kvm_vcpu *vcpu) } while (uc.cmd != UCALL_DONE); } -static uint8_t guest_get_pmu_version(void) +static u8 guest_get_pmu_version(void) { /* * Return the effective PMU version, i.e. the minimum between what KVM @@ -141,7 +141,7 @@ static uint8_t guest_get_pmu_version(void) * supported by KVM to verify KVM doesn't freak out and do something * bizarre with an architecturally valid, but unsupported, version. */ - return min_t(uint8_t, kvm_pmu_version, this_cpu_property(X86_PROPERTY_PMU_VERSION)); + return min_t(u8, kvm_pmu_version, this_cpu_property(X86_PROPERTY_PMU_VERSION)); } /* @@ -153,9 +153,9 @@ static uint8_t guest_get_pmu_version(void) * Sanity check that in all cases, the event doesn't count when it's disabled, * and that KVM correctly emulates the write of an arbitrary value. 
*/ -static void guest_assert_event_count(uint8_t idx, uint32_t pmc, uint32_t pmc_msr) +static void guest_assert_event_count(u8 idx, u32 pmc, u32 pmc_msr) { - uint64_t count; + u64 count; count = _rdpmc(pmc); if (!(hardware_pmu_arch_events & BIT(idx))) @@ -236,7 +236,7 @@ do { \ FEP "xor %%eax, %%eax\n\t" \ FEP "xor %%edx, %%edx\n\t" \ "wrmsr\n\t" \ - :: "a"((uint32_t)_value), "d"(_value >> 32), \ + :: "a"((u32)_value), "d"(_value >> 32), \ "c"(_msr), "D"(_msr), [m]"m"(kvm_pmu_version) \ ); \ } while (0) @@ -255,8 +255,8 @@ do { \ guest_assert_event_count(_idx, _pmc, _pmc_msr); \ } while (0) -static void __guest_test_arch_event(uint8_t idx, uint32_t pmc, uint32_t pmc_msr, - uint32_t ctrl_msr, uint64_t ctrl_msr_value) +static void __guest_test_arch_event(u8 idx, u32 pmc, u32 pmc_msr, + u32 ctrl_msr, u64 ctrl_msr_value) { GUEST_TEST_EVENT(idx, pmc, pmc_msr, ctrl_msr, ctrl_msr_value, ""); @@ -264,14 +264,14 @@ static void __guest_test_arch_event(uint8_t idx, uint32_t pmc, uint32_t pmc_msr, GUEST_TEST_EVENT(idx, pmc, pmc_msr, ctrl_msr, ctrl_msr_value, KVM_FEP); } -static void guest_test_arch_event(uint8_t idx) +static void guest_test_arch_event(u8 idx) { - uint32_t nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS); - uint32_t pmu_version = guest_get_pmu_version(); + u32 nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS); + u32 pmu_version = guest_get_pmu_version(); /* PERF_GLOBAL_CTRL exists only for Architectural PMU Version 2+. */ bool guest_has_perf_global_ctrl = pmu_version >= 2; struct kvm_x86_pmu_feature gp_event, fixed_event; - uint32_t base_pmc_msr; + u32 base_pmc_msr; unsigned int i; /* The host side shouldn't invoke this without a guest PMU. 
*/ @@ -289,7 +289,7 @@ static void guest_test_arch_event(uint8_t idx) GUEST_ASSERT(nr_gp_counters); for (i = 0; i < nr_gp_counters; i++) { - uint64_t eventsel = ARCH_PERFMON_EVENTSEL_OS | + u64 eventsel = ARCH_PERFMON_EVENTSEL_OS | ARCH_PERFMON_EVENTSEL_ENABLE | intel_pmu_arch_events[idx]; @@ -320,7 +320,7 @@ static void guest_test_arch_event(uint8_t idx) static void guest_test_arch_events(void) { - uint8_t i; + u8 i; for (i = 0; i < NR_INTEL_ARCH_EVENTS; i++) guest_test_arch_event(i); @@ -328,8 +328,8 @@ static void guest_test_arch_events(void) GUEST_DONE(); } -static void test_arch_events(uint8_t pmu_version, uint64_t perf_capabilities, - uint8_t length, uint32_t unavailable_mask) +static void test_arch_events(u8 pmu_version, u64 perf_capabilities, + u8 length, u32 unavailable_mask) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; @@ -373,11 +373,11 @@ __GUEST_ASSERT(expect_gp ? vector == GP_VECTOR : !vector, \ "Expected " #insn "(0x%x) to yield 0x%lx, got 0x%lx", \ msr, expected, val); -static void guest_test_rdpmc(uint32_t rdpmc_idx, bool expect_success, - uint64_t expected_val) +static void guest_test_rdpmc(u32 rdpmc_idx, bool expect_success, + u64 expected_val) { - uint8_t vector; - uint64_t val; + u8 vector; + u64 val; vector = rdpmc_safe(rdpmc_idx, &val); GUEST_ASSERT_PMC_MSR_ACCESS(RDPMC, rdpmc_idx, !expect_success, vector); @@ -393,19 +393,19 @@ static void guest_test_rdpmc(uint32_t rdpmc_idx, bool expect_success, GUEST_ASSERT_PMC_VALUE(RDPMC, rdpmc_idx, val, expected_val); } -static void guest_rd_wr_counters(uint32_t base_msr, uint8_t nr_possible_counters, - uint8_t nr_counters, uint32_t or_mask) +static void guest_rd_wr_counters(u32 base_msr, u8 nr_possible_counters, + u8 nr_counters, u32 or_mask) { const bool pmu_has_fast_mode = !guest_get_pmu_version(); - uint8_t i; + u8 i; for (i = 0; i < nr_possible_counters; i++) { /* * TODO: Test a value that validates full-width writes and the * width of the counters. 
*/ - const uint64_t test_val = 0xffff; - const uint32_t msr = base_msr + i; + const u64 test_val = 0xffff; + const u32 msr = base_msr + i; /* * Fixed counters are supported if the counter is less than the @@ -418,12 +418,12 @@ static void guest_rd_wr_counters(uint32_t base_msr, uint8_t nr_possible_counters * KVM drops writes to MSR_P6_PERFCTR[0|1] if the counters are * unsupported, i.e. doesn't #GP and reads back '0'. */ - const uint64_t expected_val = expect_success ? test_val : 0; + const u64 expected_val = expect_success ? test_val : 0; const bool expect_gp = !expect_success && msr != MSR_P6_PERFCTR0 && msr != MSR_P6_PERFCTR1; - uint32_t rdpmc_idx; - uint8_t vector; - uint64_t val; + u32 rdpmc_idx; + u8 vector; + u64 val; vector = wrmsr_safe(msr, test_val); GUEST_ASSERT_PMC_MSR_ACCESS(WRMSR, msr, expect_gp, vector); @@ -461,9 +461,9 @@ static void guest_rd_wr_counters(uint32_t base_msr, uint8_t nr_possible_counters static void guest_test_gp_counters(void) { - uint8_t pmu_version = guest_get_pmu_version(); - uint8_t nr_gp_counters = 0; - uint32_t base_msr; + u8 pmu_version = guest_get_pmu_version(); + u8 nr_gp_counters = 0; + u32 base_msr; if (pmu_version) nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS); @@ -477,7 +477,7 @@ static void guest_test_gp_counters(void) * counters, of which there are none. 
*/ if (pmu_version > 1) { - uint64_t global_ctrl = rdmsr(MSR_CORE_PERF_GLOBAL_CTRL); + u64 global_ctrl = rdmsr(MSR_CORE_PERF_GLOBAL_CTRL); if (nr_gp_counters) GUEST_ASSERT_EQ(global_ctrl, GENMASK_ULL(nr_gp_counters - 1, 0)); @@ -495,8 +495,8 @@ static void guest_test_gp_counters(void) GUEST_DONE(); } -static void test_gp_counters(uint8_t pmu_version, uint64_t perf_capabilities, - uint8_t nr_gp_counters) +static void test_gp_counters(u8 pmu_version, u64 perf_capabilities, + u8 nr_gp_counters) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; @@ -514,9 +514,9 @@ static void test_gp_counters(uint8_t pmu_version, uint64_t perf_capabilities, static void guest_test_fixed_counters(void) { - uint64_t supported_bitmask = 0; - uint8_t nr_fixed_counters = 0; - uint8_t i; + u64 supported_bitmask = 0; + u8 nr_fixed_counters = 0; + u8 i; /* Fixed counters require Architectural vPMU Version 2+. */ if (guest_get_pmu_version() >= 2) @@ -533,8 +533,8 @@ static void guest_test_fixed_counters(void) nr_fixed_counters, supported_bitmask); for (i = 0; i < MAX_NR_FIXED_COUNTERS; i++) { - uint8_t vector; - uint64_t val; + u8 vector; + u64 val; if (i >= nr_fixed_counters && !(supported_bitmask & BIT_ULL(i))) { vector = wrmsr_safe(MSR_CORE_PERF_FIXED_CTR_CTRL, @@ -561,9 +561,8 @@ static void guest_test_fixed_counters(void) GUEST_DONE(); } -static void test_fixed_counters(uint8_t pmu_version, uint64_t perf_capabilities, - uint8_t nr_fixed_counters, - uint32_t supported_bitmask) +static void test_fixed_counters(u8 pmu_version, u64 perf_capabilities, + u8 nr_fixed_counters, u32 supported_bitmask) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; @@ -583,14 +582,14 @@ static void test_fixed_counters(uint8_t pmu_version, uint64_t perf_capabilities, static void test_intel_counters(void) { - uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS); - uint8_t nr_gp_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS); - uint8_t pmu_version = 
kvm_cpu_property(X86_PROPERTY_PMU_VERSION); + u8 nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS); + u8 nr_gp_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS); + u8 pmu_version = kvm_cpu_property(X86_PROPERTY_PMU_VERSION); unsigned int i; - uint8_t v, j; - uint32_t k; + u8 v, j; + u32 k; - const uint64_t perf_caps[] = { + const u64 perf_caps[] = { 0, PMU_CAP_FW_WRITES, }; @@ -602,7 +601,7 @@ static void test_intel_counters(void) * as alternating bit sequencues, e.g. to detect if KVM is checking the * wrong bit(s). */ - const uint32_t unavailable_masks[] = { + const u32 unavailable_masks[] = { 0x0, 0xffffffffu, 0xaaaaaaaau, @@ -620,7 +619,7 @@ static void test_intel_counters(void) * Intel, i.e. is the last version that is guaranteed to be backwards * compatible with KVM's existing behavior. */ - uint8_t max_pmu_version = max_t(typeof(pmu_version), pmu_version, 5); + u8 max_pmu_version = max_t(typeof(pmu_version), pmu_version, 5); /* * Detect the existence of events that aren't supported by selftests. diff --git a/tools/testing/selftests/kvm/x86/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86/pmu_event_filter_test.c index 93b61c077991..c1232344fda8 100644 --- a/tools/testing/selftests/kvm/x86/pmu_event_filter_test.c +++ b/tools/testing/selftests/kvm/x86/pmu_event_filter_test.c @@ -53,11 +53,11 @@ static const struct __kvm_pmu_event_filter base_event_filter = { }; struct { - uint64_t loads; - uint64_t stores; - uint64_t loads_stores; - uint64_t branches_retired; - uint64_t instructions_retired; + u64 loads; + u64 stores; + u64 loads_stores; + u64 branches_retired; + u64 instructions_retired; } pmc_results; /* @@ -75,9 +75,9 @@ static void guest_gp_handler(struct ex_regs *regs) * * Return on success. GUEST_SYNC(0) on error. 
*/ -static void check_msr(uint32_t msr, uint64_t bits_to_flip) +static void check_msr(u32 msr, u64 bits_to_flip) { - uint64_t v = rdmsr(msr) ^ bits_to_flip; + u64 v = rdmsr(msr) ^ bits_to_flip; wrmsr(msr, v); if (rdmsr(msr) != v) @@ -89,10 +89,10 @@ static void check_msr(uint32_t msr, uint64_t bits_to_flip) GUEST_SYNC(-EIO); } -static void run_and_measure_loop(uint32_t msr_base) +static void run_and_measure_loop(u32 msr_base) { - const uint64_t branches_retired = rdmsr(msr_base + 0); - const uint64_t insn_retired = rdmsr(msr_base + 1); + const u64 branches_retired = rdmsr(msr_base + 0); + const u64 insn_retired = rdmsr(msr_base + 1); __asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES})); @@ -147,7 +147,7 @@ static void amd_guest_code(void) * Run the VM to the next GUEST_SYNC(value), and return the value passed * to the sync. Any other exit from the guest is fatal. */ -static uint64_t run_vcpu_to_sync(struct kvm_vcpu *vcpu) +static u64 run_vcpu_to_sync(struct kvm_vcpu *vcpu) { struct ucall uc; @@ -161,7 +161,7 @@ static uint64_t run_vcpu_to_sync(struct kvm_vcpu *vcpu) static void run_vcpu_and_sync_pmc_results(struct kvm_vcpu *vcpu) { - uint64_t r; + u64 r; memset(&pmc_results, 0, sizeof(pmc_results)); sync_global_to_guest(vcpu->vm, pmc_results); @@ -182,7 +182,7 @@ static void run_vcpu_and_sync_pmc_results(struct kvm_vcpu *vcpu) */ static bool sanity_check_pmu(struct kvm_vcpu *vcpu) { - uint64_t r; + u64 r; vm_install_exception_handler(vcpu->vm, GP_VECTOR, guest_gp_handler); r = run_vcpu_to_sync(vcpu); @@ -195,7 +195,7 @@ static bool sanity_check_pmu(struct kvm_vcpu *vcpu) * Remove the first occurrence of 'event' (if any) from the filter's * event list. 
*/ -static void remove_event(struct __kvm_pmu_event_filter *f, uint64_t event) +static void remove_event(struct __kvm_pmu_event_filter *f, u64 event) { bool found = false; int i; @@ -212,8 +212,8 @@ static void remove_event(struct __kvm_pmu_event_filter *f, uint64_t event) #define ASSERT_PMC_COUNTING_INSTRUCTIONS() \ do { \ - uint64_t br = pmc_results.branches_retired; \ - uint64_t ir = pmc_results.instructions_retired; \ + u64 br = pmc_results.branches_retired; \ + u64 ir = pmc_results.instructions_retired; \ bool br_matched = this_pmu_has_errata(BRANCHES_RETIRED_OVERCOUNT) ? \ br >= NUM_BRANCHES : br == NUM_BRANCHES; \ \ @@ -228,8 +228,8 @@ do { \ #define ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS() \ do { \ - uint64_t br = pmc_results.branches_retired; \ - uint64_t ir = pmc_results.instructions_retired; \ + u64 br = pmc_results.branches_retired; \ + u64 ir = pmc_results.instructions_retired; \ \ TEST_ASSERT(!br, "%s: Branch instructions retired = %lu (expected 0)", \ __func__, br); \ @@ -378,7 +378,7 @@ static bool use_amd_pmu(void) static bool supports_event_mem_inst_retired(void) { - uint32_t eax, ebx, ecx, edx; + u32 eax, ebx, ecx, edx; cpuid(1, &eax, &ebx, &ecx, &edx); if (x86_family(eax) == 0x6) { @@ -415,15 +415,15 @@ static bool supports_event_mem_inst_retired(void) #define EXCLUDE_MASKED_ENTRY(event_select, mask, match) \ KVM_PMU_ENCODE_MASKED_ENTRY(event_select, mask, match, true) -static void masked_events_guest_test(uint32_t msr_base) +static void masked_events_guest_test(u32 msr_base) { /* * The actual value of the counters don't determine the outcome of * the test. Only that they are zero or non-zero. 
*/ - const uint64_t loads = rdmsr(msr_base + 0); - const uint64_t stores = rdmsr(msr_base + 1); - const uint64_t loads_stores = rdmsr(msr_base + 2); + const u64 loads = rdmsr(msr_base + 0); + const u64 stores = rdmsr(msr_base + 1); + const u64 loads_stores = rdmsr(msr_base + 2); int val; @@ -476,7 +476,7 @@ static void amd_masked_events_guest_code(void) } static void run_masked_events_test(struct kvm_vcpu *vcpu, - const uint64_t masked_events[], + const u64 masked_events[], const int nmasked_events) { struct __kvm_pmu_event_filter f = { @@ -485,7 +485,7 @@ static void run_masked_events_test(struct kvm_vcpu *vcpu, .flags = KVM_PMU_EVENT_FLAG_MASKED_EVENTS, }; - memcpy(f.events, masked_events, sizeof(uint64_t) * nmasked_events); + memcpy(f.events, masked_events, sizeof(u64) * nmasked_events); test_with_filter(vcpu, &f); } @@ -494,12 +494,12 @@ static void run_masked_events_test(struct kvm_vcpu *vcpu, #define ALLOW_LOADS_STORES BIT(2) struct masked_events_test { - uint64_t intel_events[MAX_TEST_EVENTS]; - uint64_t intel_event_end; - uint64_t amd_events[MAX_TEST_EVENTS]; - uint64_t amd_event_end; + u64 intel_events[MAX_TEST_EVENTS]; + u64 intel_event_end; + u64 amd_events[MAX_TEST_EVENTS]; + u64 amd_event_end; const char *msg; - uint32_t flags; + u32 flags; }; /* @@ -582,9 +582,9 @@ const struct masked_events_test test_cases[] = { }; static int append_test_events(const struct masked_events_test *test, - uint64_t *events, int nevents) + u64 *events, int nevents) { - const uint64_t *evts; + const u64 *evts; int i; evts = use_intel_pmu() ? 
test->intel_events : test->amd_events; @@ -603,7 +603,7 @@ static bool bool_eq(bool a, bool b) return a == b; } -static void run_masked_events_tests(struct kvm_vcpu *vcpu, uint64_t *events, +static void run_masked_events_tests(struct kvm_vcpu *vcpu, u64 *events, int nevents) { int ntests = ARRAY_SIZE(test_cases); @@ -630,7 +630,7 @@ static void run_masked_events_tests(struct kvm_vcpu *vcpu, uint64_t *events, } } -static void add_dummy_events(uint64_t *events, int nevents) +static void add_dummy_events(u64 *events, int nevents) { int i; @@ -650,7 +650,7 @@ static void add_dummy_events(uint64_t *events, int nevents) static void test_masked_events(struct kvm_vcpu *vcpu) { int nevents = KVM_PMU_EVENT_FILTER_MAX_EVENTS - MAX_TEST_EVENTS; - uint64_t events[KVM_PMU_EVENT_FILTER_MAX_EVENTS]; + u64 events[KVM_PMU_EVENT_FILTER_MAX_EVENTS]; /* Run the test cases against a sparse PMU event filter. */ run_masked_events_tests(vcpu, events, 0); @@ -668,8 +668,8 @@ static int set_pmu_event_filter(struct kvm_vcpu *vcpu, return __vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f); } -static int set_pmu_single_event_filter(struct kvm_vcpu *vcpu, uint64_t event, - uint32_t flags, uint32_t action) +static int set_pmu_single_event_filter(struct kvm_vcpu *vcpu, u64 event, + u32 flags, u32 action) { struct __kvm_pmu_event_filter f = { .nevents = 1, @@ -685,9 +685,9 @@ static int set_pmu_single_event_filter(struct kvm_vcpu *vcpu, uint64_t event, static void test_filter_ioctl(struct kvm_vcpu *vcpu) { - uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS); + u8 nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS); struct __kvm_pmu_event_filter f; - uint64_t e = ~0ul; + u64 e = ~0ul; int r; /* @@ -729,7 +729,7 @@ static void test_filter_ioctl(struct kvm_vcpu *vcpu) TEST_ASSERT(!r, "Masking non-existent fixed counters should be allowed"); } -static void intel_run_fixed_counter_guest_code(uint8_t idx) +static void 
intel_run_fixed_counter_guest_code(u8 idx) { for (;;) { wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0); @@ -745,8 +745,8 @@ static void intel_run_fixed_counter_guest_code(uint8_t idx) } } -static uint64_t test_with_fixed_counter_filter(struct kvm_vcpu *vcpu, - uint32_t action, uint32_t bitmap) +static u64 test_with_fixed_counter_filter(struct kvm_vcpu *vcpu, + u32 action, u32 bitmap) { struct __kvm_pmu_event_filter f = { .action = action, @@ -757,9 +757,9 @@ static uint64_t test_with_fixed_counter_filter(struct kvm_vcpu *vcpu, return run_vcpu_to_sync(vcpu); } -static uint64_t test_set_gp_and_fixed_event_filter(struct kvm_vcpu *vcpu, - uint32_t action, - uint32_t bitmap) +static u64 test_set_gp_and_fixed_event_filter(struct kvm_vcpu *vcpu, + u32 action, + u32 bitmap) { struct __kvm_pmu_event_filter f = base_event_filter; @@ -770,12 +770,12 @@ static uint64_t test_set_gp_and_fixed_event_filter(struct kvm_vcpu *vcpu, return run_vcpu_to_sync(vcpu); } -static void __test_fixed_counter_bitmap(struct kvm_vcpu *vcpu, uint8_t idx, - uint8_t nr_fixed_counters) +static void __test_fixed_counter_bitmap(struct kvm_vcpu *vcpu, u8 idx, + u8 nr_fixed_counters) { unsigned int i; - uint32_t bitmap; - uint64_t count; + u32 bitmap; + u64 count; TEST_ASSERT(nr_fixed_counters < sizeof(bitmap) * 8, "Invalid nr_fixed_counters"); @@ -815,10 +815,10 @@ static void __test_fixed_counter_bitmap(struct kvm_vcpu *vcpu, uint8_t idx, static void test_fixed_counter_bitmap(void) { - uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS); + u8 nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS); struct kvm_vm *vm; struct kvm_vcpu *vcpu; - uint8_t idx; + u8 idx; /* * Check that pmu_event_filter works as expected when it's applied to diff --git a/tools/testing/selftests/kvm/x86/private_mem_conversions_test.c b/tools/testing/selftests/kvm/x86/private_mem_conversions_test.c index 1969f4ab9b28..1d2f5d4fd45d 100644 --- 
a/tools/testing/selftests/kvm/x86/private_mem_conversions_test.c +++ b/tools/testing/selftests/kvm/x86/private_mem_conversions_test.c @@ -23,13 +23,13 @@ #include <processor.h> #define BASE_DATA_SLOT 10 -#define BASE_DATA_GPA ((uint64_t)(1ull << 32)) -#define PER_CPU_DATA_SIZE ((uint64_t)(SZ_2M + PAGE_SIZE)) +#define BASE_DATA_GPA ((u64)(1ull << 32)) +#define PER_CPU_DATA_SIZE ((u64)(SZ_2M + PAGE_SIZE)) /* Horrific macro so that the line info is captured accurately :-( */ #define memcmp_g(gpa, pattern, size) \ do { \ - uint8_t *mem = (uint8_t *)gpa; \ + u8 *mem = (u8 *)gpa; \ size_t i; \ \ for (i = 0; i < size; i++) \ @@ -38,7 +38,7 @@ do { \ pattern, i, gpa + i, mem[i]); \ } while (0) -static void memcmp_h(uint8_t *mem, uint64_t gpa, uint8_t pattern, size_t size) +static void memcmp_h(u8 *mem, gpa_t gpa, u8 pattern, size_t size) { size_t i; @@ -70,13 +70,13 @@ enum ucall_syncs { SYNC_PRIVATE, }; -static void guest_sync_shared(uint64_t gpa, uint64_t size, - uint8_t current_pattern, uint8_t new_pattern) +static void guest_sync_shared(gpa_t gpa, u64 size, + u8 current_pattern, u8 new_pattern) { GUEST_SYNC5(SYNC_SHARED, gpa, size, current_pattern, new_pattern); } -static void guest_sync_private(uint64_t gpa, uint64_t size, uint8_t pattern) +static void guest_sync_private(gpa_t gpa, u64 size, u8 pattern) { GUEST_SYNC4(SYNC_PRIVATE, gpa, size, pattern); } @@ -86,10 +86,10 @@ static void guest_sync_private(uint64_t gpa, uint64_t size, uint8_t pattern) #define MAP_GPA_SHARED BIT(1) #define MAP_GPA_DO_FALLOCATE BIT(2) -static void guest_map_mem(uint64_t gpa, uint64_t size, bool map_shared, +static void guest_map_mem(gpa_t gpa, u64 size, bool map_shared, bool do_fallocate) { - uint64_t flags = MAP_GPA_SET_ATTRIBUTES; + u64 flags = MAP_GPA_SET_ATTRIBUTES; if (map_shared) flags |= MAP_GPA_SHARED; @@ -98,19 +98,19 @@ static void guest_map_mem(uint64_t gpa, uint64_t size, bool map_shared, kvm_hypercall_map_gpa_range(gpa, size, flags); } -static void guest_map_shared(uint64_t 
gpa, uint64_t size, bool do_fallocate) +static void guest_map_shared(gpa_t gpa, u64 size, bool do_fallocate) { guest_map_mem(gpa, size, true, do_fallocate); } -static void guest_map_private(uint64_t gpa, uint64_t size, bool do_fallocate) +static void guest_map_private(gpa_t gpa, u64 size, bool do_fallocate) { guest_map_mem(gpa, size, false, do_fallocate); } struct { - uint64_t offset; - uint64_t size; + u64 offset; + u64 size; } static const test_ranges[] = { GUEST_STAGE(0, PAGE_SIZE), GUEST_STAGE(0, SZ_2M), @@ -119,11 +119,11 @@ struct { GUEST_STAGE(SZ_2M, PAGE_SIZE), }; -static void guest_test_explicit_conversion(uint64_t base_gpa, bool do_fallocate) +static void guest_test_explicit_conversion(u64 base_gpa, bool do_fallocate) { - const uint8_t def_p = 0xaa; - const uint8_t init_p = 0xcc; - uint64_t j; + const u8 def_p = 0xaa; + const u8 init_p = 0xcc; + u64 j; int i; /* Memory should be shared by default. */ @@ -134,12 +134,12 @@ static void guest_test_explicit_conversion(uint64_t base_gpa, bool do_fallocate) memcmp_g(base_gpa, init_p, PER_CPU_DATA_SIZE); for (i = 0; i < ARRAY_SIZE(test_ranges); i++) { - uint64_t gpa = base_gpa + test_ranges[i].offset; - uint64_t size = test_ranges[i].size; - uint8_t p1 = 0x11; - uint8_t p2 = 0x22; - uint8_t p3 = 0x33; - uint8_t p4 = 0x44; + gpa_t gpa = base_gpa + test_ranges[i].offset; + u64 size = test_ranges[i].size; + u8 p1 = 0x11; + u8 p2 = 0x22; + u8 p3 = 0x33; + u8 p4 = 0x44; /* * Set the test region to pattern one to differentiate it from @@ -214,10 +214,10 @@ skip: } } -static void guest_punch_hole(uint64_t gpa, uint64_t size) +static void guest_punch_hole(gpa_t gpa, u64 size) { /* "Mapping" memory shared via fallocate() is done via PUNCH_HOLE. */ - uint64_t flags = MAP_GPA_SHARED | MAP_GPA_DO_FALLOCATE; + u64 flags = MAP_GPA_SHARED | MAP_GPA_DO_FALLOCATE; kvm_hypercall_map_gpa_range(gpa, size, flags); } @@ -227,9 +227,9 @@ static void guest_punch_hole(uint64_t gpa, uint64_t size) * proper conversion. 
Freeing (PUNCH_HOLE) should zap SPTEs, and reallocating * (subsequent fault) should zero memory. */ -static void guest_test_punch_hole(uint64_t base_gpa, bool precise) +static void guest_test_punch_hole(u64 base_gpa, bool precise) { - const uint8_t init_p = 0xcc; + const u8 init_p = 0xcc; int i; /* @@ -239,8 +239,8 @@ static void guest_test_punch_hole(uint64_t base_gpa, bool precise) guest_map_private(base_gpa, PER_CPU_DATA_SIZE, false); for (i = 0; i < ARRAY_SIZE(test_ranges); i++) { - uint64_t gpa = base_gpa + test_ranges[i].offset; - uint64_t size = test_ranges[i].size; + gpa_t gpa = base_gpa + test_ranges[i].offset; + u64 size = test_ranges[i].size; /* * Free all memory before each iteration, even for the !precise @@ -268,7 +268,7 @@ static void guest_test_punch_hole(uint64_t base_gpa, bool precise) } } -static void guest_code(uint64_t base_gpa) +static void guest_code(u64 base_gpa) { /* * Run the conversion test twice, with and without doing fallocate() on @@ -289,8 +289,8 @@ static void guest_code(uint64_t base_gpa) static void handle_exit_hypercall(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; - uint64_t gpa = run->hypercall.args[0]; - uint64_t size = run->hypercall.args[1] * PAGE_SIZE; + gpa_t gpa = run->hypercall.args[0]; + u64 size = run->hypercall.args[1] * PAGE_SIZE; bool set_attributes = run->hypercall.args[2] & MAP_GPA_SET_ATTRIBUTES; bool map_shared = run->hypercall.args[2] & MAP_GPA_SHARED; bool do_fallocate = run->hypercall.args[2] & MAP_GPA_DO_FALLOCATE; @@ -337,7 +337,7 @@ static void *__test_mem_conversions(void *__vcpu) case UCALL_ABORT: REPORT_GUEST_ASSERT(uc); case UCALL_SYNC: { - uint64_t gpa = uc.args[1]; + gpa_t gpa = uc.args[1]; size_t size = uc.args[2]; size_t i; @@ -347,7 +347,7 @@ static void *__test_mem_conversions(void *__vcpu) for (i = 0; i < size; i += vm->page_size) { size_t nr_bytes = min_t(size_t, vm->page_size, size - i); - uint8_t *hva = addr_gpa2hva(vm, gpa + i); + u8 *hva = addr_gpa2hva(vm, gpa + i); /* In all 
cases, the host should observe the shared data. */ memcmp_h(hva, gpa + i, uc.args[3], nr_bytes); @@ -366,8 +366,8 @@ static void *__test_mem_conversions(void *__vcpu) } } -static void test_mem_conversions(enum vm_mem_backing_src_type src_type, uint32_t nr_vcpus, - uint32_t nr_memslots) +static void test_mem_conversions(enum vm_mem_backing_src_type src_type, u32 nr_vcpus, + u32 nr_memslots) { /* * Allocate enough memory so that each vCPU's chunk of memory can be @@ -402,7 +402,7 @@ static void test_mem_conversions(enum vm_mem_backing_src_type src_type, uint32_t KVM_MEM_GUEST_MEMFD, memfd, slot_size * i); for (i = 0; i < nr_vcpus; i++) { - uint64_t gpa = BASE_DATA_GPA + i * per_cpu_size; + gpa_t gpa = BASE_DATA_GPA + i * per_cpu_size; vcpu_args_set(vcpus[i], 1, gpa); @@ -450,8 +450,8 @@ static void usage(const char *cmd) int main(int argc, char *argv[]) { enum vm_mem_backing_src_type src_type = DEFAULT_VM_MEM_SRC; - uint32_t nr_memslots = 1; - uint32_t nr_vcpus = 1; + u32 nr_memslots = 1; + u32 nr_vcpus = 1; int opt; TEST_REQUIRE(kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(KVM_X86_SW_PROTECTED_VM)); diff --git a/tools/testing/selftests/kvm/x86/private_mem_kvm_exits_test.c b/tools/testing/selftests/kvm/x86/private_mem_kvm_exits_test.c index 13e72fcec8dd..10db9fe6d906 100644 --- a/tools/testing/selftests/kvm/x86/private_mem_kvm_exits_test.c +++ b/tools/testing/selftests/kvm/x86/private_mem_kvm_exits_test.c @@ -17,17 +17,17 @@ #define EXITS_TEST_SIZE (EXITS_TEST_NPAGES * PAGE_SIZE) #define EXITS_TEST_SLOT 10 -static uint64_t guest_repeatedly_read(void) +static u64 guest_repeatedly_read(void) { - volatile uint64_t value; + volatile u64 value; while (true) - value = *((uint64_t *) EXITS_TEST_GVA); + value = *((u64 *)EXITS_TEST_GVA); return value; } -static uint32_t run_vcpu_get_exit_reason(struct kvm_vcpu *vcpu) +static u32 run_vcpu_get_exit_reason(struct kvm_vcpu *vcpu) { int r; @@ -50,7 +50,7 @@ static void test_private_access_memslot_deleted(void) struct kvm_vcpu *vcpu; 
pthread_t vm_thread; void *thread_return; - uint32_t exit_reason; + u32 exit_reason; vm = vm_create_shape_with_one_vcpu(protected_vm_shape, &vcpu, guest_repeatedly_read); @@ -72,7 +72,7 @@ static void test_private_access_memslot_deleted(void) vm_mem_region_delete(vm, EXITS_TEST_SLOT); pthread_join(vm_thread, &thread_return); - exit_reason = (uint32_t)(uint64_t)thread_return; + exit_reason = (u32)(u64)thread_return; TEST_ASSERT_EQ(exit_reason, KVM_EXIT_MEMORY_FAULT); TEST_ASSERT_EQ(vcpu->run->memory_fault.flags, KVM_MEMORY_EXIT_FLAG_PRIVATE); @@ -86,7 +86,7 @@ static void test_private_access_memslot_not_private(void) { struct kvm_vm *vm; struct kvm_vcpu *vcpu; - uint32_t exit_reason; + u32 exit_reason; vm = vm_create_shape_with_one_vcpu(protected_vm_shape, &vcpu, guest_repeatedly_read); diff --git a/tools/testing/selftests/kvm/x86/set_boot_cpu_id.c b/tools/testing/selftests/kvm/x86/set_boot_cpu_id.c index 49913784bc82..8e3898646c69 100644 --- a/tools/testing/selftests/kvm/x86/set_boot_cpu_id.c +++ b/tools/testing/selftests/kvm/x86/set_boot_cpu_id.c @@ -86,11 +86,11 @@ static void run_vcpu(struct kvm_vcpu *vcpu) } } -static struct kvm_vm *create_vm(uint32_t nr_vcpus, uint32_t bsp_vcpu_id, +static struct kvm_vm *create_vm(u32 nr_vcpus, u32 bsp_vcpu_id, struct kvm_vcpu *vcpus[]) { struct kvm_vm *vm; - uint32_t i; + u32 i; vm = vm_create(nr_vcpus); @@ -104,7 +104,7 @@ static struct kvm_vm *create_vm(uint32_t nr_vcpus, uint32_t bsp_vcpu_id, return vm; } -static void run_vm_bsp(uint32_t bsp_vcpu_id) +static void run_vm_bsp(u32 bsp_vcpu_id) { struct kvm_vcpu *vcpus[2]; struct kvm_vm *vm; diff --git a/tools/testing/selftests/kvm/x86/set_sregs_test.c b/tools/testing/selftests/kvm/x86/set_sregs_test.c index f4095a3d1278..8e654cc9ab16 100644 --- a/tools/testing/selftests/kvm/x86/set_sregs_test.c +++ b/tools/testing/selftests/kvm/x86/set_sregs_test.c @@ -46,9 +46,9 @@ do { \ X86_CR4_MCE | X86_CR4_PGE | X86_CR4_PCE | \ X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT) -static uint64_t 
calc_supported_cr4_feature_bits(void) +static u64 calc_supported_cr4_feature_bits(void) { - uint64_t cr4 = KVM_ALWAYS_ALLOWED_CR4; + u64 cr4 = KVM_ALWAYS_ALLOWED_CR4; if (kvm_cpu_has(X86_FEATURE_UMIP)) cr4 |= X86_CR4_UMIP; @@ -74,7 +74,7 @@ static uint64_t calc_supported_cr4_feature_bits(void) return cr4; } -static void test_cr_bits(struct kvm_vcpu *vcpu, uint64_t cr4) +static void test_cr_bits(struct kvm_vcpu *vcpu, u64 cr4) { struct kvm_sregs sregs; int rc, i; diff --git a/tools/testing/selftests/kvm/x86/sev_init2_tests.c b/tools/testing/selftests/kvm/x86/sev_init2_tests.c index b238615196ad..8eeba2327c7c 100644 --- a/tools/testing/selftests/kvm/x86/sev_init2_tests.c +++ b/tools/testing/selftests/kvm/x86/sev_init2_tests.c @@ -34,7 +34,7 @@ static int __sev_ioctl(int vm_fd, int cmd_id, void *data) { struct kvm_sev_cmd cmd = { .id = cmd_id, - .data = (uint64_t)data, + .data = (u64)data, .sev_fd = open_sev_dev_path_or_exit(), }; int ret; @@ -94,7 +94,7 @@ void test_vm_types(void) "VM type is KVM_X86_SW_PROTECTED_VM"); } -void test_flags(uint32_t vm_type) +void test_flags(u32 vm_type) { int i; @@ -104,7 +104,7 @@ void test_flags(uint32_t vm_type) "invalid flag"); } -void test_features(uint32_t vm_type, uint64_t supported_features) +void test_features(u32 vm_type, u64 supported_features) { int i; diff --git a/tools/testing/selftests/kvm/x86/sev_smoke_test.c b/tools/testing/selftests/kvm/x86/sev_smoke_test.c index 8bd37a476f15..1a49ee391586 100644 --- a/tools/testing/selftests/kvm/x86/sev_smoke_test.c +++ b/tools/testing/selftests/kvm/x86/sev_smoke_test.c @@ -13,9 +13,9 @@ #include "linux/psp-sev.h" #include "sev.h" -static void guest_sev_test_msr(uint32_t msr) +static void guest_sev_test_msr(u32 msr) { - uint64_t val = rdmsr(msr); + u64 val = rdmsr(msr); wrmsr(msr, val); GUEST_ASSERT(val == rdmsr(msr)); @@ -23,7 +23,7 @@ static void guest_sev_test_msr(uint32_t msr) #define guest_sev_test_reg(reg) \ do { \ - uint64_t val = get_##reg(); \ + u64 val = get_##reg(); \ \ 
set_##reg(val); \ GUEST_ASSERT(val == get_##reg()); \ @@ -42,7 +42,7 @@ static void guest_sev_test_regs(void) static void guest_snp_code(void) { - uint64_t sev_msr = rdmsr(MSR_AMD64_SEV); + u64 sev_msr = rdmsr(MSR_AMD64_SEV); GUEST_ASSERT(sev_msr & MSR_AMD64_SEV_ENABLED); GUEST_ASSERT(sev_msr & MSR_AMD64_SEV_ES_ENABLED); @@ -104,19 +104,19 @@ static void compare_xsave(u8 *from_host, u8 *from_guest) abort(); } -static void test_sync_vmsa(uint32_t type, uint64_t policy) +static void test_sync_vmsa(u32 type, u64 policy) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; - vm_vaddr_t gva; + gva_t gva; void *hva; double x87val = M_PI; struct kvm_xsave __attribute__((aligned(64))) xsave = { 0 }; vm = vm_sev_create_with_one_vcpu(type, guest_code_xsave, &vcpu); - gva = vm_vaddr_alloc_shared(vm, PAGE_SIZE, KVM_UTIL_MIN_VADDR, - MEM_REGION_TEST_DATA); + gva = vm_alloc_shared(vm, PAGE_SIZE, KVM_UTIL_MIN_VADDR, + MEM_REGION_TEST_DATA); hva = addr_gva2hva(vm, gva); vcpu_args_set(vcpu, 1, gva); @@ -150,7 +150,7 @@ static void test_sync_vmsa(uint32_t type, uint64_t policy) kvm_vm_free(vm); } -static void test_sev(void *guest_code, uint32_t type, uint64_t policy) +static void test_sev(void *guest_code, u32 type, u64 policy) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; @@ -201,7 +201,7 @@ static void guest_shutdown_code(void) __asm__ __volatile__("ud2"); } -static void test_sev_shutdown(uint32_t type, uint64_t policy) +static void test_sev_shutdown(u32 type, u64 policy) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; @@ -218,7 +218,7 @@ static void test_sev_shutdown(uint32_t type, uint64_t policy) kvm_vm_free(vm); } -static void test_sev_smoke(void *guest, uint32_t type, uint64_t policy) +static void test_sev_smoke(void *guest, u32 type, u64 policy) { const u64 xf_mask = XFEATURE_MASK_X87_AVX; diff --git a/tools/testing/selftests/kvm/x86/smaller_maxphyaddr_emulation_test.c b/tools/testing/selftests/kvm/x86/smaller_maxphyaddr_emulation_test.c index 0e8aec568010..3dca85e95478 100644 --- 
a/tools/testing/selftests/kvm/x86/smaller_maxphyaddr_emulation_test.c +++ b/tools/testing/selftests/kvm/x86/smaller_maxphyaddr_emulation_test.c @@ -20,8 +20,8 @@ static void guest_code(bool tdp_enabled) { - uint64_t error_code; - uint64_t vector; + u64 error_code; + u64 vector; vector = kvm_asm_safe_ec(FLDS_MEM_EAX, error_code, "a"(MEM_REGION_GVA)); @@ -47,8 +47,8 @@ int main(int argc, char *argv[]) struct kvm_vcpu *vcpu; struct kvm_vm *vm; struct ucall uc; - uint64_t *hva; - uint64_t gpa; + u64 *hva; + gpa_t gpa; int rc; TEST_REQUIRE(kvm_has_cap(KVM_CAP_SMALLER_MAXPHYADDR)); diff --git a/tools/testing/selftests/kvm/x86/smm_test.c b/tools/testing/selftests/kvm/x86/smm_test.c index ade8412bf94a..740051167dbd 100644 --- a/tools/testing/selftests/kvm/x86/smm_test.c +++ b/tools/testing/selftests/kvm/x86/smm_test.c @@ -34,13 +34,13 @@ * independent subset of asm here. * SMI handler always report back fixed stage SMRAM_STAGE. */ -uint8_t smi_handler[] = { +u8 smi_handler[] = { 0xb0, SMRAM_STAGE, /* mov $SMRAM_STAGE, %al */ 0xe4, SYNC_PORT, /* in $SYNC_PORT, %al */ 0x0f, 0xaa, /* rsm */ }; -static inline void sync_with_host(uint64_t phase) +static inline void sync_with_host(u64 phase) { asm volatile("in $" XSTR(SYNC_PORT)", %%al \n" : "+a" (phase)); @@ -65,7 +65,7 @@ static void guest_code(void *arg) { #define L2_GUEST_STACK_SIZE 64 unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; - uint64_t apicbase = rdmsr(MSR_IA32_APICBASE); + u64 apicbase = rdmsr(MSR_IA32_APICBASE); struct svm_test_data *svm = arg; struct vmx_pages *vmx_pages = arg; @@ -113,7 +113,7 @@ static void guest_code(void *arg) int main(int argc, char *argv[]) { - vm_vaddr_t nested_gva = 0; + gva_t nested_gva = 0; struct kvm_vcpu *vcpu; struct kvm_regs regs; diff --git a/tools/testing/selftests/kvm/x86/state_test.c b/tools/testing/selftests/kvm/x86/state_test.c index 992a52504a4a..409c6cc9f921 100644 --- a/tools/testing/selftests/kvm/x86/state_test.c +++ b/tools/testing/selftests/kvm/x86/state_test.c @@ 
-144,8 +144,8 @@ static void __attribute__((__flatten__)) guest_code(void *arg) GUEST_SYNC(1); if (this_cpu_has(X86_FEATURE_XSAVE)) { - uint64_t supported_xcr0 = this_cpu_supported_xcr0(); - uint8_t buffer[PAGE_SIZE]; + u64 supported_xcr0 = this_cpu_supported_xcr0(); + u8 buffer[PAGE_SIZE]; memset(buffer, 0xcc, sizeof(buffer)); @@ -172,8 +172,8 @@ static void __attribute__((__flatten__)) guest_code(void *arg) } if (this_cpu_has(X86_FEATURE_MPX)) { - uint64_t bounds[2] = { 10, 0xffffffffull }; - uint64_t output[2] = { }; + u64 bounds[2] = { 10, 0xffffffffull }; + u64 output[2] = { }; GUEST_ASSERT(supported_xcr0 & XFEATURE_MASK_BNDREGS); GUEST_ASSERT(supported_xcr0 & XFEATURE_MASK_BNDCSR); @@ -257,8 +257,8 @@ void check_nested_state(int stage, struct kvm_x86_state *state) int main(int argc, char *argv[]) { - uint64_t *xstate_bv, saved_xstate_bv; - vm_vaddr_t nested_gva = 0; + u64 *xstate_bv, saved_xstate_bv; + gva_t nested_gva = 0; struct kvm_cpuid2 empty_cpuid = {}; struct kvm_regs regs1, regs2; struct kvm_vcpu *vcpu, *vcpuN; @@ -331,7 +331,7 @@ int main(int argc, char *argv[]) * supported features, even if something goes awry in saving * the original snapshot. 
*/ - xstate_bv = (void *)&((uint8_t *)state->xsave->region)[512]; + xstate_bv = (void *)&((u8 *)state->xsave->region)[512]; saved_xstate_bv = *xstate_bv; vcpuN = __vm_vcpu_add(vm, vcpu->id + 1); diff --git a/tools/testing/selftests/kvm/x86/svm_int_ctl_test.c b/tools/testing/selftests/kvm/x86/svm_int_ctl_test.c index 917b6066cfc1..d3cc5e4f7883 100644 --- a/tools/testing/selftests/kvm/x86/svm_int_ctl_test.c +++ b/tools/testing/selftests/kvm/x86/svm_int_ctl_test.c @@ -82,7 +82,7 @@ static void l1_guest_code(struct svm_test_data *svm) int main(int argc, char *argv[]) { struct kvm_vcpu *vcpu; - vm_vaddr_t svm_gva; + gva_t svm_gva; struct kvm_vm *vm; struct ucall uc; diff --git a/tools/testing/selftests/kvm/x86/svm_lbr_nested_state.c b/tools/testing/selftests/kvm/x86/svm_lbr_nested_state.c index ff99438824d3..7fbfaa054c95 100644 --- a/tools/testing/selftests/kvm/x86/svm_lbr_nested_state.c +++ b/tools/testing/selftests/kvm/x86/svm_lbr_nested_state.c @@ -97,9 +97,9 @@ void test_lbrv_nested_state(bool nested_lbrv) { struct kvm_x86_state *state = NULL; struct kvm_vcpu *vcpu; - vm_vaddr_t svm_gva; struct kvm_vm *vm; struct ucall uc; + gva_t svm_gva; pr_info("Testing with nested LBRV %s\n", nested_lbrv ? 
"enabled" : "disabled"); diff --git a/tools/testing/selftests/kvm/x86/svm_nested_clear_efer_svme.c b/tools/testing/selftests/kvm/x86/svm_nested_clear_efer_svme.c index a521a9eed061..6a89eaffc657 100644 --- a/tools/testing/selftests/kvm/x86/svm_nested_clear_efer_svme.c +++ b/tools/testing/selftests/kvm/x86/svm_nested_clear_efer_svme.c @@ -38,7 +38,7 @@ int main(int argc, char *argv[]) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; - vm_vaddr_t nested_gva = 0; + gva_t nested_gva = 0; TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM)); diff --git a/tools/testing/selftests/kvm/x86/svm_nested_shutdown_test.c b/tools/testing/selftests/kvm/x86/svm_nested_shutdown_test.c index 00135cbba35e..c6ea3d609a62 100644 --- a/tools/testing/selftests/kvm/x86/svm_nested_shutdown_test.c +++ b/tools/testing/selftests/kvm/x86/svm_nested_shutdown_test.c @@ -42,7 +42,7 @@ static void l1_guest_code(struct svm_test_data *svm, struct idt_entry *idt) int main(int argc, char *argv[]) { struct kvm_vcpu *vcpu; - vm_vaddr_t svm_gva; + gva_t svm_gva; struct kvm_vm *vm; TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM)); diff --git a/tools/testing/selftests/kvm/x86/svm_nested_soft_inject_test.c b/tools/testing/selftests/kvm/x86/svm_nested_soft_inject_test.c index 4bd1655f9e6d..f72f11d4c4f8 100644 --- a/tools/testing/selftests/kvm/x86/svm_nested_soft_inject_test.c +++ b/tools/testing/selftests/kvm/x86/svm_nested_soft_inject_test.c @@ -76,7 +76,7 @@ static void l2_guest_code_nmi(void) ud2(); } -static void l1_guest_code(struct svm_test_data *svm, uint64_t is_nmi, uint64_t idt_alt) +static void l1_guest_code(struct svm_test_data *svm, u64 is_nmi, u64 idt_alt) { #define L2_GUEST_STACK_SIZE 64 unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; @@ -144,8 +144,8 @@ static void run_test(bool is_nmi) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; - vm_vaddr_t svm_gva; - vm_vaddr_t idt_alt_vm; + gva_t svm_gva; + gva_t idt_alt_vm; struct kvm_guest_debug debug; pr_info("Running %s test\n", is_nmi ? 
"NMI" : "soft int"); @@ -161,14 +161,14 @@ static void run_test(bool is_nmi) if (!is_nmi) { void *idt, *idt_alt; - idt_alt_vm = vm_vaddr_alloc_page(vm); + idt_alt_vm = vm_alloc_page(vm); idt_alt = addr_gva2hva(vm, idt_alt_vm); idt = addr_gva2hva(vm, vm->arch.idt); memcpy(idt_alt, idt, getpagesize()); } else { idt_alt_vm = 0; } - vcpu_args_set(vcpu, 3, svm_gva, (uint64_t)is_nmi, (uint64_t)idt_alt_vm); + vcpu_args_set(vcpu, 3, svm_gva, (u64)is_nmi, (u64)idt_alt_vm); memset(&debug, 0, sizeof(debug)); vcpu_guest_debug_set(vcpu, &debug); diff --git a/tools/testing/selftests/kvm/x86/svm_nested_vmcb12_gpa.c b/tools/testing/selftests/kvm/x86/svm_nested_vmcb12_gpa.c index 569869bed20b..a4935ce2fb99 100644 --- a/tools/testing/selftests/kvm/x86/svm_nested_vmcb12_gpa.c +++ b/tools/testing/selftests/kvm/x86/svm_nested_vmcb12_gpa.c @@ -28,28 +28,28 @@ static void l2_code(void) vmcall(); } -static void l1_vmrun(struct svm_test_data *svm, u64 gpa) +static void l1_vmrun(struct svm_test_data *svm, gpa_t gpa) { generic_svm_setup(svm, l2_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]); asm volatile ("vmrun %[gpa]" : : [gpa] "a" (gpa) : "memory"); } -static void l1_vmload(struct svm_test_data *svm, u64 gpa) +static void l1_vmload(struct svm_test_data *svm, gpa_t gpa) { generic_svm_setup(svm, l2_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]); asm volatile ("vmload %[gpa]" : : [gpa] "a" (gpa) : "memory"); } -static void l1_vmsave(struct svm_test_data *svm, u64 gpa) +static void l1_vmsave(struct svm_test_data *svm, gpa_t gpa) { generic_svm_setup(svm, l2_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]); asm volatile ("vmsave %[gpa]" : : [gpa] "a" (gpa) : "memory"); } -static void l1_vmexit(struct svm_test_data *svm, u64 gpa) +static void l1_vmexit(struct svm_test_data *svm, gpa_t gpa) { generic_svm_setup(svm, l2_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]); @@ -74,7 +74,7 @@ static u64 unmappable_gpa(struct kvm_vcpu *vcpu) static void test_invalid_vmcb12(struct kvm_vcpu *vcpu) { - vm_vaddr_t nested_gva 
= 0; + gva_t nested_gva = 0; struct ucall uc; @@ -90,7 +90,7 @@ static void test_invalid_vmcb12(struct kvm_vcpu *vcpu) static void test_unmappable_vmcb12(struct kvm_vcpu *vcpu) { - vm_vaddr_t nested_gva = 0; + gva_t nested_gva = 0; vcpu_alloc_svm(vcpu->vm, &nested_gva); vcpu_args_set(vcpu, 2, nested_gva, unmappable_gpa(vcpu)); @@ -103,7 +103,7 @@ static void test_unmappable_vmcb12(struct kvm_vcpu *vcpu) static void test_unmappable_vmcb12_vmexit(struct kvm_vcpu *vcpu) { struct kvm_x86_state *state; - vm_vaddr_t nested_gva = 0; + gva_t nested_gva = 0; struct ucall uc; /* diff --git a/tools/testing/selftests/kvm/x86/svm_vmcall_test.c b/tools/testing/selftests/kvm/x86/svm_vmcall_test.c index 8a62cca28cfb..b1887242f3b8 100644 --- a/tools/testing/selftests/kvm/x86/svm_vmcall_test.c +++ b/tools/testing/selftests/kvm/x86/svm_vmcall_test.c @@ -36,7 +36,7 @@ static void l1_guest_code(struct svm_test_data *svm) int main(int argc, char *argv[]) { struct kvm_vcpu *vcpu; - vm_vaddr_t svm_gva; + gva_t svm_gva; struct kvm_vm *vm; TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM)); diff --git a/tools/testing/selftests/kvm/x86/sync_regs_test.c b/tools/testing/selftests/kvm/x86/sync_regs_test.c index 8fa3948b0170..e0c52321f87c 100644 --- a/tools/testing/selftests/kvm/x86/sync_regs_test.c +++ b/tools/testing/selftests/kvm/x86/sync_regs_test.c @@ -20,7 +20,7 @@ #include "kvm_util.h" #include "processor.h" -#define UCALL_PIO_PORT ((uint16_t)0x1000) +#define UCALL_PIO_PORT ((u16)0x1000) struct ucall uc_none = { .cmd = UCALL_NONE, diff --git a/tools/testing/selftests/kvm/x86/triple_fault_event_test.c b/tools/testing/selftests/kvm/x86/triple_fault_event_test.c index 56306a19144a..f1c488e0d497 100644 --- a/tools/testing/selftests/kvm/x86/triple_fault_event_test.c +++ b/tools/testing/selftests/kvm/x86/triple_fault_event_test.c @@ -72,13 +72,13 @@ int main(void) if (has_vmx) { - vm_vaddr_t vmx_pages_gva; + gva_t vmx_pages_gva; vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code_vmx); vcpu_alloc_vmx(vm, 
&vmx_pages_gva); vcpu_args_set(vcpu, 1, vmx_pages_gva); } else { - vm_vaddr_t svm_gva; + gva_t svm_gva; vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code_svm); vcpu_alloc_svm(vm, &svm_gva); diff --git a/tools/testing/selftests/kvm/x86/tsc_msrs_test.c b/tools/testing/selftests/kvm/x86/tsc_msrs_test.c index 12b0964f4f13..91583969a14f 100644 --- a/tools/testing/selftests/kvm/x86/tsc_msrs_test.c +++ b/tools/testing/selftests/kvm/x86/tsc_msrs_test.c @@ -95,7 +95,7 @@ int main(void) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; - uint64_t val; + u64 val; ksft_print_header(); ksft_set_plan(5); diff --git a/tools/testing/selftests/kvm/x86/tsc_scaling_sync.c b/tools/testing/selftests/kvm/x86/tsc_scaling_sync.c index 59c7304f805e..59da8d4da607 100644 --- a/tools/testing/selftests/kvm/x86/tsc_scaling_sync.c +++ b/tools/testing/selftests/kvm/x86/tsc_scaling_sync.c @@ -21,10 +21,10 @@ pthread_spinlock_t create_lock; #define TEST_TSC_KHZ 2345678UL #define TEST_TSC_OFFSET 200000000 -uint64_t tsc_sync; +u64 tsc_sync; static void guest_code(void) { - uint64_t start_tsc, local_tsc, tmp; + u64 start_tsc, local_tsc, tmp; start_tsc = rdtsc(); do { diff --git a/tools/testing/selftests/kvm/x86/ucna_injection_test.c b/tools/testing/selftests/kvm/x86/ucna_injection_test.c index 1e5e564523b3..df1ec8209c76 100644 --- a/tools/testing/selftests/kvm/x86/ucna_injection_test.c +++ b/tools/testing/selftests/kvm/x86/ucna_injection_test.c @@ -45,7 +45,7 @@ #define MCI_CTL2_RESERVED_BIT BIT_ULL(29) -static uint64_t supported_mcg_caps; +static u64 supported_mcg_caps; /* * Record states about the injected UCNA. @@ -53,30 +53,30 @@ static uint64_t supported_mcg_caps; * handler. Variables without the 'i_' prefixes are recorded in guest main * execution thread. 
*/ -static volatile uint64_t i_ucna_rcvd; -static volatile uint64_t i_ucna_addr; -static volatile uint64_t ucna_addr; -static volatile uint64_t ucna_addr2; +static volatile u64 i_ucna_rcvd; +static volatile u64 i_ucna_addr; +static volatile u64 ucna_addr; +static volatile u64 ucna_addr2; struct thread_params { struct kvm_vcpu *vcpu; - uint64_t *p_i_ucna_rcvd; - uint64_t *p_i_ucna_addr; - uint64_t *p_ucna_addr; - uint64_t *p_ucna_addr2; + u64 *p_i_ucna_rcvd; + u64 *p_i_ucna_addr; + u64 *p_ucna_addr; + u64 *p_ucna_addr2; }; static void verify_apic_base_addr(void) { - uint64_t msr = rdmsr(MSR_IA32_APICBASE); - uint64_t base = GET_APIC_BASE(msr); + u64 msr = rdmsr(MSR_IA32_APICBASE); + u64 base = GET_APIC_BASE(msr); GUEST_ASSERT(base == APIC_DEFAULT_GPA); } static void ucna_injection_guest_code(void) { - uint64_t ctl2; + u64 ctl2; verify_apic_base_addr(); xapic_enable(); @@ -106,7 +106,7 @@ static void ucna_injection_guest_code(void) static void cmci_disabled_guest_code(void) { - uint64_t ctl2 = rdmsr(MSR_IA32_MCx_CTL2(UCNA_BANK)); + u64 ctl2 = rdmsr(MSR_IA32_MCx_CTL2(UCNA_BANK)); wrmsr(MSR_IA32_MCx_CTL2(UCNA_BANK), ctl2 | MCI_CTL2_CMCI_EN); GUEST_DONE(); @@ -114,7 +114,7 @@ static void cmci_disabled_guest_code(void) static void cmci_enabled_guest_code(void) { - uint64_t ctl2 = rdmsr(MSR_IA32_MCx_CTL2(UCNA_BANK)); + u64 ctl2 = rdmsr(MSR_IA32_MCx_CTL2(UCNA_BANK)); wrmsr(MSR_IA32_MCx_CTL2(UCNA_BANK), ctl2 | MCI_CTL2_RESERVED_BIT); GUEST_DONE(); @@ -145,14 +145,15 @@ static void run_vcpu_expect_gp(struct kvm_vcpu *vcpu) printf("vCPU received GP in guest.\n"); } -static void inject_ucna(struct kvm_vcpu *vcpu, uint64_t addr) { +static void inject_ucna(struct kvm_vcpu *vcpu, u64 addr) +{ /* * A UCNA error is indicated with VAL=1, UC=1, PCC=0, S=0 and AR=0 in * the IA32_MCi_STATUS register. * MSCOD=1 (BIT[16] - MscodDataRdErr). 
* MCACOD=0x0090 (Memory controller error format, channel 0) */ - uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN | + u64 status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | 0x10090; struct kvm_x86_mce mce = {}; mce.status = status; @@ -216,10 +217,10 @@ static void test_ucna_injection(struct kvm_vcpu *vcpu, struct thread_params *par { struct kvm_vm *vm = vcpu->vm; params->vcpu = vcpu; - params->p_i_ucna_rcvd = (uint64_t *)addr_gva2hva(vm, (uint64_t)&i_ucna_rcvd); - params->p_i_ucna_addr = (uint64_t *)addr_gva2hva(vm, (uint64_t)&i_ucna_addr); - params->p_ucna_addr = (uint64_t *)addr_gva2hva(vm, (uint64_t)&ucna_addr); - params->p_ucna_addr2 = (uint64_t *)addr_gva2hva(vm, (uint64_t)&ucna_addr2); + params->p_i_ucna_rcvd = (u64 *)addr_gva2hva(vm, (u64)&i_ucna_rcvd); + params->p_i_ucna_addr = (u64 *)addr_gva2hva(vm, (u64)&i_ucna_addr); + params->p_ucna_addr = (u64 *)addr_gva2hva(vm, (u64)&ucna_addr); + params->p_ucna_addr2 = (u64 *)addr_gva2hva(vm, (u64)&ucna_addr2); run_ucna_injection(params); @@ -242,7 +243,7 @@ static void test_ucna_injection(struct kvm_vcpu *vcpu, struct thread_params *par static void setup_mce_cap(struct kvm_vcpu *vcpu, bool enable_cmci_p) { - uint64_t mcg_caps = MCG_CTL_P | MCG_SER_P | MCG_LMCE_P | KVM_MAX_MCE_BANKS; + u64 mcg_caps = MCG_CTL_P | MCG_SER_P | MCG_LMCE_P | KVM_MAX_MCE_BANKS; if (enable_cmci_p) mcg_caps |= MCG_CMCI_P; @@ -250,7 +251,7 @@ static void setup_mce_cap(struct kvm_vcpu *vcpu, bool enable_cmci_p) vcpu_ioctl(vcpu, KVM_X86_SETUP_MCE, &mcg_caps); } -static struct kvm_vcpu *create_vcpu_with_mce_cap(struct kvm_vm *vm, uint32_t vcpuid, +static struct kvm_vcpu *create_vcpu_with_mce_cap(struct kvm_vm *vm, u32 vcpuid, bool enable_cmci_p, void *guest_code) { struct kvm_vcpu *vcpu = vm_vcpu_add(vm, vcpuid, guest_code); diff --git a/tools/testing/selftests/kvm/x86/userspace_io_test.c b/tools/testing/selftests/kvm/x86/userspace_io_test.c index be7d72f3c029..9c5a87576c2e 100644 
--- a/tools/testing/selftests/kvm/x86/userspace_io_test.c +++ b/tools/testing/selftests/kvm/x86/userspace_io_test.c @@ -10,7 +10,7 @@ #include "kvm_util.h" #include "processor.h" -static void guest_ins_port80(uint8_t *buffer, unsigned int count) +static void guest_ins_port80(u8 *buffer, unsigned int count) { unsigned long end; @@ -26,7 +26,7 @@ static void guest_ins_port80(uint8_t *buffer, unsigned int count) static void guest_code(void) { - uint8_t buffer[8192]; + u8 buffer[8192]; int i; /* diff --git a/tools/testing/selftests/kvm/x86/userspace_msr_exit_test.c b/tools/testing/selftests/kvm/x86/userspace_msr_exit_test.c index 8463a9956410..2808ce727e5f 100644 --- a/tools/testing/selftests/kvm/x86/userspace_msr_exit_test.c +++ b/tools/testing/selftests/kvm/x86/userspace_msr_exit_test.c @@ -23,21 +23,21 @@ struct kvm_msr_filter filter_allow = { .nmsrs = 1, /* Test an MSR the kernel knows about. */ .base = MSR_IA32_XSS, - .bitmap = (uint8_t*)&deny_bits, + .bitmap = (u8 *)&deny_bits, }, { .flags = KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE, .nmsrs = 1, /* Test an MSR the kernel doesn't know about. */ .base = MSR_IA32_FLUSH_CMD, - .bitmap = (uint8_t*)&deny_bits, + .bitmap = (u8 *)&deny_bits, }, { .flags = KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE, .nmsrs = 1, /* Test a fabricated MSR that no one knows about. 
*/ .base = MSR_NON_EXISTENT, - .bitmap = (uint8_t*)&deny_bits, + .bitmap = (u8 *)&deny_bits, }, }, }; @@ -49,7 +49,7 @@ struct kvm_msr_filter filter_fs = { .flags = KVM_MSR_FILTER_READ, .nmsrs = 1, .base = MSR_FS_BASE, - .bitmap = (uint8_t*)&deny_bits, + .bitmap = (u8 *)&deny_bits, }, }, }; @@ -61,12 +61,12 @@ struct kvm_msr_filter filter_gs = { .flags = KVM_MSR_FILTER_READ, .nmsrs = 1, .base = MSR_GS_BASE, - .bitmap = (uint8_t*)&deny_bits, + .bitmap = (u8 *)&deny_bits, }, }, }; -static uint64_t msr_non_existent_data; +static u64 msr_non_existent_data; static int guest_exception_count; static u32 msr_reads, msr_writes; @@ -77,7 +77,7 @@ static u8 bitmap_c0000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE]; static u8 bitmap_c0000000_read[KVM_MSR_FILTER_MAX_BITMAP_SIZE]; static u8 bitmap_deadbeef[1] = { 0x1 }; -static void deny_msr(uint8_t *bitmap, u32 msr) +static void deny_msr(u8 *bitmap, u32 msr) { u32 idx = msr & (KVM_MSR_FILTER_MAX_BITMAP_SIZE - 1); @@ -142,26 +142,26 @@ struct kvm_msr_filter no_filter_deny = { * Note: Force test_rdmsr() to not be inlined to prevent the labels, * rdmsr_start and rdmsr_end, from being defined multiple times. */ -static noinline uint64_t test_rdmsr(uint32_t msr) +static noinline u64 test_rdmsr(u32 msr) { - uint32_t a, d; + u32 a, d; guest_exception_count = 0; __asm__ __volatile__("rdmsr_start: rdmsr; rdmsr_end:" : "=a"(a), "=d"(d) : "c"(msr) : "memory"); - return a | ((uint64_t) d << 32); + return a | ((u64)d << 32); } /* * Note: Force test_wrmsr() to not be inlined to prevent the labels, * wrmsr_start and wrmsr_end, from being defined multiple times. 
*/ -static noinline void test_wrmsr(uint32_t msr, uint64_t value) +static noinline void test_wrmsr(u32 msr, u64 value) { - uint32_t a = value; - uint32_t d = value >> 32; + u32 a = value; + u32 d = value >> 32; guest_exception_count = 0; @@ -176,26 +176,26 @@ extern char wrmsr_start, wrmsr_end; * Note: Force test_em_rdmsr() to not be inlined to prevent the labels, * rdmsr_start and rdmsr_end, from being defined multiple times. */ -static noinline uint64_t test_em_rdmsr(uint32_t msr) +static noinline u64 test_em_rdmsr(u32 msr) { - uint32_t a, d; + u32 a, d; guest_exception_count = 0; __asm__ __volatile__(KVM_FEP "em_rdmsr_start: rdmsr; em_rdmsr_end:" : "=a"(a), "=d"(d) : "c"(msr) : "memory"); - return a | ((uint64_t) d << 32); + return a | ((u64)d << 32); } /* * Note: Force test_em_wrmsr() to not be inlined to prevent the labels, * wrmsr_start and wrmsr_end, from being defined multiple times. */ -static noinline void test_em_wrmsr(uint32_t msr, uint64_t value) +static noinline void test_em_wrmsr(u32 msr, u64 value) { - uint32_t a = value; - uint32_t d = value >> 32; + u32 a = value; + u32 d = value >> 32; guest_exception_count = 0; @@ -208,7 +208,7 @@ extern char em_wrmsr_start, em_wrmsr_end; static void guest_code_filter_allow(void) { - uint64_t data; + u64 data; /* * Test userspace intercepting rdmsr / wrmsr for MSR_IA32_XSS. 
@@ -328,7 +328,7 @@ static void guest_code_filter_deny(void) static void guest_code_permission_bitmap(void) { - uint64_t data; + u64 data; data = test_rdmsr(MSR_FS_BASE); GUEST_ASSERT(data == MSR_FS_BASE); @@ -391,7 +391,7 @@ static void check_for_guest_assert(struct kvm_vcpu *vcpu) } } -static void process_rdmsr(struct kvm_vcpu *vcpu, uint32_t msr_index) +static void process_rdmsr(struct kvm_vcpu *vcpu, u32 msr_index) { struct kvm_run *run = vcpu->run; @@ -423,7 +423,7 @@ static void process_rdmsr(struct kvm_vcpu *vcpu, uint32_t msr_index) } } -static void process_wrmsr(struct kvm_vcpu *vcpu, uint32_t msr_index) +static void process_wrmsr(struct kvm_vcpu *vcpu, u32 msr_index) { struct kvm_run *run = vcpu->run; @@ -464,7 +464,7 @@ static void process_ucall_done(struct kvm_vcpu *vcpu) uc.cmd, UCALL_DONE); } -static uint64_t process_ucall(struct kvm_vcpu *vcpu) +static u64 process_ucall(struct kvm_vcpu *vcpu) { struct ucall uc = {}; @@ -489,20 +489,20 @@ static uint64_t process_ucall(struct kvm_vcpu *vcpu) } static void run_guest_then_process_rdmsr(struct kvm_vcpu *vcpu, - uint32_t msr_index) + u32 msr_index) { vcpu_run(vcpu); process_rdmsr(vcpu, msr_index); } static void run_guest_then_process_wrmsr(struct kvm_vcpu *vcpu, - uint32_t msr_index) + u32 msr_index) { vcpu_run(vcpu); process_wrmsr(vcpu, msr_index); } -static uint64_t run_guest_then_process_ucall(struct kvm_vcpu *vcpu) +static u64 run_guest_then_process_ucall(struct kvm_vcpu *vcpu) { vcpu_run(vcpu); return process_ucall(vcpu); @@ -519,7 +519,7 @@ KVM_ONE_VCPU_TEST_SUITE(user_msr); KVM_ONE_VCPU_TEST(user_msr, msr_filter_allow, guest_code_filter_allow) { struct kvm_vm *vm = vcpu->vm; - uint64_t cmd; + u64 cmd; int rc; rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR); @@ -732,7 +732,7 @@ static void run_msr_filter_flag_test(struct kvm_vm *vm) .flags = KVM_MSR_FILTER_READ, .nmsrs = 1, .base = 0, - .bitmap = (uint8_t *)&deny_bits, + .bitmap = (u8 *)&deny_bits, }, }, }; diff --git 
a/tools/testing/selftests/kvm/x86/vmx_apic_access_test.c b/tools/testing/selftests/kvm/x86/vmx_apic_access_test.c index a81a24761aac..1720113eae79 100644 --- a/tools/testing/selftests/kvm/x86/vmx_apic_access_test.c +++ b/tools/testing/selftests/kvm/x86/vmx_apic_access_test.c @@ -38,7 +38,7 @@ static void l1_guest_code(struct vmx_pages *vmx_pages, unsigned long high_gpa) { #define L2_GUEST_STACK_SIZE 64 unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; - uint32_t control; + u32 control; GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages)); GUEST_ASSERT(load_vmcs(vmx_pages)); @@ -72,7 +72,7 @@ static void l1_guest_code(struct vmx_pages *vmx_pages, unsigned long high_gpa) int main(int argc, char *argv[]) { unsigned long apic_access_addr = ~0ul; - vm_vaddr_t vmx_pages_gva; + gva_t vmx_pages_gva; unsigned long high_gpa; struct vmx_pages *vmx; bool done = false; diff --git a/tools/testing/selftests/kvm/x86/vmx_apicv_updates_test.c b/tools/testing/selftests/kvm/x86/vmx_apicv_updates_test.c index 337c53fddeff..80a4fd1e5bbb 100644 --- a/tools/testing/selftests/kvm/x86/vmx_apicv_updates_test.c +++ b/tools/testing/selftests/kvm/x86/vmx_apicv_updates_test.c @@ -33,7 +33,7 @@ static void l1_guest_code(struct vmx_pages *vmx_pages) { #define L2_GUEST_STACK_SIZE 64 unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; - uint32_t control; + u32 control; GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages)); GUEST_ASSERT(load_vmcs(vmx_pages)); @@ -110,7 +110,7 @@ static void l1_guest_code(struct vmx_pages *vmx_pages) int main(int argc, char *argv[]) { - vm_vaddr_t vmx_pages_gva; + gva_t vmx_pages_gva; struct vmx_pages *vmx; struct kvm_vcpu *vcpu; struct kvm_vm *vm; diff --git a/tools/testing/selftests/kvm/x86/vmx_invalid_nested_guest_state.c b/tools/testing/selftests/kvm/x86/vmx_invalid_nested_guest_state.c index a100ee5f0009..a2eaceed9ad5 100644 --- a/tools/testing/selftests/kvm/x86/vmx_invalid_nested_guest_state.c +++ b/tools/testing/selftests/kvm/x86/vmx_invalid_nested_guest_state.c @@ 
-52,7 +52,7 @@ static void l1_guest_code(struct vmx_pages *vmx_pages) int main(int argc, char *argv[]) { - vm_vaddr_t vmx_pages_gva; + gva_t vmx_pages_gva; struct kvm_sregs sregs; struct kvm_vcpu *vcpu; struct kvm_run *run; diff --git a/tools/testing/selftests/kvm/x86/vmx_msrs_test.c b/tools/testing/selftests/kvm/x86/vmx_msrs_test.c index 90720b6205f4..c1e8632a1bb6 100644 --- a/tools/testing/selftests/kvm/x86/vmx_msrs_test.c +++ b/tools/testing/selftests/kvm/x86/vmx_msrs_test.c @@ -12,11 +12,10 @@ #include "kvm_util.h" #include "vmx.h" -static void vmx_fixed1_msr_test(struct kvm_vcpu *vcpu, uint32_t msr_index, - uint64_t mask) +static void vmx_fixed1_msr_test(struct kvm_vcpu *vcpu, u32 msr_index, u64 mask) { - uint64_t val = vcpu_get_msr(vcpu, msr_index); - uint64_t bit; + u64 val = vcpu_get_msr(vcpu, msr_index); + u64 bit; mask &= val; @@ -26,11 +25,10 @@ static void vmx_fixed1_msr_test(struct kvm_vcpu *vcpu, uint32_t msr_index, } } -static void vmx_fixed0_msr_test(struct kvm_vcpu *vcpu, uint32_t msr_index, - uint64_t mask) +static void vmx_fixed0_msr_test(struct kvm_vcpu *vcpu, u32 msr_index, u64 mask) { - uint64_t val = vcpu_get_msr(vcpu, msr_index); - uint64_t bit; + u64 val = vcpu_get_msr(vcpu, msr_index); + u64 bit; mask = ~mask | val; @@ -40,7 +38,7 @@ static void vmx_fixed0_msr_test(struct kvm_vcpu *vcpu, uint32_t msr_index, } } -static void vmx_fixed0and1_msr_test(struct kvm_vcpu *vcpu, uint32_t msr_index) +static void vmx_fixed0and1_msr_test(struct kvm_vcpu *vcpu, u32 msr_index) { vmx_fixed0_msr_test(vcpu, msr_index, GENMASK_ULL(31, 0)); vmx_fixed1_msr_test(vcpu, msr_index, GENMASK_ULL(63, 32)); @@ -68,10 +66,10 @@ static void vmx_save_restore_msrs_test(struct kvm_vcpu *vcpu) } static void __ia32_feature_control_msr_test(struct kvm_vcpu *vcpu, - uint64_t msr_bit, + u64 msr_bit, struct kvm_x86_cpu_feature feature) { - uint64_t val; + u64 val; vcpu_clear_cpuid_feature(vcpu, feature); @@ -90,7 +88,7 @@ static void __ia32_feature_control_msr_test(struct 
kvm_vcpu *vcpu, static void ia32_feature_control_msr_test(struct kvm_vcpu *vcpu) { - uint64_t supported_bits = FEAT_CTL_LOCKED | + u64 supported_bits = FEAT_CTL_LOCKED | FEAT_CTL_VMX_ENABLED_INSIDE_SMX | FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX | FEAT_CTL_SGX_LC_ENABLED | diff --git a/tools/testing/selftests/kvm/x86/vmx_nested_la57_state_test.c b/tools/testing/selftests/kvm/x86/vmx_nested_la57_state_test.c index 915c42001dba..f13dee317383 100644 --- a/tools/testing/selftests/kvm/x86/vmx_nested_la57_state_test.c +++ b/tools/testing/selftests/kvm/x86/vmx_nested_la57_state_test.c @@ -30,7 +30,7 @@ static void l1_guest_code(struct vmx_pages *vmx_pages) #define L2_GUEST_STACK_SIZE 64 unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; u64 guest_cr4; - vm_paddr_t pml5_pa, pml4_pa; + gpa_t pml5_pa, pml4_pa; u64 *pml5; u64 exit_reason; @@ -73,7 +73,7 @@ void guest_code(struct vmx_pages *vmx_pages) int main(int argc, char *argv[]) { - vm_vaddr_t vmx_pages_gva = 0; + gva_t vmx_pages_gva = 0; struct kvm_vm *vm; struct kvm_vcpu *vcpu; struct kvm_x86_state *state; diff --git a/tools/testing/selftests/kvm/x86/vmx_pmu_caps_test.c b/tools/testing/selftests/kvm/x86/vmx_pmu_caps_test.c index 7ff6f62e20a3..d004108dbdc6 100644 --- a/tools/testing/selftests/kvm/x86/vmx_pmu_caps_test.c +++ b/tools/testing/selftests/kvm/x86/vmx_pmu_caps_test.c @@ -52,16 +52,16 @@ static const union perf_capabilities format_caps = { .pebs_format = -1, }; -static void guest_test_perf_capabilities_gp(uint64_t val) +static void guest_test_perf_capabilities_gp(u64 val) { - uint8_t vector = wrmsr_safe(MSR_IA32_PERF_CAPABILITIES, val); + u8 vector = wrmsr_safe(MSR_IA32_PERF_CAPABILITIES, val); __GUEST_ASSERT(vector == GP_VECTOR, "Expected #GP for value '0x%lx', got %s", val, ex_str(vector)); } -static void guest_code(uint64_t current_val) +static void guest_code(u64 current_val) { int i; @@ -129,7 +129,7 @@ KVM_ONE_VCPU_TEST(vmx_pmu_caps, basic_perf_capabilities, guest_code) KVM_ONE_VCPU_TEST(vmx_pmu_caps, 
fungible_perf_capabilities, guest_code) { - const uint64_t fungible_caps = host_cap.capabilities & ~immutable_caps.capabilities; + const u64 fungible_caps = host_cap.capabilities & ~immutable_caps.capabilities; int bit; for_each_set_bit(bit, &fungible_caps, 64) { @@ -148,7 +148,7 @@ KVM_ONE_VCPU_TEST(vmx_pmu_caps, fungible_perf_capabilities, guest_code) */ KVM_ONE_VCPU_TEST(vmx_pmu_caps, immutable_perf_capabilities, guest_code) { - const uint64_t reserved_caps = (~host_cap.capabilities | + const u64 reserved_caps = (~host_cap.capabilities | immutable_caps.capabilities) & ~format_caps.capabilities; union perf_capabilities val = host_cap; @@ -210,7 +210,7 @@ KVM_ONE_VCPU_TEST(vmx_pmu_caps, lbr_perf_capabilities, guest_code) KVM_ONE_VCPU_TEST(vmx_pmu_caps, perf_capabilities_unsupported, guest_code) { - uint64_t val; + u64 val; int i, r; vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities); diff --git a/tools/testing/selftests/kvm/x86/vmx_preemption_timer_test.c b/tools/testing/selftests/kvm/x86/vmx_preemption_timer_test.c index 00dd2ac07a61..1b7b6ba23de7 100644 --- a/tools/testing/selftests/kvm/x86/vmx_preemption_timer_test.c +++ b/tools/testing/selftests/kvm/x86/vmx_preemption_timer_test.c @@ -152,7 +152,7 @@ void guest_code(struct vmx_pages *vmx_pages) int main(int argc, char *argv[]) { - vm_vaddr_t vmx_pages_gva = 0; + gva_t vmx_pages_gva = 0; struct kvm_regs regs1, regs2; struct kvm_vm *vm; diff --git a/tools/testing/selftests/kvm/x86/xapic_ipi_test.c b/tools/testing/selftests/kvm/x86/xapic_ipi_test.c index ae4a4b6c05ca..39ce9a9369f5 100644 --- a/tools/testing/selftests/kvm/x86/xapic_ipi_test.c +++ b/tools/testing/selftests/kvm/x86/xapic_ipi_test.c @@ -48,20 +48,20 @@ * Incremented in the IPI handler. 
Provides evidence to the sender that the IPI * arrived at the destination */ -static volatile uint64_t ipis_rcvd; +static volatile u64 ipis_rcvd; /* Data struct shared between host main thread and vCPUs */ struct test_data_page { - uint32_t halter_apic_id; - volatile uint64_t hlt_count; - volatile uint64_t wake_count; - uint64_t ipis_sent; - uint64_t migrations_attempted; - uint64_t migrations_completed; - uint32_t icr; - uint32_t icr2; - uint32_t halter_tpr; - uint32_t halter_ppr; + u32 halter_apic_id; + volatile u64 hlt_count; + volatile u64 wake_count; + u64 ipis_sent; + u64 migrations_attempted; + u64 migrations_completed; + u32 icr; + u32 icr2; + u32 halter_tpr; + u32 halter_ppr; /* * Record local version register as a cross-check that APIC access @@ -69,19 +69,19 @@ struct test_data_page { * arch/x86/kvm/lapic.c). If test is failing, check that values match * to determine whether APIC access exits are working. */ - uint32_t halter_lvr; + u32 halter_lvr; }; struct thread_params { struct test_data_page *data; struct kvm_vcpu *vcpu; - uint64_t *pipis_rcvd; /* host address of ipis_rcvd global */ + u64 *pipis_rcvd; /* host address of ipis_rcvd global */ }; void verify_apic_base_addr(void) { - uint64_t msr = rdmsr(MSR_IA32_APICBASE); - uint64_t base = GET_APIC_BASE(msr); + u64 msr = rdmsr(MSR_IA32_APICBASE); + u64 base = GET_APIC_BASE(msr); GUEST_ASSERT(base == APIC_DEFAULT_GPA); } @@ -125,12 +125,12 @@ static void guest_ipi_handler(struct ex_regs *regs) static void sender_guest_code(struct test_data_page *data) { - uint64_t last_wake_count; - uint64_t last_hlt_count; - uint64_t last_ipis_rcvd_count; - uint32_t icr_val; - uint32_t icr2_val; - uint64_t tsc_start; + u64 last_wake_count; + u64 last_hlt_count; + u64 last_ipis_rcvd_count; + u32 icr_val; + u32 icr2_val; + u64 tsc_start; verify_apic_base_addr(); xapic_enable(); @@ -248,7 +248,7 @@ static void cancel_join_vcpu_thread(pthread_t thread, struct kvm_vcpu *vcpu) } void do_migrations(struct test_data_page *data, 
int run_secs, int delay_usecs, - uint64_t *pipis_rcvd) + u64 *pipis_rcvd) { long pages_not_moved; unsigned long nodemask = 0; @@ -259,9 +259,9 @@ void do_migrations(struct test_data_page *data, int run_secs, int delay_usecs, int i; int from, to; unsigned long bit; - uint64_t hlt_count; - uint64_t wake_count; - uint64_t ipis_sent; + u64 hlt_count; + u64 wake_count; + u64 ipis_sent; fprintf(stderr, "Calling migrate_pages every %d microseconds\n", delay_usecs); @@ -393,12 +393,12 @@ int main(int argc, char *argv[]) int run_secs = 0; int delay_usecs = 0; struct test_data_page *data; - vm_vaddr_t test_data_page_vaddr; + gva_t test_data_page_gva; bool migrate = false; pthread_t threads[2]; struct thread_params params[2]; struct kvm_vm *vm; - uint64_t *pipis_rcvd; + u64 *pipis_rcvd; get_cmdline_args(argc, argv, &run_secs, &migrate, &delay_usecs); if (run_secs <= 0) @@ -414,16 +414,16 @@ int main(int argc, char *argv[]) params[1].vcpu = vm_vcpu_add(vm, 1, sender_guest_code); - test_data_page_vaddr = vm_vaddr_alloc_page(vm); - data = addr_gva2hva(vm, test_data_page_vaddr); + test_data_page_gva = vm_alloc_page(vm); + data = addr_gva2hva(vm, test_data_page_gva); memset(data, 0, sizeof(*data)); params[0].data = data; params[1].data = data; - vcpu_args_set(params[0].vcpu, 1, test_data_page_vaddr); - vcpu_args_set(params[1].vcpu, 1, test_data_page_vaddr); + vcpu_args_set(params[0].vcpu, 1, test_data_page_gva); + vcpu_args_set(params[1].vcpu, 1, test_data_page_gva); - pipis_rcvd = (uint64_t *)addr_gva2hva(vm, (uint64_t)&ipis_rcvd); + pipis_rcvd = (u64 *)addr_gva2hva(vm, (u64)&ipis_rcvd); params[0].pipis_rcvd = pipis_rcvd; params[1].pipis_rcvd = pipis_rcvd; diff --git a/tools/testing/selftests/kvm/x86/xapic_state_test.c b/tools/testing/selftests/kvm/x86/xapic_state_test.c index 0c5e12f5f14e..637bb90c1d93 100644 --- a/tools/testing/selftests/kvm/x86/xapic_state_test.c +++ b/tools/testing/selftests/kvm/x86/xapic_state_test.c @@ -23,7 +23,7 @@ static void xapic_guest_code(void) 
xapic_enable(); while (1) { - uint64_t val = (u64)xapic_read_reg(APIC_IRR) | + u64 val = (u64)xapic_read_reg(APIC_IRR) | (u64)xapic_read_reg(APIC_IRR + 0x10) << 32; xapic_write_reg(APIC_ICR2, val >> 32); @@ -43,7 +43,7 @@ static void x2apic_guest_code(void) x2apic_enable(); do { - uint64_t val = x2apic_read_reg(APIC_IRR) | + u64 val = x2apic_read_reg(APIC_IRR) | x2apic_read_reg(APIC_IRR + 0x10) << 32; if (val & X2APIC_RSVD_BITS_MASK) { @@ -56,12 +56,12 @@ static void x2apic_guest_code(void) } while (1); } -static void ____test_icr(struct xapic_vcpu *x, uint64_t val) +static void ____test_icr(struct xapic_vcpu *x, u64 val) { struct kvm_vcpu *vcpu = x->vcpu; struct kvm_lapic_state xapic; struct ucall uc; - uint64_t icr; + u64 icr; /* * Tell the guest what ICR value to write. Use the IRR to pass info, @@ -93,7 +93,7 @@ static void ____test_icr(struct xapic_vcpu *x, uint64_t val) TEST_ASSERT_EQ(icr, val & ~APIC_ICR_BUSY); } -static void __test_icr(struct xapic_vcpu *x, uint64_t val) +static void __test_icr(struct xapic_vcpu *x, u64 val) { /* * The BUSY bit is reserved on both AMD and Intel, but only AMD treats @@ -109,7 +109,7 @@ static void __test_icr(struct xapic_vcpu *x, uint64_t val) static void test_icr(struct xapic_vcpu *x) { struct kvm_vcpu *vcpu = x->vcpu; - uint64_t icr, i, j; + u64 icr, i, j; icr = APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_FIXED; for (i = 0; i <= 0xff; i++) @@ -142,9 +142,9 @@ static void test_icr(struct xapic_vcpu *x) __test_icr(x, -1ull & ~APIC_DM_FIXED_MASK); } -static void __test_apic_id(struct kvm_vcpu *vcpu, uint64_t apic_base) +static void __test_apic_id(struct kvm_vcpu *vcpu, u64 apic_base) { - uint32_t apic_id, expected; + u32 apic_id, expected; struct kvm_lapic_state xapic; vcpu_set_msr(vcpu, MSR_IA32_APICBASE, apic_base); @@ -170,9 +170,9 @@ static void __test_apic_id(struct kvm_vcpu *vcpu, uint64_t apic_base) */ static void test_apic_id(void) { - const uint32_t NR_VCPUS = 3; + const u32 NR_VCPUS = 3; struct kvm_vcpu 
*vcpus[NR_VCPUS]; - uint64_t apic_base; + u64 apic_base; struct kvm_vm *vm; int i; diff --git a/tools/testing/selftests/kvm/x86/xapic_tpr_test.c b/tools/testing/selftests/kvm/x86/xapic_tpr_test.c index 3862134d9d40..ab25db2235d5 100644 --- a/tools/testing/selftests/kvm/x86/xapic_tpr_test.c +++ b/tools/testing/selftests/kvm/x86/xapic_tpr_test.c @@ -58,7 +58,7 @@ static void tpr_guest_irq_queue(void) if (is_x2apic) { x2apic_write_reg(APIC_SELF_IPI, IRQ_VECTOR); } else { - uint32_t icr, icr2; + u32 icr, icr2; icr = APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_FIXED | IRQ_VECTOR; @@ -69,9 +69,9 @@ static void tpr_guest_irq_queue(void) } } -static uint8_t tpr_guest_tpr_get(void) +static u8 tpr_guest_tpr_get(void) { - uint32_t taskpri; + u32 taskpri; if (is_x2apic) taskpri = x2apic_read_reg(APIC_TASKPRI); @@ -81,9 +81,9 @@ static uint8_t tpr_guest_tpr_get(void) return GET_APIC_PRI(taskpri); } -static uint8_t tpr_guest_ppr_get(void) +static u8 tpr_guest_ppr_get(void) { - uint32_t procpri; + u32 procpri; if (is_x2apic) procpri = x2apic_read_reg(APIC_PROCPRI); @@ -93,9 +93,9 @@ static uint8_t tpr_guest_ppr_get(void) return GET_APIC_PRI(procpri); } -static uint8_t tpr_guest_cr8_get(void) +static u8 tpr_guest_cr8_get(void) { - uint64_t cr8; + u64 cr8; asm volatile ("mov %%cr8, %[cr8]\n\t" : [cr8] "=r"(cr8)); @@ -104,7 +104,7 @@ static uint8_t tpr_guest_cr8_get(void) static void tpr_guest_check_tpr_ppr_cr8_equal(void) { - uint8_t tpr; + u8 tpr; tpr = tpr_guest_tpr_get(); @@ -157,19 +157,19 @@ static void tpr_guest_code(void) GUEST_DONE(); } -static uint8_t lapic_tpr_get(struct kvm_lapic_state *xapic) +static u8 lapic_tpr_get(struct kvm_lapic_state *xapic) { return GET_APIC_PRI(*((u32 *)&xapic->regs[APIC_TASKPRI])); } -static void lapic_tpr_set(struct kvm_lapic_state *xapic, uint8_t val) +static void lapic_tpr_set(struct kvm_lapic_state *xapic, u8 val) { u32 *taskpri = (u32 *)&xapic->regs[APIC_TASKPRI]; *taskpri = SET_APIC_PRI(*taskpri, val); } -static uint8_t sregs_tpr(struct 
kvm_sregs *sregs) +static u8 sregs_tpr(struct kvm_sregs *sregs) { return sregs->cr8 & GENMASK(3, 0); } @@ -197,7 +197,7 @@ static void test_tpr_check_tpr_cr8_equal(struct kvm_vcpu *vcpu) static void test_tpr_set_tpr_for_irq(struct kvm_vcpu *vcpu, bool mask) { struct kvm_lapic_state xapic; - uint8_t tpr; + u8 tpr; static_assert(IRQ_VECTOR >= 16, "invalid IRQ vector number"); tpr = IRQ_VECTOR / 16; diff --git a/tools/testing/selftests/kvm/x86/xcr0_cpuid_test.c b/tools/testing/selftests/kvm/x86/xcr0_cpuid_test.c index d038c1571729..40dc9e6b3fad 100644 --- a/tools/testing/selftests/kvm/x86/xcr0_cpuid_test.c +++ b/tools/testing/selftests/kvm/x86/xcr0_cpuid_test.c @@ -21,7 +21,7 @@ */ #define ASSERT_XFEATURE_DEPENDENCIES(supported_xcr0, xfeatures, dependencies) \ do { \ - uint64_t __supported = (supported_xcr0) & ((xfeatures) | (dependencies)); \ + u64 __supported = (supported_xcr0) & ((xfeatures) | (dependencies)); \ \ __GUEST_ASSERT((__supported & (xfeatures)) != (xfeatures) || \ __supported == ((xfeatures) | (dependencies)), \ @@ -39,7 +39,7 @@ do { \ */ #define ASSERT_ALL_OR_NONE_XFEATURE(supported_xcr0, xfeatures) \ do { \ - uint64_t __supported = (supported_xcr0) & (xfeatures); \ + u64 __supported = (supported_xcr0) & (xfeatures); \ \ __GUEST_ASSERT(!__supported || __supported == (xfeatures), \ "supported = 0x%lx, xfeatures = 0x%llx", \ @@ -48,8 +48,8 @@ do { \ static void guest_code(void) { - uint64_t initial_xcr0; - uint64_t supported_xcr0; + u64 initial_xcr0; + u64 supported_xcr0; int i, vector; set_cr4(get_cr4() | X86_CR4_OSXSAVE); diff --git a/tools/testing/selftests/kvm/x86/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86/xen_shinfo_test.c index 23909b501ac2..5076f6a75455 100644 --- a/tools/testing/selftests/kvm/x86/xen_shinfo_test.c +++ b/tools/testing/selftests/kvm/x86/xen_shinfo_test.c @@ -116,15 +116,15 @@ struct pvclock_wall_clock { } __attribute__((__packed__)); struct vcpu_runstate_info { - uint32_t state; - uint64_t state_entry_time; - uint64_t 
time[5]; /* Extra field for overrun check */ + u32 state; + u64 state_entry_time; + u64 time[5]; /* Extra field for overrun check */ }; struct compat_vcpu_runstate_info { - uint32_t state; - uint64_t state_entry_time; - uint64_t time[5]; + u32 state; + u64 state_entry_time; + u64 time[5]; } __attribute__((__packed__)); struct arch_vcpu_info { @@ -133,8 +133,8 @@ struct arch_vcpu_info { }; struct vcpu_info { - uint8_t evtchn_upcall_pending; - uint8_t evtchn_upcall_mask; + u8 evtchn_upcall_pending; + u8 evtchn_upcall_mask; unsigned long evtchn_pending_sel; struct arch_vcpu_info arch; struct pvclock_vcpu_time_info time; @@ -145,7 +145,7 @@ struct shared_info { unsigned long evtchn_pending[64]; unsigned long evtchn_mask[64]; struct pvclock_wall_clock wc; - uint32_t wc_sec_hi; + u32 wc_sec_hi; /* arch_shared_info here */ }; @@ -658,7 +658,7 @@ int main(int argc, char *argv[]) printf("Testing RUNSTATE_ADJUST\n"); rst.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST; memset(&rst.u, 0, sizeof(rst.u)); - rst.u.runstate.state = (uint64_t)-1; + rst.u.runstate.state = (u64)-1; rst.u.runstate.time_blocked = 0x5a - rs->time[RUNSTATE_blocked]; rst.u.runstate.time_offline = @@ -1113,7 +1113,7 @@ int main(int argc, char *argv[]) /* Don't change the address, just trigger a write */ struct kvm_xen_vcpu_attr adj = { .type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST, - .u.runstate.state = (uint64_t)-1 + .u.runstate.state = (u64)-1 }; vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &adj); diff --git a/tools/testing/selftests/kvm/x86/xss_msr_test.c b/tools/testing/selftests/kvm/x86/xss_msr_test.c index f331a4e9bae3..12c63df6bbce 100644 --- a/tools/testing/selftests/kvm/x86/xss_msr_test.c +++ b/tools/testing/selftests/kvm/x86/xss_msr_test.c @@ -17,7 +17,7 @@ int main(int argc, char *argv[]) bool xss_in_msr_list; struct kvm_vm *vm; struct kvm_vcpu *vcpu; - uint64_t xss_val; + u64 xss_val; int i, r; /* Create VM */ diff --git a/tools/testing/selftests/mm/config b/tools/testing/selftests/mm/config index 
1dbe2b4558ab..06f78bd232e2 100644 --- a/tools/testing/selftests/mm/config +++ b/tools/testing/selftests/mm/config @@ -13,3 +13,4 @@ CONFIG_PROFILING=y CONFIG_UPROBES=y CONFIG_MEMORY_FAILURE=y CONFIG_HWPOISON_INJECT=m +CONFIG_PROC_MEM_ALWAYS_FORCE=y diff --git a/tools/testing/selftests/sched_ext/Makefile b/tools/testing/selftests/sched_ext/Makefile index 789037be44c7..5d2dffca0e91 100644 --- a/tools/testing/selftests/sched_ext/Makefile +++ b/tools/testing/selftests/sched_ext/Makefile @@ -175,6 +175,7 @@ auto-test-targets := \ maximal \ maybe_null \ minimal \ + non_scx_kfunc_deny \ numa \ allowed_cpus \ peek_dsq \ diff --git a/tools/testing/selftests/sched_ext/non_scx_kfunc_deny.bpf.c b/tools/testing/selftests/sched_ext/non_scx_kfunc_deny.bpf.c new file mode 100644 index 000000000000..9f16d39255e7 --- /dev/null +++ b/tools/testing/selftests/sched_ext/non_scx_kfunc_deny.bpf.c @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Verify that context-sensitive SCX kfuncs (even "unlocked" ones) are + * restricted to only SCX struct_ops programs. Non-SCX struct_ops programs, + * such as TCP congestion control programs, should be rejected by the BPF + * verifier when attempting to call these kfuncs. + * + * Copyright (C) 2026 Ching-Chun (Jim) Huang <jserv@ccns.ncku.edu.tw> + * Copyright (C) 2026 Cheng-Yang Chou <yphbchou0911@gmail.com> + */ + +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +/* SCX kfunc from scx_kfunc_ids_any set */ +void scx_bpf_kick_cpu(s32 cpu, u64 flags) __ksym; + +SEC("struct_ops/ssthresh") +__u32 BPF_PROG(tcp_ca_ssthresh, struct sock *sk) +{ + /* + * This call should be rejected by the verifier because this is a + * TCP congestion control program (non-SCX struct_ops). 
+ */ + scx_bpf_kick_cpu(0, 0); + return 2; +} + +SEC("struct_ops/cong_avoid") +void BPF_PROG(tcp_ca_cong_avoid, struct sock *sk, __u32 ack, __u32 acked) {} + +SEC("struct_ops/undo_cwnd") +__u32 BPF_PROG(tcp_ca_undo_cwnd, struct sock *sk) { return 2; } + +SEC(".struct_ops") +struct tcp_congestion_ops tcp_non_scx_ca = { + .ssthresh = (void *)tcp_ca_ssthresh, + .cong_avoid = (void *)tcp_ca_cong_avoid, + .undo_cwnd = (void *)tcp_ca_undo_cwnd, + .name = "tcp_kfunc_deny", +}; + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/sched_ext/non_scx_kfunc_deny.c b/tools/testing/selftests/sched_ext/non_scx_kfunc_deny.c new file mode 100644 index 000000000000..1c031575fb87 --- /dev/null +++ b/tools/testing/selftests/sched_ext/non_scx_kfunc_deny.c @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Verify that context-sensitive SCX kfuncs (even "unlocked" ones) are + * restricted to only SCX struct_ops programs. Non-SCX struct_ops programs, + * such as TCP congestion control programs, should be rejected by the BPF + * verifier when attempting to call these kfuncs. 
+ * + * Copyright (C) 2026 Ching-Chun (Jim) Huang <jserv@ccns.ncku.edu.tw> + * Copyright (C) 2026 Cheng-Yang Chou <yphbchou0911@gmail.com> + */ + +#include <bpf/bpf.h> +#include <scx/common.h> +#include <unistd.h> +#include <errno.h> +#include <stdio.h> +#include "non_scx_kfunc_deny.bpf.skel.h" +#include "scx_test.h" + +static enum scx_test_status run(void *ctx) +{ + struct non_scx_kfunc_deny *skel; + int err; + + skel = non_scx_kfunc_deny__open(); + if (!skel) { + SCX_ERR("Failed to open skel"); + return SCX_TEST_FAIL; + } + + err = non_scx_kfunc_deny__load(skel); + non_scx_kfunc_deny__destroy(skel); + + if (err == 0) { + SCX_ERR("non-SCX BPF program loaded when it should have been rejected"); + return SCX_TEST_FAIL; + } + + return SCX_TEST_PASS; +} + +struct scx_test non_scx_kfunc_deny = { + .name = "non_scx_kfunc_deny", + .description = "Verify that non-SCX struct_ops programs cannot call SCX kfuncs", + .run = run, +}; +REGISTER_SCX_TEST(&non_scx_kfunc_deny) diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json index 557fb074acf0..cd19d05925e4 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json +++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json @@ -302,5 +302,31 @@ "$TC qdisc del dev $ETH root", "echo \"1\" > /sys/bus/netdevsim/del_device" ] + }, + { + "id": "c7e1", + "name": "Class dump after graft and delete of explicit child qdisc", + "category": [ + "qdisc", + "taprio" + ], + "plugins": { + "requires": "nsPlugin" + }, + "setup": [ + "echo \"1 1 8\" > /sys/bus/netdevsim/new_device", + "$TC qdisc replace dev $ETH handle 8001: parent root taprio num_tc 8 map 0 1 2 3 4 5 6 7 queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 base-time 0 sched-entry S ff 20000000 clockid CLOCK_TAI", + "$TC qdisc add dev $ETH parent 8001:1 handle 8002: pfifo", + "$TC qdisc del dev $ETH parent 8001:1 handle 8002:" + ], + "cmdUnderTest": "$TC class show dev $ETH", + 
"expExitCode": "0", + "verifyCmd": "$TC class show dev $ETH", + "matchPattern": "class taprio 8001:[0-9]+ root", + "matchCount": "8", + "teardown": [ + "$TC qdisc del dev $ETH root", + "echo \"1\" > /sys/bus/netdevsim/del_device" + ] } ] diff --git a/tools/testing/vma/include/dup.h b/tools/testing/vma/include/dup.h index b4864aad2db0..9e0dfd3a85b0 100644 --- a/tools/testing/vma/include/dup.h +++ b/tools/testing/vma/include/dup.h @@ -1330,7 +1330,7 @@ static inline int __compat_vma_mmap(struct vm_area_desc *desc, /* Update the VMA from the descriptor. */ compat_set_vma_from_desc(vma, desc); /* Complete any specified mmap actions. */ - return mmap_action_complete(vma, &desc->action); + return mmap_action_complete(vma, &desc->action, /*is_compat=*/true); } static inline int compat_vma_mmap(struct file *file, struct vm_area_struct *vma) diff --git a/tools/testing/vma/include/stubs.h b/tools/testing/vma/include/stubs.h index a30b8bc84955..64164e25658f 100644 --- a/tools/testing/vma/include/stubs.h +++ b/tools/testing/vma/include/stubs.h @@ -87,7 +87,8 @@ static inline int mmap_action_prepare(struct vm_area_desc *desc) } static inline int mmap_action_complete(struct vm_area_struct *vma, - struct mmap_action *action) + struct mmap_action *action, + bool is_compat) { return 0; } |
