Diffstat (limited to 'tools/testing/selftests/kvm/include')
34 files changed, 563 insertions, 579 deletions
diff --git a/tools/testing/selftests/kvm/include/arm64/arch_timer.h b/tools/testing/selftests/kvm/include/arm64/arch_timer.h
index e2c4e9f0010f..a5836d4ab7ee 100644
--- a/tools/testing/selftests/kvm/include/arm64/arch_timer.h
+++ b/tools/testing/selftests/kvm/include/arm64/arch_timer.h
@@ -18,20 +18,20 @@ enum arch_timer {
 #define CTL_ISTATUS	(1 << 2)
 
 #define msec_to_cycles(msec)	\
-	(timer_get_cntfrq() * (uint64_t)(msec) / 1000)
+	(timer_get_cntfrq() * (u64)(msec) / 1000)
 
 #define usec_to_cycles(usec)	\
-	(timer_get_cntfrq() * (uint64_t)(usec) / 1000000)
+	(timer_get_cntfrq() * (u64)(usec) / 1000000)
 
 #define cycles_to_usec(cycles) \
-	((uint64_t)(cycles) * 1000000 / timer_get_cntfrq())
+	((u64)(cycles) * 1000000 / timer_get_cntfrq())
 
-static inline uint32_t timer_get_cntfrq(void)
+static inline u32 timer_get_cntfrq(void)
 {
 	return read_sysreg(cntfrq_el0);
 }
 
-static inline uint64_t timer_get_cntct(enum arch_timer timer)
+static inline u64 timer_get_cntct(enum arch_timer timer)
 {
 	isb();
 
@@ -48,7 +48,7 @@ static inline uint64_t timer_get_cntct(enum arch_timer timer)
 	return 0;
 }
 
-static inline void timer_set_cval(enum arch_timer timer, uint64_t cval)
+static inline void timer_set_cval(enum arch_timer timer, u64 cval)
 {
 	switch (timer) {
 	case VIRTUAL:
@@ -64,7 +64,7 @@ static inline void timer_set_cval(enum arch_timer timer, uint64_t cval)
 	isb();
 }
 
-static inline uint64_t timer_get_cval(enum arch_timer timer)
+static inline u64 timer_get_cval(enum arch_timer timer)
 {
 	switch (timer) {
 	case VIRTUAL:
@@ -79,7 +79,7 @@ static inline uint64_t timer_get_cval(enum arch_timer timer)
 	return 0;
 }
 
-static inline void timer_set_tval(enum arch_timer timer, int32_t tval)
+static inline void timer_set_tval(enum arch_timer timer, s32 tval)
 {
 	switch (timer) {
 	case VIRTUAL:
@@ -95,7 +95,7 @@ static inline void timer_set_tval(enum arch_timer timer, int32_t tval)
 	isb();
 }
 
-static inline int32_t timer_get_tval(enum arch_timer timer)
+static inline s32 timer_get_tval(enum arch_timer timer)
 {
 	isb();
 	switch (timer) {
@@ -111,7 +111,7 @@ static inline int32_t timer_get_tval(enum arch_timer timer)
 	return 0;
 }
 
-static inline void timer_set_ctl(enum arch_timer timer, uint32_t ctl)
+static inline void timer_set_ctl(enum arch_timer timer, u32 ctl)
 {
 	switch (timer) {
 	case VIRTUAL:
@@ -127,7 +127,7 @@ static inline void timer_set_ctl(enum arch_timer timer, uint32_t ctl)
 	isb();
 }
 
-static inline uint32_t timer_get_ctl(enum arch_timer timer)
+static inline u32 timer_get_ctl(enum arch_timer timer)
 {
 	switch (timer) {
 	case VIRTUAL:
@@ -142,15 +142,15 @@ static inline uint32_t timer_get_ctl(enum arch_timer timer)
 	return 0;
 }
 
-static inline void timer_set_next_cval_ms(enum arch_timer timer, uint32_t msec)
+static inline void timer_set_next_cval_ms(enum arch_timer timer, u32 msec)
 {
-	uint64_t now_ct = timer_get_cntct(timer);
-	uint64_t next_ct = now_ct + msec_to_cycles(msec);
+	u64 now_ct = timer_get_cntct(timer);
+	u64 next_ct = now_ct + msec_to_cycles(msec);
 
 	timer_set_cval(timer, next_ct);
 }
 
-static inline void timer_set_next_tval_ms(enum arch_timer timer, uint32_t msec)
+static inline void timer_set_next_tval_ms(enum arch_timer timer, u32 msec)
 {
 	timer_set_tval(timer, msec_to_cycles(msec));
 }
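For context, a minimal guest-side sketch of how the converted timer helpers compose. It assumes CTL_ENABLE and CTL_IMASK, which are defined alongside CTL_ISTATUS in this header but fall outside the hunks shown:

/* Arm the virtual timer 10ms out, then poll until the ISTATUS bit latches. */
static void guest_wait_10ms(void)
{
	timer_set_next_tval_ms(VIRTUAL, 10);
	/* Enabled but masked: ISTATUS still sets, no IRQ is delivered. */
	timer_set_ctl(VIRTUAL, CTL_ENABLE | CTL_IMASK);

	while (!(timer_get_ctl(VIRTUAL) & CTL_ISTATUS))
		cpu_relax();

	timer_set_ctl(VIRTUAL, 0);	/* disable the timer again */
}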
diff --git a/tools/testing/selftests/kvm/include/arm64/delay.h b/tools/testing/selftests/kvm/include/arm64/delay.h
index 329e4f5079ea..6a5d4634af2c 100644
--- a/tools/testing/selftests/kvm/include/arm64/delay.h
+++ b/tools/testing/selftests/kvm/include/arm64/delay.h
@@ -8,10 +8,10 @@
 
 #include "arch_timer.h"
 
-static inline void __delay(uint64_t cycles)
+static inline void __delay(u64 cycles)
 {
 	enum arch_timer timer = VIRTUAL;
-	uint64_t start = timer_get_cntct(timer);
+	u64 start = timer_get_cntct(timer);
 
 	while ((timer_get_cntct(timer) - start) < cycles)
 		cpu_relax();
diff --git a/tools/testing/selftests/kvm/include/arm64/gic.h b/tools/testing/selftests/kvm/include/arm64/gic.h
index cc7a7f34ed37..615745093c98 100644
--- a/tools/testing/selftests/kvm/include/arm64/gic.h
+++ b/tools/testing/selftests/kvm/include/arm64/gic.h
@@ -48,8 +48,8 @@ void gic_set_dir(unsigned int intid);
  * split is true, EOI drops the priority and deactivates the interrupt.
  */
 void gic_set_eoi_split(bool split);
-void gic_set_priority_mask(uint64_t mask);
-void gic_set_priority(uint32_t intid, uint32_t prio);
+void gic_set_priority_mask(u64 mask);
+void gic_set_priority(u32 intid, u32 prio);
 void gic_irq_set_active(unsigned int intid);
 void gic_irq_clear_active(unsigned int intid);
 bool gic_irq_get_active(unsigned int intid);
@@ -59,7 +59,7 @@ bool gic_irq_get_pending(unsigned int intid);
 void gic_irq_set_config(unsigned int intid, bool is_edge);
 void gic_irq_set_group(unsigned int intid, bool group);
 
-void gic_rdist_enable_lpis(vm_paddr_t cfg_table, size_t cfg_table_size,
-			   vm_paddr_t pend_table);
+void gic_rdist_enable_lpis(gpa_t cfg_table, size_t cfg_table_size,
+			   gpa_t pend_table);
 
 #endif /* SELFTEST_KVM_GIC_H */
diff --git a/tools/testing/selftests/kvm/include/arm64/gic_v3_its.h b/tools/testing/selftests/kvm/include/arm64/gic_v3_its.h
index 58feef3eb386..a43a407e2d5c 100644
--- a/tools/testing/selftests/kvm/include/arm64/gic_v3_its.h
+++ b/tools/testing/selftests/kvm/include/arm64/gic_v3_its.h
@@ -5,11 +5,10 @@
 
 #include <linux/sizes.h>
 
-void its_init(vm_paddr_t coll_tbl, size_t coll_tbl_sz,
-	      vm_paddr_t device_tbl, size_t device_tbl_sz,
-	      vm_paddr_t cmdq, size_t cmdq_size);
+void its_init(gpa_t coll_tbl, size_t coll_tbl_sz, gpa_t device_tbl,
+	      size_t device_tbl_sz, gpa_t cmdq, size_t cmdq_size);
 
-void its_send_mapd_cmd(void *cmdq_base, u32 device_id, vm_paddr_t itt_base,
+void its_send_mapd_cmd(void *cmdq_base, u32 device_id, gpa_t itt_base,
 		       size_t itt_size, bool valid);
 void its_send_mapc_cmd(void *cmdq_base, u32 vcpu_id, u32 collection_id, bool valid);
 void its_send_mapti_cmd(void *cmdq_base, u32 device_id, u32 event_id,
diff --git a/tools/testing/selftests/kvm/include/arm64/processor.h b/tools/testing/selftests/kvm/include/arm64/processor.h
index ac97a1c436fc..b8a902ba8573 100644
--- a/tools/testing/selftests/kvm/include/arm64/processor.h
+++ b/tools/testing/selftests/kvm/include/arm64/processor.h
@@ -128,7 +128,7 @@
 #define PTE_ADDR_51_50_LPA2_SHIFT	8
 
 void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init);
-struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, u32 vcpu_id,
				  struct kvm_vcpu_init *init, void *guest_code);
 
 struct ex_regs {
@@ -167,8 +167,8 @@ enum {
	(v) == VECTOR_SYNC_LOWER_64 || \
	(v) == VECTOR_SYNC_LOWER_32)
 
-void aarch64_get_supported_page_sizes(uint32_t ipa, uint32_t *ipa4k,
-				      uint32_t *ipa16k, uint32_t *ipa64k);
+void aarch64_get_supported_page_sizes(u32 ipa, u32 *ipa4k,
+				      u32 *ipa16k, u32 *ipa64k);
 
 void vm_init_descriptor_tables(struct kvm_vm *vm);
 void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu);
@@ -179,8 +179,8 @@ void vm_install_exception_handler(struct kvm_vm *vm,
 void vm_install_sync_handler(struct kvm_vm *vm,
		int vector, int ec, handler_fn handler);
 
-uint64_t *virt_get_pte_hva_at_level(struct kvm_vm *vm, vm_vaddr_t gva, int level);
-uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva);
+u64 *virt_get_pte_hva_at_level(struct kvm_vm *vm, gva_t gva, int level);
+u64 *virt_get_pte_hva(struct kvm_vm *vm, gva_t gva);
 
 static inline void cpu_relax(void)
 {
@@ -287,9 +287,9 @@ struct arm_smccc_res {
  * @res: pointer to write the return values from registers x0-x3
  *
  */
-void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
-	       uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
-	       uint64_t arg6, struct arm_smccc_res *res);
+void smccc_hvc(u32 function_id, u64 arg0, u64 arg1,
+	       u64 arg2, u64 arg3, u64 arg4, u64 arg5,
+	       u64 arg6, struct arm_smccc_res *res);
 
 /**
  * smccc_smc - Invoke a SMCCC function using the smc conduit
@@ -298,9 +298,9 @@ void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
  * @res: pointer to write the return values from registers x0-x3
  *
  */
-void smccc_smc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
-	       uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
-	       uint64_t arg6, struct arm_smccc_res *res);
+void smccc_smc(u32 function_id, u64 arg0, u64 arg1,
+	       u64 arg2, u64 arg3, u64 arg4, u64 arg5,
+	       u64 arg6, struct arm_smccc_res *res);
 
 /* Execute a Wait For Interrupt instruction. */
 void wfi(void);
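A sketch of the smccc_hvc() conduit declared above. PSCI_0_2_FN_PSCI_VERSION comes from uapi linux/psci.h and is only illustrative; the a0 result field is part of struct arm_smccc_res in this header:

static u32 guest_psci_version(void)
{
	struct arm_smccc_res res;

	/* PSCI_VERSION takes no arguments; x1-x7 are passed as zeros. */
	smccc_hvc(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0, 0, 0, 0, 0, &res);
	return res.a0;
}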
diff --git a/tools/testing/selftests/kvm/include/arm64/ucall.h b/tools/testing/selftests/kvm/include/arm64/ucall.h
index 4ec801f37f00..2210d3d94c40 100644
--- a/tools/testing/selftests/kvm/include/arm64/ucall.h
+++ b/tools/testing/selftests/kvm/include/arm64/ucall.h
@@ -10,9 +10,9 @@
  * ucall_exit_mmio_addr holds per-VM values (global data is duplicated by each
  * VM), it must not be accessed from host code.
  */
-extern vm_vaddr_t *ucall_exit_mmio_addr;
+extern gva_t *ucall_exit_mmio_addr;
 
-static inline void ucall_arch_do_ucall(vm_vaddr_t uc)
+static inline void ucall_arch_do_ucall(gva_t uc)
 {
	WRITE_ONCE(*ucall_exit_mmio_addr, uc);
 }
diff --git a/tools/testing/selftests/kvm/include/arm64/vgic.h b/tools/testing/selftests/kvm/include/arm64/vgic.h
index 688beccc9436..1f8b04373987 100644
--- a/tools/testing/selftests/kvm/include/arm64/vgic.h
+++ b/tools/testing/selftests/kvm/include/arm64/vgic.h
@@ -11,27 +11,27 @@
 #include "kvm_util.h"
 
 #define REDIST_REGION_ATTR_ADDR(count, base, flags, index) \
-	(((uint64_t)(count) << 52) | \
-	((uint64_t)((base) >> 16) << 16) | \
-	((uint64_t)(flags) << 12) | \
+	(((u64)(count) << 52) | \
+	((u64)((base) >> 16) << 16) | \
+	((u64)(flags) << 12) | \
	index)
 
 bool kvm_supports_vgic_v3(void);
-int __vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs);
+int __vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, u32 nr_irqs);
 void __vgic_v3_init(int fd);
-int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs);
+int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, u32 nr_irqs);
 
 #define VGIC_MAX_RESERVED	1023
 
-void kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level);
-int _kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level);
+void kvm_irq_set_level_info(int gic_fd, u32 intid, int level);
+int _kvm_irq_set_level_info(int gic_fd, u32 intid, int level);
 
-void kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level);
-int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level);
+void kvm_arm_irq_line(struct kvm_vm *vm, u32 intid, int level);
+int _kvm_arm_irq_line(struct kvm_vm *vm, u32 intid, int level);
 
 /* The vcpu arg only applies to private interrupts. */
-void kvm_irq_write_ispendr(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu);
-void kvm_irq_write_isactiver(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu);
+void kvm_irq_write_ispendr(int gic_fd, u32 intid, struct kvm_vcpu *vcpu);
+void kvm_irq_write_isactiver(int gic_fd, u32 intid, struct kvm_vcpu *vcpu);
 
 #define KVM_IRQCHIP_NUM_PINS	(1020 - 32)
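How the REDIST_REGION_ATTR_ADDR() packing above is typically consumed, as a hedged sketch; gic_fd and the base address are placeholders, and the KVM_DEV_ARM_VGIC_* constants come from the uapi headers rather than this file:

/* Describe one redistributor region covering 2 vCPUs at 0x080a0000. */
u64 addr = REDIST_REGION_ATTR_ADDR(2, 0x080a0000, 0, 0);

kvm_device_attr_set(gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
		    KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);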
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index f861242b4ae8..2ecaaa0e9965 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -58,7 +58,7 @@ struct kvm_binary_stats {
 
 struct kvm_vcpu {
	struct list_head list;
-	uint32_t id;
+	u32 id;
	int fd;
	struct kvm_vm *vm;
	struct kvm_run *run;
@@ -70,8 +70,8 @@ struct kvm_vcpu {
 #endif
	struct kvm_binary_stats stats;
	struct kvm_dirty_gfn *dirty_gfns;
-	uint32_t fetch_index;
-	uint32_t dirty_gfns_count;
+	u32 fetch_index;
+	u32 dirty_gfns_count;
 };
 
 struct userspace_mem_regions {
@@ -90,7 +90,7 @@ enum kvm_mem_region_type {
 
 struct kvm_mmu {
	bool pgd_created;
-	uint64_t pgd;
+	u64 pgd;
	int pgtable_levels;
 
	struct kvm_mmu_arch arch;
@@ -105,16 +105,16 @@ struct kvm_vm {
	unsigned int page_shift;
	unsigned int pa_bits;
	unsigned int va_bits;
-	uint64_t max_gfn;
+	u64 max_gfn;
	struct list_head vcpus;
	struct userspace_mem_regions regions;
	struct sparsebit *vpages_valid;
	struct sparsebit *vpages_mapped;
	bool has_irqchip;
-	vm_paddr_t ucall_mmio_addr;
-	vm_vaddr_t handlers;
-	uint32_t dirty_ring_size;
-	uint64_t gpa_tag_mask;
+	gpa_t ucall_mmio_addr;
+	gva_t handlers;
+	u32 dirty_ring_size;
+	gpa_t gpa_tag_mask;
 
	/*
	 * "mmu" is the guest's stage-1, with a short name because the vast
@@ -132,7 +132,7 @@ struct kvm_vm {
	 * allocators, e.g., lib/elf uses the memslots[MEM_REGION_CODE]
	 * memslot.
	 */
-	uint32_t memslots[NR_MEM_REGIONS];
+	u32 memslots[NR_MEM_REGIONS];
 };
 
 struct vcpu_reg_sublist {
@@ -164,7 +164,7 @@ struct vcpu_reg_list {
		else
 
 struct userspace_mem_region *
-memslot2region(struct kvm_vm *vm, uint32_t memslot);
+memslot2region(struct kvm_vm *vm, u32 memslot);
 
 static inline struct userspace_mem_region *vm_get_mem_region(struct kvm_vm *vm,
							     enum kvm_mem_region_type type)
@@ -213,13 +213,13 @@ enum vm_guest_mode {
 };
 
 struct vm_shape {
-	uint32_t type;
-	uint8_t  mode;
-	uint8_t  pad0;
-	uint16_t pad1;
+	u32 type;
+	u8  mode;
+	u8  pad0;
+	u16 pad1;
 };
 
-kvm_static_assert(sizeof(struct vm_shape) == sizeof(uint64_t));
+kvm_static_assert(sizeof(struct vm_shape) == sizeof(u64));
 
 #define VM_TYPE_DEFAULT			0
 
@@ -404,21 +404,22 @@ static inline int vm_check_cap(struct kvm_vm *vm, long cap)
	return ret;
 }
 
-static inline int __vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
+static inline int __vm_enable_cap(struct kvm_vm *vm, u32 cap, u64 arg0)
 {
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };
 
	return __vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
 }
-static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
+
+static inline void vm_enable_cap(struct kvm_vm *vm, u32 cap, u64 arg0)
 {
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };
 
	vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
 }
 
-static inline void vm_set_memory_attributes(struct kvm_vm *vm, uint64_t gpa,
-					    uint64_t size, uint64_t attributes)
+static inline void vm_set_memory_attributes(struct kvm_vm *vm, gpa_t gpa,
+					    u64 size, u64 attributes)
 {
	struct kvm_memory_attributes attr = {
		.attributes = attributes,
@@ -438,35 +439,35 @@ static inline void vm_set_memory_attributes(struct kvm_vm *vm, uint64_t gpa,
 }
 
-static inline void vm_mem_set_private(struct kvm_vm *vm, uint64_t gpa,
-				      uint64_t size)
+static inline void vm_mem_set_private(struct kvm_vm *vm, gpa_t gpa,
+				      u64 size)
 {
	vm_set_memory_attributes(vm, gpa, size, KVM_MEMORY_ATTRIBUTE_PRIVATE);
 }
 
-static inline void vm_mem_set_shared(struct kvm_vm *vm, uint64_t gpa,
-				     uint64_t size)
+static inline void vm_mem_set_shared(struct kvm_vm *vm, gpa_t gpa,
+				     u64 size)
 {
	vm_set_memory_attributes(vm, gpa, size, 0);
 }
 
-void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t gpa, uint64_t size,
+void vm_guest_mem_fallocate(struct kvm_vm *vm, gpa_t gpa, u64 size,
			    bool punch_hole);
 
-static inline void vm_guest_mem_punch_hole(struct kvm_vm *vm, uint64_t gpa,
-					   uint64_t size)
+static inline void vm_guest_mem_punch_hole(struct kvm_vm *vm, gpa_t gpa,
+					   u64 size)
 {
	vm_guest_mem_fallocate(vm, gpa, size, true);
 }
 
-static inline void vm_guest_mem_allocate(struct kvm_vm *vm, uint64_t gpa,
-					 uint64_t size)
+static inline void vm_guest_mem_allocate(struct kvm_vm *vm, gpa_t gpa,
+					 u64 size)
 {
	vm_guest_mem_fallocate(vm, gpa, size, false);
 }
 
-void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
-const char *vm_guest_mode_string(uint32_t i);
+void vm_enable_dirty_ring(struct kvm_vm *vm, u32 ring_size);
+const char *vm_guest_mode_string(u32 i);
 
 void kvm_vm_free(struct kvm_vm *vmp);
 void kvm_vm_restart(struct kvm_vm *vmp);
@@ -474,7 +475,7 @@ void kvm_vm_release(struct kvm_vm *vmp);
 void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename);
 int kvm_memfd_alloc(size_t size, bool hugepages);
 
-void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
+void vm_dump(FILE *stream, struct kvm_vm *vm, u8 indent);
 
 static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
 {
@@ -484,7 +485,7 @@ static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
 }
 
 static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
-					  uint64_t first_page, uint32_t num_pages)
+					  u64 first_page, u32 num_pages)
 {
	struct kvm_clear_dirty_log args = {
		.dirty_bitmap = log,
@@ -496,14 +497,14 @@ static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log
	vm_ioctl(vm, KVM_CLEAR_DIRTY_LOG, &args);
 }
 
-static inline uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
+static inline u32 kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
 {
	return __vm_ioctl(vm, KVM_RESET_DIRTY_RINGS, NULL);
 }
 
 static inline void kvm_vm_register_coalesced_io(struct kvm_vm *vm,
-						uint64_t address,
-						uint64_t size, bool pio)
+						u64 address,
+						u64 size, bool pio)
 {
	struct kvm_coalesced_mmio_zone zone = {
		.addr = address,
@@ -515,8 +516,8 @@ static inline void kvm_vm_register_coalesced_io(struct kvm_vm *vm,
 }
 
 static inline void kvm_vm_unregister_coalesced_io(struct kvm_vm *vm,
-						  uint64_t address,
-						  uint64_t size, bool pio)
+						  u64 address,
+						  u64 size, bool pio)
 {
	struct kvm_coalesced_mmio_zone zone = {
		.addr = address,
@@ -535,8 +536,8 @@ static inline int vm_get_stats_fd(struct kvm_vm *vm)
	return fd;
 }
 
-static inline int __kvm_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd,
-			      uint32_t flags)
+static inline int __kvm_irqfd(struct kvm_vm *vm, u32 gsi, int eventfd,
+			      u32 flags)
 {
	struct kvm_irqfd irqfd = {
		.fd = eventfd,
@@ -548,20 +549,19 @@ static inline int __kvm_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd,
	return __vm_ioctl(vm, KVM_IRQFD, &irqfd);
 }
 
-static inline void kvm_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd,
-			     uint32_t flags)
+static inline void kvm_irqfd(struct kvm_vm *vm, u32 gsi, int eventfd, u32 flags)
 {
	int ret = __kvm_irqfd(vm, gsi, eventfd, flags);
 
	TEST_ASSERT_VM_VCPU_IOCTL(!ret, KVM_IRQFD, ret, vm);
 }
 
-static inline void kvm_assign_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd)
+static inline void kvm_assign_irqfd(struct kvm_vm *vm, u32 gsi, int eventfd)
 {
	kvm_irqfd(vm, gsi, eventfd, 0);
 }
 
-static inline void kvm_deassign_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd)
+static inline void kvm_deassign_irqfd(struct kvm_vm *vm, u32 gsi, int eventfd)
 {
	kvm_irqfd(vm, gsi, eventfd, KVM_IRQFD_FLAG_DEASSIGN);
 }
@@ -610,15 +610,15 @@ static inline struct kvm_stats_desc *get_stats_descriptor(struct kvm_stats_desc
 }
 
 void read_stat_data(int stats_fd, struct kvm_stats_header *header,
-		    struct kvm_stats_desc *desc, uint64_t *data,
+		    struct kvm_stats_desc *desc, u64 *data,
		    size_t max_elements);
 
 void kvm_get_stat(struct kvm_binary_stats *stats, const char *name,
-		  uint64_t *data, size_t max_elements);
+		  u64 *data, size_t max_elements);
 
 #define __get_stat(stats, stat)			\
 ({						\
-	uint64_t data;				\
+	u64 data;				\
						\
	kvm_get_stat(stats, #stat, &data, 1);	\
	data;					\
@@ -664,8 +664,8 @@ static inline bool is_smt_on(void)
 
 void vm_create_irqchip(struct kvm_vm *vm);
 
-static inline int __vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
-					  uint64_t flags)
+static inline int __vm_create_guest_memfd(struct kvm_vm *vm, u64 size,
+					  u64 flags)
 {
	struct kvm_create_guest_memfd guest_memfd = {
		.size = size,
@@ -675,8 +675,8 @@ static inline int __vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
	return __vm_ioctl(vm, KVM_CREATE_GUEST_MEMFD, &guest_memfd);
 }
 
-static inline int vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
-					uint64_t flags)
+static inline int vm_create_guest_memfd(struct kvm_vm *vm, u64 size,
+					u64 flags)
 {
	int fd = __vm_create_guest_memfd(vm, size, flags);
 
@@ -684,24 +684,23 @@ static inline int vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
	return fd;
 }
 
-void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
-			       uint64_t gpa, uint64_t size, void *hva);
-int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
-				uint64_t gpa, uint64_t size, void *hva);
-void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
-				uint64_t gpa, uint64_t size, void *hva,
-				uint32_t guest_memfd, uint64_t guest_memfd_offset);
-int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
-				 uint64_t gpa, uint64_t size, void *hva,
-				 uint32_t guest_memfd, uint64_t guest_memfd_offset);
+void vm_set_user_memory_region(struct kvm_vm *vm, u32 slot, u32 flags,
+			       gpa_t gpa, u64 size, void *hva);
+int __vm_set_user_memory_region(struct kvm_vm *vm, u32 slot, u32 flags,
+				gpa_t gpa, u64 size, void *hva);
+void vm_set_user_memory_region2(struct kvm_vm *vm, u32 slot, u32 flags,
+				gpa_t gpa, u64 size, void *hva,
+				u32 guest_memfd, u64 guest_memfd_offset);
+int __vm_set_user_memory_region2(struct kvm_vm *vm, u32 slot, u32 flags,
+				 gpa_t gpa, u64 size, void *hva,
+				 u32 guest_memfd, u64 guest_memfd_offset);
 
 void vm_userspace_mem_region_add(struct kvm_vm *vm,
	enum vm_mem_backing_src_type src_type,
-	uint64_t gpa, uint32_t slot, uint64_t npages,
-	uint32_t flags);
+	gpa_t gpa, u32 slot, u64 npages, u32 flags);
 void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
-		uint64_t gpa, uint32_t slot, uint64_t npages, uint32_t flags,
-		int guest_memfd_fd, uint64_t guest_memfd_offset);
+		gpa_t gpa, u32 slot, u64 npages, u32 flags,
+		int guest_memfd_fd, u64 guest_memfd_offset);
 
 #ifndef vm_arch_has_protected_memory
 static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm)
@@ -710,36 +709,34 @@ static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm)
 }
 #endif
 
-void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
-void vm_mem_region_reload(struct kvm_vm *vm, uint32_t slot);
-void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
-void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
-struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
-void vm_populate_vaddr_bitmap(struct kvm_vm *vm);
-vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
-vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
-vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
-			    enum kvm_mem_region_type type);
-vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz,
-				 vm_vaddr_t vaddr_min,
-				 enum kvm_mem_region_type type);
-vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
-vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm,
-				 enum kvm_mem_region_type type);
-vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);
-
-void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
+void vm_mem_region_set_flags(struct kvm_vm *vm, u32 slot, u32 flags);
+void vm_mem_region_reload(struct kvm_vm *vm, u32 slot);
+void vm_mem_region_move(struct kvm_vm *vm, u32 slot, u64 new_gpa);
+void vm_mem_region_delete(struct kvm_vm *vm, u32 slot);
+struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, u32 vcpu_id);
+void vm_populate_gva_bitmap(struct kvm_vm *vm);
+gva_t vm_unused_gva_gap(struct kvm_vm *vm, size_t sz, gva_t min_gva);
+gva_t vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva);
+gva_t __vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva,
+		 enum kvm_mem_region_type type);
+gva_t vm_alloc_shared(struct kvm_vm *vm, size_t sz, gva_t min_gva,
+		      enum kvm_mem_region_type type);
+gva_t vm_alloc_pages(struct kvm_vm *vm, int nr_pages);
+gva_t __vm_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type);
+gva_t vm_alloc_page(struct kvm_vm *vm);
+
+void virt_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa,
	      unsigned int npages);
-void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
-void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
-vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
-void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);
+void *addr_gpa2hva(struct kvm_vm *vm, gpa_t gpa);
+void *addr_gva2hva(struct kvm_vm *vm, gva_t gva);
+gpa_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
+void *addr_gpa2alias(struct kvm_vm *vm, gpa_t gpa);
 
 #ifndef vcpu_arch_put_guest
 #define vcpu_arch_put_guest(mem, val) do { (mem) = (val); } while (0)
 #endif
 
-static inline vm_paddr_t vm_untag_gpa(struct kvm_vm *vm, vm_paddr_t gpa)
+static inline gpa_t vm_untag_gpa(struct kvm_vm *vm, gpa_t gpa)
 {
	return gpa & ~vm->gpa_tag_mask;
 }
@@ -755,8 +752,8 @@ static inline int __vcpu_run(struct kvm_vcpu *vcpu)
 void vcpu_run_complete_io(struct kvm_vcpu *vcpu);
 struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu);
 
-static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, uint32_t cap,
-				   uint64_t arg0)
+static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, u32 cap,
+				   u64 arg0)
 {
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };
 
@@ -811,31 +808,34 @@ static inline void vcpu_fpu_set(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
	vcpu_ioctl(vcpu, KVM_SET_FPU, fpu);
 }
 
-static inline int __vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
+static inline int __vcpu_get_reg(struct kvm_vcpu *vcpu, u64 id, void *addr)
 {
-	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr };
+	struct kvm_one_reg reg = { .id = id, .addr = (u64)addr };
 
	return __vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
 }
-static inline int __vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
+
+static inline int __vcpu_set_reg(struct kvm_vcpu *vcpu, u64 id, u64 val)
 {
-	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };
+	struct kvm_one_reg reg = { .id = id, .addr = (u64)&val };
 
	return __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
 }
-static inline uint64_t vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id)
+
+static inline u64 vcpu_get_reg(struct kvm_vcpu *vcpu, u64 id)
 {
-	uint64_t val;
-	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };
+	u64 val;
+	struct kvm_one_reg reg = { .id = id, .addr = (u64)&val };
 
	TEST_ASSERT(KVM_REG_SIZE(id) <= sizeof(val), "Reg %lx too big", id);
 
	vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
	return val;
 }
-static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
+
+static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u64 id, u64 val)
 {
-	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };
+	struct kvm_one_reg reg = { .id = id, .addr = (u64)&val };
 
	TEST_ASSERT(KVM_REG_SIZE(id) <= sizeof(val), "Reg %lx too big", id);
 
@@ -880,75 +880,75 @@ static inline int vcpu_get_stats_fd(struct kvm_vcpu *vcpu)
	return fd;
 }
 
-int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr);
+int __kvm_has_device_attr(int dev_fd, u32 group, u64 attr);
 
-static inline void kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr)
+static inline void kvm_has_device_attr(int dev_fd, u32 group, u64 attr)
 {
	int ret = __kvm_has_device_attr(dev_fd, group, attr);
 
	TEST_ASSERT(!ret, "KVM_HAS_DEVICE_ATTR failed, rc: %i errno: %i", ret, errno);
 }
 
-int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val);
+int __kvm_device_attr_get(int dev_fd, u32 group, u64 attr, void *val);
 
-static inline void kvm_device_attr_get(int dev_fd, uint32_t group,
-				       uint64_t attr, void *val)
+static inline void kvm_device_attr_get(int dev_fd, u32 group,
+				       u64 attr, void *val)
 {
	int ret = __kvm_device_attr_get(dev_fd, group, attr, val);
 
	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_GET_DEVICE_ATTR, ret));
 }
 
-int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val);
+int __kvm_device_attr_set(int dev_fd, u32 group, u64 attr, void *val);
 
-static inline void kvm_device_attr_set(int dev_fd, uint32_t group,
-				       uint64_t attr, void *val)
+static inline void kvm_device_attr_set(int dev_fd, u32 group,
+				       u64 attr, void *val)
 {
	int ret = __kvm_device_attr_set(dev_fd, group, attr, val);
 
	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_DEVICE_ATTR, ret));
 }
 
-static inline int __vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
-					 uint64_t attr)
+static inline int __vcpu_has_device_attr(struct kvm_vcpu *vcpu, u32 group,
+					 u64 attr)
 {
	return __kvm_has_device_attr(vcpu->fd, group, attr);
 }
 
-static inline void vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
-					uint64_t attr)
+static inline void vcpu_has_device_attr(struct kvm_vcpu *vcpu, u32 group,
+					u64 attr)
 {
	kvm_has_device_attr(vcpu->fd, group, attr);
 }
 
-static inline int __vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
-					 uint64_t attr, void *val)
+static inline int __vcpu_device_attr_get(struct kvm_vcpu *vcpu, u32 group,
+					 u64 attr, void *val)
 {
	return __kvm_device_attr_get(vcpu->fd, group, attr, val);
 }
 
-static inline void vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
-					uint64_t attr, void *val)
+static inline void vcpu_device_attr_get(struct kvm_vcpu *vcpu, u32 group,
+					u64 attr, void *val)
 {
	kvm_device_attr_get(vcpu->fd, group, attr, val);
 }
 
-static inline int __vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
-					 uint64_t attr, void *val)
+static inline int __vcpu_device_attr_set(struct kvm_vcpu *vcpu, u32 group,
+					 u64 attr, void *val)
 {
	return __kvm_device_attr_set(vcpu->fd, group, attr, val);
 }
 
-static inline void vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
-					uint64_t attr, void *val)
+static inline void vcpu_device_attr_set(struct kvm_vcpu *vcpu, u32 group,
+					u64 attr, void *val)
 {
	kvm_device_attr_set(vcpu->fd, group, attr, val);
 }
 
-int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type);
-int __kvm_create_device(struct kvm_vm *vm, uint64_t type);
+int __kvm_test_create_device(struct kvm_vm *vm, u64 type);
+int __kvm_create_device(struct kvm_vm *vm, u64 type);
 
-static inline int kvm_create_device(struct kvm_vm *vm, uint64_t type)
+static inline int kvm_create_device(struct kvm_vm *vm, u64 type)
 {
	int fd = __kvm_create_device(vm, type);
 
@@ -964,7 +964,7 @@ void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu);
 * Input Args:
 *   vcpu - vCPU
 *   num - number of arguments
- *   ... - arguments, each of type uint64_t
+ *   ... - arguments, each of type u64
 *
 * Output Args: None
 *
@@ -972,40 +972,38 @@ void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu);
 *
 * Sets the first @num input parameters for the function at @vcpu's entry point,
 * per the C calling convention of the architecture, to the values given as
- * variable args.  Each of the variable args is expected to be of type uint64_t.
+ * variable args.  Each of the variable args is expected to be of type u64.
 * The maximum @num can be is specific to the architecture.
 */
 void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...);
 
-void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
-int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
+void kvm_irq_line(struct kvm_vm *vm, u32 irq, int level);
+int _kvm_irq_line(struct kvm_vm *vm, u32 irq, int level);
 
 #define KVM_MAX_IRQ_ROUTES 4096
 
 struct kvm_irq_routing *kvm_gsi_routing_create(void);
 void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
-				 uint32_t gsi, uint32_t pin);
+				 u32 gsi, u32 pin);
 int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
 void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
 
 const char *exit_reason_str(unsigned int exit_reason);
 
-vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
-			     uint32_t memslot);
-vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
-				vm_paddr_t paddr_min, uint32_t memslot,
-				bool protected);
-vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);
+gpa_t vm_phy_page_alloc(struct kvm_vm *vm, gpa_t min_gpa, u32 memslot);
+gpa_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, gpa_t min_gpa,
+			   u32 memslot, bool protected);
+gpa_t vm_alloc_page_table(struct kvm_vm *vm);
 
-static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
-					    vm_paddr_t paddr_min, uint32_t memslot)
+static inline gpa_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+				       gpa_t min_gpa, u32 memslot)
 {
	/*
	 * By default, allocate memory as protected for VMs that support
	 * protected memory, as the majority of memory for such VMs is
	 * protected, i.e. using shared memory is effectively opt-in.
	 */
-	return __vm_phy_pages_alloc(vm, num, paddr_min, memslot,
+	return __vm_phy_pages_alloc(vm, num, min_gpa, memslot,
				    vm_arch_has_protected_memory(vm));
 }
 
@@ -1016,8 +1014,8 @@ static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
 * calculate the amount of memory needed for per-vCPU data, e.g. stacks.
 */
 struct kvm_vm *____vm_create(struct vm_shape shape);
-struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
-			   uint64_t nr_extra_pages);
+struct kvm_vm *__vm_create(struct vm_shape shape, u32 nr_runnable_vcpus,
+			   u64 nr_extra_pages);
 
 static inline struct kvm_vm *vm_create_barebones(void)
 {
@@ -1034,16 +1032,16 @@ static inline struct kvm_vm *vm_create_barebones_type(unsigned long type)
	return ____vm_create(shape);
 }
 
-static inline struct kvm_vm *vm_create(uint32_t nr_runnable_vcpus)
+static inline struct kvm_vm *vm_create(u32 nr_runnable_vcpus)
 {
	return __vm_create(VM_SHAPE_DEFAULT, nr_runnable_vcpus, 0);
 }
 
-struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus,
-				      uint64_t extra_mem_pages,
+struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, u32 nr_vcpus,
+				      u64 extra_mem_pages,
				      void *guest_code, struct kvm_vcpu *vcpus[]);
 
-static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus,
+static inline struct kvm_vm *vm_create_with_vcpus(u32 nr_vcpus,
						  void *guest_code,
						  struct kvm_vcpu *vcpus[])
 {
@@ -1054,7 +1052,7 @@ static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus,
 
 struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape,
					       struct kvm_vcpu **vcpu,
-					       uint64_t extra_mem_pages,
+					       u64 extra_mem_pages,
					       void *guest_code);
 
 /*
@@ -1062,7 +1060,7 @@ struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape,
 * additional pages of guest memory.  Returns the VM and vCPU (via out param).
 */
 static inline struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
-						       uint64_t extra_mem_pages,
+						       u64 extra_mem_pages,
						       void *guest_code)
 {
	return __vm_create_shape_with_one_vcpu(VM_SHAPE_DEFAULT, vcpu,
@@ -1084,7 +1082,7 @@ static inline struct kvm_vm *vm_create_shape_with_one_vcpu(struct vm_shape shape
 
 struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);
 
-void kvm_set_files_rlimit(uint32_t nr_vcpus);
+void kvm_set_files_rlimit(u32 nr_vcpus);
 
 int __pin_task_to_cpu(pthread_t task, int cpu);
 
@@ -1115,7 +1113,7 @@ static inline int pin_self_to_any_cpu(void)
 }
 
 void kvm_print_vcpu_pinning_help(void);
-void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
+void kvm_parse_vcpu_pinning(const char *pcpus_string, u32 vcpu_to_pcpu[],
			    int nr_vcpus);
 
 unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
@@ -1131,12 +1129,12 @@ vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
 }
 
 #define sync_global_to_guest(vm, g) ({				\
-	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
+	typeof(g) *_p = addr_gva2hva(vm, (gva_t)&(g));		\
	memcpy(_p, &(g), sizeof(g));				\
 })
 
 #define sync_global_from_guest(vm, g) ({			\
-	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
+	typeof(g) *_p = addr_gva2hva(vm, (gva_t)&(g));		\
	memcpy(&(g), _p, sizeof(g));				\
 })
 
@@ -1147,7 +1145,7 @@ vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
 * undesirable to change the host's copy of the global.
 */
 #define write_guest_global(vm, g, val) ({			\
-	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
+	typeof(g) *_p = addr_gva2hva(vm, (gva_t)&(g));		\
	typeof(g) _val = val;					\
								\
	memcpy(_p, &(_val), sizeof(g));				\
 })
@@ -1156,10 +1154,10 @@ vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
 void assert_on_unhandled_exception(struct kvm_vcpu *vcpu);
 
 void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu,
-		    uint8_t indent);
+		    u8 indent);
 
 static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu,
-			     uint8_t indent)
+			     u8 indent)
 {
	vcpu_arch_dump(stream, vcpu, indent);
 }
@@ -1171,10 +1169,10 @@ static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu,
 *   vm - Virtual Machine
 *   vcpu_id - The id of the VCPU to add to the VM.
 */
-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id);
 void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code);
 
-static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, u32 vcpu_id,
					    void *guest_code)
 {
	struct kvm_vcpu *vcpu = vm_arch_vcpu_add(vm, vcpu_id);
@@ -1185,10 +1183,10 @@ static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
 }
 
 /* Re-create a vCPU after restarting a VM, e.g. for state save/restore tests. */
-struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id);
+struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, u32 vcpu_id);
 
 static inline struct kvm_vcpu *vm_vcpu_recreate(struct kvm_vm *vm,
-						uint32_t vcpu_id)
+						u32 vcpu_id)
 {
	return vm_arch_vcpu_recreate(vm, vcpu_id);
 }
@@ -1203,27 +1201,15 @@ static inline void virt_pgd_alloc(struct kvm_vm *vm)
 }
 
 /*
- * VM Virtual Page Map
- *
- * Input Args:
- *   vm - Virtual Machine
- *   vaddr - VM Virtual Address
- *   paddr - VM Physical Address
- *   memslot - Memory region slot for new virtual translation tables
- *
- * Output Args: None
- *
- * Return: None
- *
 * Within @vm, creates a virtual translation for the page starting
- * at @vaddr to the page starting at @paddr.
+ * at @gva to the page starting at @gpa.
 */
-void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr);
+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa);
 
-static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
+static inline void virt_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)
 {
-	virt_arch_pg_map(vm, vaddr, paddr);
-	sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift);
+	virt_arch_pg_map(vm, gva, gpa);
+	sparsebit_set(vm->vpages_mapped, gva >> vm->page_shift);
 }
 
@@ -1242,9 +1228,9 @@ static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr
 * Returns the VM physical address of the translated VM virtual
 * address given by @gva.
 */
-vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);
+gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva);
 
-static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
+static inline gpa_t addr_gva2gpa(struct kvm_vm *vm, gva_t gva)
 {
	return addr_arch_gva2gpa(vm, gva);
 }
@@ -1264,9 +1250,9 @@ static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
 * Dumps to the FILE stream given by @stream, the contents of all the
 * virtual translation tables for the VM given by @vm.
 */
-void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
+void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent);
 
-static inline void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
+static inline void virt_dump(FILE *stream, struct kvm_vm *vm, u8 indent)
 {
	virt_arch_dump(stream, vm, indent);
 }
@@ -1277,7 +1263,7 @@ static inline int __vm_disable_nx_huge_pages(struct kvm_vm *vm)
	return __vm_enable_cap(vm, KVM_CAP_VM_DISABLE_NX_HUGE_PAGES, 0);
 }
 
-static inline uint64_t vm_page_align(struct kvm_vm *vm, uint64_t v)
+static inline u64 vm_page_align(struct kvm_vm *vm, u64 v)
 {
	return (v + vm->page_size - 1) & ~(vm->page_size - 1);
 }
@@ -1293,9 +1279,9 @@ void kvm_arch_vm_post_create(struct kvm_vm *vm, unsigned int nr_vcpus);
 void kvm_arch_vm_finalize_vcpus(struct kvm_vm *vm);
 void kvm_arch_vm_release(struct kvm_vm *vm);
 
-bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr);
+bool vm_is_gpa_protected(struct kvm_vm *vm, gpa_t gpa);
 
-uint32_t guest_get_vcpuid(void);
+u32 guest_get_vcpuid(void);
 
 bool kvm_arch_has_default_irqchip(void);
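A minimal host-side skeleton tying the declarations above together; guest_code is a stand-in for a real guest function, and GUEST_UCALL_NONE() is declared in ucall_common.h further down:

#include "kvm_util.h"
#include "ucall_common.h"

static void guest_code(void)
{
	GUEST_UCALL_NONE();	/* bare ucall, exits back to the host */
}

int main(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	vm = vm_create(1);			/* one runnable vCPU */
	vcpu = vm_vcpu_add(vm, 0, guest_code);	/* id 0, entry at guest_code */
	__vcpu_run(vcpu);			/* single KVM_RUN round trip */
	kvm_vm_free(vm);
	return 0;
}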
diff --git a/tools/testing/selftests/kvm/include/kvm_util_types.h b/tools/testing/selftests/kvm/include/kvm_util_types.h
index 0366e9bce7f9..ed0087e31674 100644
--- a/tools/testing/selftests/kvm/include/kvm_util_types.h
+++ b/tools/testing/selftests/kvm/include/kvm_util_types.h
@@ -2,6 +2,8 @@
 #ifndef SELFTEST_KVM_UTIL_TYPES_H
 #define SELFTEST_KVM_UTIL_TYPES_H
 
+#include <linux/types.h>
+
 /*
 * Provide a version of static_assert() that is guaranteed to have an optional
 * message param. _GNU_SOURCE is defined for all KVM selftests, _GNU_SOURCE
@@ -14,9 +16,9 @@
 #define __kvm_static_assert(expr, msg, ...) _Static_assert(expr, msg)
 #define kvm_static_assert(expr, ...) __kvm_static_assert(expr, ##__VA_ARGS__, #expr)
 
-typedef uint64_t vm_paddr_t; /* Virtual Machine (Guest) physical address */
-typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */
+typedef u64 gpa_t; /* Virtual Machine (Guest) physical address */
+typedef u64 gva_t; /* Virtual Machine (Guest) virtual address */
 
-#define INVALID_GPA (~(uint64_t)0)
+#define INVALID_GPA (~(u64)0)
 
 #endif /* SELFTEST_KVM_UTIL_TYPES_H */
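With the vm_paddr_t/vm_vaddr_t spellings gone, a guest-address round trip reads as follows (sketch; all helpers are declared in kvm_util.h above):

gva_t gva = vm_alloc_page(vm);		/* guest-virtual allocation */
gpa_t gpa = addr_gva2gpa(vm, gva);	/* walk the guest page tables */
void *hva = addr_gva2hva(vm, gva);	/* host-side view of the same page */

TEST_ASSERT(gpa != INVALID_GPA, "expected a valid translation");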
diff --git a/tools/testing/selftests/kvm/include/loongarch/arch_timer.h b/tools/testing/selftests/kvm/include/loongarch/arch_timer.h
index 2ed106b32c81..3888aeeb3524 100644
--- a/tools/testing/selftests/kvm/include/loongarch/arch_timer.h
+++ b/tools/testing/selftests/kvm/include/loongarch/arch_timer.h
@@ -70,9 +70,9 @@ static inline void timer_set_next_cmp_ms(unsigned int msec, bool period)
	csr_write(val, LOONGARCH_CSR_TCFG);
 }
 
-static inline void __delay(uint64_t cycles)
+static inline void __delay(u64 cycles)
 {
-	uint64_t start = timer_get_cycles();
+	u64 start = timer_get_cycles();
 
	while ((timer_get_cycles() - start) < cycles)
		cpu_relax();
diff --git a/tools/testing/selftests/kvm/include/loongarch/ucall.h b/tools/testing/selftests/kvm/include/loongarch/ucall.h
index 4ec801f37f00..2210d3d94c40 100644
--- a/tools/testing/selftests/kvm/include/loongarch/ucall.h
+++ b/tools/testing/selftests/kvm/include/loongarch/ucall.h
@@ -10,9 +10,9 @@
  * ucall_exit_mmio_addr holds per-VM values (global data is duplicated by each
  * VM), it must not be accessed from host code.
  */
-extern vm_vaddr_t *ucall_exit_mmio_addr;
+extern gva_t *ucall_exit_mmio_addr;
 
-static inline void ucall_arch_do_ucall(vm_vaddr_t uc)
+static inline void ucall_arch_do_ucall(gva_t uc)
 {
	WRITE_ONCE(*ucall_exit_mmio_addr, uc);
 }
diff --git a/tools/testing/selftests/kvm/include/memstress.h b/tools/testing/selftests/kvm/include/memstress.h
index 9071eb6dea60..0d1d6230cc05 100644
--- a/tools/testing/selftests/kvm/include/memstress.h
+++ b/tools/testing/selftests/kvm/include/memstress.h
@@ -20,9 +20,9 @@
 #define MEMSTRESS_MEM_SLOT_INDEX	1
 
 struct memstress_vcpu_args {
-	uint64_t gpa;
-	uint64_t gva;
-	uint64_t pages;
+	gpa_t gpa;
+	gva_t gva;
+	u64 pages;
 
	/* Only used by the host userspace part of the vCPU thread */
	struct kvm_vcpu *vcpu;
@@ -32,11 +32,11 @@ struct memstress_vcpu_args {
 struct memstress_args {
	struct kvm_vm *vm;
	/* The starting address and size of the guest test region. */
-	uint64_t gpa;
-	uint64_t size;
-	uint64_t guest_page_size;
-	uint32_t random_seed;
-	uint32_t write_percent;
+	gpa_t gpa;
+	u64 size;
+	u64 guest_page_size;
+	u32 random_seed;
+	u32 write_percent;
 
	/* Run vCPUs in L2 instead of L1, if the architecture supports it. */
	bool nested;
@@ -45,7 +45,7 @@ struct memstress_args {
	/* True if all vCPUs are pinned to pCPUs */
	bool pin_vcpus;
	/* The vCPU=>pCPU pinning map. Only valid if pin_vcpus is true. */
-	uint32_t vcpu_to_pcpu[KVM_MAX_VCPUS];
+	u32 vcpu_to_pcpu[KVM_MAX_VCPUS];
 
	/* Test is done, stop running vCPUs. */
	bool stop_vcpus;
@@ -56,27 +56,27 @@ struct memstress_args {
 extern struct memstress_args memstress_args;
 
 struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus,
-				   uint64_t vcpu_memory_bytes, int slots,
+				   u64 vcpu_memory_bytes, int slots,
				   enum vm_mem_backing_src_type backing_src,
				   bool partition_vcpu_memory_access);
 void memstress_destroy_vm(struct kvm_vm *vm);
 
-void memstress_set_write_percent(struct kvm_vm *vm, uint32_t write_percent);
+void memstress_set_write_percent(struct kvm_vm *vm, u32 write_percent);
 void memstress_set_random_access(struct kvm_vm *vm, bool random_access);
 
 void memstress_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct memstress_vcpu_args *));
 void memstress_join_vcpu_threads(int vcpus);
-void memstress_guest_code(uint32_t vcpu_id);
+void memstress_guest_code(u32 vcpu_id);
 
-uint64_t memstress_nested_pages(int nr_vcpus);
+u64 memstress_nested_pages(int nr_vcpus);
 void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[]);
 
 void memstress_enable_dirty_logging(struct kvm_vm *vm, int slots);
 void memstress_disable_dirty_logging(struct kvm_vm *vm, int slots);
 void memstress_get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots);
 void memstress_clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
-			       int slots, uint64_t pages_per_slot);
-unsigned long **memstress_alloc_bitmaps(int slots, uint64_t pages_per_slot);
+			       int slots, u64 pages_per_slot);
+unsigned long **memstress_alloc_bitmaps(int slots, u64 pages_per_slot);
 void memstress_free_bitmaps(unsigned long *bitmaps[], int slots);
 
 #endif /* SELFTEST_KVM_MEMSTRESS_H */
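A hedged sketch of driving the memstress API above. VM_MODE_DEFAULT and VM_MEM_SRC_ANONYMOUS come from kvm_util.h/test_util.h rather than this file, and vcpu_worker is a caller-supplied thread body:

static void run_memstress(int nr_vcpus, u64 vcpu_memory_bytes,
			  void (*vcpu_worker)(struct memstress_vcpu_args *))
{
	struct kvm_vm *vm;

	vm = memstress_create_vm(VM_MODE_DEFAULT, nr_vcpus, vcpu_memory_bytes,
				 1, VM_MEM_SRC_ANONYMOUS, true);
	memstress_set_write_percent(vm, 50);	/* 50% writes, 50% reads */

	memstress_start_vcpu_threads(nr_vcpus, vcpu_worker);
	memstress_join_vcpu_threads(nr_vcpus);
	memstress_destroy_vm(vm);
}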
diff --git a/tools/testing/selftests/kvm/include/riscv/arch_timer.h b/tools/testing/selftests/kvm/include/riscv/arch_timer.h
index 225d81dad064..28ffc014da2a 100644
--- a/tools/testing/selftests/kvm/include/riscv/arch_timer.h
+++ b/tools/testing/selftests/kvm/include/riscv/arch_timer.h
@@ -14,25 +14,25 @@ static unsigned long timer_freq;
 
 #define msec_to_cycles(msec)	\
-	((timer_freq) * (uint64_t)(msec) / 1000)
+	((timer_freq) * (u64)(msec) / 1000)
 
 #define usec_to_cycles(usec)	\
-	((timer_freq) * (uint64_t)(usec) / 1000000)
+	((timer_freq) * (u64)(usec) / 1000000)
 
 #define cycles_to_usec(cycles) \
-	((uint64_t)(cycles) * 1000000 / (timer_freq))
+	((u64)(cycles) * 1000000 / (timer_freq))
 
-static inline uint64_t timer_get_cycles(void)
+static inline u64 timer_get_cycles(void)
 {
	return csr_read(CSR_TIME);
 }
 
-static inline void timer_set_cmp(uint64_t cval)
+static inline void timer_set_cmp(u64 cval)
 {
	csr_write(CSR_STIMECMP, cval);
 }
 
-static inline uint64_t timer_get_cmp(void)
+static inline u64 timer_get_cmp(void)
 {
	return csr_read(CSR_STIMECMP);
 }
@@ -47,17 +47,17 @@ static inline void timer_irq_disable(void)
	csr_clear(CSR_SIE, IE_TIE);
 }
 
-static inline void timer_set_next_cmp_ms(uint32_t msec)
+static inline void timer_set_next_cmp_ms(u32 msec)
 {
-	uint64_t now_ct = timer_get_cycles();
-	uint64_t next_ct = now_ct + msec_to_cycles(msec);
+	u64 now_ct = timer_get_cycles();
+	u64 next_ct = now_ct + msec_to_cycles(msec);
 
	timer_set_cmp(next_ct);
 }
 
-static inline void __delay(uint64_t cycles)
+static inline void __delay(u64 cycles)
 {
-	uint64_t start = timer_get_cycles();
+	u64 start = timer_get_cycles();
 
	while ((timer_get_cycles() - start) < cycles)
		cpu_relax();
diff --git a/tools/testing/selftests/kvm/include/riscv/processor.h b/tools/testing/selftests/kvm/include/riscv/processor.h
index 4dade8c4d18e..e3acf2ae9881 100644
--- a/tools/testing/selftests/kvm/include/riscv/processor.h
+++ b/tools/testing/selftests/kvm/include/riscv/processor.h
@@ -25,8 +25,7 @@
 #define GET_RM(insn)		(((insn) & INSN_MASK_FUNCT3) >> INSN_SHIFT_FUNCT3)
 #define GET_CSR_NUM(insn)	(((insn) & INSN_CSR_MASK) >> INSN_CSR_SHIFT)
 
-static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t subtype,
-				    uint64_t idx, uint64_t size)
+static inline u64 __kvm_reg_id(u64 type, u64 subtype, u64 idx, u64 size)
 {
	return KVM_REG_RISCV | type | subtype | idx | size;
 }
@@ -62,14 +61,14 @@ static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t subtype,
					     KVM_REG_RISCV_SBI_SINGLE,	\
					     idx, KVM_REG_SIZE_ULONG)
 
-bool __vcpu_has_ext(struct kvm_vcpu *vcpu, uint64_t ext);
+bool __vcpu_has_ext(struct kvm_vcpu *vcpu, u64 ext);
 
-static inline bool __vcpu_has_isa_ext(struct kvm_vcpu *vcpu, uint64_t isa_ext)
+static inline bool __vcpu_has_isa_ext(struct kvm_vcpu *vcpu, u64 isa_ext)
 {
	return __vcpu_has_ext(vcpu, RISCV_ISA_EXT_REG(isa_ext));
 }
 
-static inline bool __vcpu_has_sbi_ext(struct kvm_vcpu *vcpu, uint64_t sbi_ext)
+static inline bool __vcpu_has_sbi_ext(struct kvm_vcpu *vcpu, u64 sbi_ext)
 {
	return __vcpu_has_ext(vcpu, RISCV_SBI_EXT_REG(sbi_ext));
 }
diff --git a/tools/testing/selftests/kvm/include/riscv/ucall.h b/tools/testing/selftests/kvm/include/riscv/ucall.h
index a695ae36f3e0..2de7c6a36096 100644
--- a/tools/testing/selftests/kvm/include/riscv/ucall.h
+++ b/tools/testing/selftests/kvm/include/riscv/ucall.h
@@ -7,11 +7,11 @@
 
 #define UCALL_EXIT_REASON	KVM_EXIT_RISCV_SBI
 
-static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
+static inline void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa)
 {
 }
 
-static inline void ucall_arch_do_ucall(vm_vaddr_t uc)
+static inline void ucall_arch_do_ucall(gva_t uc)
 {
	sbi_ecall(KVM_RISCV_SELFTESTS_SBI_EXT,
		  KVM_RISCV_SELFTESTS_SBI_UCALL,
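Typical use of the RISC-V extension helpers above, as a sketch; KVM_RISCV_ISA_EXT_SSTC is the uapi extension ID and only illustrative, and __TEST_REQUIRE comes from test_util.h:

__TEST_REQUIRE(__vcpu_has_isa_ext(vcpu, KVM_RISCV_ISA_EXT_SSTC),
	       "Sstc is required for the CSR_STIMECMP timer helpers");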
diff --git a/tools/testing/selftests/kvm/include/s390/diag318_test_handler.h b/tools/testing/selftests/kvm/include/s390/diag318_test_handler.h
index b0ed71302722..6deaf18fec22 100644
--- a/tools/testing/selftests/kvm/include/s390/diag318_test_handler.h
+++ b/tools/testing/selftests/kvm/include/s390/diag318_test_handler.h
@@ -8,6 +8,6 @@
 #ifndef SELFTEST_KVM_DIAG318_TEST_HANDLER
 #define SELFTEST_KVM_DIAG318_TEST_HANDLER
 
-uint64_t get_diag318_info(void);
+u64 get_diag318_info(void);
 
 #endif
diff --git a/tools/testing/selftests/kvm/include/s390/facility.h b/tools/testing/selftests/kvm/include/s390/facility.h
index 00a1ced6538b..41a265742666 100644
--- a/tools/testing/selftests/kvm/include/s390/facility.h
+++ b/tools/testing/selftests/kvm/include/s390/facility.h
@@ -16,7 +16,7 @@
 /* alt_stfle_fac_list[16] + stfle_fac_list[16] */
 #define NB_STFL_DOUBLEWORDS	32
 
-extern uint64_t stfl_doublewords[NB_STFL_DOUBLEWORDS];
+extern u64 stfl_doublewords[NB_STFL_DOUBLEWORDS];
 extern bool stfle_flag;
 
 static inline bool test_bit_inv(unsigned long nr, const unsigned long *ptr)
@@ -24,7 +24,7 @@ static inline bool test_bit_inv(unsigned long nr, const unsigned long *ptr)
	return test_bit(nr ^ (BITS_PER_LONG - 1), ptr);
 }
 
-static inline void stfle(uint64_t *fac, unsigned int nb_doublewords)
+static inline void stfle(u64 *fac, unsigned int nb_doublewords)
 {
	register unsigned long r0 asm("0") = nb_doublewords - 1;
diff --git a/tools/testing/selftests/kvm/include/s390/ucall.h b/tools/testing/selftests/kvm/include/s390/ucall.h
index 8035a872a351..3907d629304f 100644
--- a/tools/testing/selftests/kvm/include/s390/ucall.h
+++ b/tools/testing/selftests/kvm/include/s390/ucall.h
@@ -6,11 +6,11 @@
 
 #define UCALL_EXIT_REASON	KVM_EXIT_S390_SIEIC
 
-static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
+static inline void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa)
 {
 }
 
-static inline void ucall_arch_do_ucall(vm_vaddr_t uc)
+static inline void ucall_arch_do_ucall(gva_t uc)
 {
	/* Exit via DIAGNOSE 0x501 (normally used for breakpoints) */
	asm volatile ("diag 0,%0,0x501" : : "a"(uc) : "memory");
diff --git a/tools/testing/selftests/kvm/include/sparsebit.h b/tools/testing/selftests/kvm/include/sparsebit.h
index bc760761e1a3..e027e5790946 100644
--- a/tools/testing/selftests/kvm/include/sparsebit.h
+++ b/tools/testing/selftests/kvm/include/sparsebit.h
@@ -6,7 +6,7 @@
 *
 * Header file that describes API to the sparsebit library.
 * This library provides a memory efficient means of storing
- * the settings of bits indexed via a uint64_t.  Memory usage
+ * the settings of bits indexed via a u64.  Memory usage
 * is reasonable, significantly less than (2^64 / 8) bytes, as
 * long as bits that are mostly set or mostly cleared are close
 * to each other.  This library is efficient in memory usage
@@ -25,8 +25,8 @@ extern "C" {
 #endif
 
 struct sparsebit;
-typedef uint64_t sparsebit_idx_t;
-typedef uint64_t sparsebit_num_t;
+typedef u64 sparsebit_idx_t;
+typedef u64 sparsebit_num_t;
 
 struct sparsebit *sparsebit_alloc(void);
 void sparsebit_free(struct sparsebit **sbitp);
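A short sketch of the sparsebit API above; sparsebit_set() is the setter used by virt_pg_map() in kvm_util.h, and sparsebit_is_set() is assumed to be its query counterpart, declared later in this header:

struct sparsebit *sb = sparsebit_alloc();

sparsebit_set(sb, 42);		/* track one 64-bit index */
TEST_ASSERT(sparsebit_is_set(sb, 42), "bit 42 should be set");
sparsebit_free(&sb);		/* frees and NULLs the caller's pointer */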
diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h
index b4872ba8ed12..d9b433b834f1 100644
--- a/tools/testing/selftests/kvm/include/test_util.h
+++ b/tools/testing/selftests/kvm/include/test_util.h
@@ -22,6 +22,8 @@
 #include <sys/mman.h>
 #include "kselftest.h"
 
+#include <linux/types.h>
+
 #define msecs_to_usecs(msec)    ((msec) * 1000ULL)
 
 static inline __printf(1, 2) int _no_printf(const char *format, ...) { return 0; }
@@ -99,25 +101,25 @@ do {						\
 
 size_t parse_size(const char *size);
 
-int64_t timespec_to_ns(struct timespec ts);
-struct timespec timespec_add_ns(struct timespec ts, int64_t ns);
+s64 timespec_to_ns(struct timespec ts);
+struct timespec timespec_add_ns(struct timespec ts, s64 ns);
 struct timespec timespec_add(struct timespec ts1, struct timespec ts2);
 struct timespec timespec_sub(struct timespec ts1, struct timespec ts2);
 struct timespec timespec_elapsed(struct timespec start);
 struct timespec timespec_div(struct timespec ts, int divisor);
 
 struct guest_random_state {
-	uint32_t seed;
+	u32 seed;
 };
 
-extern uint32_t guest_random_seed;
+extern u32 guest_random_seed;
 extern struct guest_random_state guest_rng;
 
-struct guest_random_state new_guest_random_state(uint32_t seed);
-uint32_t guest_random_u32(struct guest_random_state *state);
+struct guest_random_state new_guest_random_state(u32 seed);
+u32 guest_random_u32(struct guest_random_state *state);
 
 static inline bool __guest_random_bool(struct guest_random_state *state,
-				       uint8_t percent)
+				       u8 percent)
 {
	return (guest_random_u32(state) % 100) < percent;
 }
@@ -127,9 +129,9 @@ static inline bool guest_random_bool(struct guest_random_state *state)
	return __guest_random_bool(state, 50);
 }
 
-static inline uint64_t guest_random_u64(struct guest_random_state *state)
+static inline u64 guest_random_u64(struct guest_random_state *state)
 {
-	return ((uint64_t)guest_random_u32(state) << 32) | guest_random_u32(state);
+	return ((u64)guest_random_u32(state) << 32) | guest_random_u32(state);
 }
 
 enum vm_mem_backing_src_type {
@@ -158,7 +160,7 @@ enum vm_mem_backing_src_type {
 
 struct vm_mem_backing_src_alias {
	const char *name;
-	uint32_t flag;
+	u32 flag;
 };
 
 #define MIN_RUN_DELAY_NS	200000UL
 
@@ -166,9 +168,9 @@ struct vm_mem_backing_src_alias {
 bool thp_configured(void);
 size_t get_trans_hugepagesz(void);
 size_t get_def_hugetlb_pagesz(void);
-const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i);
-size_t get_backing_src_pagesz(uint32_t i);
-bool is_backing_src_hugetlb(uint32_t i);
+const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(u32 i);
+size_t get_backing_src_pagesz(u32 i);
+bool is_backing_src_hugetlb(u32 i);
 void backing_src_help(const char *flag);
 enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name);
 long get_run_delay(void);
@@ -189,18 +191,18 @@ static inline bool backing_src_can_be_huge(enum vm_mem_backing_src_type t)
 }
 
 /* Aligns x up to the next multiple of size. Size must be a power of 2. */
-static inline uint64_t align_up(uint64_t x, uint64_t size)
+static inline u64 align_up(u64 x, u64 size)
 {
-	uint64_t mask = size - 1;
+	u64 mask = size - 1;
 
	TEST_ASSERT(size != 0 && !(size & (size - 1)),
		    "size not a power of 2: %lu", size);
	return ((x + mask) & ~mask);
 }
 
-static inline uint64_t align_down(uint64_t x, uint64_t size)
+static inline u64 align_down(u64 x, u64 size)
 {
-	uint64_t x_aligned_up = align_up(x, size);
+	u64 x_aligned_up = align_up(x, size);
 
	if (x == x_aligned_up)
		return x;
@@ -215,7 +217,7 @@ static inline void *align_ptr_up(void *x, size_t size)
 
 int atoi_paranoid(const char *num_str);
 
-static inline uint32_t atoi_positive(const char *name, const char *num_str)
+static inline u32 atoi_positive(const char *name, const char *num_str)
 {
	int num = atoi_paranoid(num_str);
 
@@ -223,7 +225,7 @@ static inline uint32_t atoi_positive(const char *name, const char *num_str)
	int num = atoi_paranoid(num_str);
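Worked examples for the helpers above; the values are arbitrary:

u64 next = align_up(0x1234, 0x1000);	/* -> 0x2000 */
u64 prev = align_down(0x1234, 0x1000);	/* -> 0x1000 */

struct guest_random_state rng = new_guest_random_state(guest_random_seed);
u64 r = guest_random_u64(&rng);		/* two u32 draws glued together */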
diff --git a/tools/testing/selftests/kvm/include/timer_test.h b/tools/testing/selftests/kvm/include/timer_test.h
index 9b6edaafe6d4..b7d5d2c84701 100644
--- a/tools/testing/selftests/kvm/include/timer_test.h
+++ b/tools/testing/selftests/kvm/include/timer_test.h
@@ -18,21 +18,21 @@
 
 /* Timer test cmdline parameters */
 struct test_args {
-	uint32_t nr_vcpus;
-	uint32_t nr_iter;
-	uint32_t timer_period_ms;
-	uint32_t migration_freq_ms;
-	uint32_t timer_err_margin_us;
+	u32 nr_vcpus;
+	u32 nr_iter;
+	u32 timer_period_ms;
+	u32 migration_freq_ms;
+	u32 timer_err_margin_us;
 
	/* Members of struct kvm_arm_counter_offset */
-	uint64_t counter_offset;
-	uint64_t reserved;
+	u64 counter_offset;
+	u64 reserved;
 };
 
 /* Shared variables between host and guest */
 struct test_vcpu_shared_data {
-	uint32_t nr_iter;
+	u32 nr_iter;
	int guest_stage;
-	uint64_t xcnt;
+	u64 xcnt;
 };
 
 extern struct test_args test_args;
diff --git a/tools/testing/selftests/kvm/include/timer_test.h b/tools/testing/selftests/kvm/include/timer_test.h
index 9b6edaafe6d4..b7d5d2c84701 100644
--- a/tools/testing/selftests/kvm/include/timer_test.h
+++ b/tools/testing/selftests/kvm/include/timer_test.h
@@ -18,21 +18,21 @@
 /* Timer test cmdline parameters */
 struct test_args {
-    uint32_t nr_vcpus;
-    uint32_t nr_iter;
-    uint32_t timer_period_ms;
-    uint32_t migration_freq_ms;
-    uint32_t timer_err_margin_us;
+    u32 nr_vcpus;
+    u32 nr_iter;
+    u32 timer_period_ms;
+    u32 migration_freq_ms;
+    u32 timer_err_margin_us;

     /* Members of struct kvm_arm_counter_offset */
-    uint64_t counter_offset;
-    uint64_t reserved;
+    u64 counter_offset;
+    u64 reserved;
 };

 /* Shared variables between host and guest */
 struct test_vcpu_shared_data {
-    uint32_t nr_iter;
+    u32 nr_iter;
     int guest_stage;
-    uint64_t xcnt;
+    u64 xcnt;
 };

 extern struct test_args test_args;
diff --git a/tools/testing/selftests/kvm/include/ucall_common.h b/tools/testing/selftests/kvm/include/ucall_common.h
index d9d6581b8d4f..cbdcb0a50c4f 100644
--- a/tools/testing/selftests/kvm/include/ucall_common.h
+++ b/tools/testing/selftests/kvm/include/ucall_common.h
@@ -21,26 +21,26 @@ enum {
 #define UCALL_BUFFER_LEN 1024

 struct ucall {
-    uint64_t cmd;
-    uint64_t args[UCALL_MAX_ARGS];
+    u64 cmd;
+    u64 args[UCALL_MAX_ARGS];
     char buffer[UCALL_BUFFER_LEN];

     /* Host virtual address of this struct. */
     struct ucall *hva;
 };

-void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa);
-void ucall_arch_do_ucall(vm_vaddr_t uc);
+void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa);
+void ucall_arch_do_ucall(gva_t uc);
 void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu);

-void ucall(uint64_t cmd, int nargs, ...);
-__printf(2, 3) void ucall_fmt(uint64_t cmd, const char *fmt, ...);
-__printf(5, 6) void ucall_assert(uint64_t cmd, const char *exp,
+void ucall(u64 cmd, int nargs, ...);
+__printf(2, 3) void ucall_fmt(u64 cmd, const char *fmt, ...);
+__printf(5, 6) void ucall_assert(u64 cmd, const char *exp,
                                  const char *file, unsigned int line,
                                  const char *fmt, ...);
-uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc);
-void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa);
-int ucall_nr_pages_required(uint64_t page_size);
+u64 get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc);
+void ucall_init(struct kvm_vm *vm, gpa_t mmio_gpa);
+int ucall_nr_pages_required(u64 page_size);

 /*
  * Perform userspace call without any associated data. This bare call avoids
@@ -48,7 +48,7 @@ int ucall_nr_pages_required(uint64_t page_size);
  * the full ucall() are problematic and/or unwanted. Note, this will come out
  * as UCALL_NONE on the backend.
  */
-#define GUEST_UCALL_NONE() ucall_arch_do_ucall((vm_vaddr_t)NULL)
+#define GUEST_UCALL_NONE() ucall_arch_do_ucall((gva_t)NULL)

 #define GUEST_SYNC_ARGS(stage, arg1, arg2, arg3, arg4) \
                 ucall(UCALL_SYNC, 6, "hello", stage, arg1, arg2, arg3, arg4)
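For illustration, the shape of a guest/host round trip over the ucall plumbing
above (a sketch, not part of the patch; guest_code and the vm/vcpu setup are
assumed to come from the usual selftest harness):

    static void guest_code(void)
    {
        GUEST_SYNC(1);      /* a ucall(UCALL_SYNC, ...) under the hood */
        GUEST_DONE();
    }

    /* Host side, after each vcpu_run(vcpu): */
    struct ucall uc;

    switch (get_ucall(vcpu, &uc)) {
    case UCALL_SYNC:
        /* uc.args[1] is the stage value passed to GUEST_SYNC() */
        break;
    case UCALL_DONE:
        break;
    }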
diff --git a/tools/testing/selftests/kvm/include/userfaultfd_util.h b/tools/testing/selftests/kvm/include/userfaultfd_util.h
index 60f7f9d435dc..0bc1dc16600e 100644
--- a/tools/testing/selftests/kvm/include/userfaultfd_util.h
+++ b/tools/testing/selftests/kvm/include/userfaultfd_util.h
@@ -25,7 +25,7 @@ struct uffd_reader_args {
 struct uffd_desc {
     int uffd;
-    uint64_t num_readers;
+    u64 num_readers;

     /* Holds the write ends of the pipes for killing the readers. */
     int *pipefds;
     pthread_t *readers;
@@ -33,8 +33,8 @@ struct uffd_desc {
 };

 struct uffd_desc *uffd_setup_demand_paging(int uffd_mode, useconds_t delay,
-                                           void *hva, uint64_t len,
-                                           uint64_t num_readers,
+                                           void *hva, u64 len,
+                                           u64 num_readers,
                                            uffd_handler_t handler);

 void uffd_stop_demand_paging(struct uffd_desc *uffd);
diff --git a/tools/testing/selftests/kvm/include/x86/apic.h b/tools/testing/selftests/kvm/include/x86/apic.h
index 5ca6bacbd70e..31887bdc3d6c 100644
--- a/tools/testing/selftests/kvm/include/x86/apic.h
+++ b/tools/testing/selftests/kvm/include/x86/apic.h
@@ -79,42 +79,42 @@ void apic_disable(void);
 void xapic_enable(void);
 void x2apic_enable(void);

-static inline uint32_t get_bsp_flag(void)
+static inline u32 get_bsp_flag(void)
 {
     return rdmsr(MSR_IA32_APICBASE) & MSR_IA32_APICBASE_BSP;
 }

-static inline uint32_t xapic_read_reg(unsigned int reg)
+static inline u32 xapic_read_reg(unsigned int reg)
 {
-    return ((volatile uint32_t *)APIC_DEFAULT_GPA)[reg >> 2];
+    return ((volatile u32 *)APIC_DEFAULT_GPA)[reg >> 2];
 }

-static inline void xapic_write_reg(unsigned int reg, uint32_t val)
+static inline void xapic_write_reg(unsigned int reg, u32 val)
 {
-    ((volatile uint32_t *)APIC_DEFAULT_GPA)[reg >> 2] = val;
+    ((volatile u32 *)APIC_DEFAULT_GPA)[reg >> 2] = val;
 }

-static inline uint64_t x2apic_read_reg(unsigned int reg)
+static inline u64 x2apic_read_reg(unsigned int reg)
 {
     return rdmsr(APIC_BASE_MSR + (reg >> 4));
 }

-static inline uint8_t x2apic_write_reg_safe(unsigned int reg, uint64_t value)
+static inline u8 x2apic_write_reg_safe(unsigned int reg, u64 value)
 {
     return wrmsr_safe(APIC_BASE_MSR + (reg >> 4), value);
 }

-static inline void x2apic_write_reg(unsigned int reg, uint64_t value)
+static inline void x2apic_write_reg(unsigned int reg, u64 value)
 {
-    uint8_t fault = x2apic_write_reg_safe(reg, value);
+    u8 fault = x2apic_write_reg_safe(reg, value);

     __GUEST_ASSERT(!fault, "Unexpected fault 0x%x on WRMSR(%x) = %lx\n",
                    fault, APIC_BASE_MSR + (reg >> 4), value);
 }

-static inline void x2apic_write_reg_fault(unsigned int reg, uint64_t value)
+static inline void x2apic_write_reg_fault(unsigned int reg, u64 value)
 {
-    uint8_t fault = x2apic_write_reg_safe(reg, value);
+    u8 fault = x2apic_write_reg_safe(reg, value);

     __GUEST_ASSERT(fault == GP_VECTOR,
                    "Wanted #GP on WRMSR(%x) = %lx, got 0x%x\n",
diff --git a/tools/testing/selftests/kvm/include/x86/evmcs.h b/tools/testing/selftests/kvm/include/x86/evmcs.h
index 5a74bb30e2f8..be79bda024bf 100644
--- a/tools/testing/selftests/kvm/include/x86/evmcs.h
+++ b/tools/testing/selftests/kvm/include/x86/evmcs.h
@@ -10,9 +10,5 @@
 #include "hyperv.h"
 #include "vmx.h"

-#define u16 uint16_t
-#define u32 uint32_t
-#define u64 uint64_t
-
 #define EVMCS_VERSION 1

@@ -245,7 +245,7 @@ static inline void evmcs_enable(void)
     enable_evmcs = true;
 }

-static inline int evmcs_vmptrld(uint64_t vmcs_pa, void *vmcs)
+static inline int evmcs_vmptrld(u64 vmcs_pa, void *vmcs)
 {
     current_vp_assist->current_nested_vmcs = vmcs_pa;
     current_vp_assist->enlighten_vmentry = 1;
@@ -265,7 +265,7 @@ static inline bool load_evmcs(struct hyperv_test_pages *hv)
     return true;
 }

-static inline int evmcs_vmptrst(uint64_t *value)
+static inline int evmcs_vmptrst(u64 *value)
 {
     *value = current_vp_assist->current_nested_vmcs &
         ~HV_X64_MSR_VP_ASSIST_PAGE_ENABLE;
@@ -273,7 +273,7 @@ static inline int evmcs_vmptrst(uint64_t *value)
     return 0;
 }

-static inline int evmcs_vmread(uint64_t encoding, uint64_t *value)
+static inline int evmcs_vmread(u64 encoding, u64 *value)
 {
     switch (encoding) {
     case GUEST_RIP:
@@ -672,7 +672,7 @@ static inline int evmcs_vmread(uint64_t encoding, uint64_t *value)
     return 0;
 }

-static inline int evmcs_vmwrite(uint64_t encoding, uint64_t value)
+static inline int evmcs_vmwrite(u64 encoding, u64 value)
 {
     switch (encoding) {
     case GUEST_RIP:
@@ -1226,9 +1226,9 @@ static inline int evmcs_vmlaunch(void)
         "pop %%rbp;"
         : [ret]"=&a"(ret)
         : [host_rsp]"r"
-          ((uint64_t)&current_evmcs->host_rsp),
+          ((u64)&current_evmcs->host_rsp),
           [host_rip]"r"
-          ((uint64_t)&current_evmcs->host_rip)
+          ((u64)&current_evmcs->host_rip)
         : "memory", "cc", "rbx", "r8", "r9", "r10",
           "r11", "r12", "r13", "r14", "r15");
     return ret;
@@ -1265,9 +1265,9 @@ static inline int evmcs_vmresume(void)
         "pop %%rbp;"
         : [ret]"=&a"(ret)
         : [host_rsp]"r"
-          ((uint64_t)&current_evmcs->host_rsp),
+          ((u64)&current_evmcs->host_rsp),
           [host_rip]"r"
-          ((uint64_t)&current_evmcs->host_rip)
+          ((u64)&current_evmcs->host_rip)
         : "memory", "cc", "rbx", "r8", "r9", "r10",
           "r11", "r12", "r13", "r14", "r15");
     return ret;
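For illustration, how the fault-reporting x2APIC helpers from apic.h above are
used in guest code (a sketch, not part of the patch; it relies on the APIC ID
register being read-only in x2APIC mode, so the WRMSR takes a #GP):

    /* Asserts that the write faults with #GP: */
    x2apic_write_reg_fault(APIC_ID, 0);

    /* Or check the returned vector by hand: */
    u8 fault = x2apic_write_reg_safe(APIC_ID, 0);

    GUEST_ASSERT(fault == GP_VECTOR);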
diff --git a/tools/testing/selftests/kvm/include/x86/hyperv.h b/tools/testing/selftests/kvm/include/x86/hyperv.h
index f13e532be240..78003f5a22f3 100644
--- a/tools/testing/selftests/kvm/include/x86/hyperv.h
+++ b/tools/testing/selftests/kvm/include/x86/hyperv.h
@@ -254,12 +254,12 @@
  * Issue a Hyper-V hypercall. Returns exception vector raised or 0, 'hv_status'
  * is set to the hypercall status (if no exception occurred).
  */
-static inline uint8_t __hyperv_hypercall(u64 control, vm_vaddr_t input_address,
-                                         vm_vaddr_t output_address,
-                                         uint64_t *hv_status)
+static inline u8 __hyperv_hypercall(u64 control, gva_t input_address,
+                                    gva_t output_address,
+                                    u64 *hv_status)
 {
-    uint64_t error_code;
-    uint8_t vector;
+    u64 error_code;
+    u8 vector;

     /* Note both the hypercall and the "asm safe" clobber r9-r11. */
     asm volatile("mov %[output_address], %%r8\n\t"
@@ -274,11 +274,11 @@ static inline uint8_t __hyperv_hypercall(u64 control, vm_vaddr_t input_address,
 }

 /* Issue a Hyper-V hypercall and assert that it succeeded. */
-static inline void hyperv_hypercall(u64 control, vm_vaddr_t input_address,
-                                    vm_vaddr_t output_address)
+static inline void hyperv_hypercall(u64 control, gva_t input_address,
+                                    gva_t output_address)
 {
-    uint64_t hv_status;
-    uint8_t vector;
+    u64 hv_status;
+    u8 vector;

     vector = __hyperv_hypercall(control, input_address, output_address,
                                 &hv_status);
@@ -327,27 +327,27 @@ struct hv_vp_assist_page {

 extern struct hv_vp_assist_page *current_vp_assist;

-int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist);
+int enable_vp_assist(u64 vp_assist_pa, void *vp_assist);

 struct hyperv_test_pages {
     /* VP assist page */
     void *vp_assist_hva;
-    uint64_t vp_assist_gpa;
+    u64 vp_assist_gpa;
     void *vp_assist;

     /* Partition assist page */
     void *partition_assist_hva;
-    uint64_t partition_assist_gpa;
+    u64 partition_assist_gpa;
     void *partition_assist;

     /* Enlightened VMCS */
     void *enlightened_vmcs_hva;
-    uint64_t enlightened_vmcs_gpa;
+    u64 enlightened_vmcs_gpa;
     void *enlightened_vmcs;
 };

 struct hyperv_test_pages *vcpu_alloc_hyperv_test_pages(struct kvm_vm *vm,
-                                                       vm_vaddr_t *p_hv_pages_gva);
+                                                       gva_t *p_hv_pages_gva);

 /* HV_X64_MSR_TSC_INVARIANT_CONTROL bits */
 #define HV_INVARIANT_TSC_EXPOSED BIT_ULL(0)
diff --git a/tools/testing/selftests/kvm/include/x86/kvm_util_arch.h b/tools/testing/selftests/kvm/include/x86/kvm_util_arch.h
index be35d26bb320..c33ab6e04171 100644
--- a/tools/testing/selftests/kvm/include/x86/kvm_util_arch.h
+++ b/tools/testing/selftests/kvm/include/x86/kvm_util_arch.h
@@ -11,19 +11,19 @@ extern bool is_forced_emulation_enabled;

 struct pte_masks {
-    uint64_t present;
-    uint64_t writable;
-    uint64_t user;
-    uint64_t readable;
-    uint64_t executable;
-    uint64_t accessed;
-    uint64_t dirty;
-    uint64_t huge;
-    uint64_t nx;
-    uint64_t c;
-    uint64_t s;
+    u64 present;
+    u64 writable;
+    u64 user;
+    u64 readable;
+    u64 executable;
+    u64 accessed;
+    u64 dirty;
+    u64 huge;
+    u64 nx;
+    u64 c;
+    u64 s;

-    uint64_t always_set;
+    u64 always_set;
 };

 struct kvm_mmu_arch {
@@ -33,12 +33,12 @@ struct kvm_mmu_arch {
 struct kvm_mmu;

 struct kvm_vm_arch {
-    vm_vaddr_t gdt;
-    vm_vaddr_t tss;
-    vm_vaddr_t idt;
+    gva_t gdt;
+    gva_t tss;
+    gva_t idt;

-    uint64_t c_bit;
-    uint64_t s_bit;
+    u64 c_bit;
+    u64 s_bit;
     int sev_fd;
     bool is_pt_protected;
 };
@@ -62,7 +62,7 @@ do { \
                  : "+m" (mem) \
                  : "r" (val) : "memory"); \
     } else { \
-        uint64_t __old = READ_ONCE(mem); \
+        u64 __old = READ_ONCE(mem); \
 \
         __asm__ __volatile__(KVM_FEP LOCK_PREFIX "cmpxchg %[new], %[ptr]" \
                      : [ptr] "+m" (mem), [old] "+a" (__old) \
diff --git a/tools/testing/selftests/kvm/include/x86/pmu.h b/tools/testing/selftests/kvm/include/x86/pmu.h
index 72575eadb63a..98537cc8840d 100644
--- a/tools/testing/selftests/kvm/include/x86/pmu.h
+++ b/tools/testing/selftests/kvm/include/x86/pmu.h
@@ -6,8 +6,8 @@
 #define SELFTEST_KVM_PMU_H

 #include <stdbool.h>
-#include <stdint.h>
+#include <linux/types.h>
 #include <linux/bits.h>

 #define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300
@@ -104,14 +104,15 @@ enum amd_pmu_zen_events {
     NR_AMD_ZEN_EVENTS,
 };

-extern const uint64_t intel_pmu_arch_events[];
-extern const uint64_t amd_pmu_zen_events[];
+extern const u64 intel_pmu_arch_events[];
+extern const u64 amd_pmu_zen_events[];

 enum pmu_errata {
     INSTRUCTIONS_RETIRED_OVERCOUNT,
     BRANCHES_RETIRED_OVERCOUNT,
 };
-extern uint64_t pmu_errata_mask;
+
+extern u64 pmu_errata_mask;

 void kvm_init_pmu_errata(void);
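For illustration, what a guest-side Hyper-V hypercall looks like through the
hyperv.h helpers above (a sketch, not part of the patch; the hypercall code
and the pre-mapped input/output GVAs are assumptions):

    gva_t input_gva, output_gva;    /* mapped by the test beforehand */
    u64 hv_status;
    u8 vector;

    vector = __hyperv_hypercall(HVCALL_POST_MESSAGE, input_gva,
                                output_gva, &hv_status);
    GUEST_ASSERT(!vector);

    /* Or let the wrapper assert both vector == 0 and hv_status == 0: */
    hyperv_hypercall(HVCALL_POST_MESSAGE, input_gva, output_gva);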
diff --git a/tools/testing/selftests/kvm/include/x86/processor.h b/tools/testing/selftests/kvm/include/x86/processor.h
index d8634a760a60..77f576ee7789 100644
--- a/tools/testing/selftests/kvm/include/x86/processor.h
+++ b/tools/testing/selftests/kvm/include/x86/processor.h
@@ -23,7 +23,7 @@ extern bool host_cpu_is_intel;
 extern bool host_cpu_is_amd;
 extern bool host_cpu_is_hygon;
 extern bool host_cpu_is_amd_compatible;
-extern uint64_t guest_tsc_khz;
+extern u64 guest_tsc_khz;

 #ifndef MAX_NR_CPUID_ENTRIES
 #define MAX_NR_CPUID_ENTRIES 100
@@ -399,17 +399,17 @@ struct gpr64_regs {
 };

 struct desc64 {
-    uint16_t limit0;
-    uint16_t base0;
+    u16 limit0;
+    u16 base0;
     unsigned base1:8, type:4, s:1, dpl:2, p:1;
     unsigned limit1:4, avl:1, l:1, db:1, g:1, base2:8;
-    uint32_t base3;
-    uint32_t zero1;
+    u32 base3;
+    u32 zero1;
 } __attribute__((packed));

 struct desc_ptr {
-    uint16_t size;
-    uint64_t address;
+    u16 size;
+    u64 address;
 } __attribute__((packed));

 struct kvm_x86_state {
@@ -427,18 +427,18 @@ struct kvm_x86_state {
     struct kvm_msrs msrs;
 };

-static inline uint64_t get_desc64_base(const struct desc64 *desc)
+static inline u64 get_desc64_base(const struct desc64 *desc)
 {
-    return (uint64_t)desc->base3 << 32 |
-           (uint64_t)desc->base2 << 24 |
-           (uint64_t)desc->base1 << 16 |
-           (uint64_t)desc->base0;
+    return (u64)desc->base3 << 32 |
+           (u64)desc->base2 << 24 |
+           (u64)desc->base1 << 16 |
+           (u64)desc->base0;
 }

-static inline uint64_t rdtsc(void)
+static inline u64 rdtsc(void)
 {
-    uint32_t eax, edx;
-    uint64_t tsc_val;
+    u32 eax, edx;
+    u64 tsc_val;
     /*
      * The lfence is to wait (on Intel CPUs) until all previous
      * instructions have been executed. If software requires RDTSC to be
@@ -446,39 +446,39 @@ static inline uint64_t rdtsc(void)
      * execute LFENCE immediately after RDTSC
      */
     __asm__ __volatile__("lfence; rdtsc; lfence" : "=a"(eax), "=d"(edx));
-    tsc_val = ((uint64_t)edx) << 32 | eax;
+    tsc_val = ((u64)edx) << 32 | eax;
     return tsc_val;
 }

-static inline uint64_t rdtscp(uint32_t *aux)
+static inline u64 rdtscp(u32 *aux)
 {
-    uint32_t eax, edx;
+    u32 eax, edx;

     __asm__ __volatile__("rdtscp" : "=a"(eax), "=d"(edx), "=c"(*aux));
-    return ((uint64_t)edx) << 32 | eax;
+    return ((u64)edx) << 32 | eax;
 }

-static inline uint64_t rdmsr(uint32_t msr)
+static inline u64 rdmsr(u32 msr)
 {
-    uint32_t a, d;
+    u32 a, d;

     __asm__ __volatile__("rdmsr" : "=a"(a), "=d"(d) : "c"(msr) : "memory");

-    return a | ((uint64_t) d << 32);
+    return a | ((u64)d << 32);
 }

-static inline void wrmsr(uint32_t msr, uint64_t value)
+static inline void wrmsr(u32 msr, u64 value)
 {
-    uint32_t a = value;
-    uint32_t d = value >> 32;
+    u32 a = value;
+    u32 d = value >> 32;

     __asm__ __volatile__("wrmsr" :: "a"(a), "d"(d), "c"(msr) : "memory");
 }

-static inline uint16_t inw(uint16_t port)
+static inline u16 inw(u16 port)
 {
-    uint16_t tmp;
+    u16 tmp;

     __asm__ __volatile__("in %%dx, %%ax"
         : /* output */ "=a" (tmp)
@@ -487,120 +487,120 @@ static inline uint16_t inw(uint16_t port)
     return tmp;
 }

-static inline uint16_t get_es(void)
+static inline u16 get_es(void)
 {
-    uint16_t es;
+    u16 es;

     __asm__ __volatile__("mov %%es, %[es]"
         : /* output */ [es]"=rm"(es));
     return es;
 }

-static inline uint16_t get_cs(void)
+static inline u16 get_cs(void)
 {
-    uint16_t cs;
+    u16 cs;

     __asm__ __volatile__("mov %%cs, %[cs]"
         : /* output */ [cs]"=rm"(cs));
     return cs;
 }

-static inline uint16_t get_ss(void)
+static inline u16 get_ss(void)
 {
-    uint16_t ss;
+    u16 ss;

     __asm__ __volatile__("mov %%ss, %[ss]"
         : /* output */ [ss]"=rm"(ss));
     return ss;
 }

-static inline uint16_t get_ds(void)
+static inline u16 get_ds(void)
 {
-    uint16_t ds;
+    u16 ds;

     __asm__ __volatile__("mov %%ds, %[ds]"
         : /* output */ [ds]"=rm"(ds));
     return ds;
 }

-static inline uint16_t get_fs(void)
+static inline u16 get_fs(void)
 {
-    uint16_t fs;
+    u16 fs;

     __asm__ __volatile__("mov %%fs, %[fs]"
         : /* output */ [fs]"=rm"(fs));
     return fs;
 }

-static inline uint16_t get_gs(void)
+static inline u16 get_gs(void)
 {
-    uint16_t gs;
+    u16 gs;

     __asm__ __volatile__("mov %%gs, %[gs]"
         : /* output */ [gs]"=rm"(gs));
     return gs;
 }

-static inline uint16_t get_tr(void)
+static inline u16 get_tr(void)
 {
-    uint16_t tr;
+    u16 tr;

     __asm__ __volatile__("str %[tr]"
         : /* output */ [tr]"=rm"(tr));
     return tr;
 }

-static inline uint64_t get_cr0(void)
+static inline u64 get_cr0(void)
 {
-    uint64_t cr0;
+    u64 cr0;

     __asm__ __volatile__("mov %%cr0, %[cr0]"
         : /* output */ [cr0]"=r"(cr0));
     return cr0;
 }

-static inline void set_cr0(uint64_t val)
+static inline void set_cr0(u64 val)
 {
     __asm__ __volatile__("mov %0, %%cr0" : : "r" (val) : "memory");
 }

-static inline uint64_t get_cr3(void)
+static inline u64 get_cr3(void)
 {
-    uint64_t cr3;
+    u64 cr3;

     __asm__ __volatile__("mov %%cr3, %[cr3]"
         : /* output */ [cr3]"=r"(cr3));
     return cr3;
 }

-static inline void set_cr3(uint64_t val)
+static inline void set_cr3(u64 val)
 {
     __asm__ __volatile__("mov %0, %%cr3" : : "r" (val) : "memory");
 }

-static inline uint64_t get_cr4(void)
+static inline u64 get_cr4(void)
 {
-    uint64_t cr4;
+    u64 cr4;

     __asm__ __volatile__("mov %%cr4, %[cr4]"
         : /* output */ [cr4]"=r"(cr4));
     return cr4;
 }

-static inline void set_cr4(uint64_t val)
+static inline void set_cr4(u64 val)
 {
     __asm__ __volatile__("mov %0, %%cr4" : : "r" (val) : "memory");
 }

-static inline uint64_t get_cr8(void)
+static inline u64 get_cr8(void)
 {
-    uint64_t cr8;
+    u64 cr8;

     __asm__ __volatile__("mov %%cr8, %[cr8]"
         : [cr8]"=r"(cr8));
     return cr8;
 }

-static inline void set_cr8(uint64_t val)
+static inline void set_cr8(u64 val)
 {
     __asm__ __volatile__("mov %0, %%cr8" : : "r" (val) : "memory");
 }
@@ -651,14 +651,14 @@ static inline struct desc_ptr get_idt(void)
     return idt;
 }

-static inline void outl(uint16_t port, uint32_t value)
+static inline void outl(u16 port, u32 value)
 {
     __asm__ __volatile__("outl %%eax, %%dx" : : "d"(port), "a"(value));
 }

-static inline void __cpuid(uint32_t function, uint32_t index,
-                           uint32_t *eax, uint32_t *ebx,
-                           uint32_t *ecx, uint32_t *edx)
+static inline void __cpuid(u32 function, u32 index,
+                           u32 *eax, u32 *ebx,
+                           u32 *ecx, u32 *edx)
 {
     *eax = function;
     *ecx = index;
@@ -672,35 +672,35 @@ static inline void __cpuid(uint32_t function, uint32_t index,
     : "memory");
 }

-static inline void cpuid(uint32_t function,
-                         uint32_t *eax, uint32_t *ebx,
-                         uint32_t *ecx, uint32_t *edx)
+static inline void cpuid(u32 function,
+                         u32 *eax, u32 *ebx,
+                         u32 *ecx, u32 *edx)
 {
     return __cpuid(function, 0, eax, ebx, ecx, edx);
 }

-static inline uint32_t this_cpu_fms(void)
+static inline u32 this_cpu_fms(void)
 {
-    uint32_t eax, ebx, ecx, edx;
+    u32 eax, ebx, ecx, edx;

     cpuid(1, &eax, &ebx, &ecx, &edx);
     return eax;
 }

-static inline uint32_t this_cpu_family(void)
+static inline u32 this_cpu_family(void)
 {
     return x86_family(this_cpu_fms());
 }

-static inline uint32_t this_cpu_model(void)
+static inline u32 this_cpu_model(void)
 {
     return x86_model(this_cpu_fms());
 }

 static inline bool this_cpu_vendor_string_is(const char *vendor)
 {
-    const uint32_t *chunk = (const uint32_t *)vendor;
-    uint32_t eax, ebx, ecx, edx;
+    const u32 *chunk = (const u32 *)vendor;
+    u32 eax, ebx, ecx, edx;

     cpuid(0, &eax, &ebx, &ecx, &edx);
     return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]);
@@ -724,10 +724,9 @@ static inline bool this_cpu_is_hygon(void)
     return this_cpu_vendor_string_is("HygonGenuine");
 }

-static inline uint32_t __this_cpu_has(uint32_t function, uint32_t index,
-                                      uint8_t reg, uint8_t lo, uint8_t hi)
+static inline u32 __this_cpu_has(u32 function, u32 index, u8 reg, u8 lo, u8 hi)
 {
-    uint32_t gprs[4];
+    u32 gprs[4];

     __cpuid(function, index,
         &gprs[KVM_CPUID_EAX],
         &gprs[KVM_CPUID_EBX],
@@ -742,7 +741,7 @@ static inline bool this_cpu_has(struct kvm_x86_cpu_feature feature)
                           feature.reg, feature.bit, feature.bit);
 }

-static inline uint32_t this_cpu_property(struct kvm_x86_cpu_property property)
+static inline u32 this_cpu_property(struct kvm_x86_cpu_property property)
 {
     return __this_cpu_has(property.function, property.index,
                           property.reg, property.lo_bit, property.hi_bit);
@@ -750,7 +749,7 @@ static inline uint32_t this_cpu_property(struct kvm_x86_cpu_property property)

 static __always_inline bool this_cpu_has_p(struct kvm_x86_cpu_property property)
 {
-    uint32_t max_leaf;
+    u32 max_leaf;

     switch (property.function & 0xc0000000) {
     case 0:
@@ -770,7 +769,7 @@ static __always_inline bool this_cpu_has_p(struct kvm_x86_cpu_property property)

 static inline bool this_pmu_has(struct kvm_x86_pmu_feature feature)
 {
-    uint32_t nr_bits;
+    u32 nr_bits;

     if (feature.f.reg == KVM_CPUID_EBX) {
         nr_bits = this_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
@@ -782,13 +781,13 @@ static inline bool this_pmu_has(struct kvm_x86_pmu_feature feature)
     return nr_bits > feature.f.bit || this_cpu_has(feature.f);
 }

-static __always_inline uint64_t this_cpu_supported_xcr0(void)
+static __always_inline u64 this_cpu_supported_xcr0(void)
 {
     if (!this_cpu_has_p(X86_PROPERTY_SUPPORTED_XCR0_LO))
         return 0;

     return this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_LO) |
-           ((uint64_t)this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32);
+           ((u64)this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32);
 }

 typedef u32 __attribute__((vector_size(16))) sse128_t;
@@ -867,7 +866,7 @@ static inline void cpu_relax(void)

 static inline void udelay(unsigned long usec)
 {
-    uint64_t start, now, cycles;
+    u64 start, now, cycles;

     GUEST_ASSERT(guest_tsc_khz);
     cycles = guest_tsc_khz / 1000 * usec;
@@ -898,8 +897,8 @@ void kvm_x86_state_cleanup(struct kvm_x86_state *state);

 const struct kvm_msr_list *kvm_get_msr_index_list(void);
 const struct kvm_msr_list *kvm_get_feature_msr_index_list(void);
-bool kvm_msr_is_in_save_restore_list(uint32_t msr_index);
-uint64_t kvm_get_feature_msr(uint64_t msr_index);
+bool kvm_msr_is_in_save_restore_list(u32 msr_index);
+u64 kvm_get_feature_msr(u64 msr_index);

 static inline void vcpu_msrs_get(struct kvm_vcpu *vcpu,
                                  struct kvm_msrs *msrs)
@@ -954,20 +953,20 @@ static inline void vcpu_xcrs_set(struct kvm_vcpu *vcpu, struct kvm_xcrs *xcrs)
 }

 const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
-                                               uint32_t function, uint32_t index);
+                                               u32 function, u32 index);
 const struct kvm_cpuid2 *kvm_get_supported_cpuid(void);

-static inline uint32_t kvm_cpu_fms(void)
+static inline u32 kvm_cpu_fms(void)
 {
     return get_cpuid_entry(kvm_get_supported_cpuid(), 0x1, 0)->eax;
 }

-static inline uint32_t kvm_cpu_family(void)
+static inline u32 kvm_cpu_family(void)
 {
     return x86_family(kvm_cpu_fms());
 }

-static inline uint32_t kvm_cpu_model(void)
+static inline u32 kvm_cpu_model(void)
 {
     return x86_model(kvm_cpu_fms());
 }
@@ -980,17 +979,17 @@ static inline bool kvm_cpu_has(struct kvm_x86_cpu_feature feature)
     return kvm_cpuid_has(kvm_get_supported_cpuid(), feature);
 }

-uint32_t kvm_cpuid_property(const struct kvm_cpuid2 *cpuid,
-                            struct kvm_x86_cpu_property property);
+u32 kvm_cpuid_property(const struct kvm_cpuid2 *cpuid,
+                       struct kvm_x86_cpu_property property);

-static inline uint32_t kvm_cpu_property(struct kvm_x86_cpu_property property)
+static inline u32 kvm_cpu_property(struct kvm_x86_cpu_property property)
 {
     return kvm_cpuid_property(kvm_get_supported_cpuid(), property);
 }

 static __always_inline bool kvm_cpu_has_p(struct kvm_x86_cpu_property property)
 {
-    uint32_t max_leaf;
+    u32 max_leaf;

     switch (property.function & 0xc0000000) {
     case 0:
@@ -1010,7 +1009,7 @@ static __always_inline bool kvm_cpu_has_p(struct kvm_x86_cpu_property property)

 static inline bool kvm_pmu_has(struct kvm_x86_pmu_feature feature)
 {
-    uint32_t nr_bits;
+    u32 nr_bits;

     if (feature.f.reg == KVM_CPUID_EBX) {
         nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
@@ -1022,13 +1021,13 @@ static inline bool kvm_pmu_has(struct kvm_x86_pmu_feature feature)
     return nr_bits > feature.f.bit || kvm_cpu_has(feature.f);
 }

-static __always_inline uint64_t kvm_cpu_supported_xcr0(void)
+static __always_inline u64 kvm_cpu_supported_xcr0(void)
 {
     if (!kvm_cpu_has_p(X86_PROPERTY_SUPPORTED_XCR0_LO))
         return 0;

     return kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_LO) |
-           ((uint64_t)kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32);
+           ((u64)kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32);
 }

 static inline size_t kvm_cpuid2_size(int nr_entries)
@@ -1062,8 +1061,8 @@ static inline void vcpu_get_cpuid(struct kvm_vcpu *vcpu)
 }

 static inline struct kvm_cpuid_entry2 *__vcpu_get_cpuid_entry(struct kvm_vcpu *vcpu,
-                                                              uint32_t function,
-                                                              uint32_t index)
+                                                              u32 function,
+                                                              u32 index)
 {
     TEST_ASSERT(vcpu->cpuid, "Must do vcpu_init_cpuid() first (or equivalent)");

@@ -1074,7 +1073,7 @@ static inline struct kvm_cpuid_entry2 *__vcpu_get_cpuid_entry(struct kvm_vcpu *v
 }

 static inline struct kvm_cpuid_entry2 *vcpu_get_cpuid_entry(struct kvm_vcpu *vcpu,
-                                                            uint32_t function)
+                                                            u32 function)
 {
     return __vcpu_get_cpuid_entry(vcpu, function, 0);
 }
@@ -1104,10 +1103,10 @@ static inline void vcpu_set_cpuid(struct kvm_vcpu *vcpu)

 void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu,
                              struct kvm_x86_cpu_property property,
-                             uint32_t value);
-void vcpu_set_cpuid_maxphyaddr(struct kvm_vcpu *vcpu, uint8_t maxphyaddr);
+                             u32 value);
+void vcpu_set_cpuid_maxphyaddr(struct kvm_vcpu *vcpu, u8 maxphyaddr);

-void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, uint32_t function);
+void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, u32 function);

 static inline bool vcpu_cpuid_has(struct kvm_vcpu *vcpu,
                                   struct kvm_x86_cpu_feature feature)
@@ -1135,8 +1134,8 @@ static inline void vcpu_clear_cpuid_feature(struct kvm_vcpu *vcpu,
     vcpu_set_or_clear_cpuid_feature(vcpu, feature, false);
 }

-uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index);
-int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value);
+u64 vcpu_get_msr(struct kvm_vcpu *vcpu, u64 msr_index);
+int _vcpu_set_msr(struct kvm_vcpu *vcpu, u64 msr_index, u64 msr_value);

 /*
  * Assert on an MSR access(es) and pretty print the MSR name when possible.
@@ -1161,14 +1160,14 @@ do { \
  * is changing, etc. This is NOT an exhaustive list! The intent is to filter
  * out MSRs that are not durable _and_ that a selftest wants to write.
  */
-static inline bool is_durable_msr(uint32_t msr)
+static inline bool is_durable_msr(u32 msr)
 {
     return msr != MSR_IA32_TSC;
 }

 #define vcpu_set_msr(vcpu, msr, val) \
 do { \
-    uint64_t r, v = val; \
+    u64 r, v = val; \
 \
     TEST_ASSERT_MSR(_vcpu_set_msr(vcpu, msr, v) == 1, \
                     "KVM_SET_MSRS failed on %s, value = 0x%lx", msr, #msr, v); \
@@ -1182,28 +1181,28 @@
 void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits);
 void kvm_init_vm_address_properties(struct kvm_vm *vm);

 struct ex_regs {
-    uint64_t rax, rcx, rdx, rbx;
-    uint64_t rbp, rsi, rdi;
-    uint64_t r8, r9, r10, r11;
-    uint64_t r12, r13, r14, r15;
-    uint64_t vector;
-    uint64_t error_code;
-    uint64_t rip;
-    uint64_t cs;
-    uint64_t rflags;
+    u64 rax, rcx, rdx, rbx;
+    u64 rbp, rsi, rdi;
+    u64 r8, r9, r10, r11;
+    u64 r12, r13, r14, r15;
+    u64 vector;
+    u64 error_code;
+    u64 rip;
+    u64 cs;
+    u64 rflags;
 };

 struct idt_entry {
-    uint16_t offset0;
-    uint16_t selector;
-    uint16_t ist : 3;
-    uint16_t : 5;
-    uint16_t type : 4;
-    uint16_t : 1;
-    uint16_t dpl : 2;
-    uint16_t p : 1;
-    uint16_t offset1;
-    uint32_t offset2; uint32_t reserved;
+    u16 offset0;
+    u16 selector;
+    u16 ist : 3;
+    u16 : 5;
+    u16 type : 4;
+    u16 : 1;
+    u16 dpl : 2;
+    u16 p : 1;
+    u16 offset1;
+    u32 offset2; u32 reserved;
 };

 void vm_install_exception_handler(struct kvm_vm *vm, int vector,
@@ -1262,8 +1261,8 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,

 #define kvm_asm_safe(insn, inputs...) \
 ({ \
-    uint64_t ign_error_code; \
-    uint8_t vector; \
+    u64 ign_error_code; \
+    u8 vector; \
 \
     asm volatile(KVM_ASM_SAFE(insn) \
                  : KVM_ASM_SAFE_OUTPUTS(vector, ign_error_code) \
@@ -1274,7 +1273,7 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,

 #define kvm_asm_safe_ec(insn, error_code, inputs...) \
 ({ \
-    uint8_t vector; \
+    u8 vector; \
 \
     asm volatile(KVM_ASM_SAFE(insn) \
                  : KVM_ASM_SAFE_OUTPUTS(vector, error_code) \
@@ -1285,8 +1284,8 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,

 #define kvm_asm_safe_fep(insn, inputs...) \
 ({ \
-    uint64_t ign_error_code; \
-    uint8_t vector; \
+    u64 ign_error_code; \
+    u8 vector; \
 \
     asm volatile(KVM_ASM_SAFE_FEP(insn) \
                  : KVM_ASM_SAFE_OUTPUTS(vector, ign_error_code) \
@@ -1297,7 +1296,7 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,

 #define kvm_asm_safe_ec_fep(insn, error_code, inputs...) \
 ({ \
-    uint8_t vector; \
+    u8 vector; \
 \
     asm volatile(KVM_ASM_SAFE_FEP(insn) \
                  : KVM_ASM_SAFE_OUTPUTS(vector, error_code) \
@@ -1307,11 +1306,11 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,
 })

 #define BUILD_READ_U64_SAFE_HELPER(insn, _fep, _FEP) \
-static inline uint8_t insn##_safe ##_fep(uint32_t idx, uint64_t *val) \
+static inline u8 insn##_safe ##_fep(u32 idx, u64 *val) \
 { \
-    uint64_t error_code; \
-    uint8_t vector; \
-    uint32_t a, d; \
+    u64 error_code; \
+    u8 vector; \
+    u32 a, d; \
 \
     asm volatile(KVM_ASM_SAFE##_FEP(#insn) \
                  : "=a"(a), "=d"(d), \
@@ -1319,7 +1318,7 @@ static inline uint8_t insn##_safe ##_fep(uint32_t idx, uint64_t *val) \
                  : "c"(idx) \
                  : KVM_ASM_SAFE_CLOBBERS); \
 \
-    *val = (uint64_t)a | ((uint64_t)d << 32); \
+    *val = (u64)a | ((u64)d << 32); \
     return vector; \
 }

@@ -1335,12 +1334,12 @@ BUILD_READ_U64_SAFE_HELPERS(rdmsr)
 BUILD_READ_U64_SAFE_HELPERS(rdpmc)
 BUILD_READ_U64_SAFE_HELPERS(xgetbv)

-static inline uint8_t wrmsr_safe(uint32_t msr, uint64_t val)
+static inline u8 wrmsr_safe(u32 msr, u64 val)
 {
     return kvm_asm_safe("wrmsr", "a"(val & -1u), "d"(val >> 32), "c"(msr));
 }

-static inline uint8_t xsetbv_safe(uint32_t index, uint64_t value)
+static inline u8 xsetbv_safe(u32 index, u64 value)
 {
     u32 eax = value;
     u32 edx = value >> 32;
@@ -1395,23 +1394,20 @@ static inline bool kvm_is_lbrv_enabled(void)
     return !!get_kvm_amd_param_integer("lbrv");
 }

-uint64_t *vm_get_pte(struct kvm_vm *vm, uint64_t vaddr);
+u64 *vm_get_pte(struct kvm_vm *vm, gva_t gva);

-uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
-                       uint64_t a3);
-uint64_t __xen_hypercall(uint64_t nr, uint64_t a0, void *a1);
-void xen_hypercall(uint64_t nr, uint64_t a0, void *a1);
+u64 kvm_hypercall(u64 nr, u64 a0, u64 a1, u64 a2, u64 a3);
+u64 __xen_hypercall(u64 nr, u64 a0, void *a1);
+void xen_hypercall(u64 nr, u64 a0, void *a1);

-static inline uint64_t __kvm_hypercall_map_gpa_range(uint64_t gpa,
-                                                     uint64_t size, uint64_t flags)
+static inline u64 __kvm_hypercall_map_gpa_range(gpa_t gpa, u64 size, u64 flags)
 {
     return kvm_hypercall(KVM_HC_MAP_GPA_RANGE, gpa, size >> PAGE_SHIFT, flags, 0);
 }

-static inline void kvm_hypercall_map_gpa_range(uint64_t gpa, uint64_t size,
-                                               uint64_t flags)
+static inline void kvm_hypercall_map_gpa_range(gpa_t gpa, u64 size, u64 flags)
 {
-    uint64_t ret = __kvm_hypercall_map_gpa_range(gpa, size, flags);
+    u64 ret = __kvm_hypercall_map_gpa_range(gpa, size, flags);

     GUEST_ASSERT(!ret);
 }
@@ -1456,7 +1452,7 @@ static inline void cli(void)
     asm volatile ("cli");
 }

-void __vm_xsave_require_permission(uint64_t xfeature, const char *name);
+void __vm_xsave_require_permission(u64 xfeature, const char *name);

 #define vm_xsave_require_permission(xfeature) \
     __vm_xsave_require_permission(xfeature, #xfeature)
@@ -1511,17 +1507,17 @@ enum pg_level {
 void tdp_mmu_init(struct kvm_vm *vm, int pgtable_levels,
                   struct pte_masks *pte_masks);
-void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, uint64_t vaddr,
-                   uint64_t paddr, int level);
-void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
-                    uint64_t nr_bytes, int level);
+void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, gva_t gva,
+                   gpa_t gpa, int level);
+void virt_map_level(struct kvm_vm *vm, gva_t gva, gpa_t gpa,
+                    u64 nr_bytes, int level);

 void vm_enable_tdp(struct kvm_vm *vm);
 bool kvm_cpu_has_tdp(void);

-void tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr, uint64_t size);
+void tdp_map(struct kvm_vm *vm, gpa_t l2_gpa, gpa_t gpa, u64 size);
 void tdp_identity_map_default_memslots(struct kvm_vm *vm);
-void tdp_identity_map_1g(struct kvm_vm *vm, uint64_t addr, uint64_t size);
-uint64_t *tdp_get_pte(struct kvm_vm *vm, uint64_t l2_gpa);
+void tdp_identity_map_1g(struct kvm_vm *vm, u64 addr, u64 size);
+u64 *tdp_get_pte(struct kvm_vm *vm, u64 l2_gpa);

 /*
  * Basic CPU control in CR0
diff --git a/tools/testing/selftests/kvm/include/x86/sev.h b/tools/testing/selftests/kvm/include/x86/sev.h
index 008b4169f5e2..1af44c151d60 100644
--- a/tools/testing/selftests/kvm/include/x86/sev.h
+++ b/tools/testing/selftests/kvm/include/x86/sev.h
@@ -46,16 +46,16 @@ static inline bool is_sev_vm(struct kvm_vm *vm)
     return is_sev_es_vm(vm) || vm->type == KVM_X86_SEV_VM;
 }

-void sev_vm_launch(struct kvm_vm *vm, uint32_t policy);
-void sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement);
+void sev_vm_launch(struct kvm_vm *vm, u32 policy);
+void sev_vm_launch_measure(struct kvm_vm *vm, u8 *measurement);
 void sev_vm_launch_finish(struct kvm_vm *vm);
-void snp_vm_launch_start(struct kvm_vm *vm, uint64_t policy);
+void snp_vm_launch_start(struct kvm_vm *vm, u64 policy);
 void snp_vm_launch_update(struct kvm_vm *vm);
 void snp_vm_launch_finish(struct kvm_vm *vm);

-struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code,
+struct kvm_vm *vm_sev_create_with_one_vcpu(u32 type, void *guest_code,
                                            struct kvm_vcpu **cpu);
-void vm_sev_launch(struct kvm_vm *vm, uint64_t policy, uint8_t *measurement);
+void vm_sev_launch(struct kvm_vm *vm, u64 policy, u8 *measurement);

 kvm_static_assert(SEV_RET_SUCCESS == 0);

@@ -85,7 +85,7 @@ static inline u64 snp_default_policy(void)
         unsigned long raw; \
     } sev_cmd = { .c = { \
         .id = (cmd), \
-        .data = (uint64_t)(arg), \
+        .data = (u64)(arg), \
         .sev_fd = (vm)->arch.sev_fd, \
     } }; \
 \
@@ -120,8 +120,8 @@ static inline void sev_register_encrypted_memory(struct kvm_vm *vm,
     vm_ioctl(vm, KVM_MEMORY_ENCRYPT_REG_REGION, &range);
 }

-static inline void sev_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa,
-                                          uint64_t size)
+static inline void sev_launch_update_data(struct kvm_vm *vm, gpa_t gpa,
+                                          u64 size)
 {
     struct kvm_sev_launch_update_data update_data = {
         .uaddr = (unsigned long)addr_gpa2hva(vm, gpa),
@@ -131,8 +131,8 @@ static inline void sev_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa,
     vm_sev_ioctl(vm, KVM_SEV_LAUNCH_UPDATE_DATA, &update_data);
 }

-static inline void snp_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa,
-                                          uint64_t hva, uint64_t size, uint8_t type)
+static inline void snp_launch_update_data(struct kvm_vm *vm, gpa_t gpa,
+                                          u64 hva, u64 size, u8 type)
 {
     struct kvm_sev_snp_launch_update update_data = {
         .uaddr = hva,
diff --git a/tools/testing/selftests/kvm/include/x86/smm.h b/tools/testing/selftests/kvm/include/x86/smm.h
index 19337c34f13e..2d1afa09819b 100644
--- a/tools/testing/selftests/kvm/include/x86/smm.h
+++ b/tools/testing/selftests/kvm/include/x86/smm.h
@@ -8,8 +8,7 @@
 #define SMRAM_MEMSLOT ((1 << 16) | 1)
 #define SMRAM_PAGES (SMRAM_SIZE / PAGE_SIZE)

-void setup_smram(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
-                 uint64_t smram_gpa,
+void setup_smram(struct kvm_vm *vm, struct kvm_vcpu *vcpu, u64 smram_gpa,
                  const void *smi_handler, size_t handler_size);
 void inject_smi(struct kvm_vcpu *vcpu);
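For illustration, how the processor.h fault-safe helpers above are used from
guest code (a sketch, not part of the patch; 0xdeadbeef stands in for an MSR
index the CPU does not implement, so the access takes a #GP):

    u64 val;
    u8 vector;

    /* rdmsr_safe() is generated by BUILD_READ_U64_SAFE_HELPERS(rdmsr). */
    vector = rdmsr_safe(0xdeadbeef, &val);
    GUEST_ASSERT(vector == GP_VECTOR);

    /* kvm_asm_safe() reports the vector instead of killing the guest. */
    vector = kvm_asm_safe("wrmsr", "a"(0ul), "d"(0ul), "c"(0xdeadbeef));
    GUEST_ASSERT(vector == GP_VECTOR);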
diff --git a/tools/testing/selftests/kvm/include/x86/svm_util.h b/tools/testing/selftests/kvm/include/x86/svm_util.h
index 5d7c42534bc4..6c013eb838be 100644
--- a/tools/testing/selftests/kvm/include/x86/svm_util.h
+++ b/tools/testing/selftests/kvm/include/x86/svm_util.h
@@ -16,20 +16,20 @@
 struct svm_test_data {
     /* VMCB */
     struct vmcb *vmcb; /* gva */
     void *vmcb_hva;
-    uint64_t vmcb_gpa;
+    u64 vmcb_gpa;

     /* host state-save area */
     struct vmcb_save_area *save_area; /* gva */
     void *save_area_hva;
-    uint64_t save_area_gpa;
+    u64 save_area_gpa;

     /* MSR-Bitmap */
     void *msr; /* gva */
     void *msr_hva;
-    uint64_t msr_gpa;
+    u64 msr_gpa;

     /* NPT */
-    uint64_t ncr3_gpa;
+    u64 ncr3_gpa;
 };

 static inline void vmmcall(void)
@@ -56,9 +56,9 @@ static inline void vmmcall(void)
         "clgi\n" \
         )

-struct svm_test_data *vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva);
+struct svm_test_data *vcpu_alloc_svm(struct kvm_vm *vm, gva_t *p_svm_gva);
 void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp);
-void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa);
+void run_guest(struct vmcb *vmcb, u64 vmcb_gpa);

 static inline bool kvm_cpu_has_npt(void)
 {
diff --git a/tools/testing/selftests/kvm/include/x86/ucall.h b/tools/testing/selftests/kvm/include/x86/ucall.h
index d3825dcc3cd9..0e4950041e3e 100644
--- a/tools/testing/selftests/kvm/include/x86/ucall.h
+++ b/tools/testing/selftests/kvm/include/x86/ucall.h
@@ -6,7 +6,7 @@

 #define UCALL_EXIT_REASON KVM_EXIT_IO

-static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
+static inline void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa)
 {
 }
diff --git a/tools/testing/selftests/kvm/include/x86/vmx.h b/tools/testing/selftests/kvm/include/x86/vmx.h
index 92b918700d24..90fffaf91595 100644
--- a/tools/testing/selftests/kvm/include/x86/vmx.h
+++ b/tools/testing/selftests/kvm/include/x86/vmx.h
@@ -285,16 +285,16 @@ enum vmcs_field {
 };

 struct vmx_msr_entry {
-    uint32_t index;
-    uint32_t reserved;
-    uint64_t value;
+    u32 index;
+    u32 reserved;
+    u64 value;
 } __attribute__ ((aligned(16)));

 #include "evmcs.h"

-static inline int vmxon(uint64_t phys)
+static inline int vmxon(u64 phys)
 {
-    uint8_t ret;
+    u8 ret;

     __asm__ __volatile__ ("vmxon %[pa]; setna %[ret]"
         : [ret]"=rm"(ret)
@@ -309,9 +309,9 @@ static inline void vmxoff(void)
     __asm__ __volatile__("vmxoff");
 }

-static inline int vmclear(uint64_t vmcs_pa)
+static inline int vmclear(u64 vmcs_pa)
 {
-    uint8_t ret;
+    u8 ret;

     __asm__ __volatile__ ("vmclear %[pa]; setna %[ret]"
         : [ret]"=rm"(ret)
@@ -321,9 +321,9 @@ static inline int vmclear(uint64_t vmcs_pa)
     return ret;
 }

-static inline int vmptrld(uint64_t vmcs_pa)
+static inline int vmptrld(u64 vmcs_pa)
 {
-    uint8_t ret;
+    u8 ret;

     if (enable_evmcs)
         return -1;
@@ -336,10 +336,10 @@ static inline int vmptrld(uint64_t vmcs_pa)
     return ret;
 }

-static inline int vmptrst(uint64_t *value)
+static inline int vmptrst(u64 *value)
 {
-    uint64_t tmp;
-    uint8_t ret;
+    u64 tmp;
+    u8 ret;

     if (enable_evmcs)
         return evmcs_vmptrst(value);
@@ -356,9 +356,9 @@
  * A wrapper around vmptrst that ignores errors and returns zero if the
  * vmptrst instruction fails.
  */
-static inline uint64_t vmptrstz(void)
+static inline u64 vmptrstz(void)
 {
-    uint64_t value = 0;
+    u64 value = 0;

     vmptrst(&value);
     return value;
 }
@@ -391,8 +391,8 @@ static inline int vmlaunch(void)
         "pop %%rcx;"
         "pop %%rbp;"
         : [ret]"=&a"(ret)
-        : [host_rsp]"r"((uint64_t)HOST_RSP),
-          [host_rip]"r"((uint64_t)HOST_RIP)
+        : [host_rsp]"r"((u64)HOST_RSP),
+          [host_rip]"r"((u64)HOST_RIP)
         : "memory", "cc", "rbx", "r8", "r9", "r10",
           "r11", "r12", "r13", "r14", "r15");
     return ret;
@@ -426,8 +426,8 @@ static inline int vmresume(void)
         "pop %%rcx;"
         "pop %%rbp;"
         : [ret]"=&a"(ret)
-        : [host_rsp]"r"((uint64_t)HOST_RSP),
-          [host_rip]"r"((uint64_t)HOST_RIP)
+        : [host_rsp]"r"((u64)HOST_RSP),
+          [host_rip]"r"((u64)HOST_RIP)
         : "memory", "cc", "rbx", "r8", "r9", "r10",
           "r11", "r12", "r13", "r14", "r15");
     return ret;
@@ -447,10 +447,10 @@ static inline void vmcall(void)
               "r10", "r11", "r12", "r13", "r14", "r15");
 }

-static inline int vmread(uint64_t encoding, uint64_t *value)
+static inline int vmread(u64 encoding, u64 *value)
 {
-    uint64_t tmp;
-    uint8_t ret;
+    u64 tmp;
+    u8 ret;

     if (enable_evmcs)
         return evmcs_vmread(encoding, value);
@@ -468,16 +468,16 @@ static inline int vmread(uint64_t encoding, uint64_t *value)
  * A wrapper around vmread that ignores errors and returns zero if the
  * vmread instruction fails.
  */
-static inline uint64_t vmreadz(uint64_t encoding)
+static inline u64 vmreadz(u64 encoding)
 {
-    uint64_t value = 0;
+    u64 value = 0;

     vmread(encoding, &value);
     return value;
 }

-static inline int vmwrite(uint64_t encoding, uint64_t value)
+static inline int vmwrite(u64 encoding, u64 value)
 {
-    uint8_t ret;
+    u8 ret;

     if (enable_evmcs)
         return evmcs_vmwrite(encoding, value);
@@ -490,41 +490,41 @@ static inline int vmwrite(uint64_t encoding, uint64_t value)
     return ret;
 }

-static inline uint32_t vmcs_revision(void)
+static inline u32 vmcs_revision(void)
 {
     return rdmsr(MSR_IA32_VMX_BASIC);
 }

 struct vmx_pages {
     void *vmxon_hva;
-    uint64_t vmxon_gpa;
+    u64 vmxon_gpa;
     void *vmxon;

     void *vmcs_hva;
-    uint64_t vmcs_gpa;
+    u64 vmcs_gpa;
     void *vmcs;

     void *msr_hva;
-    uint64_t msr_gpa;
+    u64 msr_gpa;
     void *msr;

     void *shadow_vmcs_hva;
-    uint64_t shadow_vmcs_gpa;
+    u64 shadow_vmcs_gpa;
     void *shadow_vmcs;

     void *vmread_hva;
-    uint64_t vmread_gpa;
+    u64 vmread_gpa;
     void *vmread;

     void *vmwrite_hva;
-    uint64_t vmwrite_gpa;
+    u64 vmwrite_gpa;
     void *vmwrite;

     void *apic_access_hva;
-    uint64_t apic_access_gpa;
+    u64 apic_access_gpa;
     void *apic_access;

-    uint64_t eptp_gpa;
+    u64 eptp_gpa;
 };

 union vmx_basic {
@@ -550,7 +550,7 @@ union vmx_ctrl_msr {
     };
 };

-struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva);
+struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, gva_t *p_vmx_gva);
 bool prepare_for_vmx_operation(struct vmx_pages *vmx);
 void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp);
 bool load_vmcs(struct vmx_pages *vmx);
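For illustration, the vmx.h accessors above in a nested-VMX test (a sketch,
not part of the patch; l2_guest_code is a hypothetical label for the L2 entry
point):

    /* Host side: allocate the VMX pages and pass their GVA to the guest. */
    gva_t vmx_gva;
    struct vmx_pages *vmx = vcpu_alloc_vmx(vm, &vmx_gva);

    /* Guest side, after prepare_for_vmx_operation() and load_vmcs(): */
    GUEST_ASSERT(!vmwrite(GUEST_RIP, (u64)l2_guest_code));
    GUEST_ASSERT(vmreadz(GUEST_RIP) == (u64)l2_guest_code);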
