Diffstat (limited to 'tools/testing/selftests/kvm/lib')
-rw-r--r--  tools/testing/selftests/kvm/lib/arm64/gic.c | 6
-rw-r--r--  tools/testing/selftests/kvm/lib/arm64/gic_private.h | 26
-rw-r--r--  tools/testing/selftests/kvm/lib/arm64/gic_v3.c | 90
-rw-r--r--  tools/testing/selftests/kvm/lib/arm64/gic_v3_its.c | 11
-rw-r--r--  tools/testing/selftests/kvm/lib/arm64/processor.c | 165
-rw-r--r--  tools/testing/selftests/kvm/lib/arm64/ucall.c | 12
-rw-r--r--  tools/testing/selftests/kvm/lib/arm64/vgic.c | 40
-rw-r--r--  tools/testing/selftests/kvm/lib/elf.c | 17
-rw-r--r--  tools/testing/selftests/kvm/lib/guest_modes.c | 2
-rw-r--r--  tools/testing/selftests/kvm/lib/guest_sprintf.c | 18
-rw-r--r--  tools/testing/selftests/kvm/lib/kvm_util.c | 368
-rw-r--r--  tools/testing/selftests/kvm/lib/loongarch/processor.c | 121
-rw-r--r--  tools/testing/selftests/kvm/lib/loongarch/ucall.c | 12
-rw-r--r--  tools/testing/selftests/kvm/lib/memstress.c | 42
-rw-r--r--  tools/testing/selftests/kvm/lib/riscv/processor.c | 96
-rw-r--r--  tools/testing/selftests/kvm/lib/s390/diag318_test_handler.c | 12
-rw-r--r--  tools/testing/selftests/kvm/lib/s390/facility.c | 2
-rw-r--r--  tools/testing/selftests/kvm/lib/s390/processor.c | 65
-rw-r--r--  tools/testing/selftests/kvm/lib/sparsebit.c | 18
-rw-r--r--  tools/testing/selftests/kvm/lib/test_util.c | 30
-rw-r--r--  tools/testing/selftests/kvm/lib/ucall_common.c | 34
-rw-r--r--  tools/testing/selftests/kvm/lib/userfaultfd_util.c | 14
-rw-r--r--  tools/testing/selftests/kvm/lib/x86/apic.c | 2
-rw-r--r--  tools/testing/selftests/kvm/lib/x86/hyperv.c | 14
-rw-r--r--  tools/testing/selftests/kvm/lib/x86/memstress.c | 14
-rw-r--r--  tools/testing/selftests/kvm/lib/x86/pmu.c | 8
-rw-r--r--  tools/testing/selftests/kvm/lib/x86/processor.c | 307
-rw-r--r--  tools/testing/selftests/kvm/lib/x86/sev.c | 20
-rw-r--r--  tools/testing/selftests/kvm/lib/x86/svm.c | 18
-rw-r--r--  tools/testing/selftests/kvm/lib/x86/ucall.c | 4
-rw-r--r--  tools/testing/selftests/kvm/lib/x86/vmx.c | 44
31 files changed, 780 insertions(+), 852 deletions(-)
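
The diff below is a mechanical type rename: uint32_t/uint64_t become the kernel-style u32/u64, and vm_paddr_t/vm_vaddr_t become gpa_t/gva_t. As a rough sketch of what such aliases amount to — the exact definitions and where they live are assumptions for illustration, not shown by this patch:

/*
 * Illustrative only: in tools/ builds the u8..u64 aliases conventionally
 * come from <linux/types.h>; gpa_t/gva_t are assumed to be plain 64-bit
 * aliases of the old typedefs.
 */
#include <stdint.h>

typedef uint8_t  u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;
typedef int16_t  s16;

typedef u64 gpa_t;	/* guest physical address, was vm_paddr_t */
typedef u64 gva_t;	/* guest virtual address, was vm_vaddr_t */
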
diff --git a/tools/testing/selftests/kvm/lib/arm64/gic.c b/tools/testing/selftests/kvm/lib/arm64/gic.c
index b023868fe0b8..011dfe1dfcb3 100644
--- a/tools/testing/selftests/kvm/lib/arm64/gic.c
+++ b/tools/testing/selftests/kvm/lib/arm64/gic.c
@@ -50,7 +50,7 @@ static void gic_dist_init(enum gic_type type, unsigned int nr_cpus)
void gic_init(enum gic_type type, unsigned int nr_cpus)
{
- uint32_t cpu = guest_get_vcpuid();
+ u32 cpu = guest_get_vcpuid();
GUEST_ASSERT(type < GIC_TYPE_MAX);
GUEST_ASSERT(nr_cpus);
@@ -73,7 +73,7 @@ void gic_irq_disable(unsigned int intid)
unsigned int gic_get_and_ack_irq(void)
{
- uint64_t irqstat;
+ u64 irqstat;
unsigned int intid;
GUEST_ASSERT(gic_common_ops);
@@ -102,7 +102,7 @@ void gic_set_eoi_split(bool split)
gic_common_ops->gic_set_eoi_split(split);
}
-void gic_set_priority_mask(uint64_t pmr)
+void gic_set_priority_mask(u64 pmr)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_set_priority_mask(pmr);
diff --git a/tools/testing/selftests/kvm/lib/arm64/gic_private.h b/tools/testing/selftests/kvm/lib/arm64/gic_private.h
index b6a7e30c3eb1..6d393f5c5685 100644
--- a/tools/testing/selftests/kvm/lib/arm64/gic_private.h
+++ b/tools/testing/selftests/kvm/lib/arm64/gic_private.h
@@ -12,20 +12,20 @@ struct gic_common_ops {
void (*gic_cpu_init)(unsigned int cpu);
void (*gic_irq_enable)(unsigned int intid);
void (*gic_irq_disable)(unsigned int intid);
- uint64_t (*gic_read_iar)(void);
- void (*gic_write_eoir)(uint32_t irq);
- void (*gic_write_dir)(uint32_t irq);
+ u64 (*gic_read_iar)(void);
+ void (*gic_write_eoir)(u32 irq);
+ void (*gic_write_dir)(u32 irq);
void (*gic_set_eoi_split)(bool split);
- void (*gic_set_priority_mask)(uint64_t mask);
- void (*gic_set_priority)(uint32_t intid, uint32_t prio);
- void (*gic_irq_set_active)(uint32_t intid);
- void (*gic_irq_clear_active)(uint32_t intid);
- bool (*gic_irq_get_active)(uint32_t intid);
- void (*gic_irq_set_pending)(uint32_t intid);
- void (*gic_irq_clear_pending)(uint32_t intid);
- bool (*gic_irq_get_pending)(uint32_t intid);
- void (*gic_irq_set_config)(uint32_t intid, bool is_edge);
- void (*gic_irq_set_group)(uint32_t intid, bool group);
+ void (*gic_set_priority_mask)(u64 mask);
+ void (*gic_set_priority)(u32 intid, u32 prio);
+ void (*gic_irq_set_active)(u32 intid);
+ void (*gic_irq_clear_active)(u32 intid);
+ bool (*gic_irq_get_active)(u32 intid);
+ void (*gic_irq_set_pending)(u32 intid);
+ void (*gic_irq_clear_pending)(u32 intid);
+ bool (*gic_irq_get_pending)(u32 intid);
+ void (*gic_irq_set_config)(u32 intid, bool is_edge);
+ void (*gic_irq_set_group)(u32 intid, bool group);
};
extern const struct gic_common_ops gicv3_ops;
diff --git a/tools/testing/selftests/kvm/lib/arm64/gic_v3.c b/tools/testing/selftests/kvm/lib/arm64/gic_v3.c
index 50754a27f493..a99a53accfe9 100644
--- a/tools/testing/selftests/kvm/lib/arm64/gic_v3.c
+++ b/tools/testing/selftests/kvm/lib/arm64/gic_v3.c
@@ -50,13 +50,13 @@ static void gicv3_gicd_wait_for_rwp(void)
}
}
-static inline volatile void *gicr_base_cpu(uint32_t cpu)
+static inline volatile void *gicr_base_cpu(u32 cpu)
{
/* Align all the redistributors sequentially */
return GICR_BASE_GVA + cpu * SZ_64K * 2;
}
-static void gicv3_gicr_wait_for_rwp(uint32_t cpu)
+static void gicv3_gicr_wait_for_rwp(u32 cpu)
{
unsigned int count = 100000; /* 1s */
@@ -66,7 +66,7 @@ static void gicv3_gicr_wait_for_rwp(uint32_t cpu)
}
}
-static void gicv3_wait_for_rwp(uint32_t cpu_or_dist)
+static void gicv3_wait_for_rwp(u32 cpu_or_dist)
{
if (cpu_or_dist & DIST_BIT)
gicv3_gicd_wait_for_rwp();
@@ -91,34 +91,34 @@ static enum gicv3_intid_range get_intid_range(unsigned int intid)
return INVALID_RANGE;
}
-static uint64_t gicv3_read_iar(void)
+static u64 gicv3_read_iar(void)
{
- uint64_t irqstat = read_sysreg_s(SYS_ICC_IAR1_EL1);
+ u64 irqstat = read_sysreg_s(SYS_ICC_IAR1_EL1);
dsb(sy);
return irqstat;
}
-static void gicv3_write_eoir(uint32_t irq)
+static void gicv3_write_eoir(u32 irq)
{
write_sysreg_s(irq, SYS_ICC_EOIR1_EL1);
isb();
}
-static void gicv3_write_dir(uint32_t irq)
+static void gicv3_write_dir(u32 irq)
{
write_sysreg_s(irq, SYS_ICC_DIR_EL1);
isb();
}
-static void gicv3_set_priority_mask(uint64_t mask)
+static void gicv3_set_priority_mask(u64 mask)
{
write_sysreg_s(mask, SYS_ICC_PMR_EL1);
}
static void gicv3_set_eoi_split(bool split)
{
- uint32_t val;
+ u32 val;
/*
* All other fields are read-only, so no need to read CTLR first. In
@@ -129,29 +129,29 @@ static void gicv3_set_eoi_split(bool split)
isb();
}
-uint32_t gicv3_reg_readl(uint32_t cpu_or_dist, uint64_t offset)
+u32 gicv3_reg_readl(u32 cpu_or_dist, u64 offset)
{
volatile void *base = cpu_or_dist & DIST_BIT ? GICD_BASE_GVA
: sgi_base_from_redist(gicr_base_cpu(cpu_or_dist));
return readl(base + offset);
}
-void gicv3_reg_writel(uint32_t cpu_or_dist, uint64_t offset, uint32_t reg_val)
+void gicv3_reg_writel(u32 cpu_or_dist, u64 offset, u32 reg_val)
{
volatile void *base = cpu_or_dist & DIST_BIT ? GICD_BASE_GVA
: sgi_base_from_redist(gicr_base_cpu(cpu_or_dist));
writel(reg_val, base + offset);
}
-uint32_t gicv3_getl_fields(uint32_t cpu_or_dist, uint64_t offset, uint32_t mask)
+u32 gicv3_getl_fields(u32 cpu_or_dist, u64 offset, u32 mask)
{
return gicv3_reg_readl(cpu_or_dist, offset) & mask;
}
-void gicv3_setl_fields(uint32_t cpu_or_dist, uint64_t offset,
- uint32_t mask, uint32_t reg_val)
+void gicv3_setl_fields(u32 cpu_or_dist, u64 offset,
+ u32 mask, u32 reg_val)
{
- uint32_t tmp = gicv3_reg_readl(cpu_or_dist, offset) & ~mask;
+ u32 tmp = gicv3_reg_readl(cpu_or_dist, offset) & ~mask;
tmp |= (reg_val & mask);
gicv3_reg_writel(cpu_or_dist, offset, tmp);
@@ -165,14 +165,14 @@ void gicv3_setl_fields(uint32_t cpu_or_dist, uint64_t offset,
* map that doesn't implement it; like GICR_WAKER's offset of 0x0014 being
* marked as "Reserved" in the Distributor map.
*/
-static void gicv3_access_reg(uint32_t intid, uint64_t offset,
- uint32_t reg_bits, uint32_t bits_per_field,
- bool write, uint32_t *val)
+static void gicv3_access_reg(u32 intid, u64 offset,
+ u32 reg_bits, u32 bits_per_field,
+ bool write, u32 *val)
{
- uint32_t cpu = guest_get_vcpuid();
+ u32 cpu = guest_get_vcpuid();
enum gicv3_intid_range intid_range = get_intid_range(intid);
- uint32_t fields_per_reg, index, mask, shift;
- uint32_t cpu_or_dist;
+ u32 fields_per_reg, index, mask, shift;
+ u32 cpu_or_dist;
GUEST_ASSERT(bits_per_field <= reg_bits);
GUEST_ASSERT(!write || *val < (1U << bits_per_field));
@@ -197,32 +197,32 @@ static void gicv3_access_reg(uint32_t intid, uint64_t offset,
*val = gicv3_getl_fields(cpu_or_dist, offset, mask) >> shift;
}
-static void gicv3_write_reg(uint32_t intid, uint64_t offset,
- uint32_t reg_bits, uint32_t bits_per_field, uint32_t val)
+static void gicv3_write_reg(u32 intid, u64 offset,
+ u32 reg_bits, u32 bits_per_field, u32 val)
{
gicv3_access_reg(intid, offset, reg_bits,
bits_per_field, true, &val);
}
-static uint32_t gicv3_read_reg(uint32_t intid, uint64_t offset,
- uint32_t reg_bits, uint32_t bits_per_field)
+static u32 gicv3_read_reg(u32 intid, u64 offset,
+ u32 reg_bits, u32 bits_per_field)
{
- uint32_t val;
+ u32 val;
gicv3_access_reg(intid, offset, reg_bits,
bits_per_field, false, &val);
return val;
}
-static void gicv3_set_priority(uint32_t intid, uint32_t prio)
+static void gicv3_set_priority(u32 intid, u32 prio)
{
gicv3_write_reg(intid, GICD_IPRIORITYR, 32, 8, prio);
}
/* Sets the intid to be level-sensitive or edge-triggered. */
-static void gicv3_irq_set_config(uint32_t intid, bool is_edge)
+static void gicv3_irq_set_config(u32 intid, bool is_edge)
{
- uint32_t val;
+ u32 val;
/* N/A for private interrupts. */
GUEST_ASSERT(get_intid_range(intid) == SPI_RANGE);
@@ -230,57 +230,57 @@ static void gicv3_irq_set_config(uint32_t intid, bool is_edge)
gicv3_write_reg(intid, GICD_ICFGR, 32, 2, val);
}
-static void gicv3_irq_enable(uint32_t intid)
+static void gicv3_irq_enable(u32 intid)
{
bool is_spi = get_intid_range(intid) == SPI_RANGE;
- uint32_t cpu = guest_get_vcpuid();
+ u32 cpu = guest_get_vcpuid();
gicv3_write_reg(intid, GICD_ISENABLER, 32, 1, 1);
gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu);
}
-static void gicv3_irq_disable(uint32_t intid)
+static void gicv3_irq_disable(u32 intid)
{
bool is_spi = get_intid_range(intid) == SPI_RANGE;
- uint32_t cpu = guest_get_vcpuid();
+ u32 cpu = guest_get_vcpuid();
gicv3_write_reg(intid, GICD_ICENABLER, 32, 1, 1);
gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu);
}
-static void gicv3_irq_set_active(uint32_t intid)
+static void gicv3_irq_set_active(u32 intid)
{
gicv3_write_reg(intid, GICD_ISACTIVER, 32, 1, 1);
}
-static void gicv3_irq_clear_active(uint32_t intid)
+static void gicv3_irq_clear_active(u32 intid)
{
gicv3_write_reg(intid, GICD_ICACTIVER, 32, 1, 1);
}
-static bool gicv3_irq_get_active(uint32_t intid)
+static bool gicv3_irq_get_active(u32 intid)
{
return gicv3_read_reg(intid, GICD_ISACTIVER, 32, 1);
}
-static void gicv3_irq_set_pending(uint32_t intid)
+static void gicv3_irq_set_pending(u32 intid)
{
gicv3_write_reg(intid, GICD_ISPENDR, 32, 1, 1);
}
-static void gicv3_irq_clear_pending(uint32_t intid)
+static void gicv3_irq_clear_pending(u32 intid)
{
gicv3_write_reg(intid, GICD_ICPENDR, 32, 1, 1);
}
-static bool gicv3_irq_get_pending(uint32_t intid)
+static bool gicv3_irq_get_pending(u32 intid)
{
return gicv3_read_reg(intid, GICD_ISPENDR, 32, 1);
}
static void gicv3_enable_redist(volatile void *redist_base)
{
- uint32_t val = readl(redist_base + GICR_WAKER);
+ u32 val = readl(redist_base + GICR_WAKER);
unsigned int count = 100000; /* 1s */
val &= ~GICR_WAKER_ProcessorSleep;
@@ -293,10 +293,10 @@ static void gicv3_enable_redist(volatile void *redist_base)
}
}
-static void gicv3_set_group(uint32_t intid, bool grp)
+static void gicv3_set_group(u32 intid, bool grp)
{
- uint32_t cpu_or_dist;
- uint32_t val;
+ u32 cpu_or_dist;
+ u32 val;
cpu_or_dist = (get_intid_range(intid) == SPI_RANGE) ? DIST_BIT : guest_get_vcpuid();
val = gicv3_reg_readl(cpu_or_dist, GICD_IGROUPR + (intid / 32) * 4);
@@ -424,8 +424,8 @@ const struct gic_common_ops gicv3_ops = {
.gic_irq_set_group = gicv3_set_group,
};
-void gic_rdist_enable_lpis(vm_paddr_t cfg_table, size_t cfg_table_size,
- vm_paddr_t pend_table)
+void gic_rdist_enable_lpis(gpa_t cfg_table, size_t cfg_table_size,
+ gpa_t pend_table)
{
volatile void *rdist_base = gicr_base_cpu(guest_get_vcpuid());
diff --git a/tools/testing/selftests/kvm/lib/arm64/gic_v3_its.c b/tools/testing/selftests/kvm/lib/arm64/gic_v3_its.c
index 7f9fdcf42ae6..1188b578121d 100644
--- a/tools/testing/selftests/kvm/lib/arm64/gic_v3_its.c
+++ b/tools/testing/selftests/kvm/lib/arm64/gic_v3_its.c
@@ -54,7 +54,7 @@ static unsigned long its_find_baser(unsigned int type)
return -1;
}
-static void its_install_table(unsigned int type, vm_paddr_t base, size_t size)
+static void its_install_table(unsigned int type, gpa_t base, size_t size)
{
unsigned long offset = its_find_baser(type);
u64 baser;
@@ -69,7 +69,7 @@ static void its_install_table(unsigned int type, vm_paddr_t base, size_t size)
its_write_u64(offset, baser);
}
-static void its_install_cmdq(vm_paddr_t base, size_t size)
+static void its_install_cmdq(gpa_t base, size_t size)
{
u64 cbaser;
@@ -82,9 +82,8 @@ static void its_install_cmdq(vm_paddr_t base, size_t size)
its_write_u64(GITS_CBASER, cbaser);
}
-void its_init(vm_paddr_t coll_tbl, size_t coll_tbl_sz,
- vm_paddr_t device_tbl, size_t device_tbl_sz,
- vm_paddr_t cmdq, size_t cmdq_size)
+void its_init(gpa_t coll_tbl, size_t coll_tbl_sz, gpa_t device_tbl,
+ size_t device_tbl_sz, gpa_t cmdq, size_t cmdq_size)
{
u32 ctlr;
@@ -204,7 +203,7 @@ static void its_send_cmd(void *cmdq_base, struct its_cmd_block *cmd)
}
}
-void its_send_mapd_cmd(void *cmdq_base, u32 device_id, vm_paddr_t itt_base,
+void its_send_mapd_cmd(void *cmdq_base, u32 device_id, gpa_t itt_base,
size_t itt_size, bool valid)
{
struct its_cmd_block cmd = {};
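
With the gpa_t conversion, ITS setup takes guest physical addresses directly. A minimal guest-side sketch — the three table GPAs and the SZ_64K sizes are illustrative stand-ins for whatever the test actually allocates:

/*
 * Illustrative only: callers allocate the backing pages themselves
 * (e.g. via the library's physical-page allocator) and pass the GPAs here.
 */
static void guest_setup_its(gpa_t coll_tbl, gpa_t device_tbl, gpa_t cmdq)
{
	its_init(coll_tbl, SZ_64K,	/* collection table */
		 device_tbl, SZ_64K,	/* device table */
		 cmdq, SZ_64K);		/* command queue */
}
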
diff --git a/tools/testing/selftests/kvm/lib/arm64/processor.c b/tools/testing/selftests/kvm/lib/arm64/processor.c
index 43ea40edc533..01325bf4d36f 100644
--- a/tools/testing/selftests/kvm/lib/arm64/processor.c
+++ b/tools/testing/selftests/kvm/lib/arm64/processor.c
@@ -19,20 +19,20 @@
#define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN 0xac0000
-static vm_vaddr_t exception_handlers;
+static gva_t exception_handlers;
-static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva)
+static u64 pgd_index(struct kvm_vm *vm, gva_t gva)
{
unsigned int shift = (vm->mmu.pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
- uint64_t mask = (1UL << (vm->va_bits - shift)) - 1;
+ u64 mask = (1UL << (vm->va_bits - shift)) - 1;
return (gva >> shift) & mask;
}
-static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva)
+static u64 pud_index(struct kvm_vm *vm, gva_t gva)
{
unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift;
- uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
+ u64 mask = (1UL << (vm->page_shift - 3)) - 1;
TEST_ASSERT(vm->mmu.pgtable_levels == 4,
"Mode %d does not have 4 page table levels", vm->mode);
@@ -40,10 +40,10 @@ static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva)
return (gva >> shift) & mask;
}
-static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva)
+static u64 pmd_index(struct kvm_vm *vm, gva_t gva)
{
unsigned int shift = (vm->page_shift - 3) + vm->page_shift;
- uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
+ u64 mask = (1UL << (vm->page_shift - 3)) - 1;
TEST_ASSERT(vm->mmu.pgtable_levels >= 3,
"Mode %d does not have >= 3 page table levels", vm->mode);
@@ -51,9 +51,9 @@ static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva)
return (gva >> shift) & mask;
}
-static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva)
+static u64 pte_index(struct kvm_vm *vm, gva_t gva)
{
- uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
+ u64 mask = (1UL << (vm->page_shift - 3)) - 1;
return (gva >> vm->page_shift) & mask;
}
@@ -63,9 +63,9 @@ static inline bool use_lpa2_pte_format(struct kvm_vm *vm)
(vm->pa_bits > 48 || vm->va_bits > 48);
}
-static uint64_t addr_pte(struct kvm_vm *vm, uint64_t pa, uint64_t attrs)
+static u64 addr_pte(struct kvm_vm *vm, u64 pa, u64 attrs)
{
- uint64_t pte;
+ u64 pte;
if (use_lpa2_pte_format(vm)) {
pte = pa & PTE_ADDR_MASK_LPA2(vm->page_shift);
@@ -81,9 +81,9 @@ static uint64_t addr_pte(struct kvm_vm *vm, uint64_t pa, uint64_t attrs)
return pte;
}
-static uint64_t pte_addr(struct kvm_vm *vm, uint64_t pte)
+static u64 pte_addr(struct kvm_vm *vm, u64 pte)
{
- uint64_t pa;
+ u64 pa;
if (use_lpa2_pte_format(vm)) {
pa = pte & PTE_ADDR_MASK_LPA2(vm->page_shift);
@@ -97,13 +97,13 @@ static uint64_t pte_addr(struct kvm_vm *vm, uint64_t pte)
return pa;
}
-static uint64_t ptrs_per_pgd(struct kvm_vm *vm)
+static u64 ptrs_per_pgd(struct kvm_vm *vm)
{
unsigned int shift = (vm->mmu.pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
return 1 << (vm->va_bits - shift);
}
-static uint64_t __maybe_unused ptrs_per_pte(struct kvm_vm *vm)
+static u64 __maybe_unused ptrs_per_pte(struct kvm_vm *vm)
{
return 1 << (vm->page_shift - 3);
}
@@ -121,47 +121,46 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm)
vm->mmu.pgd_created = true;
}
-static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
- uint64_t flags)
+static void _virt_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa,
+ u64 flags)
{
- uint8_t attr_idx = flags & (PTE_ATTRINDX_MASK >> PTE_ATTRINDX_SHIFT);
- uint64_t pg_attr;
- uint64_t *ptep;
+ u8 attr_idx = flags & (PTE_ATTRINDX_MASK >> PTE_ATTRINDX_SHIFT);
+ u64 pg_attr;
+ u64 *ptep;
- TEST_ASSERT((vaddr % vm->page_size) == 0,
+ TEST_ASSERT((gva % vm->page_size) == 0,
"Virtual address not on page boundary,\n"
- " vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
- TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
- (vaddr >> vm->page_shift)),
- "Invalid virtual address, vaddr: 0x%lx", vaddr);
- TEST_ASSERT((paddr % vm->page_size) == 0,
- "Physical address not on page boundary,\n"
- " paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
- TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
- "Physical address beyond beyond maximum supported,\n"
- " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
- paddr, vm->max_gfn, vm->page_size);
-
- ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pgd_index(vm, vaddr) * 8;
+ " gva: 0x%lx vm->page_size: 0x%x", gva, vm->page_size);
+ TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),
+ "Invalid virtual address, gva: 0x%lx", gva);
+ TEST_ASSERT((gpa % vm->page_size) == 0,
+ "Physical address not on page boundary,\n"
+ " gpa: 0x%lx vm->page_size: 0x%x", gpa, vm->page_size);
+ TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
+ "Physical address beyond beyond maximum supported,\n"
+ " gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
+ gpa, vm->max_gfn, vm->page_size);
+
+ ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pgd_index(vm, gva) * 8;
if (!*ptep)
*ptep = addr_pte(vm, vm_alloc_page_table(vm),
PGD_TYPE_TABLE | PTE_VALID);
switch (vm->mmu.pgtable_levels) {
case 4:
- ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8;
+ ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8;
if (!*ptep)
*ptep = addr_pte(vm, vm_alloc_page_table(vm),
PUD_TYPE_TABLE | PTE_VALID);
/* fall through */
case 3:
- ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8;
+ ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8;
if (!*ptep)
*ptep = addr_pte(vm, vm_alloc_page_table(vm),
PMD_TYPE_TABLE | PTE_VALID);
/* fall through */
case 2:
- ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8;
+ ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8;
break;
default:
TEST_FAIL("Page table levels must be 2, 3, or 4");
@@ -171,19 +170,19 @@ static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
if (!use_lpa2_pte_format(vm))
pg_attr |= PTE_SHARED;
- *ptep = addr_pte(vm, paddr, pg_attr);
+ *ptep = addr_pte(vm, gpa, pg_attr);
}
-void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)
{
- uint64_t attr_idx = MT_NORMAL;
+ u64 attr_idx = MT_NORMAL;
- _virt_pg_map(vm, vaddr, paddr, attr_idx);
+ _virt_pg_map(vm, gva, gpa, attr_idx);
}
-uint64_t *virt_get_pte_hva_at_level(struct kvm_vm *vm, vm_vaddr_t gva, int level)
+u64 *virt_get_pte_hva_at_level(struct kvm_vm *vm, gva_t gva, int level)
{
- uint64_t *ptep;
+ u64 *ptep;
if (!vm->mmu.pgd_created)
goto unmapped_gva;
@@ -225,23 +224,23 @@ unmapped_gva:
exit(EXIT_FAILURE);
}
-uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva)
+u64 *virt_get_pte_hva(struct kvm_vm *vm, gva_t gva)
{
return virt_get_pte_hva_at_level(vm, gva, 3);
}
-vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
+gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva)
{
- uint64_t *ptep = virt_get_pte_hva(vm, gva);
+ u64 *ptep = virt_get_pte_hva(vm, gva);
return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));
}
-static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level)
+static void pte_dump(FILE *stream, struct kvm_vm *vm, u8 indent, u64 page, int level)
{
#ifdef DEBUG
static const char * const type[] = { "", "pud", "pmd", "pte" };
- uint64_t pte, *ptep;
+ u64 pte, *ptep;
if (level == 4)
return;
@@ -256,10 +255,10 @@ static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t p
#endif
}
-void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
+void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent)
{
int level = 4 - (vm->mmu.pgtable_levels - 1);
- uint64_t pgd, *ptep;
+ u64 pgd, *ptep;
if (!vm->mmu.pgd_created)
return;
@@ -298,7 +297,7 @@ void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init)
{
struct kvm_vcpu_init default_init = { .target = -1, };
struct kvm_vm *vm = vcpu->vm;
- uint64_t sctlr_el1, tcr_el1, ttbr0_el1;
+ u64 sctlr_el1, tcr_el1, ttbr0_el1;
if (!init) {
kvm_get_default_vcpu_target(vm, &default_init);
@@ -397,9 +396,9 @@ void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init)
HCR_EL2_RW | HCR_EL2_TGE | HCR_EL2_E2H);
}
-void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
+void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, u8 indent)
{
- uint64_t pstate, pc;
+ u64 pstate, pc;
pstate = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pstate));
pc = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc));
@@ -410,29 +409,29 @@ void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
{
- vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
+ vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (u64)guest_code);
}
-static struct kvm_vcpu *__aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+static struct kvm_vcpu *__aarch64_vcpu_add(struct kvm_vm *vm, u32 vcpu_id,
struct kvm_vcpu_init *init)
{
size_t stack_size;
- uint64_t stack_vaddr;
+ gva_t stack_gva;
struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id);
stack_size = vm->page_size == 4096 ? DEFAULT_STACK_PGS * vm->page_size :
vm->page_size;
- stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
- DEFAULT_ARM64_GUEST_STACK_VADDR_MIN,
- MEM_REGION_DATA);
+ stack_gva = __vm_alloc(vm, stack_size,
+ DEFAULT_ARM64_GUEST_STACK_VADDR_MIN,
+ MEM_REGION_DATA);
aarch64_vcpu_setup(vcpu, init);
- vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_SP_EL1), stack_vaddr + stack_size);
+ vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_SP_EL1), stack_gva + stack_size);
return vcpu;
}
-struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, u32 vcpu_id,
struct kvm_vcpu_init *init, void *guest_code)
{
struct kvm_vcpu *vcpu = __aarch64_vcpu_add(vm, vcpu_id, init);
@@ -442,7 +441,7 @@ struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
return vcpu;
}
-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id)
{
return __aarch64_vcpu_add(vm, vcpu_id, NULL);
}
@@ -459,13 +458,13 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
for (i = 0; i < num; i++) {
vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.regs[i]),
- va_arg(ap, uint64_t));
+ va_arg(ap, u64));
}
va_end(ap);
}
-void kvm_exit_unexpected_exception(int vector, uint64_t ec, bool valid_ec)
+void kvm_exit_unexpected_exception(int vector, u64 ec, bool valid_ec)
{
ucall(UCALL_UNHANDLED, 3, vector, ec, valid_ec);
while (1)
@@ -498,7 +497,7 @@ void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu)
{
extern char vectors;
- vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_VBAR_EL1), (uint64_t)&vectors);
+ vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_VBAR_EL1), (u64)&vectors);
}
void route_exception(struct ex_regs *regs, int vector)
@@ -536,10 +535,10 @@ unexpected_exception:
void vm_init_descriptor_tables(struct kvm_vm *vm)
{
- vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers),
- vm->page_size, MEM_REGION_DATA);
+ vm->handlers = __vm_alloc(vm, sizeof(struct handlers), vm->page_size,
+ MEM_REGION_DATA);
- *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
+ *(gva_t *)addr_gva2hva(vm, (gva_t)(&exception_handlers)) = vm->handlers;
}
void vm_install_sync_handler(struct kvm_vm *vm, int vector, int ec,
@@ -563,13 +562,13 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,
handlers->exception_handlers[vector][0] = handler;
}
-uint32_t guest_get_vcpuid(void)
+u32 guest_get_vcpuid(void)
{
return read_sysreg(tpidr_el1);
}
-static uint32_t max_ipa_for_page_size(uint32_t vm_ipa, uint32_t gran,
- uint32_t not_sup_val, uint32_t ipa52_min_val)
+static u32 max_ipa_for_page_size(u32 vm_ipa, u32 gran,
+ u32 not_sup_val, u32 ipa52_min_val)
{
if (gran == not_sup_val)
return 0;
@@ -579,16 +578,16 @@ static uint32_t max_ipa_for_page_size(uint32_t vm_ipa, uint32_t gran,
return min(vm_ipa, 48U);
}
-void aarch64_get_supported_page_sizes(uint32_t ipa, uint32_t *ipa4k,
- uint32_t *ipa16k, uint32_t *ipa64k)
+void aarch64_get_supported_page_sizes(u32 ipa, u32 *ipa4k,
+ u32 *ipa16k, u32 *ipa64k)
{
struct kvm_vcpu_init preferred_init;
int kvm_fd, vm_fd, vcpu_fd, err;
- uint64_t val;
- uint32_t gran;
+ u64 val;
+ u32 gran;
struct kvm_one_reg reg = {
.id = KVM_ARM64_SYS_REG(SYS_ID_AA64MMFR0_EL1),
- .addr = (uint64_t)&val,
+ .addr = (u64)&val,
};
kvm_fd = open_kvm_dev_path_or_exit();
@@ -646,17 +645,17 @@ void aarch64_get_supported_page_sizes(uint32_t ipa, uint32_t *ipa4k,
: "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7")
-void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
- uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
- uint64_t arg6, struct arm_smccc_res *res)
+void smccc_hvc(u32 function_id, u64 arg0, u64 arg1,
+ u64 arg2, u64 arg3, u64 arg4, u64 arg5,
+ u64 arg6, struct arm_smccc_res *res)
{
__smccc_call(hvc, function_id, arg0, arg1, arg2, arg3, arg4, arg5,
arg6, res);
}
-void smccc_smc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
- uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
- uint64_t arg6, struct arm_smccc_res *res)
+void smccc_smc(u32 function_id, u64 arg0, u64 arg1,
+ u64 arg2, u64 arg3, u64 arg4, u64 arg5,
+ u64 arg6, struct arm_smccc_res *res)
{
__smccc_call(smc, function_id, arg0, arg1, arg2, arg3, arg4, arg5,
arg6, res);
@@ -671,7 +670,7 @@ void kvm_selftest_arch_init(void)
guest_modes_append_default();
}
-void vm_vaddr_populate_bitmap(struct kvm_vm *vm)
+void vm_populate_gva_bitmap(struct kvm_vm *vm)
{
/*
* arm64 selftests use only TTBR0_EL1, meaning that the valid VA space
diff --git a/tools/testing/selftests/kvm/lib/arm64/ucall.c b/tools/testing/selftests/kvm/lib/arm64/ucall.c
index ddab0ce89d4d..e0550ad5aa75 100644
--- a/tools/testing/selftests/kvm/lib/arm64/ucall.c
+++ b/tools/testing/selftests/kvm/lib/arm64/ucall.c
@@ -6,17 +6,17 @@
*/
#include "kvm_util.h"
-vm_vaddr_t *ucall_exit_mmio_addr;
+gva_t *ucall_exit_mmio_addr;
-void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
+void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa)
{
- vm_vaddr_t mmio_gva = vm_vaddr_unused_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR);
+ gva_t mmio_gva = vm_unused_gva_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR);
virt_map(vm, mmio_gva, mmio_gpa, 1);
vm->ucall_mmio_addr = mmio_gpa;
- write_guest_global(vm, ucall_exit_mmio_addr, (vm_vaddr_t *)mmio_gva);
+ write_guest_global(vm, ucall_exit_mmio_addr, (gva_t *)mmio_gva);
}
void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
@@ -25,9 +25,9 @@ void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
if (run->exit_reason == KVM_EXIT_MMIO &&
run->mmio.phys_addr == vcpu->vm->ucall_mmio_addr) {
- TEST_ASSERT(run->mmio.is_write && run->mmio.len == sizeof(uint64_t),
+ TEST_ASSERT(run->mmio.is_write && run->mmio.len == sizeof(u64),
"Unexpected ucall exit mmio address access");
- return (void *)(*((uint64_t *)run->mmio.data));
+ return (void *)(*((u64 *)run->mmio.data));
}
return NULL;
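
On the host side, the pointer returned above is consumed through the generic ucall API. A minimal sketch of a typical run loop, assuming the usual get_ucall()/REPORT_GUEST_ASSERT() helpers from ucall_common:

/* Sketch of a host run loop draining arm64 MMIO ucalls; handling of
 * unexpected exit reasons is omitted for brevity. */
static void run_vcpu_until_done(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	for (;;) {
		vcpu_run(vcpu);
		switch (get_ucall(vcpu, &uc)) {
		case UCALL_SYNC:
			break;		/* guest checkpoint, keep going */
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			return;
		case UCALL_DONE:
			return;
		}
	}
}
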
diff --git a/tools/testing/selftests/kvm/lib/arm64/vgic.c b/tools/testing/selftests/kvm/lib/arm64/vgic.c
index d0f7bd0984b8..4ecebf3146a2 100644
--- a/tools/testing/selftests/kvm/lib/arm64/vgic.c
+++ b/tools/testing/selftests/kvm/lib/arm64/vgic.c
@@ -41,10 +41,10 @@ bool kvm_supports_vgic_v3(void)
* redistributor regions of the guest. Since it depends on the number of
* vCPUs for the VM, it must be called after all the vCPUs have been created.
*/
-int __vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs)
+int __vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, u32 nr_irqs)
{
int gic_fd;
- uint64_t attr;
+ u64 attr;
unsigned int nr_gic_pages;
/* Distributor setup */
@@ -77,7 +77,7 @@ void __vgic_v3_init(int fd)
KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
}
-int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs)
+int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, u32 nr_irqs)
{
unsigned int nr_vcpus_created = 0;
struct list_head *iter;
@@ -104,11 +104,11 @@ int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs)
}
/* should only work for level sensitive interrupts */
-int _kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level)
+int _kvm_irq_set_level_info(int gic_fd, u32 intid, int level)
{
- uint64_t attr = 32 * (intid / 32);
- uint64_t index = intid % 32;
- uint64_t val;
+ u64 attr = 32 * (intid / 32);
+ u64 index = intid % 32;
+ u64 val;
int ret;
ret = __kvm_device_attr_get(gic_fd, KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO,
@@ -122,16 +122,16 @@ int _kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level)
return ret;
}
-void kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level)
+void kvm_irq_set_level_info(int gic_fd, u32 intid, int level)
{
int ret = _kvm_irq_set_level_info(gic_fd, intid, level);
TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO, ret));
}
-int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level)
+int _kvm_arm_irq_line(struct kvm_vm *vm, u32 intid, int level)
{
- uint32_t irq = intid & KVM_ARM_IRQ_NUM_MASK;
+ u32 irq = intid & KVM_ARM_IRQ_NUM_MASK;
TEST_ASSERT(!INTID_IS_SGI(intid), "KVM_IRQ_LINE's interface itself "
"doesn't allow injecting SGIs. There's no mask for it.");
@@ -144,23 +144,23 @@ int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level)
return _kvm_irq_line(vm, irq, level);
}
-void kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level)
+void kvm_arm_irq_line(struct kvm_vm *vm, u32 intid, int level)
{
int ret = _kvm_arm_irq_line(vm, intid, level);
TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_IRQ_LINE, ret));
}
-static void vgic_poke_irq(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu,
- uint64_t reg_off)
+static void vgic_poke_irq(int gic_fd, u32 intid, struct kvm_vcpu *vcpu,
+ u64 reg_off)
{
- uint64_t reg = intid / 32;
- uint64_t index = intid % 32;
- uint64_t attr = reg_off + reg * 4;
- uint64_t val;
+ u64 reg = intid / 32;
+ u64 index = intid % 32;
+ u64 attr = reg_off + reg * 4;
+ u64 val;
bool intid_is_private = INTID_IS_SGI(intid) || INTID_IS_PPI(intid);
- uint32_t group = intid_is_private ? KVM_DEV_ARM_VGIC_GRP_REDIST_REGS
+ u32 group = intid_is_private ? KVM_DEV_ARM_VGIC_GRP_REDIST_REGS
: KVM_DEV_ARM_VGIC_GRP_DIST_REGS;
if (intid_is_private) {
@@ -183,12 +183,12 @@ static void vgic_poke_irq(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu,
kvm_device_attr_set(gic_fd, group, attr, &val);
}
-void kvm_irq_write_ispendr(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu)
+void kvm_irq_write_ispendr(int gic_fd, u32 intid, struct kvm_vcpu *vcpu)
{
vgic_poke_irq(gic_fd, intid, vcpu, GICD_ISPENDR);
}
-void kvm_irq_write_isactiver(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu)
+void kvm_irq_write_isactiver(int gic_fd, u32 intid, struct kvm_vcpu *vcpu)
{
vgic_poke_irq(gic_fd, intid, vcpu, GICD_ISACTIVER);
}
diff --git a/tools/testing/selftests/kvm/lib/elf.c b/tools/testing/selftests/kvm/lib/elf.c
index f34d926d9735..b689c4df4a01 100644
--- a/tools/testing/selftests/kvm/lib/elf.c
+++ b/tools/testing/selftests/kvm/lib/elf.c
@@ -156,21 +156,20 @@ void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename)
TEST_ASSERT(phdr.p_memsz > 0, "Unexpected loadable segment "
"memsize of 0,\n"
" phdr index: %u p_memsz: 0x%" PRIx64,
- n1, (uint64_t) phdr.p_memsz);
- vm_vaddr_t seg_vstart = align_down(phdr.p_vaddr, vm->page_size);
- vm_vaddr_t seg_vend = phdr.p_vaddr + phdr.p_memsz - 1;
+ n1, (u64)phdr.p_memsz);
+ gva_t seg_vstart = align_down(phdr.p_vaddr, vm->page_size);
+ gva_t seg_vend = phdr.p_vaddr + phdr.p_memsz - 1;
seg_vend |= vm->page_size - 1;
size_t seg_size = seg_vend - seg_vstart + 1;
- vm_vaddr_t vaddr = __vm_vaddr_alloc(vm, seg_size, seg_vstart,
- MEM_REGION_CODE);
- TEST_ASSERT(vaddr == seg_vstart, "Unable to allocate "
+ gva_t gva = __vm_alloc(vm, seg_size, seg_vstart, MEM_REGION_CODE);
+ TEST_ASSERT(gva == seg_vstart, "Unable to allocate "
"virtual memory for segment at requested min addr,\n"
" segment idx: %u\n"
" seg_vstart: 0x%lx\n"
- " vaddr: 0x%lx",
- n1, seg_vstart, vaddr);
- memset(addr_gva2hva(vm, vaddr), 0, seg_size);
+ " gva: 0x%lx",
+ n1, seg_vstart, gva);
+ memset(addr_gva2hva(vm, gva), 0, seg_size);
/* TODO(lhuemill): Set permissions of each memory segment
* based on the least-significant 3 bits of phdr.p_flags.
*/
diff --git a/tools/testing/selftests/kvm/lib/guest_modes.c b/tools/testing/selftests/kvm/lib/guest_modes.c
index ce3099630397..7a96c43b5704 100644
--- a/tools/testing/selftests/kvm/lib/guest_modes.c
+++ b/tools/testing/selftests/kvm/lib/guest_modes.c
@@ -20,7 +20,7 @@ void guest_modes_append_default(void)
#ifdef __aarch64__
{
unsigned int limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE);
- uint32_t ipa4k, ipa16k, ipa64k;
+ u32 ipa4k, ipa16k, ipa64k;
int i;
aarch64_get_supported_page_sizes(limit, &ipa4k, &ipa16k, &ipa64k);
diff --git a/tools/testing/selftests/kvm/lib/guest_sprintf.c b/tools/testing/selftests/kvm/lib/guest_sprintf.c
index 74627514c4d4..7a33965349a7 100644
--- a/tools/testing/selftests/kvm/lib/guest_sprintf.c
+++ b/tools/testing/selftests/kvm/lib/guest_sprintf.c
@@ -35,8 +35,8 @@ static int skip_atoi(const char **s)
({ \
int __res; \
\
- __res = ((uint64_t) n) % (uint32_t) base; \
- n = ((uint64_t) n) / (uint32_t) base; \
+ __res = ((u64)n) % (u32)base; \
+ n = ((u64)n) / (u32)base; \
__res; \
})
@@ -119,7 +119,7 @@ int guest_vsnprintf(char *buf, int n, const char *fmt, va_list args)
{
char *str, *end;
const char *s;
- uint64_t num;
+ u64 num;
int i, base;
int len;
@@ -216,7 +216,7 @@ repeat:
while (--field_width > 0)
APPEND_BUFFER_SAFE(str, end, ' ');
APPEND_BUFFER_SAFE(str, end,
- (uint8_t)va_arg(args, int));
+ (u8)va_arg(args, int));
while (--field_width > 0)
APPEND_BUFFER_SAFE(str, end, ' ');
continue;
@@ -240,7 +240,7 @@ repeat:
flags |= SPECIAL | SMALL | ZEROPAD;
}
str = number(str, end,
- (uint64_t)va_arg(args, void *), 16,
+ (u64)va_arg(args, void *), 16,
field_width, precision, flags);
continue;
@@ -284,15 +284,15 @@ repeat:
continue;
}
if (qualifier == 'l')
- num = va_arg(args, uint64_t);
+ num = va_arg(args, u64);
else if (qualifier == 'h') {
- num = (uint16_t)va_arg(args, int);
+ num = (u16)va_arg(args, int);
if (flags & SIGN)
- num = (int16_t)num;
+ num = (s16)num;
} else if (flags & SIGN)
num = va_arg(args, int);
else
- num = va_arg(args, uint32_t);
+ num = va_arg(args, u32);
str = number(str, end, num, base, field_width, precision, flags);
}
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 1959bf556e88..2a76eca7029d 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -20,9 +20,9 @@
#define KVM_UTIL_MIN_PFN 2
-uint32_t guest_random_seed;
+u32 guest_random_seed;
struct guest_random_state guest_rng;
-static uint32_t last_guest_seed;
+static u32 last_guest_seed;
static size_t vcpu_mmap_sz(void);
@@ -165,7 +165,7 @@ unsigned int kvm_check_cap(long cap)
return (unsigned int)ret;
}
-void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size)
+void vm_enable_dirty_ring(struct kvm_vm *vm, u32 ring_size)
{
if (vm_check_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL))
vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL, ring_size);
@@ -189,7 +189,7 @@ static void vm_open(struct kvm_vm *vm)
vm->stats.fd = -1;
}
-const char *vm_guest_mode_string(uint32_t i)
+const char *vm_guest_mode_string(u32 i)
{
static const char * const strings[] = {
[VM_MODE_P52V48_4K] = "PA-bits:52, VA-bits:48, 4K pages",
@@ -267,7 +267,7 @@ _Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params)
* based on the MSB of the VA. On architectures with this behavior
* the VA region spans [0, 2^(va_bits - 1)), [-(2^(va_bits - 1), -1].
*/
-__weak void vm_vaddr_populate_bitmap(struct kvm_vm *vm)
+__weak void vm_populate_gva_bitmap(struct kvm_vm *vm)
{
sparsebit_set_num(vm->vpages_valid,
0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
@@ -385,7 +385,7 @@ struct kvm_vm *____vm_create(struct vm_shape shape)
/* Limit to VA-bit canonical virtual addresses. */
vm->vpages_valid = sparsebit_alloc();
- vm_vaddr_populate_bitmap(vm);
+ vm_populate_gva_bitmap(vm);
/* Limit physical addresses to PA-bits. */
vm->max_gfn = vm_compute_max_gfn(vm);
@@ -396,12 +396,12 @@ struct kvm_vm *____vm_create(struct vm_shape shape)
return vm;
}
-static uint64_t vm_nr_pages_required(enum vm_guest_mode mode,
- uint32_t nr_runnable_vcpus,
- uint64_t extra_mem_pages)
+static u64 vm_nr_pages_required(enum vm_guest_mode mode,
+ u32 nr_runnable_vcpus,
+ u64 extra_mem_pages)
{
- uint64_t page_size = vm_guest_mode_params[mode].page_size;
- uint64_t nr_pages;
+ u64 page_size = vm_guest_mode_params[mode].page_size;
+ u64 nr_pages;
TEST_ASSERT(nr_runnable_vcpus,
"Use vm_create_barebones() for VMs that _never_ have vCPUs");
@@ -435,7 +435,7 @@ static uint64_t vm_nr_pages_required(enum vm_guest_mode mode,
return vm_adjust_num_guest_pages(mode, nr_pages);
}
-void kvm_set_files_rlimit(uint32_t nr_vcpus)
+void kvm_set_files_rlimit(u32 nr_vcpus)
{
/*
* Each vCPU will open two file descriptors: the vCPU itself and the
@@ -476,10 +476,10 @@ static bool is_guest_memfd_required(struct vm_shape shape)
#endif
}
-struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
- uint64_t nr_extra_pages)
+struct kvm_vm *__vm_create(struct vm_shape shape, u32 nr_runnable_vcpus,
+ u64 nr_extra_pages)
{
- uint64_t nr_pages = vm_nr_pages_required(shape.mode, nr_runnable_vcpus,
+ u64 nr_pages = vm_nr_pages_required(shape.mode, nr_runnable_vcpus,
nr_extra_pages);
struct userspace_mem_region *slot0;
struct kvm_vm *vm;
@@ -546,8 +546,8 @@ struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
* extra_mem_pages is only used to calculate the maximum page table size,
* no real memory allocation for non-slot0 memory in this function.
*/
-struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus,
- uint64_t extra_mem_pages,
+struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, u32 nr_vcpus,
+ u64 extra_mem_pages,
void *guest_code, struct kvm_vcpu *vcpus[])
{
struct kvm_vm *vm;
@@ -566,7 +566,7 @@ struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus,
struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape,
struct kvm_vcpu **vcpu,
- uint64_t extra_mem_pages,
+ u64 extra_mem_pages,
void *guest_code)
{
struct kvm_vcpu *vcpus[1];
@@ -614,7 +614,7 @@ void kvm_vm_restart(struct kvm_vm *vmp)
}
__weak struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm,
- uint32_t vcpu_id)
+ u32 vcpu_id)
{
return __vm_vcpu_add(vm, vcpu_id);
}
@@ -636,9 +636,9 @@ int __pin_task_to_cpu(pthread_t task, int cpu)
return pthread_setaffinity_np(task, sizeof(cpuset), &cpuset);
}
-static uint32_t parse_pcpu(const char *cpu_str, const cpu_set_t *allowed_mask)
+static u32 parse_pcpu(const char *cpu_str, const cpu_set_t *allowed_mask)
{
- uint32_t pcpu = atoi_non_negative("CPU number", cpu_str);
+ u32 pcpu = atoi_non_negative("CPU number", cpu_str);
TEST_ASSERT(CPU_ISSET(pcpu, allowed_mask),
"Not allowed to run on pCPU '%d', check cgroups?", pcpu);
@@ -662,7 +662,7 @@ void kvm_print_vcpu_pinning_help(void)
" (default: no pinning)\n", name, name);
}
-void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
+void kvm_parse_vcpu_pinning(const char *pcpus_string, u32 vcpu_to_pcpu[],
int nr_vcpus)
{
cpu_set_t allowed_mask;
@@ -715,15 +715,15 @@ void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
* region exists.
*/
static struct userspace_mem_region *
-userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
+userspace_mem_region_find(struct kvm_vm *vm, u64 start, u64 end)
{
struct rb_node *node;
for (node = vm->regions.gpa_tree.rb_node; node; ) {
struct userspace_mem_region *region =
container_of(node, struct userspace_mem_region, gpa_node);
- uint64_t existing_start = region->region.guest_phys_addr;
- uint64_t existing_end = region->region.guest_phys_addr
+ u64 existing_start = region->region.guest_phys_addr;
+ u64 existing_end = region->region.guest_phys_addr
+ region->region.memory_size - 1;
if (start <= existing_end && end >= existing_start)
return region;
@@ -918,8 +918,8 @@ static void vm_userspace_mem_region_hva_insert(struct rb_root *hva_tree,
}
-int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
- uint64_t gpa, uint64_t size, void *hva)
+int __vm_set_user_memory_region(struct kvm_vm *vm, u32 slot, u32 flags,
+ gpa_t gpa, u64 size, void *hva)
{
struct kvm_userspace_memory_region region = {
.slot = slot,
@@ -932,8 +932,8 @@ int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags
return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region);
}
-void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
- uint64_t gpa, uint64_t size, void *hva)
+void vm_set_user_memory_region(struct kvm_vm *vm, u32 slot, u32 flags,
+ gpa_t gpa, u64 size, void *hva)
{
int ret = __vm_set_user_memory_region(vm, slot, flags, gpa, size, hva);
@@ -945,9 +945,9 @@ void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
__TEST_REQUIRE(kvm_has_cap(KVM_CAP_USER_MEMORY2), \
"KVM selftests now require KVM_SET_USER_MEMORY_REGION2 (introduced in v6.8)")
-int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
- uint64_t gpa, uint64_t size, void *hva,
- uint32_t guest_memfd, uint64_t guest_memfd_offset)
+int __vm_set_user_memory_region2(struct kvm_vm *vm, u32 slot, u32 flags,
+ gpa_t gpa, u64 size, void *hva,
+ u32 guest_memfd, u64 guest_memfd_offset)
{
struct kvm_userspace_memory_region2 region = {
.slot = slot,
@@ -964,9 +964,9 @@ int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flag
return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION2, &region);
}
-void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
- uint64_t gpa, uint64_t size, void *hva,
- uint32_t guest_memfd, uint64_t guest_memfd_offset)
+void vm_set_user_memory_region2(struct kvm_vm *vm, u32 slot, u32 flags,
+ gpa_t gpa, u64 size, void *hva,
+ u32 guest_memfd, u64 guest_memfd_offset)
{
int ret = __vm_set_user_memory_region2(vm, slot, flags, gpa, size, hva,
guest_memfd, guest_memfd_offset);
@@ -978,14 +978,14 @@ void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags
/* FIXME: This thing needs to be ripped apart and rewritten. */
void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
- uint64_t gpa, uint32_t slot, uint64_t npages, uint32_t flags,
- int guest_memfd, uint64_t guest_memfd_offset)
+ gpa_t gpa, u32 slot, u64 npages, u32 flags,
+ int guest_memfd, u64 guest_memfd_offset)
{
int ret;
struct userspace_mem_region *region;
size_t backing_src_pagesz = get_backing_src_pagesz(src_type);
size_t mem_size = npages * vm->page_size;
- size_t alignment;
+ size_t alignment = 1;
TEST_REQUIRE_SET_USER_MEMORY_REGION2();
@@ -1016,8 +1016,8 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
" requested gpa: 0x%lx npages: 0x%lx page_size: 0x%x\n"
" existing gpa: 0x%lx size: 0x%lx",
gpa, npages, vm->page_size,
- (uint64_t) region->region.guest_phys_addr,
- (uint64_t) region->region.memory_size);
+ (u64)region->region.guest_phys_addr,
+ (u64)region->region.memory_size);
/* Confirm no region with the requested slot already exists. */
hash_for_each_possible(vm->regions.slot_hash, region, slot_node,
@@ -1027,11 +1027,11 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
TEST_FAIL("A mem region with the requested slot "
"already exists.\n"
- " requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
- " existing slot: %u paddr: 0x%lx size: 0x%lx",
+ " requested slot: %u gpa: 0x%lx npages: 0x%lx\n"
+ " existing slot: %u gpa: 0x%lx size: 0x%lx",
slot, gpa, npages, region->region.slot,
- (uint64_t) region->region.guest_phys_addr,
- (uint64_t) region->region.memory_size);
+ (u64)region->region.guest_phys_addr,
+ (u64)region->region.memory_size);
}
/* Allocate and initialize new mem region structure. */
@@ -1039,13 +1039,6 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
TEST_ASSERT(region != NULL, "Insufficient Memory");
region->mmap_size = mem_size;
-#ifdef __s390x__
- /* On s390x, the host address must be aligned to 1M (due to PGSTEs) */
- alignment = 0x100000;
-#else
- alignment = 1;
-#endif
-
/*
* When using THP mmap is not guaranteed to return a hugepage aligned
* address so we have to pad the mmap. Padding is not needed for HugeTLB
@@ -1092,7 +1085,7 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
if (flags & KVM_MEM_GUEST_MEMFD) {
if (guest_memfd < 0) {
- uint32_t guest_memfd_flags = 0;
+ u32 guest_memfd_flags = 0;
TEST_ASSERT(!guest_memfd_offset,
"Offset must be zero when creating new guest_memfd");
guest_memfd = vm_create_guest_memfd(vm, mem_size, guest_memfd_flags);
@@ -1148,8 +1141,7 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
void vm_userspace_mem_region_add(struct kvm_vm *vm,
enum vm_mem_backing_src_type src_type,
- uint64_t gpa, uint32_t slot, uint64_t npages,
- uint32_t flags)
+ gpa_t gpa, u32 slot, u64 npages, u32 flags)
{
vm_mem_add(vm, src_type, gpa, slot, npages, flags, -1, 0);
}
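
For reference, the wrapper keeps its call shape after the rename. A minimal usage sketch, with arbitrary illustrative values for the GPA, slot, and page count:

/* Illustrative values only: back 512 guest pages at GPA 0x10000000 with
 * anonymous host memory in memslot 1, no special flags. */
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
			    0x10000000, 1, 512, 0);
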
@@ -1170,7 +1162,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
* memory slot ID).
*/
struct userspace_mem_region *
-memslot2region(struct kvm_vm *vm, uint32_t memslot)
+memslot2region(struct kvm_vm *vm, u32 memslot)
{
struct userspace_mem_region *region;
@@ -1201,7 +1193,7 @@ memslot2region(struct kvm_vm *vm, uint32_t memslot)
* Sets the flags of the memory region specified by the value of slot,
* to the values given by flags.
*/
-void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags)
+void vm_mem_region_set_flags(struct kvm_vm *vm, u32 slot, u32 flags)
{
int ret;
struct userspace_mem_region *region;
@@ -1217,7 +1209,7 @@ void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags)
ret, errno, slot, flags);
}
-void vm_mem_region_reload(struct kvm_vm *vm, uint32_t slot)
+void vm_mem_region_reload(struct kvm_vm *vm, u32 slot)
{
struct userspace_mem_region *region = memslot2region(vm, slot);
struct kvm_userspace_memory_region2 tmp = region->region;
@@ -1241,7 +1233,7 @@ void vm_mem_region_reload(struct kvm_vm *vm, uint32_t slot)
*
* Change the gpa of a memory region.
*/
-void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa)
+void vm_mem_region_move(struct kvm_vm *vm, u32 slot, u64 new_gpa)
{
struct userspace_mem_region *region;
int ret;
@@ -1270,7 +1262,7 @@ void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa)
*
* Delete a memory region.
*/
-void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot)
+void vm_mem_region_delete(struct kvm_vm *vm, u32 slot)
{
struct userspace_mem_region *region = memslot2region(vm, slot);
@@ -1280,18 +1272,18 @@ void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot)
__vm_mem_region_delete(vm, region);
}
-void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t base, uint64_t size,
+void vm_guest_mem_fallocate(struct kvm_vm *vm, u64 base, u64 size,
bool punch_hole)
{
const int mode = FALLOC_FL_KEEP_SIZE | (punch_hole ? FALLOC_FL_PUNCH_HOLE : 0);
struct userspace_mem_region *region;
- uint64_t end = base + size;
- uint64_t gpa, len;
+ u64 end = base + size;
+ gpa_t gpa, len;
off_t fd_offset;
int ret;
for (gpa = base; gpa < end; gpa += len) {
- uint64_t offset;
+ u64 offset;
region = userspace_mem_region_find(vm, gpa, gpa);
TEST_ASSERT(region && region->region.flags & KVM_MEM_GUEST_MEMFD,
@@ -1299,7 +1291,7 @@ void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t base, uint64_t size,
offset = gpa - region->region.guest_phys_addr;
fd_offset = region->region.guest_memfd_offset + offset;
- len = min_t(uint64_t, end - gpa, region->region.memory_size - offset);
+ len = min_t(u64, end - gpa, region->region.memory_size - offset);
ret = fallocate(region->region.guest_memfd, mode, fd_offset, len);
TEST_ASSERT(!ret, "fallocate() failed to %s at %lx (len = %lu), fd = %d, mode = %x, offset = %lx",
@@ -1324,7 +1316,7 @@ static size_t vcpu_mmap_sz(void)
return ret;
}
-static bool vcpu_exists(struct kvm_vm *vm, uint32_t vcpu_id)
+static bool vcpu_exists(struct kvm_vm *vm, u32 vcpu_id)
{
struct kvm_vcpu *vcpu;
@@ -1340,7 +1332,7 @@ static bool vcpu_exists(struct kvm_vm *vm, uint32_t vcpu_id)
* Adds a virtual CPU to the VM specified by vm with the ID given by vcpu_id.
* No additional vCPU setup is done. Returns the vCPU.
*/
-struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
+struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, u32 vcpu_id)
{
struct kvm_vcpu *vcpu;
@@ -1374,33 +1366,18 @@ struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
}
/*
- * VM Virtual Address Unused Gap
- *
- * Input Args:
- * vm - Virtual Machine
- * sz - Size (bytes)
- * vaddr_min - Minimum Virtual Address
- *
- * Output Args: None
- *
- * Return:
- * Lowest virtual address at or above vaddr_min, with at least
- * sz unused bytes. TEST_ASSERT failure if no area of at least
- * size sz is available.
- *
- * Within the VM specified by vm, locates the lowest starting virtual
- * address >= vaddr_min, that has at least sz unallocated bytes. A
+ * Within the VM specified by @vm, locates the lowest starting guest virtual
+ * address >= @min_gva that has at least @sz unallocated bytes. A
* TEST_ASSERT failure occurs for invalid input or no area of at least
- * sz unallocated bytes >= vaddr_min is available.
+ * @sz unallocated bytes >= @min_gva is available.
*/
-vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
- vm_vaddr_t vaddr_min)
+gva_t vm_unused_gva_gap(struct kvm_vm *vm, size_t sz, gva_t min_gva)
{
- uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift;
+ u64 pages = (sz + vm->page_size - 1) >> vm->page_shift;
/* Determine lowest permitted virtual page index. */
- uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift;
- if ((pgidx_start * vm->page_size) < vaddr_min)
+ u64 pgidx_start = (min_gva + vm->page_size - 1) >> vm->page_shift;
+ if ((pgidx_start * vm->page_size) < min_gva)
goto no_va_found;
/* Loop over section with enough valid virtual page indexes. */
@@ -1437,7 +1414,7 @@ vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
} while (pgidx_start != 0);
no_va_found:
- TEST_FAIL("No vaddr of specified pages available, pages: 0x%lx", pages);
+ TEST_FAIL("No gva of specified pages available, pages: 0x%lx", pages);
/* NOT REACHED */
return -1;
@@ -1459,145 +1436,91 @@ va_found:
return pgidx_start * vm->page_size;
}
-static vm_vaddr_t ____vm_vaddr_alloc(struct kvm_vm *vm, size_t sz,
- vm_vaddr_t vaddr_min,
- enum kvm_mem_region_type type,
- bool protected)
+static gva_t ____vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva,
+ enum kvm_mem_region_type type, bool protected)
{
- uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);
+ u64 pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);
virt_pgd_alloc(vm);
- vm_paddr_t paddr = __vm_phy_pages_alloc(vm, pages,
- KVM_UTIL_MIN_PFN * vm->page_size,
- vm->memslots[type], protected);
+ gpa_t gpa = __vm_phy_pages_alloc(vm, pages,
+ KVM_UTIL_MIN_PFN * vm->page_size,
+ vm->memslots[type], protected);
/*
* Find an unused range of virtual page addresses of at least
* pages in length.
*/
- vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min);
+ gva_t gva_start = vm_unused_gva_gap(vm, sz, min_gva);
/* Map the virtual pages. */
- for (vm_vaddr_t vaddr = vaddr_start; pages > 0;
- pages--, vaddr += vm->page_size, paddr += vm->page_size) {
+ for (gva_t gva = gva_start; pages > 0;
+ pages--, gva += vm->page_size, gpa += vm->page_size) {
- virt_pg_map(vm, vaddr, paddr);
+ virt_pg_map(vm, gva, gpa);
}
- return vaddr_start;
+ return gva_start;
}
-vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
- enum kvm_mem_region_type type)
+gva_t __vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva,
+ enum kvm_mem_region_type type)
{
- return ____vm_vaddr_alloc(vm, sz, vaddr_min, type,
- vm_arch_has_protected_memory(vm));
+ return ____vm_alloc(vm, sz, min_gva, type,
+ vm_arch_has_protected_memory(vm));
}
-vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz,
- vm_vaddr_t vaddr_min,
- enum kvm_mem_region_type type)
+gva_t vm_alloc_shared(struct kvm_vm *vm, size_t sz, gva_t min_gva,
+ enum kvm_mem_region_type type)
{
- return ____vm_vaddr_alloc(vm, sz, vaddr_min, type, false);
+ return ____vm_alloc(vm, sz, min_gva, type, false);
}
/*
- * VM Virtual Address Allocate
- *
- * Input Args:
- * vm - Virtual Machine
- * sz - Size in bytes
- * vaddr_min - Minimum starting virtual address
- *
- * Output Args: None
- *
- * Return:
- * Starting guest virtual address
- *
- * Allocates at least sz bytes within the virtual address space of the vm
- * given by vm. The allocated bytes are mapped to a virtual address >=
- * the address given by vaddr_min. Note that each allocation uses a
- * a unique set of pages, with the minimum real allocation being at least
- * a page. The allocated physical space comes from the TEST_DATA memory region.
+ * Allocates at least sz bytes within the virtual address space of the VM
+ * given by @vm. The allocated bytes are mapped to a virtual address >= the
+ * address given by @min_gva. Note that each allocation uses a unique set
+ * of pages, with the minimum real allocation being at least a page. The
+ * allocated physical space comes from the TEST_DATA memory region.
*/
-vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)
+gva_t vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva)
{
- return __vm_vaddr_alloc(vm, sz, vaddr_min, MEM_REGION_TEST_DATA);
+ return __vm_alloc(vm, sz, min_gva, MEM_REGION_TEST_DATA);
}
-/*
- * VM Virtual Address Allocate Pages
- *
- * Input Args:
- * vm - Virtual Machine
- *
- * Output Args: None
- *
- * Return:
- * Starting guest virtual address
- *
- * Allocates at least N system pages worth of bytes within the virtual address
- * space of the vm.
- */
-vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages)
+gva_t vm_alloc_pages(struct kvm_vm *vm, int nr_pages)
{
- return vm_vaddr_alloc(vm, nr_pages * getpagesize(), KVM_UTIL_MIN_VADDR);
+ return vm_alloc(vm, nr_pages * getpagesize(), KVM_UTIL_MIN_VADDR);
}
-vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type)
+gva_t __vm_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type)
{
- return __vm_vaddr_alloc(vm, getpagesize(), KVM_UTIL_MIN_VADDR, type);
+ return __vm_alloc(vm, getpagesize(), KVM_UTIL_MIN_VADDR, type);
}
-/*
- * VM Virtual Address Allocate Page
- *
- * Input Args:
- * vm - Virtual Machine
- *
- * Output Args: None
- *
- * Return:
- * Starting guest virtual address
- *
- * Allocates at least one system page worth of bytes within the virtual address
- * space of the vm.
- */
-vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm)
+gva_t vm_alloc_page(struct kvm_vm *vm)
{
- return vm_vaddr_alloc_pages(vm, 1);
+ return vm_alloc_pages(vm, 1);
}
/*
- * Map a range of VM virtual address to the VM's physical address
- *
- * Input Args:
- * vm - Virtual Machine
- * vaddr - Virtuall address to map
- * paddr - VM Physical Address
- * npages - The number of pages to map
+ * Map a range of VM virtual address to the VM's physical address.
*
- * Output Args: None
- *
- * Return: None
- *
- * Within the VM given by @vm, creates a virtual translation for
- * @npages starting at @vaddr to the page range starting at @paddr.
+ * Within the VM given by @vm, creates a virtual translation for @npages
+ * starting at @gva to the page range starting at @gpa.
*/
-void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
- unsigned int npages)
+void virt_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa, unsigned int npages)
{
size_t page_size = vm->page_size;
size_t size = npages * page_size;
- TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow");
- TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
+	TEST_ASSERT(gva + size > gva, "gva overflow");
+	TEST_ASSERT(gpa + size > gpa, "gpa overflow");
while (npages--) {
- virt_pg_map(vm, vaddr, paddr);
+ virt_pg_map(vm, gva, gpa);
- vaddr += page_size;
- paddr += page_size;
+ gva += page_size;
+ gpa += page_size;
}
}
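/*
 * A sketch of pairing the two allocators with virt_map() (illustrative
 * only; the 0x10000000 GVA, 0x100000 minimum GPA, and memslot 0 are
 * arbitrary): back two pages from slot 0, then create the guest virtual
 * mapping for them.
 *
 *	gpa_t gpa = vm_phy_pages_alloc(vm, 2, 0x100000, 0);
 *
 *	virt_map(vm, 0x10000000, gpa, 2);
 */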
@@ -1618,7 +1541,7 @@ void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
* address providing the memory to the vm physical address is returned.
* A TEST_ASSERT failure occurs if no region containing gpa exists.
*/
-void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
+void *addr_gpa2hva(struct kvm_vm *vm, gpa_t gpa)
{
struct userspace_mem_region *region;
@@ -1651,7 +1574,7 @@ void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
* VM physical address is returned. A TEST_ASSERT failure occurs if no
* region containing hva exists.
*/
-vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
+gpa_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
{
struct rb_node *node;
@@ -1662,7 +1585,7 @@ vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
if (hva >= region->host_mem) {
if (hva <= (region->host_mem
+ region->region.memory_size - 1))
- return (vm_paddr_t)((uintptr_t)
+ return (gpa_t)((uintptr_t)
region->region.guest_phys_addr
+ (hva - (uintptr_t)region->host_mem));
@@ -1694,7 +1617,7 @@ vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
* memory without mapping said memory in the guest's address space. And, for
* userfaultfd-based demand paging, to do so without triggering userfaults.
*/
-void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa)
+void *addr_gpa2alias(struct kvm_vm *vm, gpa_t gpa)
{
struct userspace_mem_region *region;
uintptr_t offset;
@@ -1788,8 +1711,8 @@ struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu)
void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu)
{
- uint32_t page_size = getpagesize();
- uint32_t size = vcpu->vm->dirty_ring_size;
+ u32 page_size = getpagesize();
+ u32 size = vcpu->vm->dirty_ring_size;
TEST_ASSERT(size > 0, "Should enable dirty ring first");
@@ -1818,7 +1741,7 @@ void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu)
* Device Ioctl
*/
-int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr)
+int __kvm_has_device_attr(int dev_fd, u32 group, u64 attr)
{
struct kvm_device_attr attribute = {
.group = group,
@@ -1829,7 +1752,7 @@ int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr)
return ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute);
}
-int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type)
+int __kvm_test_create_device(struct kvm_vm *vm, u64 type)
{
struct kvm_create_device create_dev = {
.type = type,
@@ -1839,7 +1762,7 @@ int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type)
return __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev);
}
-int __kvm_create_device(struct kvm_vm *vm, uint64_t type)
+int __kvm_create_device(struct kvm_vm *vm, u64 type)
{
struct kvm_create_device create_dev = {
.type = type,
@@ -1853,7 +1776,7 @@ int __kvm_create_device(struct kvm_vm *vm, uint64_t type)
return err ? : create_dev.fd;
}
-int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val)
+int __kvm_device_attr_get(int dev_fd, u32 group, u64 attr, void *val)
{
struct kvm_device_attr kvmattr = {
.group = group,
@@ -1865,7 +1788,7 @@ int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val)
return __kvm_ioctl(dev_fd, KVM_GET_DEVICE_ATTR, &kvmattr);
}
-int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val)
+int __kvm_device_attr_set(int dev_fd, u32 group, u64 attr, void *val)
{
struct kvm_device_attr kvmattr = {
.group = group,
@@ -1881,7 +1804,7 @@ int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val)
* IRQ related functions.
*/
-int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
+int _kvm_irq_line(struct kvm_vm *vm, u32 irq, int level)
{
struct kvm_irq_level irq_level = {
.irq = irq,
@@ -1891,7 +1814,7 @@ int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
return __vm_ioctl(vm, KVM_IRQ_LINE, &irq_level);
}
-void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
+void kvm_irq_line(struct kvm_vm *vm, u32 irq, int level)
{
int ret = _kvm_irq_line(vm, irq, level);
@@ -1913,7 +1836,7 @@ struct kvm_irq_routing *kvm_gsi_routing_create(void)
}
void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
- uint32_t gsi, uint32_t pin)
+ u32 gsi, u32 pin)
{
int i;
@@ -1963,7 +1886,7 @@ void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
 * Dumps the current state of the VM given by vm to the FILE stream
 * given by stream.
*/
-void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
+void vm_dump(FILE *stream, struct kvm_vm *vm, u8 indent)
{
int ctr;
struct userspace_mem_region *region;
@@ -1976,8 +1899,8 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) {
fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx "
"host_virt: %p\n", indent + 2, "",
- (uint64_t) region->region.guest_phys_addr,
- (uint64_t) region->region.memory_size,
+ (u64)region->region.guest_phys_addr,
+ (u64)region->region.memory_size,
region->host_mem);
fprintf(stream, "%*sunused_phy_pages: ", indent + 2, "");
sparsebit_dump(stream, region->unused_phy_pages, 0);
@@ -2084,7 +2007,7 @@ const char *exit_reason_str(unsigned int exit_reason)
* Input Args:
* vm - Virtual Machine
* num - number of pages
- * paddr_min - Physical address minimum
+ * min_gpa - Physical address minimum
* memslot - Memory region to allocate page from
* protected - True if the pages will be used as protected/private memory
*
@@ -2094,29 +2017,29 @@ const char *exit_reason_str(unsigned int exit_reason)
* Starting physical address
*
* Within the VM specified by vm, locates a range of available physical
- * pages at or above paddr_min. If found, the pages are marked as in use
+ * pages at or above min_gpa. If found, the pages are marked as in use
* and their base address is returned. A TEST_ASSERT failure occurs if
- * not enough pages are available at or above paddr_min.
+ * not enough pages are available at or above min_gpa.
*/
-vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
- vm_paddr_t paddr_min, uint32_t memslot,
- bool protected)
+gpa_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+ gpa_t min_gpa, u32 memslot,
+ bool protected)
{
struct userspace_mem_region *region;
sparsebit_idx_t pg, base;
TEST_ASSERT(num > 0, "Must allocate at least one page");
- TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address "
+ TEST_ASSERT((min_gpa % vm->page_size) == 0, "Min physical address "
"not divisible by page size.\n"
- " paddr_min: 0x%lx page_size: 0x%x",
- paddr_min, vm->page_size);
+ " min_gpa: 0x%lx page_size: 0x%x",
+ min_gpa, vm->page_size);
region = memslot2region(vm, memslot);
TEST_ASSERT(!protected || region->protected_phy_pages,
"Region doesn't support protected memory");
- base = pg = paddr_min >> vm->page_shift;
+ base = pg = min_gpa >> vm->page_shift;
do {
for (; pg < base + num; ++pg) {
if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
@@ -2128,8 +2051,8 @@ vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
if (pg == 0) {
fprintf(stderr, "No guest physical page available, "
- "paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
- paddr_min, vm->page_size, memslot);
+ "min_gpa: 0x%lx page_size: 0x%x memslot: %u\n",
+ min_gpa, vm->page_size, memslot);
fputs("---- vm dump ----\n", stderr);
vm_dump(stderr, vm, 2);
abort();
@@ -2144,13 +2067,12 @@ vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
return base * vm->page_size;
}
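/*
 * Illustrative call (not part of this patch; the constants are arbitrary):
 * request eight physically contiguous pages at or above GPA 0x100000 in
 * memslot 0, without marking them protected.
 *
 *	gpa_t gpa = __vm_phy_pages_alloc(vm, 8, 0x100000, 0, false);
 *
 * The returned base is page-aligned and all eight pages are marked as in
 * use in the region's unused_phy_pages sparsebit.
 */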
-vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
- uint32_t memslot)
+gpa_t vm_phy_page_alloc(struct kvm_vm *vm, gpa_t min_gpa, u32 memslot)
{
- return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
+ return vm_phy_pages_alloc(vm, 1, min_gpa, memslot);
}
-vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm)
+gpa_t vm_alloc_page_table(struct kvm_vm *vm)
{
return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR,
vm->memslots[MEM_REGION_PT]);
@@ -2168,7 +2090,7 @@ vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm)
* Return:
* Equivalent host virtual address
*/
-void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva)
+void *addr_gva2hva(struct kvm_vm *vm, gva_t gva)
{
return addr_gpa2hva(vm, addr_gva2gpa(vm, gva));
}
@@ -2266,7 +2188,7 @@ struct kvm_stats_desc *read_stats_descriptors(int stats_fd,
* Read the data values of a specified stat from the binary stats interface.
*/
void read_stat_data(int stats_fd, struct kvm_stats_header *header,
- struct kvm_stats_desc *desc, uint64_t *data,
+ struct kvm_stats_desc *desc, u64 *data,
size_t max_elements)
{
size_t nr_elements = min_t(ssize_t, desc->size, max_elements);
@@ -2287,7 +2209,7 @@ void read_stat_data(int stats_fd, struct kvm_stats_header *header,
}
void kvm_get_stat(struct kvm_binary_stats *stats, const char *name,
- uint64_t *data, size_t max_elements)
+ u64 *data, size_t max_elements)
{
struct kvm_stats_desc *desc;
size_t size_desc;
@@ -2364,7 +2286,7 @@ void __attribute((constructor)) kvm_selftest_init(void)
kvm_selftest_arch_init();
}
-bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr)
+bool vm_is_gpa_protected(struct kvm_vm *vm, gpa_t gpa)
{
sparsebit_idx_t pg = 0;
struct userspace_mem_region *region;
@@ -2372,10 +2294,10 @@ bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr)
if (!vm_arch_has_protected_memory(vm))
return false;
- region = userspace_mem_region_find(vm, paddr, paddr);
- TEST_ASSERT(region, "No vm physical memory at 0x%lx", paddr);
+ region = userspace_mem_region_find(vm, gpa, gpa);
+ TEST_ASSERT(region, "No vm physical memory at 0x%lx", gpa);
- pg = paddr >> vm->page_shift;
+ pg = gpa >> vm->page_shift;
return sparsebit_is_set(region->protected_phy_pages, pg);
}
diff --git a/tools/testing/selftests/kvm/lib/loongarch/processor.c b/tools/testing/selftests/kvm/lib/loongarch/processor.c
index 17aa55a2047a..64d91fb76522 100644
--- a/tools/testing/selftests/kvm/lib/loongarch/processor.c
+++ b/tools/testing/selftests/kvm/lib/loongarch/processor.c
@@ -5,38 +5,39 @@
#include <asm/kvm.h>
#include "kvm_util.h"
+#include "pmu.h"
#include "processor.h"
#include "ucall_common.h"
#define LOONGARCH_PAGE_TABLE_PHYS_MIN 0x200000
#define LOONGARCH_GUEST_STACK_VADDR_MIN 0x200000
-static vm_paddr_t invalid_pgtable[4];
-static vm_vaddr_t exception_handlers;
+static gpa_t invalid_pgtable[4];
+static gva_t exception_handlers;
-static uint64_t virt_pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level)
+static u64 virt_pte_index(struct kvm_vm *vm, gva_t gva, int level)
{
unsigned int shift;
- uint64_t mask;
+ u64 mask;
shift = level * (vm->page_shift - 3) + vm->page_shift;
mask = (1UL << (vm->page_shift - 3)) - 1;
return (gva >> shift) & mask;
}
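/*
 * Worked example (editorial, assuming 4 KiB pages, i.e. page_shift == 12):
 * each level indexes vm->page_shift - 3 = 9 bits, so shift = level * 9 + 12
 * and mask = 0x1ff. Level 0 then extracts GVA bits [20:12], level 1 bits
 * [29:21], and so on up the page-table hierarchy.
 */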
-static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)
+static u64 pte_addr(struct kvm_vm *vm, u64 entry)
{
return entry & ~((0x1UL << vm->page_shift) - 1);
}
-static uint64_t ptrs_per_pte(struct kvm_vm *vm)
+static u64 ptrs_per_pte(struct kvm_vm *vm)
{
return 1 << (vm->page_shift - 3);
}
-static void virt_set_pgtable(struct kvm_vm *vm, vm_paddr_t table, vm_paddr_t child)
+static void virt_set_pgtable(struct kvm_vm *vm, gpa_t table, gpa_t child)
{
- uint64_t *ptep;
+ u64 *ptep;
int i, ptrs_per_pte;
ptep = addr_gpa2hva(vm, table);
@@ -48,7 +49,7 @@ static void virt_set_pgtable(struct kvm_vm *vm, vm_paddr_t table, vm_paddr_t chi
void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
int i;
- vm_paddr_t child, table;
+ gpa_t child, table;
if (vm->mmu.pgd_created)
return;
@@ -66,16 +67,16 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm)
vm->mmu.pgd_created = true;
}
-static int virt_pte_none(uint64_t *ptep, int level)
+static int virt_pte_none(u64 *ptep, int level)
{
return *ptep == invalid_pgtable[level];
}
-static uint64_t *virt_populate_pte(struct kvm_vm *vm, vm_vaddr_t gva, int alloc)
+static u64 *virt_populate_pte(struct kvm_vm *vm, gva_t gva, int alloc)
{
int level;
- uint64_t *ptep;
- vm_paddr_t child;
+ u64 *ptep;
+ gpa_t child;
if (!vm->mmu.pgd_created)
goto unmapped_gva;
@@ -105,43 +106,42 @@ unmapped_gva:
exit(EXIT_FAILURE);
}
-vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
+gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva)
{
- uint64_t *ptep;
+ u64 *ptep;
ptep = virt_populate_pte(vm, gva, 0);
- TEST_ASSERT(*ptep != 0, "Virtual address vaddr: 0x%lx not mapped\n", gva);
+ TEST_ASSERT(*ptep != 0, "Virtual address gva: 0x%lx not mapped\n", gva);
return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));
}
-void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)
{
- uint32_t prot_bits;
- uint64_t *ptep;
+ u32 prot_bits;
+ u64 *ptep;
- TEST_ASSERT((vaddr % vm->page_size) == 0,
+ TEST_ASSERT((gva % vm->page_size) == 0,
"Virtual address not on page boundary,\n"
- "vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
- TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
- (vaddr >> vm->page_shift)),
- "Invalid virtual address, vaddr: 0x%lx", vaddr);
- TEST_ASSERT((paddr % vm->page_size) == 0,
+ "gva: 0x%lx vm->page_size: 0x%x", gva, vm->page_size);
+ TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),
+ "Invalid virtual address, gva: 0x%lx", gva);
+ TEST_ASSERT((gpa % vm->page_size) == 0,
"Physical address not on page boundary,\n"
- "paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
- TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
+ "gpa: 0x%lx vm->page_size: 0x%x", gpa, vm->page_size);
+ TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
"Physical address beyond maximum supported,\n"
- "paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
- paddr, vm->max_gfn, vm->page_size);
+ "gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
+ gpa, vm->max_gfn, vm->page_size);
- ptep = virt_populate_pte(vm, vaddr, 1);
+ ptep = virt_populate_pte(vm, gva, 1);
prot_bits = _PAGE_PRESENT | __READABLE | __WRITEABLE | _CACHE_CC | _PAGE_USER;
- WRITE_ONCE(*ptep, paddr | prot_bits);
+ WRITE_ONCE(*ptep, gpa | prot_bits);
}
-static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level)
+static void pte_dump(FILE *stream, struct kvm_vm *vm, u8 indent, u64 page, int level)
{
- uint64_t pte, *ptep;
+ u64 pte, *ptep;
static const char * const type[] = { "pte", "pmd", "pud", "pgd"};
if (level < 0)
@@ -157,7 +157,7 @@ static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t p
}
}
-void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
+void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent)
{
int level;
@@ -168,7 +168,7 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
pte_dump(stream, vm, indent, vm->mmu.pgd, level);
}
-void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
+void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, u8 indent)
{
}
@@ -205,8 +205,9 @@ void vm_init_descriptor_tables(struct kvm_vm *vm)
{
void *addr;
- vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers),
- LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA);
+ vm->handlers = __vm_alloc(vm, sizeof(struct handlers),
+ LOONGARCH_GUEST_STACK_VADDR_MIN,
+ MEM_REGION_DATA);
addr = addr_gva2hva(vm, vm->handlers);
memset(addr, 0, vm->page_size);
@@ -222,7 +223,7 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector, handler_fn hand
handlers->exception_handlers[vector] = handler;
}
-uint32_t guest_get_vcpuid(void)
+u32 guest_get_vcpuid(void)
{
return csr_read(LOONGARCH_CSR_CPUID);
}
@@ -240,36 +241,45 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
va_start(ap, num);
for (i = 0; i < num; i++)
- regs.gpr[i + 4] = va_arg(ap, uint64_t);
+ regs.gpr[i + 4] = va_arg(ap, u64);
va_end(ap);
vcpu_regs_set(vcpu, &regs);
}
-static void loongarch_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
+static void loongarch_set_reg(struct kvm_vcpu *vcpu, u64 id, u64 val)
{
__vcpu_set_reg(vcpu, id, val);
}
-static void loongarch_get_csr(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
+static void loongarch_set_cpucfg(struct kvm_vcpu *vcpu, u64 id, u64 val)
{
- uint64_t csrid;
+ u64 cfgid;
+
+ cfgid = KVM_REG_LOONGARCH_CPUCFG | KVM_REG_SIZE_U64 | 8 * id;
+ __vcpu_set_reg(vcpu, cfgid, val);
+}
+
+static void loongarch_get_csr(struct kvm_vcpu *vcpu, u64 id, void *addr)
+{
+ u64 csrid;
csrid = KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | 8 * id;
__vcpu_get_reg(vcpu, csrid, addr);
}
-static void loongarch_set_csr(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
+static void loongarch_set_csr(struct kvm_vcpu *vcpu, u64 id, u64 val)
{
- uint64_t csrid;
+ u64 csrid;
csrid = KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | 8 * id;
__vcpu_set_reg(vcpu, csrid, val);
}
-static void loongarch_vcpu_setup(struct kvm_vcpu *vcpu)
+void loongarch_vcpu_setup(struct kvm_vcpu *vcpu)
{
int width;
+ unsigned int cfg;
unsigned long val;
struct kvm_vm *vm = vcpu->vm;
@@ -282,6 +292,9 @@ static void loongarch_vcpu_setup(struct kvm_vcpu *vcpu)
TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
}
+ cfg = read_cpucfg(LOONGARCH_CPUCFG6);
+ loongarch_set_cpucfg(vcpu, LOONGARCH_CPUCFG6, cfg);
+
/* kernel mode and page enable mode */
val = PLV_KERN | CSR_CRMD_PG;
loongarch_set_csr(vcpu, LOONGARCH_CSR_CRMD, val);
@@ -341,8 +354,8 @@ static void loongarch_vcpu_setup(struct kvm_vcpu *vcpu)
loongarch_set_csr(vcpu, LOONGARCH_CSR_STLBPGSIZE, PS_DEFAULT_SIZE);
/* LOONGARCH_CSR_KS1 is used for exception stack */
- val = __vm_vaddr_alloc(vm, vm->page_size,
- LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA);
+ val = __vm_alloc(vm, vm->page_size, LOONGARCH_GUEST_STACK_VADDR_MIN,
+ MEM_REGION_DATA);
TEST_ASSERT(val != 0, "No memory for exception stack");
val = val + vm->page_size;
loongarch_set_csr(vcpu, LOONGARCH_CSR_KS1, val);
@@ -356,23 +369,23 @@ static void loongarch_vcpu_setup(struct kvm_vcpu *vcpu)
loongarch_set_csr(vcpu, LOONGARCH_CSR_TMID, vcpu->id);
}
-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id)
{
size_t stack_size;
- uint64_t stack_vaddr;
+ u64 stack_gva;
struct kvm_regs regs;
struct kvm_vcpu *vcpu;
vcpu = __vm_vcpu_add(vm, vcpu_id);
stack_size = vm->page_size;
- stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
- LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA);
- TEST_ASSERT(stack_vaddr != 0, "No memory for vm stack");
+ stack_gva = __vm_alloc(vm, stack_size,
+ LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA);
+ TEST_ASSERT(stack_gva != 0, "No memory for vm stack");
loongarch_vcpu_setup(vcpu);
/* Setup guest general purpose registers */
vcpu_regs_get(vcpu, &regs);
- regs.gpr[3] = stack_vaddr + stack_size;
+ regs.gpr[3] = stack_gva + stack_size;
vcpu_regs_set(vcpu, &regs);
return vcpu;
@@ -384,6 +397,6 @@ void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
/* Setup guest PC register */
vcpu_regs_get(vcpu, &regs);
- regs.pc = (uint64_t)guest_code;
+ regs.pc = (u64)guest_code;
vcpu_regs_set(vcpu, &regs);
}
diff --git a/tools/testing/selftests/kvm/lib/loongarch/ucall.c b/tools/testing/selftests/kvm/lib/loongarch/ucall.c
index fc6cbb50573f..cd49a3440ead 100644
--- a/tools/testing/selftests/kvm/lib/loongarch/ucall.c
+++ b/tools/testing/selftests/kvm/lib/loongarch/ucall.c
@@ -9,17 +9,17 @@
* ucall_exit_mmio_addr holds per-VM values (global data is duplicated by each
* VM), it must not be accessed from host code.
*/
-vm_vaddr_t *ucall_exit_mmio_addr;
+gva_t *ucall_exit_mmio_addr;
-void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
+void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa)
{
- vm_vaddr_t mmio_gva = vm_vaddr_unused_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR);
+ gva_t mmio_gva = vm_unused_gva_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR);
virt_map(vm, mmio_gva, mmio_gpa, 1);
vm->ucall_mmio_addr = mmio_gpa;
- write_guest_global(vm, ucall_exit_mmio_addr, (vm_vaddr_t *)mmio_gva);
+ write_guest_global(vm, ucall_exit_mmio_addr, (gva_t *)mmio_gva);
}
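/*
 * Guest-side counterpart (a sketch, not shown in this hunk): the ucall
 * write goes through the GVA installed above and faults as MMIO, so KVM
 * exits to userspace with KVM_EXIT_MMIO at vm->ucall_mmio_addr, which
 * ucall_arch_get_ucall() below matches. Conceptually:
 *
 *	WRITE_ONCE(*ucall_exit_mmio_addr, (gva_t)uc);
 */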
void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
@@ -28,10 +28,10 @@ void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
if (run->exit_reason == KVM_EXIT_MMIO &&
run->mmio.phys_addr == vcpu->vm->ucall_mmio_addr) {
- TEST_ASSERT(run->mmio.is_write && run->mmio.len == sizeof(uint64_t),
+ TEST_ASSERT(run->mmio.is_write && run->mmio.len == sizeof(u64),
"Unexpected ucall exit mmio address access");
- return (void *)(*((uint64_t *)run->mmio.data));
+ return (void *)(*((u64 *)run->mmio.data));
}
return NULL;
diff --git a/tools/testing/selftests/kvm/lib/memstress.c b/tools/testing/selftests/kvm/lib/memstress.c
index 557c0a0a5658..6dcd15910a06 100644
--- a/tools/testing/selftests/kvm/lib/memstress.c
+++ b/tools/testing/selftests/kvm/lib/memstress.c
@@ -16,7 +16,7 @@ struct memstress_args memstress_args;
* Guest virtual memory offset of the testing memory slot.
* Must not conflict with identity mapped test code.
*/
-static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
+static u64 guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
struct vcpu_thread {
/* The index of the vCPU. */
@@ -44,15 +44,15 @@ static struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
* Continuously write to the first 8 bytes of each page in the
* specified region.
*/
-void memstress_guest_code(uint32_t vcpu_idx)
+void memstress_guest_code(u32 vcpu_idx)
{
struct memstress_args *args = &memstress_args;
struct memstress_vcpu_args *vcpu_args = &args->vcpu_args[vcpu_idx];
struct guest_random_state rand_state;
- uint64_t gva;
- uint64_t pages;
- uint64_t addr;
- uint64_t page;
+ gva_t gva;
+ u64 pages;
+ u64 addr;
+ u64 page;
int i;
rand_state = new_guest_random_state(guest_random_seed + vcpu_idx);
@@ -76,9 +76,9 @@ void memstress_guest_code(uint32_t vcpu_idx)
addr = gva + (page * args->guest_page_size);
if (__guest_random_bool(&rand_state, args->write_percent))
- *(uint64_t *)addr = 0x0123456789ABCDEF;
+ *(u64 *)addr = 0x0123456789ABCDEF;
else
- READ_ONCE(*(uint64_t *)addr);
+ READ_ONCE(*(u64 *)addr);
}
GUEST_SYNC(1);
@@ -87,7 +87,7 @@ void memstress_guest_code(uint32_t vcpu_idx)
void memstress_setup_vcpus(struct kvm_vm *vm, int nr_vcpus,
struct kvm_vcpu *vcpus[],
- uint64_t vcpu_memory_bytes,
+ u64 vcpu_memory_bytes,
bool partition_vcpu_memory_access)
{
struct memstress_args *args = &memstress_args;
@@ -122,15 +122,15 @@ void memstress_setup_vcpus(struct kvm_vm *vm, int nr_vcpus,
}
struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus,
- uint64_t vcpu_memory_bytes, int slots,
+ u64 vcpu_memory_bytes, int slots,
enum vm_mem_backing_src_type backing_src,
bool partition_vcpu_memory_access)
{
struct memstress_args *args = &memstress_args;
struct kvm_vm *vm;
- uint64_t guest_num_pages, slot0_pages = 0;
- uint64_t backing_src_pagesz = get_backing_src_pagesz(backing_src);
- uint64_t region_end_gfn;
+ u64 guest_num_pages, slot0_pages = 0;
+ u64 backing_src_pagesz = get_backing_src_pagesz(backing_src);
+ u64 region_end_gfn;
int i;
pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
@@ -196,18 +196,14 @@ struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus,
args->gpa = (region_end_gfn - guest_num_pages - 1) * args->guest_page_size;
args->gpa = align_down(args->gpa, backing_src_pagesz);
-#ifdef __s390x__
- /* Align to 1M (segment size) */
- args->gpa = align_down(args->gpa, 1 << 20);
-#endif
args->size = guest_num_pages * args->guest_page_size;
pr_info("guest physical test memory: [0x%lx, 0x%lx)\n",
args->gpa, args->gpa + args->size);
/* Add extra memory slots for testing */
for (i = 0; i < slots; i++) {
- uint64_t region_pages = guest_num_pages / slots;
- vm_paddr_t region_start = args->gpa + region_pages * args->guest_page_size * i;
+ u64 region_pages = guest_num_pages / slots;
+ gpa_t region_start = args->gpa + region_pages * args->guest_page_size * i;
vm_userspace_mem_region_add(vm, backing_src, region_start,
MEMSTRESS_MEM_SLOT_INDEX + i,
@@ -236,7 +232,7 @@ void memstress_destroy_vm(struct kvm_vm *vm)
kvm_vm_free(vm);
}
-void memstress_set_write_percent(struct kvm_vm *vm, uint32_t write_percent)
+void memstress_set_write_percent(struct kvm_vm *vm, u32 write_percent)
{
memstress_args.write_percent = write_percent;
sync_global_to_guest(vm, memstress_args.write_percent);
@@ -248,7 +244,7 @@ void memstress_set_random_access(struct kvm_vm *vm, bool random_access)
sync_global_to_guest(vm, memstress_args.random_access);
}
-uint64_t __weak memstress_nested_pages(int nr_vcpus)
+u64 __weak memstress_nested_pages(int nr_vcpus)
{
return 0;
}
@@ -353,7 +349,7 @@ void memstress_get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int sl
}
void memstress_clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
- int slots, uint64_t pages_per_slot)
+ int slots, u64 pages_per_slot)
{
int i;
@@ -364,7 +360,7 @@ void memstress_clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
}
}
-unsigned long **memstress_alloc_bitmaps(int slots, uint64_t pages_per_slot)
+unsigned long **memstress_alloc_bitmaps(int slots, u64 pages_per_slot)
{
unsigned long **bitmaps;
int i;
diff --git a/tools/testing/selftests/kvm/lib/riscv/processor.c b/tools/testing/selftests/kvm/lib/riscv/processor.c
index 51dd455ff52c..ded5429f3448 100644
--- a/tools/testing/selftests/kvm/lib/riscv/processor.c
+++ b/tools/testing/selftests/kvm/lib/riscv/processor.c
@@ -15,9 +15,9 @@
#define DEFAULT_RISCV_GUEST_STACK_VADDR_MIN 0xac0000
-static vm_vaddr_t exception_handlers;
+static gva_t exception_handlers;
-bool __vcpu_has_ext(struct kvm_vcpu *vcpu, uint64_t ext)
+bool __vcpu_has_ext(struct kvm_vcpu *vcpu, u64 ext)
{
unsigned long value = 0;
int ret;
@@ -27,32 +27,32 @@ bool __vcpu_has_ext(struct kvm_vcpu *vcpu, uint64_t ext)
return !ret && !!value;
}
-static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)
+static u64 pte_addr(struct kvm_vm *vm, u64 entry)
{
return ((entry & PGTBL_PTE_ADDR_MASK) >> PGTBL_PTE_ADDR_SHIFT) <<
PGTBL_PAGE_SIZE_SHIFT;
}
-static uint64_t ptrs_per_pte(struct kvm_vm *vm)
+static u64 ptrs_per_pte(struct kvm_vm *vm)
{
- return PGTBL_PAGE_SIZE / sizeof(uint64_t);
+ return PGTBL_PAGE_SIZE / sizeof(u64);
}
-static uint64_t pte_index_mask[] = {
+static u64 pte_index_mask[] = {
PGTBL_L0_INDEX_MASK,
PGTBL_L1_INDEX_MASK,
PGTBL_L2_INDEX_MASK,
PGTBL_L3_INDEX_MASK,
};
-static uint32_t pte_index_shift[] = {
+static u32 pte_index_shift[] = {
PGTBL_L0_INDEX_SHIFT,
PGTBL_L1_INDEX_SHIFT,
PGTBL_L2_INDEX_SHIFT,
PGTBL_L3_INDEX_SHIFT,
};
-static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level)
+static u64 pte_index(struct kvm_vm *vm, gva_t gva, int level)
{
TEST_ASSERT(level > -1,
"Negative page table level (%d) not possible", level);
@@ -75,26 +75,25 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm)
vm->mmu.pgd_created = true;
}
-void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)
{
- uint64_t *ptep, next_ppn;
+ u64 *ptep, next_ppn;
int level = vm->mmu.pgtable_levels - 1;
- TEST_ASSERT((vaddr % vm->page_size) == 0,
+ TEST_ASSERT((gva % vm->page_size) == 0,
"Virtual address not on page boundary,\n"
- " vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
- TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
- (vaddr >> vm->page_shift)),
- "Invalid virtual address, vaddr: 0x%lx", vaddr);
- TEST_ASSERT((paddr % vm->page_size) == 0,
+ " gva: 0x%lx vm->page_size: 0x%x", gva, vm->page_size);
+ TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),
+ "Invalid virtual address, gva: 0x%lx", gva);
+ TEST_ASSERT((gpa % vm->page_size) == 0,
"Physical address not on page boundary,\n"
- " paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
- TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
+ " gpa: 0x%lx vm->page_size: 0x%x", gpa, vm->page_size);
+ TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
"Physical address beyond maximum supported,\n"
- " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
- paddr, vm->max_gfn, vm->page_size);
+ " gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
+ gpa, vm->max_gfn, vm->page_size);
- ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pte_index(vm, vaddr, level) * 8;
+ ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pte_index(vm, gva, level) * 8;
if (!*ptep) {
next_ppn = vm_alloc_page_table(vm) >> PGTBL_PAGE_SIZE_SHIFT;
*ptep = (next_ppn << PGTBL_PTE_ADDR_SHIFT) |
@@ -104,7 +103,7 @@ void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
while (level > -1) {
ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) +
- pte_index(vm, vaddr, level) * 8;
+ pte_index(vm, gva, level) * 8;
if (!*ptep && level > 0) {
next_ppn = vm_alloc_page_table(vm) >>
PGTBL_PAGE_SIZE_SHIFT;
@@ -114,14 +113,14 @@ void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
level--;
}
- paddr = paddr >> PGTBL_PAGE_SIZE_SHIFT;
- *ptep = (paddr << PGTBL_PTE_ADDR_SHIFT) |
+ gpa = gpa >> PGTBL_PAGE_SIZE_SHIFT;
+ *ptep = (gpa << PGTBL_PTE_ADDR_SHIFT) |
PGTBL_PTE_PERM_MASK | PGTBL_PTE_VALID_MASK;
}
-vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
+gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva)
{
- uint64_t *ptep;
+ u64 *ptep;
int level = vm->mmu.pgtable_levels - 1;
if (!vm->mmu.pgd_created)
@@ -148,12 +147,12 @@ unmapped_gva:
exit(1);
}
-static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent,
- uint64_t page, int level)
+static void pte_dump(FILE *stream, struct kvm_vm *vm, u8 indent,
+ u64 page, int level)
{
#ifdef DEBUG
static const char *const type[] = { "pte", "pmd", "pud", "p4d"};
- uint64_t pte, *ptep;
+ u64 pte, *ptep;
if (level < 0)
return;
@@ -170,11 +169,11 @@ static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent,
#endif
}
-void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
+void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent)
{
struct kvm_mmu *mmu = &vm->mmu;
int level = mmu->pgtable_levels - 1;
- uint64_t pgd, *ptep;
+ u64 pgd, *ptep;
if (!mmu->pgd_created)
return;
@@ -233,7 +232,7 @@ void riscv_vcpu_mmu_setup(struct kvm_vcpu *vcpu)
vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(satp), satp);
}
-void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
+void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, u8 indent)
{
struct kvm_riscv_core core;
@@ -311,20 +310,20 @@ void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.pc), (unsigned long)guest_code);
}
-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id)
{
int r;
size_t stack_size;
- unsigned long stack_vaddr;
+ unsigned long stack_gva;
unsigned long current_gp = 0;
struct kvm_mp_state mps;
struct kvm_vcpu *vcpu;
stack_size = vm->page_size == 4096 ? DEFAULT_STACK_PGS * vm->page_size :
vm->page_size;
- stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
- DEFAULT_RISCV_GUEST_STACK_VADDR_MIN,
- MEM_REGION_DATA);
+ stack_gva = __vm_alloc(vm, stack_size,
+ DEFAULT_RISCV_GUEST_STACK_VADDR_MIN,
+ MEM_REGION_DATA);
vcpu = __vm_vcpu_add(vm, vcpu_id);
riscv_vcpu_mmu_setup(vcpu);
@@ -344,7 +343,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.gp), current_gp);
/* Setup stack pointer and program counter of guest */
- vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.sp), stack_vaddr + stack_size);
+ vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.sp), stack_gva + stack_size);
/* Setup sscratch for guest_get_vcpuid() */
vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(sscratch), vcpu_id);
@@ -358,7 +357,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
va_list ap;
- uint64_t id = RISCV_CORE_REG(regs.a0);
+ u64 id = RISCV_CORE_REG(regs.a0);
int i;
TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n"
@@ -393,7 +392,7 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
id = RISCV_CORE_REG(regs.a7);
break;
}
- vcpu_set_reg(vcpu, id, va_arg(ap, uint64_t));
+ vcpu_set_reg(vcpu, id, va_arg(ap, u64));
}
va_end(ap);
@@ -449,10 +448,10 @@ void vcpu_init_vector_tables(struct kvm_vcpu *vcpu)
void vm_init_vector_tables(struct kvm_vm *vm)
{
- vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers),
- vm->page_size, MEM_REGION_DATA);
+ vm->handlers = __vm_alloc(vm, sizeof(struct handlers), vm->page_size,
+ MEM_REGION_DATA);
- *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
+ *(gva_t *)addr_gva2hva(vm, (gva_t)(&exception_handlers)) = vm->handlers;
}
void vm_install_exception_handler(struct kvm_vm *vm, int vector, exception_handler_fn handler)
@@ -470,7 +469,7 @@ void vm_install_interrupt_handler(struct kvm_vm *vm, exception_handler_fn handle
handlers->exception_handlers[1][0] = handler;
}
-uint32_t guest_get_vcpuid(void)
+u32 guest_get_vcpuid(void)
{
return csr_read(CSR_SSCRATCH);
}
@@ -544,10 +543,10 @@ void kvm_selftest_arch_init(void)
unsigned long riscv64_get_satp_mode(void)
{
int kvm_fd, vm_fd, vcpu_fd, err;
- uint64_t val;
+ u64 val;
struct kvm_one_reg reg = {
.id = RISCV_CONFIG_REG(satp_mode),
- .addr = (uint64_t)&val,
+ .addr = (u64)&val,
};
kvm_fd = open_kvm_dev_path_or_exit();
@@ -566,3 +565,8 @@ unsigned long riscv64_get_satp_mode(void)
return val;
}
+
+bool kvm_arch_has_default_irqchip(void)
+{
+ return kvm_check_cap(KVM_CAP_IRQCHIP);
+}
diff --git a/tools/testing/selftests/kvm/lib/s390/diag318_test_handler.c b/tools/testing/selftests/kvm/lib/s390/diag318_test_handler.c
index 2c432fa164f1..f5480473f192 100644
--- a/tools/testing/selftests/kvm/lib/s390/diag318_test_handler.c
+++ b/tools/testing/selftests/kvm/lib/s390/diag318_test_handler.c
@@ -13,7 +13,7 @@
static void guest_code(void)
{
- uint64_t diag318_info = 0x12345678;
+ u64 diag318_info = 0x12345678;
asm volatile ("diag %0,0,0x318\n" : : "d" (diag318_info));
}
@@ -23,13 +23,13 @@ static void guest_code(void)
 * we create an ad-hoc VM here to handle the instruction and then extract the
* necessary data. It is up to the caller to decide what to do with that data.
*/
-static uint64_t diag318_handler(void)
+static u64 diag318_handler(void)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct kvm_run *run;
- uint64_t reg;
- uint64_t diag318_info;
+ u64 reg;
+ u64 diag318_info;
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
vcpu_run(vcpu);
@@ -51,9 +51,9 @@ static uint64_t diag318_handler(void)
return diag318_info;
}
-uint64_t get_diag318_info(void)
+u64 get_diag318_info(void)
{
- static uint64_t diag318_info;
+ static u64 diag318_info;
static bool printed_skip;
/*
diff --git a/tools/testing/selftests/kvm/lib/s390/facility.c b/tools/testing/selftests/kvm/lib/s390/facility.c
index d540812d911a..9a778054f07f 100644
--- a/tools/testing/selftests/kvm/lib/s390/facility.c
+++ b/tools/testing/selftests/kvm/lib/s390/facility.c
@@ -10,5 +10,5 @@
#include "facility.h"
-uint64_t stfl_doublewords[NB_STFL_DOUBLEWORDS];
+u64 stfl_doublewords[NB_STFL_DOUBLEWORDS];
bool stfle_flag;
diff --git a/tools/testing/selftests/kvm/lib/s390/processor.c b/tools/testing/selftests/kvm/lib/s390/processor.c
index 6a9a660413a7..a9adb3782b35 100644
--- a/tools/testing/selftests/kvm/lib/s390/processor.c
+++ b/tools/testing/selftests/kvm/lib/s390/processor.c
@@ -12,7 +12,7 @@
void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
- vm_paddr_t paddr;
+ gpa_t gpa;
TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x",
vm->page_size);
@@ -20,12 +20,12 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm)
if (vm->mmu.pgd_created)
return;
- paddr = vm_phy_pages_alloc(vm, PAGES_PER_REGION,
+ gpa = vm_phy_pages_alloc(vm, PAGES_PER_REGION,
KVM_GUEST_PAGE_TABLE_MIN_PADDR,
vm->memslots[MEM_REGION_PT]);
- memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size);
+ memset(addr_gpa2hva(vm, gpa), 0xff, PAGES_PER_REGION * vm->page_size);
- vm->mmu.pgd = paddr;
+ vm->mmu.pgd = gpa;
vm->mmu.pgd_created = true;
}
@@ -34,9 +34,9 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm)
* a page table (ri == 4). Returns a suitable region/segment table entry
* which points to the freshly allocated pages.
*/
-static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri)
+static u64 virt_alloc_region(struct kvm_vm *vm, int ri)
{
- uint64_t taddr;
+ u64 taddr;
taddr = vm_phy_pages_alloc(vm, ri < 4 ? PAGES_PER_REGION : 1,
KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
@@ -47,26 +47,24 @@ static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri)
| ((ri < 4 ? (PAGES_PER_REGION - 1) : 0) & REGION_ENTRY_LENGTH);
}
-void virt_arch_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa)
+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)
{
int ri, idx;
- uint64_t *entry;
+ u64 *entry;
TEST_ASSERT((gva % vm->page_size) == 0,
- "Virtual address not on page boundary,\n"
- " vaddr: 0x%lx vm->page_size: 0x%x",
- gva, vm->page_size);
- TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
- (gva >> vm->page_shift)),
- "Invalid virtual address, vaddr: 0x%lx",
- gva);
+ "Virtual address not on page boundary,\n"
+ " gva: 0x%lx vm->page_size: 0x%x",
+ gva, vm->page_size);
+ TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),
+ "Invalid virtual address, gva: 0x%lx", gva);
TEST_ASSERT((gpa % vm->page_size) == 0,
"Physical address not on page boundary,\n"
- " paddr: 0x%lx vm->page_size: 0x%x",
+ " gpa: 0x%lx vm->page_size: 0x%x",
		    gpa, vm->page_size);
TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
"Physical address beyond beyond maximum supported,\n"
- " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
+ " gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
gva, vm->max_gfn, vm->page_size);
/* Walk through region and segment tables */
@@ -86,10 +84,10 @@ void virt_arch_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa)
entry[idx] = gpa;
}
-vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
+gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva)
{
int ri, idx;
- uint64_t *entry;
+ u64 *entry;
TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x",
vm->page_size);
@@ -111,10 +109,10 @@ vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
return (entry[idx] & ~0xffful) + (gva & 0xffful);
}
-static void virt_dump_ptes(FILE *stream, struct kvm_vm *vm, uint8_t indent,
- uint64_t ptea_start)
+static void virt_dump_ptes(FILE *stream, struct kvm_vm *vm, u8 indent,
+ u64 ptea_start)
{
- uint64_t *pte, ptea;
+ u64 *pte, ptea;
for (ptea = ptea_start; ptea < ptea_start + 0x100 * 8; ptea += 8) {
pte = addr_gpa2hva(vm, ptea);
@@ -125,10 +123,10 @@ static void virt_dump_ptes(FILE *stream, struct kvm_vm *vm, uint8_t indent,
}
}
-static void virt_dump_region(FILE *stream, struct kvm_vm *vm, uint8_t indent,
- uint64_t reg_tab_addr)
+static void virt_dump_region(FILE *stream, struct kvm_vm *vm, u8 indent,
+ u64 reg_tab_addr)
{
- uint64_t addr, *entry;
+ u64 addr, *entry;
for (addr = reg_tab_addr; addr < reg_tab_addr + 0x400 * 8; addr += 8) {
entry = addr_gpa2hva(vm, addr);
@@ -147,7 +145,7 @@ static void virt_dump_region(FILE *stream, struct kvm_vm *vm, uint8_t indent,
}
}
-void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
+void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent)
{
if (!vm->mmu.pgd_created)
return;
@@ -160,10 +158,10 @@ void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
vcpu->run->psw_addr = (uintptr_t)guest_code;
}
-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id)
{
size_t stack_size = DEFAULT_STACK_PGS * getpagesize();
- uint64_t stack_vaddr;
+ u64 stack_gva;
struct kvm_regs regs;
struct kvm_sregs sregs;
struct kvm_vcpu *vcpu;
@@ -171,15 +169,14 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x",
vm->page_size);
- stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
- DEFAULT_GUEST_STACK_VADDR_MIN,
- MEM_REGION_DATA);
+ stack_gva = __vm_alloc(vm, stack_size, DEFAULT_GUEST_STACK_VADDR_MIN,
+ MEM_REGION_DATA);
vcpu = __vm_vcpu_add(vm, vcpu_id);
/* Setup guest registers */
vcpu_regs_get(vcpu, &regs);
- regs.gprs[15] = stack_vaddr + (DEFAULT_STACK_PGS * getpagesize()) - 160;
+ regs.gprs[15] = stack_gva + (DEFAULT_STACK_PGS * getpagesize()) - 160;
vcpu_regs_set(vcpu, &regs);
vcpu_sregs_get(vcpu, &sregs);
@@ -206,13 +203,13 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
vcpu_regs_get(vcpu, &regs);
for (i = 0; i < num; i++)
- regs.gprs[i + 2] = va_arg(ap, uint64_t);
+ regs.gprs[i + 2] = va_arg(ap, u64);
vcpu_regs_set(vcpu, &regs);
va_end(ap);
}
-void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
+void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, u8 indent)
{
fprintf(stream, "%*spstate: psw: 0x%.16llx:0x%.16llx\n",
indent, "", vcpu->run->psw_mask, vcpu->run->psw_addr);
diff --git a/tools/testing/selftests/kvm/lib/sparsebit.c b/tools/testing/selftests/kvm/lib/sparsebit.c
index a99188f87a38..4d845000de15 100644
--- a/tools/testing/selftests/kvm/lib/sparsebit.c
+++ b/tools/testing/selftests/kvm/lib/sparsebit.c
@@ -76,11 +76,11 @@
* the use of a binary-search tree, where each node contains at least
* the following members:
*
- * typedef uint64_t sparsebit_idx_t;
- * typedef uint64_t sparsebit_num_t;
+ * typedef u64 sparsebit_idx_t;
+ * typedef u64 sparsebit_num_t;
*
* sparsebit_idx_t idx;
- * uint32_t mask;
+ * u32 mask;
* sparsebit_num_t num_after;
*
* The idx member contains the bit index of the first bit described by this
@@ -162,7 +162,7 @@
#define DUMP_LINE_MAX 100 /* Does not include indent amount */
-typedef uint32_t mask_t;
+typedef u32 mask_t;
#define MASK_BITS (sizeof(mask_t) * CHAR_BIT)
struct node {
@@ -2056,9 +2056,9 @@ unsigned char get8(void)
return ch;
}
-uint64_t get64(void)
+u64 get64(void)
{
- uint64_t x;
+ u64 x;
x = get8();
x = (x << 8) | get8();
@@ -2074,9 +2074,9 @@ int main(void)
{
s = sparsebit_alloc();
for (;;) {
- uint8_t op = get8() & 0xf;
- uint64_t first = get64();
- uint64_t last = get64();
+ u8 op = get8() & 0xf;
+ u64 first = get64();
+ u64 last = get64();
operate(op, first, last);
}
diff --git a/tools/testing/selftests/kvm/lib/test_util.c b/tools/testing/selftests/kvm/lib/test_util.c
index 8a1848586a85..bab1bd2b775b 100644
--- a/tools/testing/selftests/kvm/lib/test_util.c
+++ b/tools/testing/selftests/kvm/lib/test_util.c
@@ -30,15 +30,15 @@ void __attribute__((used)) expect_sigbus_handler(int signum)
* Park-Miller LCG using standard constants.
*/
-struct guest_random_state new_guest_random_state(uint32_t seed)
+struct guest_random_state new_guest_random_state(u32 seed)
{
struct guest_random_state s = {.seed = seed};
return s;
}
-uint32_t guest_random_u32(struct guest_random_state *state)
+u32 guest_random_u32(struct guest_random_state *state)
{
- state->seed = (uint64_t)state->seed * 48271 % ((uint32_t)(1 << 31) - 1);
+ state->seed = (u64)state->seed * 48271 % ((u32)(1 << 31) - 1);
return state->seed;
}
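/*
 * Editorial note: the update above is the classic MINSTD recurrence,
 *
 *	seed = (seed * 48271) mod (2^31 - 1),
 *
 * with the product widened to 64 bits so it cannot overflow before the
 * modulo reduction (48271 * (2^31 - 2) < 2^48).
 */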
@@ -83,12 +83,12 @@ size_t parse_size(const char *size)
return base << shift;
}
-int64_t timespec_to_ns(struct timespec ts)
+s64 timespec_to_ns(struct timespec ts)
{
- return (int64_t)ts.tv_nsec + 1000000000LL * (int64_t)ts.tv_sec;
+ return (s64)ts.tv_nsec + 1000000000LL * (s64)ts.tv_sec;
}
-struct timespec timespec_add_ns(struct timespec ts, int64_t ns)
+struct timespec timespec_add_ns(struct timespec ts, s64 ns)
{
struct timespec res;
@@ -101,15 +101,15 @@ struct timespec timespec_add_ns(struct timespec ts, int64_t ns)
struct timespec timespec_add(struct timespec ts1, struct timespec ts2)
{
- int64_t ns1 = timespec_to_ns(ts1);
- int64_t ns2 = timespec_to_ns(ts2);
+ s64 ns1 = timespec_to_ns(ts1);
+ s64 ns2 = timespec_to_ns(ts2);
return timespec_add_ns((struct timespec){0}, ns1 + ns2);
}
struct timespec timespec_sub(struct timespec ts1, struct timespec ts2)
{
- int64_t ns1 = timespec_to_ns(ts1);
- int64_t ns2 = timespec_to_ns(ts2);
+ s64 ns1 = timespec_to_ns(ts1);
+ s64 ns2 = timespec_to_ns(ts2);
return timespec_add_ns((struct timespec){0}, ns1 - ns2);
}
@@ -123,7 +123,7 @@ struct timespec timespec_elapsed(struct timespec start)
struct timespec timespec_div(struct timespec ts, int divisor)
{
- int64_t ns = timespec_to_ns(ts) / divisor;
+ s64 ns = timespec_to_ns(ts) / divisor;
return timespec_add_ns((struct timespec){0}, ns);
}
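/*
 * Typical measurement pattern built from these helpers (a sketch; "start",
 * "ts_diff", "avg", and "iterations" are hypothetical locals):
 *
 *	clock_gettime(CLOCK_MONOTONIC, &start);
 *	run_workload();
 *	ts_diff = timespec_elapsed(start);
 *	avg = timespec_div(ts_diff, iterations);
 */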
@@ -225,7 +225,7 @@ size_t get_def_hugetlb_pagesz(void)
#define ANON_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS)
#define ANON_HUGE_FLAGS (ANON_FLAGS | MAP_HUGETLB)
-const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i)
+const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(u32 i)
{
static const struct vm_mem_backing_src_alias aliases[] = {
[VM_MEM_SRC_ANONYMOUS] = {
@@ -317,9 +317,9 @@ const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i)
#define MAP_HUGE_PAGE_SIZE(x) (1ULL << ((x >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK))
-size_t get_backing_src_pagesz(uint32_t i)
+size_t get_backing_src_pagesz(u32 i)
{
- uint32_t flag = vm_mem_backing_src_alias(i)->flag;
+ u32 flag = vm_mem_backing_src_alias(i)->flag;
switch (i) {
case VM_MEM_SRC_ANONYMOUS:
@@ -335,7 +335,7 @@ size_t get_backing_src_pagesz(uint32_t i)
}
}
-bool is_backing_src_hugetlb(uint32_t i)
+bool is_backing_src_hugetlb(u32 i)
{
return !!(vm_mem_backing_src_alias(i)->flag & MAP_HUGETLB);
}
diff --git a/tools/testing/selftests/kvm/lib/ucall_common.c b/tools/testing/selftests/kvm/lib/ucall_common.c
index 42151e571953..029ce21f9f2f 100644
--- a/tools/testing/selftests/kvm/lib/ucall_common.c
+++ b/tools/testing/selftests/kvm/lib/ucall_common.c
@@ -14,7 +14,7 @@ struct ucall_header {
struct ucall ucalls[KVM_MAX_VCPUS];
};
-int ucall_nr_pages_required(uint64_t page_size)
+int ucall_nr_pages_required(u64 page_size)
{
return align_up(sizeof(struct ucall_header), page_size) / page_size;
}
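/*
 * Worked example (editorial): the header embeds one struct ucall per
 * possible vCPU, so the result is align_up(sizeof(struct ucall_header),
 * page_size) / page_size; e.g. a 40 KiB header with 4 KiB pages needs
 * 10 pages.
 */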
@@ -25,16 +25,16 @@ int ucall_nr_pages_required(uint64_t page_size)
*/
static struct ucall_header *ucall_pool;
-void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
+void ucall_init(struct kvm_vm *vm, gpa_t mmio_gpa)
{
struct ucall_header *hdr;
struct ucall *uc;
- vm_vaddr_t vaddr;
+ gva_t gva;
int i;
- vaddr = vm_vaddr_alloc_shared(vm, sizeof(*hdr), KVM_UTIL_MIN_VADDR,
- MEM_REGION_DATA);
- hdr = (struct ucall_header *)addr_gva2hva(vm, vaddr);
+ gva = vm_alloc_shared(vm, sizeof(*hdr), KVM_UTIL_MIN_VADDR,
+ MEM_REGION_DATA);
+ hdr = (struct ucall_header *)addr_gva2hva(vm, gva);
memset(hdr, 0, sizeof(*hdr));
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
@@ -42,7 +42,7 @@ void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
uc->hva = uc;
}
- write_guest_global(vm, ucall_pool, (struct ucall_header *)vaddr);
+ write_guest_global(vm, ucall_pool, (struct ucall_header *)gva);
ucall_arch_init(vm, mmio_gpa);
}
@@ -79,7 +79,7 @@ static void ucall_free(struct ucall *uc)
clear_bit(uc - ucall_pool->ucalls, ucall_pool->in_use);
}
-void ucall_assert(uint64_t cmd, const char *exp, const char *file,
+void ucall_assert(u64 cmd, const char *exp, const char *file,
unsigned int line, const char *fmt, ...)
{
struct ucall *uc;
@@ -88,20 +88,20 @@ void ucall_assert(uint64_t cmd, const char *exp, const char *file,
uc = ucall_alloc();
uc->cmd = cmd;
- WRITE_ONCE(uc->args[GUEST_ERROR_STRING], (uint64_t)(exp));
- WRITE_ONCE(uc->args[GUEST_FILE], (uint64_t)(file));
+ WRITE_ONCE(uc->args[GUEST_ERROR_STRING], (u64)(exp));
+ WRITE_ONCE(uc->args[GUEST_FILE], (u64)(file));
WRITE_ONCE(uc->args[GUEST_LINE], line);
va_start(va, fmt);
guest_vsnprintf(uc->buffer, UCALL_BUFFER_LEN, fmt, va);
va_end(va);
- ucall_arch_do_ucall((vm_vaddr_t)uc->hva);
+ ucall_arch_do_ucall((gva_t)uc->hva);
ucall_free(uc);
}
-void ucall_fmt(uint64_t cmd, const char *fmt, ...)
+void ucall_fmt(u64 cmd, const char *fmt, ...)
{
struct ucall *uc;
va_list va;
@@ -113,12 +113,12 @@ void ucall_fmt(uint64_t cmd, const char *fmt, ...)
guest_vsnprintf(uc->buffer, UCALL_BUFFER_LEN, fmt, va);
va_end(va);
- ucall_arch_do_ucall((vm_vaddr_t)uc->hva);
+ ucall_arch_do_ucall((gva_t)uc->hva);
ucall_free(uc);
}
-void ucall(uint64_t cmd, int nargs, ...)
+void ucall(u64 cmd, int nargs, ...)
{
struct ucall *uc;
va_list va;
@@ -132,15 +132,15 @@ void ucall(uint64_t cmd, int nargs, ...)
va_start(va, nargs);
for (i = 0; i < nargs; ++i)
- WRITE_ONCE(uc->args[i], va_arg(va, uint64_t));
+ WRITE_ONCE(uc->args[i], va_arg(va, u64));
va_end(va);
- ucall_arch_do_ucall((vm_vaddr_t)uc->hva);
+ ucall_arch_do_ucall((gva_t)uc->hva);
ucall_free(uc);
}
-uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
+u64 get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
{
struct ucall ucall;
void *addr;
diff --git a/tools/testing/selftests/kvm/lib/userfaultfd_util.c b/tools/testing/selftests/kvm/lib/userfaultfd_util.c
index 5bde176cedd5..ef8d76f71f83 100644
--- a/tools/testing/selftests/kvm/lib/userfaultfd_util.c
+++ b/tools/testing/selftests/kvm/lib/userfaultfd_util.c
@@ -27,7 +27,7 @@ static void *uffd_handler_thread_fn(void *arg)
{
struct uffd_reader_args *reader_args = (struct uffd_reader_args *)arg;
int uffd = reader_args->uffd;
- int64_t pages = 0;
+ s64 pages = 0;
struct timespec start;
struct timespec ts_diff;
struct epoll_event evt;
@@ -100,8 +100,8 @@ static void *uffd_handler_thread_fn(void *arg)
}
struct uffd_desc *uffd_setup_demand_paging(int uffd_mode, useconds_t delay,
- void *hva, uint64_t len,
- uint64_t num_readers,
+ void *hva, u64 len,
+ u64 num_readers,
uffd_handler_t handler)
{
struct uffd_desc *uffd_desc;
@@ -109,7 +109,7 @@ struct uffd_desc *uffd_setup_demand_paging(int uffd_mode, useconds_t delay,
int uffd;
struct uffdio_api uffdio_api;
struct uffdio_register uffdio_register;
- uint64_t expected_ioctls = ((uint64_t) 1) << _UFFDIO_COPY;
+ u64 expected_ioctls = ((u64)1) << _UFFDIO_COPY;
int ret, i;
PER_PAGE_DEBUG("Userfaultfd %s mode, faults resolved with %s\n",
@@ -132,7 +132,7 @@ struct uffd_desc *uffd_setup_demand_paging(int uffd_mode, useconds_t delay,
/* In order to get minor faults, prefault via the alias. */
if (is_minor)
- expected_ioctls = ((uint64_t) 1) << _UFFDIO_CONTINUE;
+ expected_ioctls = ((u64)1) << _UFFDIO_CONTINUE;
uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
TEST_ASSERT(uffd >= 0, "uffd creation failed, errno: %d", errno);
@@ -141,9 +141,9 @@ struct uffd_desc *uffd_setup_demand_paging(int uffd_mode, useconds_t delay,
uffdio_api.features = 0;
TEST_ASSERT(ioctl(uffd, UFFDIO_API, &uffdio_api) != -1,
"ioctl UFFDIO_API failed: %" PRIu64,
- (uint64_t)uffdio_api.api);
+ (u64)uffdio_api.api);
- uffdio_register.range.start = (uint64_t)hva;
+ uffdio_register.range.start = (u64)hva;
uffdio_register.range.len = len;
uffdio_register.mode = uffd_mode;
TEST_ASSERT(ioctl(uffd, UFFDIO_REGISTER, &uffdio_register) != -1,
diff --git a/tools/testing/selftests/kvm/lib/x86/apic.c b/tools/testing/selftests/kvm/lib/x86/apic.c
index 89153a333e83..5182fd0d6a76 100644
--- a/tools/testing/selftests/kvm/lib/x86/apic.c
+++ b/tools/testing/selftests/kvm/lib/x86/apic.c
@@ -14,7 +14,7 @@ void apic_disable(void)
void xapic_enable(void)
{
- uint64_t val = rdmsr(MSR_IA32_APICBASE);
+ u64 val = rdmsr(MSR_IA32_APICBASE);
/* Per SDM: to enable xAPIC when in x2APIC must first disable APIC */
if (val & MSR_IA32_APICBASE_EXTD) {
diff --git a/tools/testing/selftests/kvm/lib/x86/hyperv.c b/tools/testing/selftests/kvm/lib/x86/hyperv.c
index 15bc8cd583aa..d200c5c26e2e 100644
--- a/tools/testing/selftests/kvm/lib/x86/hyperv.c
+++ b/tools/testing/selftests/kvm/lib/x86/hyperv.c
@@ -76,23 +76,23 @@ bool kvm_hv_cpu_has(struct kvm_x86_cpu_feature feature)
}
struct hyperv_test_pages *vcpu_alloc_hyperv_test_pages(struct kvm_vm *vm,
- vm_vaddr_t *p_hv_pages_gva)
+ gva_t *p_hv_pages_gva)
{
- vm_vaddr_t hv_pages_gva = vm_vaddr_alloc_page(vm);
+ gva_t hv_pages_gva = vm_alloc_page(vm);
struct hyperv_test_pages *hv = addr_gva2hva(vm, hv_pages_gva);
/* Setup of a region of guest memory for the VP Assist page. */
- hv->vp_assist = (void *)vm_vaddr_alloc_page(vm);
+ hv->vp_assist = (void *)vm_alloc_page(vm);
hv->vp_assist_hva = addr_gva2hva(vm, (uintptr_t)hv->vp_assist);
hv->vp_assist_gpa = addr_gva2gpa(vm, (uintptr_t)hv->vp_assist);
/* Setup of a region of guest memory for the partition assist page. */
- hv->partition_assist = (void *)vm_vaddr_alloc_page(vm);
+ hv->partition_assist = (void *)vm_alloc_page(vm);
hv->partition_assist_hva = addr_gva2hva(vm, (uintptr_t)hv->partition_assist);
hv->partition_assist_gpa = addr_gva2gpa(vm, (uintptr_t)hv->partition_assist);
/* Setup of a region of guest memory for the enlightened VMCS. */
- hv->enlightened_vmcs = (void *)vm_vaddr_alloc_page(vm);
+ hv->enlightened_vmcs = (void *)vm_alloc_page(vm);
hv->enlightened_vmcs_hva = addr_gva2hva(vm, (uintptr_t)hv->enlightened_vmcs);
hv->enlightened_vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)hv->enlightened_vmcs);
@@ -100,9 +100,9 @@ struct hyperv_test_pages *vcpu_alloc_hyperv_test_pages(struct kvm_vm *vm,
return hv;
}
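/*
 * A usage sketch (not part of this patch): tests allocate the pages on the
 * host and pass the GVA into the guest, e.g.:
 *
 *	gva_t hv_gva;
 *
 *	vcpu_alloc_hyperv_test_pages(vm, &hv_gva);
 *	vcpu_args_set(vcpu, 1, hv_gva);
 */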
-int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist)
+int enable_vp_assist(u64 vp_assist_pa, void *vp_assist)
{
- uint64_t val = (vp_assist_pa & HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK) |
+ u64 val = (vp_assist_pa & HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK) |
HV_X64_MSR_VP_ASSIST_PAGE_ENABLE;
wrmsr(HV_X64_MSR_VP_ASSIST_PAGE, val);
diff --git a/tools/testing/selftests/kvm/lib/x86/memstress.c b/tools/testing/selftests/kvm/lib/x86/memstress.c
index f53414ba7103..61cf952cd2dc 100644
--- a/tools/testing/selftests/kvm/lib/x86/memstress.c
+++ b/tools/testing/selftests/kvm/lib/x86/memstress.c
@@ -16,7 +16,7 @@
#include "svm_util.h"
#include "vmx.h"
-void memstress_l2_guest_code(uint64_t vcpu_id)
+void memstress_l2_guest_code(u64 vcpu_id)
{
memstress_guest_code(vcpu_id);
vmcall();
@@ -32,7 +32,7 @@ __asm__(
#define L2_GUEST_STACK_SIZE 64
-static void l1_vmx_code(struct vmx_pages *vmx, uint64_t vcpu_id)
+static void l1_vmx_code(struct vmx_pages *vmx, u64 vcpu_id)
{
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
unsigned long *rsp;
@@ -51,7 +51,7 @@ static void l1_vmx_code(struct vmx_pages *vmx, uint64_t vcpu_id)
GUEST_DONE();
}
-static void l1_svm_code(struct svm_test_data *svm, uint64_t vcpu_id)
+static void l1_svm_code(struct svm_test_data *svm, u64 vcpu_id)
{
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
unsigned long *rsp;
@@ -67,7 +67,7 @@ static void l1_svm_code(struct svm_test_data *svm, uint64_t vcpu_id)
}
-static void memstress_l1_guest_code(void *data, uint64_t vcpu_id)
+static void memstress_l1_guest_code(void *data, u64 vcpu_id)
{
if (this_cpu_has(X86_FEATURE_VMX))
l1_vmx_code(data, vcpu_id);
@@ -75,7 +75,7 @@ static void memstress_l1_guest_code(void *data, uint64_t vcpu_id)
l1_svm_code(data, vcpu_id);
}
-uint64_t memstress_nested_pages(int nr_vcpus)
+u64 memstress_nested_pages(int nr_vcpus)
{
/*
* 513 page tables is enough to identity-map 256 TiB of L2 with 1G
@@ -87,7 +87,7 @@ uint64_t memstress_nested_pages(int nr_vcpus)
static void memstress_setup_ept_mappings(struct kvm_vm *vm)
{
- uint64_t start, end;
+ u64 start, end;
/*
* Identity map the first 4G and the test region with 1G pages so that
@@ -104,7 +104,7 @@ static void memstress_setup_ept_mappings(struct kvm_vm *vm)
void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[])
{
struct kvm_regs regs;
- vm_vaddr_t nested_gva;
+ gva_t nested_gva;
int vcpu_id;
TEST_REQUIRE(kvm_cpu_has_tdp());
diff --git a/tools/testing/selftests/kvm/lib/x86/pmu.c b/tools/testing/selftests/kvm/lib/x86/pmu.c
index 34cb57d1d671..0851b74b4e46 100644
--- a/tools/testing/selftests/kvm/lib/x86/pmu.c
+++ b/tools/testing/selftests/kvm/lib/x86/pmu.c
@@ -11,7 +11,7 @@
#include "processor.h"
#include "pmu.h"
-const uint64_t intel_pmu_arch_events[] = {
+const u64 intel_pmu_arch_events[] = {
INTEL_ARCH_CPU_CYCLES,
INTEL_ARCH_INSTRUCTIONS_RETIRED,
INTEL_ARCH_REFERENCE_CYCLES,
@@ -28,7 +28,7 @@ const uint64_t intel_pmu_arch_events[] = {
};
kvm_static_assert(ARRAY_SIZE(intel_pmu_arch_events) == NR_INTEL_ARCH_EVENTS);
-const uint64_t amd_pmu_zen_events[] = {
+const u64 amd_pmu_zen_events[] = {
AMD_ZEN_CORE_CYCLES,
AMD_ZEN_INSTRUCTIONS_RETIRED,
AMD_ZEN_BRANCHES_RETIRED,
@@ -50,7 +50,7 @@ kvm_static_assert(ARRAY_SIZE(amd_pmu_zen_events) == NR_AMD_ZEN_EVENTS);
* be overcounted on certain instructions, but for Clearwater Forest
* only the "Instruction Retired" event is overcounted on those instructions.
*/
-static uint64_t get_pmu_errata(void)
+static u64 get_pmu_errata(void)
{
if (!this_cpu_is_intel())
return 0;
@@ -72,7 +72,7 @@ static uint64_t get_pmu_errata(void)
}
}
-uint64_t pmu_errata_mask;
+u64 pmu_errata_mask;
void kvm_init_pmu_errata(void)
{
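[Editor's note: pmu_errata_mask is synced into the guest (see kvm_arch_vm_post_create() later in this patch) so guest code can gate count expectations on known overcount errata. A hypothetical consumer might look like the sketch below; the erratum bit name and encoding are illustrative only, not from this patch.]

	#include <stdbool.h>
	#include <stdint.h>

	/* Hypothetical erratum bit; name and encoding are illustrative. */
	#define PMU_ERRATUM_INSN_RETIRED_OVERCOUNT (1ULL << 0)

	static uint64_t pmu_errata_mask;	/* stand-in for the selftest global */

	static bool pmu_has_erratum(uint64_t erratum)
	{
		return pmu_errata_mask & erratum;
	}

	int main(void)
	{
		pmu_errata_mask = PMU_ERRATUM_INSN_RETIRED_OVERCOUNT;
		/* A test would relax exact-count assertions when the bit is set. */
		return pmu_has_erratum(PMU_ERRATUM_INSN_RETIRED_OVERCOUNT) ? 0 : 1;
	}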
diff --git a/tools/testing/selftests/kvm/lib/x86/processor.c b/tools/testing/selftests/kvm/lib/x86/processor.c
index 23a44941e283..b51467d70f6e 100644
--- a/tools/testing/selftests/kvm/lib/x86/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86/processor.c
@@ -21,11 +21,13 @@
#define KERNEL_DS 0x10
#define KERNEL_TSS 0x18
-vm_vaddr_t exception_handlers;
+gva_t exception_handlers;
bool host_cpu_is_amd;
bool host_cpu_is_intel;
+bool host_cpu_is_hygon;
+bool host_cpu_is_amd_compatible;
bool is_forced_emulation_enabled;
-uint64_t guest_tsc_khz;
+u64 guest_tsc_khz;
const char *ex_str(int vector)
{
@@ -60,7 +62,7 @@ const char *ex_str(int vector)
}
}
-static void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent)
+static void regs_dump(FILE *stream, struct kvm_regs *regs, u8 indent)
{
fprintf(stream, "%*srax: 0x%.16llx rbx: 0x%.16llx "
"rcx: 0x%.16llx rdx: 0x%.16llx\n",
@@ -84,7 +86,7 @@ static void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent)
}
static void segment_dump(FILE *stream, struct kvm_segment *segment,
- uint8_t indent)
+ u8 indent)
{
fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.8x "
"selector: 0x%.4x type: 0x%.2x\n",
@@ -101,7 +103,7 @@ static void segment_dump(FILE *stream, struct kvm_segment *segment,
}
static void dtable_dump(FILE *stream, struct kvm_dtable *dtable,
- uint8_t indent)
+ u8 indent)
{
fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.4x "
"padding: 0x%.4x 0x%.4x 0x%.4x\n",
@@ -109,7 +111,7 @@ static void dtable_dump(FILE *stream, struct kvm_dtable *dtable,
dtable->padding[0], dtable->padding[1], dtable->padding[2]);
}
-static void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent)
+static void sregs_dump(FILE *stream, struct kvm_sregs *sregs, u8 indent)
{
unsigned int i;
@@ -205,37 +207,37 @@ void tdp_mmu_init(struct kvm_vm *vm, int pgtable_levels,
}
static void *virt_get_pte(struct kvm_vm *vm, struct kvm_mmu *mmu,
- uint64_t *parent_pte, uint64_t vaddr, int level)
+ u64 *parent_pte, gva_t gva, int level)
{
- uint64_t pt_gpa = PTE_GET_PA(*parent_pte);
- uint64_t *page_table = addr_gpa2hva(vm, pt_gpa);
- int index = (vaddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu;
+ u64 pt_gpa = PTE_GET_PA(*parent_pte);
+ u64 *page_table = addr_gpa2hva(vm, pt_gpa);
+ int index = (gva >> PG_LEVEL_SHIFT(level)) & 0x1ffu;
TEST_ASSERT((*parent_pte == mmu->pgd) || is_present_pte(mmu, parent_pte),
"Parent PTE (level %d) not PRESENT for gva: 0x%08lx",
- level + 1, vaddr);
+ level + 1, gva);
return &page_table[index];
}
-static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
- struct kvm_mmu *mmu,
- uint64_t *parent_pte,
- uint64_t vaddr,
- uint64_t paddr,
- int current_level,
- int target_level)
+static u64 *virt_create_upper_pte(struct kvm_vm *vm,
+ struct kvm_mmu *mmu,
+ u64 *parent_pte,
+ gva_t gva,
+ gpa_t gpa,
+ int current_level,
+ int target_level)
{
- uint64_t *pte = virt_get_pte(vm, mmu, parent_pte, vaddr, current_level);
+ u64 *pte = virt_get_pte(vm, mmu, parent_pte, gva, current_level);
- paddr = vm_untag_gpa(vm, paddr);
+ gpa = vm_untag_gpa(vm, gpa);
if (!is_present_pte(mmu, pte)) {
*pte = PTE_PRESENT_MASK(mmu) | PTE_READABLE_MASK(mmu) |
PTE_WRITABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu) |
PTE_ALWAYS_SET_MASK(mmu);
if (current_level == target_level)
- *pte |= PTE_HUGE_MASK(mmu) | (paddr & PHYSICAL_PAGE_MASK);
+ *pte |= PTE_HUGE_MASK(mmu) | (gpa & PHYSICAL_PAGE_MASK);
else
*pte |= vm_alloc_page_table(vm) & PHYSICAL_PAGE_MASK;
} else {
@@ -245,39 +247,39 @@ static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
* this level.
*/
TEST_ASSERT(current_level != target_level,
- "Cannot create hugepage at level: %u, vaddr: 0x%lx",
- current_level, vaddr);
+ "Cannot create hugepage at level: %u, gva: 0x%lx",
+ current_level, gva);
TEST_ASSERT(!is_huge_pte(mmu, pte),
- "Cannot create page table at level: %u, vaddr: 0x%lx",
- current_level, vaddr);
+ "Cannot create page table at level: %u, gva: 0x%lx",
+ current_level, gva);
}
return pte;
}
-void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, uint64_t vaddr,
- uint64_t paddr, int level)
+void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, gva_t gva,
+ gpa_t gpa, int level)
{
- const uint64_t pg_size = PG_LEVEL_SIZE(level);
- uint64_t *pte = &mmu->pgd;
+ const u64 pg_size = PG_LEVEL_SIZE(level);
+ u64 *pte = &mmu->pgd;
int current_level;
TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
"Unknown or unsupported guest mode: 0x%x", vm->mode);
- TEST_ASSERT((vaddr % pg_size) == 0,
+ TEST_ASSERT((gva % pg_size) == 0,
"Virtual address not aligned,\n"
- "vaddr: 0x%lx page size: 0x%lx", vaddr, pg_size);
- TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (vaddr >> vm->page_shift)),
- "Invalid virtual address, vaddr: 0x%lx", vaddr);
- TEST_ASSERT((paddr % pg_size) == 0,
+ "gva: 0x%lx page size: 0x%lx", gva, pg_size);
+ TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),
+ "Invalid virtual address, gva: 0x%lx", gva);
+ TEST_ASSERT((gpa % pg_size) == 0,
"Physical address not aligned,\n"
- " paddr: 0x%lx page size: 0x%lx", paddr, pg_size);
- TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
+ " gpa: 0x%lx page size: 0x%lx", gpa, pg_size);
+ TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
"Physical address beyond maximum supported,\n"
- " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
- paddr, vm->max_gfn, vm->page_size);
- TEST_ASSERT(vm_untag_gpa(vm, paddr) == paddr,
- "Unexpected bits in paddr: %lx", paddr);
+ " gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
+ gpa, vm->max_gfn, vm->page_size);
+ TEST_ASSERT(vm_untag_gpa(vm, gpa) == gpa,
+ "Unexpected bits in gpa: %lx", gpa);
TEST_ASSERT(!PTE_EXECUTABLE_MASK(mmu) || !PTE_NX_MASK(mmu),
"X and NX bit masks cannot be used simultaneously");
@@ -289,40 +291,40 @@ void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, uint64_t vaddr,
for (current_level = mmu->pgtable_levels;
current_level > PG_LEVEL_4K;
current_level--) {
- pte = virt_create_upper_pte(vm, mmu, pte, vaddr, paddr,
+ pte = virt_create_upper_pte(vm, mmu, pte, gva, gpa,
current_level, level);
if (is_huge_pte(mmu, pte))
return;
}
/* Fill in page table entry. */
- pte = virt_get_pte(vm, mmu, pte, vaddr, PG_LEVEL_4K);
+ pte = virt_get_pte(vm, mmu, pte, gva, PG_LEVEL_4K);
TEST_ASSERT(!is_present_pte(mmu, pte),
- "PTE already present for 4k page at vaddr: 0x%lx", vaddr);
+ "PTE already present for 4k page at gva: 0x%lx", gva);
*pte = PTE_PRESENT_MASK(mmu) | PTE_READABLE_MASK(mmu) |
PTE_WRITABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu) |
- PTE_ALWAYS_SET_MASK(mmu) | (paddr & PHYSICAL_PAGE_MASK);
+ PTE_ALWAYS_SET_MASK(mmu) | (gpa & PHYSICAL_PAGE_MASK);
/*
* Neither SEV nor TDX supports shared page tables, so only the final
* leaf PTE needs the C/S-bit set manually.
*/
- if (vm_is_gpa_protected(vm, paddr))
+ if (vm_is_gpa_protected(vm, gpa))
*pte |= PTE_C_BIT_MASK(mmu);
else
*pte |= PTE_S_BIT_MASK(mmu);
}
-void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)
{
- __virt_pg_map(vm, &vm->mmu, vaddr, paddr, PG_LEVEL_4K);
+ __virt_pg_map(vm, &vm->mmu, gva, gpa, PG_LEVEL_4K);
}
-void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
- uint64_t nr_bytes, int level)
+void virt_map_level(struct kvm_vm *vm, gva_t gva, gpa_t gpa,
+ u64 nr_bytes, int level)
{
- uint64_t pg_size = PG_LEVEL_SIZE(level);
- uint64_t nr_pages = nr_bytes / pg_size;
+ u64 pg_size = PG_LEVEL_SIZE(level);
+ u64 nr_pages = nr_bytes / pg_size;
int i;
TEST_ASSERT(nr_bytes % pg_size == 0,
@@ -330,16 +332,16 @@ void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
nr_bytes, pg_size);
for (i = 0; i < nr_pages; i++) {
- __virt_pg_map(vm, &vm->mmu, vaddr, paddr, level);
- sparsebit_set_num(vm->vpages_mapped, vaddr >> vm->page_shift,
+ __virt_pg_map(vm, &vm->mmu, gva, gpa, level);
+ sparsebit_set_num(vm->vpages_mapped, gva >> vm->page_shift,
nr_bytes / PAGE_SIZE);
- vaddr += pg_size;
- paddr += pg_size;
+ gva += pg_size;
+ gpa += pg_size;
}
}
-static bool vm_is_target_pte(struct kvm_mmu *mmu, uint64_t *pte,
+static bool vm_is_target_pte(struct kvm_mmu *mmu, u64 *pte,
int *level, int current_level)
{
if (is_huge_pte(mmu, pte)) {
@@ -352,13 +354,13 @@ static bool vm_is_target_pte(struct kvm_mmu *mmu, uint64_t *pte,
return *level == current_level;
}
-static uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm,
- struct kvm_mmu *mmu,
- uint64_t vaddr,
- int *level)
+static u64 *__vm_get_page_table_entry(struct kvm_vm *vm,
+ struct kvm_mmu *mmu,
+ gva_t gva,
+ int *level)
{
int va_width = 12 + (mmu->pgtable_levels) * 9;
- uint64_t *pte = &mmu->pgd;
+ u64 *pte = &mmu->pgd;
int current_level;
TEST_ASSERT(!vm->arch.is_pt_protected,
@@ -369,49 +371,46 @@ static uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm,
TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
"Unknown or unsupported guest mode: 0x%x", vm->mode);
- TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
- (vaddr >> vm->page_shift)),
- "Invalid virtual address, vaddr: 0x%lx",
- vaddr);
+ TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),
+ "Invalid virtual address, gva: 0x%lx", gva);
/*
- * Check that the vaddr is a sign-extended va_width value.
+ * Check that the gva is a sign-extended va_width value.
*/
- TEST_ASSERT(vaddr ==
- (((int64_t)vaddr << (64 - va_width) >> (64 - va_width))),
+ TEST_ASSERT(gva == (((s64)gva << (64 - va_width) >> (64 - va_width))),
"Canonical check failed. The virtual address is invalid.");
for (current_level = mmu->pgtable_levels;
current_level > PG_LEVEL_4K;
current_level--) {
- pte = virt_get_pte(vm, mmu, pte, vaddr, current_level);
+ pte = virt_get_pte(vm, mmu, pte, gva, current_level);
if (vm_is_target_pte(mmu, pte, level, current_level))
return pte;
}
- return virt_get_pte(vm, mmu, pte, vaddr, PG_LEVEL_4K);
+ return virt_get_pte(vm, mmu, pte, gva, PG_LEVEL_4K);
}
-uint64_t *tdp_get_pte(struct kvm_vm *vm, uint64_t l2_gpa)
+u64 *tdp_get_pte(struct kvm_vm *vm, u64 l2_gpa)
{
int level = PG_LEVEL_4K;
return __vm_get_page_table_entry(vm, &vm->stage2_mmu, l2_gpa, &level);
}
-uint64_t *vm_get_pte(struct kvm_vm *vm, uint64_t vaddr)
+u64 *vm_get_pte(struct kvm_vm *vm, gva_t gva)
{
int level = PG_LEVEL_4K;
- return __vm_get_page_table_entry(vm, &vm->mmu, vaddr, &level);
+ return __vm_get_page_table_entry(vm, &vm->mmu, gva, &level);
}
-void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
+void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent)
{
struct kvm_mmu *mmu = &vm->mmu;
- uint64_t *pml4e, *pml4e_start;
- uint64_t *pdpe, *pdpe_start;
- uint64_t *pde, *pde_start;
- uint64_t *pte, *pte_start;
+ u64 *pml4e, *pml4e_start;
+ u64 *pdpe, *pdpe_start;
+ u64 *pde, *pde_start;
+ u64 *pte, *pte_start;
if (!mmu->pgd_created)
return;
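[Editor's note: the canonical check in the hunk above reduces to simple arithmetic: with mmu->pgtable_levels == 4, va_width = 12 + 4 * 9 = 48, and a gva is valid only if bits 63:48 replicate bit 47. A standalone sketch mirroring the same shift-based test:]

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	static bool is_canonical(uint64_t gva, int va_width)
	{
		/* Same sign-extension trick as the TEST_ASSERT above. */
		return gva == (uint64_t)(((int64_t)gva << (64 - va_width)) >> (64 - va_width));
	}

	int main(void)
	{
		printf("%d\n", is_canonical(0x00007fffffffffffULL, 48)); /* 1: top of low half */
		printf("%d\n", is_canonical(0xffff800000000000ULL, 48)); /* 1: bottom of high half */
		printf("%d\n", is_canonical(0x0000800000000000ULL, 48)); /* 0: non-canonical */
		return 0;
	}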
@@ -421,8 +420,8 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
fprintf(stream, "%*s index hvaddr gpaddr "
"addr w exec dirty\n",
indent, "");
- pml4e_start = (uint64_t *) addr_gpa2hva(vm, mmu->pgd);
- for (uint16_t n1 = 0; n1 <= 0x1ffu; n1++) {
+ pml4e_start = (u64 *)addr_gpa2hva(vm, mmu->pgd);
+ for (u16 n1 = 0; n1 <= 0x1ffu; n1++) {
pml4e = &pml4e_start[n1];
if (!is_present_pte(mmu, pml4e))
continue;
@@ -434,7 +433,7 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
is_writable_pte(mmu, pml4e), is_nx_pte(mmu, pml4e));
pdpe_start = addr_gpa2hva(vm, *pml4e & PHYSICAL_PAGE_MASK);
- for (uint16_t n2 = 0; n2 <= 0x1ffu; n2++) {
+ for (u16 n2 = 0; n2 <= 0x1ffu; n2++) {
pdpe = &pdpe_start[n2];
if (!is_present_pte(mmu, pdpe))
continue;
@@ -447,7 +446,7 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
is_nx_pte(mmu, pdpe));
pde_start = addr_gpa2hva(vm, *pdpe & PHYSICAL_PAGE_MASK);
- for (uint16_t n3 = 0; n3 <= 0x1ffu; n3++) {
+ for (u16 n3 = 0; n3 <= 0x1ffu; n3++) {
pde = &pde_start[n3];
if (!is_present_pte(mmu, pde))
continue;
@@ -459,7 +458,7 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
is_nx_pte(mmu, pde));
pte_start = addr_gpa2hva(vm, *pde & PHYSICAL_PAGE_MASK);
- for (uint16_t n4 = 0; n4 <= 0x1ffu; n4++) {
+ for (u16 n4 = 0; n4 <= 0x1ffu; n4++) {
pte = &pte_start[n4];
if (!is_present_pte(mmu, pte))
continue;
@@ -473,10 +472,10 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
is_writable_pte(mmu, pte),
is_nx_pte(mmu, pte),
is_dirty_pte(mmu, pte),
- ((uint64_t) n1 << 27)
- | ((uint64_t) n2 << 18)
- | ((uint64_t) n3 << 9)
- | ((uint64_t) n4));
+ ((u64)n1 << 27)
+ | ((u64)n2 << 18)
+ | ((u64)n3 << 9)
+ | ((u64)n4));
}
}
}
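[Editor's note: the index arithmetic at the end of virt_arch_dump() composes a virtual *page* number from the four table indices; shifting by the 12-bit page offset recovers the gva (ignoring sign extension of bit 47). A quick sketch of that relationship, using illustrative indices:]

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t n1 = 0x0ff, n2 = 0x001, n3 = 0x002, n4 = 0x003;

		/* Same composition as the dump: a page number, 9 bits per level. */
		uint64_t vpn = ((uint64_t)n1 << 27) | ((uint64_t)n2 << 18) |
			       ((uint64_t)n3 << 9) | (uint64_t)n4;
		uint64_t gva = vpn << 12;	/* append the page-offset bits */

		printf("vpn 0x%llx -> gva 0x%llx\n",
		       (unsigned long long)vpn, (unsigned long long)gva);
		return 0;
	}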
@@ -496,26 +495,24 @@ bool kvm_cpu_has_tdp(void)
return kvm_cpu_has_ept() || kvm_cpu_has_npt();
}
-void __tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr,
- uint64_t size, int level)
+void __tdp_map(struct kvm_vm *vm, gpa_t l2_gpa, gpa_t gpa, u64 size, int level)
{
size_t page_size = PG_LEVEL_SIZE(level);
size_t npages = size / page_size;
- TEST_ASSERT(nested_paddr + size > nested_paddr, "Vaddr overflow");
- TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
+ TEST_ASSERT(l2_gpa + size > l2_gpa, "L2 GPA overflow");
+ TEST_ASSERT(gpa + size > gpa, "GPA overflow");
while (npages--) {
- __virt_pg_map(vm, &vm->stage2_mmu, nested_paddr, paddr, level);
- nested_paddr += page_size;
- paddr += page_size;
+ __virt_pg_map(vm, &vm->stage2_mmu, l2_gpa, gpa, level);
+ l2_gpa += page_size;
+ gpa += page_size;
}
}
-void tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr,
- uint64_t size)
+void tdp_map(struct kvm_vm *vm, gpa_t l2_gpa, gpa_t gpa, u64 size)
{
- __tdp_map(vm, nested_paddr, paddr, size, PG_LEVEL_4K);
+ __tdp_map(vm, l2_gpa, gpa, size, PG_LEVEL_4K);
}
/* Prepare an identity extended page table that maps all the
@@ -523,7 +520,7 @@ void tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr,
*/
void tdp_identity_map_default_memslots(struct kvm_vm *vm)
{
- uint32_t s, memslot = 0;
+ u32 s, memslot = 0;
sparsebit_idx_t i, last;
struct userspace_mem_region *region = memslot2region(vm, memslot);
@@ -538,13 +535,13 @@ void tdp_identity_map_default_memslots(struct kvm_vm *vm)
if (i > last)
break;
- tdp_map(vm, (uint64_t)i << vm->page_shift,
- (uint64_t)i << vm->page_shift, 1 << vm->page_shift);
+ tdp_map(vm, (u64)i << vm->page_shift,
+ (u64)i << vm->page_shift, 1 << vm->page_shift);
}
}
/* Identity map a region with 1GiB pages. */
-void tdp_identity_map_1g(struct kvm_vm *vm, uint64_t addr, uint64_t size)
+void tdp_identity_map_1g(struct kvm_vm *vm, u64 addr, u64 size)
{
__tdp_map(vm, addr, addr, size, PG_LEVEL_1G);
}
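[Editor's note: an identity mapping is just tdp_map()/__tdp_map() with the same address for l2_gpa and gpa, which is why tdp_identity_map_1g() passes addr twice. A hedged usage sketch against the helpers named in this patch; the range is illustrative:]

	/* Sketch only: relies on the selftest helpers named in this patch. */
	static void setup_nested_identity(struct kvm_vm *vm)
	{
		/* Map the first 4 GiB of L1 GPA space 1:1 into L2 with 1G pages... */
		tdp_identity_map_1g(vm, 0, 4ULL << 30);

		/* ...which is shorthand for __tdp_map() with l2_gpa == gpa:
		 *	__tdp_map(vm, addr, addr, size, PG_LEVEL_1G);
		 */
	}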
@@ -616,10 +613,10 @@ static void kvm_seg_set_kernel_data_64bit(struct kvm_segment *segp)
segp->present = true;
}
-vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
+gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva)
{
int level = PG_LEVEL_NONE;
- uint64_t *pte = __vm_get_page_table_entry(vm, &vm->mmu, gva, &level);
+ u64 *pte = __vm_get_page_table_entry(vm, &vm->mmu, gva, &level);
TEST_ASSERT(is_present_pte(&vm->mmu, pte),
"Leaf PTE not PRESENT for gva: 0x%08lx", gva);
@@ -631,7 +628,7 @@ vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
return vm_untag_gpa(vm, PTE_GET_PA(*pte)) | (gva & ~HUGEPAGE_MASK(level));
}
-static void kvm_seg_set_tss_64bit(vm_vaddr_t base, struct kvm_segment *segp)
+static void kvm_seg_set_tss_64bit(gva_t base, struct kvm_segment *segp)
{
memset(segp, 0, sizeof(*segp));
segp->base = base;
@@ -744,16 +741,16 @@ static void vm_init_descriptor_tables(struct kvm_vm *vm)
struct kvm_segment seg;
int i;
- vm->arch.gdt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
- vm->arch.idt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
- vm->handlers = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
- vm->arch.tss = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
+ vm->arch.gdt = __vm_alloc_page(vm, MEM_REGION_DATA);
+ vm->arch.idt = __vm_alloc_page(vm, MEM_REGION_DATA);
+ vm->handlers = __vm_alloc_page(vm, MEM_REGION_DATA);
+ vm->arch.tss = __vm_alloc_page(vm, MEM_REGION_DATA);
/* Handlers have the same address in both address spaces. */
for (i = 0; i < NUM_INTERRUPTS; i++)
set_idt_entry(vm, i, (unsigned long)(&idt_handlers)[i], 0, KERNEL_CS);
- *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
+ *(gva_t *)addr_gva2hva(vm, (gva_t)(&exception_handlers)) = vm->handlers;
kvm_seg_set_kernel_code_64bit(&seg);
kvm_seg_fill_gdt_64bit(vm, &seg);
@@ -768,9 +765,9 @@ static void vm_init_descriptor_tables(struct kvm_vm *vm)
void vm_install_exception_handler(struct kvm_vm *vm, int vector,
void (*handler)(struct ex_regs *))
{
- vm_vaddr_t *handlers = (vm_vaddr_t *)addr_gva2hva(vm, vm->handlers);
+ gva_t *handlers = (gva_t *)addr_gva2hva(vm, vm->handlers);
- handlers[vector] = (vm_vaddr_t)handler;
+ handlers[vector] = (gva_t)handler;
}
void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
@@ -793,6 +790,8 @@ void kvm_arch_vm_post_create(struct kvm_vm *vm, unsigned int nr_vcpus)
sync_global_to_guest(vm, host_cpu_is_intel);
sync_global_to_guest(vm, host_cpu_is_amd);
+ sync_global_to_guest(vm, host_cpu_is_hygon);
+ sync_global_to_guest(vm, host_cpu_is_amd_compatible);
sync_global_to_guest(vm, is_forced_emulation_enabled);
sync_global_to_guest(vm, pmu_errata_mask);
@@ -817,18 +816,17 @@ void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
vcpu_regs_set(vcpu, &regs);
}
-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id)
{
struct kvm_mp_state mp_state;
struct kvm_regs regs;
- vm_vaddr_t stack_vaddr;
+ gva_t stack_gva;
struct kvm_vcpu *vcpu;
- stack_vaddr = __vm_vaddr_alloc(vm, DEFAULT_STACK_PGS * getpagesize(),
- DEFAULT_GUEST_STACK_VADDR_MIN,
- MEM_REGION_DATA);
+ stack_gva = __vm_alloc(vm, DEFAULT_STACK_PGS * getpagesize(),
+ DEFAULT_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA);
- stack_vaddr += DEFAULT_STACK_PGS * getpagesize();
+ stack_gva += DEFAULT_STACK_PGS * getpagesize();
/*
* Align stack to match calling sequence requirements in section "The
@@ -839,9 +837,9 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
* If this code is ever used to launch a vCPU with 32-bit entry point it
* may need to subtract 4 bytes instead of 8 bytes.
*/
- TEST_ASSERT(IS_ALIGNED(stack_vaddr, PAGE_SIZE),
- "__vm_vaddr_alloc() did not provide a page-aligned address");
- stack_vaddr -= 8;
+ TEST_ASSERT(IS_ALIGNED(stack_gva, PAGE_SIZE),
+ "__vm_alloc() did not provide a page-aligned address");
+ stack_gva -= 8;
vcpu = __vm_vcpu_add(vm, vcpu_id);
vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid());
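[Editor's note: the 8-byte adjustment above follows from the SysV AMD64 calling convention: RSP must be 16-byte aligned at the call site, so entering guest_code as if through a call should leave RSP congruent to 8 (mod 16). A standalone check of the arithmetic, with an illustrative stack address:]

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t stack_top = 0x40000;		/* page-aligned, per the assert above */

		assert((stack_top & 0xfff) == 0);	/* IS_ALIGNED(stack_gva, PAGE_SIZE) */
		stack_top -= 8;				/* mimic the pushed return address */
		assert(stack_top % 16 == 8);		/* entry-time RSP for the 64-bit ABI */
		return 0;
	}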
@@ -851,7 +849,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
/* Setup guest general purpose registers */
vcpu_regs_get(vcpu, &regs);
regs.rflags = regs.rflags | 0x2;
- regs.rsp = stack_vaddr;
+ regs.rsp = stack_gva;
vcpu_regs_set(vcpu, &regs);
/* Setup the MP state */
@@ -868,7 +866,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
return vcpu;
}
-struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id)
+struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, u32 vcpu_id)
{
struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id);
@@ -903,9 +901,9 @@ const struct kvm_cpuid2 *kvm_get_supported_cpuid(void)
return kvm_supported_cpuid;
}
-static uint32_t __kvm_cpu_has(const struct kvm_cpuid2 *cpuid,
- uint32_t function, uint32_t index,
- uint8_t reg, uint8_t lo, uint8_t hi)
+static u32 __kvm_cpu_has(const struct kvm_cpuid2 *cpuid,
+ u32 function, u32 index,
+ u8 reg, u8 lo, u8 hi)
{
const struct kvm_cpuid_entry2 *entry;
int i;
@@ -932,14 +930,14 @@ bool kvm_cpuid_has(const struct kvm_cpuid2 *cpuid,
feature.reg, feature.bit, feature.bit);
}
-uint32_t kvm_cpuid_property(const struct kvm_cpuid2 *cpuid,
- struct kvm_x86_cpu_property property)
+u32 kvm_cpuid_property(const struct kvm_cpuid2 *cpuid,
+ struct kvm_x86_cpu_property property)
{
return __kvm_cpu_has(cpuid, property.function, property.index,
property.reg, property.lo_bit, property.hi_bit);
}
-uint64_t kvm_get_feature_msr(uint64_t msr_index)
+u64 kvm_get_feature_msr(u64 msr_index)
{
struct {
struct kvm_msrs header;
@@ -958,7 +956,7 @@ uint64_t kvm_get_feature_msr(uint64_t msr_index)
return buffer.entry.data;
}
-void __vm_xsave_require_permission(uint64_t xfeature, const char *name)
+void __vm_xsave_require_permission(u64 xfeature, const char *name)
{
int kvm_fd;
u64 bitmask;
@@ -1015,7 +1013,7 @@ void vcpu_init_cpuid(struct kvm_vcpu *vcpu, const struct kvm_cpuid2 *cpuid)
void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu,
struct kvm_x86_cpu_property property,
- uint32_t value)
+ u32 value)
{
struct kvm_cpuid_entry2 *entry;
@@ -1030,7 +1028,7 @@ void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu,
TEST_ASSERT_EQ(kvm_cpuid_property(vcpu->cpuid, property), value);
}
-void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, uint32_t function)
+void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, u32 function)
{
struct kvm_cpuid_entry2 *entry = vcpu_get_cpuid_entry(vcpu, function);
@@ -1059,7 +1057,7 @@ void vcpu_set_or_clear_cpuid_feature(struct kvm_vcpu *vcpu,
vcpu_set_cpuid(vcpu);
}
-uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index)
+u64 vcpu_get_msr(struct kvm_vcpu *vcpu, u64 msr_index)
{
struct {
struct kvm_msrs header;
@@ -1074,7 +1072,7 @@ uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index)
return buffer.entry.data;
}
-int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value)
+int _vcpu_set_msr(struct kvm_vcpu *vcpu, u64 msr_index, u64 msr_value)
{
struct {
struct kvm_msrs header;
@@ -1102,28 +1100,28 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
vcpu_regs_get(vcpu, &regs);
if (num >= 1)
- regs.rdi = va_arg(ap, uint64_t);
+ regs.rdi = va_arg(ap, u64);
if (num >= 2)
- regs.rsi = va_arg(ap, uint64_t);
+ regs.rsi = va_arg(ap, u64);
if (num >= 3)
- regs.rdx = va_arg(ap, uint64_t);
+ regs.rdx = va_arg(ap, u64);
if (num >= 4)
- regs.rcx = va_arg(ap, uint64_t);
+ regs.rcx = va_arg(ap, u64);
if (num >= 5)
- regs.r8 = va_arg(ap, uint64_t);
+ regs.r8 = va_arg(ap, u64);
if (num >= 6)
- regs.r9 = va_arg(ap, uint64_t);
+ regs.r9 = va_arg(ap, u64);
vcpu_regs_set(vcpu, &regs);
va_end(ap);
}
-void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
+void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, u8 indent)
{
struct kvm_regs regs;
struct kvm_sregs sregs;
@@ -1192,7 +1190,7 @@ const struct kvm_msr_list *kvm_get_feature_msr_index_list(void)
return list;
}
-bool kvm_msr_is_in_save_restore_list(uint32_t msr_index)
+bool kvm_msr_is_in_save_restore_list(u32 msr_index)
{
const struct kvm_msr_list *list = kvm_get_msr_index_list();
int i;
@@ -1323,7 +1321,7 @@ void kvm_init_vm_address_properties(struct kvm_vm *vm)
}
const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
- uint32_t function, uint32_t index)
+ u32 function, u32 index)
{
int i;
@@ -1340,7 +1338,7 @@ const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
#define X86_HYPERCALL(inputs...) \
({ \
- uint64_t r; \
+ u64 r; \
\
asm volatile("test %[use_vmmcall], %[use_vmmcall]\n\t" \
"jnz 1f\n\t" \
@@ -1349,23 +1347,23 @@ const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
"1: vmmcall\n\t" \
"2:" \
: "=a"(r) \
- : [use_vmmcall] "r" (host_cpu_is_amd), inputs); \
+ : [use_vmmcall] "r" (host_cpu_is_amd_compatible), \
+ inputs); \
\
r; \
})
-uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
- uint64_t a3)
+u64 kvm_hypercall(u64 nr, u64 a0, u64 a1, u64 a2, u64 a3)
{
return X86_HYPERCALL("a"(nr), "b"(a0), "c"(a1), "d"(a2), "S"(a3));
}
-uint64_t __xen_hypercall(uint64_t nr, uint64_t a0, void *a1)
+u64 __xen_hypercall(u64 nr, u64 a0, void *a1)
{
return X86_HYPERCALL("a"(nr), "D"(a0), "S"(a1));
}
-void xen_hypercall(uint64_t nr, uint64_t a0, void *a1)
+void xen_hypercall(u64 nr, u64 a0, void *a1)
{
GUEST_ASSERT(!__xen_hypercall(nr, a0, a1));
}
@@ -1374,7 +1372,7 @@ unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
{
const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */
unsigned long ht_gfn, max_gfn, max_pfn;
- uint8_t maxphyaddr, guest_maxphyaddr;
+ u8 maxphyaddr, guest_maxphyaddr;
/*
* Use "guest MAXPHYADDR" from KVM if it's available. Guest MAXPHYADDR
@@ -1389,8 +1387,8 @@ unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
max_gfn = (1ULL << (guest_maxphyaddr - vm->page_shift)) - 1;
- /* Avoid reserved HyperTransport region on AMD processors. */
- if (!host_cpu_is_amd)
+ /* Avoid reserved HyperTransport region on AMD or Hygon processors. */
+ if (!host_cpu_is_amd_compatible)
return max_gfn;
/* On parts with <40 physical address bits, the area is fully hidden */
@@ -1404,7 +1402,7 @@ unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
/*
* Otherwise it's at the top of the physical address space, possibly
- * reduced due to SME by bits 11:6 of CPUID[0x8000001f].EBX. Use
+ * reduced due to SME or CSV by bits 11:6 of CPUID[0x8000001f].EBX. Use
* the old conservative value if MAXPHYADDR is not enumerated.
*/
if (!this_cpu_has_p(X86_PROPERTY_MAX_PHY_ADDR))
@@ -1425,6 +1423,8 @@ void kvm_selftest_arch_init(void)
{
host_cpu_is_intel = this_cpu_is_intel();
host_cpu_is_amd = this_cpu_is_amd();
+ host_cpu_is_hygon = this_cpu_is_hygon();
+ host_cpu_is_amd_compatible = host_cpu_is_amd || host_cpu_is_hygon;
is_forced_emulation_enabled = kvm_is_forced_emulation_enabled();
kvm_init_pmu_errata();
@@ -1446,8 +1446,7 @@ bool kvm_arch_has_default_irqchip(void)
return true;
}
-void setup_smram(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
- uint64_t smram_gpa,
+void setup_smram(struct kvm_vm *vm, struct kvm_vcpu *vcpu, u64 smram_gpa,
const void *smi_handler, size_t handler_size)
{
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, smram_gpa,
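[Editor's note: with the vmcall/vmmcall selection now keyed on host_cpu_is_amd_compatible, the same guest code runs on Intel, AMD, and Hygon. A hedged guest-side sketch of kvm_hypercall() as declared in this patch; the hypercall number and argument layout are illustrative, not prescriptive:]

	/* Sketch only: guest code using the helper signature from this patch. */
	static void guest_code(void)
	{
		/* Arguments shown as (gpa, npages, attrs, unused); illustrative. */
		u64 ret = kvm_hypercall(KVM_HC_MAP_GPA_RANGE, 0, 1, 0, 0);

		GUEST_SYNC(ret);
		GUEST_DONE();
	}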
diff --git a/tools/testing/selftests/kvm/lib/x86/sev.c b/tools/testing/selftests/kvm/lib/x86/sev.c
index c3a9838f4806..93f916903461 100644
--- a/tools/testing/selftests/kvm/lib/x86/sev.c
+++ b/tools/testing/selftests/kvm/lib/x86/sev.c
@@ -15,10 +15,10 @@
* expression would cause us to quit the loop.
*/
static void encrypt_region(struct kvm_vm *vm, struct userspace_mem_region *region,
- uint8_t page_type, bool private)
+ u8 page_type, bool private)
{
const struct sparsebit *protected_phy_pages = region->protected_phy_pages;
- const vm_paddr_t gpa_base = region->region.guest_phys_addr;
+ const gpa_t gpa_base = region->region.guest_phys_addr;
const sparsebit_idx_t lowest_page_in_region = gpa_base >> vm->page_shift;
sparsebit_idx_t i, j;
@@ -29,15 +29,15 @@ static void encrypt_region(struct kvm_vm *vm, struct userspace_mem_region *regio
sev_register_encrypted_memory(vm, region);
sparsebit_for_each_set_range(protected_phy_pages, i, j) {
- const uint64_t size = (j - i + 1) * vm->page_size;
- const uint64_t offset = (i - lowest_page_in_region) * vm->page_size;
+ const u64 size = (j - i + 1) * vm->page_size;
+ const u64 offset = (i - lowest_page_in_region) * vm->page_size;
if (private)
vm_mem_set_private(vm, gpa_base + offset, size);
if (is_sev_snp_vm(vm))
snp_launch_update_data(vm, gpa_base + offset,
- (uint64_t)addr_gpa2hva(vm, gpa_base + offset),
+ (u64)addr_gpa2hva(vm, gpa_base + offset),
size, page_type);
else
sev_launch_update_data(vm, gpa_base + offset, size);
@@ -79,7 +79,7 @@ void snp_vm_init(struct kvm_vm *vm)
vm_sev_ioctl(vm, KVM_SEV_INIT2, &init);
}
-void sev_vm_launch(struct kvm_vm *vm, uint32_t policy)
+void sev_vm_launch(struct kvm_vm *vm, u32 policy)
{
struct kvm_sev_launch_start launch_start = {
.policy = policy,
@@ -103,7 +103,7 @@ void sev_vm_launch(struct kvm_vm *vm, uint32_t policy)
vm->arch.is_pt_protected = true;
}
-void sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement)
+void sev_vm_launch_measure(struct kvm_vm *vm, u8 *measurement)
{
struct kvm_sev_launch_measure launch_measure;
struct kvm_sev_guest_status guest_status;
@@ -131,7 +131,7 @@ void sev_vm_launch_finish(struct kvm_vm *vm)
TEST_ASSERT_EQ(status.state, SEV_GUEST_STATE_RUNNING);
}
-void snp_vm_launch_start(struct kvm_vm *vm, uint64_t policy)
+void snp_vm_launch_start(struct kvm_vm *vm, u64 policy)
{
struct kvm_sev_snp_launch_start launch_start = {
.policy = policy,
@@ -158,7 +158,7 @@ void snp_vm_launch_finish(struct kvm_vm *vm)
vm_sev_ioctl(vm, KVM_SEV_SNP_LAUNCH_FINISH, &launch_finish);
}
-struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code,
+struct kvm_vm *vm_sev_create_with_one_vcpu(u32 type, void *guest_code,
struct kvm_vcpu **cpu)
{
struct vm_shape shape = {
@@ -174,7 +174,7 @@ struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code,
return vm;
}
-void vm_sev_launch(struct kvm_vm *vm, uint64_t policy, uint8_t *measurement)
+void vm_sev_launch(struct kvm_vm *vm, u64 policy, u8 *measurement)
{
if (is_sev_snp_vm(vm)) {
vm_enable_cap(vm, KVM_CAP_EXIT_HYPERCALL, BIT(KVM_HC_MAP_GPA_RANGE));
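[Editor's note: the size/offset math in encrypt_region() converts a sparsebit run [i, j] of page indices into a byte range relative to the region's base GPA. A standalone sketch of that conversion; page size and indices are illustrative:]

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t page_size = 4096;
		uint64_t gpa_base = 0x100000;
		uint64_t lowest_page_in_region = gpa_base / page_size;
		uint64_t i = lowest_page_in_region + 2, j = i + 3;	/* 4-page run */

		uint64_t size = (j - i + 1) * page_size;
		uint64_t offset = (i - lowest_page_in_region) * page_size;

		printf("encrypt gpa 0x%llx..0x%llx\n",
		       (unsigned long long)(gpa_base + offset),
		       (unsigned long long)(gpa_base + offset + size - 1));
		return 0;
	}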
diff --git a/tools/testing/selftests/kvm/lib/x86/svm.c b/tools/testing/selftests/kvm/lib/x86/svm.c
index 2e5c480c9afd..3b01605ab016 100644
--- a/tools/testing/selftests/kvm/lib/x86/svm.c
+++ b/tools/testing/selftests/kvm/lib/x86/svm.c
@@ -28,20 +28,20 @@ u64 rflags;
* Pointer to structure with the addresses of the SVM areas.
*/
struct svm_test_data *
-vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva)
+vcpu_alloc_svm(struct kvm_vm *vm, gva_t *p_svm_gva)
{
- vm_vaddr_t svm_gva = vm_vaddr_alloc_page(vm);
+ gva_t svm_gva = vm_alloc_page(vm);
struct svm_test_data *svm = addr_gva2hva(vm, svm_gva);
- svm->vmcb = (void *)vm_vaddr_alloc_page(vm);
+ svm->vmcb = (void *)vm_alloc_page(vm);
svm->vmcb_hva = addr_gva2hva(vm, (uintptr_t)svm->vmcb);
svm->vmcb_gpa = addr_gva2gpa(vm, (uintptr_t)svm->vmcb);
- svm->save_area = (void *)vm_vaddr_alloc_page(vm);
+ svm->save_area = (void *)vm_alloc_page(vm);
svm->save_area_hva = addr_gva2hva(vm, (uintptr_t)svm->save_area);
svm->save_area_gpa = addr_gva2gpa(vm, (uintptr_t)svm->save_area);
- svm->msr = (void *)vm_vaddr_alloc_page(vm);
+ svm->msr = (void *)vm_alloc_page(vm);
svm->msr_hva = addr_gva2hva(vm, (uintptr_t)svm->msr);
svm->msr_gpa = addr_gva2gpa(vm, (uintptr_t)svm->msr);
memset(svm->msr_hva, 0, getpagesize());
@@ -84,14 +84,14 @@ void vm_enable_npt(struct kvm_vm *vm)
void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp)
{
struct vmcb *vmcb = svm->vmcb;
- uint64_t vmcb_gpa = svm->vmcb_gpa;
+ u64 vmcb_gpa = svm->vmcb_gpa;
struct vmcb_save_area *save = &vmcb->save;
struct vmcb_control_area *ctrl = &vmcb->control;
u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
| SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
| SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
- uint64_t efer;
+ u64 efer;
efer = rdmsr(MSR_EFER);
wrmsr(MSR_EFER, efer | EFER_SVME);
@@ -126,7 +126,7 @@ void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_r
guest_regs.rdi = (u64)svm;
if (svm->ncr3_gpa) {
- ctrl->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
+ ctrl->misc_ctl |= SVM_MISC_ENABLE_NP;
ctrl->nested_cr3 = svm->ncr3_gpa;
}
}
@@ -158,7 +158,7 @@ void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_r
* for now. Registers involved in LOAD/SAVE_GPR_C end up unmodified,
* so they do not need to be in the clobber list.
*/
-void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa)
+void run_guest(struct vmcb *vmcb, u64 vmcb_gpa)
{
asm volatile (
"vmload %[vmcb_gpa]\n\t"
diff --git a/tools/testing/selftests/kvm/lib/x86/ucall.c b/tools/testing/selftests/kvm/lib/x86/ucall.c
index 1265cecc7dd1..e7dd5791959b 100644
--- a/tools/testing/selftests/kvm/lib/x86/ucall.c
+++ b/tools/testing/selftests/kvm/lib/x86/ucall.c
@@ -6,9 +6,9 @@
*/
#include "kvm_util.h"
-#define UCALL_PIO_PORT ((uint16_t)0x1000)
+#define UCALL_PIO_PORT ((u16)0x1000)
-void ucall_arch_do_ucall(vm_vaddr_t uc)
+void ucall_arch_do_ucall(gva_t uc)
{
/*
* FIXME: Revert this hack (the entire commit that added it) once nVMX
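[Editor's note: x86 ucalls are a port I/O access that KVM exits to userspace on, with the ucall pointer carried in a GPR alongside the access. The function body falls outside this hunk, so the sketch below is illustrative only; the register choice and asm constraints are assumptions:]

	/* Illustrative only: the real body is not part of this hunk. */
	static void do_ucall_sketch(gva_t uc)
	{
		asm volatile("in %%dx, %%al"
			     : : "d"(UCALL_PIO_PORT), "D"(uc)
			     : "rax", "memory");
	}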
diff --git a/tools/testing/selftests/kvm/lib/x86/vmx.c b/tools/testing/selftests/kvm/lib/x86/vmx.c
index c87b340362a9..67642759e4a0 100644
--- a/tools/testing/selftests/kvm/lib/x86/vmx.c
+++ b/tools/testing/selftests/kvm/lib/x86/vmx.c
@@ -27,7 +27,7 @@ struct hv_vp_assist_page *current_vp_assist;
int vcpu_enable_evmcs(struct kvm_vcpu *vcpu)
{
- uint16_t evmcs_ver;
+ u16 evmcs_ver;
vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
(unsigned long)&evmcs_ver);
@@ -79,39 +79,39 @@ void vm_enable_ept(struct kvm_vm *vm)
* Pointer to structure with the addresses of the VMX areas.
*/
struct vmx_pages *
-vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva)
+vcpu_alloc_vmx(struct kvm_vm *vm, gva_t *p_vmx_gva)
{
- vm_vaddr_t vmx_gva = vm_vaddr_alloc_page(vm);
+ gva_t vmx_gva = vm_alloc_page(vm);
struct vmx_pages *vmx = addr_gva2hva(vm, vmx_gva);
/* Setup of a region of guest memory for the vmxon region. */
- vmx->vmxon = (void *)vm_vaddr_alloc_page(vm);
+ vmx->vmxon = (void *)vm_alloc_page(vm);
vmx->vmxon_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmxon);
vmx->vmxon_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmxon);
/* Setup of a region of guest memory for a vmcs. */
- vmx->vmcs = (void *)vm_vaddr_alloc_page(vm);
+ vmx->vmcs = (void *)vm_alloc_page(vm);
vmx->vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmcs);
vmx->vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmcs);
/* Setup of a region of guest memory for the MSR bitmap. */
- vmx->msr = (void *)vm_vaddr_alloc_page(vm);
+ vmx->msr = (void *)vm_alloc_page(vm);
vmx->msr_hva = addr_gva2hva(vm, (uintptr_t)vmx->msr);
vmx->msr_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->msr);
memset(vmx->msr_hva, 0, getpagesize());
/* Setup of a region of guest memory for the shadow VMCS. */
- vmx->shadow_vmcs = (void *)vm_vaddr_alloc_page(vm);
+ vmx->shadow_vmcs = (void *)vm_alloc_page(vm);
vmx->shadow_vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->shadow_vmcs);
vmx->shadow_vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->shadow_vmcs);
/* Setup of a region of guest memory for the VMREAD and VMWRITE bitmaps. */
- vmx->vmread = (void *)vm_vaddr_alloc_page(vm);
+ vmx->vmread = (void *)vm_alloc_page(vm);
vmx->vmread_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmread);
vmx->vmread_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmread);
memset(vmx->vmread_hva, 0, getpagesize());
- vmx->vmwrite = (void *)vm_vaddr_alloc_page(vm);
+ vmx->vmwrite = (void *)vm_alloc_page(vm);
vmx->vmwrite_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmwrite);
vmx->vmwrite_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmwrite);
memset(vmx->vmwrite_hva, 0, getpagesize());
@@ -125,8 +125,8 @@ vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva)
bool prepare_for_vmx_operation(struct vmx_pages *vmx)
{
- uint64_t feature_control;
- uint64_t required;
+ u64 feature_control;
+ u64 required;
unsigned long cr0;
unsigned long cr4;
@@ -160,7 +160,7 @@ bool prepare_for_vmx_operation(struct vmx_pages *vmx)
wrmsr(MSR_IA32_FEAT_CTL, feature_control | required);
/* Enter VMX root operation. */
- *(uint32_t *)(vmx->vmxon) = vmcs_revision();
+ *(u32 *)(vmx->vmxon) = vmcs_revision();
if (vmxon(vmx->vmxon_gpa))
return false;
@@ -170,7 +170,7 @@ bool prepare_for_vmx_operation(struct vmx_pages *vmx)
bool load_vmcs(struct vmx_pages *vmx)
{
/* Load a VMCS. */
- *(uint32_t *)(vmx->vmcs) = vmcs_revision();
+ *(u32 *)(vmx->vmcs) = vmcs_revision();
if (vmclear(vmx->vmcs_gpa))
return false;
@@ -178,14 +178,14 @@ bool load_vmcs(struct vmx_pages *vmx)
return false;
/* Setup shadow VMCS, do not load it yet. */
- *(uint32_t *)(vmx->shadow_vmcs) = vmcs_revision() | 0x80000000ul;
+ *(u32 *)(vmx->shadow_vmcs) = vmcs_revision() | 0x80000000ul;
if (vmclear(vmx->shadow_vmcs_gpa))
return false;
return true;
}
-static bool ept_vpid_cap_supported(uint64_t mask)
+static bool ept_vpid_cap_supported(u64 mask)
{
return rdmsr(MSR_IA32_VMX_EPT_VPID_CAP) & mask;
}
@@ -200,7 +200,7 @@ bool ept_1g_pages_supported(void)
*/
static inline void init_vmcs_control_fields(struct vmx_pages *vmx)
{
- uint32_t sec_exec_ctl = 0;
+ u32 sec_exec_ctl = 0;
vmwrite(VIRTUAL_PROCESSOR_ID, 0);
vmwrite(POSTED_INTR_NV, 0);
@@ -208,7 +208,7 @@ static inline void init_vmcs_control_fields(struct vmx_pages *vmx)
vmwrite(PIN_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS));
if (vmx->eptp_gpa) {
- uint64_t eptp = vmx->eptp_gpa | EPTP_WB | EPTP_PWL_4;
+ u64 eptp = vmx->eptp_gpa | EPTP_WB | EPTP_PWL_4;
TEST_ASSERT((vmx->eptp_gpa & ~PHYSICAL_PAGE_MASK) == 0,
"Illegal bits set in vmx->eptp_gpa");
@@ -259,7 +259,7 @@ static inline void init_vmcs_control_fields(struct vmx_pages *vmx)
*/
static inline void init_vmcs_host_state(void)
{
- uint32_t exit_controls = vmreadz(VM_EXIT_CONTROLS);
+ u32 exit_controls = vmreadz(VM_EXIT_CONTROLS);
vmwrite(HOST_ES_SELECTOR, get_es());
vmwrite(HOST_CS_SELECTOR, get_cs());
@@ -358,8 +358,8 @@ static inline void init_vmcs_guest_state(void *rip, void *rsp)
vmwrite(GUEST_GDTR_BASE, vmreadz(HOST_GDTR_BASE));
vmwrite(GUEST_IDTR_BASE, vmreadz(HOST_IDTR_BASE));
vmwrite(GUEST_DR7, 0x400);
- vmwrite(GUEST_RSP, (uint64_t)rsp);
- vmwrite(GUEST_RIP, (uint64_t)rip);
+ vmwrite(GUEST_RSP, (u64)rsp);
+ vmwrite(GUEST_RIP, (u64)rip);
vmwrite(GUEST_RFLAGS, 2);
vmwrite(GUEST_PENDING_DBG_EXCEPTIONS, 0);
vmwrite(GUEST_SYSENTER_ESP, vmreadz(HOST_IA32_SYSENTER_ESP));
@@ -375,7 +375,7 @@ void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp)
bool kvm_cpu_has_ept(void)
{
- uint64_t ctrl;
+ u64 ctrl;
if (!kvm_cpu_has(X86_FEATURE_VMX))
return false;
@@ -390,7 +390,7 @@ bool kvm_cpu_has_ept(void)
void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm)
{
- vmx->apic_access = (void *)vm_vaddr_alloc_page(vm);
+ vmx->apic_access = (void *)vm_alloc_page(vm);
vmx->apic_access_hva = addr_gva2hva(vm, (uintptr_t)vmx->apic_access);
vmx->apic_access_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->apic_access);
}
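[Editor's note: end to end, the vmx.c helpers compose the standard VMX bring-up: write the VMCS revision ID into the vmxon region, vmxon, then vmclear/vmptrld the VMCS before filling it. A hedged L1 usage sketch, assuming the usual selftest vmx.h helpers vmlaunch()/vmreadz() alongside the names shown in this patch:]

	/* Sketch only: L1 guest flow using the helpers in this patch. */
	static void l2_guest_code(void)
	{
		vmcall();	/* exit back to L1 */
	}

	static void l1_guest_code(struct vmx_pages *vmx)
	{
		unsigned long l2_stack[64];

		GUEST_ASSERT(prepare_for_vmx_operation(vmx));	/* vmxon */
		GUEST_ASSERT(load_vmcs(vmx));			/* vmclear + vmptrld */
		prepare_vmcs(vmx, l2_guest_code, &l2_stack[64]);

		GUEST_ASSERT(!vmlaunch());
		GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
		GUEST_DONE();
	}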