summaryrefslogtreecommitdiff
path: root/tools
diff options
context:
space:
mode:
Diffstat (limited to 'tools')
-rw-r--r--tools/arch/arm64/include/uapi/asm/kvm.h1
-rw-r--r--tools/include/uapi/linux/kvm.h2
-rw-r--r--tools/testing/selftests/ftrace/test.d/remotes/functions19
-rw-r--r--tools/testing/selftests/ftrace/test.d/remotes/hotplug.tc88
-rw-r--r--tools/testing/selftests/ftrace/test.d/remotes/hypervisor/buffer_size.tc11
-rw-r--r--tools/testing/selftests/ftrace/test.d/remotes/hypervisor/hotplug.tc11
-rw-r--r--tools/testing/selftests/ftrace/test.d/remotes/hypervisor/reset.tc11
-rw-r--r--tools/testing/selftests/ftrace/test.d/remotes/hypervisor/trace.tc11
-rw-r--r--tools/testing/selftests/ftrace/test.d/remotes/hypervisor/trace_pipe.tc11
-rw-r--r--tools/testing/selftests/ftrace/test.d/remotes/hypervisor/unloading.tc11
-rw-r--r--tools/testing/selftests/ftrace/test.d/remotes/trace.tc27
-rw-r--r--tools/testing/selftests/ftrace/test.d/remotes/trace_pipe.tc25
-rw-r--r--tools/testing/selftests/kvm/Makefile.kvm18
-rw-r--r--tools/testing/selftests/kvm/arm64/at.c14
-rw-r--r--tools/testing/selftests/kvm/arm64/no-vgic-v3.c177
-rw-r--r--tools/testing/selftests/kvm/arm64/no-vgic.c297
-rw-r--r--tools/testing/selftests/kvm/arm64/set_id_regs.c52
-rw-r--r--tools/testing/selftests/kvm/arm64/vgic_v5.c228
-rw-r--r--tools/testing/selftests/kvm/dirty_log_test.c3
-rw-r--r--tools/testing/selftests/kvm/guest_memfd_test.c70
-rw-r--r--tools/testing/selftests/kvm/include/arm64/gic_v5.h150
-rw-r--r--tools/testing/selftests/kvm/include/kvm_syscalls.h1
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util.h4
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util_types.h2
-rw-r--r--tools/testing/selftests/kvm/include/loongarch/pmu.h66
-rw-r--r--tools/testing/selftests/kvm/include/loongarch/processor.h15
-rw-r--r--tools/testing/selftests/kvm/include/riscv/sbi.h37
-rw-r--r--tools/testing/selftests/kvm/include/x86/processor.h12
-rw-r--r--tools/testing/selftests/kvm/include/x86/svm.h14
-rw-r--r--tools/testing/selftests/kvm/kvm_page_table_test.c3
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c9
-rw-r--r--tools/testing/selftests/kvm/lib/loongarch/processor.c15
-rw-r--r--tools/testing/selftests/kvm/lib/memstress.c4
-rw-r--r--tools/testing/selftests/kvm/lib/riscv/processor.c5
-rw-r--r--tools/testing/selftests/kvm/lib/x86/processor.c15
-rw-r--r--tools/testing/selftests/kvm/lib/x86/svm.c2
-rw-r--r--tools/testing/selftests/kvm/loongarch/pmu_test.c205
-rw-r--r--tools/testing/selftests/kvm/pre_fault_memory_test.c4
-rw-r--r--tools/testing/selftests/kvm/riscv/sbi_pmu_test.c20
-rw-r--r--tools/testing/selftests/kvm/set_memory_region_test.c9
-rw-r--r--tools/testing/selftests/kvm/steal_time.c98
-rw-r--r--tools/testing/selftests/kvm/x86/fix_hypercall_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86/msrs_test.c4
-rw-r--r--tools/testing/selftests/kvm/x86/nested_vmsave_vmload_test.c16
-rw-r--r--tools/testing/selftests/kvm/x86/pmu_event_filter_test.c3
-rw-r--r--tools/testing/selftests/kvm/x86/sev_migrate_tests.c2
-rw-r--r--tools/testing/selftests/kvm/x86/state_test.c35
-rw-r--r--tools/testing/selftests/kvm/x86/svm_lbr_nested_state.c145
-rw-r--r--tools/testing/selftests/kvm/x86/svm_nested_clear_efer_svme.c55
-rw-r--r--tools/testing/selftests/kvm/x86/svm_nested_vmcb12_gpa.c176
-rw-r--r--tools/testing/selftests/kvm/x86/xapic_state_test.c2
51 files changed, 1876 insertions, 341 deletions
diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h
index a792a599b9d6..1c13bfa2d38a 100644
--- a/tools/arch/arm64/include/uapi/asm/kvm.h
+++ b/tools/arch/arm64/include/uapi/asm/kvm.h
@@ -428,6 +428,7 @@ enum {
#define KVM_DEV_ARM_ITS_RESTORE_TABLES 2
#define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
#define KVM_DEV_ARM_ITS_CTRL_RESET 4
+#define KVM_DEV_ARM_VGIC_USERSPACE_PPIS 5
/* Device Control API on vcpu fd */
#define KVM_ARM_VCPU_PMU_V3_CTRL 0
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index 80364d4dbebb..d0c0c8605976 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -1224,6 +1224,8 @@ enum kvm_device_type {
#define KVM_DEV_TYPE_LOONGARCH_EIOINTC KVM_DEV_TYPE_LOONGARCH_EIOINTC
KVM_DEV_TYPE_LOONGARCH_PCHPIC,
#define KVM_DEV_TYPE_LOONGARCH_PCHPIC KVM_DEV_TYPE_LOONGARCH_PCHPIC
+ KVM_DEV_TYPE_ARM_VGIC_V5,
+#define KVM_DEV_TYPE_ARM_VGIC_V5 KVM_DEV_TYPE_ARM_VGIC_V5
KVM_DEV_TYPE_MAX,
diff --git a/tools/testing/selftests/ftrace/test.d/remotes/functions b/tools/testing/selftests/ftrace/test.d/remotes/functions
index 97a09d564a34..05224fac3653 100644
--- a/tools/testing/selftests/ftrace/test.d/remotes/functions
+++ b/tools/testing/selftests/ftrace/test.d/remotes/functions
@@ -24,12 +24,21 @@ setup_remote_test()
assert_loaded()
{
- grep -q "(loaded)" buffer_size_kb
+ grep -q "(loaded)" buffer_size_kb || return 1
}
assert_unloaded()
{
- grep -q "(unloaded)" buffer_size_kb
+ grep -q "(unloaded)" buffer_size_kb || return 1
+}
+
+reload_remote()
+{
+ echo 0 > tracing_on
+ clear_trace
+ assert_unloaded
+ echo 1 > tracing_on
+ assert_loaded
}
dump_trace_pipe()
@@ -79,10 +88,12 @@ get_cpu_ids()
sed -n 's/^processor\s*:\s*\([0-9]\+\).*/\1/p' /proc/cpuinfo
}
-get_page_size() {
+get_page_size()
+{
sed -ne 's/^.*data.*size:\([0-9][0-9]*\).*/\1/p' events/header_page
}
-get_selftest_event_size() {
+get_selftest_event_size()
+{
sed -ne 's/^.*field:.*;.*size:\([0-9][0-9]*\);.*/\1/p' events/*/selftest/format | awk '{s+=$1} END {print s}'
}
diff --git a/tools/testing/selftests/ftrace/test.d/remotes/hotplug.tc b/tools/testing/selftests/ftrace/test.d/remotes/hotplug.tc
new file mode 100644
index 000000000000..145617eb8061
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/remotes/hotplug.tc
@@ -0,0 +1,88 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Test trace remote read with an offline CPU
+# requires: remotes/test
+
+. $TEST_DIR/remotes/functions
+
+hotunplug_one_cpu()
+{
+ [ "$(get_cpu_ids | wc -l)" -ge 2 ] || return 1
+
+ for cpu in $(get_cpu_ids); do
+ echo 0 > /sys/devices/system/cpu/cpu$cpu/online || return 1
+ break
+ done
+
+ echo $cpu
+}
+
+# Check non-consuming and consuming read
+check_read()
+{
+ for i in $(seq 1 8); do
+ echo $i > write_event
+ done
+
+ check_trace 1 8 trace
+
+ output=$(dump_trace_pipe)
+ check_trace 1 8 $output
+ rm $output
+}
+
+test_hotplug()
+{
+ echo 0 > trace
+ assert_loaded
+
+ #
+ # Test a trace buffer containing an offline CPU
+ #
+
+ cpu=$(hotunplug_one_cpu) || exit_unsupported
+ trap "echo 1 > /sys/devices/system/cpu/cpu$cpu/online" EXIT
+
+ check_read
+
+ #
+ # Test a trace buffer with a missing CPU
+ #
+
+ reload_remote
+
+ check_read
+
+ #
+ # Test a trace buffer with a CPU added later
+ #
+
+ echo 1 > /sys/devices/system/cpu/cpu$cpu/online
+ trap "" EXIT
+ assert_loaded
+
+ check_read
+
+ # Test if the ring-buffer for the newly added CPU is both writable and
+ # readable
+ for i in $(seq 1 8); do
+ taskset -c $cpu echo $i > write_event
+ done
+
+ cd per_cpu/cpu$cpu/
+
+ check_trace 1 8 trace
+
+ output=$(dump_trace_pipe)
+ check_trace 1 8 $output
+ rm $output
+
+ cd -
+}
+
+if [ -z "$SOURCE_REMOTE_TEST" ]; then
+ set -e
+
+ setup_remote_test
+ test_hotplug
+fi
diff --git a/tools/testing/selftests/ftrace/test.d/remotes/hypervisor/buffer_size.tc b/tools/testing/selftests/ftrace/test.d/remotes/hypervisor/buffer_size.tc
new file mode 100644
index 000000000000..64bf859d6406
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/remotes/hypervisor/buffer_size.tc
@@ -0,0 +1,11 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Test hypervisor trace buffer size
+# requires: remotes/hypervisor/write_event
+
+SOURCE_REMOTE_TEST=1
+. $TEST_DIR/remotes/buffer_size.tc
+
+set -e
+setup_remote "hypervisor"
+test_buffer_size
diff --git a/tools/testing/selftests/ftrace/test.d/remotes/hypervisor/hotplug.tc b/tools/testing/selftests/ftrace/test.d/remotes/hypervisor/hotplug.tc
new file mode 100644
index 000000000000..580ec32c8f81
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/remotes/hypervisor/hotplug.tc
@@ -0,0 +1,11 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Test hypervisor trace read with an offline CPU
+# requires: remotes/hypervisor/write_event
+
+SOURCE_REMOTE_TEST=1
+. $TEST_DIR/remotes/hotplug.tc
+
+set -e
+setup_remote "hypervisor"
+test_hotplug
diff --git a/tools/testing/selftests/ftrace/test.d/remotes/hypervisor/reset.tc b/tools/testing/selftests/ftrace/test.d/remotes/hypervisor/reset.tc
new file mode 100644
index 000000000000..7fe3b09b34e3
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/remotes/hypervisor/reset.tc
@@ -0,0 +1,11 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Test hypervisor trace buffer reset
+# requires: remotes/hypervisor/write_event
+
+SOURCE_REMOTE_TEST=1
+. $TEST_DIR/remotes/reset.tc
+
+set -e
+setup_remote "hypervisor"
+test_reset
diff --git a/tools/testing/selftests/ftrace/test.d/remotes/hypervisor/trace.tc b/tools/testing/selftests/ftrace/test.d/remotes/hypervisor/trace.tc
new file mode 100644
index 000000000000..b937c19ca7f9
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/remotes/hypervisor/trace.tc
@@ -0,0 +1,11 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Test hypervisor non-consuming trace read
+# requires: remotes/hypervisor/write_event
+
+SOURCE_REMOTE_TEST=1
+. $TEST_DIR/remotes/trace.tc
+
+set -e
+setup_remote "hypervisor"
+test_trace
diff --git a/tools/testing/selftests/ftrace/test.d/remotes/hypervisor/trace_pipe.tc b/tools/testing/selftests/ftrace/test.d/remotes/hypervisor/trace_pipe.tc
new file mode 100644
index 000000000000..66aa1b76c147
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/remotes/hypervisor/trace_pipe.tc
@@ -0,0 +1,11 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Test hypervisor consuming trace read
+# requires: remotes/hypervisor/write_event
+
+SOURCE_REMOTE_TEST=1
+. $TEST_DIR/remotes/trace_pipe.tc
+
+set -e
+setup_remote "hypervisor"
+test_trace_pipe
diff --git a/tools/testing/selftests/ftrace/test.d/remotes/hypervisor/unloading.tc b/tools/testing/selftests/ftrace/test.d/remotes/hypervisor/unloading.tc
new file mode 100644
index 000000000000..1dafde3414ab
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/remotes/hypervisor/unloading.tc
@@ -0,0 +1,11 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Test hypervisor trace buffer unloading
+# requires: remotes/hypervisor/write_event
+
+SOURCE_REMOTE_TEST=1
+. $TEST_DIR/remotes/unloading.tc
+
+set -e
+setup_remote "hypervisor"
+test_unloading
diff --git a/tools/testing/selftests/ftrace/test.d/remotes/trace.tc b/tools/testing/selftests/ftrace/test.d/remotes/trace.tc
index 170f7648732a..bc9377a70e8d 100644
--- a/tools/testing/selftests/ftrace/test.d/remotes/trace.tc
+++ b/tools/testing/selftests/ftrace/test.d/remotes/trace.tc
@@ -58,11 +58,7 @@ test_trace()
#
# Ensure the writer is not on the reader page by reloading the buffer
- echo 0 > tracing_on
- echo 0 > trace
- assert_unloaded
- echo 1 > tracing_on
- assert_loaded
+ reload_remote
# Ensure ring-buffer overflow by emitting events from the same CPU
for cpu in $(get_cpu_ids); do
@@ -96,27 +92,6 @@ test_trace()
cd - > /dev/null
done
-
- #
- # Test with hotplug
- #
-
- [ "$(get_cpu_ids | wc -l)" -ge 2 ] || return 0
-
- echo 0 > trace
-
- for cpu in $(get_cpu_ids); do
- echo 0 > /sys/devices/system/cpu/cpu$cpu/online || return 0
- break
- done
-
- for i in $(seq 1 8); do
- echo $i > write_event
- done
-
- check_trace 1 8 trace
-
- echo 1 > /sys/devices/system/cpu/cpu$cpu/online
}
if [ -z "$SOURCE_REMOTE_TEST" ]; then
diff --git a/tools/testing/selftests/ftrace/test.d/remotes/trace_pipe.tc b/tools/testing/selftests/ftrace/test.d/remotes/trace_pipe.tc
index 669a7288ed7c..7f7b7b79c490 100644
--- a/tools/testing/selftests/ftrace/test.d/remotes/trace_pipe.tc
+++ b/tools/testing/selftests/ftrace/test.d/remotes/trace_pipe.tc
@@ -92,31 +92,6 @@ test_trace_pipe()
rm $output
cd - > /dev/null
done
-
- #
- # Test interaction with hotplug
- #
-
- [ "$(get_cpu_ids | wc -l)" -ge 2 ] || return 0
-
- echo 0 > trace
-
- for cpu in $(get_cpu_ids); do
- echo 0 > /sys/devices/system/cpu/cpu$cpu/online || return 0
- break
- done
-
- for i in $(seq 1 8); do
- echo $i > write_event
- done
-
- output=$(dump_trace_pipe)
-
- check_trace 1 8 $output
-
- rm $output
-
- echo 1 > /sys/devices/system/cpu/cpu$cpu/online
}
if [ -z "$SOURCE_REMOTE_TEST" ]; then
diff --git a/tools/testing/selftests/kvm/Makefile.kvm b/tools/testing/selftests/kvm/Makefile.kvm
index 6471fa214a9f..9118a5a51b89 100644
--- a/tools/testing/selftests/kvm/Makefile.kvm
+++ b/tools/testing/selftests/kvm/Makefile.kvm
@@ -64,6 +64,8 @@ TEST_GEN_PROGS_COMMON += kvm_binary_stats_test
TEST_GEN_PROGS_COMMON += kvm_create_max_vcpus
TEST_GEN_PROGS_COMMON += kvm_page_table_test
TEST_GEN_PROGS_COMMON += set_memory_region_test
+TEST_GEN_PROGS_COMMON += memslot_modification_stress_test
+TEST_GEN_PROGS_COMMON += memslot_perf_test
# Compiled test targets
TEST_GEN_PROGS_x86 = $(TEST_GEN_PROGS_COMMON)
@@ -111,8 +113,11 @@ TEST_GEN_PROGS_x86 += x86/state_test
TEST_GEN_PROGS_x86 += x86/vmx_preemption_timer_test
TEST_GEN_PROGS_x86 += x86/svm_vmcall_test
TEST_GEN_PROGS_x86 += x86/svm_int_ctl_test
+TEST_GEN_PROGS_x86 += x86/svm_nested_clear_efer_svme
TEST_GEN_PROGS_x86 += x86/svm_nested_shutdown_test
TEST_GEN_PROGS_x86 += x86/svm_nested_soft_inject_test
+TEST_GEN_PROGS_x86 += x86/svm_nested_vmcb12_gpa
+TEST_GEN_PROGS_x86 += x86/svm_lbr_nested_state
TEST_GEN_PROGS_x86 += x86/tsc_scaling_sync
TEST_GEN_PROGS_x86 += x86/sync_regs_test
TEST_GEN_PROGS_x86 += x86/ucna_injection_test
@@ -148,8 +153,6 @@ TEST_GEN_PROGS_x86 += coalesced_io_test
TEST_GEN_PROGS_x86 += dirty_log_perf_test
TEST_GEN_PROGS_x86 += guest_memfd_test
TEST_GEN_PROGS_x86 += hardware_disable_test
-TEST_GEN_PROGS_x86 += memslot_modification_stress_test
-TEST_GEN_PROGS_x86 += memslot_perf_test
TEST_GEN_PROGS_x86 += mmu_stress_test
TEST_GEN_PROGS_x86 += rseq_test
TEST_GEN_PROGS_x86 += steal_time
@@ -177,8 +180,9 @@ TEST_GEN_PROGS_arm64 += arm64/vcpu_width_config
TEST_GEN_PROGS_arm64 += arm64/vgic_init
TEST_GEN_PROGS_arm64 += arm64/vgic_irq
TEST_GEN_PROGS_arm64 += arm64/vgic_lpi_stress
+TEST_GEN_PROGS_arm64 += arm64/vgic_v5
TEST_GEN_PROGS_arm64 += arm64/vpmu_counter_access
-TEST_GEN_PROGS_arm64 += arm64/no-vgic-v3
+TEST_GEN_PROGS_arm64 += arm64/no-vgic
TEST_GEN_PROGS_arm64 += arm64/idreg-idst
TEST_GEN_PROGS_arm64 += arm64/kvm-uuid
TEST_GEN_PROGS_arm64 += access_tracking_perf_test
@@ -187,8 +191,6 @@ TEST_GEN_PROGS_arm64 += coalesced_io_test
TEST_GEN_PROGS_arm64 += dirty_log_perf_test
TEST_GEN_PROGS_arm64 += get-reg-list
TEST_GEN_PROGS_arm64 += guest_memfd_test
-TEST_GEN_PROGS_arm64 += memslot_modification_stress_test
-TEST_GEN_PROGS_arm64 += memslot_perf_test
TEST_GEN_PROGS_arm64 += mmu_stress_test
TEST_GEN_PROGS_arm64 += rseq_test
TEST_GEN_PROGS_arm64 += steal_time
@@ -207,6 +209,7 @@ TEST_GEN_PROGS_s390 += s390/user_operexec
TEST_GEN_PROGS_s390 += s390/keyop
TEST_GEN_PROGS_s390 += rseq_test
TEST_GEN_PROGS_s390 += s390/irq_routing
+TEST_GEN_PROGS_s390 += mmu_stress_test
TEST_GEN_PROGS_riscv = $(TEST_GEN_PROGS_COMMON)
TEST_GEN_PROGS_riscv += riscv/sbi_pmu_test
@@ -216,13 +219,12 @@ TEST_GEN_PROGS_riscv += arch_timer
TEST_GEN_PROGS_riscv += coalesced_io_test
TEST_GEN_PROGS_riscv += dirty_log_perf_test
TEST_GEN_PROGS_riscv += get-reg-list
-TEST_GEN_PROGS_riscv += memslot_modification_stress_test
-TEST_GEN_PROGS_riscv += memslot_perf_test
TEST_GEN_PROGS_riscv += mmu_stress_test
TEST_GEN_PROGS_riscv += rseq_test
TEST_GEN_PROGS_riscv += steal_time
-TEST_GEN_PROGS_loongarch = arch_timer
+TEST_GEN_PROGS_loongarch = loongarch/pmu_test
+TEST_GEN_PROGS_loongarch += arch_timer
TEST_GEN_PROGS_loongarch += coalesced_io_test
TEST_GEN_PROGS_loongarch += demand_paging_test
TEST_GEN_PROGS_loongarch += dirty_log_perf_test
diff --git a/tools/testing/selftests/kvm/arm64/at.c b/tools/testing/selftests/kvm/arm64/at.c
index c8ee6f520734..ce5d312ef6ba 100644
--- a/tools/testing/selftests/kvm/arm64/at.c
+++ b/tools/testing/selftests/kvm/arm64/at.c
@@ -13,7 +13,6 @@
enum {
CLEAR_ACCESS_FLAG,
- TEST_ACCESS_FLAG,
};
static u64 *ptep_hva;
@@ -49,7 +48,6 @@ do { \
GUEST_ASSERT_EQ(FIELD_GET(SYS_PAR_EL1_ATTR, par), MAIR_ATTR_NORMAL); \
GUEST_ASSERT_EQ(FIELD_GET(SYS_PAR_EL1_SH, par), PTE_SHARED >> 8); \
GUEST_ASSERT_EQ(par & SYS_PAR_EL1_PA, TEST_ADDR); \
- GUEST_SYNC(TEST_ACCESS_FLAG); \
} \
} while (0)
@@ -85,10 +83,6 @@ static void guest_code(void)
if (!SYS_FIELD_GET(ID_AA64MMFR1_EL1, HAFDBS, read_sysreg(id_aa64mmfr1_el1)))
GUEST_DONE();
- /*
- * KVM's software PTW makes the implementation choice that the AT
- * instruction sets the access flag.
- */
sysreg_clear_set(tcr_el1, 0, TCR_HA);
isb();
test_at(false);
@@ -102,8 +96,8 @@ static void handle_sync(struct kvm_vcpu *vcpu, struct ucall *uc)
case CLEAR_ACCESS_FLAG:
/*
* Delete + reinstall the memslot to invalidate stage-2
- * mappings of the stage-1 page tables, forcing KVM to
- * use the 'slow' AT emulation path.
+ * mappings of the stage-1 page tables, allowing KVM to
+ * potentially use the 'slow' AT emulation path.
*
* This and clearing the access flag from host userspace
* ensures that the access flag cannot be set speculatively
@@ -112,10 +106,6 @@ static void handle_sync(struct kvm_vcpu *vcpu, struct ucall *uc)
clear_bit(__ffs(PTE_AF), ptep_hva);
vm_mem_region_reload(vcpu->vm, vcpu->vm->memslots[MEM_REGION_PT]);
break;
- case TEST_ACCESS_FLAG:
- TEST_ASSERT(test_bit(__ffs(PTE_AF), ptep_hva),
- "Expected access flag to be set (desc: %lu)", *ptep_hva);
- break;
default:
TEST_FAIL("Unexpected SYNC arg: %lu", uc->args[1]);
}
diff --git a/tools/testing/selftests/kvm/arm64/no-vgic-v3.c b/tools/testing/selftests/kvm/arm64/no-vgic-v3.c
deleted file mode 100644
index 152c34776981..000000000000
--- a/tools/testing/selftests/kvm/arm64/no-vgic-v3.c
+++ /dev/null
@@ -1,177 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-// Check that, on a GICv3 system, not configuring GICv3 correctly
-// results in all of the sysregs generating an UNDEF exception.
-
-#include <test_util.h>
-#include <kvm_util.h>
-#include <processor.h>
-
-static volatile bool handled;
-
-#define __check_sr_read(r) \
- ({ \
- uint64_t val; \
- \
- handled = false; \
- dsb(sy); \
- val = read_sysreg_s(SYS_ ## r); \
- val; \
- })
-
-#define __check_sr_write(r) \
- do { \
- handled = false; \
- dsb(sy); \
- write_sysreg_s(0, SYS_ ## r); \
- isb(); \
- } while(0)
-
-/* Fatal checks */
-#define check_sr_read(r) \
- do { \
- __check_sr_read(r); \
- __GUEST_ASSERT(handled, #r " no read trap"); \
- } while(0)
-
-#define check_sr_write(r) \
- do { \
- __check_sr_write(r); \
- __GUEST_ASSERT(handled, #r " no write trap"); \
- } while(0)
-
-#define check_sr_rw(r) \
- do { \
- check_sr_read(r); \
- check_sr_write(r); \
- } while(0)
-
-static void guest_code(void)
-{
- uint64_t val;
-
- /*
- * Check that we advertise that ID_AA64PFR0_EL1.GIC == 0, having
- * hidden the feature at runtime without any other userspace action.
- */
- __GUEST_ASSERT(FIELD_GET(ID_AA64PFR0_EL1_GIC,
- read_sysreg(id_aa64pfr0_el1)) == 0,
- "GICv3 wrongly advertised");
-
- /*
- * Access all GICv3 registers, and fail if we don't get an UNDEF.
- * Note that we happily access all the APxRn registers without
- * checking their existance, as all we want to see is a failure.
- */
- check_sr_rw(ICC_PMR_EL1);
- check_sr_read(ICC_IAR0_EL1);
- check_sr_write(ICC_EOIR0_EL1);
- check_sr_rw(ICC_HPPIR0_EL1);
- check_sr_rw(ICC_BPR0_EL1);
- check_sr_rw(ICC_AP0R0_EL1);
- check_sr_rw(ICC_AP0R1_EL1);
- check_sr_rw(ICC_AP0R2_EL1);
- check_sr_rw(ICC_AP0R3_EL1);
- check_sr_rw(ICC_AP1R0_EL1);
- check_sr_rw(ICC_AP1R1_EL1);
- check_sr_rw(ICC_AP1R2_EL1);
- check_sr_rw(ICC_AP1R3_EL1);
- check_sr_write(ICC_DIR_EL1);
- check_sr_read(ICC_RPR_EL1);
- check_sr_write(ICC_SGI1R_EL1);
- check_sr_write(ICC_ASGI1R_EL1);
- check_sr_write(ICC_SGI0R_EL1);
- check_sr_read(ICC_IAR1_EL1);
- check_sr_write(ICC_EOIR1_EL1);
- check_sr_rw(ICC_HPPIR1_EL1);
- check_sr_rw(ICC_BPR1_EL1);
- check_sr_rw(ICC_CTLR_EL1);
- check_sr_rw(ICC_IGRPEN0_EL1);
- check_sr_rw(ICC_IGRPEN1_EL1);
-
- /*
- * ICC_SRE_EL1 may not be trappable, as ICC_SRE_EL2.Enable can
- * be RAO/WI. Engage in non-fatal accesses, starting with a
- * write of 0 to try and disable SRE, and let's see if it
- * sticks.
- */
- __check_sr_write(ICC_SRE_EL1);
- if (!handled)
- GUEST_PRINTF("ICC_SRE_EL1 write not trapping (OK)\n");
-
- val = __check_sr_read(ICC_SRE_EL1);
- if (!handled) {
- __GUEST_ASSERT((val & BIT(0)),
- "ICC_SRE_EL1 not trapped but ICC_SRE_EL1.SRE not set\n");
- GUEST_PRINTF("ICC_SRE_EL1 read not trapping (OK)\n");
- }
-
- GUEST_DONE();
-}
-
-static void guest_undef_handler(struct ex_regs *regs)
-{
- /* Success, we've gracefully exploded! */
- handled = true;
- regs->pc += 4;
-}
-
-static void test_run_vcpu(struct kvm_vcpu *vcpu)
-{
- struct ucall uc;
-
- do {
- vcpu_run(vcpu);
-
- switch (get_ucall(vcpu, &uc)) {
- case UCALL_ABORT:
- REPORT_GUEST_ASSERT(uc);
- break;
- case UCALL_PRINTF:
- printf("%s", uc.buffer);
- break;
- case UCALL_DONE:
- break;
- default:
- TEST_FAIL("Unknown ucall %lu", uc.cmd);
- }
- } while (uc.cmd != UCALL_DONE);
-}
-
-static void test_guest_no_gicv3(void)
-{
- struct kvm_vcpu *vcpu;
- struct kvm_vm *vm;
-
- /* Create a VM without a GICv3 */
- vm = vm_create_with_one_vcpu(&vcpu, guest_code);
-
- vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vcpu);
-
- vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
- ESR_ELx_EC_UNKNOWN, guest_undef_handler);
-
- test_run_vcpu(vcpu);
-
- kvm_vm_free(vm);
-}
-
-int main(int argc, char *argv[])
-{
- struct kvm_vcpu *vcpu;
- struct kvm_vm *vm;
- uint64_t pfr0;
-
- test_disable_default_vgic();
-
- vm = vm_create_with_one_vcpu(&vcpu, NULL);
- pfr0 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
- __TEST_REQUIRE(FIELD_GET(ID_AA64PFR0_EL1_GIC, pfr0),
- "GICv3 not supported.");
- kvm_vm_free(vm);
-
- test_guest_no_gicv3();
-
- return 0;
-}
diff --git a/tools/testing/selftests/kvm/arm64/no-vgic.c b/tools/testing/selftests/kvm/arm64/no-vgic.c
new file mode 100644
index 000000000000..b14686ef17d1
--- /dev/null
+++ b/tools/testing/selftests/kvm/arm64/no-vgic.c
@@ -0,0 +1,297 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Check that, on a GICv3-capable system (GICv3 native, or GICv5 with
+// FEAT_GCIE_LEGACY), not configuring GICv3 correctly results in all
+// of the sysregs generating an UNDEF exception. Do the same for GICv5
+// on a GICv5 host.
+
+#include <test_util.h>
+#include <kvm_util.h>
+#include <processor.h>
+
+#include <arm64/gic_v5.h>
+
+static volatile bool handled;
+
+#define __check_sr_read(r) \
+ ({ \
+ uint64_t val; \
+ \
+ handled = false; \
+ dsb(sy); \
+ val = read_sysreg_s(SYS_ ## r); \
+ val; \
+ })
+
+#define __check_sr_write(r) \
+ do { \
+ handled = false; \
+ dsb(sy); \
+ write_sysreg_s(0, SYS_ ## r); \
+ isb(); \
+ } while (0)
+
+#define __check_gicv5_gicr_op(r) \
+ ({ \
+ uint64_t val; \
+ \
+ handled = false; \
+ dsb(sy); \
+ val = read_sysreg_s(GICV5_OP_GICR_ ## r); \
+ val; \
+ })
+
+#define __check_gicv5_gic_op(r) \
+ do { \
+ handled = false; \
+ dsb(sy); \
+ write_sysreg_s(0, GICV5_OP_GIC_ ## r); \
+ isb(); \
+ } while (0)
+
+/* Fatal checks */
+#define check_sr_read(r) \
+ do { \
+ __check_sr_read(r); \
+ __GUEST_ASSERT(handled, #r " no read trap"); \
+ } while (0)
+
+#define check_sr_write(r) \
+ do { \
+ __check_sr_write(r); \
+ __GUEST_ASSERT(handled, #r " no write trap"); \
+ } while (0)
+
+#define check_sr_rw(r) \
+ do { \
+ check_sr_read(r); \
+ check_sr_write(r); \
+ } while (0)
+
+#define check_gicv5_gicr_op(r) \
+ do { \
+ __check_gicv5_gicr_op(r); \
+ __GUEST_ASSERT(handled, #r " no read trap"); \
+ } while (0)
+
+#define check_gicv5_gic_op(r) \
+ do { \
+ __check_gicv5_gic_op(r); \
+ __GUEST_ASSERT(handled, #r " no write trap"); \
+ } while (0)
+
+static void guest_code_gicv3(void)
+{
+ uint64_t val;
+
+ /*
+ * Check that we advertise that ID_AA64PFR0_EL1.GIC == 0, having
+ * hidden the feature at runtime without any other userspace action.
+ */
+ __GUEST_ASSERT(FIELD_GET(ID_AA64PFR0_EL1_GIC,
+ read_sysreg(id_aa64pfr0_el1)) == 0,
+ "GICv3 wrongly advertised");
+
+ /*
+ * Access all GICv3 registers, and fail if we don't get an UNDEF.
+ * Note that we happily access all the APxRn registers without
+ * checking their existence, as all we want to see is a failure.
+ */
+ check_sr_rw(ICC_PMR_EL1);
+ check_sr_read(ICC_IAR0_EL1);
+ check_sr_write(ICC_EOIR0_EL1);
+ check_sr_rw(ICC_HPPIR0_EL1);
+ check_sr_rw(ICC_BPR0_EL1);
+ check_sr_rw(ICC_AP0R0_EL1);
+ check_sr_rw(ICC_AP0R1_EL1);
+ check_sr_rw(ICC_AP0R2_EL1);
+ check_sr_rw(ICC_AP0R3_EL1);
+ check_sr_rw(ICC_AP1R0_EL1);
+ check_sr_rw(ICC_AP1R1_EL1);
+ check_sr_rw(ICC_AP1R2_EL1);
+ check_sr_rw(ICC_AP1R3_EL1);
+ check_sr_write(ICC_DIR_EL1);
+ check_sr_read(ICC_RPR_EL1);
+ check_sr_write(ICC_SGI1R_EL1);
+ check_sr_write(ICC_ASGI1R_EL1);
+ check_sr_write(ICC_SGI0R_EL1);
+ check_sr_read(ICC_IAR1_EL1);
+ check_sr_write(ICC_EOIR1_EL1);
+ check_sr_rw(ICC_HPPIR1_EL1);
+ check_sr_rw(ICC_BPR1_EL1);
+ check_sr_rw(ICC_CTLR_EL1);
+ check_sr_rw(ICC_IGRPEN0_EL1);
+ check_sr_rw(ICC_IGRPEN1_EL1);
+
+ /*
+ * ICC_SRE_EL1 may not be trappable, as ICC_SRE_EL2.Enable can
+ * be RAO/WI. Engage in non-fatal accesses, starting with a
+ * write of 0 to try and disable SRE, and let's see if it
+ * sticks.
+ */
+ __check_sr_write(ICC_SRE_EL1);
+ if (!handled)
+ GUEST_PRINTF("ICC_SRE_EL1 write not trapping (OK)\n");
+
+ val = __check_sr_read(ICC_SRE_EL1);
+ if (!handled) {
+ __GUEST_ASSERT((val & BIT(0)),
+ "ICC_SRE_EL1 not trapped but ICC_SRE_EL1.SRE not set\n");
+ GUEST_PRINTF("ICC_SRE_EL1 read not trapping (OK)\n");
+ }
+
+ GUEST_DONE();
+}
+
+static void guest_code_gicv5(void)
+{
+ /*
+ * Check that we advertise that ID_AA64PFR2_EL1.GCIE == 0, having
+ * hidden the feature at runtime without any other userspace action.
+ */
+ __GUEST_ASSERT(FIELD_GET(ID_AA64PFR2_EL1_GCIE,
+ read_sysreg_s(SYS_ID_AA64PFR2_EL1)) == 0,
+ "GICv5 wrongly advertised");
+
+ /*
+ * Try all GICv5 instructions, and fail if we don't get an UNDEF.
+ */
+ check_gicv5_gic_op(CDAFF);
+ check_gicv5_gic_op(CDDI);
+ check_gicv5_gic_op(CDDIS);
+ check_gicv5_gic_op(CDEOI);
+ check_gicv5_gic_op(CDHM);
+ check_gicv5_gic_op(CDPEND);
+ check_gicv5_gic_op(CDPRI);
+ check_gicv5_gic_op(CDRCFG);
+ check_gicv5_gicr_op(CDIA);
+ check_gicv5_gicr_op(CDNMIA);
+
+	/* Check General System Register accesses */
+ check_sr_rw(ICC_APR_EL1);
+ check_sr_rw(ICC_CR0_EL1);
+ check_sr_read(ICC_HPPIR_EL1);
+ check_sr_read(ICC_IAFFIDR_EL1);
+ check_sr_rw(ICC_ICSR_EL1);
+ check_sr_read(ICC_IDR0_EL1);
+ check_sr_rw(ICC_PCR_EL1);
+
+	/* Check PPI System Register accesses */
+ check_sr_rw(ICC_PPI_CACTIVER0_EL1);
+ check_sr_rw(ICC_PPI_CACTIVER1_EL1);
+ check_sr_rw(ICC_PPI_SACTIVER0_EL1);
+ check_sr_rw(ICC_PPI_SACTIVER1_EL1);
+ check_sr_rw(ICC_PPI_CPENDR0_EL1);
+ check_sr_rw(ICC_PPI_CPENDR1_EL1);
+ check_sr_rw(ICC_PPI_SPENDR0_EL1);
+ check_sr_rw(ICC_PPI_SPENDR1_EL1);
+ check_sr_rw(ICC_PPI_ENABLER0_EL1);
+ check_sr_rw(ICC_PPI_ENABLER1_EL1);
+ check_sr_read(ICC_PPI_HMR0_EL1);
+ check_sr_read(ICC_PPI_HMR1_EL1);
+ check_sr_rw(ICC_PPI_PRIORITYR0_EL1);
+ check_sr_rw(ICC_PPI_PRIORITYR1_EL1);
+ check_sr_rw(ICC_PPI_PRIORITYR2_EL1);
+ check_sr_rw(ICC_PPI_PRIORITYR3_EL1);
+ check_sr_rw(ICC_PPI_PRIORITYR4_EL1);
+ check_sr_rw(ICC_PPI_PRIORITYR5_EL1);
+ check_sr_rw(ICC_PPI_PRIORITYR6_EL1);
+ check_sr_rw(ICC_PPI_PRIORITYR7_EL1);
+ check_sr_rw(ICC_PPI_PRIORITYR8_EL1);
+ check_sr_rw(ICC_PPI_PRIORITYR9_EL1);
+ check_sr_rw(ICC_PPI_PRIORITYR10_EL1);
+ check_sr_rw(ICC_PPI_PRIORITYR11_EL1);
+ check_sr_rw(ICC_PPI_PRIORITYR12_EL1);
+ check_sr_rw(ICC_PPI_PRIORITYR13_EL1);
+ check_sr_rw(ICC_PPI_PRIORITYR14_EL1);
+ check_sr_rw(ICC_PPI_PRIORITYR15_EL1);
+
+ GUEST_DONE();
+}
+
+static void guest_undef_handler(struct ex_regs *regs)
+{
+ /* Success, we've gracefully exploded! */
+ handled = true;
+ regs->pc += 4;
+}
+
+static void test_run_vcpu(struct kvm_vcpu *vcpu)
+{
+ struct ucall uc;
+
+ do {
+ vcpu_run(vcpu);
+
+ switch (get_ucall(vcpu, &uc)) {
+ case UCALL_ABORT:
+ REPORT_GUEST_ASSERT(uc);
+ break;
+ case UCALL_PRINTF:
+ printf("%s", uc.buffer);
+ break;
+ case UCALL_DONE:
+ break;
+ default:
+ TEST_FAIL("Unknown ucall %lu", uc.cmd);
+ }
+ } while (uc.cmd != UCALL_DONE);
+}
+
+static void test_guest_no_vgic(void *guest_code)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+
+ /* Create a VM without a GIC */
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+
+ vm_init_descriptor_tables(vm);
+ vcpu_init_descriptor_tables(vcpu);
+
+ vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
+ ESR_ELx_EC_UNKNOWN, guest_undef_handler);
+
+ test_run_vcpu(vcpu);
+
+ kvm_vm_free(vm);
+}
+
+int main(int argc, char *argv[])
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+ bool has_v3, has_v5;
+ uint64_t pfr;
+
+ test_disable_default_vgic();
+
+ vm = vm_create_with_one_vcpu(&vcpu, NULL);
+
+ pfr = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
+ has_v3 = !!FIELD_GET(ID_AA64PFR0_EL1_GIC, pfr);
+
+ pfr = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR2_EL1));
+ has_v5 = !!FIELD_GET(ID_AA64PFR2_EL1_GCIE, pfr);
+
+ kvm_vm_free(vm);
+
+ __TEST_REQUIRE(has_v3 || has_v5,
+ "Neither GICv3 nor GICv5 supported.");
+
+ if (has_v3) {
+ pr_info("Testing no-vgic-v3\n");
+ test_guest_no_vgic(guest_code_gicv3);
+ } else {
+ pr_info("No GICv3 support: skipping no-vgic-v3 test\n");
+ }
+
+ if (has_v5) {
+ pr_info("Testing no-vgic-v5\n");
+ test_guest_no_vgic(guest_code_gicv5);
+ } else {
+ pr_info("No GICv5 support: skipping no-vgic-v5 test\n");
+ }
+
+ return 0;
+}
diff --git a/tools/testing/selftests/kvm/arm64/set_id_regs.c b/tools/testing/selftests/kvm/arm64/set_id_regs.c
index fa3478a6c914..3a7e5fe9ae7a 100644
--- a/tools/testing/selftests/kvm/arm64/set_id_regs.c
+++ b/tools/testing/selftests/kvm/arm64/set_id_regs.c
@@ -37,6 +37,9 @@ struct reg_ftr_bits {
* For FTR_LOWER_SAFE, safe_val is used as the minimal safe value.
*/
int64_t safe_val;
+
+ /* Allowed to be changed by the host after run */
+ bool mutable;
};
struct test_feature_reg {
@@ -44,7 +47,7 @@ struct test_feature_reg {
const struct reg_ftr_bits *ftr_bits;
};
-#define __REG_FTR_BITS(NAME, SIGNED, TYPE, SHIFT, MASK, SAFE_VAL) \
+#define __REG_FTR_BITS(NAME, SIGNED, TYPE, SHIFT, MASK, SAFE_VAL, MUT) \
{ \
.name = #NAME, \
.sign = SIGNED, \
@@ -52,15 +55,20 @@ struct test_feature_reg {
.shift = SHIFT, \
.mask = MASK, \
.safe_val = SAFE_VAL, \
+ .mutable = MUT, \
}
#define REG_FTR_BITS(type, reg, field, safe_val) \
__REG_FTR_BITS(reg##_##field, FTR_UNSIGNED, type, reg##_##field##_SHIFT, \
- reg##_##field##_MASK, safe_val)
+ reg##_##field##_MASK, safe_val, false)
+
+#define REG_FTR_BITS_MUTABLE(type, reg, field, safe_val) \
+ __REG_FTR_BITS(reg##_##field, FTR_UNSIGNED, type, reg##_##field##_SHIFT, \
+ reg##_##field##_MASK, safe_val, true)
#define S_REG_FTR_BITS(type, reg, field, safe_val) \
__REG_FTR_BITS(reg##_##field, FTR_SIGNED, type, reg##_##field##_SHIFT, \
- reg##_##field##_MASK, safe_val)
+ reg##_##field##_MASK, safe_val, false)
#define REG_FTR_END \
{ \
@@ -135,7 +143,8 @@ static const struct reg_ftr_bits ftr_id_aa64pfr0_el1[] = {
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, CSV2, 0),
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, DIT, 0),
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, SEL2, 0),
- REG_FTR_BITS(FTR_EXACT, ID_AA64PFR0_EL1, GIC, 0),
+ /* GICv3 support will be forced at run time if available */
+ REG_FTR_BITS_MUTABLE(FTR_EXACT, ID_AA64PFR0_EL1, GIC, 0),
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL3, 1),
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL2, 1),
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL1, 1),
@@ -635,12 +644,38 @@ static void test_user_set_mte_reg(struct kvm_vcpu *vcpu)
ksft_test_result_pass("ID_AA64PFR1_EL1.MTE_frac no longer 0xF\n");
}
+static uint64_t reset_mutable_bits(uint32_t id, uint64_t val)
+{
+ struct test_feature_reg *reg = NULL;
+
+ for (int i = 0; i < ARRAY_SIZE(test_regs); i++) {
+ if (test_regs[i].reg == id) {
+ reg = &test_regs[i];
+ break;
+ }
+ }
+
+ if (!reg)
+ return val;
+
+ for (const struct reg_ftr_bits *bits = reg->ftr_bits; bits->type != FTR_END; bits++) {
+ if (bits->mutable) {
+ val &= ~bits->mask;
+ val |= bits->safe_val << bits->shift;
+ }
+ }
+
+ return val;
+}
+
static void test_guest_reg_read(struct kvm_vcpu *vcpu)
{
bool done = false;
struct ucall uc;
while (!done) {
+ uint64_t val;
+
vcpu_run(vcpu);
switch (get_ucall(vcpu, &uc)) {
@@ -648,9 +683,11 @@ static void test_guest_reg_read(struct kvm_vcpu *vcpu)
REPORT_GUEST_ASSERT(uc);
break;
case UCALL_SYNC:
+ val = test_reg_vals[encoding_to_range_idx(uc.args[2])];
+ val = reset_mutable_bits(uc.args[2], val);
+
/* Make sure the written values are seen by guest */
- TEST_ASSERT_EQ(test_reg_vals[encoding_to_range_idx(uc.args[2])],
- uc.args[3]);
+ TEST_ASSERT_EQ(val, reset_mutable_bits(uc.args[2], uc.args[3]));
break;
case UCALL_DONE:
done = true;
@@ -741,7 +778,8 @@ static void test_assert_id_reg_unchanged(struct kvm_vcpu *vcpu, uint32_t encodin
uint64_t observed;
observed = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(encoding));
- TEST_ASSERT_EQ(test_reg_vals[idx], observed);
+ TEST_ASSERT_EQ(reset_mutable_bits(encoding, test_reg_vals[idx]),
+ reset_mutable_bits(encoding, observed));
}
static void test_reset_preserves_id_regs(struct kvm_vcpu *vcpu)
diff --git a/tools/testing/selftests/kvm/arm64/vgic_v5.c b/tools/testing/selftests/kvm/arm64/vgic_v5.c
new file mode 100644
index 000000000000..3ce6cf37a629
--- /dev/null
+++ b/tools/testing/selftests/kvm/arm64/vgic_v5.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/kernel.h>
+#include <sys/syscall.h>
+#include <asm/kvm.h>
+#include <asm/kvm_para.h>
+
+#include <arm64/gic_v5.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "vgic.h"
+
+#define NR_VCPUS 1
+
+struct vm_gic {
+ struct kvm_vm *vm;
+ int gic_fd;
+ uint32_t gic_dev_type;
+};
+
+static uint64_t max_phys_size;
+
+#define GUEST_CMD_IRQ_CDIA 10
+#define GUEST_CMD_IRQ_DIEOI 11
+#define GUEST_CMD_IS_AWAKE 12
+#define GUEST_CMD_IS_READY 13
+
+static void guest_irq_handler(struct ex_regs *regs)
+{
+ bool valid;
+ u32 hwirq;
+ u64 ia;
+ static int count;
+
+ /*
+ * We have pending interrupts. Should never actually enter WFI
+ * here!
+ */
+ wfi();
+ GUEST_SYNC(GUEST_CMD_IS_AWAKE);
+
+ ia = gicr_insn(CDIA);
+ valid = GICV5_GICR_CDIA_VALID(ia);
+
+ GUEST_SYNC(GUEST_CMD_IRQ_CDIA);
+
+ if (!valid)
+ return;
+
+ gsb_ack();
+ isb();
+
+ hwirq = FIELD_GET(GICV5_GICR_CDIA_INTID, ia);
+
+ gic_insn(hwirq, CDDI);
+ gic_insn(0, CDEOI);
+
+ GUEST_SYNC(GUEST_CMD_IRQ_DIEOI);
+
+ if (++count >= 2)
+ GUEST_DONE();
+
+ /* Ask for the next interrupt to be injected */
+ GUEST_SYNC(GUEST_CMD_IS_READY);
+}
+
+static void guest_code(void)
+{
+ local_irq_disable();
+
+ gicv5_cpu_enable_interrupts();
+ local_irq_enable();
+
+ /* Enable the SW_PPI (3) */
+ write_sysreg_s(BIT_ULL(3), SYS_ICC_PPI_ENABLER0_EL1);
+
+ /* Ask for the first interrupt to be injected */
+ GUEST_SYNC(GUEST_CMD_IS_READY);
+
+ /* Loop forever waiting for interrupts */
+ while (1);
+}
+
+
+/* We don't want to assert on a failed vCPU run, hence this helper. */
+static int run_vcpu(struct kvm_vcpu *vcpu)
+{
+ return __vcpu_run(vcpu) ? -errno : 0;
+}
+
+static void vm_gic_destroy(struct vm_gic *v)
+{
+ close(v->gic_fd);
+ kvm_vm_free(v->vm);
+}
+
+static void test_vgic_v5_ppis(uint32_t gic_dev_type)
+{
+ struct kvm_vcpu *vcpus[NR_VCPUS];
+ struct ucall uc;
+ u64 user_ppis[2];
+ struct vm_gic v;
+ int ret, i;
+
+ v.gic_dev_type = gic_dev_type;
+ v.vm = __vm_create(VM_SHAPE_DEFAULT, NR_VCPUS, 0);
+
+ v.gic_fd = kvm_create_device(v.vm, gic_dev_type);
+
+ for (i = 0; i < NR_VCPUS; i++)
+ vcpus[i] = vm_vcpu_add(v.vm, i, guest_code);
+
+ vm_init_descriptor_tables(v.vm);
+ vm_install_exception_handler(v.vm, VECTOR_IRQ_CURRENT, guest_irq_handler);
+
+ for (i = 0; i < NR_VCPUS; i++)
+ vcpu_init_descriptor_tables(vcpus[i]);
+
+ kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
+ KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
+
+ /* Read out the PPIs that user space is allowed to drive. */
+ kvm_device_attr_get(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
+ KVM_DEV_ARM_VGIC_USERSPACE_PPIS, &user_ppis);
+
+ /* We should always be able to drive the SW_PPI. */
+ TEST_ASSERT(user_ppis[0] & BIT(GICV5_ARCH_PPI_SW_PPI),
+ "SW_PPI is not drivable by userspace");
+
+ while (1) {
+ ret = run_vcpu(vcpus[0]);
+
+ switch (get_ucall(vcpus[0], &uc)) {
+ case UCALL_SYNC:
+ /*
+ * The guest is ready for the next level change. Set
+ * high if ready, and lower if it has been consumed.
+ */
+ if (uc.args[1] == GUEST_CMD_IS_READY ||
+ uc.args[1] == GUEST_CMD_IRQ_DIEOI) {
+ u64 irq;
+ bool level = uc.args[1] == GUEST_CMD_IRQ_DIEOI ? 0 : 1;
+
+ irq = FIELD_PREP(KVM_ARM_IRQ_NUM_MASK, 3);
+ irq |= KVM_ARM_IRQ_TYPE_PPI << KVM_ARM_IRQ_TYPE_SHIFT;
+
+ _kvm_irq_line(v.vm, irq, level);
+ } else if (uc.args[1] == GUEST_CMD_IS_AWAKE) {
+ pr_info("Guest skipping WFI due to pending IRQ\n");
+ } else if (uc.args[1] == GUEST_CMD_IRQ_CDIA) {
+ pr_info("Guest acknowledged IRQ\n");
+ }
+
+ continue;
+ case UCALL_ABORT:
+ REPORT_GUEST_ASSERT(uc);
+ break;
+ case UCALL_DONE:
+ goto done;
+ default:
+ TEST_FAIL("Unknown ucall %lu", uc.cmd);
+ }
+ }
+
+done:
+ TEST_ASSERT(ret == 0, "Failed to test GICv5 PPIs");
+
+ vm_gic_destroy(&v);
+}
+
+/*
+ * Returns 0 if it's possible to create a GIC device of a given type (V5).
+ */
+int test_kvm_device(uint32_t gic_dev_type)
+{
+ struct kvm_vcpu *vcpus[NR_VCPUS];
+ struct vm_gic v;
+ int ret;
+
+ v.vm = vm_create_with_vcpus(NR_VCPUS, guest_code, vcpus);
+
+	/* Try to create a non-existent KVM device. */
+ ret = __kvm_test_create_device(v.vm, 0);
+ TEST_ASSERT(ret && errno == ENODEV, "unsupported device");
+
+ /* trial mode */
+ ret = __kvm_test_create_device(v.vm, gic_dev_type);
+ if (ret)
+ return ret;
+ v.gic_fd = kvm_create_device(v.vm, gic_dev_type);
+
+ ret = __kvm_create_device(v.vm, gic_dev_type);
+ TEST_ASSERT(ret < 0 && errno == EEXIST, "create GIC device twice");
+
+ vm_gic_destroy(&v);
+
+ return 0;
+}
+
+void run_tests(uint32_t gic_dev_type)
+{
+ pr_info("Test VGICv5 PPIs\n");
+ test_vgic_v5_ppis(gic_dev_type);
+}
+
+int main(int ac, char **av)
+{
+ int ret;
+ int pa_bits;
+
+ test_disable_default_vgic();
+
+ pa_bits = vm_guest_mode_params[VM_MODE_DEFAULT].pa_bits;
+ max_phys_size = 1ULL << pa_bits;
+
+ ret = test_kvm_device(KVM_DEV_TYPE_ARM_VGIC_V5);
+ if (ret) {
+ pr_info("No GICv5 support; Not running GIC_v5 tests.\n");
+ exit(KSFT_SKIP);
+ }
+
+ pr_info("Running VGIC_V5 tests.\n");
+ run_tests(KVM_DEV_TYPE_ARM_VGIC_V5);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index d58a641b0e6a..7627b328f18a 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -641,9 +641,6 @@ static void run_test(enum vm_guest_mode mode, void *arg)
}
#ifdef __s390x__
- /* Align to 1M (segment size) */
- guest_test_phys_mem = align_down(guest_test_phys_mem, 1 << 20);
-
/*
* The workaround in guest_code() to write all pages prior to the first
* iteration isn't compatible with the dirty ring, as the dirty ring
diff --git a/tools/testing/selftests/kvm/guest_memfd_test.c b/tools/testing/selftests/kvm/guest_memfd_test.c
index cc329b57ce2e..ec7644aae999 100644
--- a/tools/testing/selftests/kvm/guest_memfd_test.c
+++ b/tools/testing/selftests/kvm/guest_memfd_test.c
@@ -171,6 +171,64 @@ static void test_numa_allocation(int fd, size_t total_size)
kvm_munmap(mem, total_size);
}
+static void test_collapse(int fd, uint64_t flags)
+{
+ const size_t pmd_size = get_trans_hugepagesz();
+ void *reserved_addr;
+ void *aligned_addr;
+ char *mem;
+ off_t i;
+
+ /*
+ * To even reach the point where the guest_memfd folios will
+ * get collapsed, both the userspace address and the offset
+ * within the guest_memfd have to be aligned to pmd_size.
+ *
+ * To achieve that alignment, reserve virtual address space
+ * with regular mmap, then use MAP_FIXED to allocate memory
+ * from a pmd_size-aligned offset (0) at a known, available
+ * virtual address.
+ */
+ reserved_addr = kvm_mmap(pmd_size * 2, PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1);
+ aligned_addr = align_ptr_up(reserved_addr, pmd_size);
+
+ mem = mmap(aligned_addr, pmd_size, PROT_READ | PROT_WRITE,
+ MAP_FIXED | MAP_SHARED, fd, 0);
+ TEST_ASSERT(IS_ALIGNED((u64)mem, pmd_size),
+ "Userspace address must be aligned to PMD size.");
+
+ /*
+ * Use reads to populate page table to avoid setting dirty
+ * flag on page.
+ */
+ for (i = 0; i < pmd_size; i += getpagesize())
+ READ_ONCE(mem[i]);
+
+ /*
+ * Advising the use of huge pages in guest_memfd should be
+ * fine...
+ */
+ kvm_madvise(mem, pmd_size, MADV_HUGEPAGE);
+
+ /*
+ * ... but collapsing folios must not be supported to avoid
+ * mapping beyond shared ranges into host userspace page
+ * tables.
+ */
+ TEST_ASSERT_EQ(madvise(mem, pmd_size, MADV_COLLAPSE), -1);
+ TEST_ASSERT_EQ(errno, EINVAL);
+
+ /*
+ * Removing from host page tables and re-faulting should be
+ * fine; should not end up faulting in a collapsed/huge folio.
+ */
+ kvm_madvise(mem, pmd_size, MADV_DONTNEED);
+ READ_ONCE(mem[0]);
+
+ kvm_munmap(reserved_addr, pmd_size * 2);
+}
+
static void test_fault_sigbus(int fd, size_t accessible_size, size_t map_size)
{
const char val = 0xaa;
@@ -350,14 +408,17 @@ static void test_guest_memfd_flags(struct kvm_vm *vm)
}
}
-#define gmem_test(__test, __vm, __flags) \
+#define __gmem_test(__test, __vm, __flags, __gmem_size) \
do { \
- int fd = vm_create_guest_memfd(__vm, page_size * 4, __flags); \
+ int fd = vm_create_guest_memfd(__vm, __gmem_size, __flags); \
\
- test_##__test(fd, page_size * 4); \
+ test_##__test(fd, __gmem_size); \
close(fd); \
} while (0)
+#define gmem_test(__test, __vm, __flags) \
+ __gmem_test(__test, __vm, __flags, page_size * 4)
+
static void __test_guest_memfd(struct kvm_vm *vm, uint64_t flags)
{
test_create_guest_memfd_multiple(vm);
@@ -367,9 +428,12 @@ static void __test_guest_memfd(struct kvm_vm *vm, uint64_t flags)
if (flags & GUEST_MEMFD_FLAG_MMAP) {
if (flags & GUEST_MEMFD_FLAG_INIT_SHARED) {
+ size_t pmd_size = get_trans_hugepagesz();
+
gmem_test(mmap_supported, vm, flags);
gmem_test(fault_overflow, vm, flags);
gmem_test(numa_allocation, vm, flags);
+ __gmem_test(collapse, vm, flags, pmd_size);
} else {
gmem_test(fault_private, vm, flags);
}
diff --git a/tools/testing/selftests/kvm/include/arm64/gic_v5.h b/tools/testing/selftests/kvm/include/arm64/gic_v5.h
new file mode 100644
index 000000000000..eb523d9277cf
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/arm64/gic_v5.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SELFTESTS_GIC_V5_H
+#define __SELFTESTS_GIC_V5_H
+
+#include <asm/barrier.h>
+#include <asm/sysreg.h>
+
+#include <linux/bitfield.h>
+
+#include "processor.h"
+
+/*
+ * Definitions for GICv5 instructions for the Current Domain
+ */
+#define GICV5_OP_GIC_CDAFF sys_insn(1, 0, 12, 1, 3)
+#define GICV5_OP_GIC_CDDI sys_insn(1, 0, 12, 2, 0)
+#define GICV5_OP_GIC_CDDIS sys_insn(1, 0, 12, 1, 0)
+#define GICV5_OP_GIC_CDHM sys_insn(1, 0, 12, 2, 1)
+#define GICV5_OP_GIC_CDEN sys_insn(1, 0, 12, 1, 1)
+#define GICV5_OP_GIC_CDEOI sys_insn(1, 0, 12, 1, 7)
+#define GICV5_OP_GIC_CDPEND sys_insn(1, 0, 12, 1, 4)
+#define GICV5_OP_GIC_CDPRI sys_insn(1, 0, 12, 1, 2)
+#define GICV5_OP_GIC_CDRCFG sys_insn(1, 0, 12, 1, 5)
+#define GICV5_OP_GICR_CDIA sys_insn(1, 0, 12, 3, 0)
+#define GICV5_OP_GICR_CDNMIA sys_insn(1, 0, 12, 3, 1)
+
+/* Definitions for GIC CDAFF */
+#define GICV5_GIC_CDAFF_IAFFID_MASK GENMASK_ULL(47, 32)
+#define GICV5_GIC_CDAFF_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GIC_CDAFF_IRM_MASK BIT_ULL(28)
+#define GICV5_GIC_CDAFF_ID_MASK GENMASK_ULL(23, 0)
+
+/* Definitions for GIC CDDI */
+#define GICV5_GIC_CDDI_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GIC_CDDI_ID_MASK GENMASK_ULL(23, 0)
+
+/* Definitions for GIC CDDIS */
+#define GICV5_GIC_CDDIS_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GIC_CDDIS_TYPE(r) FIELD_GET(GICV5_GIC_CDDIS_TYPE_MASK, r)
+#define GICV5_GIC_CDDIS_ID_MASK GENMASK_ULL(23, 0)
+#define GICV5_GIC_CDDIS_ID(r) FIELD_GET(GICV5_GIC_CDDIS_ID_MASK, r)
+
+/* Definitions for GIC CDEN */
+#define GICV5_GIC_CDEN_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GIC_CDEN_ID_MASK GENMASK_ULL(23, 0)
+
+/* Definitions for GIC CDHM */
+#define GICV5_GIC_CDHM_HM_MASK BIT_ULL(32)
+#define GICV5_GIC_CDHM_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GIC_CDHM_ID_MASK GENMASK_ULL(23, 0)
+
+/* Definitions for GIC CDPEND */
+#define GICV5_GIC_CDPEND_PENDING_MASK BIT_ULL(32)
+#define GICV5_GIC_CDPEND_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GIC_CDPEND_ID_MASK GENMASK_ULL(23, 0)
+
+/* Definitions for GIC CDPRI */
+#define GICV5_GIC_CDPRI_PRIORITY_MASK GENMASK_ULL(39, 35)
+#define GICV5_GIC_CDPRI_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GIC_CDPRI_ID_MASK GENMASK_ULL(23, 0)
+
+/* Definitions for GIC CDRCFG */
+#define GICV5_GIC_CDRCFG_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GIC_CDRCFG_ID_MASK GENMASK_ULL(23, 0)
+
+/* Definitions for GICR CDIA */
+#define GICV5_GICR_CDIA_VALID_MASK BIT_ULL(32)
+#define GICV5_GICR_CDIA_VALID(r) FIELD_GET(GICV5_GICR_CDIA_VALID_MASK, r)
+#define GICV5_GICR_CDIA_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GICR_CDIA_ID_MASK GENMASK_ULL(23, 0)
+#define GICV5_GICR_CDIA_INTID GENMASK_ULL(31, 0)
+
+/* Definitions for GICR CDNMIA */
+#define GICV5_GICR_CDNMIA_VALID_MASK BIT_ULL(32)
+#define GICV5_GICR_CDNMIA_VALID(r) FIELD_GET(GICV5_GICR_CDNMIA_VALID_MASK, r)
+#define GICV5_GICR_CDNMIA_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GICR_CDNMIA_ID_MASK GENMASK_ULL(23, 0)
+
+#define gicr_insn(insn) read_sysreg_s(GICV5_OP_GICR_##insn)
+#define gic_insn(v, insn) write_sysreg_s(v, GICV5_OP_GIC_##insn)
+
+#define __GIC_BARRIER_INSN(op0, op1, CRn, CRm, op2, Rt) \
+ __emit_inst(0xd5000000 | \
+ sys_insn((op0), (op1), (CRn), (CRm), (op2)) | \
+ ((Rt) & 0x1f))
+
+#define GSB_SYS_BARRIER_INSN __GIC_BARRIER_INSN(1, 0, 12, 0, 0, 31)
+#define GSB_ACK_BARRIER_INSN __GIC_BARRIER_INSN(1, 0, 12, 0, 1, 31)
+
+#define gsb_ack() asm volatile(GSB_ACK_BARRIER_INSN : : : "memory")
+#define gsb_sys() asm volatile(GSB_SYS_BARRIER_INSN : : : "memory")
+
+#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))
+
+#define GICV5_IRQ_DEFAULT_PRI 0b10000
+
+#define GICV5_ARCH_PPI_SW_PPI 0x3
+
+void gicv5_ppi_priority_init(void)
+{
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_DEFAULT_PRI), SYS_ICC_PPI_PRIORITYR0_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_DEFAULT_PRI), SYS_ICC_PPI_PRIORITYR1_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_DEFAULT_PRI), SYS_ICC_PPI_PRIORITYR2_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_DEFAULT_PRI), SYS_ICC_PPI_PRIORITYR3_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_DEFAULT_PRI), SYS_ICC_PPI_PRIORITYR4_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_DEFAULT_PRI), SYS_ICC_PPI_PRIORITYR5_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_DEFAULT_PRI), SYS_ICC_PPI_PRIORITYR6_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_DEFAULT_PRI), SYS_ICC_PPI_PRIORITYR7_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_DEFAULT_PRI), SYS_ICC_PPI_PRIORITYR8_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_DEFAULT_PRI), SYS_ICC_PPI_PRIORITYR9_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_DEFAULT_PRI), SYS_ICC_PPI_PRIORITYR10_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_DEFAULT_PRI), SYS_ICC_PPI_PRIORITYR11_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_DEFAULT_PRI), SYS_ICC_PPI_PRIORITYR12_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_DEFAULT_PRI), SYS_ICC_PPI_PRIORITYR13_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_DEFAULT_PRI), SYS_ICC_PPI_PRIORITYR14_EL1);
+ write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_DEFAULT_PRI), SYS_ICC_PPI_PRIORITYR15_EL1);
+
+ /*
+	 * Context synchronization is required to make sure the effects of the
+	 * system register writes are synchronized.
+ */
+ isb();
+}
+
+void gicv5_cpu_disable_interrupts(void)
+{
+ u64 cr0;
+
+ cr0 = FIELD_PREP(ICC_CR0_EL1_EN, 0);
+ write_sysreg_s(cr0, SYS_ICC_CR0_EL1);
+}
+
+void gicv5_cpu_enable_interrupts(void)
+{
+ u64 cr0, pcr;
+
+ write_sysreg_s(0, SYS_ICC_PPI_ENABLER0_EL1);
+ write_sysreg_s(0, SYS_ICC_PPI_ENABLER1_EL1);
+
+ gicv5_ppi_priority_init();
+
+ pcr = FIELD_PREP(ICC_PCR_EL1_PRIORITY, GICV5_IRQ_DEFAULT_PRI);
+ write_sysreg_s(pcr, SYS_ICC_PCR_EL1);
+
+ cr0 = FIELD_PREP(ICC_CR0_EL1_EN, 1);
+ write_sysreg_s(cr0, SYS_ICC_CR0_EL1);
+}
+
+#endif
diff --git a/tools/testing/selftests/kvm/include/kvm_syscalls.h b/tools/testing/selftests/kvm/include/kvm_syscalls.h
index d4e613162bba..843c9904c46f 100644
--- a/tools/testing/selftests/kvm/include/kvm_syscalls.h
+++ b/tools/testing/selftests/kvm/include/kvm_syscalls.h
@@ -77,5 +77,6 @@ __KVM_SYSCALL_DEFINE(munmap, 2, void *, mem, size_t, size);
__KVM_SYSCALL_DEFINE(close, 1, int, fd);
__KVM_SYSCALL_DEFINE(fallocate, 4, int, fd, int, mode, loff_t, offset, loff_t, len);
__KVM_SYSCALL_DEFINE(ftruncate, 2, unsigned int, fd, off_t, length);
+__KVM_SYSCALL_DEFINE(madvise, 3, void *, addr, size_t, length, int, advice);
#endif /* SELFTEST_KVM_SYSCALLS_H */
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index 8b39cb919f4f..f861242b4ae8 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -1127,10 +1127,6 @@ vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
{
unsigned int n;
n = vm_num_guest_pages(mode, vm_num_host_pages(mode, num_guest_pages));
-#ifdef __s390x__
- /* s390 requires 1M aligned guest sizes */
- n = (n + 255) & ~255;
-#endif
return n;
}
diff --git a/tools/testing/selftests/kvm/include/kvm_util_types.h b/tools/testing/selftests/kvm/include/kvm_util_types.h
index ec787b97cf18..0366e9bce7f9 100644
--- a/tools/testing/selftests/kvm/include/kvm_util_types.h
+++ b/tools/testing/selftests/kvm/include/kvm_util_types.h
@@ -17,4 +17,6 @@
typedef uint64_t vm_paddr_t; /* Virtual Machine (Guest) physical address */
typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */
+#define INVALID_GPA (~(uint64_t)0)
+
#endif /* SELFTEST_KVM_UTIL_TYPES_H */
diff --git a/tools/testing/selftests/kvm/include/loongarch/pmu.h b/tools/testing/selftests/kvm/include/loongarch/pmu.h
new file mode 100644
index 000000000000..478e6a9bbb2b
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/loongarch/pmu.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * LoongArch PMU specific interface
+ */
+#ifndef SELFTEST_KVM_PMU_H
+#define SELFTEST_KVM_PMU_H
+
+#include "processor.h"
+
+#define LOONGARCH_CPUCFG6 0x6
+#define CPUCFG6_PMP BIT(0)
+#define CPUCFG6_PAMVER GENMASK(3, 1)
+#define CPUCFG6_PMNUM GENMASK(7, 4)
+#define CPUCFG6_PMNUM_SHIFT 4
+#define CPUCFG6_PMBITS GENMASK(13, 8)
+#define CPUCFG6_PMBITS_SHIFT 8
+#define CPUCFG6_UPM BIT(14)
+
+/* Performance Counter registers */
+#define LOONGARCH_CSR_PERFCTRL0 0x200 /* perf event 0 config */
+#define LOONGARCH_CSR_PERFCNTR0 0x201 /* perf event 0 count value */
+#define LOONGARCH_CSR_PERFCTRL1 0x202 /* perf event 1 config */
+#define LOONGARCH_CSR_PERFCNTR1 0x203 /* perf event 1 count value */
+#define LOONGARCH_CSR_PERFCTRL2 0x204 /* perf event 2 config */
+#define LOONGARCH_CSR_PERFCNTR2 0x205 /* perf event 2 count value */
+#define LOONGARCH_CSR_PERFCTRL3 0x206 /* perf event 3 config */
+#define LOONGARCH_CSR_PERFCNTR3 0x207 /* perf event 3 count value */
+#define CSR_PERFCTRL_PLV0 BIT(16)
+#define CSR_PERFCTRL_PLV1 BIT(17)
+#define CSR_PERFCTRL_PLV2 BIT(18)
+#define CSR_PERFCTRL_PLV3 BIT(19)
+#define CSR_PERFCTRL_PMIE BIT(20)
+#define PMU_ENVENT_ENABLED (CSR_PERFCTRL_PLV0 | CSR_PERFCTRL_PLV1 | CSR_PERFCTRL_PLV2 | CSR_PERFCTRL_PLV3)
+
+/* Hardware event codes (from LoongArch perf_event.c) */
+#define LOONGARCH_PMU_EVENT_CYCLES 0x00 /* CPU cycles */
+#define LOONGARCH_PMU_EVENT_INSTR_RETIRED 0x01 /* Instructions retired */
+#define PERF_COUNT_HW_BRANCH_INSTRUCTIONS 0x02 /* Branch instructions */
+#define PERF_COUNT_HW_BRANCH_MISSES 0x03 /* Branch misses */
+
+#define NUM_LOOPS 1000
+#define EXPECTED_INSTR_MIN (NUM_LOOPS + 10) /* Loop + overhead */
+#define EXPECTED_CYCLES_MIN NUM_LOOPS /* At least 1 cycle per iteration */
+#define UPPER_BOUND (10 * NUM_LOOPS)
+
+#define PMU_OVERFLOW (1ULL << 63)
+
+static inline void pmu_irq_enable(void)
+{
+ unsigned long val;
+
+ val = csr_read(LOONGARCH_CSR_ECFG);
+ val |= ECFGF_PMU;
+ csr_write(val, LOONGARCH_CSR_ECFG);
+}
+
+static inline void pmu_irq_disable(void)
+{
+ unsigned long val;
+
+ val = csr_read(LOONGARCH_CSR_ECFG);
+ val &= ~ECFGF_PMU;
+ csr_write(val, LOONGARCH_CSR_ECFG);
+}
+
+#endif
diff --git a/tools/testing/selftests/kvm/include/loongarch/processor.h b/tools/testing/selftests/kvm/include/loongarch/processor.h
index 76840ddda57d..93dc1fbd2e79 100644
--- a/tools/testing/selftests/kvm/include/loongarch/processor.h
+++ b/tools/testing/selftests/kvm/include/loongarch/processor.h
@@ -83,6 +83,8 @@
#define LOONGARCH_CSR_PRMD 0x1
#define LOONGARCH_CSR_EUEN 0x2
#define LOONGARCH_CSR_ECFG 0x4
+#define ECFGB_PMU 10
+#define ECFGF_PMU (BIT_ULL(ECFGB_PMU))
#define ECFGB_TIMER 11
#define ECFGF_TIMER (BIT_ULL(ECFGB_TIMER))
#define LOONGARCH_CSR_ESTAT 0x5 /* Exception status */
@@ -90,6 +92,7 @@
#define CSR_ESTAT_EXC_WIDTH 6
#define CSR_ESTAT_EXC (0x3f << CSR_ESTAT_EXC_SHIFT)
#define EXCCODE_INT 0 /* Interrupt */
+#define INT_PMI 10 /* PMU interrupt */
#define INT_TI 11 /* Timer interrupt*/
#define LOONGARCH_CSR_ERA 0x6 /* ERA */
#define LOONGARCH_CSR_BADV 0x7 /* Bad virtual address */
@@ -128,6 +131,17 @@
#define CSR_TLBREHI_PS_SHIFT 0
#define CSR_TLBREHI_PS (0x3fUL << CSR_TLBREHI_PS_SHIFT)
+#define read_cpucfg(reg) \
+({ \
+ register unsigned long __v; \
+ __asm__ __volatile__( \
+ "cpucfg %0, %1\n\t" \
+ : "=r" (__v) \
+ : "r" (reg) \
+ : "memory"); \
+ __v; \
+})
+
#define csr_read(csr) \
({ \
register unsigned long __v; \
@@ -178,6 +192,7 @@ struct handlers {
handler_fn exception_handlers[VECTOR_NUM];
};
+void loongarch_vcpu_setup(struct kvm_vcpu *vcpu);
void vm_init_descriptor_tables(struct kvm_vm *vm);
void vm_install_exception_handler(struct kvm_vm *vm, int vector, handler_fn handler);
diff --git a/tools/testing/selftests/kvm/include/riscv/sbi.h b/tools/testing/selftests/kvm/include/riscv/sbi.h
index 046b432ae896..16f1815ac48f 100644
--- a/tools/testing/selftests/kvm/include/riscv/sbi.h
+++ b/tools/testing/selftests/kvm/include/riscv/sbi.h
@@ -97,6 +97,43 @@ enum sbi_pmu_hw_generic_events_t {
SBI_PMU_HW_GENERAL_MAX,
};
+enum sbi_pmu_fw_generic_events_t {
+ SBI_PMU_FW_MISALIGNED_LOAD = 0,
+ SBI_PMU_FW_MISALIGNED_STORE = 1,
+ SBI_PMU_FW_ACCESS_LOAD = 2,
+ SBI_PMU_FW_ACCESS_STORE = 3,
+ SBI_PMU_FW_ILLEGAL_INSN = 4,
+ SBI_PMU_FW_SET_TIMER = 5,
+ SBI_PMU_FW_IPI_SENT = 6,
+ SBI_PMU_FW_IPI_RCVD = 7,
+ SBI_PMU_FW_FENCE_I_SENT = 8,
+ SBI_PMU_FW_FENCE_I_RCVD = 9,
+ SBI_PMU_FW_SFENCE_VMA_SENT = 10,
+ SBI_PMU_FW_SFENCE_VMA_RCVD = 11,
+ SBI_PMU_FW_SFENCE_VMA_ASID_SENT = 12,
+ SBI_PMU_FW_SFENCE_VMA_ASID_RCVD = 13,
+
+ SBI_PMU_FW_HFENCE_GVMA_SENT = 14,
+ SBI_PMU_FW_HFENCE_GVMA_RCVD = 15,
+ SBI_PMU_FW_HFENCE_GVMA_VMID_SENT = 16,
+ SBI_PMU_FW_HFENCE_GVMA_VMID_RCVD = 17,
+
+ SBI_PMU_FW_HFENCE_VVMA_SENT = 18,
+ SBI_PMU_FW_HFENCE_VVMA_RCVD = 19,
+ SBI_PMU_FW_HFENCE_VVMA_ASID_SENT = 20,
+ SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD = 21,
+ SBI_PMU_FW_MAX,
+};
+
+/* SBI PMU event types */
+enum sbi_pmu_event_type {
+ SBI_PMU_EVENT_TYPE_HW = 0x0,
+ SBI_PMU_EVENT_TYPE_CACHE = 0x1,
+ SBI_PMU_EVENT_TYPE_RAW = 0x2,
+ SBI_PMU_EVENT_TYPE_RAW_V2 = 0x3,
+ SBI_PMU_EVENT_TYPE_FW = 0xf,
+};
+
/* SBI PMU counter types */
enum sbi_pmu_ctr_type {
SBI_PMU_CTR_TYPE_HW = 0x0,
diff --git a/tools/testing/selftests/kvm/include/x86/processor.h b/tools/testing/selftests/kvm/include/x86/processor.h
index 469a22122157..d8634a760a60 100644
--- a/tools/testing/selftests/kvm/include/x86/processor.h
+++ b/tools/testing/selftests/kvm/include/x86/processor.h
@@ -21,6 +21,8 @@
extern bool host_cpu_is_intel;
extern bool host_cpu_is_amd;
+extern bool host_cpu_is_hygon;
+extern bool host_cpu_is_amd_compatible;
extern uint64_t guest_tsc_khz;
#ifndef MAX_NR_CPUID_ENTRIES
@@ -717,6 +719,11 @@ static inline bool this_cpu_is_amd(void)
return this_cpu_vendor_string_is("AuthenticAMD");
}
+static inline bool this_cpu_is_hygon(void)
+{
+ return this_cpu_vendor_string_is("HygonGenuine");
+}
+
static inline uint32_t __this_cpu_has(uint32_t function, uint32_t index,
uint8_t reg, uint8_t lo, uint8_t hi)
{
@@ -1383,6 +1390,11 @@ static inline bool kvm_is_ignore_msrs(void)
return get_kvm_param_bool("ignore_msrs");
}
+static inline bool kvm_is_lbrv_enabled(void)
+{
+ return !!get_kvm_amd_param_integer("lbrv");
+}
+
uint64_t *vm_get_pte(struct kvm_vm *vm, uint64_t vaddr);
uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
diff --git a/tools/testing/selftests/kvm/include/x86/svm.h b/tools/testing/selftests/kvm/include/x86/svm.h
index 10b30b38bb3f..c8539166270e 100644
--- a/tools/testing/selftests/kvm/include/x86/svm.h
+++ b/tools/testing/selftests/kvm/include/x86/svm.h
@@ -97,13 +97,13 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
u64 exit_info_2;
u32 exit_int_info;
u32 exit_int_info_err;
- u64 nested_ctl;
+ u64 misc_ctl;
u64 avic_vapic_bar;
u8 reserved_4[8];
u32 event_inj;
u32 event_inj_err;
u64 nested_cr3;
- u64 virt_ext;
+ u64 misc_ctl2;
u32 clean;
u32 reserved_5;
u64 next_rip;
@@ -155,9 +155,6 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
#define AVIC_ENABLE_SHIFT 31
#define AVIC_ENABLE_MASK (1 << AVIC_ENABLE_SHIFT)
-#define LBR_CTL_ENABLE_MASK BIT_ULL(0)
-#define VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK BIT_ULL(1)
-
#define SVM_INTERRUPT_SHADOW_MASK 1
#define SVM_IOIO_STR_SHIFT 2
@@ -175,8 +172,11 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
#define SVM_VM_CR_SVM_LOCK_MASK 0x0008ULL
#define SVM_VM_CR_SVM_DIS_MASK 0x0010ULL
-#define SVM_NESTED_CTL_NP_ENABLE BIT(0)
-#define SVM_NESTED_CTL_SEV_ENABLE BIT(1)
+#define SVM_MISC_ENABLE_NP BIT(0)
+#define SVM_MISC_ENABLE_SEV BIT(1)
+
+#define SVM_MISC2_ENABLE_V_LBR BIT_ULL(0)
+#define SVM_MISC2_ENABLE_V_VMLOAD_VMSAVE BIT_ULL(1)
struct __attribute__ ((__packed__)) vmcb_seg {
u16 selector;
diff --git a/tools/testing/selftests/kvm/kvm_page_table_test.c b/tools/testing/selftests/kvm/kvm_page_table_test.c
index dd8b12f626d3..c60a24a92829 100644
--- a/tools/testing/selftests/kvm/kvm_page_table_test.c
+++ b/tools/testing/selftests/kvm/kvm_page_table_test.c
@@ -261,9 +261,6 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
guest_page_size;
else
guest_test_phys_mem = p->phys_offset;
-#ifdef __s390x__
- alignment = max(0x100000UL, alignment);
-#endif
guest_test_phys_mem = align_down(guest_test_phys_mem, alignment);
/* Set up the shared data structure test_args */
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 1959bf556e88..f5e076591c64 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -985,7 +985,7 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
struct userspace_mem_region *region;
size_t backing_src_pagesz = get_backing_src_pagesz(src_type);
size_t mem_size = npages * vm->page_size;
- size_t alignment;
+ size_t alignment = 1;
TEST_REQUIRE_SET_USER_MEMORY_REGION2();
@@ -1039,13 +1039,6 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
TEST_ASSERT(region != NULL, "Insufficient Memory");
region->mmap_size = mem_size;
-#ifdef __s390x__
- /* On s390x, the host address must be aligned to 1M (due to PGSTEs) */
- alignment = 0x100000;
-#else
- alignment = 1;
-#endif
-
/*
* When using THP mmap is not guaranteed to returned a hugepage aligned
* address so we have to pad the mmap. Padding is not needed for HugeTLB
diff --git a/tools/testing/selftests/kvm/lib/loongarch/processor.c b/tools/testing/selftests/kvm/lib/loongarch/processor.c
index 17aa55a2047a..ee4ad3b1d2a4 100644
--- a/tools/testing/selftests/kvm/lib/loongarch/processor.c
+++ b/tools/testing/selftests/kvm/lib/loongarch/processor.c
@@ -5,6 +5,7 @@
#include <asm/kvm.h>
#include "kvm_util.h"
+#include "pmu.h"
#include "processor.h"
#include "ucall_common.h"
@@ -251,6 +252,14 @@ static void loongarch_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
__vcpu_set_reg(vcpu, id, val);
}
+static void loongarch_set_cpucfg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
+{
+ uint64_t cfgid;
+
+ cfgid = KVM_REG_LOONGARCH_CPUCFG | KVM_REG_SIZE_U64 | 8 * id;
+ __vcpu_set_reg(vcpu, cfgid, val);
+}
+
static void loongarch_get_csr(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
{
uint64_t csrid;
@@ -267,9 +276,10 @@ static void loongarch_set_csr(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
__vcpu_set_reg(vcpu, csrid, val);
}
-static void loongarch_vcpu_setup(struct kvm_vcpu *vcpu)
+void loongarch_vcpu_setup(struct kvm_vcpu *vcpu)
{
int width;
+ unsigned int cfg;
unsigned long val;
struct kvm_vm *vm = vcpu->vm;
@@ -282,6 +292,9 @@ static void loongarch_vcpu_setup(struct kvm_vcpu *vcpu)
TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
}
+ cfg = read_cpucfg(LOONGARCH_CPUCFG6);
+ loongarch_set_cpucfg(vcpu, LOONGARCH_CPUCFG6, cfg);
+
/* kernel mode and page enable mode */
val = PLV_KERN | CSR_CRMD_PG;
loongarch_set_csr(vcpu, LOONGARCH_CSR_CRMD, val);
diff --git a/tools/testing/selftests/kvm/lib/memstress.c b/tools/testing/selftests/kvm/lib/memstress.c
index 557c0a0a5658..1ea735d66e15 100644
--- a/tools/testing/selftests/kvm/lib/memstress.c
+++ b/tools/testing/selftests/kvm/lib/memstress.c
@@ -196,10 +196,6 @@ struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus,
args->gpa = (region_end_gfn - guest_num_pages - 1) * args->guest_page_size;
args->gpa = align_down(args->gpa, backing_src_pagesz);
-#ifdef __s390x__
- /* Align to 1M (segment size) */
- args->gpa = align_down(args->gpa, 1 << 20);
-#endif
args->size = guest_num_pages * args->guest_page_size;
pr_info("guest physical test memory: [0x%lx, 0x%lx)\n",
args->gpa, args->gpa + args->size);
diff --git a/tools/testing/selftests/kvm/lib/riscv/processor.c b/tools/testing/selftests/kvm/lib/riscv/processor.c
index 51dd455ff52c..067c6b2c15b0 100644
--- a/tools/testing/selftests/kvm/lib/riscv/processor.c
+++ b/tools/testing/selftests/kvm/lib/riscv/processor.c
@@ -566,3 +566,8 @@ unsigned long riscv64_get_satp_mode(void)
return val;
}
+
+bool kvm_arch_has_default_irqchip(void)
+{
+ return kvm_check_cap(KVM_CAP_IRQCHIP);
+}
diff --git a/tools/testing/selftests/kvm/lib/x86/processor.c b/tools/testing/selftests/kvm/lib/x86/processor.c
index 23a44941e283..01f0f97d4430 100644
--- a/tools/testing/selftests/kvm/lib/x86/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86/processor.c
@@ -24,6 +24,8 @@
vm_vaddr_t exception_handlers;
bool host_cpu_is_amd;
bool host_cpu_is_intel;
+bool host_cpu_is_hygon;
+bool host_cpu_is_amd_compatible;
bool is_forced_emulation_enabled;
uint64_t guest_tsc_khz;
@@ -793,6 +795,8 @@ void kvm_arch_vm_post_create(struct kvm_vm *vm, unsigned int nr_vcpus)
sync_global_to_guest(vm, host_cpu_is_intel);
sync_global_to_guest(vm, host_cpu_is_amd);
+ sync_global_to_guest(vm, host_cpu_is_hygon);
+ sync_global_to_guest(vm, host_cpu_is_amd_compatible);
sync_global_to_guest(vm, is_forced_emulation_enabled);
sync_global_to_guest(vm, pmu_errata_mask);
@@ -1349,7 +1353,8 @@ const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
"1: vmmcall\n\t" \
"2:" \
: "=a"(r) \
- : [use_vmmcall] "r" (host_cpu_is_amd), inputs); \
+ : [use_vmmcall] "r" (host_cpu_is_amd_compatible), \
+ inputs); \
\
r; \
})
@@ -1389,8 +1394,8 @@ unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
max_gfn = (1ULL << (guest_maxphyaddr - vm->page_shift)) - 1;
- /* Avoid reserved HyperTransport region on AMD processors. */
- if (!host_cpu_is_amd)
+ /* Avoid reserved HyperTransport region on AMD or Hygon processors. */
+ if (!host_cpu_is_amd_compatible)
return max_gfn;
/* On parts with <40 physical address bits, the area is fully hidden */
@@ -1404,7 +1409,7 @@ unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
/*
* Otherwise it's at the top of the physical address space, possibly
- * reduced due to SME by bits 11:6 of CPUID[0x8000001f].EBX. Use
+ * reduced due to SME or CSV by bits 11:6 of CPUID[0x8000001f].EBX. Use
* the old conservative value if MAXPHYADDR is not enumerated.
*/
if (!this_cpu_has_p(X86_PROPERTY_MAX_PHY_ADDR))
@@ -1425,6 +1430,8 @@ void kvm_selftest_arch_init(void)
{
host_cpu_is_intel = this_cpu_is_intel();
host_cpu_is_amd = this_cpu_is_amd();
+ host_cpu_is_hygon = this_cpu_is_hygon();
+ host_cpu_is_amd_compatible = host_cpu_is_amd || host_cpu_is_hygon;
is_forced_emulation_enabled = kvm_is_forced_emulation_enabled();
kvm_init_pmu_errata();
diff --git a/tools/testing/selftests/kvm/lib/x86/svm.c b/tools/testing/selftests/kvm/lib/x86/svm.c
index 2e5c480c9afd..eb20b00112c7 100644
--- a/tools/testing/selftests/kvm/lib/x86/svm.c
+++ b/tools/testing/selftests/kvm/lib/x86/svm.c
@@ -126,7 +126,7 @@ void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_r
guest_regs.rdi = (u64)svm;
if (svm->ncr3_gpa) {
- ctrl->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
+ ctrl->misc_ctl |= SVM_MISC_ENABLE_NP;
ctrl->nested_cr3 = svm->ncr3_gpa;
}
}
diff --git a/tools/testing/selftests/kvm/loongarch/pmu_test.c b/tools/testing/selftests/kvm/loongarch/pmu_test.c
new file mode 100644
index 000000000000..88bb530e336e
--- /dev/null
+++ b/tools/testing/selftests/kvm/loongarch/pmu_test.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * LoongArch KVM PMU event counting test
+ *
+ * Test hardware event counting: CPU_CYCLES, INSTR_RETIRED,
+ * BRANCH_INSTRUCTIONS and BRANCH_MISSES.
+ */
+#include <linux/bitops.h>
+#include "kvm_util.h"
+#include "pmu.h"
+#include "loongarch/processor.h"
+
+static int pmu_irq_count;
+
+/* Check PMU support */
+static bool has_pmu_support(void)
+{
+ uint32_t cfg6;
+
+ /* Read CPUCFG6 to check PMU */
+ cfg6 = read_cpucfg(LOONGARCH_CPUCFG6);
+
+ /* Check PMU present bit */
+ if (!(cfg6 & CPUCFG6_PMP))
+ return false;
+
+ /* Check that at least one counter exists */
+ if (((cfg6 & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT) == 0)
+ return false;
+
+ return true;
+}
+
+/* Dump PMU capabilities */
+static void dump_pmu_caps(void)
+{
+ uint32_t cfg6;
+ int nr_counters, counter_bits;
+
+ cfg6 = read_cpucfg(LOONGARCH_CPUCFG6);
+ nr_counters = ((cfg6 & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT) + 1;
+ counter_bits = ((cfg6 & CPUCFG6_PMBITS) >> CPUCFG6_PMBITS_SHIFT) + 1;
+
+ pr_info("PMU capabilities:\n");
+ pr_info(" Counters present: %s\n", cfg6 & CPUCFG6_PMP ? "yes" : "no");
+ pr_info(" Number of counters: %d\n", nr_counters);
+ pr_info(" Counter width: %d bits\n", counter_bits);
+}
+
+/* Guest test code - runs inside VM */
+static void guest_pmu_base_test(void)
+{
+ int i;
+ uint32_t cfg6, pmnum;
+ uint64_t cnt[4];
+
+ cfg6 = read_cpucfg(LOONGARCH_CPUCFG6);
+ pmnum = (cfg6 >> 4) & 0xf;
+ GUEST_PRINTF("CPUCFG6 = 0x%x\n", cfg6);
+ GUEST_PRINTF("PMP enabled: %s\n", (cfg6 & 0x1) ? "YES" : "NO");
+ GUEST_PRINTF("Number of counters (PMNUM): %x\n", pmnum + 1);
+ GUEST_ASSERT(pmnum == 3);
+
+ GUEST_PRINTF("Clean csr_perfcntr0-3\n");
+ csr_write(0, LOONGARCH_CSR_PERFCNTR0);
+ csr_write(0, LOONGARCH_CSR_PERFCNTR1);
+ csr_write(0, LOONGARCH_CSR_PERFCNTR2);
+ csr_write(0, LOONGARCH_CSR_PERFCNTR3);
+ GUEST_PRINTF("Set csr_perfctrl0 for cycles event\n");
+ csr_write(PMU_ENVENT_ENABLED |
+ LOONGARCH_PMU_EVENT_CYCLES, LOONGARCH_CSR_PERFCTRL0);
+ GUEST_PRINTF("Set csr_perfctrl1 for instr_retired event\n");
+ csr_write(PMU_ENVENT_ENABLED |
+ LOONGARCH_PMU_EVENT_INSTR_RETIRED, LOONGARCH_CSR_PERFCTRL1);
+ GUEST_PRINTF("Set csr_perfctrl2 for branch_instructions event\n");
+ csr_write(PMU_ENVENT_ENABLED |
+ PERF_COUNT_HW_BRANCH_INSTRUCTIONS, LOONGARCH_CSR_PERFCTRL2);
+ GUEST_PRINTF("Set csr_perfctrl3 for branch_misses event\n");
+ csr_write(PMU_ENVENT_ENABLED |
+ PERF_COUNT_HW_BRANCH_MISSES, LOONGARCH_CSR_PERFCTRL3);
+
+ for (i = 0; i < NUM_LOOPS; i++)
+ cpu_relax();
+
+ cnt[0] = csr_read(LOONGARCH_CSR_PERFCNTR0);
+ GUEST_PRINTF("csr_perfcntr0 is %lx\n", cnt[0]);
+ cnt[1] = csr_read(LOONGARCH_CSR_PERFCNTR1);
+ GUEST_PRINTF("csr_perfcntr1 is %lx\n", cnt[1]);
+ cnt[2] = csr_read(LOONGARCH_CSR_PERFCNTR2);
+ GUEST_PRINTF("csr_perfcntr2 is %lx\n", cnt[2]);
+ cnt[3] = csr_read(LOONGARCH_CSR_PERFCNTR3);
+ GUEST_PRINTF("csr_perfcntr3 is %lx\n", cnt[3]);
+
+ GUEST_PRINTF("assert csr_perfcntr0 >EXPECTED_CYCLES_MIN && csr_perfcntr0 < UPPER_BOUND\n");
+ GUEST_ASSERT(cnt[0] > EXPECTED_CYCLES_MIN && cnt[0] < UPPER_BOUND);
+ GUEST_PRINTF("assert csr_perfcntr1 > EXPECTED_INSTR_MIN && csr_perfcntr1 < UPPER_BOUND\n");
+ GUEST_ASSERT(cnt[1] > EXPECTED_INSTR_MIN && cnt[1] < UPPER_BOUND);
+ GUEST_PRINTF("assert csr_perfcntr2 > 0 && csr_perfcntr2 < UPPER_BOUND\n");
+ GUEST_ASSERT(cnt[2] > 0 && cnt[2] < UPPER_BOUND);
+ GUEST_PRINTF("assert csr_perfcntr3 > 0 && csr_perfcntr3 < UPPER_BOUND\n");
+ GUEST_ASSERT(cnt[3] > 0 && cnt[3] < UPPER_BOUND);
+}
+
+static void guest_irq_handler(struct ex_regs *regs)
+{
+ unsigned int intid;
+
+ pmu_irq_disable();
+ intid = !!(regs->estat & BIT(INT_PMI));
+ GUEST_ASSERT_EQ(intid, 1);
+ GUEST_PRINTF("Get PMU interrupt\n");
+ WRITE_ONCE(pmu_irq_count, pmu_irq_count + 1);
+}
+
+static void guest_pmu_interrupt_test(void)
+{
+ uint64_t cnt;
+
+ csr_write(PMU_OVERFLOW - 1, LOONGARCH_CSR_PERFCNTR0);
+ csr_write(PMU_ENVENT_ENABLED | CSR_PERFCTRL_PMIE | LOONGARCH_PMU_EVENT_CYCLES, LOONGARCH_CSR_PERFCTRL0);
+
+ cpu_relax();
+
+ GUEST_ASSERT_EQ(pmu_irq_count, 1);
+ cnt = csr_read(LOONGARCH_CSR_PERFCNTR0);
+ GUEST_PRINTF("csr_perfcntr0 is %lx\n", cnt);
+ GUEST_PRINTF("PMU interrupt test success\n");
+
+}
+
+static void guest_code(void)
+{
+ guest_pmu_base_test();
+
+ pmu_irq_enable();
+ local_irq_enable();
+ guest_pmu_interrupt_test();
+
+ GUEST_DONE();
+}
+
+int main(int argc, char *argv[])
+{
+ int ret = 0;
+ struct kvm_device_attr attr;
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+ struct ucall uc;
+
+ /* Check host KVM PMU support */
+ if (!has_pmu_support()) {
+ print_skip("PMU not supported by host hardware\n");
+ dump_pmu_caps();
+ return KSFT_SKIP;
+ }
+ pr_info("Host support PMU\n");
+
+ /* Dump PMU capabilities */
+ dump_pmu_caps();
+
+ vm = vm_create(VM_MODE_P47V47_16K);
+ vcpu = vm_vcpu_add(vm, 0, guest_code);
+
+ pmu_irq_count = 0;
+ vm_init_descriptor_tables(vm);
+ loongarch_vcpu_setup(vcpu);
+ vm_install_exception_handler(vm, EXCCODE_INT, guest_irq_handler);
+ sync_global_to_guest(vm, pmu_irq_count);
+
+ attr.group = KVM_LOONGARCH_VM_FEAT_CTRL,
+ attr.attr = KVM_LOONGARCH_VM_FEAT_PMU,
+
+ ret = ioctl(vm->fd, KVM_HAS_DEVICE_ATTR, &attr);
+
+ if (ret == 0) {
+ pr_info("PMU is enabled in VM\n");
+ } else {
+ print_skip("PMU not enabled by VM config\n");
+ return KSFT_SKIP;
+ }
+
+ while (1) {
+ vcpu_run(vcpu);
+ switch (get_ucall(vcpu, &uc)) {
+ case UCALL_PRINTF:
+ printf("%s", (const char *)uc.buffer);
+ break;
+ case UCALL_DONE:
+ printf("PMU test PASSED\n");
+ goto done;
+ case UCALL_ABORT:
+ printf("PMU test FAILED\n");
+ ret = -1;
+ goto done;
+ default:
+ printf("Unexpected exit\n");
+ ret = -1;
+ goto done;
+ }
+ }
+
+done:
+ kvm_vm_free(vm);
+ return ret;
+}
diff --git a/tools/testing/selftests/kvm/pre_fault_memory_test.c b/tools/testing/selftests/kvm/pre_fault_memory_test.c
index 93e603d91311..f3de0386ba7b 100644
--- a/tools/testing/selftests/kvm/pre_fault_memory_test.c
+++ b/tools/testing/selftests/kvm/pre_fault_memory_test.c
@@ -175,11 +175,7 @@ static void __test_pre_fault_memory(unsigned long vm_type, bool private)
alignment = guest_page_size = vm_guest_mode_params[VM_MODE_DEFAULT].page_size;
gpa = (vm->max_gfn - TEST_NPAGES) * guest_page_size;
-#ifdef __s390x__
- alignment = max(0x100000UL, guest_page_size);
-#else
alignment = SZ_2M;
-#endif
gpa = align_down(gpa, alignment);
gva = gpa & ((1ULL << (vm->va_bits - 1)) - 1);
diff --git a/tools/testing/selftests/kvm/riscv/sbi_pmu_test.c b/tools/testing/selftests/kvm/riscv/sbi_pmu_test.c
index 924a335d2262..cec1621ace23 100644
--- a/tools/testing/selftests/kvm/riscv/sbi_pmu_test.c
+++ b/tools/testing/selftests/kvm/riscv/sbi_pmu_test.c
@@ -436,6 +436,7 @@ static void test_pmu_basic_sanity(void)
struct sbiret ret;
int num_counters = 0, i;
union sbi_pmu_ctr_info ctrinfo;
+ unsigned long fw_eidx;
probe = guest_sbi_probe_extension(SBI_EXT_PMU, &out_val);
GUEST_ASSERT(probe && out_val == 1);
@@ -461,7 +462,24 @@ static void test_pmu_basic_sanity(void)
pmu_csr_read_num(ctrinfo.csr);
GUEST_ASSERT(illegal_handler_invoked);
} else if (ctrinfo.type == SBI_PMU_CTR_TYPE_FW) {
- read_fw_counter(i, ctrinfo);
+ /* Read without configure should fail */
+ ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_FW_READ,
+ i, 0, 0, 0, 0, 0);
+ GUEST_ASSERT(ret.error == SBI_ERR_INVALID_PARAM);
+
+ /*
+ * Try to configure with a common firmware event.
+ * If configuration succeeds, verify we can read it.
+ */
+ fw_eidx = ((unsigned long)SBI_PMU_EVENT_TYPE_FW << 16) |
+ SBI_PMU_FW_ACCESS_LOAD;
+
+ ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH,
+ i, 1, 0, fw_eidx, 0, 0);
+ if (ret.error == 0) {
+ GUEST_ASSERT(ret.value == i);
+ read_fw_counter(i, ctrinfo);
+ }
}
}
diff --git a/tools/testing/selftests/kvm/set_memory_region_test.c b/tools/testing/selftests/kvm/set_memory_region_test.c
index 7fe427ff9b38..a398dc3a8c4b 100644
--- a/tools/testing/selftests/kvm/set_memory_region_test.c
+++ b/tools/testing/selftests/kvm/set_memory_region_test.c
@@ -413,14 +413,7 @@ static void test_add_max_memory_regions(void)
uint32_t max_mem_slots;
uint32_t slot;
void *mem, *mem_aligned, *mem_extra;
- size_t alignment;
-
-#ifdef __s390x__
- /* On s390x, the host address must be aligned to 1M (due to PGSTEs) */
- alignment = 0x100000;
-#else
- alignment = 1;
-#endif
+ size_t alignment = 1;
max_mem_slots = kvm_check_cap(KVM_CAP_NR_MEMSLOTS);
TEST_ASSERT(max_mem_slots > 0,
diff --git a/tools/testing/selftests/kvm/steal_time.c b/tools/testing/selftests/kvm/steal_time.c
index 7be8adfe5dd3..efe56a10d13e 100644
--- a/tools/testing/selftests/kvm/steal_time.c
+++ b/tools/testing/selftests/kvm/steal_time.c
@@ -69,16 +69,10 @@ static bool is_steal_time_supported(struct kvm_vcpu *vcpu)
static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i)
{
- int ret;
-
/* ST_GPA_BASE is identity mapped */
st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
sync_global_to_guest(vcpu->vm, st_gva[i]);
- ret = _vcpu_set_msr(vcpu, MSR_KVM_STEAL_TIME,
- (ulong)st_gva[i] | KVM_STEAL_RESERVED_MASK);
- TEST_ASSERT(ret == 0, "Bad GPA didn't fail");
-
vcpu_set_msr(vcpu, MSR_KVM_STEAL_TIME, (ulong)st_gva[i] | KVM_MSR_ENABLED);
}
@@ -99,6 +93,21 @@ static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx)
st->pad[8], st->pad[9], st->pad[10]);
}
+static void check_steal_time_uapi(void)
+{
+ struct kvm_vm *vm;
+ struct kvm_vcpu *vcpu;
+ int ret;
+
+ vm = vm_create_with_one_vcpu(&vcpu, NULL);
+
+ ret = _vcpu_set_msr(vcpu, MSR_KVM_STEAL_TIME,
+ (ulong)ST_GPA_BASE | KVM_STEAL_RESERVED_MASK);
+ TEST_ASSERT(ret == 0, "Bad GPA didn't fail");
+
+ kvm_vm_free(vm);
+}
+
#elif defined(__aarch64__)
/* PV_TIME_ST must have 64-byte alignment */
@@ -170,7 +179,6 @@ static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i)
{
struct kvm_vm *vm = vcpu->vm;
uint64_t st_ipa;
- int ret;
struct kvm_device_attr dev = {
.group = KVM_ARM_VCPU_PVTIME_CTRL,
@@ -178,21 +186,12 @@ static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i)
.addr = (uint64_t)&st_ipa,
};
- vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &dev);
-
/* ST_GPA_BASE is identity mapped */
st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
sync_global_to_guest(vm, st_gva[i]);
- st_ipa = (ulong)st_gva[i] | 1;
- ret = __vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);
- TEST_ASSERT(ret == -1 && errno == EINVAL, "Bad IPA didn't report EINVAL");
-
st_ipa = (ulong)st_gva[i];
vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);
-
- ret = __vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);
- TEST_ASSERT(ret == -1 && errno == EEXIST, "Set IPA twice without EEXIST");
}
static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx)
@@ -205,6 +204,36 @@ static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx)
ksft_print_msg(" st_time: %ld\n", st->st_time);
}
+static void check_steal_time_uapi(void)
+{
+ struct kvm_vm *vm;
+ struct kvm_vcpu *vcpu;
+ uint64_t st_ipa;
+ int ret;
+
+ vm = vm_create_with_one_vcpu(&vcpu, NULL);
+
+ struct kvm_device_attr dev = {
+ .group = KVM_ARM_VCPU_PVTIME_CTRL,
+ .attr = KVM_ARM_VCPU_PVTIME_IPA,
+ .addr = (uint64_t)&st_ipa,
+ };
+
+ vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &dev);
+
+ st_ipa = (ulong)ST_GPA_BASE | 1;
+ ret = __vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);
+ TEST_ASSERT(ret == -1 && errno == EINVAL, "Bad IPA didn't report EINVAL");
+
+ st_ipa = (ulong)ST_GPA_BASE;
+ vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);
+
+ ret = __vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);
+ TEST_ASSERT(ret == -1 && errno == EEXIST, "Set IPA twice without EEXIST");
+
+ kvm_vm_free(vm);
+}
+
#elif defined(__riscv)
/* SBI STA shmem must have 64-byte alignment */
@@ -301,6 +330,41 @@ static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx)
pr_info("\n");
}
+static void check_steal_time_uapi(void)
+{
+ struct kvm_vm *vm;
+ struct kvm_vcpu *vcpu;
+ struct kvm_one_reg reg;
+ uint64_t shmem;
+ int ret;
+
+ vm = vm_create_with_one_vcpu(&vcpu, NULL);
+
+ reg.id = KVM_REG_RISCV |
+ KVM_REG_SIZE_ULONG |
+ KVM_REG_RISCV_SBI_STATE |
+ KVM_REG_RISCV_SBI_STA |
+ KVM_REG_RISCV_SBI_STA_REG(shmem_lo);
+ reg.addr = (uint64_t)&shmem;
+
+ shmem = ST_GPA_BASE + 1;
+ ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
+ TEST_ASSERT(ret == -1 && errno == EINVAL,
+ "misaligned STA shmem returns -EINVAL");
+
+ shmem = ST_GPA_BASE;
+ ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
+ TEST_ASSERT(ret == 0,
+ "aligned STA shmem succeeds");
+
+ shmem = INVALID_GPA;
+ ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
+ TEST_ASSERT(ret == 0,
+ "all-ones for STA shmem succeeds");
+
+ kvm_vm_free(vm);
+}
+
#elif defined(__loongarch__)
/* steal_time must have 64-byte alignment */
@@ -465,6 +529,8 @@ int main(int ac, char **av)
TEST_REQUIRE(is_steal_time_supported(vcpus[0]));
ksft_set_plan(NR_VCPUS);
+ check_steal_time_uapi();
+
/* Run test on each VCPU */
for (i = 0; i < NR_VCPUS; ++i) {
steal_time_init(vcpus[i], i);
diff --git a/tools/testing/selftests/kvm/x86/fix_hypercall_test.c b/tools/testing/selftests/kvm/x86/fix_hypercall_test.c
index 762628f7d4ba..00b6e85735dd 100644
--- a/tools/testing/selftests/kvm/x86/fix_hypercall_test.c
+++ b/tools/testing/selftests/kvm/x86/fix_hypercall_test.c
@@ -52,7 +52,7 @@ static void guest_main(void)
if (host_cpu_is_intel) {
native_hypercall_insn = vmx_vmcall;
other_hypercall_insn = svm_vmmcall;
- } else if (host_cpu_is_amd) {
+ } else if (host_cpu_is_amd_compatible) {
native_hypercall_insn = svm_vmmcall;
other_hypercall_insn = vmx_vmcall;
} else {
diff --git a/tools/testing/selftests/kvm/x86/msrs_test.c b/tools/testing/selftests/kvm/x86/msrs_test.c
index 40d918aedce6..f7e39bf887ad 100644
--- a/tools/testing/selftests/kvm/x86/msrs_test.c
+++ b/tools/testing/selftests/kvm/x86/msrs_test.c
@@ -81,7 +81,7 @@ static u64 fixup_rdmsr_val(u32 msr, u64 want)
* is supposed to emulate that behavior based on guest vendor model
* (which is the same as the host vendor model for this test).
*/
- if (!host_cpu_is_amd)
+ if (!host_cpu_is_amd_compatible)
return want;
switch (msr) {
@@ -175,7 +175,7 @@ void guest_test_reserved_val(const struct kvm_msr *msr)
* If the CPU will truncate the written value (e.g. SYSENTER on AMD),
* expect success and a truncated value, not #GP.
*/
- if (!this_cpu_has(msr->feature) ||
+ if ((!this_cpu_has(msr->feature) && !this_cpu_has(msr->feature2)) ||
msr->rsvd_val == fixup_rdmsr_val(msr->index, msr->rsvd_val)) {
u8 vec = wrmsr_safe(msr->index, msr->rsvd_val);
diff --git a/tools/testing/selftests/kvm/x86/nested_vmsave_vmload_test.c b/tools/testing/selftests/kvm/x86/nested_vmsave_vmload_test.c
index 6764a48f9d4d..71717118d692 100644
--- a/tools/testing/selftests/kvm/x86/nested_vmsave_vmload_test.c
+++ b/tools/testing/selftests/kvm/x86/nested_vmsave_vmload_test.c
@@ -79,8 +79,8 @@ static void l1_guest_code(struct svm_test_data *svm)
svm->vmcb->control.intercept |= (BIT_ULL(INTERCEPT_VMSAVE) |
BIT_ULL(INTERCEPT_VMLOAD));
- /* ..VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK cleared.. */
- svm->vmcb->control.virt_ext &= ~VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
+ /* ..SVM_MISC2_ENABLE_V_VMLOAD_VMSAVE cleared.. */
+ svm->vmcb->control.misc_ctl2 &= ~SVM_MISC2_ENABLE_V_VMLOAD_VMSAVE;
svm->vmcb->save.rip = (u64)l2_guest_code_vmsave;
run_guest(svm->vmcb, svm->vmcb_gpa);
@@ -90,8 +90,8 @@ static void l1_guest_code(struct svm_test_data *svm)
run_guest(svm->vmcb, svm->vmcb_gpa);
GUEST_ASSERT_EQ(svm->vmcb->control.exit_code, SVM_EXIT_VMLOAD);
- /* ..and VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK set */
- svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
+ /* ..and SVM_MISC2_ENABLE_V_VMLOAD_VMSAVE set */
+ svm->vmcb->control.misc_ctl2 |= SVM_MISC2_ENABLE_V_VMLOAD_VMSAVE;
svm->vmcb->save.rip = (u64)l2_guest_code_vmsave;
run_guest(svm->vmcb, svm->vmcb_gpa);
@@ -106,20 +106,20 @@ static void l1_guest_code(struct svm_test_data *svm)
BIT_ULL(INTERCEPT_VMLOAD));
/*
- * Without VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK, the GPA will be
+ * Without SVM_MISC2_ENABLE_V_VMLOAD_VMSAVE, the GPA will be
* interpreted as an L1 GPA, so VMCB0 should be used.
*/
svm->vmcb->save.rip = (u64)l2_guest_code_vmcb0;
- svm->vmcb->control.virt_ext &= ~VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
+ svm->vmcb->control.misc_ctl2 &= ~SVM_MISC2_ENABLE_V_VMLOAD_VMSAVE;
run_guest(svm->vmcb, svm->vmcb_gpa);
GUEST_ASSERT_EQ(svm->vmcb->control.exit_code, SVM_EXIT_VMMCALL);
/*
- * With VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK, the GPA will be interpeted as
+	 * With SVM_MISC2_ENABLE_V_VMLOAD_VMSAVE, the GPA will be interpreted as
* an L2 GPA, and translated through the NPT to VMCB1.
*/
svm->vmcb->save.rip = (u64)l2_guest_code_vmcb1;
- svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
+ svm->vmcb->control.misc_ctl2 |= SVM_MISC2_ENABLE_V_VMLOAD_VMSAVE;
run_guest(svm->vmcb, svm->vmcb_gpa);
GUEST_ASSERT_EQ(svm->vmcb->control.exit_code, SVM_EXIT_VMMCALL);
diff --git a/tools/testing/selftests/kvm/x86/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86/pmu_event_filter_test.c
index 1c5b7611db24..93b61c077991 100644
--- a/tools/testing/selftests/kvm/x86/pmu_event_filter_test.c
+++ b/tools/testing/selftests/kvm/x86/pmu_event_filter_test.c
@@ -361,7 +361,8 @@ static bool use_intel_pmu(void)
*/
static bool use_amd_pmu(void)
{
- return host_cpu_is_amd && kvm_cpu_family() >= 0x17;
+ return (host_cpu_is_amd && kvm_cpu_family() >= 0x17) ||
+ host_cpu_is_hygon;
}
/*
diff --git a/tools/testing/selftests/kvm/x86/sev_migrate_tests.c b/tools/testing/selftests/kvm/x86/sev_migrate_tests.c
index 0a6dfba3905b..6b0928e69051 100644
--- a/tools/testing/selftests/kvm/x86/sev_migrate_tests.c
+++ b/tools/testing/selftests/kvm/x86/sev_migrate_tests.c
@@ -36,8 +36,6 @@ static struct kvm_vm *sev_vm_create(bool es)
sev_vm_launch(vm, es ? SEV_POLICY_ES : 0);
- if (es)
- vm_sev_ioctl(vm, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);
return vm;
}
diff --git a/tools/testing/selftests/kvm/x86/state_test.c b/tools/testing/selftests/kvm/x86/state_test.c
index f2c7a1c297e3..992a52504a4a 100644
--- a/tools/testing/selftests/kvm/x86/state_test.c
+++ b/tools/testing/selftests/kvm/x86/state_test.c
@@ -26,7 +26,9 @@ void svm_l2_guest_code(void)
GUEST_SYNC(4);
/* Exit to L1 */
vmcall();
+ clgi();
GUEST_SYNC(6);
+ stgi();
/* Done, exit to L1 and never come back. */
vmcall();
}
@@ -41,6 +43,8 @@ static void svm_l1_guest_code(struct svm_test_data *svm)
generic_svm_setup(svm, svm_l2_guest_code,
&l2_guest_stack[L2_GUEST_STACK_SIZE]);
+ vmcb->control.int_ctl |= (V_GIF_ENABLE_MASK | V_GIF_MASK);
+
GUEST_SYNC(3);
run_guest(vmcb, svm->vmcb_gpa);
GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
@@ -222,6 +226,35 @@ static void __attribute__((__flatten__)) guest_code(void *arg)
GUEST_DONE();
}
+void svm_check_nested_state(int stage, struct kvm_x86_state *state)
+{
+ struct vmcb *vmcb = (struct vmcb *)state->nested.data.svm;
+
+ if (kvm_cpu_has(X86_FEATURE_VGIF)) {
+ if (stage == 4)
+ TEST_ASSERT_EQ(!!(vmcb->control.int_ctl & V_GIF_MASK), 1);
+ if (stage == 6)
+ TEST_ASSERT_EQ(!!(vmcb->control.int_ctl & V_GIF_MASK), 0);
+ }
+
+ if (kvm_cpu_has(X86_FEATURE_NRIPS)) {
+ /*
+ * GUEST_SYNC() causes IO emulation in KVM, in which case the
+ * RIP is advanced before exiting to userspace. Hence, the RIP
+ * in the saved state should be the same as nRIP saved by the
+ * CPU in the VMCB.
+ */
+ if (stage == 6)
+ TEST_ASSERT_EQ(vmcb->control.next_rip, state->regs.rip);
+ }
+}
+
+void check_nested_state(int stage, struct kvm_x86_state *state)
+{
+ if (kvm_has_cap(KVM_CAP_NESTED_STATE) && kvm_cpu_has(X86_FEATURE_SVM))
+ svm_check_nested_state(stage, state);
+}
+
int main(int argc, char *argv[])
{
uint64_t *xstate_bv, saved_xstate_bv;
@@ -278,6 +311,8 @@ int main(int argc, char *argv[])
kvm_vm_release(vm);
+ check_nested_state(stage, state);
+
/* Restore state in a new VM. */
vcpu = vm_recreate_with_one_vcpu(vm);
vcpu_load_state(vcpu, state);
diff --git a/tools/testing/selftests/kvm/x86/svm_lbr_nested_state.c b/tools/testing/selftests/kvm/x86/svm_lbr_nested_state.c
new file mode 100644
index 000000000000..ff99438824d3
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86/svm_lbr_nested_state.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2026, Google, Inc.
+ */
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "svm_util.h"
+
+
+#define L2_GUEST_STACK_SIZE 64
+
+#define DO_BRANCH() do { asm volatile("jmp 1f\n 1: nop"); } while (0)
+
+struct lbr_branch {
+ u64 from, to;
+};
+
+volatile struct lbr_branch l2_branch;
+
+#define RECORD_AND_CHECK_BRANCH(b) \
+do { \
+ wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR); \
+ DO_BRANCH(); \
+ (b)->from = rdmsr(MSR_IA32_LASTBRANCHFROMIP); \
+ (b)->to = rdmsr(MSR_IA32_LASTBRANCHTOIP); \
+ /* Disable LBR right after to avoid overriding the IPs */ \
+ wrmsr(MSR_IA32_DEBUGCTLMSR, 0); \
+ \
+ GUEST_ASSERT_NE((b)->from, 0); \
+ GUEST_ASSERT_NE((b)->to, 0); \
+} while (0)
+
+#define CHECK_BRANCH_MSRS(b) \
+do { \
+ GUEST_ASSERT_EQ((b)->from, rdmsr(MSR_IA32_LASTBRANCHFROMIP)); \
+ GUEST_ASSERT_EQ((b)->to, rdmsr(MSR_IA32_LASTBRANCHTOIP)); \
+} while (0)
+
+#define CHECK_BRANCH_VMCB(b, vmcb) \
+do { \
+ GUEST_ASSERT_EQ((b)->from, vmcb->save.br_from); \
+ GUEST_ASSERT_EQ((b)->to, vmcb->save.br_to); \
+} while (0)
+
+static void l2_guest_code(struct svm_test_data *svm)
+{
+ /* Record a branch, trigger save/restore, and make sure LBRs are intact */
+ RECORD_AND_CHECK_BRANCH(&l2_branch);
+ GUEST_SYNC(true);
+ CHECK_BRANCH_MSRS(&l2_branch);
+ vmmcall();
+}
+
+static void l1_guest_code(struct svm_test_data *svm, bool nested_lbrv)
+{
+ unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+ struct vmcb *vmcb = svm->vmcb;
+ struct lbr_branch l1_branch;
+
+ /* Record a branch, trigger save/restore, and make sure LBRs are intact */
+ RECORD_AND_CHECK_BRANCH(&l1_branch);
+ GUEST_SYNC(true);
+ CHECK_BRANCH_MSRS(&l1_branch);
+
+ /* Run L2, which will also do the same */
+ generic_svm_setup(svm, l2_guest_code,
+ &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+ if (nested_lbrv)
+ vmcb->control.misc_ctl2 = SVM_MISC2_ENABLE_V_LBR;
+ else
+ vmcb->control.misc_ctl2 &= ~SVM_MISC2_ENABLE_V_LBR;
+
+ run_guest(vmcb, svm->vmcb_gpa);
+ GUEST_ASSERT(svm->vmcb->control.exit_code == SVM_EXIT_VMMCALL);
+
+ /* Trigger save/restore one more time before checking, just for kicks */
+ GUEST_SYNC(true);
+
+ /*
+	 * If SVM_MISC2_ENABLE_V_LBR is set, L1 and L2 should have separate LBR MSRs, so
+ * expect L1's LBRs to remain intact and L2 LBRs to be in the VMCB.
+ * Otherwise, the MSRs are shared between L1 & L2 so expect L2's LBRs.
+ */
+ if (nested_lbrv) {
+ CHECK_BRANCH_MSRS(&l1_branch);
+ CHECK_BRANCH_VMCB(&l2_branch, vmcb);
+ } else {
+ CHECK_BRANCH_MSRS(&l2_branch);
+ }
+ GUEST_DONE();
+}
+
+void test_lbrv_nested_state(bool nested_lbrv)
+{
+ struct kvm_x86_state *state = NULL;
+ struct kvm_vcpu *vcpu;
+ vm_vaddr_t svm_gva;
+ struct kvm_vm *vm;
+ struct ucall uc;
+
+ pr_info("Testing with nested LBRV %s\n", nested_lbrv ? "enabled" : "disabled");
+
+ vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
+ vcpu_alloc_svm(vm, &svm_gva);
+ vcpu_args_set(vcpu, 2, svm_gva, nested_lbrv);
+
+ for (;;) {
+ vcpu_run(vcpu);
+ TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
+ switch (get_ucall(vcpu, &uc)) {
+ case UCALL_SYNC:
+ /* Save the vCPU state and restore it in a new VM on sync */
+ pr_info("Guest triggered save/restore.\n");
+ state = vcpu_save_state(vcpu);
+ kvm_vm_release(vm);
+ vcpu = vm_recreate_with_one_vcpu(vm);
+ vcpu_load_state(vcpu, state);
+ kvm_x86_state_cleanup(state);
+ break;
+ case UCALL_ABORT:
+ REPORT_GUEST_ASSERT(uc);
+ /* NOT REACHED */
+ case UCALL_DONE:
+ goto done;
+ default:
+ TEST_FAIL("Unknown ucall %lu", uc.cmd);
+ }
+ }
+done:
+ kvm_vm_free(vm);
+}
+
+int main(int argc, char *argv[])
+{
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));
+ TEST_REQUIRE(kvm_is_lbrv_enabled());
+
+ test_lbrv_nested_state(/*nested_lbrv=*/false);
+ test_lbrv_nested_state(/*nested_lbrv=*/true);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/kvm/x86/svm_nested_clear_efer_svme.c b/tools/testing/selftests/kvm/x86/svm_nested_clear_efer_svme.c
new file mode 100644
index 000000000000..a521a9eed061
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86/svm_nested_clear_efer_svme.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2026, Google LLC.
+ */
+#include "kvm_util.h"
+#include "vmx.h"
+#include "svm_util.h"
+#include "kselftest.h"
+
+
+#define L2_GUEST_STACK_SIZE 64
+
+static void l2_guest_code(void)
+{
+ unsigned long efer = rdmsr(MSR_EFER);
+
+	/* generic_svm_setup() runs L2 with EFER_SVME set */
+ GUEST_ASSERT(efer & EFER_SVME);
+ wrmsr(MSR_EFER, efer & ~EFER_SVME);
+
+ /* Unreachable, L1 should be shutdown */
+ GUEST_ASSERT(0);
+}
+
+static void l1_guest_code(struct svm_test_data *svm)
+{
+ unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+
+ generic_svm_setup(svm, l2_guest_code,
+ &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+ run_guest(svm->vmcb, svm->vmcb_gpa);
+
+ /* Unreachable, L1 should be shutdown */
+ GUEST_ASSERT(0);
+}
+
+int main(int argc, char *argv[])
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+ vm_vaddr_t nested_gva = 0;
+
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));
+
+ vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
+
+ vcpu_alloc_svm(vm, &nested_gva);
+ vcpu_args_set(vcpu, 1, nested_gva);
+
+ vcpu_run(vcpu);
+ TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_SHUTDOWN);
+
+ kvm_vm_free(vm);
+ return 0;
+}
diff --git a/tools/testing/selftests/kvm/x86/svm_nested_vmcb12_gpa.c b/tools/testing/selftests/kvm/x86/svm_nested_vmcb12_gpa.c
new file mode 100644
index 000000000000..569869bed20b
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86/svm_nested_vmcb12_gpa.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2026, Google LLC.
+ */
+#include "kvm_util.h"
+#include "vmx.h"
+#include "svm_util.h"
+#include "kselftest.h"
+#include "kvm_test_harness.h"
+#include "test_util.h"
+
+
+#define L2_GUEST_STACK_SIZE 64
+
+#define SYNC_GP 101
+#define SYNC_L2_STARTED 102
+
+static unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+
+static void guest_gp_handler(struct ex_regs *regs)
+{
+ GUEST_SYNC(SYNC_GP);
+}
+
+static void l2_code(void)
+{
+ GUEST_SYNC(SYNC_L2_STARTED);
+ vmcall();
+}
+
+static void l1_vmrun(struct svm_test_data *svm, u64 gpa)
+{
+ generic_svm_setup(svm, l2_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+ asm volatile ("vmrun %[gpa]" : : [gpa] "a" (gpa) : "memory");
+}
+
+static void l1_vmload(struct svm_test_data *svm, u64 gpa)
+{
+ generic_svm_setup(svm, l2_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+ asm volatile ("vmload %[gpa]" : : [gpa] "a" (gpa) : "memory");
+}
+
+static void l1_vmsave(struct svm_test_data *svm, u64 gpa)
+{
+ generic_svm_setup(svm, l2_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+ asm volatile ("vmsave %[gpa]" : : [gpa] "a" (gpa) : "memory");
+}
+
+static void l1_vmexit(struct svm_test_data *svm, u64 gpa)
+{
+ generic_svm_setup(svm, l2_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+ run_guest(svm->vmcb, svm->vmcb_gpa);
+ GUEST_ASSERT(svm->vmcb->control.exit_code == SVM_EXIT_VMMCALL);
+ GUEST_DONE();
+}
+
+static u64 unmappable_gpa(struct kvm_vcpu *vcpu)
+{
+ struct userspace_mem_region *region;
+ u64 region_gpa_end, vm_gpa_end = 0;
+ int i;
+
+ hash_for_each(vcpu->vm->regions.slot_hash, i, region, slot_node) {
+ region_gpa_end = region->region.guest_phys_addr + region->region.memory_size;
+ vm_gpa_end = max(vm_gpa_end, region_gpa_end);
+ }
+
+ return vm_gpa_end;
+}
+
+static void test_invalid_vmcb12(struct kvm_vcpu *vcpu)
+{
+ vm_vaddr_t nested_gva = 0;
+ struct ucall uc;
+
+
+ vm_install_exception_handler(vcpu->vm, GP_VECTOR, guest_gp_handler);
+ vcpu_alloc_svm(vcpu->vm, &nested_gva);
+ vcpu_args_set(vcpu, 2, nested_gva, -1ULL);
+ vcpu_run(vcpu);
+
+ TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
+ TEST_ASSERT_EQ(get_ucall(vcpu, &uc), UCALL_SYNC);
+ TEST_ASSERT_EQ(uc.args[1], SYNC_GP);
+}
+
+static void test_unmappable_vmcb12(struct kvm_vcpu *vcpu)
+{
+ vm_vaddr_t nested_gva = 0;
+
+ vcpu_alloc_svm(vcpu->vm, &nested_gva);
+ vcpu_args_set(vcpu, 2, nested_gva, unmappable_gpa(vcpu));
+ vcpu_run(vcpu);
+
+ TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_INTERNAL_ERROR);
+ TEST_ASSERT_EQ(vcpu->run->emulation_failure.suberror, KVM_INTERNAL_ERROR_EMULATION);
+}
+
+static void test_unmappable_vmcb12_vmexit(struct kvm_vcpu *vcpu)
+{
+ struct kvm_x86_state *state;
+ vm_vaddr_t nested_gva = 0;
+ struct ucall uc;
+
+ /*
+ * Enter L2 (with a legit vmcb12 GPA), then overwrite vmcb12 GPA with an
+ * unmappable GPA. KVM will fail to map vmcb12 on nested VM-Exit and
+ * cause a shutdown.
+ */
+ vcpu_alloc_svm(vcpu->vm, &nested_gva);
+ vcpu_args_set(vcpu, 2, nested_gva, unmappable_gpa(vcpu));
+ vcpu_run(vcpu);
+ TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
+ TEST_ASSERT_EQ(get_ucall(vcpu, &uc), UCALL_SYNC);
+ TEST_ASSERT_EQ(uc.args[1], SYNC_L2_STARTED);
+
+ state = vcpu_save_state(vcpu);
+ state->nested.hdr.svm.vmcb_pa = unmappable_gpa(vcpu);
+ vcpu_load_state(vcpu, state);
+ vcpu_run(vcpu);
+ TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_SHUTDOWN);
+
+ kvm_x86_state_cleanup(state);
+}
+
+KVM_ONE_VCPU_TEST_SUITE(vmcb12_gpa);
+
+KVM_ONE_VCPU_TEST(vmcb12_gpa, vmrun_invalid, l1_vmrun)
+{
+ test_invalid_vmcb12(vcpu);
+}
+
+KVM_ONE_VCPU_TEST(vmcb12_gpa, vmload_invalid, l1_vmload)
+{
+ test_invalid_vmcb12(vcpu);
+}
+
+KVM_ONE_VCPU_TEST(vmcb12_gpa, vmsave_invalid, l1_vmsave)
+{
+ test_invalid_vmcb12(vcpu);
+}
+
+KVM_ONE_VCPU_TEST(vmcb12_gpa, vmrun_unmappable, l1_vmrun)
+{
+ test_unmappable_vmcb12(vcpu);
+}
+
+KVM_ONE_VCPU_TEST(vmcb12_gpa, vmload_unmappable, l1_vmload)
+{
+ test_unmappable_vmcb12(vcpu);
+}
+
+KVM_ONE_VCPU_TEST(vmcb12_gpa, vmsave_unmappable, l1_vmsave)
+{
+ test_unmappable_vmcb12(vcpu);
+}
+
+/*
+ * Invalid vmcb12_gpa cannot be tested for #VMEXIT as KVM_SET_NESTED_STATE will
+ * reject it.
+ */
+KVM_ONE_VCPU_TEST(vmcb12_gpa, vmexit_unmappable, l1_vmexit)
+{
+ test_unmappable_vmcb12_vmexit(vcpu);
+}
+
+int main(int argc, char *argv[])
+{
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));
+
+ return test_harness_run(argc, argv);
+}
diff --git a/tools/testing/selftests/kvm/x86/xapic_state_test.c b/tools/testing/selftests/kvm/x86/xapic_state_test.c
index 3b4814c55722..0c5e12f5f14e 100644
--- a/tools/testing/selftests/kvm/x86/xapic_state_test.c
+++ b/tools/testing/selftests/kvm/x86/xapic_state_test.c
@@ -248,7 +248,7 @@ int main(int argc, char *argv[])
* drops writes, AMD does not). Account for the errata when checking
* that KVM reads back what was written.
*/
- x.has_xavic_errata = host_cpu_is_amd &&
+ x.has_xavic_errata = host_cpu_is_amd_compatible &&
get_kvm_amd_param_bool("avic");
vcpu_clear_cpuid_feature(x.vcpu, X86_FEATURE_X2APIC);