author     Marc Zyngier <maz@kernel.org>  2026-01-19 10:29:22 +0800
committer  Will Deacon <will@kernel.org>  2026-01-22 13:24:49 +0000
commit     f174a9ffcd48d78a45d560c02ce4071ded036b53 (patch)
tree       25fd4dea206ad4a0ae8796af33698919b2ed3359
parent     018a231b0260ebd85eddca3fec85031b59f50117 (diff)
KVM: arm64: Add exit to userspace on {LD,ST}64B* outside of memslots
The main use of {LD,ST}64B* is to talk to a device, which is hopefully directly assigned to the guest and requires no additional handling. However, this does not preclude a VMM from exposing a virtual device to the guest and allowing 64-byte accesses as part of its programming interface. A direct consequence of this is that we need to be able to forward such accesses to userspace.

Given that such a contraption is very unlikely to ever exist, we choose to offer a limited service: userspace gets (as part of a new exit reason) the ESR, the IPA, and that's it. It is fully expected to handle the full semantics of the instructions, deal with ACCDATA, the return values, and increment PC. Much fun.

A canonical implementation can also simply inject an abort and be done with it. Frankly, don't try to do anything else unless you have time to waste.

Acked-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Oliver Upton <oupton@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Yicong Yang <yangyicong@hisilicon.com>
Signed-off-by: Zhou Wang <wangzhou1@hisilicon.com>
Signed-off-by: Will Deacon <will@kernel.org>
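[Editor's note, for illustration only: a minimal sketch of the "canonical lazy" userspace side described above, assuming the VMM already holds the vcpu fd and its mmap'd kvm_run. It ignores the forwarded ESR/IPA and simply injects an external data abort via KVM_SET_VCPU_EVENTS (the ext_dabt_pending mechanism behind KVM_CAP_ARM_INJECT_EXT_DABT); the function name is made up.]

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical VMM helper: called when run->exit_reason == KVM_EXIT_ARM_LDST64B. */
static int handle_ldst64b_exit(int vcpu_fd, struct kvm_run *run)
{
	/* KVM forwards the ESR ISS (FSC bits cleared) and the faulting IPA. */
	__u64 esr_iss   = run->arm_nisv.esr_iss;
	__u64 fault_ipa = run->arm_nisv.fault_ipa;
	struct kvm_vcpu_events events;

	(void)esr_iss;
	(void)fault_ipa;	/* unused: we don't even try to emulate */

	/*
	 * Lazy path from the commit message: make the access fail by
	 * injecting an external data abort. A real emulation would instead
	 * decode the instruction, move the 64 bytes to/from the virtual
	 * device, deal with the ST64BV/ST64BV0 return value and ACCDATA,
	 * and advance PC itself.
	 */
	memset(&events, 0, sizeof(events));
	events.exception.ext_dabt_pending = 1;

	return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
}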
-rw-r--r--  arch/arm64/kvm/mmio.c     27
-rw-r--r--  include/uapi/linux/kvm.h   3
2 files changed, 28 insertions, 2 deletions
diff --git a/arch/arm64/kvm/mmio.c b/arch/arm64/kvm/mmio.c
index 54f9358c9e0e..e2285ed8c91d 100644
--- a/arch/arm64/kvm/mmio.c
+++ b/arch/arm64/kvm/mmio.c
@@ -159,6 +159,9 @@ int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
bool is_write;
int len;
u8 data_buf[8];
+ u64 esr;
+
+ esr = kvm_vcpu_get_esr(vcpu);
/*
* No valid syndrome? Ask userspace for help if it has
@@ -168,7 +171,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
* though, so directly deliver an exception to the guest.
*/
if (!kvm_vcpu_dabt_isvalid(vcpu)) {
- trace_kvm_mmio_nisv(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu),
+ trace_kvm_mmio_nisv(*vcpu_pc(vcpu), esr,
kvm_vcpu_get_hfar(vcpu), fault_ipa);
if (vcpu_is_protected(vcpu))
@@ -186,6 +189,28 @@ int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
}
/*
+ * When (DFSC == 0b00xxxx || DFSC == 0b10101x) && DFSC != 0b0000xx
+ * ESR_EL2[12:11] describe the Load/Store Type. This allows us to
+ * punt the LD64B/ST64B/ST64BV/ST64BV0 instructions to userspace,
+ * which will have to provide a full emulation of these 4
+ * instructions. No, we don't expect this to be fast.
+ *
+ * We rely on traps being set if the corresponding features are not
+ * enabled, so if we get here, userspace has promised us to handle
+ * it already.
+ */
+ switch (kvm_vcpu_trap_get_fault(vcpu)) {
+ case 0b000100 ... 0b001111:
+ case 0b101010 ... 0b101011:
+ if (FIELD_GET(GENMASK(12, 11), esr)) {
+ run->exit_reason = KVM_EXIT_ARM_LDST64B;
+ run->arm_nisv.esr_iss = esr & ~(u64)ESR_ELx_FSC;
+ run->arm_nisv.fault_ipa = fault_ipa;
+ return 0;
+ }
+ }
+
+ /*
* Prepare MMIO operation. First decode the syndrome data we get
* from the CPU. Then try if some in-kernel emulation feels
* responsible, otherwise let user space do its magic.
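[Editor's note: on the userspace side, the forwarded esr_iss still carries the Load/Store Type in bits [12:11] referred to by the comment above, so a VMM that does attempt emulation can tell which of the four instructions trapped. The sketch below is a guess at that decode: the helper name is invented, and the value-to-instruction mapping (0b01 ST64BV, 0b10 LD64B/ST64B, 0b11 ST64BV0) is one reading of the Arm ARM LST field and should be verified against the spec.]

/* Hypothetical userspace helper; operates on run->arm_nisv.esr_iss. */
enum ls64_op { LS64_NONE, LS64_ST64BV, LS64_LD64B_ST64B, LS64_ST64BV0 };

static enum ls64_op ls64_op_from_esr(unsigned long long esr_iss)
{
	switch ((esr_iss >> 11) & 0x3) {	/* LST, ESR ISS bits [12:11] */
	case 1:  return LS64_ST64BV;		/* assumed encoding */
	case 2:  return LS64_LD64B_ST64B;	/* assumed; WnR (bit 6) separates load from store */
	case 3:  return LS64_ST64BV0;		/* assumed encoding */
	default: return LS64_NONE;
	}
}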
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index dddb781b0507..88cca0e22ece 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -180,6 +180,7 @@ struct kvm_xen_exit {
#define KVM_EXIT_MEMORY_FAULT 39
#define KVM_EXIT_TDX 40
#define KVM_EXIT_ARM_SEA 41
+#define KVM_EXIT_ARM_LDST64B 42
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
@@ -402,7 +403,7 @@ struct kvm_run {
} eoi;
/* KVM_EXIT_HYPERV */
struct kvm_hyperv_exit hyperv;
- /* KVM_EXIT_ARM_NISV */
+ /* KVM_EXIT_ARM_NISV / KVM_EXIT_ARM_LDST64B */
struct {
__u64 esr_iss;
__u64 fault_ipa;