author		Andre Przywara <andre.przywara@arm.com>		2015-12-01 12:40:58 +0000
committer	Christoffer Dall <christoffer.dall@linaro.org>	2016-05-20 15:39:52 +0200
commit		69b6fe0c6e7f560165d655bbb127f8d69b7358ea (patch)
tree		fb82497302cd793568e24f7703beed1a2c3cea51 /virt
parent		96b298000db48360e49a1f8f9edc6d2b9c1b2548 (diff)
KVM: arm/arm64: vgic-new: Add ACTIVE registers handlers
The active register handlers are shared between the v2 and v3
emulation, so their implementation goes into vgic-mmio.c, where the
v3 emulation can easily reference them later as well.
Since activation/deactivation of an interrupt may happen entirely
inside the guest without it ever exiting, we need some extra logic
to properly track the active state.
For clearing the active state, we have to halt the guest to make
sure the change is properly propagated into the respective VCPUs.
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
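
Editor's note: the handlers added below decode one MMIO access into a range of
interrupt IDs. The ACTIVE_SET/ACTIVE_CLEAR regions are registered with one bit
per interrupt, so a byte of the register group covers eight INTIDs and an
access of len bytes at byte offset addr within the group spans INTIDs addr * 8
up to addr * 8 + len * 8 - 1; that is what the VGIC_ADDR_TO_INTID(addr, 1)
call and the "len * 8" loops express (the macro itself is not part of this
patch). A minimal stand-alone model of the read side, as a sketch only with
made-up names and a fixed IRQ count, not kernel code:

/*
 * Userspace model of vgic_mmio_read_active()'s addressing: one bit per
 * interrupt, eight INTIDs per byte of the register group.  All names and
 * constants here are illustrative only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_IRQS 64

static bool active[NR_IRQS];            /* stands in for irq->active per INTID */

static uint32_t model_read_active(unsigned int addr, unsigned int len)
{
        unsigned int intid = addr * 8;  /* what VGIC_ADDR_TO_INTID(addr, 1) yields */
        uint32_t value = 0;

        for (unsigned int i = 0; i < len * 8; i++)
                if (intid + i < NR_IRQS && active[intid + i])
                        value |= 1U << i;

        return value;
}

int main(void)
{
        active[34] = true;      /* pretend INTIDs 34 and 61 are active */
        active[61] = true;

        /* A 4-byte read at group offset 0x4 covers INTIDs 32..63. */
        printf("ISACTIVER1 -> 0x%08x\n", model_read_active(0x4, 4));
        return 0;
}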
Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/arm/vgic/vgic-mmio-v2.c	 4
-rw-r--r--	virt/kvm/arm/vgic/vgic-mmio.c		81
-rw-r--r--	virt/kvm/arm/vgic/vgic-mmio.h		10
3 files changed, 93 insertions, 2 deletions
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
index c13a7089bc9a..12e101b8fd52 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -84,10 +84,10 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = {
 		vgic_mmio_read_pending, vgic_mmio_write_cpending, 1,
 		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
-		vgic_mmio_read_raz, vgic_mmio_write_wi, 1,
+		vgic_mmio_read_active, vgic_mmio_write_sactive, 1,
 		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR,
-		vgic_mmio_read_raz, vgic_mmio_write_wi, 1,
+		vgic_mmio_read_active, vgic_mmio_write_cactive, 1,
 		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI,
 		vgic_mmio_read_raz, vgic_mmio_write_wi, 8,
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index d8dc8f6480dd..79a4622dad04 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -155,6 +155,87 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
 	}
 }
 
+unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
+				    gpa_t addr, unsigned int len)
+{
+	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+	u32 value = 0;
+	int i;
+
+	/* Loop over all IRQs affected by this read */
+	for (i = 0; i < len * 8; i++) {
+		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+		if (irq->active)
+			value |= (1U << i);
+	}
+
+	return value;
+}
+
+void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
+			     gpa_t addr, unsigned int len,
+			     unsigned long val)
+{
+	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+	int i;
+
+	kvm_arm_halt_guest(vcpu->kvm);
+	for_each_set_bit(i, &val, len * 8) {
+		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+		spin_lock(&irq->irq_lock);
+		/*
+		 * If this virtual IRQ was written into a list register, we
+		 * have to make sure the CPU that runs the VCPU thread has
+		 * synced back LR state to the struct vgic_irq.  We can only
+		 * know this for sure, when either this irq is not assigned to
+		 * anyone's AP list anymore, or the VCPU thread is not
+		 * running on any CPUs.
+		 *
+		 * In the opposite case, we know the VCPU thread may be on its
+		 * way back from the guest and still has to sync back this
+		 * IRQ, so we release and re-acquire the spin_lock to let the
+		 * other thread sync back the IRQ.
+		 */
+		while (irq->vcpu && /* IRQ may have state in an LR somewhere */
+		       irq->vcpu->cpu != -1) /* VCPU thread is running */
+			cond_resched_lock(&irq->irq_lock);
+
+		irq->active = false;
+		spin_unlock(&irq->irq_lock);
+	}
+	kvm_arm_resume_guest(vcpu->kvm);
+}
+
+void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
+			     gpa_t addr, unsigned int len,
+			     unsigned long val)
+{
+	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+	int i;
+
+	for_each_set_bit(i, &val, len * 8) {
+		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+		spin_lock(&irq->irq_lock);
+
+		/*
+		 * If the IRQ was already active or there is no target VCPU
+		 * assigned at the moment, then just proceed.
+		 */
+		if (irq->active || !irq->target_vcpu) {
+			irq->active = true;
+
+			spin_unlock(&irq->irq_lock);
+			continue;
+		}
+
+		irq->active = true;
+		vgic_queue_irq_unlock(vcpu->kvm, irq);
+	}
+}
+
 static int match_region(const void *key, const void *elt)
 {
 	const unsigned int offset = (unsigned long)key;
diff --git a/virt/kvm/arm/vgic/vgic-mmio.h b/virt/kvm/arm/vgic/vgic-mmio.h
index 97ee703a1bd6..50b4464a0730 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.h
+++ b/virt/kvm/arm/vgic/vgic-mmio.h
@@ -118,6 +118,16 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
 			      gpa_t addr, unsigned int len,
 			      unsigned long val);
 
+unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
+				    gpa_t addr, unsigned int len);
+
+void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
+			     gpa_t addr, unsigned int len,
+			     unsigned long val);
+
+void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
+			     gpa_t addr, unsigned int len,
+			     unsigned long val);
 
 unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev);
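
Editor's note: for context only, not part of the patch. From the guest side these
are the ordinary GICv2 distributor active registers, so deactivating an interrupt
by MMIO ends up in vgic_mmio_write_cactive() above. A rough bare-metal sketch;
the distributor base is an assumption matching QEMU's "virt" machine, and the
0x380 group offset is the architectural GICD_ICACTIVERn location:

#include <stdint.h>

#define GICD_BASE	0x08000000UL	/* assumed QEMU "virt" GICv2 distributor base */
#define GICD_ICACTIVER	0x380UL		/* clear-active register group (GICv2) */

/*
 * Write-one-to-clear the active bit of one INTID; the MMIO access traps
 * into KVM and is handled by vgic_mmio_write_cactive().
 */
static void gic_clear_active(unsigned int intid)
{
	volatile uint32_t *reg = (volatile uint32_t *)
		(GICD_BASE + GICD_ICACTIVER + (intid / 32) * 4);

	*reg = 1U << (intid % 32);	/* one bit per interrupt */
}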