author    | Linus Torvalds <torvalds@linux-foundation.org> | 2015-06-24 09:36:49 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-06-24 09:36:49 -0700
commit    | 4e241557fc1cb560bd9e77ca1b4a9352732a5427 (patch)
tree      | da4dbe5e5b3a8792daf9ed7e6bd320c56c86d252 /arch/s390
parent    | 08d183e3c1f650b4db1d07d764502116861542fa (diff)
parent    | f2ae45edbca7ba5324eef01719ede0151dc5cead (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull first batch of KVM updates from Paolo Bonzini:
"The bulk of the changes here is for x86. And for once it's not for
silicon that no one owns: these are really new features for everyone.
Details:
  - ARM: several features are in progress but missed the 4.2 deadline.
    So here is just a smattering of bug fixes, plus enabling the VFIO
    integration.

  - s390: Some fixes/refactorings/optimizations, plus support for 2GB
    pages.

  - x86:
     * host and guest support for marking kvmclock as a stable
       scheduler clock.
     * support for write combining.
     * support for system management mode, needed for secure boot in
       guests.
     * a bunch of cleanups required for the above
     * support for virtualized performance counters on AMD
     * legacy PCI device assignment is deprecated and defaults to "n"
       in Kconfig; VFIO replaces it

    On top of this there are also bug fixes and eager FPU context
    loading for FPU-heavy guests.

  - Common code: Support for multiple address spaces; for now it is
    used only for x86 SMM but the s390 folks also have plans"
* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (124 commits)
KVM: s390: clear floating interrupt bitmap and parameters
KVM: x86/vPMU: Enable PMU handling for AMD PERFCTRn and EVNTSELn MSRs
KVM: x86/vPMU: Implement AMD vPMU code for KVM
KVM: x86/vPMU: Define kvm_pmu_ops to support vPMU function dispatch
KVM: x86/vPMU: introduce kvm_pmu_msr_idx_to_pmc
KVM: x86/vPMU: reorder PMU functions
KVM: x86/vPMU: whitespace and stylistic adjustments in PMU code
KVM: x86/vPMU: use the new macros to go between PMC, PMU and VCPU
KVM: x86/vPMU: introduce pmu.h header
KVM: x86/vPMU: rename a few PMU functions
KVM: MTRR: do not map huge page for non-consistent range
KVM: MTRR: simplify kvm_mtrr_get_guest_memory_type
KVM: MTRR: introduce mtrr_for_each_mem_type
KVM: MTRR: introduce fixed_mtrr_addr_* functions
KVM: MTRR: sort variable MTRRs
KVM: MTRR: introduce var_mtrr_range
KVM: MTRR: introduce fixed_mtrr_segment table
KVM: MTRR: improve kvm_mtrr_get_guest_memory_type
KVM: MTRR: do not split 64 bits MSR content
KVM: MTRR: clean up mtrr default type
...
Diffstat (limited to 'arch/s390')
-rw-r--r-- | arch/s390/include/asm/kvm_host.h |  6
-rw-r--r-- | arch/s390/kernel/entry.S         |  2
-rw-r--r-- | arch/s390/kvm/intercept.c        | 16
-rw-r--r-- | arch/s390/kvm/interrupt.c        | 90
-rw-r--r-- | arch/s390/kvm/kvm-s390.c         | 81
-rw-r--r-- | arch/s390/kvm/kvm-s390.h         | 25
-rw-r--r-- | arch/s390/kvm/priv.c             |  8
7 files changed, 140 insertions, 88 deletions
```diff
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index d01fc588b5c3..3024acbe1f9d 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -80,6 +80,7 @@ struct sca_block {
 #define CPUSTAT_MCDS       0x00000100
 #define CPUSTAT_SM         0x00000080
 #define CPUSTAT_IBS        0x00000040
+#define CPUSTAT_GED2       0x00000010
 #define CPUSTAT_G          0x00000008
 #define CPUSTAT_GED        0x00000004
 #define CPUSTAT_J          0x00000002
@@ -95,7 +96,8 @@ struct kvm_s390_sie_block {
 #define PROG_IN_SIE (1<<0)
 	__u32	prog0c;			/* 0x000c */
 	__u8	reserved10[16];		/* 0x0010 */
-#define PROG_BLOCK_SIE 0x00000001
+#define PROG_BLOCK_SIE	(1<<0)
+#define PROG_REQUEST	(1<<1)
 	atomic_t prog20;		/* 0x0020 */
 	__u8	reserved24[4];		/* 0x0024 */
 	__u64	cputm;			/* 0x0028 */
@@ -634,7 +636,7 @@ static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 static inline void kvm_arch_free_memslot(struct kvm *kvm,
 		struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
-static inline void kvm_arch_memslots_updated(struct kvm *kvm) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
 static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
 static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 		struct kvm_memory_slot *slot) {}
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 99b44acbfcc7..3238893c9d4f 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -1005,7 +1005,7 @@ ENTRY(sie64a)
 .Lsie_gmap:
 	lg	%r14,__SF_EMPTY(%r15)	# get control block pointer
 	oi	__SIE_PROG0C+3(%r14),1	# we are going into SIE now
-	tm	__SIE_PROG20+3(%r14),1	# last exit...
+	tm	__SIE_PROG20+3(%r14),3	# last exit...
 	jnz	.Lsie_done
 	LPP	__SF_EMPTY(%r15)	# set guest id
 	sie	0(%r14)
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 9e3779e3e496..7365e8a46032 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -241,21 +241,6 @@ static int handle_prog(struct kvm_vcpu *vcpu)
 	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
 }
 
-static int handle_instruction_and_prog(struct kvm_vcpu *vcpu)
-{
-	int rc, rc2;
-
-	vcpu->stat.exit_instr_and_program++;
-	rc = handle_instruction(vcpu);
-	rc2 = handle_prog(vcpu);
-
-	if (rc == -EOPNOTSUPP)
-		vcpu->arch.sie_block->icptcode = 0x04;
-	if (rc)
-		return rc;
-	return rc2;
-}
-
 /**
  * handle_external_interrupt - used for external interruption interceptions
  *
@@ -355,7 +340,6 @@ static const intercept_handler_t intercept_funcs[] = {
 	[0x00 >> 2] = handle_noop,
 	[0x04 >> 2] = handle_instruction,
 	[0x08 >> 2] = handle_prog,
-	[0x0C >> 2] = handle_instruction_and_prog,
 	[0x10 >> 2] = handle_noop,
 	[0x14 >> 2] = handle_external_interrupt,
 	[0x18 >> 2] = handle_noop,
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 0d3deef6edff..c98d89708e99 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -134,6 +134,8 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
 	active_mask = pending_local_irqs(vcpu);
 	active_mask |= pending_floating_irqs(vcpu);
+	if (!active_mask)
+		return 0;
 
 	if (psw_extint_disabled(vcpu))
 		active_mask &= ~IRQ_PEND_EXT_MASK;
@@ -941,12 +943,9 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 	if (cpu_timer_irq_pending(vcpu))
 		set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
 
-	do {
-		irqs = deliverable_irqs(vcpu);
+	while ((irqs = deliverable_irqs(vcpu)) && !rc) {
 		/* bits are in the order of interrupt priority */
 		irq_type = find_first_bit(&irqs, IRQ_PEND_COUNT);
-		if (irq_type == IRQ_PEND_COUNT)
-			break;
 		if (is_ioirq(irq_type)) {
 			rc = __deliver_io(vcpu, irq_type);
 		} else {
@@ -958,9 +957,7 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 			}
 			rc = func(vcpu);
 		}
-		if (rc)
-			break;
-	} while (!rc);
+	}
 
 	set_intercept_indicators(vcpu);
@@ -1061,7 +1058,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 	if (sclp.has_sigpif)
 		return __inject_extcall_sigpif(vcpu, src_id);
 
-	if (!test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
+	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
 		return -EBUSY;
 	*extcall = irq->u.extcall;
 	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
@@ -1340,12 +1337,54 @@ static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
 	return 0;
 }
 
-static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
+/*
+ * Find a destination VCPU for a floating irq and kick it.
+ */
+static void __floating_irq_kick(struct kvm *kvm, u64 type)
 {
+	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
 	struct kvm_s390_local_interrupt *li;
+	struct kvm_vcpu *dst_vcpu;
+	int sigcpu, online_vcpus, nr_tries = 0;
+
+	online_vcpus = atomic_read(&kvm->online_vcpus);
+	if (!online_vcpus)
+		return;
+
+	/* find idle VCPUs first, then round robin */
+	sigcpu = find_first_bit(fi->idle_mask, online_vcpus);
+	if (sigcpu == online_vcpus) {
+		do {
+			sigcpu = fi->next_rr_cpu;
+			fi->next_rr_cpu = (fi->next_rr_cpu + 1) % online_vcpus;
+			/* avoid endless loops if all vcpus are stopped */
+			if (nr_tries++ >= online_vcpus)
+				return;
+		} while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
+	}
+	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
+
+	/* make the VCPU drop out of the SIE, or wake it up if sleeping */
+	li = &dst_vcpu->arch.local_int;
+	spin_lock(&li->lock);
+	switch (type) {
+	case KVM_S390_MCHK:
+		atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
+		break;
+	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+		atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
+		break;
+	default:
+		atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+		break;
+	}
+	spin_unlock(&li->lock);
+	kvm_s390_vcpu_wakeup(dst_vcpu);
+}
+
+static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
+{
 	struct kvm_s390_float_interrupt *fi;
-	struct kvm_vcpu *dst_vcpu = NULL;
-	int sigcpu;
 	u64 type = READ_ONCE(inti->type);
 	int rc;
@@ -1373,32 +1412,8 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
 	if (rc)
 		return rc;
 
-	sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
-	if (sigcpu == KVM_MAX_VCPUS) {
-		do {
-			sigcpu = fi->next_rr_cpu++;
-			if (sigcpu == KVM_MAX_VCPUS)
-				sigcpu = fi->next_rr_cpu = 0;
-		} while (kvm_get_vcpu(kvm, sigcpu) == NULL);
-	}
-	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
-	li = &dst_vcpu->arch.local_int;
-	spin_lock(&li->lock);
-	switch (type) {
-	case KVM_S390_MCHK:
-		atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
-		break;
-	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
-		atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
-		break;
-	default:
-		atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
-		break;
-	}
-	spin_unlock(&li->lock);
-	kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
+	__floating_irq_kick(kvm, type);
 	return 0;
-
 }
 
 int kvm_s390_inject_vm(struct kvm *kvm,
@@ -1606,6 +1621,9 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm)
 	int i;
 
 	spin_lock(&fi->lock);
+	fi->pending_irqs = 0;
+	memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
+	memset(&fi->mchk, 0, sizeof(fi->mchk));
 	for (i = 0; i < FIRQ_LIST_COUNT; i++)
 		clear_irq_list(&fi->lists[i]);
 	for (i = 0; i < FIRQ_MAX_COUNT; i++)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index c4e81b26c1b0..2078f92d15ac 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -36,6 +36,10 @@
 #include "kvm-s390.h"
 #include "gaccess.h"
 
+#define KMSG_COMPONENT "kvm-s390"
+#undef pr_fmt
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #define CREATE_TRACE_POINTS
 #include "trace.h"
 #include "trace-s390.h"
@@ -110,7 +114,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 /* upper facilities limit for kvm */
 unsigned long kvm_s390_fac_list_mask[] = {
 	0xffe6fffbfcfdfc40UL,
-	0x005c800000000000UL,
+	0x005e800000000000UL,
 };
 
 unsigned long kvm_s390_fac_list_mask_size(void)
@@ -236,6 +240,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 {
 	int r;
 	unsigned long n;
+	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
 	int is_dirty = 0;
 
@@ -245,7 +250,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	if (log->slot >= KVM_USER_MEM_SLOTS)
 		goto out;
 
-	memslot = id_to_memslot(kvm->memslots, log->slot);
+	slots = kvm_memslots(kvm);
+	memslot = id_to_memslot(slots, log->slot);
 	r = -ENOENT;
 	if (!memslot->dirty_bitmap)
 		goto out;
@@ -454,10 +460,10 @@ static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 
 	mutex_lock(&kvm->lock);
 	kvm->arch.epoch = gtod - host_tod;
-	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
+	kvm_s390_vcpu_block_all(kvm);
+	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm)
 		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
-		exit_sie(cur_vcpu);
-	}
+	kvm_s390_vcpu_unblock_all(kvm);
 	mutex_unlock(&kvm->lock);
 	return 0;
 }
@@ -1311,8 +1317,13 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
 						    CPUSTAT_SM |
-						    CPUSTAT_STOPPED |
-						    CPUSTAT_GED);
+						    CPUSTAT_STOPPED);
+
+	if (test_kvm_facility(vcpu->kvm, 78))
+		atomic_set_mask(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
+	else if (test_kvm_facility(vcpu->kvm, 8))
+		atomic_set_mask(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
+
 	kvm_s390_vcpu_setup_model(vcpu);
 
 	vcpu->arch.sie_block->ecb   = 6;
@@ -1409,16 +1420,28 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 	return kvm_s390_vcpu_has_irq(vcpu, 0);
 }
 
-void s390_vcpu_block(struct kvm_vcpu *vcpu)
+void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
 {
 	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
+	exit_sie(vcpu);
 }
 
-void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
+void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
 {
 	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
 }
 
+static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
+{
+	atomic_set_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+	exit_sie(vcpu);
+}
+
+static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
+{
+	atomic_clear_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+}
+
 /*
  * Kick a guest cpu out of SIE and wait until SIE is not running.
  * If the CPU is not running (e.g. waiting as idle) the function will
@@ -1430,11 +1453,11 @@ void exit_sie(struct kvm_vcpu *vcpu)
 		cpu_relax();
 }
 
-/* Kick a guest cpu out of SIE and prevent SIE-reentry */
-void exit_sie_sync(struct kvm_vcpu *vcpu)
+/* Kick a guest cpu out of SIE to process a request synchronously */
+void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
 {
-	s390_vcpu_block(vcpu);
-	exit_sie(vcpu);
+	kvm_make_request(req, vcpu);
+	kvm_s390_vcpu_request(vcpu);
 }
 
 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
@@ -1447,8 +1470,7 @@ static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
 		/* match against both prefix pages */
 		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
 			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
-			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
-			exit_sie_sync(vcpu);
+			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
 		}
 	}
 }
@@ -1720,8 +1742,10 @@ static bool ibs_enabled(struct kvm_vcpu *vcpu)
 
 static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
 {
+	if (!vcpu->requests)
+		return 0;
 retry:
-	s390_vcpu_unblock(vcpu);
+	kvm_s390_vcpu_request_handled(vcpu);
 	/*
 	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
 	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
@@ -1993,12 +2017,14 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 		 * As PF_VCPU will be used in fault handler, between
 		 * guest_enter and guest_exit should be no uaccess.
 		 */
-		preempt_disable();
-		kvm_guest_enter();
-		preempt_enable();
+		local_irq_disable();
+		__kvm_guest_enter();
+		local_irq_enable();
 		exit_reason = sie64a(vcpu->arch.sie_block,
 				     vcpu->run->s.regs.gprs);
-		kvm_guest_exit();
+		local_irq_disable();
+		__kvm_guest_exit();
+		local_irq_enable();
 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 
 		rc = vcpu_post_run(vcpu, exit_reason);
@@ -2068,7 +2094,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
 		kvm_s390_vcpu_start(vcpu);
 	} else if (is_vcpu_stopped(vcpu)) {
-		pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
+		pr_err_ratelimited("can't run stopped vcpu %d\n",
 				   vcpu->vcpu_id);
 		return -EINVAL;
 	}
@@ -2206,8 +2232,7 @@ int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
 static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
 {
 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
-	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
-	exit_sie_sync(vcpu);
+	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
 }
 
 static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
@@ -2223,8 +2248,7 @@ static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
 static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
 {
 	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
-	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
-	exit_sie_sync(vcpu);
+	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
 }
 
 void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
@@ -2563,7 +2587,7 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
 /* Section: memory related */
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot,
-				   struct kvm_userspace_memory_region *mem,
+				   const struct kvm_userspace_memory_region *mem,
 				   enum kvm_mr_change change)
 {
 	/* A few sanity checks. We can have memory slots which have to be
@@ -2581,8 +2605,9 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 }
 
 void kvm_arch_commit_memory_region(struct kvm *kvm,
-				struct kvm_userspace_memory_region *mem,
+				const struct kvm_userspace_memory_region *mem,
 				const struct kvm_memory_slot *old,
+				const struct kvm_memory_slot *new,
 				enum kvm_mr_change change)
 {
 	int rc;
@@ -2601,7 +2626,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
 		mem->guest_phys_addr, mem->memory_size);
 	if (rc)
-		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
+		pr_warn("failed to commit memory region\n");
 	return;
 }
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index ca108b90ae56..c5704786e473 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -211,10 +211,10 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
 int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
 void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
-void s390_vcpu_block(struct kvm_vcpu *vcpu);
-void s390_vcpu_unblock(struct kvm_vcpu *vcpu);
+void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu);
+void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu);
 void exit_sie(struct kvm_vcpu *vcpu);
-void exit_sie_sync(struct kvm_vcpu *vcpu);
+void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu);
 int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
 void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu);
 /* is cmma enabled */
@@ -228,6 +228,25 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);
 int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
 			     struct kvm_s390_pgm_info *pgm_info);
 
+static inline void kvm_s390_vcpu_block_all(struct kvm *kvm)
+{
+	int i;
+	struct kvm_vcpu *vcpu;
+
+	WARN_ON(!mutex_is_locked(&kvm->lock));
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		kvm_s390_vcpu_block(vcpu);
+}
+
+static inline void kvm_s390_vcpu_unblock_all(struct kvm *kvm)
+{
+	int i;
+	struct kvm_vcpu *vcpu;
+
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		kvm_s390_vcpu_unblock(vcpu);
+}
+
 /**
  * kvm_s390_inject_prog_cond - conditionally inject a program check
  * @vcpu: virtual cpu
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index d22d8ee1ff9d..ad4242245771 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -698,10 +698,14 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
 	case 0x00001000:
 		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
 		break;
-	/* We dont support EDAT2
 	case 0x00002000:
+		/* only support 2G frame size if EDAT2 is available and we are
+		   not in 24-bit addressing mode */
+		if (!test_kvm_facility(vcpu->kvm, 78) ||
+		    psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_24BIT)
+			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 		end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
-		break;*/
+		break;
 	default:
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 	}
```
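The kvm_host.h and entry.S hunks above work together: PROG_REQUEST becomes a second bit in prog20, and the SIE entry path now tests mask 3 instead of 1, so either PROG_BLOCK_SIE or a pending PROG_REQUEST keeps the CPU out of SIE. A minimal stand-alone C sketch of that bit logic (not kernel code; `may_enter_sie` is a hypothetical helper name, the constants mirror the header):

```c
#include <stdio.h>

/* Mirrors the prog20 bit definitions from the kvm_host.h hunk above. */
#define PROG_BLOCK_SIE (1 << 0)
#define PROG_REQUEST   (1 << 1)

/* entry.S tests mask 3 = PROG_BLOCK_SIE | PROG_REQUEST: either bit
 * prevents (re)entering SIE until it is cleared. */
static int may_enter_sie(unsigned int prog20)
{
	return (prog20 & (PROG_BLOCK_SIE | PROG_REQUEST)) == 0;
}

int main(void)
{
	printf("%d\n", may_enter_sie(0));              /* 1: clean, may enter */
	printf("%d\n", may_enter_sie(PROG_REQUEST));   /* 0: pending request blocks entry */
	printf("%d\n", may_enter_sie(PROG_BLOCK_SIE)); /* 0: explicitly blocked */
	return 0;
}
```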
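The new __floating_irq_kick in the interrupt.c hunk picks a delivery target by preferring an idle VCPU and otherwise falling back to round-robin, skipping stopped VCPUs with a bounded number of tries. A stand-alone sketch of just that selection policy, with hypothetical `idle`/`stopped` arrays standing in for fi->idle_mask and the per-VCPU stopped state:

```c
#include <stdbool.h>
#include <stdio.h>

#define NVCPUS 4

static bool idle[NVCPUS]    = { false, false, true, false };
static bool stopped[NVCPUS] = { true, false, false, false };
static int next_rr;	/* models fi->next_rr_cpu */

/* Returns the chosen VCPU index, or -1 if every VCPU is stopped. */
static int pick_dst_vcpu(int online_vcpus)
{
	int sigcpu, nr_tries = 0;

	/* find idle VCPUs first ... */
	for (sigcpu = 0; sigcpu < online_vcpus; sigcpu++)
		if (idle[sigcpu])
			return sigcpu;
	/* ... then round robin, skipping stopped VCPUs */
	do {
		sigcpu = next_rr;
		next_rr = (next_rr + 1) % online_vcpus;
		/* avoid endless loops if all vcpus are stopped */
		if (nr_tries++ >= online_vcpus)
			return -1;
	} while (stopped[sigcpu]);
	return sigcpu;
}

int main(void)
{
	printf("dst vcpu: %d\n", pick_dst_vcpu(NVCPUS)); /* 2: the idle VCPU wins */
	return 0;
}
```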
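The TOD-epoch hunk in kvm-s390.c replaces the per-VCPU exit_sie kick with the new kvm_s390_vcpu_block_all/kvm_s390_vcpu_unblock_all helpers from kvm-s390.h. A toy model of that pattern (`struct vcpu` and the helpers here are simplified stand-ins, not the kernel types):

```c
#include <stdio.h>

struct vcpu { int blocked; long long epoch; };

static void block(struct vcpu *v)   { v->blocked = 1; /* kernel also kicks the CPU out of SIE */ }
static void unblock(struct vcpu *v) { v->blocked = 0; }

int main(void)
{
	struct vcpu vcpus[3] = { {0}, {0}, {0} };
	long long new_epoch = 42;
	int i;

	/* All VCPUs are held out of SIE while every epoch field is
	 * updated, so no guest CPU runs with a mix of old and new
	 * epoch values. */
	for (i = 0; i < 3; i++)
		block(&vcpus[i]);
	for (i = 0; i < 3; i++)
		vcpus[i].epoch = new_epoch;
	for (i = 0; i < 3; i++)
		unblock(&vcpus[i]);

	printf("epoch[0..2] = %lld %lld %lld\n",
	       vcpus[0].epoch, vcpus[1].epoch, vcpus[2].epoch);
	return 0;
}
```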
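Finally, the priv.c hunk enables the previously commented-out 2G frame size for PFMF when EDAT2 (facility 78) is available. The end address is rounded up to the next 2G frame boundary via `end = (start + 2G) & ~(2G - 1)`; a tiny stand-alone check of that arithmetic with a sample start address:

```c
#include <stdio.h>

int main(void)
{
	/* sample start address; kernel uses unsigned long on 64-bit s390 */
	unsigned long long start = 0x8d000000ULL;
	unsigned long long end = (start + (1ULL << 31)) & ~((1ULL << 31) - 1);

	printf("start=%#llx end=%#llx\n", start, end); /* end=0x100000000 */
	return 0;
}
```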