Diffstat (limited to 'arch/s390/kvm')
-rw-r--r--	arch/s390/kvm/Kconfig		|   2
-rw-r--r--	arch/s390/kvm/intercept.c	|  45
-rw-r--r--	arch/s390/kvm/interrupt.c	| 272
-rw-r--r--	arch/s390/kvm/kvm-s390.c	|  54
-rw-r--r--	arch/s390/kvm/kvm-s390.h	|  46
-rw-r--r--	arch/s390/kvm/priv.c		| 316
-rw-r--r--	arch/s390/kvm/sigp.c		|  10
-rw-r--r--	arch/s390/kvm/trace-s390.h	|  26
8 files changed, 685 insertions(+), 86 deletions(-)
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index b58dd869cb32..60f9f8ae0fc8 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -18,7 +18,7 @@ if VIRTUALIZATION
 config KVM
 	def_tristate y
 	prompt "Kernel-based Virtual Machine (KVM) support"
-	depends on HAVE_KVM && EXPERIMENTAL
+	depends on HAVE_KVM
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
 	select HAVE_KVM_CPU_RELAX_INTERCEPT
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 22798ec33fd1..f26ff1e31bdb 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -26,27 +26,20 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
 {
 	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
 	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
-	int base2 = vcpu->arch.sie_block->ipb >> 28;
-	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
-			((vcpu->arch.sie_block->ipb & 0xff00) << 4);
 	u64 useraddr;
 	int reg, rc;
 
 	vcpu->stat.instruction_lctlg++;
-	if ((vcpu->arch.sie_block->ipb & 0xff) != 0x2f)
-		return -EOPNOTSUPP;
 
-	useraddr = disp2;
-	if (base2)
-		useraddr += vcpu->run->s.regs.gprs[base2];
+	useraddr = kvm_s390_get_base_disp_rsy(vcpu);
 
 	if (useraddr & 7)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
 	reg = reg1;
 
-	VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
-		   disp2);
+	VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3,
+		   useraddr);
 	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, useraddr);
 
 	do {
@@ -68,23 +61,19 @@ static int handle_lctl(struct kvm_vcpu *vcpu)
 {
 	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
 	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
-	int base2 = vcpu->arch.sie_block->ipb >> 28;
-	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
 	u64 useraddr;
 	u32 val = 0;
 	int reg, rc;
 
 	vcpu->stat.instruction_lctl++;
 
-	useraddr = disp2;
-	if (base2)
-		useraddr += vcpu->run->s.regs.gprs[base2];
+	useraddr = kvm_s390_get_base_disp_rs(vcpu);
 
 	if (useraddr & 3)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
-	VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
-		   disp2);
+	VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3,
+		   useraddr);
 	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, useraddr);
 
 	reg = reg1;
@@ -104,14 +93,31 @@ static int handle_lctl(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-static intercept_handler_t instruction_handlers[256] = {
+static const intercept_handler_t eb_handlers[256] = {
+	[0x2f] = handle_lctlg,
+	[0x8a] = kvm_s390_handle_priv_eb,
+};
+
+static int handle_eb(struct kvm_vcpu *vcpu)
+{
+	intercept_handler_t handler;
+
+	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
+	if (handler)
+		return handler(vcpu);
+	return -EOPNOTSUPP;
+}
+
+static const intercept_handler_t instruction_handlers[256] = {
 	[0x01] = kvm_s390_handle_01,
+	[0x82] = kvm_s390_handle_lpsw,
 	[0x83] = kvm_s390_handle_diag,
 	[0xae] = kvm_s390_handle_sigp,
 	[0xb2] = kvm_s390_handle_b2,
 	[0xb7] = handle_lctl,
+	[0xb9] = kvm_s390_handle_b9,
 	[0xe5] = kvm_s390_handle_e5,
-	[0xeb] = handle_lctlg,
+	[0xeb] = handle_eb,
 };
 
 static int handle_noop(struct kvm_vcpu *vcpu)
@@ -258,6 +264,7 @@ static const intercept_handler_t intercept_funcs[] = {
	[0x0C >> 2] = handle_instruction_and_prog,
	[0x10 >> 2] = handle_noop,
	[0x14 >> 2] = handle_noop,
+	[0x18 >> 2] = handle_noop,
	[0x1C >> 2] = kvm_s390_handle_wait,
	[0x20 >> 2] = handle_validity,
	[0x28 >> 2] = handle_stop,
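[Note: the new handle_eb() above replaces the old hard-coded 0x2f check with table-driven dispatch on the second opcode byte held in the SIE ipb field. A minimal, self-contained sketch of the same pattern, with illustrative names that are not the kernel's:

#include <stdio.h>

typedef int (*handler_t)(unsigned int ipa, unsigned int ipb);

static int handle_lctlg_stub(unsigned int ipa, unsigned int ipb)
{
	/* For RSY-format EB instructions, ipa holds instruction bytes 0-1
	 * and the low byte of ipb holds the second opcode byte
	 * (0x2f = lctlg). */
	printf("lctlg stub: ipa=%04x ipb=%08x\n", ipa, ipb);
	return 0;
}

static const handler_t eb_handlers[256] = {
	[0x2f] = handle_lctlg_stub,
};

static int dispatch_eb(unsigned int ipa, unsigned int ipb)
{
	handler_t handler = eb_handlers[ipb & 0xff];

	return handler ? handler(ipa, ipb) : -1;	/* -1 ~ -EOPNOTSUPP */
}

int main(void)
{
	/* LCTLG 1,3,0(5): ipa = 0xeb13; ipb encodes b2=5, d2=0, dh2=0,
	 * second opcode byte 0x2f. */
	return dispatch_eb(0xeb13, 0x5000002f) ? 1 : 0;
}

Unknown sub-opcodes fall through to the -EOPNOTSUPP path so the caller can hand them to userspace, matching the diff's behavior.]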
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index c30615e605ac..37116a77cb4b 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -21,11 +21,31 @@
 #include "gaccess.h"
 #include "trace-s390.h"
 
+#define IOINT_SCHID_MASK 0x0000ffff
+#define IOINT_SSID_MASK 0x00030000
+#define IOINT_CSSID_MASK 0x03fc0000
+#define IOINT_AI_MASK 0x04000000
+
+static int is_ioint(u64 type)
+{
+	return ((type & 0xfffe0000u) != 0xfffe0000u);
+}
+
 static int psw_extint_disabled(struct kvm_vcpu *vcpu)
 {
 	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
 }
 
+static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
+{
+	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
+}
+
+static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
+{
+	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
+}
+
 static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
 {
 	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
@@ -35,6 +55,13 @@ static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
+static u64 int_word_to_isc_bits(u32 int_word)
+{
+	u8 isc = (int_word & 0x38000000) >> 27;
+
+	return (0x80 >> isc) << 24;
+}
+
 static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
 				      struct kvm_s390_interrupt_info *inti)
 {
@@ -67,7 +94,22 @@ static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
 	case KVM_S390_SIGP_SET_PREFIX:
 	case KVM_S390_RESTART:
 		return 1;
+	case KVM_S390_MCHK:
+		if (psw_mchk_disabled(vcpu))
+			return 0;
+		if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14)
+			return 1;
+		return 0;
+	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+		if (psw_ioint_disabled(vcpu))
+			return 0;
+		if (vcpu->arch.sie_block->gcr[6] &
+		    int_word_to_isc_bits(inti->io.io_int_word))
+			return 1;
+		return 0;
 	default:
+		printk(KERN_WARNING "illegal interrupt type %llx\n",
+		       inti->type);
 		BUG();
 	}
 	return 0;
@@ -93,6 +135,7 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
 		  CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
 		  &vcpu->arch.sie_block->cpuflags);
 	vcpu->arch.sie_block->lctl = 0x0000;
+	vcpu->arch.sie_block->ictl &= ~ICTL_LPSW;
 }
 
 static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
@@ -116,6 +159,18 @@ static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
 	case KVM_S390_SIGP_STOP:
 		__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
 		break;
+	case KVM_S390_MCHK:
+		if (psw_mchk_disabled(vcpu))
+			vcpu->arch.sie_block->ictl |= ICTL_LPSW;
+		else
+			vcpu->arch.sie_block->lctl |= LCTL_CR14;
+		break;
+	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+		if (psw_ioint_disabled(vcpu))
+			__set_cpuflag(vcpu, CPUSTAT_IO_INT);
+		else
+			vcpu->arch.sie_block->lctl |= LCTL_CR6;
+		break;
 	default:
 		BUG();
 	}
@@ -297,6 +352,73 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 			exception = 1;
 		break;
 
+	case KVM_S390_MCHK:
+		VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
+			   inti->mchk.mcic);
+		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
+						 inti->mchk.cr14,
+						 inti->mchk.mcic);
+		rc = kvm_s390_vcpu_store_status(vcpu,
+						KVM_S390_STORE_STATUS_PREFIXED);
+		if (rc == -EFAULT)
+			exception = 1;
+
+		rc = put_guest_u64(vcpu, __LC_MCCK_CODE, inti->mchk.mcic);
+		if (rc == -EFAULT)
+			exception = 1;
+
+		rc = copy_to_guest(vcpu, __LC_MCK_OLD_PSW,
+				   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+		if (rc == -EFAULT)
+			exception = 1;
+
+		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+				     __LC_MCK_NEW_PSW, sizeof(psw_t));
+		if (rc == -EFAULT)
+			exception = 1;
+		break;
+
+	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+	{
+		__u32 param0 = ((__u32)inti->io.subchannel_id << 16) |
+			inti->io.subchannel_nr;
+		__u64 param1 = ((__u64)inti->io.io_int_parm << 32) |
+			inti->io.io_int_word;
+		VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
+		vcpu->stat.deliver_io_int++;
+		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
+						 param0, param1);
+		rc = put_guest_u16(vcpu, __LC_SUBCHANNEL_ID,
+				   inti->io.subchannel_id);
+		if (rc == -EFAULT)
+			exception = 1;
+
+		rc = put_guest_u16(vcpu, __LC_SUBCHANNEL_NR,
+				   inti->io.subchannel_nr);
+		if (rc == -EFAULT)
+			exception = 1;
+
+		rc = put_guest_u32(vcpu, __LC_IO_INT_PARM,
+				   inti->io.io_int_parm);
+		if (rc == -EFAULT)
+			exception = 1;
+
+		rc = put_guest_u32(vcpu, __LC_IO_INT_WORD,
+				   inti->io.io_int_word);
+		if (rc == -EFAULT)
+			exception = 1;
+
+		rc = copy_to_guest(vcpu, __LC_IO_OLD_PSW,
+				   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+		if (rc == -EFAULT)
+			exception = 1;
+
+		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+				     __LC_IO_NEW_PSW, sizeof(psw_t));
+		if (rc == -EFAULT)
+			exception = 1;
+		break;
+	}
 	default:
 		BUG();
 	}
@@ -362,7 +484,7 @@ static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
 	}
 
 	if ((!rc) && (vcpu->arch.sie_block->ckc <
-		get_clock() + vcpu->arch.sie_block->epoch)) {
+		get_tod_clock() + vcpu->arch.sie_block->epoch)) {
 		if ((!psw_extint_disabled(vcpu)) &&
 		    (vcpu->arch.sie_block->gcr[0] & 0x800ul))
 			rc = 1;
@@ -402,13 +524,13 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 		goto no_timer;
 	}
 
-	now = get_clock() + vcpu->arch.sie_block->epoch;
+	now = get_tod_clock() + vcpu->arch.sie_block->epoch;
 	if (vcpu->arch.sie_block->ckc < now) {
 		__unset_cpu_idle(vcpu);
 		return 0;
 	}
 
-	sltime = ((vcpu->arch.sie_block->ckc - now)*125)>>9;
+	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
 
 	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
 	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
@@ -492,7 +614,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 	}
 
 	if ((vcpu->arch.sie_block->ckc <
-		get_clock() + vcpu->arch.sie_block->epoch))
+		get_tod_clock() + vcpu->arch.sie_block->epoch))
 		__try_deliver_ckc_interrupt(vcpu);
 
 	if (atomic_read(&fi->active)) {
@@ -518,6 +640,61 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 	}
 }
 
+void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu)
+{
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
+	struct kvm_s390_interrupt_info *n, *inti = NULL;
+	int deliver;
+
+	__reset_intercept_indicators(vcpu);
+	if (atomic_read(&li->active)) {
+		do {
+			deliver = 0;
+			spin_lock_bh(&li->lock);
+			list_for_each_entry_safe(inti, n, &li->list, list) {
+				if ((inti->type == KVM_S390_MCHK) &&
+				    __interrupt_is_deliverable(vcpu, inti)) {
+					list_del(&inti->list);
+					deliver = 1;
+					break;
+				}
+				__set_intercept_indicator(vcpu, inti);
+			}
+			if (list_empty(&li->list))
+				atomic_set(&li->active, 0);
+			spin_unlock_bh(&li->lock);
+			if (deliver) {
+				__do_deliver_interrupt(vcpu, inti);
+				kfree(inti);
+			}
+		} while (deliver);
+	}
+
+	if (atomic_read(&fi->active)) {
+		do {
+			deliver = 0;
+			spin_lock(&fi->lock);
+			list_for_each_entry_safe(inti, n, &fi->list, list) {
+				if ((inti->type == KVM_S390_MCHK) &&
+				    __interrupt_is_deliverable(vcpu, inti)) {
+					list_del(&inti->list);
+					deliver = 1;
+					break;
+				}
+				__set_intercept_indicator(vcpu, inti);
+			}
+			if (list_empty(&fi->list))
+				atomic_set(&fi->active, 0);
+			spin_unlock(&fi->lock);
+			if (deliver) {
+				__do_deliver_interrupt(vcpu, inti);
+				kfree(inti);
+			}
+		} while (deliver);
+	}
+}
+
 int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
@@ -540,12 +717,50 @@ int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
 	return 0;
 }
 
+struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
+						    u64 cr6, u64 schid)
+{
+	struct kvm_s390_float_interrupt *fi;
+	struct kvm_s390_interrupt_info *inti, *iter;
+
+	if ((!schid && !cr6) || (schid && cr6))
+		return NULL;
+	mutex_lock(&kvm->lock);
+	fi = &kvm->arch.float_int;
+	spin_lock(&fi->lock);
+	inti = NULL;
+	list_for_each_entry(iter, &fi->list, list) {
+		if (!is_ioint(iter->type))
+			continue;
+		if (cr6 &&
+		    ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0))
+			continue;
+		if (schid) {
+			if (((schid & 0x00000000ffff0000) >> 16) !=
+			    iter->io.subchannel_id)
+				continue;
+			if ((schid & 0x000000000000ffff) !=
+			    iter->io.subchannel_nr)
+				continue;
+		}
+		inti = iter;
+		break;
+	}
+	if (inti)
+		list_del_init(&inti->list);
+	if (list_empty(&fi->list))
+		atomic_set(&fi->active, 0);
+	spin_unlock(&fi->lock);
+	mutex_unlock(&kvm->lock);
+	return inti;
+}
+
 int kvm_s390_inject_vm(struct kvm *kvm,
 		       struct kvm_s390_interrupt *s390int)
 {
 	struct kvm_s390_local_interrupt *li;
 	struct kvm_s390_float_interrupt *fi;
-	struct kvm_s390_interrupt_info *inti;
+	struct kvm_s390_interrupt_info *inti, *iter;
 	int sigcpu;
 
 	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
@@ -569,6 +784,29 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 	case KVM_S390_SIGP_STOP:
 	case KVM_S390_INT_EXTERNAL_CALL:
 	case KVM_S390_INT_EMERGENCY:
+		kfree(inti);
+		return -EINVAL;
+	case KVM_S390_MCHK:
+		VM_EVENT(kvm, 5, "inject: machine check parm64:%llx",
+			 s390int->parm64);
+		inti->type = s390int->type;
+		inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
+		inti->mchk.mcic = s390int->parm64;
+		break;
+	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+		if (s390int->type & IOINT_AI_MASK)
+			VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
+		else
+			VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
+				 s390int->type & IOINT_CSSID_MASK,
+				 s390int->type & IOINT_SSID_MASK,
+				 s390int->type & IOINT_SCHID_MASK);
+		inti->type = s390int->type;
+		inti->io.subchannel_id = s390int->parm >> 16;
+		inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
+		inti->io.io_int_parm = s390int->parm64 >> 32;
+		inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
+		break;
 	default:
 		kfree(inti);
 		return -EINVAL;
@@ -579,7 +817,22 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 	mutex_lock(&kvm->lock);
 	fi = &kvm->arch.float_int;
 	spin_lock(&fi->lock);
-	list_add_tail(&inti->list, &fi->list);
+	if (!is_ioint(inti->type))
+		list_add_tail(&inti->list, &fi->list);
+	else {
+		u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word);
+
+		/* Keep I/O interrupts sorted in isc order. */
+		list_for_each_entry(iter, &fi->list, list) {
+			if (!is_ioint(iter->type))
+				continue;
+			if (int_word_to_isc_bits(iter->io.io_int_word)
+			    <= isc_bits)
+				continue;
+			break;
+		}
+		list_add_tail(&inti->list, &iter->list);
+	}
 	atomic_set(&fi->active, 1);
 	sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
 	if (sigcpu == KVM_MAX_VCPUS) {
@@ -651,8 +904,15 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
 		inti->type = s390int->type;
 		inti->emerg.code = s390int->parm;
 		break;
+	case KVM_S390_MCHK:
+		VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
+			   s390int->parm64);
+		inti->type = s390int->type;
+		inti->mchk.mcic = s390int->parm64;
+		break;
 	case KVM_S390_INT_VIRTIO:
 	case KVM_S390_INT_SERVICE:
+	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
 	default:
 		kfree(inti);
 		return -EINVAL;
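[Note: both the deliverability check and the sorted queue insertion above hinge on int_word_to_isc_bits(), which maps the 3-bit interruption subclass (bits 2-4 of the I/O interruption word) to the per-ISC enablement bit in CR6. A standalone sketch that just reproduces that mapping:

#include <stdint.h>
#include <stdio.h>

/* Mirror of int_word_to_isc_bits() from the diff above: ISC 0 maps to
 * the most significant CR6 bit in use (0x80000000), ISC 7 to the least
 * (0x01000000). */
static uint64_t int_word_to_isc_bits(uint32_t int_word)
{
	uint8_t isc = (int_word & 0x38000000) >> 27;

	return (uint64_t)(0x80 >> isc) << 24;
}

int main(void)
{
	for (unsigned int isc = 0; isc < 8; isc++)
		printf("ISC %u -> CR6 mask 0x%08llx\n", isc,
		       (unsigned long long)int_word_to_isc_bits(isc << 27));
	return 0;
}

Because insertion keeps the floating list sorted by these bits, higher-priority subclasses are always dequeued first by kvm_s390_get_io_int().]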
Lets update our copies before we save + * it into the save area + */ + save_fp_regs(&vcpu->arch.guest_fpregs); + save_access_regs(vcpu->run->s.regs.acrs); + if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs), vcpu->arch.guest_fpregs.fprs, 128, prefix)) return -EFAULT; @@ -808,6 +824,29 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) return 0; } +static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, + struct kvm_enable_cap *cap) +{ + int r; + + if (cap->flags) + return -EINVAL; + + switch (cap->cap) { + case KVM_CAP_S390_CSS_SUPPORT: + if (!vcpu->kvm->arch.css_support) { + vcpu->kvm->arch.css_support = 1; + trace_kvm_s390_enable_css(vcpu->kvm); + } + r = 0; + break; + default: + r = -EINVAL; + break; + } + return r; +} + long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { @@ -894,6 +933,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = 0; break; } + case KVM_ENABLE_CAP: + { + struct kvm_enable_cap cap; + r = -EFAULT; + if (copy_from_user(&cap, argp, sizeof(cap))) + break; + r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); + break; + } default: r = -ENOTTY; } @@ -928,7 +976,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, struct kvm_memory_slot old, struct kvm_userspace_memory_region *mem, - int user_alloc) + bool user_alloc) { /* A few sanity checks. We can have exactly one memory slot which has to start at guest virtual zero and which has to be located at a @@ -958,7 +1006,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, void kvm_arch_commit_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, struct kvm_memory_slot old, - int user_alloc) + bool user_alloc) { int rc; diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index d75bc5e92c5b..4d89d64a8161 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -65,21 +65,67 @@ static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix) vcpu->arch.sie_block->ihcpu = 0xffff; } +static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu) +{ + u32 base2 = vcpu->arch.sie_block->ipb >> 28; + u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); + + return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2; +} + +static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu, + u64 *address1, u64 *address2) +{ + u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28; + u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16; + u32 base2 = (vcpu->arch.sie_block->ipb & 0xf000) >> 12; + u32 disp2 = vcpu->arch.sie_block->ipb & 0x0fff; + + *address1 = (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1; + *address2 = (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2; +} + +static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu) +{ + u32 base2 = vcpu->arch.sie_block->ipb >> 28; + u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) + + ((vcpu->arch.sie_block->ipb & 0xff00) << 4); + /* The displacement is a 20bit _SIGNED_ value */ + if (disp2 & 0x80000) + disp2+=0xfff00000; + + return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2; +} + +static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu) +{ + u32 base2 = vcpu->arch.sie_block->ipb >> 28; + u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); + + return (base2 ? 
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index d75bc5e92c5b..4d89d64a8161 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -65,21 +65,67 @@ static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
 	vcpu->arch.sie_block->ihcpu  = 0xffff;
 }
 
+static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu)
+{
+	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
+	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
+
+	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
+}
+
+static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
+					      u64 *address1, u64 *address2)
+{
+	u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
+	u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
+	u32 base2 = (vcpu->arch.sie_block->ipb & 0xf000) >> 12;
+	u32 disp2 = vcpu->arch.sie_block->ipb & 0x0fff;
+
+	*address1 = (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1;
+	*address2 = (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
+}
+
+static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu)
+{
+	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
+	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
+			((vcpu->arch.sie_block->ipb & 0xff00) << 4);
+
+	/* The displacement is a 20-bit _SIGNED_ value */
+	if (disp2 & 0x80000)
+		disp2 += 0xfff00000;
+
+	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2;
+}
+
+static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu)
+{
+	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
+	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
+
+	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
+}
+
 int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
 enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
 void kvm_s390_tasklet(unsigned long parm);
 void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
+void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu);
 int kvm_s390_inject_vm(struct kvm *kvm,
 		struct kvm_s390_interrupt *s390int);
 int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
 		struct kvm_s390_interrupt *s390int);
 int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
 int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action);
+struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
+						    u64 cr6, u64 schid);
 
 /* implemented in priv.c */
 int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_01(struct kvm_vcpu *vcpu);
+int kvm_s390_handle_b9(struct kvm_vcpu *vcpu);
+int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu);
+int kvm_s390_handle_priv_eb(struct kvm_vcpu *vcpu);
 
 /* implemented in sigp.c */
 int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
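[Note: of the helpers above, only the RSY variant is subtle: the 20-bit displacement is split into DL (12 bits) and DH (8 bits) inside the ipb field and must be sign-extended. A small self-checking sketch of that decode, reusing the same bit layout (two's-complement int32_t conversion assumed, as on every platform the kernel supports):

#include <assert.h>
#include <stdint.h>

static int64_t rsy_disp(uint32_t ipb)
{
	/* DL in ipb bits 16-27, DH in ipb bits 4-11, per the diff. */
	uint32_t disp2 = ((ipb & 0x0fff0000) >> 16) +
			 ((ipb & 0xff00) << 4);

	if (disp2 & 0x80000)		/* sign bit of the 20-bit value */
		disp2 += 0xfff00000;
	return (int64_t)(int32_t)disp2;
}

int main(void)
{
	/* DH = 0xff, DL = 0xfff -> displacement -1 */
	assert(rsy_disp(0x0fffff00) == -1);
	/* DH = 0x00, DL = 0x008 -> displacement +8 */
	assert(rsy_disp(0x00080000) == 8);
	return 0;
}

The S and RS formats need no such care: their 12-bit displacement is always unsigned.]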
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index d768906f15c8..0ef9894606e5 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -18,23 +18,21 @@
 #include <asm/debug.h>
 #include <asm/ebcdic.h>
 #include <asm/sysinfo.h>
+#include <asm/ptrace.h>
+#include <asm/compat.h>
 #include "gaccess.h"
 #include "kvm-s390.h"
 #include "trace.h"
 
 static int handle_set_prefix(struct kvm_vcpu *vcpu)
 {
-	int base2 = vcpu->arch.sie_block->ipb >> 28;
-	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
 	u64 operand2;
 	u32 address = 0;
 	u8 tmp;
 
 	vcpu->stat.instruction_spx++;
 
-	operand2 = disp2;
-	if (base2)
-		operand2 += vcpu->run->s.regs.gprs[base2];
+	operand2 = kvm_s390_get_base_disp_s(vcpu);
 
 	/* must be word boundary */
 	if (operand2 & 3) {
@@ -67,15 +65,12 @@ out:
 
 static int handle_store_prefix(struct kvm_vcpu *vcpu)
 {
-	int base2 = vcpu->arch.sie_block->ipb >> 28;
-	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
 	u64 operand2;
 	u32 address;
 
 	vcpu->stat.instruction_stpx++;
-	operand2 = disp2;
-	if (base2)
-		operand2 += vcpu->run->s.regs.gprs[base2];
+
+	operand2 = kvm_s390_get_base_disp_s(vcpu);
 
 	/* must be word boundary */
 	if (operand2 & 3) {
@@ -100,15 +95,12 @@ out:
 
 static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
 {
-	int base2 = vcpu->arch.sie_block->ipb >> 28;
-	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
 	u64 useraddr;
 	int rc;
 
 	vcpu->stat.instruction_stap++;
-	useraddr = disp2;
-	if (base2)
-		useraddr += vcpu->run->s.regs.gprs[base2];
+
+	useraddr = kvm_s390_get_base_disp_s(vcpu);
 
 	if (useraddr & 1) {
 		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -135,24 +127,96 @@ static int handle_skey(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-static int handle_stsch(struct kvm_vcpu *vcpu)
+static int handle_tpi(struct kvm_vcpu *vcpu)
 {
-	vcpu->stat.instruction_stsch++;
-	VCPU_EVENT(vcpu, 4, "%s", "store subchannel - CC3");
-	/* condition code 3 */
+	u64 addr;
+	struct kvm_s390_interrupt_info *inti;
+	int cc;
+
+	addr = kvm_s390_get_base_disp_s(vcpu);
+
+	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->run->s.regs.crs[6], 0);
+	if (inti) {
+		if (addr) {
+			/*
+			 * Store the two-word I/O interruption code into the
+			 * provided area.
+			 */
+			put_guest_u16(vcpu, addr, inti->io.subchannel_id);
+			put_guest_u16(vcpu, addr + 2, inti->io.subchannel_nr);
+			put_guest_u32(vcpu, addr + 4, inti->io.io_int_parm);
+		} else {
+			/*
+			 * Store the three-word I/O interruption code into
+			 * the appropriate lowcore area.
+			 */
+			put_guest_u16(vcpu, 184, inti->io.subchannel_id);
+			put_guest_u16(vcpu, 186, inti->io.subchannel_nr);
+			put_guest_u32(vcpu, 188, inti->io.io_int_parm);
+			put_guest_u32(vcpu, 192, inti->io.io_int_word);
+		}
+		cc = 1;
+	} else
+		cc = 0;
+	kfree(inti);
+	/* Set condition code and we're done. */
 	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
-	vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
+	vcpu->arch.sie_block->gpsw.mask |= (cc & 3ul) << 44;
 	return 0;
 }
 
-static int handle_chsc(struct kvm_vcpu *vcpu)
+static int handle_tsch(struct kvm_vcpu *vcpu)
 {
-	vcpu->stat.instruction_chsc++;
-	VCPU_EVENT(vcpu, 4, "%s", "channel subsystem call - CC3");
-	/* condition code 3 */
-	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
-	vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
-	return 0;
+	struct kvm_s390_interrupt_info *inti;
+
+	inti = kvm_s390_get_io_int(vcpu->kvm, 0,
+				   vcpu->run->s.regs.gprs[1]);
+
+	/*
+	 * Prepare exit to userspace.
+	 * We indicate whether we dequeued a pending I/O interrupt
+	 * so that userspace can re-inject it if the instruction gets
+	 * a program check. While this may re-order the pending I/O
+	 * interrupts, this is no problem since the priority is kept
+	 * intact.
+	 */
+	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
+	vcpu->run->s390_tsch.dequeued = !!inti;
+	if (inti) {
+		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
+		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
+		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
+		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
+	}
+	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
+	kfree(inti);
+	return -EREMOTE;
+}
+
+static int handle_io_inst(struct kvm_vcpu *vcpu)
+{
+	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");
+
+	if (vcpu->kvm->arch.css_support) {
+		/*
+		 * Most I/O instructions will be handled by userspace.
+		 * Exceptions are tpi and the interrupt portion of tsch.
+		 */
+		if (vcpu->arch.sie_block->ipa == 0xb236)
+			return handle_tpi(vcpu);
+		if (vcpu->arch.sie_block->ipa == 0xb235)
+			return handle_tsch(vcpu);
+		/* Handle in userspace. */
+		return -EOPNOTSUPP;
+	} else {
+		/*
+		 * Set condition code 3 to stop the guest from issuing channel
+		 * I/O instructions.
+		 */
+		vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
+		vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
+		return 0;
+	}
 }
 
 static int handle_stfl(struct kvm_vcpu *vcpu)
@@ -176,17 +240,107 @@ static int handle_stfl(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+static void handle_new_psw(struct kvm_vcpu *vcpu)
+{
+	/* Check whether the new psw is enabled for machine checks. */
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK)
+		kvm_s390_deliver_pending_machine_checks(vcpu);
+}
+
+#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
+#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
+#define PSW_ADDR_24 0x00000000000fffffUL
+#define PSW_ADDR_31 0x000000007fffffffUL
+
+int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
+{
+	u64 addr;
+	psw_compat_t new_psw;
+
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu,
+						   PGM_PRIVILEGED_OPERATION);
+
+	addr = kvm_s390_get_base_disp_s(vcpu);
+
+	if (addr & 7) {
+		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+		goto out;
+	}
+
+	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) {
+		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+		goto out;
+	}
+
+	if (!(new_psw.mask & PSW32_MASK_BASE)) {
+		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+		goto out;
+	}
+
+	vcpu->arch.sie_block->gpsw.mask =
+		(new_psw.mask & ~PSW32_MASK_BASE) << 32;
+	vcpu->arch.sie_block->gpsw.addr = new_psw.addr;
+
+	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) ||
+	    (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) &&
+	     (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) ||
+	    ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
+	     PSW_MASK_EA)) {
+		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+		goto out;
+	}
+
+	handle_new_psw(vcpu);
+out:
+	return 0;
+}
+
+static int handle_lpswe(struct kvm_vcpu *vcpu)
+{
+	u64 addr;
+	psw_t new_psw;
+
+	addr = kvm_s390_get_base_disp_s(vcpu);
+
+	if (addr & 7) {
+		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+		goto out;
+	}
+
+	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) {
+		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+		goto out;
+	}
+
+	vcpu->arch.sie_block->gpsw.mask = new_psw.mask;
+	vcpu->arch.sie_block->gpsw.addr = new_psw.addr;
+
+	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) ||
+	    (((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
+	      PSW_MASK_BA) &&
+	     (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_31)) ||
+	    (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) &&
+	     (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) ||
+	    ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
+	     PSW_MASK_EA)) {
+		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+		goto out;
+	}
+
+	handle_new_psw(vcpu);
+out:
+	return 0;
+}
+
 static int handle_stidp(struct kvm_vcpu *vcpu)
 {
-	int base2 = vcpu->arch.sie_block->ipb >> 28;
-	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
 	u64 operand2;
 	int rc;
 
 	vcpu->stat.instruction_stidp++;
-	operand2 = disp2;
-	if (base2)
-		operand2 += vcpu->run->s.regs.gprs[base2];
+
+	operand2 = kvm_s390_get_base_disp_s(vcpu);
 
 	if (operand2 & 7) {
 		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -240,17 +394,13 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
 	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
 	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
 	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
-	int base2 = vcpu->arch.sie_block->ipb >> 28;
-	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
 	u64 operand2;
 	unsigned long mem;
 
 	vcpu->stat.instruction_stsi++;
 	VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);
 
-	operand2 = disp2;
-	if (base2)
-		operand2 += vcpu->run->s.regs.gprs[base2];
+	operand2 = kvm_s390_get_base_disp_s(vcpu);
 
 	if (operand2 & 0xfff && fc > 0)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -297,7 +447,7 @@ out_fail:
 	return 0;
 }
 
-static intercept_handler_t priv_handlers[256] = {
+static const intercept_handler_t b2_handlers[256] = {
 	[0x02] = handle_stidp,
 	[0x10] = handle_set_prefix,
 	[0x11] = handle_store_prefix,
@@ -305,10 +455,25 @@ static intercept_handler_t priv_handlers[256] = {
 	[0x29] = handle_skey,
 	[0x2a] = handle_skey,
 	[0x2b] = handle_skey,
-	[0x34] = handle_stsch,
-	[0x5f] = handle_chsc,
+	[0x30] = handle_io_inst,
+	[0x31] = handle_io_inst,
+	[0x32] = handle_io_inst,
+	[0x33] = handle_io_inst,
+	[0x34] = handle_io_inst,
+	[0x35] = handle_io_inst,
+	[0x36] = handle_io_inst,
+	[0x37] = handle_io_inst,
+	[0x38] = handle_io_inst,
+	[0x39] = handle_io_inst,
+	[0x3a] = handle_io_inst,
+	[0x3b] = handle_io_inst,
+	[0x3c] = handle_io_inst,
+	[0x5f] = handle_io_inst,
+	[0x74] = handle_io_inst,
+	[0x76] = handle_io_inst,
 	[0x7d] = handle_stsi,
 	[0xb1] = handle_stfl,
+	[0xb2] = handle_lpswe,
 };
 
 int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
@@ -322,7 +487,7 @@ int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
 	 * state bit and (a) handle the instruction or (b) send a code 2
 	 * program check.
 	 * Anything else goes to userspace.*/
-	handler = priv_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
+	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
 	if (handler) {
 		if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 			return kvm_s390_inject_program_int(vcpu,
@@ -333,19 +498,74 @@ int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
 	return -EOPNOTSUPP;
 }
 
+static int handle_epsw(struct kvm_vcpu *vcpu)
+{
+	int reg1, reg2;
+
+	reg1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 24;
+	reg2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
+
+	/* This basically extracts the mask half of the psw. */
+	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000;
+	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
+	if (reg2) {
+		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000;
+		vcpu->run->s.regs.gprs[reg2] |=
+			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffff;
+	}
+	return 0;
+}
+
+static const intercept_handler_t b9_handlers[256] = {
+	[0x8d] = handle_epsw,
+	[0x9c] = handle_io_inst,
+};
+
+int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
+{
+	intercept_handler_t handler;
+
+	/* This is handled just as for the B2 instructions. */
+	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
+	if (handler) {
+		if ((handler != handle_epsw) &&
+		    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE))
+			return kvm_s390_inject_program_int(vcpu,
+							   PGM_PRIVILEGED_OPERATION);
+		else
+			return handler(vcpu);
+	}
+	return -EOPNOTSUPP;
+}
+
+static const intercept_handler_t eb_handlers[256] = {
+	[0x8a] = handle_io_inst,
+};
+
+int kvm_s390_handle_priv_eb(struct kvm_vcpu *vcpu)
+{
+	intercept_handler_t handler;
+
+	/* All eb instructions that end up here are privileged. */
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu,
+						   PGM_PRIVILEGED_OPERATION);
+	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
+	if (handler)
+		return handler(vcpu);
+	return -EOPNOTSUPP;
+}
+
 static int handle_tprot(struct kvm_vcpu *vcpu)
 {
-	int base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
-	int disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
-	int base2 = (vcpu->arch.sie_block->ipb & 0xf000) >> 12;
-	int disp2 = vcpu->arch.sie_block->ipb & 0x0fff;
-	u64 address1 = disp1 + base1 ? vcpu->run->s.regs.gprs[base1] : 0;
-	u64 address2 = disp2 + base2 ? vcpu->run->s.regs.gprs[base2] : 0;
+	u64 address1, address2;
 	struct vm_area_struct *vma;
 	unsigned long user_address;
 
 	vcpu->stat.instruction_tprot++;
 
+	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);
+
 	/* we only handle the Linux memory detection case:
 	 * access key == 0
 	 * guest DAT == off
@@ -405,7 +625,7 @@ static int handle_sckpf(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-static intercept_handler_t x01_handlers[256] = {
+static const intercept_handler_t x01_handlers[256] = {
 	[0x07] = handle_sckpf,
 };
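[Note: the PSW checks shared by kvm_s390_handle_lpsw() and handle_lpswe() encode the architecture's validity rules: no unassigned mask bits may be set, EA without BA is invalid, and the instruction address must fit the selected addressing mode. A compact sketch of just the 64-bit (lpswe) rules, reusing the mask constants from the diff; the PSW_MASK_EA/PSW_MASK_BA values are taken from the kernel's ptrace headers, not from this diff:

#include <stdint.h>
#include <stdio.h>

#define PSW_MASK_EA		0x0000000100000000ull
#define PSW_MASK_BA		0x0000000080000000ull
#define PSW_MASK_ADDR_MODE	(PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED	0xb80800fe7fffffffull
#define PSW_ADDR_24		0x00000000000fffffull
#define PSW_ADDR_31		0x000000007fffffffull

/* 0 = valid PSW, -1 = must raise a specification exception. */
static int psw_valid(uint64_t mask, uint64_t addr)
{
	if (mask & PSW_MASK_UNASSIGNED)
		return -1;	/* unassigned mask bit set */
	if (((mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) &&
	    (addr & ~PSW_ADDR_31))
		return -1;	/* 31-bit mode, address too large */
	if (!(mask & PSW_MASK_ADDR_MODE) && (addr & ~PSW_ADDR_24))
		return -1;	/* 24-bit mode, address too large */
	if ((mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return -1;	/* EA without BA is invalid */
	return 0;
}

int main(void)
{
	/* 64-bit mode, any address: valid (prints 0). */
	printf("%d\n", psw_valid(PSW_MASK_EA | PSW_MASK_BA, 0x100000));
	/* EA set without BA: invalid (prints -1). */
	printf("%d\n", psw_valid(PSW_MASK_EA, 0));
	return 0;
}

handle_new_psw() then runs after every accepted PSW load, so a guest that switches machine checks on immediately receives anything queued while they were masked.]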
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 566ddf6e8dfb..1c48ab2845e0 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -137,8 +137,10 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
 	inti->type = KVM_S390_SIGP_STOP;
 
 	spin_lock_bh(&li->lock);
-	if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED))
+	if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
+		kfree(inti);
 		goto out;
+	}
 	list_add_tail(&inti->list, &li->list);
 	atomic_set(&li->active, 1);
 	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
@@ -324,8 +326,6 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
 {
 	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
 	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
-	int base2 = vcpu->arch.sie_block->ipb >> 28;
-	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
 	u32 parameter;
 	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
 	u8 order_code;
@@ -336,9 +336,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
 		return kvm_s390_inject_program_int(vcpu,
 						   PGM_PRIVILEGED_OPERATION);
 
-	order_code = disp2;
-	if (base2)
-		order_code += vcpu->run->s.regs.gprs[base2];
+	order_code = kvm_s390_get_base_disp_rs(vcpu);
 
 	if (r1 % 2)
 		parameter = vcpu->run->s.regs.gprs[r1];
diff --git a/arch/s390/kvm/trace-s390.h b/arch/s390/kvm/trace-s390.h
index 90fdf85b5ff7..13f30f58a2df 100644
--- a/arch/s390/kvm/trace-s390.h
+++ b/arch/s390/kvm/trace-s390.h
@@ -141,13 +141,13 @@ TRACE_EVENT(kvm_s390_inject_vcpu,
  * Trace point for the actual delivery of interrupts.
  */
 TRACE_EVENT(kvm_s390_deliver_interrupt,
-	    TP_PROTO(unsigned int id, __u64 type, __u32 data0, __u64 data1),
+	    TP_PROTO(unsigned int id, __u64 type, __u64 data0, __u64 data1),
 	    TP_ARGS(id, type, data0, data1),
 
 	    TP_STRUCT__entry(
 		    __field(int, id)
 		    __field(__u32, inttype)
-		    __field(__u32, data0)
+		    __field(__u64, data0)
 		    __field(__u64, data1)
 		    ),
 
@@ -159,7 +159,7 @@ TRACE_EVENT(kvm_s390_deliver_interrupt,
 		    ),
 
 	    TP_printk("deliver interrupt (vcpu %d): type:%x (%s) "	\
-		      "data:%08x %016llx",
+		      "data:%08llx %016llx",
 		      __entry->id, __entry->inttype,
 		      __print_symbolic(__entry->inttype, kvm_s390_int_type),
 		      __entry->data0, __entry->data1)
@@ -204,6 +204,26 @@ TRACE_EVENT(kvm_s390_stop_request,
 	);
 
 
+/*
+ * Trace point for enabling channel I/O instruction support.
+ */
+TRACE_EVENT(kvm_s390_enable_css,
+	    TP_PROTO(void *kvm),
+	    TP_ARGS(kvm),
+
+	    TP_STRUCT__entry(
+		    __field(void *, kvm)
+		    ),
+
+	    TP_fast_assign(
+		    __entry->kvm = kvm;
+		    ),
+
+	    TP_printk("enabling channel I/O support (kvm @ %p)\n",
+		      __entry->kvm)
+	);
+
+
 #endif /* _TRACE_KVMS390_H */
 
 /* This part must be outside protection */