author     Arnd Bergmann <arnd@arndb.de>  2009-06-12 09:53:47 +0200
committer  Arnd Bergmann <arnd@arndb.de>  2009-06-12 11:32:58 +0200
commit     5b02ee3d219f9e01b6e9146e25613822cfc2e5ce (patch)
tree       7ce9126738c3cf4b37d67170d0e4b34818c057a9 /arch/s390
parent     26a28fa4fea5b8c65713aa50c124f76a88c7924d (diff)
parent     8ebf975608aaebd7feb33d77f07ba21a6380e086 (diff)
asm-generic: merge branch 'master' of torvalds/linux-2.6
Fixes a merge conflict against the x86 tree caused by a fix to
atomic.h which I renamed to atomic_long.h.
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/include/asm/kvm_host.h  |  5
-rw-r--r--  arch/s390/kvm/intercept.c         | 28
-rw-r--r--  arch/s390/kvm/interrupt.c         | 59
-rw-r--r--  arch/s390/kvm/kvm-s390.c          | 63
-rw-r--r--  arch/s390/kvm/kvm-s390.h          |  4
-rw-r--r--  arch/s390/kvm/priv.c              |  4
-rw-r--r--  arch/s390/kvm/sigp.c              | 16
7 files changed, 126 insertions, 53 deletions
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 54ea39f96ecd..a27d0d5a6f86 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -13,6 +13,8 @@
 #ifndef ASM_KVM_HOST_H
 #define ASM_KVM_HOST_H
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
 #include <linux/kvm_host.h>
 #include <asm/debug.h>
 #include <asm/cpuid.h>
@@ -210,7 +212,8 @@ struct kvm_vcpu_arch {
 	s390_fp_regs guest_fpregs;
 	unsigned int guest_acrs[NUM_ACRS];
 	struct kvm_s390_local_interrupt local_int;
-	struct timer_list ckc_timer;
+	struct hrtimer ckc_timer;
+	struct tasklet_struct tasklet;
 	union {
 		cpuid_t cpu_id;
 		u64 stidp_data;
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 9d19803111ba..98997ccba501 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -154,17 +154,25 @@ static int handle_stop(struct kvm_vcpu *vcpu)
 static int handle_validity(struct kvm_vcpu *vcpu)
 {
 	int viwhy = vcpu->arch.sie_block->ipb >> 16;
+	int rc;
+
 	vcpu->stat.exit_validity++;
-	if (viwhy == 0x37) {
-		fault_in_pages_writeable((char __user *)
-			vcpu->kvm->arch.guest_origin +
-			vcpu->arch.sie_block->prefix,
-			PAGE_SIZE);
-		return 0;
-	}
-	VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d",
-		   viwhy);
-	return -ENOTSUPP;
+	if ((viwhy == 0x37) && (vcpu->arch.sie_block->prefix
+		<= vcpu->kvm->arch.guest_memsize - 2*PAGE_SIZE)){
+		rc = fault_in_pages_writeable((char __user *)
+			vcpu->kvm->arch.guest_origin +
+			vcpu->arch.sie_block->prefix,
+			2*PAGE_SIZE);
+		if (rc)
+			/* user will receive sigsegv, exit to user */
+			rc = -ENOTSUPP;
+	} else
+		rc = -ENOTSUPP;
+
+	if (rc)
+		VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d",
+			   viwhy);
+	return rc;
 }
 
 static int handle_instruction(struct kvm_vcpu *vcpu)
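A note on the handle_validity() rework above: intercept reason 0x37 means the guest's prefix area was not accessible. On 64-bit s390 the prefix area (the lowcore) spans 8KB, that is two 4KB pages, which is why the new code bounds-checks the prefix against guest_memsize - 2*PAGE_SIZE and faults in 2*PAGE_SIZE writable rather than a single page. A minimal sketch of that check; the helper name is illustrative, not from the patch, and it assumes at least 8KB of guest memory is registered (otherwise the unsigned subtraction would wrap):

	#include <linux/types.h>
	#include <asm/page.h>	/* PAGE_SIZE */

	/* Both 4KB pages of the 8KB prefix area must lie inside the
	 * registered guest memory before they can be faulted in. */
	static inline int prefix_area_in_guest(u32 prefix,
					       unsigned long guest_memsize)
	{
		return prefix <= guest_memsize - 2 * PAGE_SIZE;
	}

If the check or the fault-in fails, the handler returns -ENOTSUPP so the condition is reported to userspace (the hunk's comment notes the user will receive SIGSEGV) instead of re-entering SIE with an unusable prefix.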
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 0189356fe209..f04f5301b1b4 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -12,6 +12,8 @@
 
 #include <asm/lowcore.h>
 #include <asm/uaccess.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
 #include <linux/kvm_host.h>
 #include <linux/signal.h>
 #include "kvm-s390.h"
@@ -299,13 +301,13 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
 	}
 
 	if ((!rc) && atomic_read(&fi->active)) {
-		spin_lock_bh(&fi->lock);
+		spin_lock(&fi->lock);
 		list_for_each_entry(inti, &fi->list, list)
 			if (__interrupt_is_deliverable(vcpu, inti)) {
 				rc = 1;
 				break;
 			}
-		spin_unlock_bh(&fi->lock);
+		spin_unlock(&fi->lock);
 	}
 
 	if ((!rc) && (vcpu->arch.sie_block->ckc <
@@ -318,6 +320,12 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
 	return rc;
 }
 
+int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
+{
+	/* do real check here */
+	return 1;
+}
+
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 {
 	return 0;
@@ -355,14 +363,12 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 		return 0;
 	}
 
-	sltime = (vcpu->arch.sie_block->ckc - now) / (0xf4240000ul / HZ) + 1;
+	sltime = ((vcpu->arch.sie_block->ckc - now)*125)>>9;
 
-	vcpu->arch.ckc_timer.expires = jiffies + sltime;
-
-	add_timer(&vcpu->arch.ckc_timer);
-	VCPU_EVENT(vcpu, 5, "enabled wait timer:%llx jiffies", sltime);
+	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
+	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
 no_timer:
-	spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
+	spin_lock(&vcpu->arch.local_int.float_int->lock);
 	spin_lock_bh(&vcpu->arch.local_int.lock);
 	add_wait_queue(&vcpu->arch.local_int.wq, &wait);
 	while (list_empty(&vcpu->arch.local_int.list) &&
@@ -371,33 +377,46 @@ no_timer:
 		!signal_pending(current)) {
 		set_current_state(TASK_INTERRUPTIBLE);
 		spin_unlock_bh(&vcpu->arch.local_int.lock);
-		spin_unlock_bh(&vcpu->arch.local_int.float_int->lock);
+		spin_unlock(&vcpu->arch.local_int.float_int->lock);
 		vcpu_put(vcpu);
 		schedule();
 		vcpu_load(vcpu);
-		spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
+		spin_lock(&vcpu->arch.local_int.float_int->lock);
 		spin_lock_bh(&vcpu->arch.local_int.lock);
 	}
 	__unset_cpu_idle(vcpu);
 	__set_current_state(TASK_RUNNING);
 	remove_wait_queue(&vcpu->wq, &wait);
 	spin_unlock_bh(&vcpu->arch.local_int.lock);
-	spin_unlock_bh(&vcpu->arch.local_int.float_int->lock);
-	del_timer(&vcpu->arch.ckc_timer);
+	spin_unlock(&vcpu->arch.local_int.float_int->lock);
+	hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
 	return 0;
 }
 
-void kvm_s390_idle_wakeup(unsigned long data)
+void kvm_s390_tasklet(unsigned long parm)
 {
-	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
+	struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm;
 
-	spin_lock_bh(&vcpu->arch.local_int.lock);
+	spin_lock(&vcpu->arch.local_int.lock);
 	vcpu->arch.local_int.timer_due = 1;
 	if (waitqueue_active(&vcpu->arch.local_int.wq))
 		wake_up_interruptible(&vcpu->arch.local_int.wq);
-	spin_unlock_bh(&vcpu->arch.local_int.lock);
+	spin_unlock(&vcpu->arch.local_int.lock);
 }
 
+/*
+ * low level hrtimer wake routine. Because this runs in hardirq context
+ * we schedule a tasklet to do the real work.
+ */
+enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
+{
+	struct kvm_vcpu *vcpu;
+
+	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
+	tasklet_schedule(&vcpu->arch.tasklet);
+
+	return HRTIMER_NORESTART;
+}
 
 void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 {
@@ -436,7 +455,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 	if (atomic_read(&fi->active)) {
 		do {
 			deliver = 0;
-			spin_lock_bh(&fi->lock);
+			spin_lock(&fi->lock);
 			list_for_each_entry_safe(inti, n, &fi->list, list) {
 				if (__interrupt_is_deliverable(vcpu, inti)) {
 					list_del(&inti->list);
@@ -447,7 +466,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 			}
 			if (list_empty(&fi->list))
 				atomic_set(&fi->active, 0);
-			spin_unlock_bh(&fi->lock);
+			spin_unlock(&fi->lock);
 			if (deliver) {
 				__do_deliver_interrupt(vcpu, inti);
 				kfree(inti);
@@ -512,7 +531,7 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 
 	mutex_lock(&kvm->lock);
 	fi = &kvm->arch.float_int;
-	spin_lock_bh(&fi->lock);
+	spin_lock(&fi->lock);
 	list_add_tail(&inti->list, &fi->list);
 	atomic_set(&fi->active, 1);
 	sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
@@ -529,7 +548,7 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 		if (waitqueue_active(&li->wq))
 			wake_up_interruptible(&li->wq);
 		spin_unlock_bh(&li->lock);
-	spin_unlock_bh(&fi->lock);
+	spin_unlock(&fi->lock);
 	mutex_unlock(&kvm->lock);
 	return 0;
 }
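Two details of the interrupt.c conversion above deserve a note. First, the wakeup delay: the s390 TOD clock increments bit 51 once per microsecond, so one unit of the 64-bit TOD value is 2^-12 us = 1000/4096 ns = 125/512 ns; multiplying a TOD delta by 125 and shifting right by 9 therefore converts it to nanoseconds, which is exactly what ((ckc - now)*125)>>9 computes. Second, hrtimer callbacks run in hardirq context, so kvm_s390_idle_wakeup() only schedules a tasklet, and the real wakeup work runs later in softirq context. A self-contained sketch of the whole pattern, using the hrtimer/tasklet APIs of this kernel era (tasklets still take an unsigned long argument); all demo_* names are hypothetical:

	#include <linux/hrtimer.h>
	#include <linux/interrupt.h>
	#include <linux/ktime.h>
	#include <linux/types.h>

	struct wakeup_demo {		/* stands in for kvm_vcpu_arch */
		struct hrtimer timer;
		struct tasklet_struct tasklet;
	};

	/* 125/512 ns per TOD unit; fine for wakeup-sized deltas, but the
	 * multiplication would overflow for deltas beyond roughly 487 days */
	static inline u64 tod_delta_to_ns(u64 tod)
	{
		return (tod * 125) >> 9;
	}

	static void demo_tasklet_fn(unsigned long parm)
	{
		/* softirq context: the safe place for the real wakeup work */
	}

	static enum hrtimer_restart demo_wakeup(struct hrtimer *timer)
	{
		struct wakeup_demo *d =
			container_of(timer, struct wakeup_demo, timer);

		/* hardirq context: defer everything to the tasklet */
		tasklet_schedule(&d->tasklet);
		return HRTIMER_NORESTART;
	}

	static void demo_arm(struct wakeup_demo *d, u64 tod_delta)
	{
		hrtimer_init(&d->timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
		d->timer.function = demo_wakeup;
		tasklet_init(&d->tasklet, demo_tasklet_fn, (unsigned long) d);
		hrtimer_start(&d->timer,
			      ktime_set(0, tod_delta_to_ns(tod_delta)),
			      HRTIMER_MODE_REL);
	}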
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index f4d56e9939c9..10bccd1f8aee 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -15,6 +15,7 @@
 #include <linux/compiler.h>
 #include <linux/err.h>
 #include <linux/fs.h>
+#include <linux/hrtimer.h>
 #include <linux/init.h>
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
@@ -195,6 +196,10 @@ out_nokvm:
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
+	if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
+		(__u64) vcpu->arch.sie_block)
+		vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
+	smp_mb();
 	free_page((unsigned long)(vcpu->arch.sie_block));
 	kvm_vcpu_uninit(vcpu);
 	kfree(vcpu);
@@ -283,8 +288,10 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	vcpu->arch.sie_block->gmsor = vcpu->kvm->arch.guest_origin;
 	vcpu->arch.sie_block->ecb = 2;
 	vcpu->arch.sie_block->eca = 0xC1002001U;
-	setup_timer(&vcpu->arch.ckc_timer, kvm_s390_idle_wakeup,
-		 (unsigned long) vcpu);
+	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
+		     (unsigned long) vcpu);
+	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
 	get_cpu_id(&vcpu->arch.cpu_id);
 	vcpu->arch.cpu_id.version = 0xff;
 	return 0;
@@ -307,19 +314,21 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 	vcpu->arch.sie_block->icpua = id;
 	BUG_ON(!kvm->arch.sca);
-	BUG_ON(kvm->arch.sca->cpu[id].sda);
-	kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
+	if (!kvm->arch.sca->cpu[id].sda)
+		kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
+	else
+		BUG_ON(!kvm->vcpus[id]); /* vcpu does already exist */
 	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
 	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
 
 	spin_lock_init(&vcpu->arch.local_int.lock);
 	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
 	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
-	spin_lock_bh(&kvm->arch.float_int.lock);
+	spin_lock(&kvm->arch.float_int.lock);
 	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
 	init_waitqueue_head(&vcpu->arch.local_int.wq);
 	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
-	spin_unlock_bh(&kvm->arch.float_int.lock);
+	spin_unlock(&kvm->arch.float_int.lock);
 
 	rc = kvm_vcpu_init(vcpu, kvm, id);
 	if (rc)
@@ -478,6 +487,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	vcpu_load(vcpu);
 
+	/* verify, that memory has been registered */
+	if (!vcpu->kvm->arch.guest_memsize) {
+		vcpu_put(vcpu);
+		return -EINVAL;
+	}
+
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
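The kvm_arch_vcpu_destroy() hunk above pairs with the relaxed BUG_ON in kvm_arch_vcpu_create(): on destroy, the vcpu's SIE block address is removed from the shared SCA, but only if the slot still points at this block, and the clear is ordered before the page is freed. A sketch of that check-then-clear idiom; release_slot is a hypothetical name, and smp_mb() is assumed to come from the kernel's barrier headers:

	#include <linux/types.h>

	/* Clear a shared table entry only if we still own it, and order
	 * the clear before the block behind it can be freed and reused. */
	static void release_slot(__u64 *slot, void *block)
	{
		if (*slot == (__u64)(unsigned long) block)
			*slot = 0;
		smp_mb();
	}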
@@ -657,6 +672,8 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 				struct kvm_memory_slot old,
 				int user_alloc)
 {
+	int i;
+
 	/* A few sanity checks. We can have exactly one memory slot which has
 	   to start at guest virtual zero and which has to be located at a
 	   page boundary in userland and which has to end at a page boundary.
@@ -664,7 +681,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 	   vmas. It is okay to mmap() and munmap() stuff in this slot after
 	   doing this call at any time */
 
-	if (mem->slot)
+	if (mem->slot || kvm->arch.guest_memsize)
 		return -EINVAL;
 
 	if (mem->guest_phys_addr)
@@ -676,15 +693,39 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 	if (mem->memory_size & (PAGE_SIZE - 1))
 		return -EINVAL;
 
+	if (!user_alloc)
+		return -EINVAL;
+
+	/* lock all vcpus */
+	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+		if (!kvm->vcpus[i])
+			continue;
+		if (!mutex_trylock(&kvm->vcpus[i]->mutex))
+			goto fail_out;
+	}
+
 	kvm->arch.guest_origin = mem->userspace_addr;
 	kvm->arch.guest_memsize = mem->memory_size;
 
-	/* FIXME: we do want to interrupt running CPUs and update their memory
-	   configuration now to avoid race conditions. But hey, changing the
-	   memory layout while virtual CPUs are running is usually bad
-	   programming practice. */
+	/* update sie control blocks, and unlock all vcpus */
+	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+		if (kvm->vcpus[i]) {
+			kvm->vcpus[i]->arch.sie_block->gmsor =
+				kvm->arch.guest_origin;
+			kvm->vcpus[i]->arch.sie_block->gmslm =
+				kvm->arch.guest_memsize +
+				kvm->arch.guest_origin +
+				VIRTIODESCSPACE - 1ul;
+			mutex_unlock(&kvm->vcpus[i]->mutex);
+		}
+	}
 
 	return 0;
+
+fail_out:
+	for (; i >= 0; i--)
+		mutex_unlock(&kvm->vcpus[i]->mutex);
+	return -EINVAL;
 }
 
 void kvm_arch_flush_shadow(struct kvm *kvm)
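The memory-slot hunk above replaces the old FIXME with real synchronization: every existing vcpu's mutex is taken with mutex_trylock() before guest_origin/guest_memsize and the per-vcpu SIE fields (gmsor, gmslm) are updated, so no vcpu can run SIE against a stale memory layout, and if any vcpu is busy the operation backs out with -EINVAL. A sketch of the all-or-nothing trylock idiom; lock_all is a hypothetical name, and for robustness this version skips empty slots and the slot whose trylock failed during the unwind:

	#include <linux/errno.h>
	#include <linux/mutex.h>

	/* Take every populated slot's mutex, or none of them. */
	static int lock_all(struct mutex **locks, int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			if (!locks[i])
				continue;
			if (!mutex_trylock(locks[i]))
				goto unwind;
		}
		return 0;

	unwind:
		while (--i >= 0)	/* drop only what we took */
			if (locks[i])
				mutex_unlock(locks[i]);
		return -EBUSY;
	}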
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 00bbe69b78da..748fee872323 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -14,6 +14,7 @@
 #ifndef ARCH_S390_KVM_S390_H
 #define ARCH_S390_KVM_S390_H
 
+#include <linux/hrtimer.h>
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
@@ -41,7 +42,8 @@ static inline int __cpu_is_stopped(struct kvm_vcpu *vcpu)
 }
 
 int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
-void kvm_s390_idle_wakeup(unsigned long data);
+enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
+void kvm_s390_tasklet(unsigned long parm);
 void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
 int kvm_s390_inject_vm(struct kvm *kvm,
 		struct kvm_s390_interrupt *s390int);
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 4b88834b8dd8..93ecd06e1a74 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -204,11 +204,11 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
 	int cpus = 0;
 	int n;
 
-	spin_lock_bh(&fi->lock);
+	spin_lock(&fi->lock);
 	for (n = 0; n < KVM_MAX_VCPUS; n++)
 		if (fi->local_int[n])
 			cpus++;
-	spin_unlock_bh(&fi->lock);
+	spin_unlock(&fi->lock);
 
 	/* deal with other level 3 hypervisors */
 	if (stsi(mem, 3, 2, 2) == -ENOSYS)
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index f27dbedf0866..36678835034d 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -52,7 +52,7 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
 	if (cpu_addr >= KVM_MAX_VCPUS)
 		return 3; /* not operational */
 
-	spin_lock_bh(&fi->lock);
+	spin_lock(&fi->lock);
 	if (fi->local_int[cpu_addr] == NULL)
 		rc = 3; /* not operational */
 	else if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
@@ -64,7 +64,7 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
 		*reg |= SIGP_STAT_STOPPED;
 		rc = 1; /* status stored */
 	}
-	spin_unlock_bh(&fi->lock);
+	spin_unlock(&fi->lock);
 
 	VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
 	return rc;
@@ -86,7 +86,7 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
 
 	inti->type = KVM_S390_INT_EMERGENCY;
 
-	spin_lock_bh(&fi->lock);
+	spin_lock(&fi->lock);
 	li = fi->local_int[cpu_addr];
 	if (li == NULL) {
 		rc = 3; /* not operational */
@@ -102,7 +102,7 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
 	spin_unlock_bh(&li->lock);
 	rc = 0; /* order accepted */
 unlock:
-	spin_unlock_bh(&fi->lock);
+	spin_unlock(&fi->lock);
 	VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
 	return rc;
 }
@@ -123,7 +123,7 @@ static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int store)
 
 	inti->type = KVM_S390_SIGP_STOP;
 
-	spin_lock_bh(&fi->lock);
+	spin_lock(&fi->lock);
 	li = fi->local_int[cpu_addr];
 	if (li == NULL) {
 		rc = 3; /* not operational */
@@ -142,7 +142,7 @@ static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int store)
 	spin_unlock_bh(&li->lock);
 	rc = 0; /* order accepted */
unlock:
-	spin_unlock_bh(&fi->lock);
+	spin_unlock(&fi->lock);
 	VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
 	return rc;
 }
@@ -188,7 +188,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
 	if (!inti)
 		return 2; /* busy */
 
-	spin_lock_bh(&fi->lock);
+	spin_lock(&fi->lock);
 	li = fi->local_int[cpu_addr];
 
 	if ((cpu_addr >= KVM_MAX_VCPUS) || (li == NULL)) {
@@ -220,7 +220,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
 out_li:
 	spin_unlock_bh(&li->lock);
 out_fi:
-	spin_unlock_bh(&fi->lock);
+	spin_unlock(&fi->lock);
 	return rc;
 }
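Finally, the rule behind the many spin_lock_bh -> spin_lock swaps across interrupt.c, kvm-s390.c, priv.c and sigp.c above: once the timer wakeup moves into a tasklet that takes only the per-vcpu local_int.lock, the floating-interrupt lock (fi->lock) is taken in process context exclusively, so disabling bottom halves around it buys nothing. The local_int.lock, by contrast, is still taken by the tasklet, so process-context users keep the _bh variant while the tasklet itself uses a plain spin_lock, since it already runs in softirq context. A short sketch of both cases, with illustrative lock names:

	#include <linux/interrupt.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(process_only_lock);   /* no BH/irq users */
	static DEFINE_SPINLOCK(shared_with_bh_lock); /* also taken by a tasklet */

	static void demo_tasklet_fn(unsigned long data)
	{
		/* already in softirq context, a plain lock is sufficient */
		spin_lock(&shared_with_bh_lock);
		/* ... */
		spin_unlock(&shared_with_bh_lock);
	}

	static void demo_process_side(void)
	{
		/* no softirq user exists, _bh would be wasted work */
		spin_lock(&process_only_lock);
		/* ... */
		spin_unlock(&process_only_lock);

		/* shared with the tasklet: keep it from preempting us
		 * on this CPU and deadlocking on the held lock */
		spin_lock_bh(&shared_with_bh_lock);
		/* ... */
		spin_unlock_bh(&shared_with_bh_lock);
	}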