author     Linus Torvalds <torvalds@linux-foundation.org>    2012-08-25 17:27:17 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>    2012-08-25 17:27:17 -0700
commit     6ec9776c28f9fe0780803a7dc1045c83323d8338
tree       957376c792451aadb00a1b1d68871cd30e3178c8 /arch/x86
parent     e1d33a5c634bcf5f944034a2127c0026308c005c
parent     5ad105e569c45dcfad50d724c61d5061248be755
Merge git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm fixes from Marcelo Tosatti.

* git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86 emulator: use stack size attribute to mask rsp in stack ops
  KVM: MMU: Fix mmu_shrink() so that it can free mmu pages as intended
  ppc: e500_tlb memset clears nothing
  KVM: PPC: Add cache flush on page map
  KVM: PPC: Book3S HV: Fix incorrect branch in H_CEDE code
  KVM: x86: update KVM_SAVE_MSRS_BEGIN to correct value
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kvm/emulate.c  30
-rw-r--r--  arch/x86/kvm/mmu.c      13
-rw-r--r--  arch/x86/kvm/x86.c       2

3 files changed, 31 insertions, 14 deletions
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 97d9a9914ba8..a3b57a27be88 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -475,13 +475,26 @@ register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
 	return address_mask(ctxt, reg);
 }
 
+static void masked_increment(ulong *reg, ulong mask, int inc)
+{
+	assign_masked(reg, *reg + inc, mask);
+}
+
 static inline void
 register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
 {
+	ulong mask;
+
 	if (ctxt->ad_bytes == sizeof(unsigned long))
-		*reg += inc;
+		mask = ~0UL;
 	else
-		*reg = (*reg & ~ad_mask(ctxt)) | ((*reg + inc) & ad_mask(ctxt));
+		mask = ad_mask(ctxt);
+	masked_increment(reg, mask, inc);
+}
+
+static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
+{
+	masked_increment(&ctxt->regs[VCPU_REGS_RSP], stack_mask(ctxt), inc);
 }
 
 static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
@@ -1522,8 +1535,8 @@ static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
 {
 	struct segmented_address addr;
 
-	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], -bytes);
-	addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
+	rsp_increment(ctxt, -bytes);
+	addr.ea = ctxt->regs[VCPU_REGS_RSP] & stack_mask(ctxt);
 	addr.seg = VCPU_SREG_SS;
 
 	return segmented_write(ctxt, addr, data, bytes);
@@ -1542,13 +1555,13 @@ static int emulate_pop(struct x86_emulate_ctxt *ctxt,
 	int rc;
 	struct segmented_address addr;
 
-	addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
+	addr.ea = ctxt->regs[VCPU_REGS_RSP] & stack_mask(ctxt);
 	addr.seg = VCPU_SREG_SS;
 	rc = segmented_read(ctxt, addr, dest, len);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
-	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], len);
+	rsp_increment(ctxt, len);
 	return rc;
 }
@@ -1688,8 +1701,7 @@ static int em_popa(struct x86_emulate_ctxt *ctxt)
 
 	while (reg >= VCPU_REGS_RAX) {
 		if (reg == VCPU_REGS_RSP) {
-			register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP],
-						   ctxt->op_bytes);
+			rsp_increment(ctxt, ctxt->op_bytes);
 			--reg;
 		}
 
@@ -2825,7 +2837,7 @@ static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
 	rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
-	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], ctxt->src.val);
+	rsp_increment(ctxt, ctxt->src.val);
 	return X86EMUL_CONTINUE;
 }
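
The emulate.c change above funnels every stack-pointer update through masked_increment(), so only the bits covered by stack_mask(ctxt) ever change: with a 16-bit or 32-bit stack segment, the upper bits of RSP are preserved rather than clobbered by a push or pop. Below is a minimal standalone sketch of that masking behavior; the two helpers mirror the patch, while the harness around them is made up for illustration.

#include <stdio.h>

typedef unsigned long ulong;

/* Mirrors the patch's helpers: write back only the bits selected by mask. */
static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

int main(void)
{
	ulong rsp = 0xdead0000UL;	/* the 0xdead bits must survive the push */
	ulong stack_mask = 0xffff;	/* 16-bit stack segment */

	masked_increment(&rsp, stack_mask, -2);	/* word-sized push */
	printf("rsp = %#lx\n", rsp);	/* prints 0xdeadfffe: only bits 15:0 moved */
	return 0;
}

An unmasked "rsp -= 2" would instead yield 0xdeacfffe, corrupting the bits outside the stack segment's address width.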
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 01ca00423938..7fbd0d273ea8 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4113,16 +4113,21 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
 		LIST_HEAD(invalid_list);
 
 		/*
+		 * Never scan more than sc->nr_to_scan VM instances.
+		 * Will not hit this condition practically since we do not try
+		 * to shrink more than one VM and it is very unlikely to see
+		 * !n_used_mmu_pages so many times.
+		 */
+		if (!nr_to_scan--)
+			break;
+		/*
 		 * n_used_mmu_pages is accessed without holding kvm->mmu_lock
 		 * here. We may skip a VM instance erroneously, but we do not
 		 * want to shrink a VM that only started to populate its MMU
 		 * anyway.
 		 */
-		if (kvm->arch.n_used_mmu_pages > 0) {
-			if (!nr_to_scan--)
-				break;
+		if (!kvm->arch.n_used_mmu_pages)
 			continue;
-		}
 
 		idx = srcu_read_lock(&kvm->srcu);
 		spin_lock(&kvm->mmu_lock);
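
The mmu_shrink() rework moves the nr_to_scan accounting ahead of the n_used_mmu_pages test, so the scan budget now bounds how many VM instances the shrinker walks at all, and VMs with no used MMU pages are skipped rather than derailing the scan. Here is a toy model of the fixed control flow; the struct and harness are hypothetical, and only the two checks mirror the patch.

#include <stdio.h>

struct vm { int n_used_mmu_pages; };

/* Returns how many VM instances were examined. */
static int shrink(struct vm *vms, int nvms, int nr_to_scan)
{
	int examined = 0;

	for (int i = 0; i < nvms; i++) {
		/* Never scan more than nr_to_scan VM instances. */
		if (!nr_to_scan--)
			break;
		examined++;

		/* Nothing to free in this VM; move on to the next one. */
		if (!vms[i].n_used_mmu_pages)
			continue;

		/* Per the patch comment, at most one VM is shrunk per call:
		 * free a page from this VM, then stop. */
		vms[i].n_used_mmu_pages--;
		break;
	}
	return examined;
}

int main(void)
{
	struct vm vms[] = { { 0 }, { 0 }, { 5 }, { 3 } };

	/* A budget of 3 reaches the first VM that has pages to free. */
	printf("examined %d VMs\n", shrink(vms, 4, 3));
	return 0;
}

With the old ordering, a VM that held used pages decremented the budget and then skipped the freeing code entirely, so the shrinker never released anything.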
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 42bce48f6928..dce75b760312 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -806,7 +806,7 @@ EXPORT_SYMBOL_GPL(kvm_rdpmc);
  * kvm-specific. Those are put in the beginning of the list.
  */
 
-#define KVM_SAVE_MSRS_BEGIN	9
+#define KVM_SAVE_MSRS_BEGIN	10
 static u32 msrs_to_save[] = {
 	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
 	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
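
KVM_SAVE_MSRS_BEGIN counts the kvm-specific (paravirtual) MSRs placed at the head of msrs_to_save; entries after that prefix are architectural MSRs that the initialization code probes on the host before advertising them to userspace. If the count is one too small, the last paravirtual MSR falls into the probed region, fails the host probe (no real hardware MSR answers to it), and is silently dropped from the save list. A self-contained sketch of that pattern follows, with made-up MSR numbers; only the prefix-count idea mirrors x86.c.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical MSR indices, for illustration only. */
#define MSR_PV_TIME	0x01
#define MSR_PV_CLOCK	0x02
#define MSR_PV_EOI	0x03		/* a newly added paravirtual MSR */
#define MSR_ARCH_STAR	0xc0000081

#define SAVE_MSRS_BEGIN	3		/* length of the paravirtual prefix */

static const uint32_t msrs_to_save[] = {
	MSR_PV_TIME, MSR_PV_CLOCK, MSR_PV_EOI,	/* paravirtual prefix */
	MSR_ARCH_STAR,				/* architectural tail */
};

int main(void)
{
	size_t n = sizeof(msrs_to_save) / sizeof(msrs_to_save[0]);

	/* Only entries past the prefix are probed on the host. With a
	 * prefix count of 2 instead of 3, MSR_PV_EOI would land here
	 * and be dropped after a failed probe. */
	for (size_t i = SAVE_MSRS_BEGIN; i < n; i++)
		printf("probe architectural MSR %#x\n", (unsigned)msrs_to_save[i]);
	return 0;
}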