author     Paul Mackerras <paulus@samba.org>    2014-01-08 21:25:29 +1100
committer  Alexander Graf <agraf@suse.de>       2014-01-27 16:01:15 +0100
commit     8563bf52d509213e746295341ab52896b562ca5e (patch)
tree       2f8914e14e13f46c773f979061a5bfc060def279 /arch/powerpc
parent     5d00f66b865e3782c5852cdafe1cea11a292a81e (diff)
KVM: PPC: Book3S HV: Add support for DABRX register on POWER7
The DABRX (DABR extension) register on POWER7 processors provides finer control over which accesses cause a data breakpoint interrupt. It contains 3 bits which indicate whether to enable accesses in user, kernel and hypervisor modes respectively to cause data breakpoint interrupts, plus one bit that enables both real mode and virtual mode accesses to cause interrupts. Currently, KVM sets DABRX to allow both kernel and user accesses to cause interrupts while in the guest.

This adds support for the guest to specify other values for DABRX. PAPR defines a H_SET_XDABR hcall to allow the guest to set both DABR and DABRX with one call. This adds a real-mode implementation of H_SET_XDABR, which shares most of its code with the existing H_SET_DABR implementation. To support this, we add a per-vcpu field to store the DABRX value plus code to get and set it via the ONE_REG interface.

For Linux guests to use this new hcall, userspace needs to add "hcall-xdabr" to the set of strings in the /chosen/hypertas-functions property in the device tree. If userspace does this and then migrates the guest to a host where the kernel doesn't include this patch, then userspace will need to implement H_SET_XDABR by writing the specified DABR value to the DABR using the ONE_REG interface. In that case, the old kernel will set DABRX to DABRX_USER | DABRX_KERNEL. That should still work correctly, at least for Linux guests, since Linux guests cope with getting data breakpoint interrupts in modes that weren't requested by just ignoring the interrupt, and Linux guests never set DABRX_BTI.

The other thing this does is to make H_SET_DABR and H_SET_XDABR work on POWER8, which has the DAWR and DAWRX instead of DABR/X. Guests that know about POWER8 should use H_SET_MODE rather than H_SET_[X]DABR, but guests running in POWER7 compatibility mode will still use H_SET_[X]DABR. For them, this adds the logic to convert DABR/X values into DAWR/X values on POWER8.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
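For illustration only (not part of this patch): a PAPR guest that finds "hcall-xdabr" in /chosen/hypertas-functions could program a breakpoint with a single hypercall roughly as follows. The helper name is hypothetical; plpar_hcall_norets(), H_SET_XDABR and the DABR_*/DABRX_* flag names are the existing Linux definitions.

    #include <asm/hvcall.h>
    #include <asm/reg.h>

    /* Hypothetical guest-side helper: watch the doubleword at 'addr' for
     * reads and writes from user and kernel mode with one H_SET_XDABR
     * call (sketch only, no error handling). */
    static long example_set_xdabr(unsigned long addr)
    {
            unsigned long dabr  = (addr & ~7UL) | DABR_TRANSLATION |
                                  DABR_DATA_READ | DABR_DATA_WRITE;
            unsigned long dabrx = DABRX_USER | DABRX_KERNEL;

            /* H_SET_XDABR takes the DABR and DABRX values as its two arguments */
            return plpar_hcall_norets(H_SET_XDABR, dabr, dabrx);
    }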
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h      |  1
-rw-r--r--  arch/powerpc/include/asm/reg.h           | 18
-rw-r--r--  arch/powerpc/include/uapi/asm/kvm.h      |  2
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c        |  1
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c             |  6
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S  | 32
6 files changed, 51 insertions(+), 9 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 81c92d1d7978..d161bc09153b 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -466,6 +466,7 @@ struct kvm_vcpu_arch {
ulong uamor;
ulong iamr;
u32 ctrl;
+ u32 dabrx;
ulong dabr;
ulong dawr;
ulong dawrx;
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 05ecb072540c..adf644a80a3e 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -229,16 +229,20 @@
#define CIABR_PRIV_SUPER 2
#define CIABR_PRIV_HYPER 3
#define SPRN_DAWRX 0xBC
-#define DAWRX_USER (1UL << 0)
-#define DAWRX_KERNEL (1UL << 1)
-#define DAWRX_HYP (1UL << 2)
+#define DAWRX_USER __MASK(0)
+#define DAWRX_KERNEL __MASK(1)
+#define DAWRX_HYP __MASK(2)
+#define DAWRX_WTI __MASK(3)
+#define DAWRX_WT __MASK(4)
+#define DAWRX_DR __MASK(5)
+#define DAWRX_DW __MASK(6)
#define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */
#define SPRN_DABR2 0x13D /* e300 */
#define SPRN_DABRX 0x3F7 /* Data Address Breakpoint Register Extension */
-#define DABRX_USER (1UL << 0)
-#define DABRX_KERNEL (1UL << 1)
-#define DABRX_HYP (1UL << 2)
-#define DABRX_BTI (1UL << 3)
+#define DABRX_USER __MASK(0)
+#define DABRX_KERNEL __MASK(1)
+#define DABRX_HYP __MASK(2)
+#define DABRX_BTI __MASK(3)
#define DABRX_ALL (DABRX_BTI | DABRX_HYP | DABRX_KERNEL | DABRX_USER)
#define SPRN_DAR 0x013 /* Data Address Register */
#define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index a586fb9b77bd..a6665be4f3ab 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -554,6 +554,8 @@ struct kvm_get_htab_header {
/* Architecture compatibility level */
#define KVM_REG_PPC_ARCH_COMPAT (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb7)
+#define KVM_REG_PPC_DABRX (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb8)
+
/* Transactional Memory checkpointed state:
* This is all GPRs, all VSX regs and a subset of SPRs
*/
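For illustration only (not part of this patch): on a powerpc host, userspace such as QEMU could use the new 32-bit ONE_REG id above together with the existing KVM_SET_ONE_REG ioctl roughly as follows. The helper names and vcpu_fd are assumptions; struct kvm_one_reg, KVM_SET_ONE_REG, KVM_REG_PPC_DABR and KVM_REG_PPC_DABRX are the real interface names.

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Hypothetical userspace helper: write one guest register through the
     * ONE_REG interface (error handling elided). */
    static int kvm_set_reg(int vcpu_fd, uint64_t id, uint64_t val)
    {
            struct kvm_one_reg reg = {
                    .id   = id,
                    .addr = (uint64_t)(uintptr_t)&val,
            };

            return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
    }

    /* Migration fallback described in the commit message: on a host kernel
     * without this patch only the DABR write will succeed, and that kernel
     * keeps DABRX at DABRX_USER | DABRX_KERNEL on guest entry. */
    static void set_dabr_and_dabrx(int vcpu_fd, uint64_t dabr, uint32_t dabrx)
    {
            kvm_set_reg(vcpu_fd, KVM_REG_PPC_DABR, dabr);
            kvm_set_reg(vcpu_fd, KVM_REG_PPC_DABRX, dabrx);
    }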
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 043900abbbb0..239a857f1141 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -493,6 +493,7 @@ int main(void)
DEFINE(VCPU_IAMR, offsetof(struct kvm_vcpu, arch.iamr));
DEFINE(VCPU_CTRL, offsetof(struct kvm_vcpu, arch.ctrl));
DEFINE(VCPU_DABR, offsetof(struct kvm_vcpu, arch.dabr));
+ DEFINE(VCPU_DABRX, offsetof(struct kvm_vcpu, arch.dabrx));
DEFINE(VCPU_DAWR, offsetof(struct kvm_vcpu, arch.dawr));
DEFINE(VCPU_DAWRX, offsetof(struct kvm_vcpu, arch.dawrx));
DEFINE(VCPU_CIABR, offsetof(struct kvm_vcpu, arch.ciabr));
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 216049ff7368..eb4eed4a7173 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -812,6 +812,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_DABR:
*val = get_reg_val(id, vcpu->arch.dabr);
break;
+ case KVM_REG_PPC_DABRX:
+ *val = get_reg_val(id, vcpu->arch.dabrx);
+ break;
case KVM_REG_PPC_DSCR:
*val = get_reg_val(id, vcpu->arch.dscr);
break;
@@ -967,6 +970,9 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_DABR:
vcpu->arch.dabr = set_reg_val(id, *val);
break;
+ case KVM_REG_PPC_DABRX:
+ vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP;
+ break;
case KVM_REG_PPC_DSCR:
vcpu->arch.dscr = set_reg_val(id, *val);
break;
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index eae4ab9b9135..56299349e94b 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -585,7 +585,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
BEGIN_FTR_SECTION
/* Set partition DABR */
/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
- li r5,3
+ lwz r5,VCPU_DABRX(r4)
ld r6,VCPU_DABR(r4)
mtspr SPRN_DABRX,r5
mtspr SPRN_DABR,r6
@@ -1763,24 +1763,52 @@ hcall_real_table:
.long 0 /* 0x11c */
.long 0 /* 0x120 */
.long .kvmppc_h_bulk_remove - hcall_real_table
+ .long 0 /* 0x128 */
+ .long 0 /* 0x12c */
+ .long 0 /* 0x130 */
+ .long .kvmppc_h_set_xdabr - hcall_real_table
hcall_real_table_end:
ignore_hdec:
mr r4,r9
b fast_guest_return
+_GLOBAL(kvmppc_h_set_xdabr)
+ andi. r0, r5, DABRX_USER | DABRX_KERNEL
+ beq 6f
+ li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
+ andc. r0, r5, r0
+ beq 3f
+6: li r3, H_PARAMETER
+ blr
+
_GLOBAL(kvmppc_h_set_dabr)
+ li r5, DABRX_USER | DABRX_KERNEL
+3:
BEGIN_FTR_SECTION
b 2f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
std r4,VCPU_DABR(r3)
+ stw r5, VCPU_DABRX(r3)
+ mtspr SPRN_DABRX, r5
/* Work around P7 bug where DABR can get corrupted on mtspr */
1: mtspr SPRN_DABR,r4
mfspr r5, SPRN_DABR
cmpd r4, r5
bne 1b
isync
-2: li r3,0
+ li r3,0
+ blr
+
+ /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
+2: rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW
+ rlwimi r5, r4, 1, DAWRX_WT
+ clrrdi r4, r4, 3
+ std r4, VCPU_DAWR(r3)
+ std r5, VCPU_DAWRX(r3)
+ mtspr SPRN_DAWR, r4
+ mtspr SPRN_DAWRX, r5
+ li r3, 0
blr
_GLOBAL(kvmppc_h_cede)
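As a reading aid only (not code from this patch, and the function name is made up): the bit shuffling performed by the rlwimi/clrrdi sequence in the POWER8 path of kvmppc_h_set_dabr above, restated in C for a kernel context.

    #include <asm/reg.h>

    /* Reading aid: the DABR/DABRX -> DAWR/DAWRX conversion done by the
     * rlwimi/clrrdi sequence above.  'dabrx' is the value in r5 on entry
     * to the POWER8 path (the H_SET_XDABR argument, or
     * DABRX_USER | DABRX_KERNEL for H_SET_DABR); the DAWRX value is
     * layered on top of it. */
    static unsigned long dabrx_to_dawrx(unsigned long dabr, unsigned long dabrx,
                                        unsigned long *dawrx)
    {
            unsigned long x = dabrx;

            /* rlwimi r5,r4,5,DAWRX_DR|DAWRX_DW: DABR bits 0-1 (read/write
             * enables) move up to DAWRX bits 5-6 */
            x = (x & ~(DAWRX_DR | DAWRX_DW)) |
                ((dabr << 5) & (DAWRX_DR | DAWRX_DW));

            /* rlwimi r5,r4,1,DAWRX_WT: DABR bit 3 moves up to DAWRX bit 4 */
            x = (x & ~DAWRX_WT) | ((dabr << 1) & DAWRX_WT);
            *dawrx = x;

            /* clrrdi r4,r4,3: clear the three DABR flag bits, leaving the
             * doubleword-aligned address that goes into the DAWR */
            return dabr & ~7UL;
    }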