author	Avi Kivity <avi@qumranet.com>	2007-02-21 18:04:26 +0200
committer	Avi Kivity <avi@qumranet.com>	2007-03-04 11:12:42 +0200
commit	bccf2150fe62dda5fb09efa2f64d2a234694eb48 (patch)
tree	b5e6fc6440b864ddd1c32c4cee1916a0c5484c63 /include/linux/kvm.h
parent	c5ea76600653b1a242321734435cb1c54778941a (diff)
KVM: Per-vcpu inodes
Allocate a distinct inode for every vcpu in a VM. This has the following benefits:

- the filp cachelines are no longer bounced when f_count is incremented on every ioctl()
- the API and internal code are distinctly clearer; for example, on the KVM_GET_REGS ioctl there is no need to copy the vcpu number from userspace and then copy the registers back; the vcpu identity is derived from the fd used to make the call

Right now the performance benefits are completely theoretical since (a) we don't support more than one vcpu per VM and (b) virtualization hardware inefficiencies completely overwhelm any cacheline bouncing effects. But both of these will change, and we need to prepare the API today.

Signed-off-by: Avi Kivity <avi@qumranet.com>
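As an illustration of the fd-based flow this enables (not part of the patch), here is a minimal userspace sketch; vm_fd is assumed to come from KVM_CREATE_VM on /dev/kvm, and the helper name is hypothetical:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: create vcpu slot 0 on an existing VM fd and enter
 * the guest once.  KVM_CREATE_VCPU is a VM-fd ioctl that returns a new
 * vcpu fd; all per-vcpu ioctls are then issued on that fd, so no struct
 * carries a vcpu number any more. */
int run_first_vcpu(int vm_fd)
{
	struct kvm_run run = { 0 };
	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);	/* vcpu slot 0 */

	if (vcpu_fd < 0)
		return -1;
	return ioctl(vcpu_fd, KVM_RUN, &run);	/* vcpu identity comes from the fd */
}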
Diffstat (limited to 'include/linux/kvm.h')
-rw-r--r--	include/linux/kvm.h	38
1 file changed, 17 insertions, 21 deletions
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index d6e6635dbec1..7c9a4004af44 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -52,11 +52,10 @@ enum kvm_exit_reason {
 /* for KVM_RUN */
 struct kvm_run {
 	/* in */
-	__u32 vcpu;
 	__u32 emulated; /* skip current instruction */
 	__u32 mmio_completed; /* mmio request completed */
 	__u8 request_interrupt_window;
-	__u8 padding1[3];
+	__u8 padding1[7];
 
 	/* out */
 	__u32 exit_type;
@@ -111,10 +110,6 @@ struct kvm_run {
 
 /* for KVM_GET_REGS and KVM_SET_REGS */
 struct kvm_regs {
-	/* in */
-	__u32 vcpu;
-	__u32 padding;
-
 	/* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
 	__u64 rax, rbx, rcx, rdx;
 	__u64 rsi, rdi, rsp, rbp;
@@ -141,10 +136,6 @@ struct kvm_dtable {
 
 /* for KVM_GET_SREGS and KVM_SET_SREGS */
 struct kvm_sregs {
-	/* in */
-	__u32 vcpu;
-	__u32 padding;
-
 	/* out (KVM_GET_SREGS) / in (KVM_SET_SREGS) */
 	struct kvm_segment cs, ds, es, fs, gs, ss;
 	struct kvm_segment tr, ldt;
@@ -163,8 +154,8 @@ struct kvm_msr_entry {
 
 /* for KVM_GET_MSRS and KVM_SET_MSRS */
 struct kvm_msrs {
-	__u32 vcpu;
 	__u32 nmsrs; /* number of msrs in entries */
+	__u32 pad;
 
 	struct kvm_msr_entry entries[0];
 };
@@ -179,8 +170,6 @@ struct kvm_msr_list {
 struct kvm_translation {
 	/* in */
 	__u64 linear_address;
-	__u32 vcpu;
-	__u32 padding;
 
 	/* out */
 	__u64 physical_address;
@@ -193,7 +182,6 @@ struct kvm_translation {
 /* for KVM_INTERRUPT */
 struct kvm_interrupt {
 	/* in */
-	__u32 vcpu;
 	__u32 irq;
 };
 
@@ -206,8 +194,8 @@ struct kvm_breakpoint {
 /* for KVM_DEBUG_GUEST */
 struct kvm_debug_guest {
 	/* int */
-	__u32 vcpu;
 	__u32 enabled;
+	__u32 pad;
 	struct kvm_breakpoint breakpoints[4];
 	__u32 singlestep;
 };
@@ -234,18 +222,26 @@ struct kvm_dirty_log {
 /*
  * ioctls for VM fds
  */
+#define KVM_SET_MEMORY_REGION _IOW(KVMIO, 10, struct kvm_memory_region)
+/*
+ * KVM_CREATE_VCPU receives as a parameter the vcpu slot, and returns
+ * a vcpu fd.
+ */
+#define KVM_CREATE_VCPU _IOW(KVMIO, 11, int)
+#define KVM_GET_DIRTY_LOG _IOW(KVMIO, 12, struct kvm_dirty_log)
+
+/*
+ * ioctls for vcpu fds
+ */
 #define KVM_RUN _IOWR(KVMIO, 2, struct kvm_run)
-#define KVM_GET_REGS _IOWR(KVMIO, 3, struct kvm_regs)
+#define KVM_GET_REGS _IOR(KVMIO, 3, struct kvm_regs)
 #define KVM_SET_REGS _IOW(KVMIO, 4, struct kvm_regs)
-#define KVM_GET_SREGS _IOWR(KVMIO, 5, struct kvm_sregs)
+#define KVM_GET_SREGS _IOR(KVMIO, 5, struct kvm_sregs)
 #define KVM_SET_SREGS _IOW(KVMIO, 6, struct kvm_sregs)
 #define KVM_TRANSLATE _IOWR(KVMIO, 7, struct kvm_translation)
 #define KVM_INTERRUPT _IOW(KVMIO, 8, struct kvm_interrupt)
 #define KVM_DEBUG_GUEST _IOW(KVMIO, 9, struct kvm_debug_guest)
-#define KVM_SET_MEMORY_REGION _IOW(KVMIO, 10, struct kvm_memory_region)
-#define KVM_CREATE_VCPU _IOW(KVMIO, 11, int /* vcpu_slot */)
-#define KVM_GET_DIRTY_LOG _IOW(KVMIO, 12, struct kvm_dirty_log)
 #define KVM_GET_MSRS _IOWR(KVMIO, 13, struct kvm_msrs)
-#define KVM_SET_MSRS _IOWR(KVMIO, 14, struct kvm_msrs)
+#define KVM_SET_MSRS _IOW(KVMIO, 14, struct kvm_msrs)
 
 #endif
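With the vcpu input field gone, KVM_GET_REGS and KVM_GET_SREGS become pure reads from the kernel's side (_IOR), KVM_SET_MSRS becomes a pure write (_IOW), and KVM_GET_MSRS stays _IOWR since nmsrs is an input while the values are outputs. A small sketch of the resulting read-modify-write pattern on a vcpu fd (illustrative only; vcpu_fd is assumed to come from KVM_CREATE_VCPU as above, and the rip adjustment is arbitrary):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative only: fetch the general-purpose registers, adjust one and
 * write them back.  No vcpu number is passed anywhere; the fd identifies
 * the vcpu. */
static int bump_rip(int vcpu_fd)
{
	struct kvm_regs regs;

	if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) < 0)	/* _IOR: kernel fills regs */
		return -1;
	regs.rip += 1;					/* arbitrary example edit */
	return ioctl(vcpu_fd, KVM_SET_REGS, &regs);	/* _IOW: kernel reads regs */
}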