author     Rodrigo Vivi <rodrigo.vivi@intel.com>    2019-02-01 09:03:23 -0800
committer  Rodrigo Vivi <rodrigo.vivi@intel.com>    2019-02-01 09:03:24 -0800
commit     1b4fd5d38c633344bca57c1dd874c49474c94462 (patch)
tree       71b048cba1ace90a5b66e8d4487b92c210f1c916
parent     7360c9f6b857e22a48e545f4e99c79630994e932 (diff)
parent     39c68e87bc50a71bcfe93582d9b0673ef30db418 (diff)
Merge tag 'gvt-next-2019-02-01' of https://github.com/intel/gvt-linux into drm-intel-next-queued
gvt-next-2019-02-01

- new VFIO EDID region support (Henry)

Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
From: Zhenyu Wang <zhenyuw@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190201061523.GE5588@zhen-hp.sh.intel.com
-rw-r--r--   drivers/gpu/drm/i915/gvt/display.c     31
-rw-r--r--   drivers/gpu/drm/i915/gvt/display.h     37
-rw-r--r--   drivers/gpu/drm/i915/gvt/gvt.c          1
-rw-r--r--   drivers/gpu/drm/i915/gvt/gvt.h          3
-rw-r--r--   drivers/gpu/drm/i915/gvt/hypercall.h    1
-rw-r--r--   drivers/gpu/drm/i915/gvt/kvmgt.c      143
-rw-r--r--   drivers/gpu/drm/i915/gvt/mpt.h         17
-rw-r--r--   drivers/gpu/drm/i915/gvt/vgpu.c         6
8 files changed, 233 insertions, 6 deletions
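For orientation before the diff: the series exposes a VFIO_REGION_SUBTYPE_GFX_EDID region to the device model. The register block (struct vfio_region_gfx_edid) sits at the start of the region, the EDID blob lives at EDID_BLOB_OFFSET (PAGE_SIZE/2), and a 4-byte write to link_state is what ends up in intel_vgpu_emulate_hotplug() on the kernel side. A minimal userspace sketch of driving that protocol is shown below; it is not part of this series — the helper name set_vgpu_edid() and the fd/region_off/edid parameters are hypothetical, and the region's file offset is assumed to have been discovered beforehand via VFIO_DEVICE_GET_REGION_INFO.

    /* Hypothetical sketch: program a vGPU's VFIO GFX EDID region and raise a
     * hotplug.  'fd' is the VFIO device fd, 'region_off' the file offset of
     * the EDID region, 'edid' a valid EDID block of 'edid_len' bytes.
     */
    #include <stddef.h>
    #include <unistd.h>
    #include <linux/vfio.h>

    static int set_vgpu_edid(int fd, off_t region_off,
                             const void *edid, __u32 edid_len)
    {
            __u32 edid_offset, edid_max_size, state;

            /* The register block only accepts 4-byte accesses (see
             * handle_edid_regs() below), so read fields one at a time. */
            if (pread(fd, &edid_max_size, 4, region_off +
                      offsetof(struct vfio_region_gfx_edid, edid_max_size)) != 4)
                    return -1;
            if (pread(fd, &edid_offset, 4, region_off +
                      offsetof(struct vfio_region_gfx_edid, edid_offset)) != 4)
                    return -1;
            if (edid_len > edid_max_size)
                    return -1;

            /* Take the link down while the blob is being replaced. */
            state = VFIO_DEVICE_GFX_LINK_STATE_DOWN;
            if (pwrite(fd, &state, 4, region_off +
                       offsetof(struct vfio_region_gfx_edid, link_state)) != 4)
                    return -1;

            /* Write the new blob, then publish its size. */
            if (pwrite(fd, edid, edid_len,
                       region_off + edid_offset) != (ssize_t)edid_len)
                    return -1;
            if (pwrite(fd, &edid_len, 4, region_off +
                       offsetof(struct vfio_region_gfx_edid, edid_size)) != 4)
                    return -1;

            /* Bringing the link back up triggers the virtual hotplug. */
            state = VFIO_DEVICE_GFX_LINK_STATE_UP;
            if (pwrite(fd, &state, 4, region_off +
                       offsetof(struct vfio_region_gfx_edid, link_state)) != 4)
                    return -1;

            return 0;
    }

In this series only PORT_D on Skylake/Kabylake is wired up, so the link-up write is surfaced to the guest as a DP-D hotplug event (DP_D_HOTPLUG).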
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 4f25b6b7728e..035479e273be 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -342,6 +342,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
port->dpcd->data_valid = true;
port->dpcd->data[DPCD_SINK_COUNT] = 0x1;
port->type = type;
+ port->id = resolution;
emulate_monitor_status_change(vgpu);
@@ -445,6 +446,36 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
}
/**
+ * intel_vgpu_emulate_hotplug - trigger hotplug event for vGPU
+ * @vgpu: a vGPU
+ * @connected: link state
+ *
+ * This function is used to trigger the hotplug interrupt for a vGPU
+ *
+ */
+void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ /* TODO: add more platforms support */
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (connected) {
+ vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
+ SFUSE_STRAP_DDID_DETECTED;
+ vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
+ } else {
+ vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
+ ~SFUSE_STRAP_DDID_DETECTED;
+ vgpu_vreg_t(vgpu, SDEISR) &= ~SDE_PORTD_HOTPLUG_CPT;
+ }
+ vgpu_vreg_t(vgpu, SDEIIR) |= SDE_PORTD_HOTPLUG_CPT;
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+ PORTD_HOTPLUG_STATUS_MASK;
+ intel_vgpu_trigger_virtual_event(vgpu, DP_D_HOTPLUG);
+ }
+}
+
+/**
* intel_vgpu_clean_display - clean vGPU virtual display emulation
* @vgpu: a vGPU
*
diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h
index ea7c1c525b8c..a87f33e6a23c 100644
--- a/drivers/gpu/drm/i915/gvt/display.h
+++ b/drivers/gpu/drm/i915/gvt/display.h
@@ -146,18 +146,19 @@ enum intel_vgpu_port_type {
GVT_PORT_MAX
};
+enum intel_vgpu_edid {
+ GVT_EDID_1024_768,
+ GVT_EDID_1920_1200,
+ GVT_EDID_NUM,
+};
+
struct intel_vgpu_port {
/* per display EDID information */
struct intel_vgpu_edid_data *edid;
/* per display DPCD information */
struct intel_vgpu_dpcd_data *dpcd;
int type;
-};
-
-enum intel_vgpu_edid {
- GVT_EDID_1024_768,
- GVT_EDID_1920_1200,
- GVT_EDID_NUM,
+ enum intel_vgpu_edid id;
};
static inline char *vgpu_edid_str(enum intel_vgpu_edid id)
@@ -172,6 +173,30 @@ static inline char *vgpu_edid_str(enum intel_vgpu_edid id)
}
}
+static inline unsigned int vgpu_edid_xres(enum intel_vgpu_edid id)
+{
+ switch (id) {
+ case GVT_EDID_1024_768:
+ return 1024;
+ case GVT_EDID_1920_1200:
+ return 1920;
+ default:
+ return 0;
+ }
+}
+
+static inline unsigned int vgpu_edid_yres(enum intel_vgpu_edid id)
+{
+ switch (id) {
+ case GVT_EDID_1024_768:
+ return 768;
+ case GVT_EDID_1920_1200:
+ return 1200;
+ default:
+ return 0;
+ }
+}
+
void intel_gvt_emulate_vblank(struct intel_gvt *gvt);
void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 4e8947f33bd0..43f4242062dd 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -185,6 +185,7 @@ static const struct intel_gvt_ops intel_gvt_ops = {
.vgpu_query_plane = intel_vgpu_query_plane,
.vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
.write_protect_handler = intel_vgpu_page_track_handler,
+ .emulate_hotplug = intel_vgpu_emulate_hotplug,
};
static void init_device_info(struct intel_gvt *gvt)
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index fb9cc980e120..8bce09de4b82 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -536,6 +536,8 @@ int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes);
+void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected);
+
static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar)
{
/* We are 64bit bar. */
@@ -577,6 +579,7 @@ struct intel_gvt_ops {
int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
unsigned int);
+ void (*emulate_hotplug)(struct intel_vgpu *vgpu, bool connected);
};
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index 50798868ab15..5e01cc8d9b16 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -67,6 +67,7 @@ struct intel_gvt_mpt {
int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
bool map);
int (*set_opregion)(void *vgpu);
+ int (*set_edid)(void *vgpu, int port_num);
int (*get_vfio_device)(void *vgpu);
void (*put_vfio_device)(void *vgpu);
bool (*is_valid_gfn)(unsigned long handle, unsigned long gfn);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index f8d44e8f86a6..63eef86a2a85 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -57,6 +57,8 @@ static const struct intel_gvt_ops *intel_gvt_ops;
#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_OFFSET_MASK (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
+#define EDID_BLOB_OFFSET (PAGE_SIZE/2)
+
#define OPREGION_SIGNATURE "IntelGraphicsMem"
struct vfio_region;
@@ -76,6 +78,11 @@ struct vfio_region {
void *data;
};
+struct vfio_edid_region {
+ struct vfio_region_gfx_edid vfio_edid_regs;
+ void *edid_blob;
+};
+
struct kvmgt_pgfn {
gfn_t gfn;
struct hlist_node hnode;
@@ -427,6 +434,111 @@ static const struct intel_vgpu_regops intel_vgpu_regops_opregion = {
.release = intel_vgpu_reg_release_opregion,
};
+static int handle_edid_regs(struct intel_vgpu *vgpu,
+ struct vfio_edid_region *region, char *buf,
+ size_t count, u16 offset, bool is_write)
+{
+ struct vfio_region_gfx_edid *regs = &region->vfio_edid_regs;
+ unsigned int data;
+
+ if (offset + count > sizeof(*regs))
+ return -EINVAL;
+
+ if (count != 4)
+ return -EINVAL;
+
+ if (is_write) {
+ data = *((unsigned int *)buf);
+ switch (offset) {
+ case offsetof(struct vfio_region_gfx_edid, link_state):
+ if (data == VFIO_DEVICE_GFX_LINK_STATE_UP) {
+ if (!drm_edid_block_valid(
+ (u8 *)region->edid_blob,
+ 0,
+ true,
+ NULL)) {
+ gvt_vgpu_err("invalid EDID blob\n");
+ return -EINVAL;
+ }
+ intel_gvt_ops->emulate_hotplug(vgpu, true);
+ } else if (data == VFIO_DEVICE_GFX_LINK_STATE_DOWN)
+ intel_gvt_ops->emulate_hotplug(vgpu, false);
+ else {
+ gvt_vgpu_err("invalid EDID link state %d\n",
+ regs->link_state);
+ return -EINVAL;
+ }
+ regs->link_state = data;
+ break;
+ case offsetof(struct vfio_region_gfx_edid, edid_size):
+ if (data > regs->edid_max_size) {
+ gvt_vgpu_err("EDID size is bigger than %d!\n",
+ regs->edid_max_size);
+ return -EINVAL;
+ }
+ regs->edid_size = data;
+ break;
+ default:
+ /* read-only regs */
+ gvt_vgpu_err("write read-only EDID region at offset %d\n",
+ offset);
+ return -EPERM;
+ }
+ } else {
+ memcpy(buf, (char *)regs + offset, count);
+ }
+
+ return count;
+}
+
+static int handle_edid_blob(struct vfio_edid_region *region, char *buf,
+ size_t count, u16 offset, bool is_write)
+{
+ if (offset + count > region->vfio_edid_regs.edid_size)
+ return -EINVAL;
+
+ if (is_write)
+ memcpy(region->edid_blob + offset, buf, count);
+ else
+ memcpy(buf, region->edid_blob + offset, count);
+
+ return count;
+}
+
+static size_t intel_vgpu_reg_rw_edid(struct intel_vgpu *vgpu, char *buf,
+ size_t count, loff_t *ppos, bool iswrite)
+{
+ int ret;
+ unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
+ VFIO_PCI_NUM_REGIONS;
+ struct vfio_edid_region *region =
+ (struct vfio_edid_region *)vgpu->vdev.region[i].data;
+ loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+
+ if (pos < region->vfio_edid_regs.edid_offset) {
+ ret = handle_edid_regs(vgpu, region, buf, count, pos, iswrite);
+ } else {
+ pos -= EDID_BLOB_OFFSET;
+ ret = handle_edid_blob(region, buf, count, pos, iswrite);
+ }
+
+ if (ret < 0)
+ gvt_vgpu_err("failed to access EDID region\n");
+
+ return ret;
+}
+
+static void intel_vgpu_reg_release_edid(struct intel_vgpu *vgpu,
+ struct vfio_region *region)
+{
+ kfree(region->data);
+}
+
+static const struct intel_vgpu_regops intel_vgpu_regops_edid = {
+ .rw = intel_vgpu_reg_rw_edid,
+ .release = intel_vgpu_reg_release_edid,
+};
+
static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
unsigned int type, unsigned int subtype,
const struct intel_vgpu_regops *ops,
@@ -493,6 +605,36 @@ static int kvmgt_set_opregion(void *p_vgpu)
return ret;
}
+static int kvmgt_set_edid(void *p_vgpu, int port_num)
+{
+ struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+ struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
+ struct vfio_edid_region *base;
+ int ret;
+
+ base = kzalloc(sizeof(*base), GFP_KERNEL);
+ if (!base)
+ return -ENOMEM;
+
+ /* TODO: Add multi-port and EDID extension block support */
+ base->vfio_edid_regs.edid_offset = EDID_BLOB_OFFSET;
+ base->vfio_edid_regs.edid_max_size = EDID_SIZE;
+ base->vfio_edid_regs.edid_size = EDID_SIZE;
+ base->vfio_edid_regs.max_xres = vgpu_edid_xres(port->id);
+ base->vfio_edid_regs.max_yres = vgpu_edid_yres(port->id);
+ base->edid_blob = port->edid->edid_block;
+
+ ret = intel_vgpu_register_reg(vgpu,
+ VFIO_REGION_TYPE_GFX,
+ VFIO_REGION_SUBTYPE_GFX_EDID,
+ &intel_vgpu_regops_edid, EDID_SIZE,
+ VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE |
+ VFIO_REGION_INFO_FLAG_CAPS, base);
+
+ return ret;
+}
+
static void kvmgt_put_vfio_device(void *vgpu)
{
if (WARN_ON(!((struct intel_vgpu *)vgpu)->vdev.vfio_device))
@@ -1874,6 +2016,7 @@ static struct intel_gvt_mpt kvmgt_mpt = {
.dma_map_guest_page = kvmgt_dma_map_guest_page,
.dma_unmap_guest_page = kvmgt_dma_unmap_guest_page,
.set_opregion = kvmgt_set_opregion,
+ .set_edid = kvmgt_set_edid,
.get_vfio_device = kvmgt_get_vfio_device,
.put_vfio_device = kvmgt_put_vfio_device,
.is_valid_gfn = kvmgt_is_valid_gfn,
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 9b4225d44243..5d8b8f228d8f 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -314,6 +314,23 @@ static inline int intel_gvt_hypervisor_set_opregion(struct intel_vgpu *vgpu)
}
/**
+ * intel_gvt_hypervisor_set_edid - Set EDID region for guest
+ * @vgpu: a vGPU
+ * @port_num: display port number
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_set_edid(struct intel_vgpu *vgpu,
+ int port_num)
+{
+ if (!intel_gvt_host.mpt->set_edid)
+ return 0;
+
+ return intel_gvt_host.mpt->set_edid(vgpu, port_num);
+}
+
+/**
* intel_gvt_hypervisor_get_vfio_device - increase vfio device ref count
* @vgpu: a vGPU
*
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index e1c860f80eb0..720e2b10adaa 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -428,6 +428,12 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_clean_sched_policy;
+ /* TODO: add more platforms support */
+ if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
+ ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
+ if (ret)
+ goto out_clean_sched_policy;
+
return vgpu;
out_clean_sched_policy: