Merge tag 'gvt-next-2019-02-01' of https://github.com/intel/gvt-linux into drm-intel...
author Rodrigo Vivi <rodrigo.vivi@intel.com>
Fri, 1 Feb 2019 17:03:23 +0000 (09:03 -0800)
committer Rodrigo Vivi <rodrigo.vivi@intel.com>
Fri, 1 Feb 2019 17:03:24 +0000 (09:03 -0800)
gvt-next-2019-02-01

- new VFIO EDID region support (Henry)

Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
From: Zhenyu Wang <zhenyuw@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190201061523.GE5588@zhen-hp.sh.intel.com
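
For reference, the register block that the new region exposes is the pre-existing VFIO GFX EDID UAPI from include/uapi/linux/vfio.h; the kvmgt.c hunks below read and write these fields directly (shown here for orientation only, not part of this series):

struct vfio_region_gfx_edid {
	__u32 edid_offset;	/* offset of the EDID blob within the region */
	__u32 edid_max_size;	/* largest blob the device accepts */
	__u32 edid_size;	/* current blob size, set by userspace */
	__u32 max_xres;		/* resolution hint advertised by the device */
	__u32 max_yres;
	__u32 link_state;
#define VFIO_DEVICE_GFX_LINK_STATE_UP		1
#define VFIO_DEVICE_GFX_LINK_STATE_DOWN		2
};
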
drivers/gpu/drm/i915/gvt/display.c
drivers/gpu/drm/i915/gvt/display.h
drivers/gpu/drm/i915/gvt/gvt.c
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/gvt/hypercall.h
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/gvt/mpt.h
drivers/gpu/drm/i915/gvt/vgpu.c

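Below is a minimal userspace sketch of how a VMM might drive the region once it has located it. The names device_fd, region_off, rd32, wr32 and program_edid are illustrative assumptions (the region lookup through VFIO_DEVICE_GET_REGION_INFO and its capability chain is not shown); the 4-byte register accesses match what handle_edid_regs() in this series accepts.

#include <linux/vfio.h>		/* struct vfio_region_gfx_edid, link-state defines */
#include <stddef.h>
#include <stdint.h>
#include <sys/types.h>
#include <unistd.h>

/* Hypothetical helpers: one register field per access, 4 bytes each. */
static int rd32(int fd, off_t off, uint32_t *val)
{
	return pread(fd, val, 4, off) == 4 ? 0 : -1;
}

static int wr32(int fd, off_t off, uint32_t val)
{
	return pwrite(fd, &val, 4, off) == 4 ? 0 : -1;
}

#define EDID_REG(field)	offsetof(struct vfio_region_gfx_edid, field)

/* Sketch: push a new EDID blob and replug the virtual monitor. */
static int program_edid(int device_fd, off_t region_off,
			const uint8_t *edid, uint32_t edid_len)
{
	uint32_t edid_offset, edid_max_size;

	if (rd32(device_fd, region_off + EDID_REG(edid_offset), &edid_offset) ||
	    rd32(device_fd, region_off + EDID_REG(edid_max_size), &edid_max_size))
		return -1;
	if (edid_len > edid_max_size)
		return -1;

	/* Take the link down while the blob changes (emulates unplug). */
	if (wr32(device_fd, region_off + EDID_REG(link_state),
		 VFIO_DEVICE_GFX_LINK_STATE_DOWN))
		return -1;

	/* Publish the new size first, then the blob itself. */
	if (wr32(device_fd, region_off + EDID_REG(edid_size), edid_len))
		return -1;
	if (pwrite(device_fd, edid, edid_len,
		   region_off + edid_offset) != (ssize_t)edid_len)
		return -1;

	/* Link up: GVT validates the blob and raises the virtual hotplug. */
	return wr32(device_fd, region_off + EDID_REG(link_state),
		    VFIO_DEVICE_GFX_LINK_STATE_UP);
}
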
drivers/gpu/drm/i915/gvt/display.c
index 4f25b6b7728ef32e181f3e89627c92d2d78d2033..035479e273beca866575c4bef70438583029d2df 100644
@@ -342,6 +342,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
        port->dpcd->data_valid = true;
        port->dpcd->data[DPCD_SINK_COUNT] = 0x1;
        port->type = type;
+       port->id = resolution;
 
        emulate_monitor_status_change(vgpu);
 
@@ -444,6 +445,36 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
        mutex_unlock(&gvt->lock);
 }
 
+/**
+ * intel_vgpu_emulate_hotplug - trigger hotplug event for vGPU
+ * @vgpu: a vGPU
+ * @connected: link state
+ *
+ * This function is used to trigger a hotplug interrupt for the vGPU
+ *
+ */
+void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
+{
+       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+       /* TODO: add support for more platforms */
+       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+               if (connected) {
+                       vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
+                               SFUSE_STRAP_DDID_DETECTED;
+                       vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
+               } else {
+                       vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
+                               ~SFUSE_STRAP_DDID_DETECTED;
+                       vgpu_vreg_t(vgpu, SDEISR) &= ~SDE_PORTD_HOTPLUG_CPT;
+               }
+               vgpu_vreg_t(vgpu, SDEIIR) |= SDE_PORTD_HOTPLUG_CPT;
+               vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+                               PORTD_HOTPLUG_STATUS_MASK;
+               intel_vgpu_trigger_virtual_event(vgpu, DP_D_HOTPLUG);
+       }
+}
+
 /**
  * intel_vgpu_clean_display - clean vGPU virtual display emulation
  * @vgpu: a vGPU
drivers/gpu/drm/i915/gvt/display.h
index ea7c1c525b8c36f8b57ed6bec9d05d6e13432cce..a87f33e6a23ca46708a6dd321c02aac70104a76e 100644
@@ -146,18 +146,19 @@ enum intel_vgpu_port_type {
        GVT_PORT_MAX
 };
 
+enum intel_vgpu_edid {
+       GVT_EDID_1024_768,
+       GVT_EDID_1920_1200,
+       GVT_EDID_NUM,
+};
+
 struct intel_vgpu_port {
        /* per display EDID information */
        struct intel_vgpu_edid_data *edid;
        /* per display DPCD information */
        struct intel_vgpu_dpcd_data *dpcd;
        int type;
-};
-
-enum intel_vgpu_edid {
-       GVT_EDID_1024_768,
-       GVT_EDID_1920_1200,
-       GVT_EDID_NUM,
+       enum intel_vgpu_edid id;
 };
 
 static inline char *vgpu_edid_str(enum intel_vgpu_edid id)
@@ -172,6 +173,30 @@ static inline char *vgpu_edid_str(enum intel_vgpu_edid id)
        }
 }
 
+static inline unsigned int vgpu_edid_xres(enum intel_vgpu_edid id)
+{
+       switch (id) {
+       case GVT_EDID_1024_768:
+               return 1024;
+       case GVT_EDID_1920_1200:
+               return 1920;
+       default:
+               return 0;
+       }
+}
+
+static inline unsigned int vgpu_edid_yres(enum intel_vgpu_edid id)
+{
+       switch (id) {
+       case GVT_EDID_1024_768:
+               return 768;
+       case GVT_EDID_1920_1200:
+               return 1200;
+       default:
+               return 0;
+       }
+}
+
 void intel_gvt_emulate_vblank(struct intel_gvt *gvt);
 void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt);
 
drivers/gpu/drm/i915/gvt/gvt.c
index 4e8947f33bd00db2a4c4e89fc59be35dc6ce1c5b..43f4242062dd8e613ca4003d743e7ea769bc7f65 100644
@@ -185,6 +185,7 @@ static const struct intel_gvt_ops intel_gvt_ops = {
        .vgpu_query_plane = intel_vgpu_query_plane,
        .vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
        .write_protect_handler = intel_vgpu_page_track_handler,
+       .emulate_hotplug = intel_vgpu_emulate_hotplug,
 };
 
 static void init_device_info(struct intel_gvt *gvt)
drivers/gpu/drm/i915/gvt/gvt.h
index fb9cc980e120507bfb52d5626049d0ccf5d265bc..8bce09de4b822354a68c57c10caef5fc01266757 100644
@@ -536,6 +536,8 @@ int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
 int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
                void *p_data, unsigned int bytes);
 
+void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected);
+
 static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar)
 {
        /* We are 64bit bar. */
@@ -577,6 +579,7 @@ struct intel_gvt_ops {
        int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
        int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
                                     unsigned int);
+       void (*emulate_hotplug)(struct intel_vgpu *vgpu, bool connected);
 };
 
 
drivers/gpu/drm/i915/gvt/hypercall.h
index 50798868ab15ae2a0c4e4ca7bd16d727d3b8c9b3..5e01cc8d9b166a2992f5bd9b5b942065d380d9de 100644
@@ -67,6 +67,7 @@ struct intel_gvt_mpt {
        int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
                             bool map);
        int (*set_opregion)(void *vgpu);
+       int (*set_edid)(void *vgpu, int port_num);
        int (*get_vfio_device)(void *vgpu);
        void (*put_vfio_device)(void *vgpu);
        bool (*is_valid_gfn)(unsigned long handle, unsigned long gfn);
drivers/gpu/drm/i915/gvt/kvmgt.c
index f8d44e8f86a65ed6066f8aa29cac8321276564cb..63eef86a2a85e28998ced72a1d516f23fbd69c02 100644
@@ -57,6 +57,8 @@ static const struct intel_gvt_ops *intel_gvt_ops;
 #define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
 #define VFIO_PCI_OFFSET_MASK    (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
 
+#define EDID_BLOB_OFFSET (PAGE_SIZE/2)
+
 #define OPREGION_SIGNATURE "IntelGraphicsMem"
 
 struct vfio_region;
@@ -76,6 +78,11 @@ struct vfio_region {
        void                            *data;
 };
 
+struct vfio_edid_region {
+       struct vfio_region_gfx_edid vfio_edid_regs;
+       void *edid_blob;
+};
+
 struct kvmgt_pgfn {
        gfn_t gfn;
        struct hlist_node hnode;
@@ -427,6 +434,111 @@ static const struct intel_vgpu_regops intel_vgpu_regops_opregion = {
        .release = intel_vgpu_reg_release_opregion,
 };
 
+static int handle_edid_regs(struct intel_vgpu *vgpu,
+                       struct vfio_edid_region *region, char *buf,
+                       size_t count, u16 offset, bool is_write)
+{
+       struct vfio_region_gfx_edid *regs = &region->vfio_edid_regs;
+       unsigned int data;
+
+       if (offset + count > sizeof(*regs))
+               return -EINVAL;
+
+       if (count != 4)
+               return -EINVAL;
+
+       if (is_write) {
+               data = *((unsigned int *)buf);
+               switch (offset) {
+               case offsetof(struct vfio_region_gfx_edid, link_state):
+                       if (data == VFIO_DEVICE_GFX_LINK_STATE_UP) {
+                               if (!drm_edid_block_valid(
+                                       (u8 *)region->edid_blob,
+                                       0,
+                                       true,
+                                       NULL)) {
+                                       gvt_vgpu_err("invalid EDID blob\n");
+                                       return -EINVAL;
+                               }
+                               intel_gvt_ops->emulate_hotplug(vgpu, true);
+                       } else if (data == VFIO_DEVICE_GFX_LINK_STATE_DOWN) {
+                               intel_gvt_ops->emulate_hotplug(vgpu, false);
+                       } else {
+                               gvt_vgpu_err("invalid EDID link state %d\n",
+                                       data);
+                               return -EINVAL;
+                       }
+                       regs->link_state = data;
+                       break;
+               case offsetof(struct vfio_region_gfx_edid, edid_size):
+                       if (data > regs->edid_max_size) {
+                               gvt_vgpu_err("EDID size is bigger than %d!\n",
+                                       regs->edid_max_size);
+                               return -EINVAL;
+                       }
+                       regs->edid_size = data;
+                       break;
+               default:
+                       /* read-only regs */
+                       gvt_vgpu_err("write to read-only EDID field at offset %d\n",
+                               offset);
+                       return -EPERM;
+               }
+       } else {
+               memcpy(buf, (char *)regs + offset, count);
+       }
+
+       return count;
+}
+
+static int handle_edid_blob(struct vfio_edid_region *region, char *buf,
+                       size_t count, u16 offset, bool is_write)
+{
+       if (offset + count > region->vfio_edid_regs.edid_size)
+               return -EINVAL;
+
+       if (is_write)
+               memcpy(region->edid_blob + offset, buf, count);
+       else
+               memcpy(buf, region->edid_blob + offset, count);
+
+       return count;
+}
+
+static size_t intel_vgpu_reg_rw_edid(struct intel_vgpu *vgpu, char *buf,
+               size_t count, loff_t *ppos, bool iswrite)
+{
+       int ret;
+       unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
+                       VFIO_PCI_NUM_REGIONS;
+       struct vfio_edid_region *region =
+               (struct vfio_edid_region *)vgpu->vdev.region[i].data;
+       loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+
+       if (pos < region->vfio_edid_regs.edid_offset) {
+               ret = handle_edid_regs(vgpu, region, buf, count, pos, iswrite);
+       } else {
+               pos -= EDID_BLOB_OFFSET;
+               ret = handle_edid_blob(region, buf, count, pos, iswrite);
+       }
+
+       if (ret < 0)
+               gvt_vgpu_err("failed to access EDID region\n");
+
+       return ret;
+}
+
+static void intel_vgpu_reg_release_edid(struct intel_vgpu *vgpu,
+                                       struct vfio_region *region)
+{
+       kfree(region->data);
+}
+
+static const struct intel_vgpu_regops intel_vgpu_regops_edid = {
+       .rw = intel_vgpu_reg_rw_edid,
+       .release = intel_vgpu_reg_release_edid,
+};
+
 static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
                unsigned int type, unsigned int subtype,
                const struct intel_vgpu_regops *ops,
@@ -493,6 +605,36 @@ static int kvmgt_set_opregion(void *p_vgpu)
        return ret;
 }
 
+static int kvmgt_set_edid(void *p_vgpu, int port_num)
+{
+       struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+       struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
+       struct vfio_edid_region *base;
+       int ret;
+
+       base = kzalloc(sizeof(*base), GFP_KERNEL);
+       if (!base)
+               return -ENOMEM;
+
+       /* TODO: Add multi-port and EDID extension block support */
+       base->vfio_edid_regs.edid_offset = EDID_BLOB_OFFSET;
+       base->vfio_edid_regs.edid_max_size = EDID_SIZE;
+       base->vfio_edid_regs.edid_size = EDID_SIZE;
+       base->vfio_edid_regs.max_xres = vgpu_edid_xres(port->id);
+       base->vfio_edid_regs.max_yres = vgpu_edid_yres(port->id);
+       base->edid_blob = port->edid->edid_block;
+
+       ret = intel_vgpu_register_reg(vgpu,
+                       VFIO_REGION_TYPE_GFX,
+                       VFIO_REGION_SUBTYPE_GFX_EDID,
+                       &intel_vgpu_regops_edid, EDID_SIZE,
+                       VFIO_REGION_INFO_FLAG_READ |
+                       VFIO_REGION_INFO_FLAG_WRITE |
+                       VFIO_REGION_INFO_FLAG_CAPS, base);
+
+       return ret;
+}
+
 static void kvmgt_put_vfio_device(void *vgpu)
 {
        if (WARN_ON(!((struct intel_vgpu *)vgpu)->vdev.vfio_device))
@@ -1874,6 +2016,7 @@ static struct intel_gvt_mpt kvmgt_mpt = {
        .dma_map_guest_page = kvmgt_dma_map_guest_page,
        .dma_unmap_guest_page = kvmgt_dma_unmap_guest_page,
        .set_opregion = kvmgt_set_opregion,
+       .set_edid = kvmgt_set_edid,
        .get_vfio_device = kvmgt_get_vfio_device,
        .put_vfio_device = kvmgt_put_vfio_device,
        .is_valid_gfn = kvmgt_is_valid_gfn,
drivers/gpu/drm/i915/gvt/mpt.h
index 9b4225d44243b9010836482133124721f4279c68..5d8b8f228d8f29c6834e27e9d6887ddf7e60a951 100644
@@ -313,6 +313,23 @@ static inline int intel_gvt_hypervisor_set_opregion(struct intel_vgpu *vgpu)
        return intel_gvt_host.mpt->set_opregion(vgpu);
 }
 
+/**
+ * intel_gvt_hypervisor_set_edid - Set EDID region for guest
+ * @vgpu: a vGPU
+ * @port_num: display port number
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_set_edid(struct intel_vgpu *vgpu,
+                                               int port_num)
+{
+       if (!intel_gvt_host.mpt->set_edid)
+               return 0;
+
+       return intel_gvt_host.mpt->set_edid(vgpu, port_num);
+}
+
 /**
  * intel_gvt_hypervisor_get_vfio_device - increase vfio device ref count
  * @vgpu: a vGPU
drivers/gpu/drm/i915/gvt/vgpu.c
index e1c860f80eb0593dbe1948f897f7c55cfe0d6c99..720e2b10adaa10f8ccc4c7472da4e300eb4b296f 100644
@@ -428,6 +428,12 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
        if (ret)
                goto out_clean_sched_policy;
 
+       /* TODO: add support for more platforms */
+       if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
+               ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
+       if (ret)
+               goto out_clean_sched_policy;
+
        return vgpu;
 
 out_clean_sched_policy: