Merge tag 'drm-intel-next-fixes-2017-04-27' of git://anongit.freedesktop.org/git/drm-intel
author Dave Airlie <airlied@redhat.com>
Fri, 28 Apr 2017 19:50:27 +0000 (05:50 +1000)
committer Dave Airlie <airlied@redhat.com>
Fri, 28 Apr 2017 19:50:27 +0000 (05:50 +1000)
drm/i915 and gvt fixes for drm-next/v4.12

* tag 'drm-intel-next-fixes-2017-04-27' of git://anongit.freedesktop.org/git/drm-intel:
  drm/i915: Confirm the request is still active before adding it to the await
  drm/i915: Avoid busy-spinning on VLV_GTLC_PW_STATUS mmio
  drm/i915/selftests: Allocate inode/file dynamically
  drm/i915: Fix system hang with EI UP masked on Haswell
  drm/i915: checking for NULL instead of IS_ERR() in mock selftests
  drm/i915: Perform link quality check unconditionally during long pulse
  drm/i915: Fix use after free in lpe_audio_platdev_destroy()
  drm/i915: Use the right mapping_gfp_mask for final shmem allocation
  drm/i915: Make legacy cursor updates more unsynced
  drm/i915: Apply a cond_resched() to the saturated signaler
  drm/i915: Park the signaler before sleeping
  drm/i915/gvt: fix a bounds check in ring_id_to_context_switch_event()
  drm/i915/gvt: Fix PTE write flush for taking runtime pm properly
  drm/i915/gvt: remove some debug messages in scheduler timer handler
  drm/i915/gvt: add mmio init for virtual display
  drm/i915/gvt: use directly assignment for structure copying
  drm/i915/gvt: remove redundant ring id check which cause significant CPU misprediction
  drm/i915/gvt: remove redundant platform check for mocs load/restore
  drm/i915/gvt: Align render mmio list to cacheline
  drm/i915/gvt: cleanup some too chatty scheduler message

1  2 
drivers/gpu/drm/i915/gvt/execlist.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_gem.c

index 536bde8638c817040d343be68c010cd5e9a2bf26,dc9aef3e92d462ca76c010f140302bb388e14844..dca989eb2d42ed48f6c13c15fe9d3f8a9cbfaab2
@@@ -56,8 -56,8 +56,8 @@@ static int context_switch_events[] = 
  
  static int ring_id_to_context_switch_event(int ring_id)
  {
-       if (WARN_ON(ring_id < RCS && ring_id >
-                               ARRAY_SIZE(context_switch_events)))
+       if (WARN_ON(ring_id < RCS ||
+                   ring_id >= ARRAY_SIZE(context_switch_events)))
                return -EINVAL;
  
        return context_switch_events[ring_id];
@@@ -687,9 -687,7 +687,7 @@@ static int submit_context(struct intel_
        }
  
        if (emulate_schedule_in)
-               memcpy(&workload->elsp_dwords,
-                               &vgpu->execlist[ring_id].elsp_dwords,
-                               sizeof(workload->elsp_dwords));
+               workload->elsp_dwords = vgpu->execlist[ring_id].elsp_dwords;
  
        gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
                        workload, ring_id, head, tail, start, ctl);
@@@ -776,8 -774,7 +774,8 @@@ static void init_vgpu_execlist(struct i
                        _EL_OFFSET_STATUS_PTR);
  
        ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
 -      ctx_status_ptr.read_ptr = ctx_status_ptr.write_ptr = 0x7;
 +      ctx_status_ptr.read_ptr = 0;
 +      ctx_status_ptr.write_ptr = 0x7;
        vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
  }
  
index 7b4fa84cbc3ce3918692b9e68b2b651e87d8ba6a,1aa26d5f1779db8a74baf8fe8c371e9835dbe599..3036d4835b0fa7a3b366a31d0b6ed18fc7889ae1
@@@ -1469,6 -1469,8 +1469,6 @@@ static int i915_drm_suspend(struct drm_
                goto out;
        }
  
 -      intel_guc_suspend(dev_priv);
 -
        intel_display_suspend(dev);
  
        intel_dp_mst_suspend(dev);
@@@ -2175,6 -2177,20 +2175,20 @@@ static void vlv_restore_gunit_s0ix_stat
        I915_WRITE(VLV_GUNIT_CLOCK_GATE2,       s->clock_gate_dis2);
  }
  
+ static int vlv_wait_for_pw_status(struct drm_i915_private *dev_priv,
+                                 u32 mask, u32 val)
+ {
+       /* The HW does not like us polling for PW_STATUS frequently, so
+        * use the sleeping loop rather than risk the busy spin within
+        * intel_wait_for_register().
+        *
+        * Transitioning between RC6 states should be at most 2ms (see
+        * valleyview_enable_rps) so use a 3ms timeout.
+        */
+       return wait_for((I915_READ_NOTRACE(VLV_GTLC_PW_STATUS) & mask) == val,
+                       3);
+ }
  int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
  {
        u32 val;
  
  static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
  {
+       u32 mask;
        u32 val;
-       int err = 0;
+       int err;
  
        val = I915_READ(VLV_GTLC_WAKE_CTRL);
        val &= ~VLV_GTLC_ALLOWWAKEREQ;
        I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
        POSTING_READ(VLV_GTLC_WAKE_CTRL);
  
-       err = intel_wait_for_register(dev_priv,
-                                     VLV_GTLC_PW_STATUS,
-                                     VLV_GTLC_ALLOWWAKEACK,
-                                     allow,
-                                     1);
+       mask = VLV_GTLC_ALLOWWAKEACK;
+       val = allow ? mask : 0;
+       err = vlv_wait_for_pw_status(dev_priv, mask, val);
        if (err)
                DRM_ERROR("timeout disabling GT waking\n");
  
        return err;
  }
  
- static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
-                                bool wait_for_on)
+ static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
+                                 bool wait_for_on)
  {
        u32 mask;
        u32 val;
-       int err;
  
        mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
        val = wait_for_on ? mask : 0;
-       if ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
-               return 0;
-       DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
-                     onoff(wait_for_on),
-                     I915_READ(VLV_GTLC_PW_STATUS));
  
        /*
         * RC6 transitioning can be delayed up to 2 msec (see
         * valleyview_enable_rps), use 3 msec for safety.
         */
-       err = intel_wait_for_register(dev_priv,
-                                     VLV_GTLC_PW_STATUS, mask, val,
-                                     3);
-       if (err)
+       if (vlv_wait_for_pw_status(dev_priv, mask, val))
                DRM_ERROR("timeout waiting for GT wells to go %s\n",
                          onoff(wait_for_on));
-       return err;
  }
  
  static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
@@@ -2272,7 -2276,7 +2274,7 @@@ static int vlv_suspend_complete(struct 
         * Bspec defines the following GT well on flags as debug only, so
         * don't treat them as hard failures.
         */
-       (void)vlv_wait_for_gt_wells(dev_priv, false);
+       vlv_wait_for_gt_wells(dev_priv, false);
  
        mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
        WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
index 92343343044fdcbaeb0d55de0ee3d8bc310d3d5a,28b92017b1eafc1afe792c93274718e194ffc200..532a577ff7a14e8f56613f605bb515833f7cc615
@@@ -2340,7 -2340,7 +2340,7 @@@ rebuild_st
                         * defer the oom here by reporting the ENOMEM back
                         * to userspace.
                         */
-                       reclaim = mapping_gfp_constraint(mapping, 0);
+                       reclaim = mapping_gfp_mask(mapping);
                        reclaim |= __GFP_NORETRY; /* reclaim, but no oom */
  
                        page = shmem_read_mapping_page_gfp(mapping, i, reclaim);
@@@ -4456,8 -4456,6 +4456,8 @@@ int i915_gem_suspend(struct drm_i915_pr
        i915_gem_context_lost(dev_priv);
        mutex_unlock(&dev->struct_mutex);
  
 +      intel_guc_suspend(dev_priv);
 +
        cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
        cancel_delayed_work_sync(&dev_priv->gt.retire_work);