Backmerge tag 'v4.11-rc4' into drm-next
author Dave Airlie <airlied@redhat.com>
Tue, 28 Mar 2017 07:34:19 +0000 (17:34 +1000)
committer Dave Airlie <airlied@redhat.com>
Tue, 28 Mar 2017 07:34:19 +0000 (17:34 +1000)
Linux 4.11-rc4

The i915 GVT team needs the rc4 code as a base for further work.

MAINTAINERS
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/exynos/exynos_drm_crtc.c
drivers/gpu/drm/exynos/exynos_drm_crtc.h
drivers/gpu/drm/exynos/exynos_drm_dsi.c
drivers/gpu/drm/exynos/exynos_drm_vidi.c
drivers/gpu/drm/i915/gvt/cmd_parser.c
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/i915_irq.c

diff --cc MAINTAINERS
Simple merge
diff --cc drivers/gpu/drm/drm_fb_helper.c
Simple merge
diff --cc drivers/gpu/drm/i915/gvt/cmd_parser.c
index da6bbca90d974f37f68becdf4869f56b45b9b28a,2b92cc8a7d1aa551778917ed038bc6aa7961ce3e..b3c9a478c6560fdb8aa97e2353a4a54fdce2dfd3
@@@ -2633,23 -2668,26 +2665,23 @@@ static int shadow_workload_ring_buffer(
        /* head > tail --> copy head <-> top */
        if (gma_head > gma_tail) {
                ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
 -                              gma_head, gma_top,
 -                              workload->shadow_ring_buffer_va);
 +                                    gma_head, gma_top, cs);
-               if (ret < 0) {
-                       gvt_err("fail to copy guest ring buffer\n");
+               if (ret) {
+                       gvt_vgpu_err("fail to copy guest ring buffer\n");
                        return ret;
                }
 -              copy_len = gma_top - gma_head;
 +              cs += ret / sizeof(u32);
                gma_head = workload->rb_start;
        }
  
        /* copy head or start <-> tail */
 -      ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
 -                      gma_head, gma_tail,
 -                      workload->shadow_ring_buffer_va + copy_len);
 +      ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail, cs);
-       if (ret < 0) {
-               gvt_err("fail to copy guest ring buffer\n");
+       if (ret) {
+               gvt_vgpu_err("fail to copy guest ring buffer\n");
                return ret;
        }
 -      ring->tail += workload->rb_len;
 -      intel_ring_advance(ring);
 +      cs += ret / sizeof(u32);
 +      intel_ring_advance(workload->req, cs);
        return 0;
  }
  
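Since combined diffs are hard to scan, here is a minimal sketch of how the resolved tail of shadow_workload_ring_buffer() reads after this backmerge. It only restates what the result ("+") columns above show, keeping the cs-based ring emission from drm-next together with the gvt_vgpu_err() reporting from -rc4, and it assumes the surrounding declarations (vgpu, workload, ret, gma_head, gma_tail, gma_top and the u32 *cs emission pointer) from the full file:

	/* Sketch of the merge resolution, not verbatim kernel source. */
	/* head > tail --> copy head <-> top */
	if (gma_head > gma_tail) {
		ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
				      gma_head, gma_top, cs);
		if (ret) {
			gvt_vgpu_err("fail to copy guest ring buffer\n");
			return ret;
		}
		cs += ret / sizeof(u32);
		gma_head = workload->rb_start;
	}

	/* copy head or start <-> tail */
	ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail, cs);
	if (ret) {
		gvt_vgpu_err("fail to copy guest ring buffer\n");
		return ret;
	}
	cs += ret / sizeof(u32);
	intel_ring_advance(workload->req, cs);
	return 0;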
diff --cc drivers/gpu/drm/i915/gvt/scheduler.c
index 811a84bdbafb4d75d200d7705a6fe2f962ae57a3,c4353ed86d4b30da172c3afc8fcbaf251a582437..ad8876bd15b3e27e71eeb17638fcae35707ee404
@@@ -224,7 -244,10 +244,10 @@@ out
                workload->status = ret;
  
        if (!IS_ERR_OR_NULL(rq))
 -              i915_add_request_no_flush(rq);
 +              i915_add_request(rq);
+       else
+               engine->context_unpin(engine, shadow_ctx);
        mutex_unlock(&dev_priv->drm.struct_mutex);
        return ret;
  }
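A similar note for the scheduler.c hunk: the resolution keeps the -rc4 change that unpins the shadow context when no request was created, while following drm-next where the _no_flush variant of i915_add_request() is gone. A minimal sketch of the resolved tail, assuming rq, engine, shadow_ctx, dev_priv and ret from the surrounding function:

	/* Sketch of the merge resolution, not verbatim kernel source. */
	if (!IS_ERR_OR_NULL(rq))
		i915_add_request(rq);                      /* submit the shadow request */
	else
		engine->context_unpin(engine, shadow_ctx); /* -rc4: unpin on the error path */
	mutex_unlock(&dev_priv->drm.struct_mutex);
	return ret;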
diff --cc drivers/gpu/drm/i915/i915_irq.c
index cb20c9408b1245cc1195f18cc6189f34cf0dbf63,b6c886ac901bd78cfa7beb1a3aaaf706821a6d2b..8163d5024ff82fe7b59bb512897da00b9462fcf1
@@@ -1098,12 -1061,18 +1098,13 @@@ static u32 vlv_wa_c0_ei(struct drm_i915
                return 0;
  
        vlv_c0_read(dev_priv, &now);
 -      if (now.cz_clock == 0)
 -              return 0;
  
 -      if (prev->cz_clock) {
 +      if (prev->ktime) {
                u64 time, c0;
 -              unsigned int mul;
 +              u32 render, media;
  
 -              mul = VLV_CZ_CLOCK_TO_MILLI_SEC * 100; /* scale to threshold% */
 -              if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
 -                      mul <<= 8;
 +              time = ktime_us_delta(now.ktime, prev->ktime);
 -              time = now.cz_clock - prev->cz_clock;
                time *= dev_priv->czclk_freq;
  
                /* Workload can be split between render + media,