Merge tag 'drm-next-2018-08-17' of git://anongit.freedesktop.org/drm/drm
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 17 Aug 2018 19:10:22 +0000 (12:10 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 17 Aug 2018 19:10:22 +0000 (12:10 -0700)
Pull drm fixes from Dave Airlie:
 "First round of fixes for -rc1. I'll follow this up with the msm new hw
  support pull request.

  This just has three sets of fixes, some for msm before the new hw, a
  bunch of AMD fixes (including some required firmware changes for new
  hw), and a set of i915 (+gvt) fixes"

* tag 'drm-next-2018-08-17' of git://anongit.freedesktop.org/drm/drm: (30 commits)
  drm/amdgpu: Use kvmalloc for allocating UVD/VCE/VCN BO backup memory
  drm/i915: set DP Main Stream Attribute for color range on DDI platforms
  drm/i915/selftests: Hold rpm for unparking
  drm/i915: Restore user forcewake domains across suspend
  drm/i915: Unmask user interrupts writes into HWSP on snb/ivb/vlv/hsw
  drm/i915/gvt: fix memory leak in intel_vgpu_ioctl()
  drm/i915/gvt: Off by one in intel_vgpu_write_fence()
  drm/i915/kvmgt: Fix potential Spectre v1
  drm/i915/gvt: return error on cmd access
  drm/i915/gvt: initialize dmabuf mutex in vgpu_create
  drm/i915/gvt: fix cleanup sequence in intel_gvt_clean_device
  drm/amd/display: Guard against null crtc in CRC IRQ
  drm/amd/display: Pass connector id when executing VBIOS CT
  drm/amd/display: Check if clock source in use before disabling
  drm/amd/display: Allow clock sharing b/w HDMI and DVI
  drm/amd/display: Fix warning observed in mode change on Vega
  drm/amd/display: fix single link DVI has no display
  drm/amdgpu/vce: VCE entity initialization relies on ring initialization
  drm/amdgpu/uvd: UVD entity initialization relies on ring initialization
  drm/amdgpu: add VCN booting with firmware loaded by PSP
  ...

44 files changed:
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
drivers/gpu/drm/amd/display/dc/inc/resource.h
drivers/gpu/drm/i915/gvt/aperture_gm.c
drivers/gpu/drm/i915/gvt/cmd_parser.c
drivers/gpu/drm/i915/gvt/gvt.c
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/gvt/scheduler.h
drivers/gpu/drm/i915/gvt/vgpu.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/i915/intel_uncore.h
drivers/gpu/drm/i915/selftests/i915_gem_object.c
drivers/gpu/drm/i915/selftests/intel_uncore.c
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c
drivers/gpu/drm/msm/msm_drv.h
drivers/gpu/drm/msm/msm_gem.c

index 9f1a5bd39ae8a8667d31fe025bdf3f0e2fc5b99b..5b39d139963046bb851df3c0ebdb4746c50ed8ae 100644 (file)
@@ -131,6 +131,11 @@ psp_cmd_submit_buf(struct psp_context *psp,
                msleep(1);
        }
 
+       if (ucode) {
+               ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
+               ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
+       }
+
        return ret;
 }
 
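The hunk above records the PSP firmware response: after loading an IP firmware, the PSP reports where in the training memory region (TMR) it placed the ucode, and psp_cmd_submit_buf() now stashes the two address halves in the per-firmware info. vcn_v1_0_mc_resume() further down consumes them. A minimal sketch of reassembling the halves, assuming only the fields added in this series (the helper itself is hypothetical):

	/* Hypothetical helper, not part of this series: rebuild the 64-bit
	 * TMR MC address from the lo/hi halves saved off the PSP response. */
	static inline uint64_t amdgpu_ucode_tmr_addr(const struct amdgpu_firmware_info *ucode)
	{
		return ((uint64_t)ucode->tmr_mc_addr_hi << 32) | ucode->tmr_mc_addr_lo;
	}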
index 08e38579af246c57c378c42526b36bdbaf365244..bdc472b6e64136ca5d0211429aee15cff6de152d 100644 (file)
@@ -194,6 +194,7 @@ enum AMDGPU_UCODE_ID {
        AMDGPU_UCODE_ID_SMC,
        AMDGPU_UCODE_ID_UVD,
        AMDGPU_UCODE_ID_VCE,
+       AMDGPU_UCODE_ID_VCN,
        AMDGPU_UCODE_ID_MAXIMUM,
 };
 
@@ -226,6 +227,9 @@ struct amdgpu_firmware_info {
        void *kaddr;
        /* ucode_size_bytes */
        uint32_t ucode_size;
+       /* starting tmr mc address */
+       uint32_t tmr_mc_addr_lo;
+       uint32_t tmr_mc_addr_hi;
 };
 
 void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr);
index 632fa5980ff44ab73519a9c8fe8e21baca6fbecc..e5a6db6beab7acfa364fedac5d0ac538458a4570 100644 (file)
@@ -122,8 +122,6 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
 
 int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *ring;
-       struct drm_sched_rq *rq;
        unsigned long bo_size;
        const char *fw_name;
        const struct common_firmware_header *hdr;
@@ -266,13 +264,6 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
                }
        }
 
-       ring = &adev->uvd.inst[0].ring;
-       rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-       r = drm_sched_entity_init(&adev->uvd.entity, &rq, 1, NULL);
-       if (r) {
-               DRM_ERROR("Failed setting up UVD kernel entity.\n");
-               return r;
-       }
        for (i = 0; i < adev->uvd.max_handles; ++i) {
                atomic_set(&adev->uvd.handles[i], 0);
                adev->uvd.filp[i] = NULL;
@@ -311,7 +302,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
        for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
                if (adev->uvd.harvest_config & (1 << j))
                        continue;
-               kfree(adev->uvd.inst[j].saved_bo);
+               kvfree(adev->uvd.inst[j].saved_bo);
 
                amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
                                      &adev->uvd.inst[j].gpu_addr,
@@ -327,6 +318,29 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
        return 0;
 }
 
+/**
+ * amdgpu_uvd_entity_init - init entity
+ *
+ * @adev: amdgpu_device pointer
+ *
+ */
+int amdgpu_uvd_entity_init(struct amdgpu_device *adev)
+{
+       struct amdgpu_ring *ring;
+       struct drm_sched_rq *rq;
+       int r;
+
+       ring = &adev->uvd.inst[0].ring;
+       rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+       r = drm_sched_entity_init(&adev->uvd.entity, &rq, 1, NULL);
+       if (r) {
+               DRM_ERROR("Failed setting up UVD kernel entity.\n");
+               return r;
+       }
+
+       return 0;
+}
+
 int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 {
        unsigned size;
@@ -354,7 +368,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
                size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
                ptr = adev->uvd.inst[j].cpu_addr;
 
-               adev->uvd.inst[j].saved_bo = kmalloc(size, GFP_KERNEL);
+               adev->uvd.inst[j].saved_bo = kvmalloc(size, GFP_KERNEL);
                if (!adev->uvd.inst[j].saved_bo)
                        return -ENOMEM;
 
@@ -380,7 +394,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
 
                if (adev->uvd.inst[i].saved_bo != NULL) {
                        memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
-                       kfree(adev->uvd.inst[i].saved_bo);
+                       kvfree(adev->uvd.inst[i].saved_bo);
                        adev->uvd.inst[i].saved_bo = NULL;
                } else {
                        const struct common_firmware_header *hdr;
index 33c5f806f9256a004235833268ad4d5a281ca644..a3ab1a41060f56654fe54ee8e38e2453610797bd 100644 (file)
@@ -69,6 +69,7 @@ struct amdgpu_uvd {
 
 int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
 int amdgpu_uvd_sw_fini(struct amdgpu_device *adev);
+int amdgpu_uvd_entity_init(struct amdgpu_device *adev);
 int amdgpu_uvd_suspend(struct amdgpu_device *adev);
 int amdgpu_uvd_resume(struct amdgpu_device *adev);
 int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
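Pulling entity setup out of amdgpu_uvd_sw_init() exists so callers can order it after amdgpu_ring_init(): the run queue handed to drm_sched_entity_init() lives inside ring->sched, which is only valid once the ring is up (hence the two "relies on ring initialization" commits in the shortlog). A condensed sketch of the call order the uvd_v*_0_sw_init() hunks below establish:

	/* Sketch: the scheduler entity must be created after the ring,
	 * because its run queue points into ring->sched. */
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
	if (r)
		return r;			/* ring->sched is valid from here on */
	r = amdgpu_uvd_entity_init(adev);	/* now safe to pick its rq */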
index b6ab4f5350c8098836c5f6758f2ef2387efea596..0cc5190f4f36e4a1b192a17b274bbd4fe38c00b6 100644 (file)
@@ -90,8 +90,6 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work);
  */
 int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
 {
-       struct amdgpu_ring *ring;
-       struct drm_sched_rq *rq;
        const char *fw_name;
        const struct common_firmware_header *hdr;
        unsigned ucode_version, version_major, version_minor, binary_id;
@@ -188,14 +186,6 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
                return r;
        }
 
-       ring = &adev->vce.ring[0];
-       rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-       r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
-       if (r != 0) {
-               DRM_ERROR("Failed setting up VCE run queue.\n");
-               return r;
-       }
-
        for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
                atomic_set(&adev->vce.handles[i], 0);
                adev->vce.filp[i] = NULL;
@@ -235,6 +225,29 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
        return 0;
 }
 
+/**
+ * amdgpu_vce_entity_init - init entity
+ *
+ * @adev: amdgpu_device pointer
+ *
+ */
+int amdgpu_vce_entity_init(struct amdgpu_device *adev)
+{
+       struct amdgpu_ring *ring;
+       struct drm_sched_rq *rq;
+       int r;
+
+       ring = &adev->vce.ring[0];
+       rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+       r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
+       if (r != 0) {
+               DRM_ERROR("Failed setting up VCE run queue.\n");
+               return r;
+       }
+
+       return 0;
+}
+
 /**
  * amdgpu_vce_suspend - unpin VCE fw memory
  *
index 71781267ee4c00d7ceb7c5271c7f809503c1a1ec..a1f209eed4c477498ee932a0f02e47fdff993137 100644 (file)
@@ -55,6 +55,7 @@ struct amdgpu_vce {
 
 int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size);
 int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
+int amdgpu_vce_entity_init(struct amdgpu_device *adev);
 int amdgpu_vce_suspend(struct amdgpu_device *adev);
 int amdgpu_vce_resume(struct amdgpu_device *adev);
 int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
index 798648a1971080040e2dc5140f1a7349440c6cc2..fd654a4406db964da6611f6c2d3d9db1fb283519 100644 (file)
@@ -111,9 +111,10 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
                        version_major, version_minor, family_id);
        }
 
-       bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
-                 +  AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
+       bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
                  +  AMDGPU_VCN_SESSION_SIZE * 40;
+       if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+               bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
        r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
                                    &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
@@ -129,7 +130,7 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
 {
        int i;
 
-       kfree(adev->vcn.saved_bo);
+       kvfree(adev->vcn.saved_bo);
 
        amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
                              &adev->vcn.gpu_addr,
@@ -160,7 +161,7 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
        size = amdgpu_bo_size(adev->vcn.vcpu_bo);
        ptr = adev->vcn.cpu_addr;
 
-       adev->vcn.saved_bo = kmalloc(size, GFP_KERNEL);
+       adev->vcn.saved_bo = kvmalloc(size, GFP_KERNEL);
        if (!adev->vcn.saved_bo)
                return -ENOMEM;
 
@@ -182,18 +183,20 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
 
        if (adev->vcn.saved_bo != NULL) {
                memcpy_toio(ptr, adev->vcn.saved_bo, size);
-               kfree(adev->vcn.saved_bo);
+               kvfree(adev->vcn.saved_bo);
                adev->vcn.saved_bo = NULL;
        } else {
                const struct common_firmware_header *hdr;
                unsigned offset;
 
                hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
-               offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
-               memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
-                           le32_to_cpu(hdr->ucode_size_bytes));
-               size -= le32_to_cpu(hdr->ucode_size_bytes);
-               ptr += le32_to_cpu(hdr->ucode_size_bytes);
+               if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+                       offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
+                       memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
+                                   le32_to_cpu(hdr->ucode_size_bytes));
+                       size -= le32_to_cpu(hdr->ucode_size_bytes);
+                       ptr += le32_to_cpu(hdr->ucode_size_bytes);
+               }
                memset_io(ptr, 0, size);
        }
 
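The kmalloc-to-kvmalloc conversion in the UVD/VCE/VCN suspend paths matters because the VCPU BO backups can run to several megabytes, where a physically contiguous kmalloc() may fail on a fragmented system; kvmalloc() falls back to vmalloc(), and kvfree() releases either kind. A minimal sketch of the pattern, under the assumption that size is large:

	#include <linux/mm.h>	/* kvmalloc, kvfree */

	/* Sketch: back up a large BO without demanding contiguous pages. */
	static void *backup_bo(const void __iomem *ptr, size_t size)
	{
		void *saved = kvmalloc(size, GFP_KERNEL); /* vmalloc fallback */

		if (saved)
			memcpy_fromio(saved, ptr, size);
		return saved;	/* release with kvfree(), never kfree() */
	}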
index 0ff136d02d9b36af1b9a88693c25e95c5e906d03..02be34e72ed906d1e2caea46e31976f4a4a75ea3 100644 (file)
@@ -88,6 +88,9 @@ psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *
        case AMDGPU_UCODE_ID_VCE:
                *type = GFX_FW_TYPE_VCE;
                break;
+       case AMDGPU_UCODE_ID_VCN:
+               *type = GFX_FW_TYPE_VCN;
+               break;
        case AMDGPU_UCODE_ID_MAXIMUM:
        default:
                return -EINVAL;
index 6fed3d7797a8da2a51ceac38f7e2c467d7b6a33d..8a926d1df939a43a2531f2a3d5c9fbf550dff1d9 100644 (file)
@@ -123,6 +123,10 @@ static int uvd_v4_2_sw_init(void *handle)
        ring = &adev->uvd.inst->ring;
        sprintf(ring->name, "uvd");
        r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+       if (r)
+               return r;
+
+       r = amdgpu_uvd_entity_init(adev);
 
        return r;
 }
index aeaa1ca46a99dc1a4801f921ea8942ad30ca1d56..50248059412e78353d2c653819f8b3b311c8214e 100644 (file)
@@ -120,6 +120,10 @@ static int uvd_v5_0_sw_init(void *handle)
        ring = &adev->uvd.inst->ring;
        sprintf(ring->name, "uvd");
        r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+       if (r)
+               return r;
+
+       r = amdgpu_uvd_entity_init(adev);
 
        return r;
 }
index 598dbeaba63686e2e2476528adace86d035b3f70..6ae82cc2e55e007cd8b4af958f6e0104510455ae 100644 (file)
@@ -440,6 +440,8 @@ static int uvd_v6_0_sw_init(void *handle)
                }
        }
 
+       r = amdgpu_uvd_entity_init(adev);
+
        return r;
 }
 
index 5fab3560a71db8b3b4d6116c2de0e94337beb208..9b7f8469bc5c081baae8955b23240a8b57628cbe 100644 (file)
@@ -410,6 +410,7 @@ static int uvd_v7_0_early_init(void *handle)
 static int uvd_v7_0_sw_init(void *handle)
 {
        struct amdgpu_ring *ring;
+
        int i, j, r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
@@ -478,6 +479,10 @@ static int uvd_v7_0_sw_init(void *handle)
                }
        }
 
+       r = amdgpu_uvd_entity_init(adev);
+       if (r)
+               return r;
+
        r = amdgpu_virt_alloc_mm_table(adev);
        if (r)
                return r;
index d48e877b682e8f1ba18ed7d659c83229cfb94e8f..7eaa54ba016b7848fc3c5f09697407599eac2fd8 100644 (file)
@@ -439,6 +439,8 @@ static int vce_v2_0_sw_init(void *handle)
                        return r;
        }
 
+       r = amdgpu_vce_entity_init(adev);
+
        return r;
 }
 
index cc6ce6cc03f47968345e6afe0d44003308ed2ecf..c8390f9adfd6ce750e1deb179692d0a5cdc3867c 100644 (file)
@@ -448,6 +448,8 @@ static int vce_v3_0_sw_init(void *handle)
                        return r;
        }
 
+       r = amdgpu_vce_entity_init(adev);
+
        return r;
 }
 
index 65f8860169e95b4030b97c4c8de563e21c93dfe5..2e4d1b5f62431fed6f8a05b0b14e229ae21f060a 100644 (file)
@@ -419,6 +419,7 @@ static int vce_v4_0_sw_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring;
+
        unsigned size;
        int r, i;
 
@@ -438,7 +439,7 @@ static int vce_v4_0_sw_init(void *handle)
                const struct common_firmware_header *hdr;
                unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);
 
-               adev->vce.saved_bo = kmalloc(size, GFP_KERNEL);
+               adev->vce.saved_bo = kvmalloc(size, GFP_KERNEL);
                if (!adev->vce.saved_bo)
                        return -ENOMEM;
 
@@ -474,6 +475,11 @@ static int vce_v4_0_sw_init(void *handle)
                        return r;
        }
 
+
+       r = amdgpu_vce_entity_init(adev);
+       if (r)
+               return r;
+
        r = amdgpu_virt_alloc_mm_table(adev);
        if (r)
                return r;
@@ -490,7 +496,7 @@ static int vce_v4_0_sw_fini(void *handle)
        amdgpu_virt_free_mm_table(adev);
 
        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
-               kfree(adev->vce.saved_bo);
+               kvfree(adev->vce.saved_bo);
                adev->vce.saved_bo = NULL;
        }
 
index 2ce91a748c4028867c64d7f8d5940fc3e6787cd0..072371ef597595505be617b680916266086a0eba 100644 (file)
@@ -100,6 +100,16 @@ static int vcn_v1_0_sw_init(void *handle)
        if (r)
                return r;
 
+       if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+               const struct common_firmware_header *hdr;
+               hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+               adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
+               adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
+               adev->firmware.fw_size +=
+                       ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
+               DRM_INFO("PSP loading VCN firmware\n");
+       }
+
        r = amdgpu_vcn_resume(adev);
        if (r)
                return r;
@@ -265,26 +275,38 @@ static int vcn_v1_0_resume(void *handle)
 static void vcn_v1_0_mc_resume(struct amdgpu_device *adev)
 {
        uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
-
-       WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+       uint32_t offset;
+
+       if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+               WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+                            (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
+               WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+                            (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
+               WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
+               offset = 0;
+       } else {
+               WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
                        lower_32_bits(adev->vcn.gpu_addr));
-       WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+               WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
                        upper_32_bits(adev->vcn.gpu_addr));
-       WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
-                               AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+               offset = size;
+               WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
+                            AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+       }
+
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);
 
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
-                       lower_32_bits(adev->vcn.gpu_addr + size));
+                    lower_32_bits(adev->vcn.gpu_addr + offset));
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
-                       upper_32_bits(adev->vcn.gpu_addr + size));
+                    upper_32_bits(adev->vcn.gpu_addr + offset));
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_HEAP_SIZE);
 
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
-                       lower_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE));
+                    lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE));
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
-                       upper_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE));
+                    upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE));
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2,
                        AMDGPU_VCN_STACK_SIZE + (AMDGPU_VCN_SESSION_SIZE * 40));
index 52f2c01349e38214c9bad2e3e639a69717828d0a..9bfb040352e9875584b4efbb137c84e2fb7a0bf9 100644 (file)
@@ -98,10 +98,16 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name,
  */
 void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc)
 {
-       struct dm_crtc_state *crtc_state = to_dm_crtc_state(crtc->state);
-       struct dc_stream_state *stream_state = crtc_state->stream;
+       struct dm_crtc_state *crtc_state;
+       struct dc_stream_state *stream_state;
        uint32_t crcs[3];
 
+       if (crtc == NULL)
+               return;
+
+       crtc_state = to_dm_crtc_state(crtc->state);
+       stream_state = crtc_state->stream;
+
        /* Early return if CRC capture is not enabled. */
        if (!crtc_state->crc_enabled)
                return;
index a38e7ad36a7e9500e3a39f2ddb56d11f688c01a6..326b3e99b7e4e63a8067207c368a546617050679 100644 (file)
@@ -1812,6 +1812,8 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
        bool is_vga_mode = (stream->timing.h_addressable == 640)
                        && (stream->timing.v_addressable == 480);
 
+       if (stream->phy_pix_clk == 0)
+               stream->phy_pix_clk = stream->timing.pix_clk_khz;
        if (stream->phy_pix_clk > 340000)
                is_over_340mhz = true;
 
index 4ca41d6e3bcf2cb89dce5227eed4cf55add06236..1644f2a946b09e79dfccf8306b887496e517df43 100644 (file)
@@ -268,24 +268,30 @@ bool resource_construct(
 
        return true;
 }
+static int find_matching_clock_source(
+               const struct resource_pool *pool,
+               struct clock_source *clock_source)
+{
+
+       int i;
 
+       for (i = 0; i < pool->clk_src_count; i++) {
+               if (pool->clock_sources[i] == clock_source)
+                       return i;
+       }
+       return -1;
+}
 
 void resource_unreference_clock_source(
                struct resource_context *res_ctx,
                const struct resource_pool *pool,
                struct clock_source *clock_source)
 {
-       int i;
-
-       for (i = 0; i < pool->clk_src_count; i++) {
-               if (pool->clock_sources[i] != clock_source)
-                       continue;
+       int i = find_matching_clock_source(pool, clock_source);
 
+       if (i > -1)
                res_ctx->clock_source_ref_count[i]--;
 
-               break;
-       }
-
        if (pool->dp_clock_source == clock_source)
                res_ctx->dp_clock_source_ref_count--;
 }
@@ -295,19 +301,31 @@ void resource_reference_clock_source(
                const struct resource_pool *pool,
                struct clock_source *clock_source)
 {
-       int i;
-       for (i = 0; i < pool->clk_src_count; i++) {
-               if (pool->clock_sources[i] != clock_source)
-                       continue;
+       int i = find_matching_clock_source(pool, clock_source);
 
+       if (i > -1)
                res_ctx->clock_source_ref_count[i]++;
-               break;
-       }
 
        if (pool->dp_clock_source == clock_source)
                res_ctx->dp_clock_source_ref_count++;
 }
 
+int resource_get_clock_source_reference(
+               struct resource_context *res_ctx,
+               const struct resource_pool *pool,
+               struct clock_source *clock_source)
+{
+       int i = find_matching_clock_source(pool, clock_source);
+
+       if (i > -1)
+               return res_ctx->clock_source_ref_count[i];
+
+       if (pool->dp_clock_source == clock_source)
+               return res_ctx->dp_clock_source_ref_count;
+
+       return -1;
+}
+
 bool resource_are_streams_timing_synchronizable(
        struct dc_stream_state *stream1,
        struct dc_stream_state *stream2)
@@ -372,11 +390,11 @@ static bool is_sharable_clk_src(
                return false;
 
        if (dc_is_hdmi_signal(pipe_with_clk_src->stream->signal)
-                       && dc_is_dvi_signal(pipe->stream->signal))
+                       && dc_is_dual_link_signal(pipe->stream->signal))
                return false;
 
        if (dc_is_hdmi_signal(pipe->stream->signal)
-                       && dc_is_dvi_signal(pipe_with_clk_src->stream->signal))
+                       && dc_is_dual_link_signal(pipe_with_clk_src->stream->signal))
                return false;
 
        if (!resource_are_streams_timing_synchronizable(
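Factoring the lookup into find_matching_clock_source() lets the reference, unreference, and new get helpers share one scan, and the getter is what enables the "check if in use before disabling" fix: dce110_reset_hw_ctx_wrap() (below) only powers a clock source down once its count reaches zero. A hedged sketch of the resulting discipline:

	/* Sketch: references are balanced, and power-down waits for the
	 * count to drain so HDMI/DVI sharing stays safe. */
	resource_reference_clock_source(res_ctx, pool, clk);	/* count++ */
	/* ... stream runs ... */
	resource_unreference_clock_source(res_ctx, pool, clk);	/* count-- */
	if (resource_get_clock_source_reference(res_ctx, pool, clk) == 0)
		clk->funcs->cs_power_down(clk);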
index 752b3d62e793d02016ef45dc0c92a35ffcd0d3c2..eff7d22d78fb16d1da638cb513a7c76dcd6a3932 100644 (file)
@@ -930,7 +930,7 @@ void dce110_link_encoder_enable_tmds_output(
        enum bp_result result;
 
        /* Enable the PHY */
-
+       cntl.connector_obj_id = enc110->base.connector;
        cntl.action = TRANSMITTER_CONTROL_ENABLE;
        cntl.engine_id = enc->preferred_engine;
        cntl.transmitter = enc110->base.transmitter;
@@ -972,7 +972,7 @@ void dce110_link_encoder_enable_dp_output(
         * We need to set number of lanes manually.
         */
        configure_encoder(enc110, link_settings);
-
+       cntl.connector_obj_id = enc110->base.connector;
        cntl.action = TRANSMITTER_CONTROL_ENABLE;
        cntl.engine_id = enc->preferred_engine;
        cntl.transmitter = enc110->base.transmitter;
index 1d98e3678b04cda450f06003c87c8f68fadac851..5450d4d38e8a49a3f59331eb0388b8d3fff54264 100644 (file)
@@ -1908,7 +1908,9 @@ static void dce110_reset_hw_ctx_wrap(
                        pipe_ctx_old->plane_res.mi->funcs->free_mem_input(
                                        pipe_ctx_old->plane_res.mi, dc->current_state->stream_count);
 
-                       if (old_clk)
+                       if (old_clk && 0 == resource_get_clock_source_reference(&context->res_ctx,
+                                                                               dc->res_pool,
+                                                                               old_clk))
                                old_clk->funcs->cs_power_down(old_clk);
 
                        dc->hwss.disable_plane(dc, pipe_ctx_old);
index 2ea490f8482e04f2951c82ea8b27c9332835bb8f..04b866f0fa1f4ba661ac31e5192c21b1fd1b65ba 100644 (file)
@@ -772,7 +772,7 @@ void dce120_tg_set_blank(struct timing_generator *tg,
 
        CRTC_REG_SET(
                CRTC0_CRTC_DOUBLE_BUFFER_CONTROL,
-               CRTC_BLANK_DATA_DOUBLE_BUFFER_EN, 0);
+               CRTC_BLANK_DATA_DOUBLE_BUFFER_EN, 1);
 
        if (enable_blanking)
                CRTC_REG_SET(CRTC0_CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
index e92facbd038f0ea559bf9cd910505ca0a8fc8a60..5b321008b0b54c0a7dce51854979fc89e09ba9b1 100644 (file)
@@ -103,6 +103,11 @@ void resource_reference_clock_source(
                const struct resource_pool *pool,
                struct clock_source *clock_source);
 
+int resource_get_clock_source_reference(
+               struct resource_context *res_ctx,
+               const struct resource_pool *pool,
+               struct clock_source *clock_source);
+
 bool resource_are_streams_timing_synchronizable(
                struct dc_stream_state *stream1,
                struct dc_stream_state *stream2);
index 380eeb2a0e83c60c1067a6d7150bd8608662d5b1..fe754022e356b033c2fa62af06c28f3612197df5 100644 (file)
@@ -131,7 +131,7 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
 
        assert_rpm_wakelock_held(dev_priv);
 
-       if (WARN_ON(fence > vgpu_fence_sz(vgpu)))
+       if (WARN_ON(fence >= vgpu_fence_sz(vgpu)))
                return;
 
        reg = vgpu->fence.regs[fence];
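vgpu->fence.regs[] holds vgpu_fence_sz(vgpu) entries, so the valid indices run from 0 to size - 1; the old '>' test accepted fence == size and read one element past the end of the array. In miniature, with a hypothetical size of 4:

	u32 fence = 4;		/* one past the last valid index, 3 */
	WARN_ON(fence > 4);	/* old check: false, OOB access follows */
	WARN_ON(fence >= 4);	/* new check: true, access is rejected */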
index 45e89b1e048183164e218c5aa868dca458ab93a4..a614db310ea276a5deca674363c09db44e79ac79 100644 (file)
@@ -874,7 +874,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
        if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
                gvt_vgpu_err("%s access to non-render register (%x)\n",
                                cmd, offset);
-               return 0;
+               return -EBADRQC;
        }
 
        if (is_shadowed_mmio(offset)) {
index 712f9d14e7200678228065bd16d5d0bdec7a009b..46c8b720e336317f5fdf1ce51553031112513295 100644 (file)
@@ -176,6 +176,7 @@ static const struct intel_gvt_ops intel_gvt_ops = {
        .emulate_mmio_write = intel_vgpu_emulate_mmio_write,
        .vgpu_create = intel_gvt_create_vgpu,
        .vgpu_destroy = intel_gvt_destroy_vgpu,
+       .vgpu_release = intel_gvt_release_vgpu,
        .vgpu_reset = intel_gvt_reset_vgpu,
        .vgpu_activate = intel_gvt_activate_vgpu,
        .vgpu_deactivate = intel_gvt_deactivate_vgpu,
@@ -315,6 +316,11 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
        if (WARN_ON(!gvt))
                return;
 
+       intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
+       intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
+       intel_gvt_cleanup_vgpu_type_groups(gvt);
+       intel_gvt_clean_vgpu_types(gvt);
+
        intel_gvt_debugfs_clean(gvt);
        clean_service_thread(gvt);
        intel_gvt_clean_cmd_parser(gvt);
@@ -322,17 +328,10 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
        intel_gvt_clean_workload_scheduler(gvt);
        intel_gvt_clean_gtt(gvt);
        intel_gvt_clean_irq(gvt);
-       intel_gvt_clean_mmio_info(gvt);
        intel_gvt_free_firmware(gvt);
-
-       intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
-       intel_gvt_cleanup_vgpu_type_groups(gvt);
-       intel_gvt_clean_vgpu_types(gvt);
-
+       intel_gvt_clean_mmio_info(gvt);
        idr_destroy(&gvt->vgpu_idr);
 
-       intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
-
        kfree(dev_priv->gvt);
        dev_priv->gvt = NULL;
 }
index 9a967152277494ccbf1dbd5ac55b4b7f5b842eba..31f6cdbe5c424f67f29167be34496e67fcb67b85 100644 (file)
@@ -486,6 +486,7 @@ void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu);
 struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
                                         struct intel_vgpu_type *type);
 void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_release_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
                                 unsigned int engine_mask);
 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
@@ -563,7 +564,8 @@ struct intel_gvt_ops {
                                unsigned int);
        struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
                                struct intel_vgpu_type *);
-       void (*vgpu_destroy)(struct intel_vgpu *);
+       void (*vgpu_destroy)(struct intel_vgpu *vgpu);
+       void (*vgpu_release)(struct intel_vgpu *vgpu);
        void (*vgpu_reset)(struct intel_vgpu *);
        void (*vgpu_activate)(struct intel_vgpu *);
        void (*vgpu_deactivate)(struct intel_vgpu *);
index 4d2f53ae9f0f26907433aabc3cae4c9d1e7d3361..a45f46d8537f15bd187fd4195d7de2c6b6dffd2a 100644 (file)
@@ -43,6 +43,8 @@
 #include <linux/mdev.h>
 #include <linux/debugfs.h>
 
+#include <linux/nospec.h>
+
 #include "i915_drv.h"
 #include "gvt.h"
 
@@ -187,14 +189,14 @@ static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
 
        /* Setup DMA mapping. */
        *dma_addr = dma_map_page(dev, page, 0, size, PCI_DMA_BIDIRECTIONAL);
-       ret = dma_mapping_error(dev, *dma_addr);
-       if (ret) {
+       if (dma_mapping_error(dev, *dma_addr)) {
                gvt_vgpu_err("DMA mapping failed for pfn 0x%lx, ret %d\n",
                             page_to_pfn(page), ret);
                gvt_unpin_guest_page(vgpu, gfn, size);
+               return -ENOMEM;
        }
 
-       return ret;
+       return 0;
 }
 
 static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
@@ -666,7 +668,7 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
        if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
                return;
 
-       intel_gvt_ops->vgpu_deactivate(vgpu);
+       intel_gvt_ops->vgpu_release(vgpu);
 
        ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
                                        &vgpu->vdev.iommu_notifier);
@@ -1139,7 +1141,8 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
        } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
                struct vfio_region_info info;
                struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
-               int i, ret;
+               unsigned int i;
+               int ret;
                struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
                size_t size;
                int nr_areas = 1;
@@ -1224,6 +1227,10 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
                                if (info.index >= VFIO_PCI_NUM_REGIONS +
                                                vgpu->vdev.num_regions)
                                        return -EINVAL;
+                               info.index =
+                                       array_index_nospec(info.index,
+                                                       VFIO_PCI_NUM_REGIONS +
+                                                       vgpu->vdev.num_regions);
 
                                i = info.index - VFIO_PCI_NUM_REGIONS;
 
@@ -1250,11 +1257,13 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
                                        &sparse->header, sizeof(*sparse) +
                                        (sparse->nr_areas *
                                                sizeof(*sparse->areas)));
-                               kfree(sparse);
-                               if (ret)
+                               if (ret) {
+                                       kfree(sparse);
                                        return ret;
+                               }
                                break;
                        default:
+                               kfree(sparse);
                                return -EINVAL;
                        }
                }
@@ -1270,6 +1279,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
                                                  sizeof(info), caps.buf,
                                                  caps.size)) {
                                        kfree(caps.buf);
+                                       kfree(sparse);
                                        return -EFAULT;
                                }
                                info.cap_offset = sizeof(info);
@@ -1278,6 +1288,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
                        kfree(caps.buf);
                }
 
+               kfree(sparse);
                return copy_to_user((void __user *)arg, &info, minsz) ?
                        -EFAULT : 0;
        } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
@@ -1615,7 +1626,6 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
        kvmgt_protect_table_init(info);
        gvt_cache_init(vgpu);
 
-       mutex_init(&vgpu->dmabuf_lock);
        init_completion(&vgpu->vblank_done);
 
        info->track_node.track_write = kvmgt_page_track_write;
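array_index_nospec() clamps a user-controlled index even under speculative execution, so a mispredicted bounds check can no longer serve as a Spectre-v1 gadget for reading adjacent memory. The general pattern, as a sketch with generic names rather than this driver's:

	#include <linux/nospec.h>

	/* Sketch: sanitize the index after the bounds check so speculation
	 * cannot run ahead with an out-of-range value. */
	if (index >= nr_entries)
		return -EINVAL;
	index = array_index_nospec(index, nr_entries);
	entry = &table[index];	/* safe even if the branch mispredicts */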
index b0e566956b8d5ce1609c0f19f31ffa45e610adee..43aa058e29fca92368c55aa3b458393951de72e8 100644 (file)
@@ -784,7 +784,8 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
        kunmap(page);
 }
 
-static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
+void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
+                               unsigned long engine_mask)
 {
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
@@ -879,7 +880,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
                 * cleaned up during the resetting process later, so doing
                 * the workload clean up here doesn't have any impact.
                 **/
-               clean_workloads(vgpu, ENGINE_MASK(ring_id));
+               intel_vgpu_clean_workloads(vgpu, ENGINE_MASK(ring_id));
        }
 
        workload->complete(workload);
@@ -1081,7 +1082,7 @@ void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
        if (!s->active)
                return;
 
-       clean_workloads(vgpu, engine_mask);
+       intel_vgpu_clean_workloads(vgpu, engine_mask);
        s->ops->reset(vgpu, engine_mask);
 }
 
index 21eddab4a9cd465b1f835cb310ea091387d9bea2..ca5529d0e48ef20b0d2dbdd6e33512a2234afda7 100644 (file)
@@ -158,4 +158,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 
 void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
 
+void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
+                               unsigned long engine_mask);
+
 #endif
index f6fa916517c32fc1de1ddb47b3703b30283bbf59..a4e8e3cf74fd58b400331bc608516e9414e458d3 100644 (file)
@@ -222,7 +222,7 @@ void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
  * @vgpu: virtual GPU
  *
  * This function is called when user wants to deactivate a virtual GPU.
- * All virtual GPU runtime information will be destroyed.
+ * The virtual GPU will be stopped.
  *
  */
 void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
@@ -238,11 +238,29 @@ void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
        }
 
        intel_vgpu_stop_schedule(vgpu);
-       intel_vgpu_dmabuf_cleanup(vgpu);
 
        mutex_unlock(&vgpu->vgpu_lock);
 }
 
+/**
+ * intel_gvt_release_vgpu - release a virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when user wants to release a virtual GPU.
+ * The virtual GPU will be stopped and all runtime information will be
+ * destroyed.
+ *
+ */
+void intel_gvt_release_vgpu(struct intel_vgpu *vgpu)
+{
+       intel_gvt_deactivate_vgpu(vgpu);
+
+       mutex_lock(&vgpu->vgpu_lock);
+       intel_vgpu_clean_workloads(vgpu, ALL_ENGINES);
+       intel_vgpu_dmabuf_cleanup(vgpu);
+       mutex_unlock(&vgpu->vgpu_lock);
+}
+
 /**
  * intel_gvt_destroy_vgpu - destroy a virtual GPU
  * @vgpu: virtual GPU
@@ -361,6 +379,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
        vgpu->gvt = gvt;
        vgpu->sched_ctl.weight = param->weight;
        mutex_init(&vgpu->vgpu_lock);
+       mutex_init(&vgpu->dmabuf_lock);
        INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
        INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
        idr_init(&vgpu->object_idr);
index 91e7483228e11d1ff932a19c749f349b3961ea4f..08ec7446282e7f981f74272e8c29c755abad9e21 100644 (file)
@@ -9201,6 +9201,7 @@ enum skl_power_gate {
 #define  TRANS_MSA_10_BPC              (2 << 5)
 #define  TRANS_MSA_12_BPC              (3 << 5)
 #define  TRANS_MSA_16_BPC              (4 << 5)
+#define  TRANS_MSA_CEA_RANGE           (1 << 3)
 
 /* LCPLL Control */
 #define LCPLL_CTL                      _MMIO(0x130040)
index 39d66f8493faea5162fd88315c7e709e4d51350d..8761513f3532c5c4bfb56151833ab6217a5ab99f 100644 (file)
@@ -1685,6 +1685,10 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
        WARN_ON(transcoder_is_dsi(cpu_transcoder));
 
        temp = TRANS_MSA_SYNC_CLK;
+
+       if (crtc_state->limited_color_range)
+               temp |= TRANS_MSA_CEA_RANGE;
+
        switch (crtc_state->pipe_bpp) {
        case 18:
                temp |= TRANS_MSA_6_BPC;
index 33faad3197feea61430e5eb2ed7c3ed34160c4da..6a8f27d0a7429e6d3a01f46e9b768031680519b6 100644 (file)
@@ -387,8 +387,18 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
                mmio = RING_HWS_PGA(engine->mmio_base);
        }
 
-       if (INTEL_GEN(dev_priv) >= 6)
-               I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
+       if (INTEL_GEN(dev_priv) >= 6) {
+               u32 mask = ~0u;
+
+               /*
+                * Keep the render interrupt unmasked as this papers over
+                * lost interrupts following a reset.
+                */
+               if (engine->id == RCS)
+                       mask &= ~BIT(0);
+
+               I915_WRITE(RING_HWSTAM(engine->mmio_base), mask);
+       }
 
        I915_WRITE(mmio, engine->status_page.ggtt_offset);
        POSTING_READ(mmio);
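HWSTAM controls which interrupt status bits the hardware mirrors into the status page; a set bit masks that write. Sketching the values the new branch produces on gen6+:

	/* Sketch of the resulting HWSTAM writes:
	 *   RCS:    ~0u & ~BIT(0) == 0xfffffffe  (user interrupts unmasked,
	 *           so seqno writes into the HWSP survive a reset)
	 *   others: ~0u           == 0xffffffff  (all status writes masked)
	 */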
index b892ca8396e8778d0e4670813412c4a8a0b669c9..50b39aa4ffb88ac28d478124bf2e67a54e028167 100644 (file)
@@ -359,8 +359,8 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
 }
 
 /* Note callers must have acquired the PUNIT->PMIC bus, before calling this. */
-static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
-                                        bool restore)
+static unsigned int
+intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv)
 {
        unsigned long irqflags;
        struct intel_uncore_forcewake_domain *domain;
@@ -412,20 +412,11 @@ static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
                dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
 
        fw_domains_reset(dev_priv, dev_priv->uncore.fw_domains);
-
-       if (restore) { /* If reset with a user forcewake, try to restore */
-               if (fw)
-                       dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
-
-               if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
-                       dev_priv->uncore.fifo_count =
-                               fifo_free_entries(dev_priv);
-       }
-
-       if (!restore)
-               assert_forcewakes_inactive(dev_priv);
+       assert_forcewakes_inactive(dev_priv);
 
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+       return fw; /* track the lost user forcewake domains */
 }
 
 static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
@@ -534,7 +525,7 @@ check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
 }
 
 static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
-                                         bool restore_forcewake)
+                                         unsigned int restore_forcewake)
 {
        /* clear out unclaimed reg detection bit */
        if (check_for_unclaimed_mmio(dev_priv))
@@ -549,7 +540,17 @@ static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
        }
 
        iosf_mbi_punit_acquire();
-       intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
+       intel_uncore_forcewake_reset(dev_priv);
+       if (restore_forcewake) {
+               spin_lock_irq(&dev_priv->uncore.lock);
+               dev_priv->uncore.funcs.force_wake_get(dev_priv,
+                                                     restore_forcewake);
+
+               if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
+                       dev_priv->uncore.fifo_count =
+                               fifo_free_entries(dev_priv);
+               spin_unlock_irq(&dev_priv->uncore.lock);
+       }
        iosf_mbi_punit_release();
 }
 
@@ -558,13 +559,18 @@ void intel_uncore_suspend(struct drm_i915_private *dev_priv)
        iosf_mbi_punit_acquire();
        iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
                &dev_priv->uncore.pmic_bus_access_nb);
-       intel_uncore_forcewake_reset(dev_priv, false);
+       dev_priv->uncore.fw_domains_saved =
+               intel_uncore_forcewake_reset(dev_priv);
        iosf_mbi_punit_release();
 }
 
 void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
 {
-       __intel_uncore_early_sanitize(dev_priv, true);
+       unsigned int restore_forcewake;
+
+       restore_forcewake = fetch_and_zero(&dev_priv->uncore.fw_domains_saved);
+       __intel_uncore_early_sanitize(dev_priv, restore_forcewake);
+
        iosf_mbi_register_pmic_bus_access_notifier(
                &dev_priv->uncore.pmic_bus_access_nb);
        i915_check_and_clear_faults(dev_priv);
@@ -1545,7 +1551,7 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
 
        intel_uncore_edram_detect(dev_priv);
        intel_uncore_fw_domains_init(dev_priv);
-       __intel_uncore_early_sanitize(dev_priv, false);
+       __intel_uncore_early_sanitize(dev_priv, 0);
 
        dev_priv->uncore.unclaimed_mmio_check = 1;
        dev_priv->uncore.pmic_bus_access_nb.notifier_call =
@@ -1632,7 +1638,7 @@ void intel_uncore_fini(struct drm_i915_private *dev_priv)
        iosf_mbi_punit_acquire();
        iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
                &dev_priv->uncore.pmic_bus_access_nb);
-       intel_uncore_forcewake_reset(dev_priv, false);
+       intel_uncore_forcewake_reset(dev_priv);
        iosf_mbi_punit_release();
 }
 
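intel_uncore_forcewake_reset() now returns the set of user-held forcewake domains it had to drop; suspend saves that set in fw_domains_saved and resume re-acquires it, so user forcewake survives S3 instead of being silently lost. A condensed sketch of the pairing (simplified; the real code holds uncore.lock and the PUNIT bus):

	/* Sketch: save dropped user domains at suspend ... */
	i915->uncore.fw_domains_saved = intel_uncore_forcewake_reset(i915);

	/* ... and hand them back to early sanitize at resume. */
	restore = fetch_and_zero(&i915->uncore.fw_domains_saved);
	__intel_uncore_early_sanitize(i915, restore);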
index 2fbe93178fb2a3bb8eace212b040cb31dbc379cb..e5e157d288de4b9fa06e9add47facaa28925e4e9 100644 (file)
@@ -104,6 +104,7 @@ struct intel_uncore {
 
        enum forcewake_domains fw_domains;
        enum forcewake_domains fw_domains_active;
+       enum forcewake_domains fw_domains_saved; /* user domains saved for S3 */
 
        u32 fw_set;
        u32 fw_clear;
index c69cbd5aed527940fdffc4e96c3645ddd77ae708..ba4f322d56b8c71a43419a096a06637f12c95c35 100644 (file)
@@ -499,6 +499,19 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
        return err == expected;
 }
 
+static void disable_retire_worker(struct drm_i915_private *i915)
+{
+       mutex_lock(&i915->drm.struct_mutex);
+       if (!i915->gt.active_requests++) {
+               intel_runtime_pm_get(i915);
+               i915_gem_unpark(i915);
+               intel_runtime_pm_put(i915);
+       }
+       mutex_unlock(&i915->drm.struct_mutex);
+       cancel_delayed_work_sync(&i915->gt.retire_work);
+       cancel_delayed_work_sync(&i915->gt.idle_work);
+}
+
 static int igt_mmap_offset_exhaustion(void *arg)
 {
        struct drm_i915_private *i915 = arg;
@@ -509,12 +522,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
        int loop, err;
 
        /* Disable background reaper */
-       mutex_lock(&i915->drm.struct_mutex);
-       if (!i915->gt.active_requests++)
-               i915_gem_unpark(i915);
-       mutex_unlock(&i915->drm.struct_mutex);
-       cancel_delayed_work_sync(&i915->gt.retire_work);
-       cancel_delayed_work_sync(&i915->gt.idle_work);
+       disable_retire_worker(i915);
        GEM_BUG_ON(!i915->gt.awake);
 
        /* Trim the device mmap space to only a page */
index 47bc5b2ddb5602633b86fcb1c2f391b545d597b4..81d9d31042a9c50f8172abb3d5c3137f1f153260 100644 (file)
@@ -160,7 +160,7 @@ static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_pri
                i915_reg_t reg = { offset };
 
                iosf_mbi_punit_acquire();
-               intel_uncore_forcewake_reset(dev_priv, false);
+               intel_uncore_forcewake_reset(dev_priv);
                iosf_mbi_punit_release();
 
                check_for_unclaimed_mmio(dev_priv);
index bd84f71d27d8567243956d9c1ddc1d9e6a09d458..ab1d9308c31146d968c5b431587b895df9d517a8 100644 (file)
@@ -11,6 +11,7 @@
  *
  */
 
+#include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/cpumask.h>
 #include <linux/qcom_scm.h>
@@ -20,6 +21,7 @@
 #include <linux/pm_opp.h>
 #include <linux/nvmem-consumer.h>
 #include <linux/iopoll.h>
+#include <linux/slab.h>
 #include "msm_gem.h"
 #include "msm_mmu.h"
 #include "a5xx_gpu.h"
@@ -92,12 +94,13 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname)
                ret = qcom_mdt_load(dev, fw, fwname, GPU_PAS_ID,
                                mem_region, mem_phys, mem_size, NULL);
        } else {
-               char newname[strlen("qcom/") + strlen(fwname) + 1];
+               char *newname;
 
-               sprintf(newname, "qcom/%s", fwname);
+               newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
 
                ret = qcom_mdt_load(dev, fw, newname, GPU_PAS_ID,
                                mem_region, mem_phys, mem_size, NULL);
+               kfree(newname);
        }
        if (ret)
                goto out;
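Swapping the on-stack variable-length array for kasprintf() removes an unbounded stack allocation (VLAs are being eliminated kernel-wide) in exchange for a heap string that must be freed on every exit path. The idiom in miniature; note the a5xx hunk above passes newname to qcom_mdt_load() without a NULL check, while the adreno_gpu.c version below does check:

	#include <linux/slab.h>	/* kasprintf, kfree */

	char *newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
	if (!newname)
		return -ENOMEM;	/* allocation can fail, unlike a VLA */
	/* ... use newname ... */
	kfree(newname);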
index 38ac50b7382950078c68c5ef9f7124d571449b25..65c0ae7d8ad19bc3ee312b6a5f39d4ca16d29632 100644 (file)
@@ -18,7 +18,9 @@
  */
 
 #include <linux/ascii85.h>
+#include <linux/kernel.h>
 #include <linux/pm_opp.h>
+#include <linux/slab.h>
 #include "adreno_gpu.h"
 #include "msm_gem.h"
 #include "msm_mmu.h"
@@ -71,10 +73,12 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
 {
        struct drm_device *drm = adreno_gpu->base.dev;
        const struct firmware *fw = NULL;
-       char newname[strlen("qcom/") + strlen(fwname) + 1];
+       char *newname;
        int ret;
 
-       sprintf(newname, "qcom/%s", fwname);
+       newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
+       if (!newname)
+               return ERR_PTR(-ENOMEM);
 
        /*
         * Try first to load from qcom/$fwfile using a direct load (to avoid
@@ -88,11 +92,12 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
                        dev_info(drm->dev, "loaded %s from new location\n",
                                newname);
                        adreno_gpu->fwloc = FW_LOCATION_NEW;
-                       return fw;
+                       goto out;
                } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
                        dev_err(drm->dev, "failed to load %s: %d\n",
                                newname, ret);
-                       return ERR_PTR(ret);
+                       fw = ERR_PTR(ret);
+                       goto out;
                }
        }
 
@@ -107,11 +112,12 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
                        dev_info(drm->dev, "loaded %s from legacy location\n",
                                newname);
                        adreno_gpu->fwloc = FW_LOCATION_LEGACY;
-                       return fw;
+                       goto out;
                } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
                        dev_err(drm->dev, "failed to load %s: %d\n",
                                fwname, ret);
-                       return ERR_PTR(ret);
+                       fw = ERR_PTR(ret);
+                       goto out;
                }
        }
 
@@ -127,16 +133,20 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
                        dev_info(drm->dev, "loaded %s with helper\n",
                                newname);
                        adreno_gpu->fwloc = FW_LOCATION_HELPER;
-                       return fw;
+                       goto out;
                } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
                        dev_err(drm->dev, "failed to load %s: %d\n",
                                newname, ret);
-                       return ERR_PTR(ret);
+                       fw = ERR_PTR(ret);
+                       goto out;
                }
        }
 
        dev_err(drm->dev, "failed to load %s\n", fwname);
-       return ERR_PTR(-ENOENT);
+       fw = ERR_PTR(-ENOENT);
+out:
+       kfree(newname);
+       return fw;
 }
 
 static int adreno_load_fw(struct adreno_gpu *adreno_gpu)
index 0bd3eda93e226ab7fa62f647fe86dcb93e052c5d..1b4de3486ef9e14fa8b172de29d68db2df41e802 100644 (file)
@@ -421,7 +421,7 @@ int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
 
        ret = dpu_core_irq_disable(phys_enc->dpu_kms, &irq->irq_idx, 1);
        if (ret) {
-               DRM_ERROR("diable failed id=%u, intr=%d, hw=%d, irq=%d ret=%d",
+               DRM_ERROR("disable failed id=%u, intr=%d, hw=%d, irq=%d ret=%d",
                          DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
                          irq->irq_idx, ret);
        }
@@ -2444,6 +2444,8 @@ int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
 
        for (i = 0; i < dpu_enc->num_phys_encs; i++) {
                struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+               if (!phys)
+                       continue;
 
                switch (event) {
                case MSM_ENC_COMMIT_DONE:
@@ -2461,7 +2463,7 @@ int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
                        return -EINVAL;
                };
 
-               if (phys && fn_wait) {
+               if (fn_wait) {
                        DPU_ATRACE_BEGIN("wait_for_completion_event");
                        ret = fn_wait(phys);
                        DPU_ATRACE_END("wait_for_completion_event");
index a68f1249388c4a78b688a5a9f485e29576465285..a75eebca2f377da592b215583a3f8787d95e02c3 100644 (file)
@@ -121,7 +121,7 @@ void dpu_power_resource_deinit(struct platform_device *pdev,
        mutex_lock(&phandle->phandle_lock);
        list_for_each_entry_safe(curr_client, next_client,
                        &phandle->power_client_clist, list) {
-               pr_err("cliend:%s-%d still registered with refcount:%d\n",
+               pr_err("client:%s-%d still registered with refcount:%d\n",
                                curr_client->name, curr_client->id,
                                curr_client->refcount);
                curr_client->active = false;
index 0cba86ed3f54ca1be0513164ba8481b86a255b65..b611484866d68ac032fff312198b784342434941 100644 (file)
@@ -263,7 +263,7 @@ void msm_gem_shrinker_cleanup(struct drm_device *dev);
 int msm_gem_mmap_obj(struct drm_gem_object *obj,
                        struct vm_area_struct *vma);
 int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-int msm_gem_fault(struct vm_fault *vmf);
+vm_fault_t msm_gem_fault(struct vm_fault *vmf);
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
 int msm_gem_get_iova(struct drm_gem_object *obj,
                struct msm_gem_address_space *aspace, uint64_t *iova);
index f583bb4222f9ad13dc17d74ec13d65ad16734506..f59ca27a4a357492f96d0b7e37c76536037b40d6 100644 (file)
@@ -219,7 +219,7 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
        return msm_gem_mmap_obj(vma->vm_private_data, vma);
 }
 
-int msm_gem_fault(struct vm_fault *vmf)
+vm_fault_t msm_gem_fault(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *obj = vma->vm_private_data;
@@ -227,15 +227,18 @@ int msm_gem_fault(struct vm_fault *vmf)
        struct page **pages;
        unsigned long pfn;
        pgoff_t pgoff;
-       int ret;
+       int err;
+       vm_fault_t ret;
 
        /*
         * vm_ops.open/drm_gem_mmap_obj and close get and put
         * a reference on obj. So, we dont need to hold one here.
         */
-       ret = mutex_lock_interruptible(&msm_obj->lock);
-       if (ret)
+       err = mutex_lock_interruptible(&msm_obj->lock);
+       if (err) {
+               ret = VM_FAULT_NOPAGE;
                goto out;
+       }
 
        if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
                mutex_unlock(&msm_obj->lock);
@@ -245,7 +248,7 @@ int msm_gem_fault(struct vm_fault *vmf)
        /* make sure we have pages attached now */
        pages = get_pages(obj);
        if (IS_ERR(pages)) {
-               ret = PTR_ERR(pages);
+               ret = vmf_error(PTR_ERR(pages));
                goto out_unlock;
        }
 
@@ -257,27 +260,11 @@ int msm_gem_fault(struct vm_fault *vmf)
        VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
                        pfn, pfn << PAGE_SHIFT);
 
-       ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
-
+       ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
 out_unlock:
        mutex_unlock(&msm_obj->lock);
 out:
-       switch (ret) {
-       case -EAGAIN:
-       case 0:
-       case -ERESTARTSYS:
-       case -EINTR:
-       case -EBUSY:
-               /*
-                * EBUSY is ok: this just means that another thread
-                * already did the job.
-                */
-               return VM_FAULT_NOPAGE;
-       case -ENOMEM:
-               return VM_FAULT_OOM;
-       default:
-               return VM_FAULT_SIGBUS;
-       }
+       return ret;
 }
 
 /** get mmap offset */
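Returning vm_fault_t directly removes the hand-rolled errno switch: vmf_insert_mixed() already yields a fault code, and vmf_error() converts an errno for the remaining paths. What vmf_error() does, roughly, as a sketch:

	/* Sketch of vmf_error()'s mapping (see include/linux/mm.h). */
	static inline vm_fault_t vmf_error_sketch(int err)
	{
		if (err == -ENOMEM)
			return VM_FAULT_OOM;
		return VM_FAULT_SIGBUS;	/* everything else */
	}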