Merge tag 'tilcdc-4.15-fixes' of https://github.com/jsarha/linux into drm-next
[sfrench/cifs-2.6.git] / drivers / gpu / drm / amd / amdgpu / uvd_v6_0.c
index 31db356476f8f1f72f488c54282a09409bcbb749..2581543b35a79318ded40c3171b93183c0d4492d 100644 (file)
@@ -38,6 +38,8 @@
 #include "vi.h"
 
 static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
+static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev);
+
 static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
 static int uvd_v6_0_start(struct amdgpu_device *adev);
 static void uvd_v6_0_stop(struct amdgpu_device *adev);
@@ -47,6 +49,18 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
 static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
                                 bool enable);
 
+/**
+ * uvd_v6_0_enc_support - get encode support status
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true when the ASIC (Polaris family) has UVD encode support
+ */
+static inline bool uvd_v6_0_enc_support(struct amdgpu_device *adev)
+{
+       return ((adev->asic_type >= CHIP_POLARIS10) &&
+               (adev->asic_type <= CHIP_POLARIS12));
+}
+
 /**
  * uvd_v6_0_ring_get_rptr - get read pointer
  *
@@ -61,6 +75,22 @@ static uint64_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
        return RREG32(mmUVD_RBC_RB_RPTR);
 }
 
+/**
+ * uvd_v6_0_enc_ring_get_rptr - get enc read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware enc read pointer; enc ring 0 and ring 1
+ * use separate RPTR registers.
+ */
+static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       if (ring == &adev->uvd.ring_enc[0])
+               return RREG32(mmUVD_RB_RPTR);
+       else
+               return RREG32(mmUVD_RB_RPTR2);
+}
+
 /**
  * uvd_v6_0_ring_get_wptr - get write pointer
  *
@@ -75,6 +105,23 @@ static uint64_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
        return RREG32(mmUVD_RBC_RB_WPTR);
 }
 
+/**
+ * uvd_v6_0_enc_ring_get_wptr - get enc write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware enc write pointer; each enc ring has
+ * its own WPTR register.
+ */
+static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       return (ring == &adev->uvd.ring_enc[0]) ?
+               RREG32(mmUVD_RB_WPTR) : RREG32(mmUVD_RB_WPTR2);
+}
+
 /**
  * uvd_v6_0_ring_set_wptr - set write pointer
  *
@@ -89,11 +136,248 @@ static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
        WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
 }
 
+/**
+ * uvd_v6_0_enc_ring_set_wptr - set enc write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the enc ring's write pointer to the matching hardware register.
+ */
+static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       if (ring == &adev->uvd.ring_enc[0])
+               WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+       else
+               WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+}
+
+/**
+ * uvd_v6_0_enc_ring_test_ring - test if UVD ENC ring is working
+ *
+ * @ring: the engine to test on
+ *
+ * Submits a single HEVC_ENC_CMD_END packet and polls for the read pointer
+ * to advance.  Returns 0 on success, the ring-lock error code on failure,
+ * or -ETIMEDOUT if the hardware does not respond within adev->usec_timeout.
+ */
+static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+       uint32_t rptr = amdgpu_ring_get_rptr(ring);
+       unsigned i;
+       int r;
+
+       r = amdgpu_ring_alloc(ring, 16);
+       if (r) {
+               DRM_ERROR("amdgpu: uvd enc failed to lock ring %d (%d).\n",
+                         ring->idx, r);
+               return r;
+       }
+       amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
+       amdgpu_ring_commit(ring);
+
+       /* busy-poll (1 usec granularity) until the packet is consumed */
+       for (i = 0; i < adev->usec_timeout; i++) {
+               if (amdgpu_ring_get_rptr(ring) != rptr)
+                       break;
+               DRM_UDELAY(1);
+       }
+
+       if (i < adev->usec_timeout) {
+               DRM_INFO("ring test on %d succeeded in %d usecs\n",
+                        ring->idx, i);
+       } else {
+               DRM_ERROR("amdgpu: ring %d test failed\n",
+                         ring->idx);
+               r = -ETIMEDOUT;
+       }
+
+       return r;
+}
+
+/**
+ * uvd_v6_0_enc_get_create_msg - generate a UVD ENC create msg
+ *
+ * @ring: ring we should submit the msg to
+ * @handle: session handle to use
+ * @fence: optional fence to return
+ *
+ * Open up a stream for HW test.  Returns 0 on success or a negative
+ * error code from job allocation/submission.
+ */
+static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+                                      struct dma_fence **fence)
+{
+       const unsigned ib_size_dw = 16;
+       struct amdgpu_job *job;
+       struct amdgpu_ib *ib;
+       struct dma_fence *f = NULL;
+       uint64_t dummy;
+       int i, r;
+
+       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+       if (r)
+               return r;
+
+       ib = &job->ibs[0];
+       /* scratch area behind the IB used as a dummy session buffer address */
+       dummy = ib->gpu_addr + 1024;
+
+       ib->length_dw = 0;
+       ib->ptr[ib->length_dw++] = 0x00000018;
+       ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
+       ib->ptr[ib->length_dw++] = handle;
+       ib->ptr[ib->length_dw++] = 0x00010000;
+       ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
+       ib->ptr[ib->length_dw++] = lower_32_bits(dummy);
+
+       ib->ptr[ib->length_dw++] = 0x00000014;
+       ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
+       ib->ptr[ib->length_dw++] = 0x0000001c;
+       ib->ptr[ib->length_dw++] = 0x00000001;
+       ib->ptr[ib->length_dw++] = 0x00000000;
+
+       ib->ptr[ib->length_dw++] = 0x00000008;
+       ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
+
+       /* zero-pad the remainder of the IB */
+       for (i = ib->length_dw; i < ib_size_dw; ++i)
+               ib->ptr[i] = 0x0;
+
+       r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+       job->fence = dma_fence_get(f);
+       if (r)
+               goto err;
+
+       amdgpu_job_free(job);
+       if (fence)
+               *fence = dma_fence_get(f);
+       dma_fence_put(f);
+       return 0;
+
+err:
+       amdgpu_job_free(job);
+       return r;
+}
+
+/**
+ * uvd_v6_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
+ *
+ * @ring: ring we should submit the msg to
+ * @handle: session handle to use
+ * @direct: submit directly to the ring, or via the scheduler entity
+ * @fence: optional fence to return
+ *
+ * Close up a stream for HW test or if userspace failed to do so.
+ * Returns 0 on success or a negative error code.
+ */
+static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
+                                       uint32_t handle,
+                                       bool direct, struct dma_fence **fence)
+{
+       const unsigned ib_size_dw = 16;
+       struct amdgpu_job *job;
+       struct amdgpu_ib *ib;
+       struct dma_fence *f = NULL;
+       uint64_t dummy;
+       int i, r;
+
+       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+       if (r)
+               return r;
+
+       ib = &job->ibs[0];
+       /* scratch area behind the IB used as a dummy session buffer address */
+       dummy = ib->gpu_addr + 1024;
+
+       ib->length_dw = 0;
+       ib->ptr[ib->length_dw++] = 0x00000018;
+       ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
+       ib->ptr[ib->length_dw++] = handle;
+       ib->ptr[ib->length_dw++] = 0x00010000;
+       ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
+       ib->ptr[ib->length_dw++] = lower_32_bits(dummy);
+
+       ib->ptr[ib->length_dw++] = 0x00000014;
+       ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
+       ib->ptr[ib->length_dw++] = 0x0000001c;
+       ib->ptr[ib->length_dw++] = 0x00000001;
+       ib->ptr[ib->length_dw++] = 0x00000000;
+
+       ib->ptr[ib->length_dw++] = 0x00000008;
+       ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */
+
+       /* zero-pad the remainder of the IB */
+       for (i = ib->length_dw; i < ib_size_dw; ++i)
+               ib->ptr[i] = 0x0;
+
+       if (direct) {
+               r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+               job->fence = dma_fence_get(f);
+               if (r)
+                       goto err;
+
+               amdgpu_job_free(job);
+       } else {
+               /* use the UVD ENC entity set up in sw_init, not the VCE one */
+               r = amdgpu_job_submit(job, ring, &ring->adev->uvd.entity_enc,
+                                     AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+               if (r)
+                       goto err;
+       }
+
+       if (fence)
+               *fence = dma_fence_get(f);
+       dma_fence_put(f);
+       return 0;
+
+err:
+       amdgpu_job_free(job);
+       return r;
+}
+
+/**
+ * uvd_v6_0_enc_ring_test_ib - test if UVD ENC IBs are working
+ *
+ * @ring: the engine to test on
+ * @timeout: how long to wait for the fence to signal, in jiffies
+ *
+ * Submits a create/destroy message pair and waits for the resulting fence.
+ * Returns 0 on success, -ETIMEDOUT on timeout, or a negative error code.
+ */
+static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+       struct dma_fence *fence = NULL;
+       long r;
+
+       r = uvd_v6_0_enc_get_create_msg(ring, 1, NULL);
+       if (r) {
+               DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+               goto error;
+       }
+
+       r = uvd_v6_0_enc_get_destroy_msg(ring, 1, true, &fence);
+       if (r) {
+               DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+               goto error;
+       }
+
+       r = dma_fence_wait_timeout(fence, false, timeout);
+       if (r == 0) {
+               DRM_ERROR("amdgpu: IB test timed out.\n");
+               r = -ETIMEDOUT;
+       } else if (r < 0) {
+               DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
+       } else {
+               DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+               r = 0;
+       }
+error:
+       dma_fence_put(fence);
+       return r;
+}
+
 static int uvd_v6_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        uvd_v6_0_set_ring_funcs(adev);
+
+       if (uvd_v6_0_enc_support(adev)) {
+               adev->uvd.num_enc_rings = 2;
+               uvd_v6_0_set_enc_ring_funcs(adev);
+       }
+
        uvd_v6_0_set_irq_funcs(adev);
 
        return 0;
@@ -102,7 +386,7 @@ static int uvd_v6_0_early_init(void *handle)
 static int uvd_v6_0_sw_init(void *handle)
 {
        struct amdgpu_ring *ring;
-       int r;
+       int i, r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        /* UVD TRAP */
@@ -110,10 +394,31 @@ static int uvd_v6_0_sw_init(void *handle)
        if (r)
                return r;
 
+       /* UVD ENC TRAP */
+       if (uvd_v6_0_enc_support(adev)) {
+               for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
+                       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 119, &adev->uvd.irq);
+                       if (r)
+                               return r;
+               }
+       }
+
        r = amdgpu_uvd_sw_init(adev);
        if (r)
                return r;
 
+       if (uvd_v6_0_enc_support(adev)) {
+               struct amd_sched_rq *rq;
+               ring = &adev->uvd.ring_enc[0];
+               rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
+               r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
+                                         rq, amdgpu_sched_jobs);
+               if (r) {
+                       DRM_ERROR("Failed setting up UVD ENC run queue.\n");
+                       return r;
+               }
+       }
+
        r = amdgpu_uvd_resume(adev);
        if (r)
                return r;
@@ -121,19 +426,38 @@ static int uvd_v6_0_sw_init(void *handle)
        ring = &adev->uvd.ring;
        sprintf(ring->name, "uvd");
        r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
+       if (r)
+               return r;
+
+       if (uvd_v6_0_enc_support(adev)) {
+               for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
+                       ring = &adev->uvd.ring_enc[i];
+                       sprintf(ring->name, "uvd_enc%d", i);
+                       r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
+                       if (r)
+                               return r;
+               }
+       }
 
        return r;
 }
 
 static int uvd_v6_0_sw_fini(void *handle)
 {
-       int r;
+       int i, r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        r = amdgpu_uvd_suspend(adev);
        if (r)
                return r;
 
+       if (uvd_v6_0_enc_support(adev)) {
+               amd_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc);
+
+               for (i = 0; i < adev->uvd.num_enc_rings; ++i)
+                       amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
+       }
+
        return amdgpu_uvd_sw_fini(adev);
 }
 
@@ -149,7 +473,7 @@ static int uvd_v6_0_hw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring = &adev->uvd.ring;
        uint32_t tmp;
-       int r;
+       int i, r;
 
        amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
        uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
@@ -189,9 +513,25 @@ static int uvd_v6_0_hw_init(void *handle)
 
        amdgpu_ring_commit(ring);
 
+       if (uvd_v6_0_enc_support(adev)) {
+               for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
+                       ring = &adev->uvd.ring_enc[i];
+                       ring->ready = true;
+                       r = amdgpu_ring_test_ring(ring);
+                       if (r) {
+                               ring->ready = false;
+                               goto done;
+                       }
+               }
+       }
+
 done:
-       if (!r)
-               DRM_INFO("UVD initialized successfully.\n");
+       if (!r) {
+               if (uvd_v6_0_enc_support(adev))
+                       DRM_INFO("UVD and UVD ENC initialized successfully.\n");
+               else
+                       DRM_INFO("UVD initialized successfully.\n");
+       }
 
        return r;
 }
@@ -225,11 +565,7 @@ static int uvd_v6_0_suspend(void *handle)
        if (r)
                return r;
 
-       /* Skip this for APU for now */
-       if (!(adev->flags & AMD_IS_APU))
-               r = amdgpu_uvd_suspend(adev);
-
-       return r;
+       return amdgpu_uvd_suspend(adev);
 }
 
 static int uvd_v6_0_resume(void *handle)
@@ -237,12 +573,10 @@ static int uvd_v6_0_resume(void *handle)
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       /* Skip this for APU for now */
-       if (!(adev->flags & AMD_IS_APU)) {
-               r = amdgpu_uvd_resume(adev);
-               if (r)
-                       return r;
-       }
+       r = amdgpu_uvd_resume(adev);
+       if (r)
+               return r;
+
        return uvd_v6_0_hw_init(adev);
 }
 
@@ -514,6 +848,22 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
 
        WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);
 
+       if (uvd_v6_0_enc_support(adev)) {
+               ring = &adev->uvd.ring_enc[0];
+               WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+               WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+               WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr);
+               WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+               WREG32(mmUVD_RB_SIZE, ring->ring_size / 4);
+
+               ring = &adev->uvd.ring_enc[1];
+               WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+               WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+               WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr);
+               WREG32(mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+               WREG32(mmUVD_RB_SIZE2, ring->ring_size / 4);
+       }
+
        return 0;
 }
 
@@ -576,6 +926,26 @@ static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
        amdgpu_ring_write(ring, 2);
 }
 
+/**
+ * uvd_v6_0_enc_ring_emit_fence - emit an enc fence & trap command
+ *
+ * @ring: amdgpu_ring pointer
+ * @addr: GPU address the fence value is written to
+ * @seq: sequence number to write
+ * @flags: AMDGPU_FENCE_FLAG_* bits; 64-bit fences are not supported here
+ *
+ * Write an enc fence and a trap command to the ring.
+ */
+static void uvd_v6_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
+                       u64 seq, unsigned flags)
+{
+       WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
+
+       amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
+       amdgpu_ring_write(ring, addr);
+       amdgpu_ring_write(ring, upper_32_bits(addr));
+       amdgpu_ring_write(ring, seq);
+       amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
+}
+
 /**
  * uvd_v6_0_ring_emit_hdp_flush - emit an hdp flush
  *
@@ -667,6 +1037,24 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, ib->length_dw);
 }
 
+/**
+ * uvd_v6_0_enc_ring_emit_ib - enc execute indirect buffer
+ *
+ * @ring: amdgpu_ring pointer
+ * @ib: indirect buffer to execute
+ * @vm_id: VM id the indirect buffer belongs to
+ * @ctx_switch: unused by the enc ring
+ *
+ * Write enc ring commands to execute the indirect buffer
+ */
+static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
+               struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
+{
+       amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
+       amdgpu_ring_write(ring, vm_id);
+       amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+       amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+       amdgpu_ring_write(ring, ib->length_dw);
+}
+
 static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                         unsigned vm_id, uint64_t pd_addr)
 {
@@ -718,6 +1106,33 @@ static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
        amdgpu_ring_write(ring, 0xE);
 }
 
+/**
+ * uvd_v6_0_enc_ring_emit_pipeline_sync - emit an enc pipeline sync
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits a HEVC_ENC_CMD_WAIT_GE on the ring's fence address/sequence so
+ * the engine waits for the last emitted fence before continuing.
+ */
+static void uvd_v6_0_enc_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+       uint32_t seq = ring->fence_drv.sync_seq;
+       uint64_t addr = ring->fence_drv.gpu_addr;
+
+       amdgpu_ring_write(ring, HEVC_ENC_CMD_WAIT_GE);
+       amdgpu_ring_write(ring, lower_32_bits(addr));
+       amdgpu_ring_write(ring, upper_32_bits(addr));
+       amdgpu_ring_write(ring, seq);
+}
+
+/* terminate the enc command stream with a single END command */
+static void uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring)
+{
+       amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
+}
+
+/* emit enc commands to update the page table base and flush the VM TLB */
+static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
+        unsigned int vm_id, uint64_t pd_addr)
+{
+       amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB);
+       amdgpu_ring_write(ring, vm_id);
+       amdgpu_ring_write(ring, pd_addr >> 12); /* page-frame number of the PD */
+
+       amdgpu_ring_write(ring, HEVC_ENC_CMD_FLUSH_TLB);
+       amdgpu_ring_write(ring, vm_id);
+}
+
 static bool uvd_v6_0_is_idle(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -825,8 +1240,31 @@ static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
 {
+       bool int_handled = true;
        DRM_DEBUG("IH: UVD TRAP\n");
-       amdgpu_fence_process(&adev->uvd.ring);
+
+       switch (entry->src_id) {
+       case 124:
+               amdgpu_fence_process(&adev->uvd.ring);
+               break;
+       case 119:
+               if (likely(uvd_v6_0_enc_support(adev)))
+                       amdgpu_fence_process(&adev->uvd.ring_enc[0]);
+               else
+                       int_handled = false;
+               break;
+       case 120:
+               if (likely(uvd_v6_0_enc_support(adev)))
+                       amdgpu_fence_process(&adev->uvd.ring_enc[1]);
+               else
+                       int_handled = false;
+               break;
+       }
+
+       if (false == int_handled)
+                       DRM_ERROR("Unhandled interrupt: %d %d\n",
+                         entry->src_id, entry->src_data[0]);
+
        return 0;
 }
 
@@ -1153,6 +1591,33 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
        .end_use = amdgpu_uvd_ring_end_use,
 };
 
+/* ring function table shared by both UVD ENC rings (VM mode) */
+static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
+       .type = AMDGPU_RING_TYPE_UVD_ENC,
+       .align_mask = 0x3f,
+       .nop = HEVC_ENC_CMD_NO_OP,
+       .support_64bit_ptrs = false,
+       .get_rptr = uvd_v6_0_enc_ring_get_rptr,
+       .get_wptr = uvd_v6_0_enc_ring_get_wptr,
+       .set_wptr = uvd_v6_0_enc_ring_set_wptr,
+       .emit_frame_size =
+               4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */
+               6 + /* uvd_v6_0_enc_ring_emit_vm_flush */
+               5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */
+               1, /* uvd_v6_0_enc_ring_insert_end */
+       .emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */
+       .emit_ib = uvd_v6_0_enc_ring_emit_ib,
+       .emit_fence = uvd_v6_0_enc_ring_emit_fence,
+       .emit_vm_flush = uvd_v6_0_enc_ring_emit_vm_flush,
+       .emit_pipeline_sync = uvd_v6_0_enc_ring_emit_pipeline_sync,
+       .test_ring = uvd_v6_0_enc_ring_test_ring,
+       .test_ib = uvd_v6_0_enc_ring_test_ib,
+       .insert_nop = amdgpu_ring_insert_nop,
+       .insert_end = uvd_v6_0_enc_ring_insert_end,
+       .pad_ib = amdgpu_ring_generic_pad_ib,
+       .begin_use = amdgpu_uvd_ring_begin_use,
+       .end_use = amdgpu_uvd_ring_end_use,
+};
+
 static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
 {
        if (adev->asic_type >= CHIP_POLARIS10) {
@@ -1164,6 +1629,16 @@ static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
        }
 }
 
+/* point every enc ring at the shared enc ring function table */
+static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev)
+{
+       int i;
+
+       for (i = 0; i < adev->uvd.num_enc_rings; ++i)
+               adev->uvd.ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs;
+
+       DRM_INFO("UVD ENC is enabled in VM mode\n");
+}
+
 static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
        .set = uvd_v6_0_set_interrupt_state,
        .process = uvd_v6_0_process_interrupt,
@@ -1171,7 +1646,11 @@ static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
 
 static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
 {
-       adev->uvd.irq.num_types = 1;
+       /* one IRQ type for the decode ring plus one per enc ring */
+       if (uvd_v6_0_enc_support(adev))
+               adev->uvd.irq.num_types = adev->uvd.num_enc_rings + 1;
+       else
+               adev->uvd.irq.num_types = 1;
+
        adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs;
 }