/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "amdgpu_cs.h"
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v2_0.h"
#include "mmsch_v3_0.h"

#include "vcn/vcn_3_0_0_offset.h"
#include "vcn/vcn_3_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"

#include <drm/drm_drv.h>

#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET			0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET			0x0f
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET			0x10
#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET			0x11
#define mmUVD_NO_OP_INTERNAL_OFFSET				0x29
#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET			0x66
#define mmUVD_SCRATCH9_INTERNAL_OFFSET				0xc01d

#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET			0x431
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET		0x3b4
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET	0x3b5
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET			0x25c

#define VCN_INSTANCES_SIENNA_CICHLID				2
#define DEC_SW_RING_ENABLED					FALSE

#define RDECODE_MSG_CREATE					0x00000000
#define RDECODE_MESSAGE_CREATE					0x00000001

static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};

static int vcn_v3_0_start_sriov(struct amdgpu_device *adev);
static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v3_0_set_powergating_state(void *handle,
			enum amd_powergating_state state);
static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
			int inst_idx, struct dpg_pause_state *new_state);

static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring);

/**
 * vcn_v3_0_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v3_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID;
		adev->vcn.harvest_config = 0;
		adev->vcn.num_enc_rings = 1;
	} else {
		if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
						 AMDGPU_VCN_HARVEST_VCN1))
			/* both instances are harvested, disable the block */
			return -ENOENT;

		if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 0, 33))
			adev->vcn.num_enc_rings = 0;
		else
			adev->vcn.num_enc_rings = 2;
	}

	vcn_v3_0_set_dec_ring_funcs(adev);
	vcn_v3_0_set_enc_ring_funcs(adev);
	vcn_v3_0_set_irq_funcs(adev);

	return 0;
}

/**
 * vcn_v3_0_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v3_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, j, r;
	int vcn_doorbell_index = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	amdgpu_vcn_setup_ucode(adev);

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	/*
	 * Note: doorbell assignment is fixed for SRIOV multiple VCN engines
	 * Formula:
	 *   vcn_db_base  = adev->doorbell_index.vcn.vcn_ring0_1 << 1;
	 *   dec_ring_i   = vcn_db_base + i * (adev->vcn.num_enc_rings + 1)
	 *   enc_ring_i,j = vcn_db_base + i * (adev->vcn.num_enc_rings + 1) + 1 + j
	 */
	if (amdgpu_sriov_vf(adev)) {
		vcn_doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1;
		/* get DWORD offset */
		vcn_doorbell_index = vcn_doorbell_index << 1;
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		volatile struct amdgpu_fw_shared *fw_shared;

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
		adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
		adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;

		adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.scratch9 = SOC15_REG_OFFSET(VCN, i, mmUVD_SCRATCH9);
		adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.data0 = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_DATA0);
		adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.data1 = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_DATA1);
		adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.cmd = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_CMD);
		adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.nop = SOC15_REG_OFFSET(VCN, i, mmUVD_NO_OP);

		/* VCN DEC TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[i].irq);
		if (r)
			return r;

		atomic_set(&adev->vcn.inst[i].sched_score, 0);

		ring = &adev->vcn.inst[i].ring_dec;
		ring->use_doorbell = true;
		if (amdgpu_sriov_vf(adev)) {
			ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.num_enc_rings + 1);
		} else {
			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i;
		}
		sprintf(ring->name, "vcn_dec_%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
				     AMDGPU_RING_PRIO_DEFAULT,
				     &adev->vcn.inst[i].sched_score);
		if (r)
			return r;

		for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
			enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(j);

			/* VCN ENC TRAP */
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				j + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq);
			if (r)
				return r;

			ring = &adev->vcn.inst[i].ring_enc[j];
			ring->use_doorbell = true;
			if (amdgpu_sriov_vf(adev)) {
				ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.num_enc_rings + 1) + 1 + j;
			} else {
				ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + j + 8 * i;
			}
			sprintf(ring->name, "vcn_enc_%d.%d", i, j);
			r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
					     hw_prio, &adev->vcn.inst[i].sched_score);
			if (r)
				return r;
		}

		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
		fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SW_RING_FLAG) |
					     cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG) |
					     cpu_to_le32(AMDGPU_VCN_FW_SHARED_FLAG_0_RB);
		fw_shared->sw_ring.is_enabled = cpu_to_le32(DEC_SW_RING_ENABLED);
		fw_shared->present_flag_0 |= AMDGPU_VCN_SMU_VERSION_INFO_FLAG;
		if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 1, 2))
			fw_shared->smu_interface_info.smu_interface_type = 2;
		else if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 1, 1))
			fw_shared->smu_interface_info.smu_interface_type = 1;

		if (amdgpu_vcnfw_log)
			amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
	}

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_alloc_mm_table(adev);
		if (r)
			return r;
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		adev->vcn.pause_dpg_mode = vcn_v3_0_pause_dpg_mode;

	return 0;
}

/**
 * vcn_v3_0_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v3_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, r, idx;

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			volatile struct amdgpu_fw_shared *fw_shared;

			if (adev->vcn.harvest_config & (1 << i))
				continue;

			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
			fw_shared->present_flag_0 = 0;
			fw_shared->sw_ring.is_enabled = false;
		}

		drm_dev_exit(idx);
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_free_mm_table(adev);

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v3_0_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v3_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, j, r;

	if (amdgpu_sriov_vf(adev)) {
		r = vcn_v3_0_start_sriov(adev);
		if (r)
			goto done;

		/* initialize VCN dec and enc ring buffers */
		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			ring = &adev->vcn.inst[i].ring_dec;
			if (amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, i)) {
				ring->sched.ready = false;
				ring->no_scheduler = true;
				dev_info(adev->dev, "ring %s is disabled by hypervisor\n", ring->name);
			} else {
				ring->wptr = 0;
				ring->wptr_old = 0;
				vcn_v3_0_dec_ring_set_wptr(ring);
				ring->sched.ready = true;
			}

			for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
				ring = &adev->vcn.inst[i].ring_enc[j];
				if (amdgpu_vcn_is_disabled_vcn(adev, VCN_ENCODE_RING, i)) {
					ring->sched.ready = false;
					ring->no_scheduler = true;
					dev_info(adev->dev, "ring %s is disabled by hypervisor\n", ring->name);
				} else {
					ring->wptr = 0;
					ring->wptr_old = 0;
					vcn_v3_0_enc_ring_set_wptr(ring);
					ring->sched.ready = true;
				}
			}
		}
	} else {
		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			ring = &adev->vcn.inst[i].ring_dec;

			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
						ring->doorbell_index, i);

			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;

			for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
				ring = &adev->vcn.inst[i].ring_enc[j];
				r = amdgpu_ring_test_helper(ring);
				if (r)
					goto done;
			}
		}
	}

done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully(under %s).\n",
			(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");

	return r;
}

/**
 * vcn_v3_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v3_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if (!amdgpu_sriov_vf(adev)) {
			if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
			    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
			     RREG32_SOC15(VCN, i, mmUVD_STATUS))) {
				vcn_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
			}
		}
	}

	return 0;
}

/**
 * vcn_v3_0_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v3_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v3_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v3_0_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v3_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v3_0_hw_init(adev);

	return r;
}

/**
 * vcn_v3_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v3_0_mc_resume(struct amdgpu_device *adev, int inst)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo));
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi));
		WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[inst].gpu_addr));
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[inst].gpu_addr));
		offset = size;
		WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET0,
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	/* non-cache window */
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_NONCACHE_SIZE0,
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
}

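/**
 * vcn_v3_0_mc_resume_dpg_mode - memory controller programming for DPG mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number
 * @indirect: program through the DPG indirect SRAM when true
 *
 * Same cache-window setup as vcn_v3_0_mc_resume (firmware, stack, context
 * and the non-cached firmware shared buffer), issued through the
 * WREG32_SOC15_DPG_MODE path used while dynamic power gating is active.
 */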
static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		UVD, inst_idx, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}

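/**
 * vcn_v3_0_disable_static_power_gating - power up VCN sub-blocks
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Kick the UVD power-gating state machine via mmUVD_PGFSM_CONFIG, wait for
 * mmUVD_PGFSM_STATUS to report the power-on transition, then refresh the
 * power-gating enable bits in mmUVD_POWER_STATUS.
 */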
static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data = 0;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIRL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDATD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, inst, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_PGFSM_STATUS,
			UVD_PGFSM_STATUS__UVDM_UVDU_UVDLM_PWR_ON_3_0, 0x3F3FFFFF);

		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIRL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDATD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, inst, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_PGFSM_STATUS, 0, 0x3F3FFFFF);
	}

	data = RREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
			UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS, data);
}

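/**
 * vcn_v3_0_enable_static_power_gating - power gate VCN sub-blocks
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Turn on the tiles-off indicator in mmUVD_POWER_STATUS, then request
 * power-down of all UVD sub-blocks via mmUVD_PGFSM_CONFIG and wait for
 * mmUVD_PGFSM_STATUS to confirm.
 */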
static void vcn_v3_0_enable_static_power_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS, data);

		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIRL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDATD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, inst, mmUVD_PGFSM_CONFIG, data);

		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIRL_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDLM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDAB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDATD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDNA_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDNB_PWR_STATUS__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_PGFSM_STATUS, data, 0x3F3FFFFF);
	}
}

/**
 * vcn_v3_0_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Disable clock gating for VCN block
 */
static void vcn_v3_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data;

	/* VCN disable CGC */
	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__UDEC_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__IDCT_MASK
		| UVD_CGC_GATE__MPRD_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__UDEC_RE_MASK
		| UVD_CGC_GATE__UDEC_CM_MASK
		| UVD_CGC_GATE__UDEC_IT_MASK
		| UVD_CGC_GATE__UDEC_DB_MASK
		| UVD_CGC_GATE__UDEC_MP_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__MMSCH_MASK);

	WREG32_SOC15(VCN, inst, mmUVD_CGC_GATE, data);

	SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_CGC_GATE, 0, 0xFFFFFFFF);

	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCLR_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__IME_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK
		| UVD_SUVD_CGC_GATE__EFC_MASK
		| UVD_SUVD_CGC_GATE__SAOE_MASK
		| UVD_SUVD_CGC_GATE__SRE_AV1_MASK
		| UVD_SUVD_CGC_GATE__FBC_PCLK_MASK
		| UVD_SUVD_CGC_GATE__FBC_CCLK_MASK
		| UVD_SUVD_CGC_GATE__SCM_AV1_MASK
		| UVD_SUVD_CGC_GATE__SMPA_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE2);
	data |= (UVD_SUVD_CGC_GATE2__MPBE0_MASK
		| UVD_SUVD_CGC_GATE2__MPBE1_MASK
		| UVD_SUVD_CGC_GATE2__SIT_AV1_MASK
		| UVD_SUVD_CGC_GATE2__SDB_AV1_MASK
		| UVD_SUVD_CGC_GATE2__MPC1_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE2, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__EFC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SAOE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK
		| UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL, data);
}

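/**
 * vcn_v3_0_clock_gating_dpg_mode - clock gating setup in DPG mode
 *
 * @adev: amdgpu_device pointer
 * @sram_sel: SRAM bank selector forwarded to the DPG write helper
 * @inst_idx: instance number
 * @indirect: program through the DPG indirect SRAM when true
 *
 * Program UVD_CGC_CTRL/UVD_CGC_GATE and the SUVD gating registers through
 * the WREG32_SOC15_DPG_MODE path.
 */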
static void vcn_v3_0_clock_gating_dpg_mode(struct amdgpu_device *adev,
		uint8_t sram_sel, int inst_idx, uint8_t indirect)
{
	uint32_t reg_data = 0;

	/* enable sw clock gating control */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		UVD_CGC_CTRL__SYS_MODE_MASK |
		UVD_CGC_CTRL__UDEC_MODE_MASK |
		UVD_CGC_CTRL__MPEG2_MODE_MASK |
		UVD_CGC_CTRL__REGS_MODE_MASK |
		UVD_CGC_CTRL__RBC_MODE_MASK |
		UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		UVD_CGC_CTRL__IDCT_MODE_MASK |
		UVD_CGC_CTRL__MPRD_MODE_MASK |
		UVD_CGC_CTRL__MPC_MODE_MASK |
		UVD_CGC_CTRL__LBSI_MODE_MASK |
		UVD_CGC_CTRL__LRBBM_MODE_MASK |
		UVD_CGC_CTRL__WCB_MODE_MASK |
		UVD_CGC_CTRL__VCPU_MODE_MASK |
		UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}

/**
 * vcn_v3_0_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Enable clock gating for VCN block
 */
static void vcn_v3_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data;

	/* enable VCN CGC */
	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__EFC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SAOE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK
		| UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL, data);
}

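/**
 * vcn_v3_0_start_dpg_mode - VCN start sequence under dynamic power gating
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number
 * @indirect: program the registers through the DPG indirect SRAM
 *
 * Enable PG mode, set up clock gating, LMI and MPC, resume the memory
 * controller windows, then bring up the decode ring buffer with the
 * firmware shared-memory queue flags toggled around the WPTR/RPTR reset.
 */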
static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;

	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS, tmp);

	if (indirect)
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;

	/* enable clock gating */
	vcn_v3_0_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MASTINT_EN), 0, 0, indirect);

	/* setup mmUVD_LMI_CTRL */
	tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MPC_SET_MUXB0),
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v3_0_mc_resume_dpg_mode(adev, inst_idx, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);

	/* enable LMI MC and UMC channels */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_CTRL2), 0, 0, indirect);

	/* unblock VCPU register access */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_RB_ARB_CTRL), 0, 0, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	/* add nop to workaround PSP size check */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	if (indirect)
		psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
			(uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
				   (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));

	ring = &adev->vcn.inst[inst_idx].ring_dec;
	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp);

	/* Stall DPG before WPTR/RPTR reset */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
		UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
		~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
	fw_shared->multi_queue.decode_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);

	/* set the write pointer delay */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
		(upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
		lower_32_bits(ring->wptr));

	/* Reset FW shared memory RBC WPTR/RPTR */
	fw_shared->rb.rptr = 0;
	fw_shared->rb.wptr = lower_32_bits(ring->wptr);

	/* resetting done, fw can check RB ring */
	fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

	/* Unstall DPG */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
		0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

	return 0;
}

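/**
 * vcn_v3_0_start - VCN start sequence (SPG mode)
 *
 * @adev: amdgpu_device pointer
 *
 * Full non-DPG bring-up per instance: disable power and clock gating,
 * boot the VCPU with up to ten reset retries, then initialize the decode
 * and encode ring buffers.
 */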
static int vcn_v3_0_start(struct amdgpu_device *adev)
{
	volatile struct amdgpu_fw_shared *fw_shared;
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	int i, j, k, r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v3_0_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
			continue;
		}

		/* disable VCN power gating */
		vcn_v3_0_disable_static_power_gating(adev, i);

		/* set VCN status busy */
		tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
		WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);

		/* SW clock gating */
		vcn_v3_0_disable_clock_gating(adev, i);

		/* enable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

		/* disable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* enable LMI MC and UMC channels */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
		tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);

		/* setup mmUVD_LMI_CTRL */
		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

		/* setup mmUVD_MPC_CNTL */
		tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
		tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
		tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);

		/* setup UVD_MPC_SET_MUXA0 */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
			((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

		/* setup UVD_MPC_SET_MUXB0 */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
			((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

		/* setup mmUVD_MPC_SET_MUX */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
			((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
			 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

		vcn_v3_0_mc_resume(adev, i);

		/* VCN global tiling registers */
		WREG32_SOC15(VCN, i, mmUVD_GFX10_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* unblock VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* release VCPU reset to boot */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		for (j = 0; j < 10; ++j) {
			uint32_t status;

			for (k = 0; k < 100; ++k) {
				status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
				if (status & 2)
					break;
				mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("VCN[%d] decode not responding, trying to reset the VCPU!!!\n", i);
			WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
				UVD_VCPU_CNTL__BLK_RST_MASK,
				~UVD_VCPU_CNTL__BLK_RST_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
				~UVD_VCPU_CNTL__BLK_RST_MASK);

			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_ERROR("VCN[%d] decode not responding, giving up!!!\n", i);
			return r;
		}

		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
			UVD_MASTINT_EN__VCPU_EN_MASK,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* clear the busy bit of VCN_STATUS */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);

		ring = &adev->vcn.inst[i].ring_dec;
		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);

		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
		fw_shared->multi_queue.decode_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);

		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);

		WREG32_SOC15(VCN, i, mmUVD_SCRATCH2, 0);
		ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
			lower_32_bits(ring->wptr));
		fw_shared->rb.wptr = lower_32_bits(ring->wptr);
		fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

		if (adev->ip_versions[UVD_HWIP][0] != IP_VERSION(3, 0, 33)) {
			fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
			ring = &adev->vcn.inst[i].ring_enc[0];
			WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
			WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
			WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
			WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
			WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
			fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

			fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
			ring = &adev->vcn.inst[i].ring_enc[1];
			WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
			WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
			WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
			WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
			WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
			fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
		}
	}

	return 0;
}

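/**
 * vcn_v3_0_start_sriov - start VCN under SRIOV via MMSCH
 *
 * @adev: amdgpu_device pointer
 *
 * Build a per-instance MMSCH init table (register writes describing the
 * cache windows and ring buffers), hand its GPU address to the MMSCH in
 * VCN0 and poll the mailbox until the scheduler acknowledges.
 */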
static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
{
	int i, j;
	struct amdgpu_ring *ring;
	uint64_t cache_addr;
	uint64_t rb_addr;
	uint64_t ctx_addr;
	uint32_t param, resp, expected;
	uint32_t offset, cache_size;
	uint32_t tmp, timeout;

	struct amdgpu_mm_table *table = &adev->virt.mm_table;
	uint32_t *table_loc;
	uint32_t table_size;
	uint32_t size, size_dw;

	struct mmsch_v3_0_cmd_direct_write
		direct_wt = { {0} };
	struct mmsch_v3_0_cmd_direct_read_modify_write
		direct_rd_mod_wt = { {0} };
	struct mmsch_v3_0_cmd_end end = { {0} };
	struct mmsch_v3_0_init_header header;

	direct_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	end.cmd_header.command_type =
		MMSCH_COMMAND__END;

	header.version = MMSCH_VERSION;
	header.total_size = sizeof(struct mmsch_v3_0_init_header) >> 2;
	for (i = 0; i < AMDGPU_MAX_VCN_INSTANCES; i++) {
		header.inst[i].init_status = 0;
		header.inst[i].table_offset = 0;
		header.inst[i].table_size = 0;
	}

	table_loc = (uint32_t *)table->cpu_addr;
	table_loc += header.total_size;
	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		table_size = 0;

		MMSCH_V3_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_STATUS),
			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);

		cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
			offset = 0;
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_VCPU_CACHE_OFFSET0),
				0);
		} else {
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = cache_size;
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_VCPU_CACHE_OFFSET0),
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}

		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_VCPU_CACHE_SIZE0),
			cache_size);

		cache_addr = adev->vcn.inst[i].gpu_addr + offset;
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(cache_addr));
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(cache_addr));
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_VCPU_CACHE_OFFSET1),
			0);
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_VCPU_CACHE_SIZE1),
			AMDGPU_VCN_STACK_SIZE);

		cache_addr = adev->vcn.inst[i].gpu_addr + offset +
			AMDGPU_VCN_STACK_SIZE;
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
			lower_32_bits(cache_addr));
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
			upper_32_bits(cache_addr));
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_VCPU_CACHE_OFFSET2),
			0);
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_VCPU_CACHE_SIZE2),
			AMDGPU_VCN_CONTEXT_SIZE);

		for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
			ring = &adev->vcn.inst[i].ring_enc[j];
			ring->wptr = 0;
			rb_addr = ring->gpu_addr;
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_RB_BASE_LO),
				lower_32_bits(rb_addr));
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_RB_BASE_HI),
				upper_32_bits(rb_addr));
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_RB_SIZE),
				ring->ring_size / 4);
		}

		ring = &adev->vcn.inst[i].ring_dec;
		ring->wptr = 0;
		rb_addr = ring->gpu_addr;
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
			lower_32_bits(rb_addr));
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
			upper_32_bits(rb_addr));
		/* force RBC into idle state */
		tmp = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, tmp);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_RBC_RB_CNTL),
			tmp);

		/* add end packet */
		MMSCH_V3_0_INSERT_END();

		/* refine header */
		header.inst[i].init_status = 0;
		header.inst[i].table_offset = header.total_size;
		header.inst[i].table_size = table_size;
		header.total_size += table_size;
	}

	/* Update init table header in memory */
	size = sizeof(struct mmsch_v3_0_init_header);
	table_loc = (uint32_t *)table->cpu_addr;
	memcpy((void *)table_loc, &header, size);

	/* message MMSCH (in VCN[0]) to initialize this client
	 * 1, write to mmsch_vf_ctx_addr_lo/hi register with GPU mc addr
	 * of memory descriptor location
	 */
	ctx_addr = table->gpu_addr;
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));

	/* 2, update vmid of descriptor */
	tmp = RREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID);
	tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	/* use domain0 for MM scheduler */
	tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID, tmp);

	/* 3, notify mmsch about the size of this descriptor */
	size = header.total_size;
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP, 0);

	/* 5, kick off the initialization and wait until
	 * MMSCH_VF_MAILBOX_RESP becomes non-zero
	 */
	param = 0x10000001;
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_HOST, param);
	tmp = 0;
	timeout = 1000;
	resp = 0;
	expected = param + 1;
	while (resp != expected) {
		resp = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
		if (resp == expected)
			break;

		udelay(10);
		tmp = tmp + 10;
		if (tmp >= timeout) {
			DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\
				" waiting for mmMMSCH_VF_MAILBOX_RESP "\
				"(expected=0x%08x, readback=0x%08x)\n",
				tmp, expected, resp);
			return -EBUSY;
		}
	}

	return 0;
}

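/**
 * vcn_v3_0_stop_dpg_mode - stop VCN in dynamic power gating mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number
 *
 * Unpause DPG, wait for the power status and the ring read/write pointers
 * to settle, then clear the PG mode bit.
 */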
static int vcn_v3_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
	struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
	uint32_t tmp;

	vcn_v3_0_pause_dpg_mode(adev, inst_idx, &state);

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);

	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return 0;
}

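/**
 * vcn_v3_0_stop - stop the VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * Per instance: wait for idle, drain the LMI, reset and clock-gate the
 * VCPU, then re-enable clock and power gating.
 */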
static int vcn_v3_0_stop(struct amdgpu_device *adev)
{
	uint32_t tmp;
	int i, r = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v3_0_stop_dpg_mode(adev, i);
			continue;
		}

		/* wait for vcn idle */
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
		if (r)
			return r;

		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__READ_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* disable LMI UMC channel */
		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);
		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
			UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* block VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
			UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* reset VCPU */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__BLK_RST_MASK,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		/* disable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
			~(UVD_VCPU_CNTL__CLK_EN_MASK));

		/* apply soft reset */
		tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
		tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);
		tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
		tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);

		/* clear status */
		WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);

		/* apply HW clock gating */
		vcn_v3_0_enable_clock_gating(adev, i);

		/* enable VCN power gating */
		vcn_v3_0_enable_static_power_gating(adev, i);
	}

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, false);

	return 0;
}

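/**
 * vcn_v3_0_pause_dpg_mode - pause or unpause DPG
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number
 * @new_state: requested pause state
 *
 * On pause, request NJ pause through mmUVD_DPG_PAUSE, wait for the ACK and
 * restore the encode/decode ring pointers from firmware shared memory; on
 * unpause, just clear the request bit.
 */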
static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
		int inst_idx, struct dpg_pause_state *new_state)
{
	volatile struct amdgpu_fw_shared *fw_shared;
	struct amdgpu_ring *ring;
	uint32_t reg_data = 0;
	int ret_code;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEBUG("dpg pause state changed %d -> %d",
			adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
		reg_data = RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_DPG_PAUSE,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

				/* Stall DPG before WPTR/RPTR reset */
				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
					UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
					~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

				if (adev->ip_versions[UVD_HWIP][0] != IP_VERSION(3, 0, 33)) {
					/* Restore */
					fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
					fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
					ring = &adev->vcn.inst[inst_idx].ring_enc[0];
					ring->wptr = 0;
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
					fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

					fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
					ring = &adev->vcn.inst[inst_idx].ring_enc[1];
					ring->wptr = 0;
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
					fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

					/* restore wptr/rptr with pointers saved in FW shared memory */
					WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, fw_shared->rb.rptr);
					WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR, fw_shared->rb.wptr);
				}

				/* Unstall DPG */
				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
					0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS,
					UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
			}
		} else {
			/* unpause dpg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}
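/*
 * Callers reach this through the adev->vcn.pause_dpg_mode hook rather than
 * directly. A minimal sketch of a caller (illustrative only; the real call
 * sites live in the common VCN code):
 *
 *	struct dpg_pause_state state = { .fw_based = VCN_DPG_STATE__PAUSE };
 *
 *	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
 *		adev->vcn.pause_dpg_mode(adev, inst_idx, &state);
 *
 * Pausing parks the firmware scheduler so the encode rings can be
 * re-programmed; the wptr/rptr copies kept in FW shared memory are what
 * let the ring state survive the power transition.
 */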
/**
 * vcn_v3_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v3_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_RPTR);
}
/**
 * vcn_v3_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v3_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];
	else
		return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR);
}
/**
 * vcn_v3_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	volatile struct amdgpu_fw_shared *fw_shared;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		/* whenever we update RBC_RB_WPTR, save the wptr in shared rb.wptr and scratch2 */
		fw_shared = adev->vcn.inst[ring->me].fw_shared.cpu_addr;
		fw_shared->rb.wptr = lower_32_bits(ring->wptr);
		WREG32_SOC15(VCN, ring->me, mmUVD_SCRATCH2,
			lower_32_bits(ring->wptr));
	}

	if (ring->use_doorbell) {
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}
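/*
 * In DPG mode the UVD registers lose state when the block powers down, so
 * the decode wptr is mirrored both into FW shared memory (rb.wptr) and
 * into mmUVD_SCRATCH2 above; vcn_v3_0_pause_dpg_mode() restores
 * mmUVD_RBC_RB_RPTR/WPTR from the shared-memory copy on the way back up.
 */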
static void vcn_v3_0_dec_sw_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
	u64 seq, uint32_t flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_TRAP);
}

static void vcn_v3_0_dec_sw_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_END);
}
static void vcn_v3_0_dec_sw_ring_emit_ib(struct amdgpu_ring *ring,
	struct amdgpu_job *job,
	struct amdgpu_ib *ib,
	uint32_t flags)
{
	uint32_t vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_IB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}
static void vcn_v3_0_dec_sw_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
	uint32_t val, uint32_t mask)
{
	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}
static void vcn_v3_0_dec_sw_ring_emit_vm_flush(struct amdgpu_ring *ring,
	uint32_t vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for register write */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	vcn_v3_0_dec_sw_ring_emit_reg_wait(ring, data0, data1, mask);
}
static void vcn_v3_0_dec_sw_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}
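/*
 * The software decode ring is a plain dword stream: one opcode followed by
 * its operands. Inferred from the emit helpers above (not from firmware
 * documentation), the encodings are roughly:
 *
 *	VCN_DEC_SW_CMD_IB,        vmid, ib_lo, ib_hi, length_dw  (5 dwords)
 *	VCN_DEC_SW_CMD_REG_WRITE, reg << 2, val                  (3 dwords)
 *	VCN_DEC_SW_CMD_REG_WAIT,  reg << 2, mask, val            (4 dwords)
 *	VCN_DEC_SW_CMD_FENCE,     addr_lo, addr_hi, seq,
 *	VCN_DEC_SW_CMD_TRAP                                      (5 dwords)
 *
 * The opcode values themselves live in amdgpu_vcn.h.
 */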
static const struct amdgpu_ring_funcs vcn_v3_0_dec_sw_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0x3f,
	.nop = VCN_DEC_SW_CMD_NO_OP,
	.secure_submission_supported = true,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = vcn_v3_0_dec_ring_get_rptr,
	.get_wptr = vcn_v3_0_dec_ring_get_wptr,
	.set_wptr = vcn_v3_0_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v3_0_dec_sw_ring_emit_vm_flush */
		5 + 5 + /* vcn_v3_0_dec_sw_ring_emit_fence x2 vm fence */
		1, /* vcn_v3_0_dec_sw_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v3_0_dec_sw_ring_emit_ib */
	.emit_ib = vcn_v3_0_dec_sw_ring_emit_ib,
	.emit_fence = vcn_v3_0_dec_sw_ring_emit_fence,
	.emit_vm_flush = vcn_v3_0_dec_sw_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_dec_sw_ring_test_ring,
	.test_ib = NULL, /* amdgpu_vcn_dec_sw_ring_test_ib */
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v3_0_dec_sw_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v3_0_dec_sw_ring_emit_wreg,
	.emit_reg_wait = vcn_v3_0_dec_sw_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
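/*
 * Sanity check on .emit_frame_size above: a SW-ring REG_WRITE is 3 dwords
 * and a REG_WAIT is 4, which is where the SOC15_FLUSH_GPU_TLB_NUM_WREG * 3
 * and SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 terms come from; the trailing
 * wait in emit_vm_flush adds 4, each fence 5, and insert_end 1. The
 * scheduler reserves this worst case for every submission on the ring.
 */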
static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p,
				struct amdgpu_job *job)
{
	struct drm_gpu_scheduler **scheds;

	/* The create msg must be in the first IB submitted */
	if (atomic_read(&job->base.entity->fence_seq))
		return -EINVAL;

	scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC]
		[AMDGPU_RING_PRIO_DEFAULT].sched;
	drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
	return 0;
}
static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
			    uint64_t addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo_va_mapping *map;
	uint32_t *msg, num_buffers;
	struct amdgpu_bo *bo;
	uint64_t start, end;
	unsigned int i;
	void *ptr;
	int r;

	addr &= AMDGPU_GMC_HOLE_MASK;
	r = amdgpu_cs_find_mapping(p, addr, &bo, &map);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
		return r;
	}

	start = map->start * AMDGPU_GPU_PAGE_SIZE;
	end = (map->last + 1) * AMDGPU_GPU_PAGE_SIZE;
	if (addr & 0x7) {
		DRM_ERROR("VCN messages must be 8 byte aligned!\n");
		return -EINVAL;
	}

	bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r) {
		DRM_ERROR("Failed validating the VCN message BO (%d)!\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the VCN message (%d)!\n", r);
		return r;
	}

	msg = ptr + addr - start;

	/* Check that the message fits inside the BO */
	if (msg[1] > end - addr) {
		r = -EINVAL;
		goto out;
	}

	if (msg[3] != RDECODE_MSG_CREATE)
		goto out;

	num_buffers = msg[2];
	for (i = 0, msg = &msg[6]; i < num_buffers; ++i, msg += 4) {
		uint32_t offset, size, *create;

		if (msg[0] != RDECODE_MESSAGE_CREATE)
			continue;

		offset = msg[1];
		size = msg[2];

		if (offset + size > end) {
			r = -EINVAL;
			goto out;
		}

		create = ptr + addr + offset - start;

		/* H264, HEVC and VP9 can run on any instance */
		if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
			continue;

		r = vcn_v3_0_limit_sched(p, job);
		if (r)
			goto out;
	}

out:
	amdgpu_bo_kunmap(bo);
	return r;
}
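/*
 * Layout of the decode message parsed above, inferred from the bounds
 * checks in this function rather than from a published format: msg[1] is
 * the message size, msg[3] the message type, and for create messages
 * msg[2] counts the buffers whose descriptors start at msg[6], spaced four
 * dwords apart. The first dword of each create descriptor selects the
 * codec: 0x7 (H264), 0x10 (HEVC) and 0x11 (VP9) may decode on any
 * instance; anything else is pinned to instance 0 through
 * vcn_v3_0_limit_sched().
 */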
static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
					   struct amdgpu_job *job,
					   struct amdgpu_ib *ib)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
	uint32_t msg_lo = 0, msg_hi = 0;
	unsigned int i;
	int r;

	/* The first instance can decode anything */
	if (!ring->me)
		return 0;

	for (i = 0; i < ib->length_dw; i += 2) {
		uint32_t reg = amdgpu_ib_get_value(ib, i);
		uint32_t val = amdgpu_ib_get_value(ib, i + 1);

		if (reg == PACKET0(p->adev->vcn.internal.data0, 0)) {
			msg_lo = val;
		} else if (reg == PACKET0(p->adev->vcn.internal.data1, 0)) {
			msg_hi = val;
		} else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0) &&
			   val == 0) {
			r = vcn_v3_0_dec_msg(p, job,
					     ((u64)msg_hi) << 32 | msg_lo);
			if (r)
				return r;
		}
	}
	return 0;
}
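/*
 * The decode ring carries 64-bit message addresses as three PACKET0
 * writes: DATA0 (low 32 bits), DATA1 (high 32 bits), then a GPCOM CMD
 * write whose value 0 appears to submit the message to the VCPU. The scan
 * above rebuilds the address from that triplet so the message can be
 * inspected before the IB ever reaches the hardware.
 */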
static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.secure_submission_supported = true,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = vcn_v3_0_dec_ring_get_rptr,
	.get_wptr = vcn_v3_0_dec_ring_get_wptr,
	.set_wptr = vcn_v3_0_dec_ring_set_wptr,
	.patch_cs_in_place = vcn_v3_0_ring_patch_cs_in_place,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
	.test_ring = vcn_v2_0_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
	.insert_start = vcn_v2_0_dec_ring_insert_start,
	.insert_end = vcn_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
/**
 * vcn_v3_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v3_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR2);
}
/**
 * vcn_v3_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v3_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR);
	} else {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2);
	}
}
/**
 * vcn_v3_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		}
	} else {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		}
	}
}
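/*
 * Both encode rings prefer the doorbell path: the new wptr is published to
 * the writeback slot first so the doorbell and any later get_wptr() agree,
 * and only doorbell-less configurations fall back to an MMIO write of
 * mmUVD_RB_WPTR/mmUVD_RB_WPTR2.
 */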
static const struct amdgpu_ring_funcs vcn_v3_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = vcn_v3_0_enc_ring_get_rptr,
	.get_wptr = vcn_v3_0_enc_ring_get_wptr,
	.set_wptr = vcn_v3_0_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if (!DEC_SW_RING_ENABLED)
			adev->vcn.inst[i].ring_dec.funcs = &vcn_v3_0_dec_ring_vm_funcs;
		else
			adev->vcn.inst[i].ring_dec.funcs = &vcn_v3_0_dec_sw_ring_vm_funcs;
		adev->vcn.inst[i].ring_dec.me = i;
		DRM_INFO("VCN(%d) decode%s is enabled in VM mode\n", i,
			 DEC_SW_RING_ENABLED ? "(Software Ring)" : "");
	}
}
static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
			adev->vcn.inst[i].ring_enc[j].funcs = &vcn_v3_0_enc_ring_vm_funcs;
			adev->vcn.inst[i].ring_enc[j].me = i;
		}
		if (adev->vcn.num_enc_rings > 0)
			DRM_INFO("VCN(%d) encode is enabled in VM mode\n", i);
	}
}
static bool vcn_v3_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE);
	}

	return ret;
}
static int vcn_v3_0_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ret = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
			UVD_STATUS__IDLE);
		if (ret)
			return ret;
	}

	return ret;
}
static int vcn_v3_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if (enable) {
			if (RREG32_SOC15(VCN, i, mmUVD_STATUS) != UVD_STATUS__IDLE)
				return -EBUSY;
			vcn_v3_0_enable_clock_gating(adev, i);
		} else {
			vcn_v3_0_disable_clock_gating(adev, i);
		}
	}

	return 0;
}
static int vcn_v3_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	/* for SRIOV, guest should not control VCN Power-gating
	 * MMSCH FW should control Power-gating and clock-gating
	 * guest should avoid touching CGC and PG
	 */
	if (amdgpu_sriov_vf(adev)) {
		adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
		return 0;
	}

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v3_0_stop(adev);
	else
		ret = vcn_v3_0_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}
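/*
 * Illustrative sketch only: other parts of the driver request these
 * transitions through the IP-block interface, e.g. the VCN idle worker
 * gates the block with something like
 *
 *	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
 *					       AMD_PG_STATE_GATE);
 *
 * which lands here via vcn_v3_0_ip_funcs.set_powergating_state.
 */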
static int vcn_v3_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned int type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}
static int vcn_v3_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_VCN:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_VCN1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec);
		break;
	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
		break;
	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}
static const struct amdgpu_irq_src_funcs vcn_v3_0_irq_funcs = {
	.set = vcn_v3_0_set_interrupt_state,
	.process = vcn_v3_0_process_interrupt,
};
static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
		adev->vcn.inst[i].irq.funcs = &vcn_v3_0_irq_funcs;
	}
}
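/*
 * num_types is one fence interrupt per encode ring plus one for the decode
 * ring; the src_id-to-ring mapping itself is done in
 * vcn_v3_0_process_interrupt() above.
 */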
static const struct amd_ip_funcs vcn_v3_0_ip_funcs = {
	.name = "vcn_v3_0",
	.early_init = vcn_v3_0_early_init,
	.late_init = NULL,
	.sw_init = vcn_v3_0_sw_init,
	.sw_fini = vcn_v3_0_sw_fini,
	.hw_init = vcn_v3_0_hw_init,
	.hw_fini = vcn_v3_0_hw_fini,
	.suspend = vcn_v3_0_suspend,
	.resume = vcn_v3_0_resume,
	.is_idle = vcn_v3_0_is_idle,
	.wait_for_idle = vcn_v3_0_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v3_0_set_clockgating_state,
	.set_powergating_state = vcn_v3_0_set_powergating_state,
};
const struct amdgpu_ip_block_version vcn_v3_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 3,
	.minor = 0,
	.rev = 0,
	.funcs = &vcn_v3_0_ip_funcs,
};