/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_pm.h"
#include "amdgpu_psp.h"

#include "vcn/vcn_2_0_0_offset.h"
#include "vcn/vcn_2_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
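
/*
 * The *_INTERNAL_OFFSET values below are the register offsets the VCN
 * firmware expects inside ring packets; they differ from the MMIO offsets
 * the driver gets from SOC15_REG_OFFSET() for direct register access (see
 * the internal/external pairs set up in vcn_v2_0_sw_init()).
 */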
#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET			0x1fd
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET			0x503
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET			0x504
#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET			0x505
#define mmUVD_NO_OP_INTERNAL_OFFSET				0x53f
#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET			0x54a
#define mmUVD_SCRATCH9_INTERNAL_OFFSET				0xc01d

#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET			0x1e1
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET		0x5a6
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET		0x5a7
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET			0x1e2

#define mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET			0x1bfff
#define mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET			0x4029
#define mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET			0x402a
#define mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET			0x402b
#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET	0x40ea
#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET	0x40eb
#define mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET			0x40cf
#define mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET			0x40d1
#define mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET		0x40e8
#define mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET	0x40e9
#define mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET			0x4082
#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET	0x40ec
#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET	0x40ed
#define mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET		0x4085
#define mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET			0x4084
#define mmUVD_JRBC_STATUS_INTERNAL_OFFSET			0x4089
#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET			0x401f

#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR			0x18000

#define mmUVD_RBC_XX_IB_REG_CHECK				0x026b
#define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX			1
#define mmUVD_REG_XX_MASK					0x026c
#define mmUVD_REG_XX_MASK_BASE_IDX				1

static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_jpeg_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v2_0_set_powergating_state(void *handle,
				enum amd_powergating_state state);
static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
				struct dpg_pause_state *new_state);

/**
 * vcn_v2_0_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v2_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vcn.num_enc_rings = 2;

	vcn_v2_0_set_dec_ring_funcs(adev);
	vcn_v2_0_set_enc_ring_funcs(adev);
	vcn_v2_0_set_jpeg_ring_funcs(adev);
	vcn_v2_0_set_irq_funcs(adev);

	return 0;
}

/**
 * vcn_v2_0_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v2_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* VCN DEC TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
			      VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT,
			      &adev->vcn.irq);
	if (r)
		return r;

	/* VCN ENC TRAP */
	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
				      i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
				      &adev->vcn.irq);
		if (r)
			return r;
	}

	/* VCN JPEG TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
			      VCN_2_0__SRCID__JPEG_DECODE,
			      &adev->vcn.irq);
	if (r)
		return r;

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;
		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		DRM_INFO("PSP loading VCN firmware\n");
	}

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	ring = &adev->vcn.ring_dec;

	ring->use_doorbell = true;
	ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1;

	sprintf(ring->name, "vcn_dec");
	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
	if (r)
		return r;

	adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
	adev->vcn.external.scratch9 = SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
	adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
	adev->vcn.external.data0 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
	adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
	adev->vcn.external.data1 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
	adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
	adev->vcn.external.cmd = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
	adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
	adev->vcn.external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		ring = &adev->vcn.ring_enc[i];
		ring->use_doorbell = true;
		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i;
		sprintf(ring->name, "vcn_enc%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
		if (r)
			return r;
	}

	ring = &adev->vcn.ring_jpeg;
	ring->use_doorbell = true;
	ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
	sprintf(ring->name, "vcn_jpeg");
	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
	if (r)
		return r;

	adev->vcn.pause_dpg_mode = vcn_v2_0_pause_dpg_mode;

	adev->vcn.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
	adev->vcn.external.jpeg_pitch = SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH);

	return 0;
}

/**
 * vcn_v2_0_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v2_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v2_0_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v2_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->vcn.ring_dec;
	int i, r;

	adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell,
					     ring->doorbell_index);

	ring->sched.ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->sched.ready = false;
		goto done;
	}

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		ring = &adev->vcn.ring_enc[i];
		ring->sched.ready = true;
		r = amdgpu_ring_test_ring(ring);
		if (r) {
			ring->sched.ready = false;
			goto done;
		}
	}

	ring = &adev->vcn.ring_jpeg;
	ring->sched.ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r)
		ring->sched.ready = false;

done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully (under %s).\n",
			 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");

	return r;
}

/**
 * vcn_v2_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v2_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->vcn.ring_dec;
	int i;

	if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
	    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
	     RREG32_SOC15(VCN, 0, mmUVD_STATUS)))
		vcn_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);

	ring->sched.ready = false;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		ring = &adev->vcn.ring_enc[i];
		ring->sched.ready = false;
	}

	ring = &adev->vcn.ring_jpeg;
	ring->sched.ready = false;

	return 0;
}

/**
 * vcn_v2_0_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v2_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v2_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v2_0_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v2_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v2_0_hw_init(adev);

	return r;
}
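
/*
 * The VCPU sees its firmware image, stack and context through three cache
 * windows; the mc_resume helpers below tell the memory controller where each
 * window lives, either in the VCN BO or, when the PSP loads the firmware, in
 * the PSP-reserved TMR region.
 */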

/**
 * vcn_v2_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.gpu_addr));
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.gpu_addr));
		offset = size;
		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}

	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.gpu_addr + offset));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.gpu_addr + offset));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	WREG32_SOC15(UVD, 0, mmUVD_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmJPEG_DEC_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirect)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
				UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
				UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}
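
/*
 * Note: in indirect mode the WREG32_SOC15_DPG_MODE_2_0() helper does not
 * touch the register; it appends a register/value pair to the DPG SRAM
 * buffer, which vcn_v2_0_start_dpg_mode() later hands to the PSP via
 * psp_update_vcn_sram().
 */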

/**
 * vcn_v2_0_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Disable clock gating for VCN block
 */
static void vcn_v2_0_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data;

	/* UVD disable CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__UDEC_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__IDCT_MASK
		| UVD_CGC_GATE__MPRD_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__UDEC_RE_MASK
		| UVD_CGC_GATE__UDEC_CM_MASK
		| UVD_CGC_GATE__UDEC_IT_MASK
		| UVD_CGC_GATE__UDEC_DB_MASK
		| UVD_CGC_GATE__UDEC_MP_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__SCPU_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCLR_MASK
		| UVD_SUVD_CGC_GATE__UVD_SC_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}

static void vcn_v2_0_clock_gating_dpg_mode(struct amdgpu_device *adev,
		uint8_t sram_sel, uint8_t indirect)
{
	uint32_t reg_data = 0;

	/* enable sw clock gating control */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		 UVD_CGC_CTRL__SYS_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MODE_MASK |
		 UVD_CGC_CTRL__MPEG2_MODE_MASK |
		 UVD_CGC_CTRL__REGS_MODE_MASK |
		 UVD_CGC_CTRL__RBC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		 UVD_CGC_CTRL__IDCT_MODE_MASK |
		 UVD_CGC_CTRL__MPRD_MODE_MASK |
		 UVD_CGC_CTRL__MPC_MODE_MASK |
		 UVD_CGC_CTRL__LBSI_MODE_MASK |
		 UVD_CGC_CTRL__LRBBM_MODE_MASK |
		 UVD_CGC_CTRL__WCB_MODE_MASK |
		 UVD_CGC_CTRL__VCPU_MODE_MASK |
		 UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}
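
/*
 * The JPEG decoder has its own power tile (UVDJ in UVD_PGFSM_CONFIG) and its
 * own ring (JRBC), so it is brought up and torn down independently of the
 * decode/encode engines.
 */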

/**
 * jpeg_v2_0_start - start JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the JPEG block
 */
static int jpeg_v2_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->vcn.ring_jpeg;
	uint32_t tmp;
	int r = 0;

	/* disable power gating */
	tmp = 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_PGFSM_CONFIG), tmp);

	SOC15_WAIT_ON_RREG(VCN, 0,
		mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS_UVDJ_PWR_ON,
		UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK, r);

	if (r) {
		DRM_ERROR("amdgpu: JPEG disable power gating failed\n");
		return r;
	}

	/* Removing the anti hang mechanism to indicate the UVDJ tile is ON */
	tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_POWER_STATUS)) & ~0x1;
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_POWER_STATUS), tmp);

	/* JPEG disable CGC */
	tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
	tmp |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	tmp |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	tmp |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, tmp);

	tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
	tmp &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
		| JPEG_CGC_GATE__JPEG2_DEC_MASK
		| JPEG_CGC_GATE__JPEG_ENC_MASK
		| JPEG_CGC_GATE__JMCIF_MASK
		| JPEG_CGC_GATE__JRBBM_MASK);
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, tmp);

	/* enable JMI channel */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_JMI_CNTL), 0,
		~UVD_JMI_CNTL__SOFT_RESET_MASK);

	/* enable System Interrupt for JRBC */
	WREG32_P(SOC15_REG_OFFSET(VCN, 0, mmJPEG_SYS_INT_EN),
		JPEG_SYS_INT_EN__DJRBC_MASK,
		~JPEG_SYS_INT_EN__DJRBC_MASK);

	WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, 0);
	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, 0);
	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L);
	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);

	return 0;
}

/**
 * jpeg_v2_0_stop - stop JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the JPEG block
 */
static int jpeg_v2_0_stop(struct amdgpu_device *adev)
{
	uint32_t tmp;
	int r = 0;

	/* reset JMI */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_JMI_CNTL),
		UVD_JMI_CNTL__SOFT_RESET_MASK,
		~UVD_JMI_CNTL__SOFT_RESET_MASK);

	/* enable JPEG CGC */
	tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
	tmp |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	tmp |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	tmp |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, tmp);

	tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
	tmp |= (JPEG_CGC_GATE__JPEG_DEC_MASK
		| JPEG_CGC_GATE__JPEG2_DEC_MASK
		| JPEG_CGC_GATE__JPEG_ENC_MASK
		| JPEG_CGC_GATE__JMCIF_MASK
		| JPEG_CGC_GATE__JRBBM_MASK);
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, tmp);

	/* enable power gating */
	tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_POWER_STATUS));
	tmp &= ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK;
	tmp |= 0x1;	/* UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_TILES_OFF */
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_POWER_STATUS), tmp);

	tmp = 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_PGFSM_CONFIG), tmp);

	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS,
		(2 << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT),
		UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK, r);

	if (r)
		DRM_ERROR("amdgpu: JPEG enable power gating failed\n");

	return r;
}

/**
 * vcn_v2_0_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Enable clock gating for VCN block
 */
static void vcn_v2_0_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	/* enable UVD CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}
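
/*
 * UVD_PGFSM_CONFIG takes a 2-bit request per power tile: as used in this
 * file, 1 requests power-up and 2 requests power-down, with UVD_PGFSM_STATUS
 * reporting the matching per-tile state once the state machine settles.
 */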

static void vcn_v2_0_disable_static_power_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;
	int ret;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS,
			UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON_2_0, 0xFFFFF, ret);
	} else {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFF, ret);
	}

	/* polling UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS,
	 * UVDU_PWR_STATUS are 0 (power on) */

	data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
			UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
}

static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;
	int ret;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);

		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);

		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFF, ret);
	}
}
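
/*
 * vcn_v2_0_start_dpg_mode() boots the VCPU with dynamic power gating
 * enabled; when `indirect` is set, the register writes below are staged in
 * the DPG SRAM buffer and committed in one shot through the PSP.
 */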

static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
{
	struct amdgpu_ring *ring = &adev->vcn.ring_dec;
	uint32_t rb_bufsz, tmp;

	vcn_v2_0_enable_static_power_gating(adev);

	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp);

	if (indirect)
		adev->vcn.dpg_sram_curr_addr = (uint32_t *)adev->vcn.dpg_sram_cpu_addr;

	/* enable clock gating */
	vcn_v2_0_clock_gating_dpg_mode(adev, 0, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK;
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect);

	/* setup mmUVD_LMI_CTRL */
	tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_MPC_SET_MUXB0),
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v2_0_mc_resume_dpg_mode(adev, indirect);

	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);

	/* release VCPU reset to boot */
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_SOFT_RESET), 0, 0, indirect);

	/* enable LMI MC and UMC channels */
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_LMI_CTRL2),
		0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	if (indirect)
		psp_update_vcn_sram(adev, 0, adev->vcn.dpg_sram_gpu_addr,
			(uint32_t)((uintptr_t)adev->vcn.dpg_sram_curr_addr -
				   (uintptr_t)adev->vcn.dpg_sram_cpu_addr));

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
		(upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
		lower_32_bits(ring->wptr));

	return 0;
}
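
/*
 * Full (SPG) start path: power up the block, switch to SW clock gating,
 * program the memory interface, boot the VCPU and set up the decode, encode
 * and JPEG ring buffers. DPG-capable configurations take the
 * vcn_v2_0_start_dpg_mode() shortcut instead.
 */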

static int vcn_v2_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->vcn.ring_dec;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	int i, j, r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		r = vcn_v2_0_start_dpg_mode(adev, adev->vcn.indirect_sram);
		if (r)
			return r;
		goto jpeg;
	}

	vcn_v2_0_disable_static_power_gating(adev);

	/* set uvd status busy */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
	WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);

	/* SW clock gating */
	vcn_v2_0_disable_clock_gating(adev);

	/* enable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL),
		UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

	/* disable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
		~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* setup mmUVD_LMI_CTRL */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL);
	WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL, tmp |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

	/* setup mmUVD_MPC_CNTL */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL);
	tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
	tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_MPC_CNTL, tmp);

	/* setup UVD_MPC_SET_MUXA0 */
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0,
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

	/* setup UVD_MPC_SET_MUXB0 */
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0,
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

	/* setup mmUVD_MPC_SET_MUX */
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX,
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

	vcn_v2_0_mc_resume(adev);

	/* release VCPU reset to boot */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
		~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	/* enable LMI MC and UMC channels */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
		~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	tmp = RREG32_SOC15(VCN, 0, mmUVD_SOFT_RESET);
	tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
	tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
	WREG32_SOC15(VCN, 0, mmUVD_SOFT_RESET, tmp);

	/* disable byte swapping */
	lmi_swap_cntl = 0;
#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
#endif
	WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);

	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
			~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
			~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("VCN decode not responding, giving up!!!\n");
		return r;
	}

	/* enable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK,
		~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* clear the busy bit of VCN_STATUS */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0,
		~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_VMID, 0);

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
		lower_32_bits(ring->wptr));

	ring = &adev->vcn.ring_enc[0];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);

	ring = &adev->vcn.ring_enc[1];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);

jpeg:
	r = jpeg_v2_0_start(adev);

	return r;
}

static int vcn_v2_0_stop_dpg_mode(struct amdgpu_device *adev)
{
	int ret_code = 0;
	uint32_t tmp;

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_JRBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);

	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return 0;
}

static int vcn_v2_0_stop(struct amdgpu_device *adev)
{
	uint32_t tmp;
	int r;

	r = jpeg_v2_0_stop(adev);
	if (r)
		return r;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		r = vcn_v2_0_stop_dpg_mode(adev);
		if (r)
			return r;
		goto power_off;
	}

	/* wait for uvd idle */
	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, r);
	if (r)
		return r;

	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__READ_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_LMI_STATUS, tmp, tmp, r);
	if (r)
		return r;

	/* stall UMC channel */
	tmp = RREG32_SOC15(VCN, 0, mmUVD_LMI_CTRL2);
	tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
	WREG32_SOC15(VCN, 0, mmUVD_LMI_CTRL2, tmp);

	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
		UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_LMI_STATUS, tmp, tmp, r);
	if (r)
		return r;

	/* disable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0,
		~(UVD_VCPU_CNTL__CLK_EN_MASK));

	/* reset LMI UMC */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK,
		~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	/* reset LMI */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK,
		~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);

	/* reset VCPU */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
		~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	/* clear status */
	WREG32_SOC15(VCN, 0, mmUVD_STATUS, 0);

	vcn_v2_0_enable_clock_gating(adev);
	vcn_v2_0_enable_static_power_gating(adev);

power_off:
	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, false);

	return 0;
}
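
/*
 * While the decoder runs in DPG mode the firmware owns the decode ring, so
 * before the encode rings are used the driver requests a DPG pause, waits
 * for the hardware ACK, restores the encode ring registers and resyncs the
 * decode write pointer from UVD_SCRATCH2.
 */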
static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
				struct dpg_pause_state *new_state)
{
	struct amdgpu_ring *ring;
	uint32_t reg_data = 0;
	int ret_code;

	/* pause/unpause if state is changed */
	if (adev->vcn.pause_state.fw_based != new_state->fw_based) {
		DRM_DEBUG("dpg pause state changed %d -> %d",
			adev->vcn.pause_state.fw_based, new_state->fw_based);
		reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = 0;
			SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

			if (!ret_code) {
				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);

				/* Restore */
				ring = &adev->vcn.ring_enc[0];
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));

				ring = &adev->vcn.ring_enc[1];
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));

				WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
					RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);

				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
					UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
					UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
			}
		} else {
			/* unpause dpg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}

static bool vcn_v2_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
}

static int vcn_v2_0_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
		UVD_STATUS__IDLE, ret);

	return ret;
}

static int vcn_v2_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		/* wait for STATUS to clear */
		if (!vcn_v2_0_is_idle(handle))
			return -EBUSY;
		vcn_v2_0_enable_clock_gating(adev);
	} else {
		/* disable HW gating and enable Sw gating */
		vcn_v2_0_disable_clock_gating(adev);
	}

	return 0;
}

/**
 * vcn_v2_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v2_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
}

/**
 * vcn_v2_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v2_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
}

/**
 * vcn_v2_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v2_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2,
			lower_32_bits(ring->wptr) | 0x80000000);

	if (ring->use_doorbell) {
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

/**
 * vcn_v2_0_dec_ring_insert_start - insert a start command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a start command to the ring.
 */
static void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
}

/**
 * vcn_v2_0_dec_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to the ring.
 */
static void vcn_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_END << 1));
}

/**
 * vcn_v2_0_dec_ring_insert_nop - insert a nop command
 *
 * @ring: amdgpu_ring pointer
 * @count: the number of NOP packets to insert
 *
 * Write a nop command to the ring.
 */
static void vcn_v2_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP_INTERNAL_OFFSET, 0));
		amdgpu_ring_write(ring, 0);
	}
}

/**
 * vcn_v2_0_dec_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID_INTERNAL_OFFSET, 0));
	amdgpu_ring_write(ring, seq);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_FENCE << 1));

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));

	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_TRAP << 1));
}

/**
 * vcn_v2_0_dec_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
static void vcn_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
				struct amdgpu_job *job,
				struct amdgpu_ib *ib,
				uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET, 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void vcn_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring,
				uint32_t reg, uint32_t val,
				uint32_t mask)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET, 0));
	amdgpu_ring_write(ring, reg << 2);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET, 0));
	amdgpu_ring_write(ring, val);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8_INTERNAL_OFFSET, 0));
	amdgpu_ring_write(ring, mask);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));

	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_REG_READ_COND_WAIT << 1));
}

static void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
				unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for register write */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	vcn_v2_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
}

static void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
				uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET, 0));
	amdgpu_ring_write(ring, reg << 2);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET, 0));
	amdgpu_ring_write(ring, val);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));

	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_WRITE_REG << 1));
}
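
/*
 * Unlike the decode ring, which wraps each register write in a PACKET0
 * header, the encode ring consumes bare VCN_ENC_CMD_* command words followed
 * by their operands.
 */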

/**
 * vcn_v2_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v2_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.ring_enc[0])
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
}

/**
 * vcn_v2_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v2_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.ring_enc[0]) {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	} else {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
	}
}

/**
 * vcn_v2_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v2_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.ring_enc[0]) {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		}
	} else {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		}
	}
}

/**
 * vcn_v2_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write enc a fence and a trap command to the ring.
 */
static void vcn_v2_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
				u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
}

static void vcn_v2_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
}

/**
 * vcn_v2_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void vcn_v2_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
				struct amdgpu_job *job,
				struct amdgpu_ib *ib,
				uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void vcn_v2_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
				uint32_t reg, uint32_t val,
				uint32_t mask)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}

static void vcn_v2_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
				unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	vcn_v2_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
					lower_32_bits(pd_addr), 0xffffffff);
}

static void vcn_v2_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
				uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}
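
/*
 * The JPEG ring is fed with PACKETJ packets; registers outside the JRBC
 * aperture appear to be reached through the UVD_JRBC_EXTERNAL_REG /
 * JRBC_DEC_EXTERNAL_REG_WRITE_ADDR pair used by the start/end/fence helpers
 * below.
 */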

/**
 * vcn_v2_0_jpeg_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v2_0_jpeg_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR);
}

/**
 * vcn_v2_0_jpeg_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v2_0_jpeg_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];
	else
		return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
}

/**
 * vcn_v2_0_jpeg_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v2_0_jpeg_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

/**
 * vcn_v2_0_jpeg_ring_insert_start - insert a start command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a start command to the ring.
 */
static void vcn_v2_0_jpeg_ring_insert_start(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x68e04);

	amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x80010000);
}

/**
 * vcn_v2_0_jpeg_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to the ring.
 */
static void vcn_v2_0_jpeg_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x68e04);

	amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x00010000);
}

/**
 * vcn_v2_0_jpeg_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: GPU address to write the fence value to
 * @seq: sequence number to write
 * @flags: fence flags (AMDGPU_FENCE_FLAG_64BIT is not supported here)
 *
 * Write a fence and a trap command to the ring.
 */
static void vcn_v2_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
					  unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, seq);

	amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, seq);

	amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(addr));

	amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(addr));

	amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET,
		0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x3fbc);

	amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x1);

	amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7));
	amdgpu_ring_write(ring, 0);
}
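
/*
 * Nine PACKETJ/data pairs, i.e. 18 dwords, matching the "18 + 18" fence
 * entry in the vcn_v2_0_jpeg_ring_vm_funcs frame-size budget below.  The
 * trailing 0x3fbc/0x1 writes and the TYPE7 packet look like the
 * JRBC-side interrupt/completion sequence, but the register-level
 * semantics are not documented here, so treat that reading as a guess.
 */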

/**
 * vcn_v2_0_jpeg_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer.
 */
static void vcn_v2_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring,
				       struct amdgpu_job *job,
				       struct amdgpu_ib *ib,
				       uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, (vmid | (vmid << 4)));

	amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, (vmid | (vmid << 4)));

	amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));

	amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));

	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, ib->length_dw);

	amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));

	amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));

	amdgpu_ring_write(ring, PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x01400200);

	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x2);

	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_STATUS_INTERNAL_OFFSET,
		0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3));
	amdgpu_ring_write(ring, 0x2);
}
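
/*
 * Eleven PACKETJ/data pairs, i.e. 22 dwords, matching .emit_ib_size = 22
 * in vcn_v2_0_jpeg_ring_vm_funcs below.  The tail of the sequence
 * (COND_RD_TIMER, REF_DATA = 0x2, then a conditional TYPE3 poll on
 * JRBC_STATUS) appears to make the JRBC block wait until the status
 * register matches the reference value before fetching further commands.
 */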

static void vcn_v2_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring,
					     uint32_t reg, uint32_t val,
					     uint32_t mask)
{
	uint32_t reg_offset = (reg << 2);

	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x01400200);

	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, val);

	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) {
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring,
			PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3));
	} else {
		amdgpu_ring_write(ring, reg_offset);
		amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
			0, 0, PACKETJ_TYPE3));
	}
	amdgpu_ring_write(ring, mask);
}
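
/*
 * The branch above reflects a register aperture split: byte offsets in
 * [0x10000, 0x105ff] can apparently be encoded directly in the PACKETJ
 * header, while anything outside that window is reached indirectly by
 * first latching the offset into UVD_JRBC_EXTERNAL_REG and then issuing
 * the access through JRBC_DEC_EXTERNAL_REG_WRITE_ADDR.  The same split
 * appears in vcn_v2_0_jpeg_ring_emit_wreg() below.
 */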

static void vcn_v2_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring,
					     unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for register write */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	vcn_v2_0_jpeg_ring_emit_reg_wait(ring, data0, data1, mask);
}

static void vcn_v2_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring,
					 uint32_t reg, uint32_t val)
{
	uint32_t reg_offset = (reg << 2);

	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
		0, 0, PACKETJ_TYPE0));
	if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) {
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring,
			PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0));
	} else {
		amdgpu_ring_write(ring, reg_offset);
		amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
			0, 0, PACKETJ_TYPE0));
	}
	amdgpu_ring_write(ring, val);
}

static void vcn_v2_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
		amdgpu_ring_write(ring, 0);
	}
}
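
/*
 * JPEG ring commands always come as PACKETJ/data pairs, so padding is
 * emitted two dwords at a time (a TYPE6 nop packet plus a zero data
 * word); the WARN_ON() above catches callers that would break that
 * two-dword alignment.
 */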

static int vcn_v2_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

static int vcn_v2_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
		amdgpu_fence_process(&adev->vcn.ring_dec);
		break;
	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.ring_enc[0]);
		break;
	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
		amdgpu_fence_process(&adev->vcn.ring_enc[1]);
		break;
	case VCN_2_0__SRCID__JPEG_DECODE:
		amdgpu_fence_process(&adev->vcn.ring_jpeg);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(adev->vcn.external.scratch9, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 4);
	if (r)
		return r;
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.external.scratch9);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}
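
/*
 * The ring test is a scratch-register handshake: the CPU seeds
 * UVD_SCRATCH9 with 0xCAFEDEAD over MMIO, submits a packet asking the
 * VCN firmware to write 0xDEADBEEF into the same register, and then
 * polls for up to adev->usec_timeout iterations (1 us apart) until the
 * value flips, returning -ETIMEDOUT otherwise.
 */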

static int vcn_v2_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCN block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks.
	 */
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v2_0_stop(adev);
	else
		ret = vcn_v2_0_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}

static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
	.name = "vcn_v2_0",
	.early_init = vcn_v2_0_early_init,
	.late_init = NULL,
	.sw_init = vcn_v2_0_sw_init,
	.sw_fini = vcn_v2_0_sw_fini,
	.hw_init = vcn_v2_0_hw_init,
	.hw_fini = vcn_v2_0_hw_fini,
	.suspend = vcn_v2_0_suspend,
	.resume = vcn_v2_0_resume,
	.is_idle = vcn_v2_0_is_idle,
	.wait_for_idle = vcn_v2_0_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v2_0_set_clockgating_state,
	.set_powergating_state = vcn_v2_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.vmhub = AMDGPU_MMHUB,
	.get_rptr = vcn_v2_0_dec_ring_get_rptr,
	.get_wptr = vcn_v2_0_dec_ring_get_wptr,
	.set_wptr = vcn_v2_0_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
	.test_ring = vcn_v2_0_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
	.insert_start = vcn_v2_0_dec_ring_insert_start,
	.insert_end = vcn_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
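
/*
 * Sizing note: .emit_frame_size is the worst-case number of ring dwords
 * emitted around the IBs of one submission (VM flush, two fences, etc.)
 * and .emit_ib_size the per-IB packet cost; the common ring code
 * presumably uses these to reserve ring space up front, so the budgets
 * here should be kept in sync with the emit functions above.
 */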

static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.vmhub = AMDGPU_MMHUB,
	.get_rptr = vcn_v2_0_enc_ring_get_rptr,
	.get_wptr = vcn_v2_0_enc_ring_get_wptr,
	.set_wptr = vcn_v2_0_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs vcn_v2_0_jpeg_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_JPEG,
	.align_mask = 0xf,
	.vmhub = AMDGPU_MMHUB,
	.get_rptr = vcn_v2_0_jpeg_ring_get_rptr,
	.get_wptr = vcn_v2_0_jpeg_ring_get_wptr,
	.set_wptr = vcn_v2_0_jpeg_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v2_0_jpeg_ring_emit_vm_flush */
		18 + 18 + /* vcn_v2_0_jpeg_ring_emit_fence x2 vm fence */
		8 + 16,
	.emit_ib_size = 22, /* vcn_v2_0_jpeg_ring_emit_ib */
	.emit_ib = vcn_v2_0_jpeg_ring_emit_ib,
	.emit_fence = vcn_v2_0_jpeg_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_jpeg_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_jpeg_ring_test_ring,
	.test_ib = amdgpu_vcn_jpeg_ring_test_ib,
	.insert_nop = vcn_v2_0_jpeg_ring_nop,
	.insert_start = vcn_v2_0_jpeg_ring_insert_start,
	.insert_end = vcn_v2_0_jpeg_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_jpeg_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_jpeg_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	adev->vcn.ring_dec.funcs = &vcn_v2_0_dec_ring_vm_funcs;
	DRM_INFO("VCN decode is enabled in VM mode\n");
}

static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		adev->vcn.ring_enc[i].funcs = &vcn_v2_0_enc_ring_vm_funcs;

	DRM_INFO("VCN encode is enabled in VM mode\n");
}

static void vcn_v2_0_set_jpeg_ring_funcs(struct amdgpu_device *adev)
{
	adev->vcn.ring_jpeg.funcs = &vcn_v2_0_jpeg_ring_vm_funcs;
	DRM_INFO("VCN jpeg decode is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs vcn_v2_0_irq_funcs = {
	.set = vcn_v2_0_set_interrupt_state,
	.process = vcn_v2_0_process_interrupt,
};

static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 2;
	adev->vcn.irq.funcs = &vcn_v2_0_irq_funcs;
}
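
/*
 * num_types counts one interrupt client per fence-bearing ring: the
 * num_enc_rings encode rings plus the decode and JPEG rings (the "+ 2"),
 * which lines up with the four src_ids handled in
 * vcn_v2_0_process_interrupt() above.
 */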

const struct amdgpu_ip_block_version vcn_v2_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &vcn_v2_0_ip_funcs,
};