/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vcn/vcn_1_0_offset.h"
#include "vcn/vcn_1_0_sh_mask.h"
#include "hdp/hdp_4_0_offset.h"
#include "mmhub/mmhub_9_1_offset.h"
#include "mmhub/mmhub_9_1_sh_mask.h"

#include "ivsrcid/vcn/irqsrcs_vcn_1_0.h"
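/*
 * These check/mask registers are not part of the generated
 * vcn_1_0_offset.h header, so their offsets are defined locally below.
 */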
#define mmUVD_RBC_XX_IB_REG_CHECK	0x05ab
#define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX	1
#define mmUVD_REG_XX_MASK	0x05ac
#define mmUVD_REG_XX_MASK_BASE_IDX	1
static int vcn_v1_0_stop(struct amdgpu_device *adev);
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr);
static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state);
static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
				struct dpg_pause_state *new_state);
/**
 * vcn_v1_0_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v1_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vcn.num_enc_rings = 2;

	vcn_v1_0_set_dec_ring_funcs(adev);
	vcn_v1_0_set_enc_ring_funcs(adev);
	vcn_v1_0_set_jpeg_ring_funcs(adev);
	vcn_v1_0_set_irq_funcs(adev);

	return 0;
}
/**
 * vcn_v1_0_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v1_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* VCN DEC TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.irq);
	if (r)
		return r;

	/* VCN ENC TRAP */
	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
					&adev->vcn.irq);
		if (r)
			return r;
	}

	/* VCN JPEG TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 126, &adev->vcn.irq);
	if (r)
		return r;

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;
		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		DRM_INFO("PSP loading VCN firmware\n");
	}

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	ring = &adev->vcn.ring_dec;
	sprintf(ring->name, "vcn_dec");
	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
	if (r)
		return r;
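	/*
	 * On VCN 1.0 the decode ring programs these registers with direct
	 * MMIO writes, so the "internal" and "external" offsets are the same
	 * here (later VCN generations use separate internal offsets).
	 */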
	adev->vcn.internal.scratch9 = adev->vcn.external.scratch9 =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
	adev->vcn.internal.data0 = adev->vcn.external.data0 =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
	adev->vcn.internal.data1 = adev->vcn.external.data1 =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
	adev->vcn.internal.cmd = adev->vcn.external.cmd =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
	adev->vcn.internal.nop = adev->vcn.external.nop =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		ring = &adev->vcn.ring_enc[i];
		sprintf(ring->name, "vcn_enc%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
		if (r)
			return r;
	}

	ring = &adev->vcn.ring_jpeg;
	sprintf(ring->name, "vcn_jpeg");
	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
	if (r)
		return r;

	adev->vcn.pause_dpg_mode = vcn_v1_0_pause_dpg_mode;

	return 0;
}
/**
 * vcn_v1_0_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v1_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}
/**
 * vcn_v1_0_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v1_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->vcn.ring_dec;
	int i, r;

	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		ring = &adev->vcn.ring_enc[i];
		ring->sched.ready = true;
		r = amdgpu_ring_test_helper(ring);
		if (r)
			goto done;
	}

	ring = &adev->vcn.ring_jpeg;
	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully (under %s).\n",
			(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");

	return r;
}
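/*
 * In DPG (dynamic power gating) mode the VCN firmware manages power gating
 * of the block between jobs; in SPG (static power gating) mode the driver
 * powers the whole block up and down around use.
 */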
/**
 * vcn_v1_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v1_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->vcn.ring_dec;

	if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
		RREG32_SOC15(VCN, 0, mmUVD_STATUS))
		vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);

	ring->sched.ready = false;

	return 0;
}
/**
 * vcn_v1_0_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v1_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v1_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}
/**
 * vcn_v1_0_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v1_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v1_0_hw_init(adev);

	return r;
}
/**
 * vcn_v1_0_mc_resume_spg_mode - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;
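	/*
	 * The VCPU cache windows map the firmware (window 0), stack
	 * (window 1) and context (window 2) out of one contiguous buffer;
	 * the UVD_VCPU_CACHE_OFFSET* registers are in 8-byte units, hence
	 * the ">> 3" below.
	 */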
	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.gpu_addr));
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.gpu_addr));
		offset = size;
		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}

	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.gpu_addr + offset));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.gpu_addr + offset));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
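	/* VCN global tiling registers: mirror the GFX gb_addr_config */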
	WREG32_SOC15(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_JPEG_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
}
static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo),
			0xFFFFFFFF, 0);
		WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi),
			0xFFFFFFFF, 0);
		WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0,
			0xFFFFFFFF, 0);
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.gpu_addr), 0xFFFFFFFF, 0);
		WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.gpu_addr), 0xFFFFFFFF, 0);
		offset = size;
		WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0xFFFFFFFF, 0);
	}

	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size, 0xFFFFFFFF, 0);

	/* cache window 1: stack */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.gpu_addr + offset), 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.gpu_addr + offset), 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0,
		0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE,
		0xFFFFFFFF, 0);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
		0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
		0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE,
		0xFFFFFFFF, 0);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
}
/**
 * vcn_v1_0_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Disable clock gating for VCN block
 */
static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data;

	/* JPEG disable CGC */
	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
	data &= ~(JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);

	/* UVD disable CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;

	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__UDEC_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__IDCT_MASK
		| UVD_CGC_GATE__MPRD_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__UDEC_RE_MASK
		| UVD_CGC_GATE__UDEC_CM_MASK
		| UVD_CGC_GATE__UDEC_IT_MASK
		| UVD_CGC_GATE__UDEC_DB_MASK
		| UVD_CGC_GATE__UDEC_MP_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__SCPU_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCLR_MASK
		| UVD_SUVD_CGC_GATE__UVD_SC_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}
/**
 * vcn_v1_0_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Enable clock gating for VCN block
 */
static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	/* enable JPEG CGC */
	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
	data |= (JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);

	/* enable UVD CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}
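/*
 * Same CGC programming as above, but issued through the indirect DPG write
 * path. The sram_sel argument selects whether values are written to the
 * registers directly (0) or staged in the DPG SRAM (1), presumably so that
 * staged values are reapplied when the block is power cycled.
 */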
static void vcn_v1_0_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel)
{
	uint32_t reg_data = 0;

	/* disable JPEG CGC */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15_DPG_MODE(UVD, 0, mmJPEG_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);

	WREG32_SOC15_DPG_MODE(UVD, 0, mmJPEG_CGC_GATE, 0, 0xFFFFFFFF, sram_sel);

	/* enable sw clock gating control */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		 UVD_CGC_CTRL__SYS_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MODE_MASK |
		 UVD_CGC_CTRL__MPEG2_MODE_MASK |
		 UVD_CGC_CTRL__REGS_MODE_MASK |
		 UVD_CGC_CTRL__RBC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		 UVD_CGC_CTRL__IDCT_MODE_MASK |
		 UVD_CGC_CTRL__MPRD_MODE_MASK |
		 UVD_CGC_CTRL__MPC_MODE_MASK |
		 UVD_CGC_CTRL__LBSI_MODE_MASK |
		 UVD_CGC_CTRL__LRBBM_MODE_MASK |
		 UVD_CGC_CTRL__WCB_MODE_MASK |
		 UVD_CGC_CTRL__VCPU_MODE_MASK |
		 UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, sram_sel);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SUVD_CGC_GATE, 1, 0xFFFFFFFF, sram_sel);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SUVD_CGC_CTRL, 0, 0xFFFFFFFF, sram_sel);
}
static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev)
{
	int ret;
	uint32_t data;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON, 0xFFFFFF, ret);
	} else {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFFFFF, ret);
	}

	/* polling UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS, UVDU_PWR_STATUS are 0 (power on) */

	data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON | UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
}
static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
{
	int ret;
	uint32_t data;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);

		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);

		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDW_PWR_STATUS__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFFFFF, ret);
	}
}
/**
 * vcn_v1_0_start - start VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCN block
 */
static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->vcn.ring_dec;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	int i, j, r;

	/* disable byte swapping */
	lmi_swap_cntl = 0;

	vcn_1_0_disable_static_power_gating(adev);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
	WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);

	/* disable clock gating */
	vcn_v1_0_disable_clock_gating(adev);

	/* disable interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
		~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* initialize VCN memory controller */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL);
	WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL, tmp |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
#endif
	WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL);
	tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
	tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
	WREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL, tmp);

	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0,
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0,
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX,
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

	vcn_v1_0_mc_resume_spg_mode(adev);
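	/*
	 * UVD_REG_XX_MASK / UVD_RBC_XX_IB_REG_CHECK enable checking of
	 * register writes issued from IBs (see the local offset defines at
	 * the top of this file).
	 */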
	WREG32_SOC15(UVD, 0, mmUVD_REG_XX_MASK, 0x10);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK,
		RREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK) | 0x3);
	/* enable VCPU clock */
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);

	/* boot up the VCPU */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
		~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	/* enable UMC */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
		~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET);
	tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
	tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
	WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, tmp);

	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
			if (status & UVD_STATUS__IDLE)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & UVD_STATUS__IDLE)
			break;

		DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
			~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
			~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("VCN decode not responding, giving up!!!\n");
		return r;
	}
	/* enable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, ~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* enable system interrupt for JRBC, TODO: move to set interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SYS_INT_EN),
		UVD_SYS_INT_EN__UVD_JRBC_EN_MASK,
		~UVD_SYS_INT_EN__UVD_JRBC_EN_MASK);

	/* clear the busy bit of UVD_STATUS */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) & ~UVD_STATUS__UVD_BUSY;
	WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
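	/*
	 * The ring stays in no-fetch mode while base, rptr and wptr are
	 * programmed below; fetching is re-enabled once everything is set up.
	 */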
	/* set the write pointer delay */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
		(upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
		lower_32_bits(ring->wptr));

	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
		~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	ring = &adev->vcn.ring_enc[0];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);

	ring = &adev->vcn.ring_enc[1];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);

	ring = &adev->vcn.ring_jpeg;
	WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
		UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
	WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, 0);
	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, 0);
	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);

	/* initialize wptr */
	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);

	/* copy patch commands to the jpeg ring */
	vcn_v1_0_jpeg_ring_set_patch_ring(ring,
		(ring->wptr + ring->max_dw * amdgpu_sched_hw_submission));

	return 0;
}
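/*
 * DPG counterpart of the SPG start sequence above: most registers are
 * programmed through WREG32_SOC15_DPG_MODE instead of direct MMIO writes,
 * since the block may be partially power gated while running in DPG mode.
 */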
static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->vcn.ring_dec;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;

	/* disable byte swapping */
	lmi_swap_cntl = 0;

	vcn_1_0_enable_static_power_gating(adev);

	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp);

	/* enable clock gating */
	vcn_v1_0_clock_gating_dpg_mode(adev, 0);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK;
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CNTL, tmp, 0xFFFFFFFF, 0);

	/* disable interrupt */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MASTINT_EN,
			0, UVD_MASTINT_EN__VCPU_EN_MASK, 0);

	/* initialize VCN memory controller */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL,
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		0x00100000L, 0xFFFFFFFF, 0);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
#endif
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl, 0xFFFFFFFF, 0);

	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_CNTL,
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0xFFFFFFFF, 0);

	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUXA0,
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0xFFFFFFFF, 0);

	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUXB0,
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0xFFFFFFFF, 0);

	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUX,
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0xFFFFFFFF, 0);

	vcn_v1_0_mc_resume_dpg_mode(adev);

	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_REG_XX_MASK, 0x10, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK, 0x3, 0xFFFFFFFF, 0);

	/* boot up the VCPU */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SOFT_RESET, 0, 0xFFFFFFFF, 0);

	/* enable UMC */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL2,
		0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT,
		0xFFFFFFFF, 0);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MASTINT_EN,
			UVD_MASTINT_EN__VCPU_EN_MASK, UVD_MASTINT_EN__VCPU_EN_MASK, 0);

	vcn_v1_0_clock_gating_dpg_mode(adev, 1);
	/* setup mmUVD_LMI_CTRL */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL,
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		0x00100000L, 0xFFFFFFFF, 1);

	tmp = adev->gfx.config.gb_addr_config;
	/* setup VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_JPEG_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1);

	/* enable System Interrupt for JRBC */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SYS_INT_EN,
		UVD_SYS_INT_EN__UVD_JRBC_EN_MASK, 0xFFFFFFFF, 1);

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
		(upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
		lower_32_bits(ring->wptr));

	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
		~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	/* initialize JPEG wptr */
	ring = &adev->vcn.ring_jpeg;
	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);

	/* copy patch commands to the jpeg ring */
	vcn_v1_0_jpeg_ring_set_patch_ring(ring,
		(ring->wptr + ring->max_dw * amdgpu_sched_hw_submission));

	return 0;
}
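/* Pick the DPG or SPG bring-up path based on the enabled PG features. */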
static int vcn_v1_0_start(struct amdgpu_device *adev)
{
	int r;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		r = vcn_v1_0_start_dpg_mode(adev);
	else
		r = vcn_v1_0_start_spg_mode(adev);

	return r;
}
/**
 * vcn_v1_0_stop - stop VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the VCN block
 */
static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
{
	int ret_code;
	uint32_t tmp;

	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, ret_code);

	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__READ_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp, ret_code);

	/* put VCPU into reset */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
		~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
		UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp, ret_code);

	/* disable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0,
		~UVD_VCPU_CNTL__CLK_EN_MASK);

	/* reset LMI UMC/LMI */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK,
		~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK,
		~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);

	WREG32_SOC15(UVD, 0, mmUVD_STATUS, 0);

	vcn_v1_0_enable_clock_gating(adev);
	vcn_1_0_enable_static_power_gating(adev);

	return 0;
}
static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev)
{
	int ret_code = 0;
	uint32_t tmp;

	/* Wait for power status to be UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF */
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
			UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_JRBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);

	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
			UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
			~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return 0;
}
static int vcn_v1_0_stop(struct amdgpu_device *adev)
{
	int r;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		r = vcn_v1_0_stop_dpg_mode(adev);
	else
		r = vcn_v1_0_stop_spg_mode(adev);

	return r;
}
static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
				struct dpg_pause_state *new_state)
{
	int ret_code;
	uint32_t reg_data = 0;
	uint32_t reg_data2 = 0;
	struct amdgpu_ring *ring;

	/* pause/unpause if state is changed */
	if (adev->vcn.pause_state.fw_based != new_state->fw_based) {
		DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
			adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg,
			new_state->fw_based, new_state->jpeg);

		reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = 0;

			if (!(reg_data & UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK))
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
						   UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
						   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

			if (!ret_code) {
				/* pause DPG non-jpeg */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
						   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
						   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);

				/* Restore */
				ring = &adev->vcn.ring_enc[0];
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));

				ring = &adev->vcn.ring_enc[1];
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
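				/*
				 * The decode ring wptr is mirrored into
				 * SCRATCH2 in DPG mode (bit 31 marks it
				 * valid), so restore the real wptr from
				 * there after the pause.
				 */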
				ring = &adev->vcn.ring_dec;
				WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
						   RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
						   UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
						   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
			}
		} else {
			/* unpause dpg non-jpeg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.pause_state.fw_based = new_state->fw_based;
	}

	/* pause/unpause if state is changed */
	if (adev->vcn.pause_state.jpeg != new_state->jpeg) {
		DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
			adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg,
			new_state->fw_based, new_state->jpeg);

		reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK);

		if (new_state->jpeg == VCN_DPG_STATE__PAUSE) {
			ret_code = 0;

			if (!(reg_data & UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK))
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
						   UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
						   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

			if (!ret_code) {
				/* Make sure JPRG Snoop is disabled before sending the pause */
				reg_data2 = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
				reg_data2 |= UVD_POWER_STATUS__JRBC_SNOOP_DIS_MASK;
				WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, reg_data2);

				/* pause DPG jpeg */
				reg_data |= UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
							UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK,
							UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK, ret_code);

				/* Restore */
				ring = &adev->vcn.ring_jpeg;
				WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
							UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
							UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
				WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
							lower_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
							upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, ring->wptr);
				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, ring->wptr);
				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
							UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);

				ring = &adev->vcn.ring_dec;
				WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
						   RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
						   UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
						   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
			}
		} else {
			/* unpause dpg jpeg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.pause_state.jpeg = new_state->jpeg;
	}

	return 0;
}
static bool vcn_v1_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
}

static int vcn_v1_0_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
		UVD_STATUS__IDLE, ret);

	return ret;
}
static int vcn_v1_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;

	if (enable) {
		/* wait for STATUS to clear */
		if (!vcn_v1_0_is_idle(handle))
			return -EBUSY;
		vcn_v1_0_enable_clock_gating(adev);
	} else {
		/* disable HW gating and enable Sw gating */
		vcn_v1_0_disable_clock_gating(adev);
	}

	return 0;
}
/**
 * vcn_v1_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v1_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
}

/**
 * vcn_v1_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v1_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
}
/**
 * vcn_v1_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v1_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
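	/*
	 * In DPG mode the wptr is additionally mirrored into SCRATCH2 with
	 * bit 31 set as a validity flag, so the DPG pause/unpause code can
	 * restore it (see vcn_v1_0_pause_dpg_mode).
	 */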
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2,
			lower_32_bits(ring->wptr) | 0x80000000);

	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}
/**
 * vcn_v1_0_dec_ring_insert_start - insert a start command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a start command to the ring.
 */
static void vcn_v1_0_dec_ring_insert_start(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_START << 1);
}

/**
 * vcn_v1_0_dec_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to the ring.
 */
static void vcn_v1_0_dec_ring_insert_end(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1);
}
/**
 * vcn_v1_0_dec_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address to write the fence sequence number to
 * @seq: sequence number to emit
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_FENCE << 1);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_TRAP << 1);
}
/**
 * vcn_v1_0_dec_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
					struct amdgpu_job *job,
					struct amdgpu_ib *ib,
					uint32_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_IB_SIZE), 0));
	amdgpu_ring_write(ring, ib->length_dw);
}
static void vcn_v1_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0));
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_REG_READ_COND_WAIT << 1);
}
static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for register write */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	vcn_v1_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
}
static void vcn_v1_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1);
}
/**
 * vcn_v1_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v1_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.ring_enc[0])
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
}

/**
 * vcn_v1_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v1_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.ring_enc[0])
		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
}
/**
 * vcn_v1_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v1_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.ring_enc[0])
		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}
/**
 * vcn_v1_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address to write the fence sequence number to
 * @seq: sequence number to emit
 * @flags: fence related flags
 *
 * Write an enc fence and a trap command to the ring.
 */
static void vcn_v1_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
			u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
}

static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
}
/**
 * vcn_v1_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
					struct amdgpu_job *job,
					struct amdgpu_ib *ib,
					uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}
static void vcn_v1_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}
static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	vcn_v1_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
					lower_32_bits(pd_addr), 0xffffffff);
}
static void vcn_v1_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}
/**
 * vcn_v1_0_jpeg_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v1_0_jpeg_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR);
}

/**
 * vcn_v1_0_jpeg_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v1_0_jpeg_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
}

/**
 * vcn_v1_0_jpeg_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v1_0_jpeg_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
}
/**
 * vcn_v1_0_jpeg_ring_insert_start - insert a start command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a start command to the ring.
 */
static void vcn_v1_0_jpeg_ring_insert_start(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x68e04);

	amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x80010000);
}
/**
 * vcn_v1_0_jpeg_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to the ring.
 */
static void vcn_v1_0_jpeg_ring_insert_end(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x68e04);

	amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x00010000);
}

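/*
 * The JPEG fence below passes the sequence number through the GPCOM
 * data/command registers, writes it to the fence address via the LMI
 * write BAR, polls it back, and finishes with a TYPE7 trap packet.
 */
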
/**
 * vcn_v1_0_jpeg_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: fence address to write
 * @seq: fence sequence number
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
static void vcn_v1_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA0), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, seq);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA1), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, seq);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(addr));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(addr));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x01400200);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, seq);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(addr));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(addr));

	amdgpu_ring_write(ring,
		PACKETJ(0, 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE2));
	amdgpu_ring_write(ring, 0xffffffff);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x3fbc);

	amdgpu_ring_write(ring,
		PACKETJ(0, 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x1);

	/* emit trap */
	amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7));
	amdgpu_ring_write(ring, 0);
}

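/*
 * vcn_v1_0_jpeg_ring_emit_ib below programs the VMID into both the JRBC
 * and JPEG LMI VMID registers, points the IB BAR at the buffer, then
 * polls UVD_JRBC_STATUS (ref data 0x2) until the engine reports
 * completion.
 */
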
/**
 * vcn_v1_0_jpeg_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job the IB belongs to (source of the VMID)
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer.
 */
static void vcn_v1_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring,
					struct amdgpu_job *job,
					struct amdgpu_ib *ib,
					uint32_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, (vmid | (vmid << 4)));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JPEG_VMID), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, (vmid | (vmid << 4)));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_IB_SIZE), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, ib->length_dw);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));

	amdgpu_ring_write(ring,
		PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x01400200);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x2);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_STATUS), 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3));
	amdgpu_ring_write(ring, 0x2);
}

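/*
 * In the register wait/write helpers below, byte offsets inside
 * 0x1e000-0x1e1ff and 0x1f800-0x21fff are encoded directly in the
 * PACKETJ header; anything else is reached indirectly through
 * UVD_JRBC_EXTERNAL_REG_BASE.
 */
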
static void vcn_v1_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring,
					     uint32_t reg, uint32_t val,
					     uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg_offset = (reg << 2);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x01400200);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, val);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
	if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
		((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring,
			PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3));
	} else {
		amdgpu_ring_write(ring, reg_offset);
		amdgpu_ring_write(ring,
			PACKETJ(0, 0, 0, PACKETJ_TYPE3));
	}
	amdgpu_ring_write(ring, mask);
}

static void vcn_v1_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring,
					     unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for register write */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	vcn_v1_0_jpeg_ring_emit_reg_wait(ring, data0, data1, mask);
}

static void vcn_v1_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring,
					 uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg_offset = (reg << 2);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
	if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
		((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring,
			PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0));
	} else {
		amdgpu_ring_write(ring, reg_offset);
		amdgpu_ring_write(ring,
			PACKETJ(0, 0, 0, PACKETJ_TYPE0));
	}
	amdgpu_ring_write(ring, val);
}

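/* A JPEG no-op is two dwords (a TYPE6 packet plus padding), so both the
 * write pointer and the nop count must stay even.
 */
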
static void vcn_v1_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
		amdgpu_ring_write(ring, 0);
	}
}

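/*
 * patch_wreg pokes a register-write packet directly into the ring buffer
 * at *ptr instead of going through amdgpu_ring_write(); it exists for
 * vcn_v1_0_jpeg_ring_set_patch_ring below.
 */
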
static void vcn_v1_0_jpeg_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_offset, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;

	ring->ring[(*ptr)++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
	if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
			((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
		ring->ring[(*ptr)++] = 0;
		ring->ring[(*ptr)++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0);
	} else {
		ring->ring[(*ptr)++] = reg_offset;
		ring->ring[(*ptr)++] = PACKETJ(0, 0, 0, PACKETJ_TYPE0);
	}
	ring->ring[(*ptr)++] = val;
}

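/*
 * set_patch_ring rebuilds, at the given ring offset, the fixed 23-step
 * sequence that re-arms the JRBC ring: re-point the RB read address,
 * enable NO_FETCH with RPTR write access, reset the read pointer, then
 * re-enable fetching.
 */
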
static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr)
{
	struct amdgpu_device *adev = ring->adev;

	uint32_t reg, reg_offset, val, mask, i;

	// 1st: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW);
	reg_offset = (reg << 2);
	val = lower_32_bits(ring->gpu_addr);
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

	// 2nd: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH);
	reg_offset = (reg << 2);
	val = upper_32_bits(ring->gpu_addr);
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

	// 3rd to 5th: issue MEM_READ commands
	for (i = 0; i <= 2; i++) {
		ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE2);
		ring->ring[ptr++] = 0;
	}

	// 6th: program mmUVD_JRBC_RB_CNTL register to enable NO_FETCH and RPTR write ability
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
	reg_offset = (reg << 2);
	val = 0x13234;
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

	// 7th: program mmUVD_JRBC_RB_REF_DATA
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA);
	reg_offset = (reg << 2);
	val = 0x1;
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

	// 8th: issue conditional register read mmUVD_JRBC_RB_CNTL
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
	reg_offset = (reg << 2);
	val = 0x1;
	mask = 0x1;

	ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0);
	ring->ring[ptr++] = 0x01400200;
	ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0);
	ring->ring[ptr++] = val;
	ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
	if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
		((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
		ring->ring[ptr++] = 0;
		ring->ring[ptr++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3);
	} else {
		ring->ring[ptr++] = reg_offset;
		ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE3);
	}
	ring->ring[ptr++] = mask;

	//9th to 21st: insert no-op
	for (i = 0; i <= 12; i++) {
		ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
		ring->ring[ptr++] = 0;
	}

	//22nd: reset mmUVD_JRBC_RB_RPTR
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_RPTR);
	reg_offset = (reg << 2);
	val = 0;
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

	//23rd: program mmUVD_JRBC_RB_CNTL to disable no_fetch
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
	reg_offset = (reg << 2);
	val = 0x13214;
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
}

static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
		amdgpu_fence_process(&adev->vcn.ring_dec);
		break;
	case VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.ring_enc[0]);
		break;
	case VCN_1_0__SRCID__UVD_ENC_LOW_LATENCY:
		amdgpu_fence_process(&adev->vcn.ring_enc[1]);
		break;
	case 126:
		amdgpu_fence_process(&adev->vcn.ring_jpeg);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

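/* Decoder no-ops are PACKET0 writes to UVD_NO_OP, two dwords apiece,
 * mirroring the even wptr/count requirement of the JPEG nop above.
 */
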
static void vcn_v1_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
		amdgpu_ring_write(ring, 0);
	}
}

static int vcn_v1_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCN block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks.
	 */
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v1_0_stop(adev);
	else
		ret = vcn_v1_0_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;
	return ret;
}

static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
	.name = "vcn_v1_0",
	.early_init = vcn_v1_0_early_init,
	.late_init = NULL,
	.sw_init = vcn_v1_0_sw_init,
	.sw_fini = vcn_v1_0_sw_fini,
	.hw_init = vcn_v1_0_hw_init,
	.hw_fini = vcn_v1_0_hw_fini,
	.suspend = vcn_v1_0_suspend,
	.resume = vcn_v1_0_resume,
	.is_idle = vcn_v1_0_is_idle,
	.wait_for_idle = vcn_v1_0_wait_for_idle,
	.check_soft_reset = NULL /* vcn_v1_0_check_soft_reset */,
	.pre_soft_reset = NULL /* vcn_v1_0_pre_soft_reset */,
	.soft_reset = NULL /* vcn_v1_0_soft_reset */,
	.post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */,
	.set_clockgating_state = vcn_v1_0_set_clockgating_state,
	.set_powergating_state = vcn_v1_0_set_powergating_state,
};

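/*
 * In the ring-funcs tables below, emit_frame_size is the worst-case
 * number of ring dwords one submission may need for frame-level commands
 * (flushes, fences, padding), and emit_ib_size the dwords per IB emission.
 */
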
static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.vmhub = AMDGPU_MMHUB,
	.get_rptr = vcn_v1_0_dec_ring_get_rptr,
	.get_wptr = vcn_v1_0_dec_ring_get_wptr,
	.set_wptr = vcn_v1_0_dec_ring_set_wptr,
	.emit_frame_size =
		6 + 6 + /* hdp invalidate / flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v1_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v1_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v1_0_dec_ring_emit_ib */
	.emit_ib = vcn_v1_0_dec_ring_emit_ib,
	.emit_fence = vcn_v1_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v1_0_dec_ring_insert_nop,
	.insert_start = vcn_v1_0_dec_ring_insert_start,
	.insert_end = vcn_v1_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.vmhub = AMDGPU_MMHUB,
	.get_rptr = vcn_v1_0_enc_ring_get_rptr,
	.get_wptr = vcn_v1_0_enc_ring_get_wptr,
	.set_wptr = vcn_v1_0_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v1_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v1_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v1_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v1_0_enc_ring_emit_ib */
	.emit_ib = vcn_v1_0_enc_ring_emit_ib,
	.emit_fence = vcn_v1_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v1_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs vcn_v1_0_jpeg_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_JPEG,
	.align_mask = 0xf,
	.nop = PACKET0(0x81ff, 0),
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.vmhub = AMDGPU_MMHUB,
	.extra_dw = 64,
	.get_rptr = vcn_v1_0_jpeg_ring_get_rptr,
	.get_wptr = vcn_v1_0_jpeg_ring_get_wptr,
	.set_wptr = vcn_v1_0_jpeg_ring_set_wptr,
	.emit_frame_size =
		6 + 6 + /* hdp invalidate / flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v1_0_jpeg_ring_emit_vm_flush */
		26 + 26 + /* vcn_v1_0_jpeg_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 22, /* vcn_v1_0_jpeg_ring_emit_ib */
	.emit_ib = vcn_v1_0_jpeg_ring_emit_ib,
	.emit_fence = vcn_v1_0_jpeg_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_jpeg_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_jpeg_ring_test_ring,
	.test_ib = amdgpu_vcn_jpeg_ring_test_ib,
	.insert_nop = vcn_v1_0_jpeg_ring_nop,
	.insert_start = vcn_v1_0_jpeg_ring_insert_start,
	.insert_end = vcn_v1_0_jpeg_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v1_0_jpeg_ring_emit_wreg,
	.emit_reg_wait = vcn_v1_0_jpeg_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	adev->vcn.ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
	DRM_INFO("VCN decode is enabled in VM mode\n");
}

static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		adev->vcn.ring_enc[i].funcs = &vcn_v1_0_enc_ring_vm_funcs;

	DRM_INFO("VCN encode is enabled in VM mode\n");
}

static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev)
{
	adev->vcn.ring_jpeg.funcs = &vcn_v1_0_jpeg_ring_vm_funcs;
	DRM_INFO("VCN jpeg decode is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
	.set = vcn_v1_0_set_interrupt_state,
	.process = vcn_v1_0_process_interrupt,
};

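/* One interrupt type per enc ring, plus one each for the dec and jpeg
 * rings, hence num_enc_rings + 2 below.
 */
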
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 2;
	adev->vcn.irq.funcs = &vcn_v1_0_irq_funcs;
}

const struct amdgpu_ip_block_version vcn_v1_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vcn_v1_0_ip_funcs,
};