/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_pm.h"

#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "vega10_enum.h"
#include "hdp/hdp_4_0_offset.h"

#include "soc15_common.h"
#include "clearstate_gfx9.h"
#include "v9_structs.h"

#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"

#include "amdgpu_ras.h"
#define GFX9_NUM_GFX_RINGS     1
#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L

#define mmPWR_MISC_CNTL_STATUS					0x0183
#define mmPWR_MISC_CNTL_STATUS_BASE_IDX				0
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT	0x0
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT		0x1
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK		0x00000001L
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK		0x00000006L
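
/*
 * Firmware binaries this driver may request at runtime.  MODULE_FIRMWARE()
 * records each path in the module info section so tools such as modinfo and
 * initramfs generators can discover and bundle the files.
 */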
MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega10_me.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega10_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega12_ce.bin");
MODULE_FIRMWARE("amdgpu/vega12_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega12_me.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega12_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega20_ce.bin");
MODULE_FIRMWARE("amdgpu/vega20_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega20_me.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega20_rlc.bin");

MODULE_FIRMWARE("amdgpu/raven_ce.bin");
MODULE_FIRMWARE("amdgpu/raven_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven_me.bin");
MODULE_FIRMWARE("amdgpu/raven_mec.bin");
MODULE_FIRMWARE("amdgpu/raven_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven_rlc.bin");

MODULE_FIRMWARE("amdgpu/picasso_ce.bin");
MODULE_FIRMWARE("amdgpu/picasso_pfp.bin");
MODULE_FIRMWARE("amdgpu/picasso_me.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec2.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc_am4.bin");

MODULE_FIRMWARE("amdgpu/raven2_ce.bin");
MODULE_FIRMWARE("amdgpu/raven2_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven2_me.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven2_rlc.bin");
MODULE_FIRMWARE("amdgpu/raven_kicker_rlc.bin");
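
/*
 * The "golden" register tables below are per-ASIC overrides applied once at
 * init.  Each SOC15_REG_GOLDEN_VALUE(ip, inst, reg, and_mask, or_mask) entry
 * tells soc15_program_register_sequence() which bits of the register to
 * update (and_mask) and the new value for those bits (or_mask); an and_mask
 * of 0xffffffff overwrites the whole register.
 */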
static const struct soc15_reg_golden golden_settings_gc_9_0[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x80000000, 0x80000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
};
static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};
static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x0f000080, 0x04000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x22014042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0x00003e00, 0x00000400),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xff840000, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff010f, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x000b0000, 0x000b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01000000, 0x01000000)
};
static const struct soc15_reg_golden golden_settings_gc_9_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
};
static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04048000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_MODE_CNTL_1, 0x06000000, 0x06000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x00000800)
};
static const struct soc15_reg_golden golden_settings_gc_9_1_rv2[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0xff7fffff, 0x04000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x7f0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0xff8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x7f8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x26013041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x26013041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
};
static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0xffffffff, 0x000001ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382)
};
static const struct soc15_reg_golden golden_settings_gc_9_2_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
};
static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x00000080, 0x04000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24104041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24104041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
};
static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
{
	mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_1 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_2 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_3 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_4 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_5 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_6 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_7 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
};
static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
{
	mmRLC_SRM_INDEX_CNTL_DATA_0 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_1 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_2 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_3 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_4 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_5 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_6 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
};
#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
#define RAVEN2_GB_ADDR_CONFIG_GOLDEN 0x26013041
static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
				struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0,
						ARRAY_SIZE(golden_settings_gc_9_0));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0_vg10,
						ARRAY_SIZE(golden_settings_gc_9_0_vg10));
		break;
	case CHIP_VEGA12:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_2_1,
						ARRAY_SIZE(golden_settings_gc_9_2_1));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_2_1_vg12,
						ARRAY_SIZE(golden_settings_gc_9_2_1_vg12));
		break;
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0,
						ARRAY_SIZE(golden_settings_gc_9_0));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0_vg20,
						ARRAY_SIZE(golden_settings_gc_9_0_vg20));
		break;
	case CHIP_RAVEN:
		soc15_program_register_sequence(adev, golden_settings_gc_9_1,
						ARRAY_SIZE(golden_settings_gc_9_1));
		if (adev->rev_id >= 8)
			soc15_program_register_sequence(adev,
							golden_settings_gc_9_1_rv2,
							ARRAY_SIZE(golden_settings_gc_9_1_rv2));
		else
			soc15_program_register_sequence(adev,
							golden_settings_gc_9_1_rv1,
							ARRAY_SIZE(golden_settings_gc_9_1_rv1));
		break;
	default:
		break;
	}

	soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
					(const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}
static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
{
	adev->gfx.scratch.num_reg = 8;
	adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}
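
/*
 * Emit a PACKET3_WRITE_DATA packet on @ring that writes @val to the MMIO
 * register at dword offset @reg; @wc asks the CP to confirm the write
 * before retiring the packet.
 */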
static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
				       bool wc, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
				WRITE_DATA_DST_SEL(0) |
				(wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}
static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				  int mem_space, int opt, uint32_t addr0,
				  uint32_t addr1, uint32_t ref, uint32_t mask,
				  uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) |  /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}
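
/*
 * Basic ring sanity test: push a SET_UCONFIG_REG packet that writes a magic
 * value to a scratch register, then poll the register until the CP has
 * executed the packet (or the timeout expires).
 */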
static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r)
		return r;

	WREG32(scratch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		goto error_free_scratch;

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

error_free_scratch:
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
}
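
/*
 * Indirect buffer test: submit a small IB that writes a magic value to a
 * write-back slot in system memory and wait on its fence, exercising the
 * full CP fetch/execute path for IBs.
 */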
static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;

	unsigned index;
	uint64_t gpu_addr;
	uint32_t tmp;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 16, &ib);
	if (r)
		goto err1;

	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}

	tmp = adev->wb.wb[index];
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err2:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_device_wb_free(adev, index);
	return r;
}
static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
{
	release_firmware(adev->gfx.pfp_fw);
	adev->gfx.pfp_fw = NULL;
	release_firmware(adev->gfx.me_fw);
	adev->gfx.me_fw = NULL;
	release_firmware(adev->gfx.ce_fw);
	adev->gfx.ce_fw = NULL;
	release_firmware(adev->gfx.rlc_fw);
	adev->gfx.rlc_fw = NULL;
	release_firmware(adev->gfx.mec_fw);
	adev->gfx.mec_fw = NULL;
	release_firmware(adev->gfx.mec2_fw);
	adev->gfx.mec2_fw = NULL;

	kfree(adev->gfx.rlc.register_list_format);
}
static void gfx_v9_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_1 *rlc_hdr;

	rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
	adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
	adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
	adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
	adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
	adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
	adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
	adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
	adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
	adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
	adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
	adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
	adev->gfx.rlc.reg_list_format_direct_reg_list_length =
			le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
}
static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
{
	adev->gfx.me_fw_write_wait = false;
	adev->gfx.mec_fw_write_wait = false;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
		    (adev->gfx.me_feature_version >= 42) &&
		    (adev->gfx.pfp_fw_version >= 0x000000b1) &&
		    (adev->gfx.pfp_feature_version >= 42))
			adev->gfx.me_fw_write_wait = true;

		if ((adev->gfx.mec_fw_version >= 0x00000193) &&
		    (adev->gfx.mec_feature_version >= 42))
			adev->gfx.mec_fw_write_wait = true;
		break;
	case CHIP_VEGA12:
		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
		    (adev->gfx.me_feature_version >= 44) &&
		    (adev->gfx.pfp_fw_version >= 0x000000b2) &&
		    (adev->gfx.pfp_feature_version >= 44))
			adev->gfx.me_fw_write_wait = true;

		if ((adev->gfx.mec_fw_version >= 0x00000196) &&
		    (adev->gfx.mec_feature_version >= 44))
			adev->gfx.mec_fw_write_wait = true;
		break;
	case CHIP_VEGA20:
		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
		    (adev->gfx.me_feature_version >= 44) &&
		    (adev->gfx.pfp_fw_version >= 0x000000b2) &&
		    (adev->gfx.pfp_feature_version >= 44))
			adev->gfx.me_fw_write_wait = true;

		if ((adev->gfx.mec_fw_version >= 0x00000197) &&
		    (adev->gfx.mec_feature_version >= 44))
			adev->gfx.mec_fw_write_wait = true;
		break;
	case CHIP_RAVEN:
		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
		    (adev->gfx.me_feature_version >= 42) &&
		    (adev->gfx.pfp_fw_version >= 0x000000b1) &&
		    (adev->gfx.pfp_feature_version >= 42))
			adev->gfx.me_fw_write_wait = true;

		if ((adev->gfx.mec_fw_version >= 0x00000192) &&
		    (adev->gfx.mec_feature_version >= 42))
			adev->gfx.mec_fw_write_wait = true;
		break;
	default:
		break;
	}
}
static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		break;
	case CHIP_RAVEN:
		if (adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
			break;
		if ((adev->gfx.rlc_fw_version != 106 &&
		     adev->gfx.rlc_fw_version < 531) ||
		    (adev->gfx.rlc_fw_version == 53815) ||
		    (adev->gfx.rlc_feature_version < 1) ||
		    !adev->gfx.rlc.is_rlc_v2_1)
			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		break;
	default:
		break;
	}
}
static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	unsigned int *tmp = NULL;
	unsigned int i = 0;
	uint16_t version_major;
	uint16_t version_minor;
	uint32_t smu_version;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_VEGA12:
		chip_name = "vega12";
		break;
	case CHIP_VEGA20:
		chip_name = "vega20";
		break;
	case CHIP_RAVEN:
		if (adev->rev_id >= 8)
			chip_name = "raven2";
		else if (adev->pdev->device == 0x15d8)
			chip_name = "picasso";
		else
			chip_name = "raven";
		break;
	default:
		BUG();
	}
	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.me_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
	adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
	/*
	 * For Picasso && AM4 SOCKET board, we use picasso_rlc_am4.bin
	 * instead of picasso_rlc.bin.
	 *
	 * PCO AM4: revision >= 0xC8 && revision <= 0xCF
	 *          or revision >= 0xD8 && revision <= 0xDF
	 * otherwise is PCO FP5
	 */
	if (!strcmp(chip_name, "picasso") &&
		(((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
		((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc_am4.bin", chip_name);
	else if (!strcmp(chip_name, "raven") && (amdgpu_pm_load_smu_firmware(adev, &smu_version) == 0) &&
		(smu_version >= 0x41e2b))
		/*
		 * SMC is loaded by SBIOS on APU and it's able to get the SMU version directly.
		 */
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_kicker_rlc.bin", chip_name);
	else
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);

	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;

	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
	if (version_major == 2 && version_minor == 1)
		adev->gfx.rlc.is_rlc_v2_1 = true;
	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
	adev->gfx.rlc.save_and_restore_offset =
			le32_to_cpu(rlc_hdr->save_and_restore_offset);
	adev->gfx.rlc.clear_state_descriptor_offset =
			le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
	adev->gfx.rlc.avail_scratch_ram_locations =
			le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
	adev->gfx.rlc.reg_restore_list_size =
			le32_to_cpu(rlc_hdr->reg_restore_list_size);
	adev->gfx.rlc.reg_list_format_start =
			le32_to_cpu(rlc_hdr->reg_list_format_start);
	adev->gfx.rlc.reg_list_format_separate_start =
			le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
	adev->gfx.rlc.starting_offsets_start =
			le32_to_cpu(rlc_hdr->starting_offsets_start);
	adev->gfx.rlc.reg_list_format_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
	adev->gfx.rlc.reg_list_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_size_bytes);
	adev->gfx.rlc.register_list_format =
			kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
				adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
	if (!adev->gfx.rlc.register_list_format) {
		err = -ENOMEM;
		goto out;
	}

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
	for (i = 0 ; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++)
		adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);

	adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
	for (i = 0 ; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++)
		adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);

	if (adev->gfx.rlc.is_rlc_v2_1)
		gfx_v9_0_init_rlc_ext_microcode(adev);
	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
	err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
	if (!err) {
		err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
		if (err)
			goto out;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
				adev->gfx.mec2_fw->data;
		adev->gfx.mec2_fw_version =
				le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.mec2_feature_version =
				le32_to_cpu(cp_hdr->ucode_feature_version);
	} else {
		err = 0;
		adev->gfx.mec2_fw = NULL;
	}
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
		info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
		info->fw = adev->gfx.pfp_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
		info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
		info->fw = adev->gfx.me_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
		info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
		info->fw = adev->gfx.ce_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
		info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
		info->fw = adev->gfx.rlc_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		if (adev->gfx.rlc.is_rlc_v2_1 &&
		    adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
		    adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
		    adev->gfx.rlc.save_restore_list_srm_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
		}

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
		info->fw = adev->gfx.mec_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
		info->fw = adev->gfx.mec_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		if (adev->gfx.mec2_fw) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
			info->fw = adev->gfx.mec2_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
			info->fw = adev->gfx.mec2_fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
		}
	}
out:
	gfx_v9_0_check_if_need_gfxoff(adev);
	gfx_v9_0_check_fw_write_wait(adev);
	if (err) {
		dev_err(adev->dev,
			"gfx9: Failed to load firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->gfx.pfp_fw);
		adev->gfx.pfp_fw = NULL;
		release_firmware(adev->gfx.me_fw);
		adev->gfx.me_fw = NULL;
		release_firmware(adev->gfx.ce_fw);
		adev->gfx.ce_fw = NULL;
		release_firmware(adev->gfx.rlc_fw);
		adev->gfx.rlc_fw = NULL;
		release_firmware(adev->gfx.mec_fw);
		adev->gfx.mec_fw = NULL;
		release_firmware(adev->gfx.mec2_fw);
		adev->gfx.mec2_fw = NULL;
	}
	return err;
}
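
/*
 * The clear state buffer (CSB) is a PM4 stream the RLC uses to reset context
 * registers to known values.  Walk the gfx9 clear state definition once to
 * size the buffer; gfx_v9_0_get_csb_buffer() below fills it with matching
 * packets, so the two walks must stay in sync.
 */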
static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
			else
				return 0;
		}
	}

	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;

	return count;
}
static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
				    volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index -
						PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}
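
/*
 * Program the masks of CUs that must stay powered on: a small set for power
 * gating (RLC_PG_ALWAYS_ON_CU_MASK) and a larger per-SE/SH set for load
 * balancing (RLC_LB_ALWAYS_ACTIVE_CU_MASK), with the count chosen by ASIC
 * type.
 */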
static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
{
	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
	uint32_t pg_always_on_cu_num = 2;
	uint32_t always_on_cu_num;
	uint32_t i, j, k;
	uint32_t mask, cu_bitmap, counter;

	if (adev->flags & AMD_IS_APU)
		always_on_cu_num = 4;
	else if (adev->asic_type == CHIP_VEGA12)
		always_on_cu_num = 8;
	else
		always_on_cu_num = 12;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			mask = 1;
			cu_bitmap = 0;
			counter = 0;
			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);

			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
				if (cu_info->bitmap[i][j] & mask) {
					if (counter == pg_always_on_cu_num)
						WREG32_SOC15(GC, 0, mmRLC_PG_ALWAYS_ON_CU_MASK, cu_bitmap);
					if (counter < always_on_cu_num)
						cu_bitmap |= mask;
					else
						break;
					counter++;
				}
				mask <<= 1;
			}

			WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, cu_bitmap);
			cu_info->ao_cu_bitmap[i][j] = cu_bitmap;
		}
	}
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
}
static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
{
	uint32_t data;

	/* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x0333A5A7);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x30 | 0x40 << 8 | 0x02FA << 16));

	/* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);

	/* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000500);

	mutex_lock(&adev->grbm_idx_mutex);
	/* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH */
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);

	/* set mmRLC_LB_PARAMS = 0x003F_1006 */
	data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
	WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);

	/* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
	data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
	data &= 0x0000FFFF;
	data |= 0x00C00000;
	WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);

	/*
	 * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xF (4 CUs AON for Raven),
	 * programmed in gfx_v9_0_init_always_on_cu_mask()
	 */

	/* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved,
	 * but used for RLC_LB_CNTL configuration */
	data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
	mutex_unlock(&adev->grbm_idx_mutex);

	gfx_v9_0_init_always_on_cu_mask(adev);
}
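
/*
 * Vega20 variant of the LBPW (load balance per watt) setup above; the
 * register sequence is the same, only the threshold and counter values
 * differ.
 */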
static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev)
{
	uint32_t data;

	/* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x033388F8);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x10 | 0x27 << 8 | 0x02FA << 16));

	/* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);

	/* set mmRLC_LB_CNTR_MAX = 0x0000_0800 */
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000800);

	mutex_lock(&adev->grbm_idx_mutex);
	/* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH */
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);

	/* set mmRLC_LB_PARAMS = 0x003F_1006 */
	data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
	WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);

	/* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
	data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
	data &= 0x0000FFFF;
	data |= 0x00C00000;
	WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);

	/*
	 * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF (12 CUs AON),
	 * programmed in gfx_v9_0_init_always_on_cu_mask()
	 */

	/* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved,
	 * but used for RLC_LB_CNTL configuration */
	data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
	mutex_unlock(&adev->grbm_idx_mutex);

	gfx_v9_0_init_always_on_cu_mask(adev);
}
static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
{
	WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
}

static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev)
{
	return 5;
}
static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
{
	const struct cs_section_def *cs_data;
	int r;

	adev->gfx.rlc.cs_data = gfx9_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* init clear state block */
		r = amdgpu_gfx_rlc_init_csb(adev);
		if (r)
			return r;
	}

	if (adev->asic_type == CHIP_RAVEN) {
		/* TODO: double check the cp_table_size for RV */
		adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
		r = amdgpu_gfx_rlc_init_cpt(adev);
		if (r)
			return r;
	}

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		gfx_v9_0_init_lbpw(adev);
		break;
	case CHIP_VEGA20:
		gfx_v9_4_init_lbpw(adev);
		break;
	default:
		break;
	}

	return 0;
}
static int gfx_v9_0_csb_vram_pin(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
	if (unlikely(r != 0))
		return r;

	r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
			AMDGPU_GEM_DOMAIN_VRAM);
	if (!r)
		adev->gfx.rlc.clear_state_gpu_addr =
			amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);

	amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);

	return r;
}
static void gfx_v9_0_csb_vram_unpin(struct amdgpu_device *adev)
{
	int r;

	if (!adev->gfx.rlc.clear_state_obj)
		return;

	r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
	if (likely(r == 0)) {
		amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
	}
}
static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}
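
/*
 * Allocate MEC (compute micro engine) resources: a VRAM buffer providing
 * GFX9_MEC_HPD_SIZE bytes of EOP space per acquired compute ring, and a GTT
 * buffer holding a copy of the MEC microcode.
 */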
static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	const __le32 *fw_data;
	unsigned fw_size;
	u32 *fw;
	size_t mec_hpd_size;

	const struct gfx_firmware_header_v1_0 *mec_hdr;

	bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;

	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.mec.hpd_eop_obj,
				      &adev->gfx.mec.hpd_eop_gpu_addr,
				      (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
		gfx_v9_0_mec_fini(adev);
		return r;
	}

	memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size);

	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;

	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;

	r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.mec_fw_obj,
				      &adev->gfx.mec.mec_fw_gpu_addr,
				      (void **)&fw);
	if (r) {
		dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
		gfx_v9_0_mec_fini(adev);
		return r;
	}

	memcpy(fw, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);

	return 0;
}
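
/*
 * Wave debug helpers: SQ_IND_INDEX/SQ_IND_DATA form an indexed register pair
 * giving indirect access to per-wave state; wave_read_regs() uses the
 * auto-increment mode to read a consecutive range (e.g. GPRs).
 */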
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(address << SQ_IND_INDEX__INDEX__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK));
	return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
			   uint32_t wave, uint32_t thread,
			   uint32_t regno, uint32_t num, uint32_t *out)
{
	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK) |
		(SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}
static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
	/* type 1 wave data */
	dst[(*no_fields)++] = 1;
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
}
static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
				     uint32_t wave, uint32_t start,
				     uint32_t size, uint32_t *dst)
{
	wave_read_regs(
		adev, simd, wave, 0,
		start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}

static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
				     uint32_t wave, uint32_t thread,
				     uint32_t start, uint32_t size,
				     uint32_t *dst)
{
	wave_read_regs(
		adev, simd, wave, thread,
		start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}
static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
				      u32 me, u32 pipe, u32 q)
{
	soc15_grbm_select(adev, me, pipe, q, 0);
}

static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v9_0_select_se_sh,
	.read_wave_data = &gfx_v9_0_read_wave_data,
	.read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
	.select_me_pipe_q = &gfx_v9_0_select_me_pipe_q
};
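
/*
 * Early config: pick the GB_ADDR_CONFIG value for the ASIC (a golden value,
 * or read back from hardware for Vega20) and decode its bitfields into
 * adev->gfx.config for later use.
 */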
static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
{
	u32 gb_addr_config;
	int err;

	adev->gfx.funcs = &gfx_v9_0_gfx_funcs;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_VEGA12:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
		DRM_INFO("fix gfx.config for vega12\n");
		break;
	case CHIP_VEGA20:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
		gb_addr_config &= ~0xf3e777ff;
		gb_addr_config |= 0x22014042;
		/* check vbios table if gpu info is not available */
		err = amdgpu_atomfirmware_get_gfx_info(adev);
		if (err)
			return err;
		break;
	case CHIP_RAVEN:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		if (adev->rev_id >= 8)
			gb_addr_config = RAVEN2_GB_ADDR_CONFIG_GOLDEN;
		else
			gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
		break;
	default:
		BUG();
		break;
	}

	adev->gfx.config.gb_addr_config = gb_addr_config;

	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_PIPES);

	adev->gfx.config.max_tile_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;

	adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_BANKS);
	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					MAX_COMPRESSED_FRAGS);
	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_RB_PER_SE);
	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_SHADER_ENGINES);
	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					PIPE_INTERLEAVE_SIZE));

	return 0;
}
static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev,
				   struct amdgpu_ngg_buf *ngg_buf,
				   int size_se,
				   int default_size_se)
{
	int r;

	if (size_se < 0) {
		dev_err(adev->dev, "Buffer size is invalid: %d\n", size_se);
		return -EINVAL;
	}
	size_se = size_se ? size_se : default_size_se;

	ngg_buf->size = size_se * adev->gfx.config.max_shader_engines;
	r = amdgpu_bo_create_kernel(adev, ngg_buf->size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				    &ngg_buf->bo,
				    &ngg_buf->gpu_addr,
				    NULL);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create NGG buffer\n", r);
		return r;
	}
	ngg_buf->bo_size = amdgpu_bo_size(ngg_buf->bo);

	return r;
}
static int gfx_v9_0_ngg_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < NGG_BUF_MAX; i++)
		amdgpu_bo_free_kernel(&adev->gfx.ngg.buf[i].bo,
				      &adev->gfx.ngg.buf[i].gpu_addr,
				      NULL);

	memset(&adev->gfx.ngg.buf[0], 0,
			sizeof(struct amdgpu_ngg_buf) * NGG_BUF_MAX);

	adev->gfx.ngg.init = false;

	return 0;
}
static int gfx_v9_0_ngg_init(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_ngg || adev->gfx.ngg.init == true)
		return 0;

	/* GDS reserve memory: 64 bytes alignment */
	adev->gfx.ngg.gds_reserve_size = ALIGN(5 * 4, 0x40);
	adev->gds.mem.total_size -= adev->gfx.ngg.gds_reserve_size;
	adev->gds.mem.gfx_partition_size -= adev->gfx.ngg.gds_reserve_size;
	adev->gfx.ngg.gds_reserve_addr = RREG32_SOC15(GC, 0, mmGDS_VMID0_BASE);
	adev->gfx.ngg.gds_reserve_addr += RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE);

	/* Primitive Buffer */
	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PRIM],
				    amdgpu_prim_buf_per_se,
				    64 * 1024);
	if (r) {
		dev_err(adev->dev, "Failed to create Primitive Buffer\n");
		goto err;
	}

	/* Position Buffer */
	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_POS],
				    amdgpu_pos_buf_per_se,
				    256 * 1024);
	if (r) {
		dev_err(adev->dev, "Failed to create Position Buffer\n");
		goto err;
	}

	/* Control Sideband */
	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_CNTL],
				    amdgpu_cntl_sb_buf_per_se,
				    256);
	if (r) {
		dev_err(adev->dev, "Failed to create Control Sideband Buffer\n");
		goto err;
	}

	/* Parameter Cache, not created by default */
	if (amdgpu_param_buf_per_se <= 0)
		goto out;

	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PARAM],
				    amdgpu_param_buf_per_se,
				    512 * 1024);
	if (r) {
		dev_err(adev->dev, "Failed to create Parameter Cache\n");
		goto err;
	}

out:
	adev->gfx.ngg.init = true;
	return 0;
err:
	gfx_v9_0_ngg_fini(adev);
	return r;
}
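
/*
 * Enable NGG (next generation geometry): program the WD buffer size/base
 * registers from the buffers created in gfx_v9_0_ngg_init(), then clear the
 * GDS area reserved for NGG via a DMA_DATA packet on the gfx ring.
 */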
static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
	int r;
	u32 data, base;

	if (!amdgpu_ngg)
		return 0;

	/* Program buffer size */
	data = REG_SET_FIELD(0, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE,
			     adev->gfx.ngg.buf[NGG_PRIM].size >> 8);
	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE,
			     adev->gfx.ngg.buf[NGG_POS].size >> 8);
	WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_1, data);

	data = REG_SET_FIELD(0, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE,
			     adev->gfx.ngg.buf[NGG_CNTL].size >> 8);
	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE,
			     adev->gfx.ngg.buf[NGG_PARAM].size >> 10);
	WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_2, data);

	/* Program buffer base address */
	base = lower_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
	data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE, BASE, base);
	WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE, data);

	base = upper_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
	data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE_HI, BASE_HI, base);
	WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE_HI, data);

	base = lower_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
	data = REG_SET_FIELD(0, WD_POS_BUF_BASE, BASE, base);
	WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE, data);

	base = upper_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
	data = REG_SET_FIELD(0, WD_POS_BUF_BASE_HI, BASE_HI, base);
	WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE_HI, data);

	base = lower_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
	data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE, BASE, base);
	WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE, data);

	base = upper_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
	data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE_HI, BASE_HI, base);
	WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE_HI, data);

	/* Clear GDS reserved memory */
	r = amdgpu_ring_alloc(ring, 17);
	if (r) {
		DRM_ERROR("amdgpu: NGG failed to lock ring %s (%d).\n",
			  ring->name, r);
		return r;
	}

	gfx_v9_0_write_data_to_reg(ring, 0, false,
				   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
				   (adev->gds.mem.total_size +
				    adev->gfx.ngg.gds_reserve_size));

	amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
	amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
				PACKET3_DMA_DATA_DST_SEL(1) |
				PACKET3_DMA_DATA_SRC_SEL(2)));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_addr);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
				adev->gfx.ngg.gds_reserve_size);

	gfx_v9_0_write_data_to_reg(ring, 0, false,
				   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE), 0);

	amdgpu_ring_commit(ring);

	return 0;
}
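
/*
 * One-time init of a compute ring: the doorbell index and EOP address are
 * derived from the flat ring_id, while the EOP interrupt line is derived
 * from the (mec, pipe) position.
 */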
static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
				      int mec, int pipe, int queue)
{
	int r;
	unsigned irq_type;
	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];

	/* mec0 is me1 */
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
				+ (ring_id * GFX9_MEC_HPD_SIZE);
	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		+ ring->pipe;

	/* type-2 packets are deprecated on MEC, use type-3 instead */
	r = amdgpu_ring_init(adev, ring, 1024,
			     &adev->gfx.eop_irq, irq_type);
	if (r)
		return r;

	return 0;
}
1637 static int gfx_v9_0_sw_init(void *handle)
1639 int i, j, k, r, ring_id;
1640 struct amdgpu_ring *ring;
1641 struct amdgpu_kiq *kiq;
1642 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1644 switch (adev->asic_type) {
1649 adev->gfx.mec.num_mec = 2;
1652 adev->gfx.mec.num_mec = 1;
1656 adev->gfx.mec.num_pipe_per_mec = 4;
1657 adev->gfx.mec.num_queue_per_pipe = 8;
1660 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
1664 /* Privileged reg */
1665 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
1666 &adev->gfx.priv_reg_irq);
1670 /* Privileged inst */
1671 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
1672 &adev->gfx.priv_inst_irq);
1677 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_ECC_ERROR,
1678 &adev->gfx.cp_ecc_error_irq);
1683 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_FUE_ERROR,
1684 &adev->gfx.cp_ecc_error_irq);
1688 adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
1690 gfx_v9_0_scratch_init(adev);
1692 r = gfx_v9_0_init_microcode(adev);
1694 DRM_ERROR("Failed to load gfx firmware!\n");
1698 r = adev->gfx.rlc.funcs->init(adev);
1700 DRM_ERROR("Failed to init rlc BOs!\n");
1704 r = gfx_v9_0_mec_init(adev);
1706 DRM_ERROR("Failed to init MEC BOs!\n");
1710 /* set up the gfx ring */
1711 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
1712 ring = &adev->gfx.gfx_ring[i];
1713 ring->ring_obj = NULL;
1715 sprintf(ring->name, "gfx");
1717 sprintf(ring->name, "gfx_%d", i);
1718 ring->use_doorbell = true;
1719 ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
1720 r = amdgpu_ring_init(adev, ring, 1024,
1721 &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
1726 /* set up the compute queues - allocate horizontally across pipes */
1728 for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
1729 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
1730 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
1731 if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
1734 r = gfx_v9_0_compute_ring_init(adev,
1745 r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE);
1747 DRM_ERROR("Failed to init KIQ BOs!\n");
1751 kiq = &adev->gfx.kiq;
1752 r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
1756 /* create MQD for all compute queues as well as KIQ for SRIOV case */
1757 r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation));
1761 adev->gfx.ce_ram_size = 0x8000;
1763 r = gfx_v9_0_gpu_early_init(adev);
1767 r = gfx_v9_0_ngg_init(adev);
1775 static int gfx_v9_0_sw_fini(void *handle)
1778 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1780 if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) &&
1782 struct ras_common_if *ras_if = adev->gfx.ras_if;
1783 struct ras_ih_if ih_info = {
1787 amdgpu_ras_debugfs_remove(adev, ras_if);
1788 amdgpu_ras_sysfs_remove(adev, ras_if);
1789 amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
1790 amdgpu_ras_feature_enable(adev, ras_if, 0);
1794 amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
1795 amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
1796 amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
1798 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
1799 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
1800 for (i = 0; i < adev->gfx.num_compute_rings; i++)
1801 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
1803 amdgpu_gfx_compute_mqd_sw_fini(adev);
1804 amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
1805 amdgpu_gfx_kiq_fini(adev);
1807 gfx_v9_0_mec_fini(adev);
1808 gfx_v9_0_ngg_fini(adev);
1809 amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
1810 &adev->gfx.rlc.clear_state_gpu_addr,
1811 (void **)&adev->gfx.rlc.cs_ptr);
1812 if (adev->asic_type == CHIP_RAVEN) {
1813 amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
1814 &adev->gfx.rlc.cp_table_gpu_addr,
1815 (void **)&adev->gfx.rlc.cp_table_ptr);
1817 gfx_v9_0_free_microcode(adev);
1823 static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
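/*
 * Steer subsequent register accesses to a single shader engine / shader
 * array / instance via GRBM_GFX_INDEX, or broadcast them when the
 * corresponding argument is 0xffffffff. Callers are expected to hold
 * grbm_idx_mutex, as all users in this file do.
 */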
1828 static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance)
1832 if (instance == 0xffffffff)
1833 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
1835 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
1837 if (se_num == 0xffffffff)
1838 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
1840 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
1842 if (sh_num == 0xffffffff)
1843 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
1845 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
1847 WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
1850 static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev)
1854 data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
1855 data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);
1857 data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
1858 data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
1860 mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
1861 adev->gfx.config.max_sh_per_se);
1863 return (~data) & mask;
1866 static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
1871 u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
1872 adev->gfx.config.max_sh_per_se;
1874 mutex_lock(&adev->grbm_idx_mutex);
1875 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1876 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1877 gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
1878 data = gfx_v9_0_get_rb_active_bitmap(adev);
1879 active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
1880 rb_bitmap_width_per_sh);
1883 gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1884 mutex_unlock(&adev->grbm_idx_mutex);
1886 adev->gfx.config.backend_enable_mask = active_rbs;
1887 adev->gfx.config.num_rbs = hweight32(active_rbs);
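/*
 * Compute VMIDs 8..15 are given a fixed non-zero SH_MEM_BASES value so that
 * LDS, scratch and GPUVM accesses from compute user queues land in the
 * private apertures described in gfx_v9_0_init_compute_vmid() below.
 */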
1890 #define DEFAULT_SH_MEM_BASES (0x6000)
1891 #define FIRST_COMPUTE_VMID (8)
1892 #define LAST_COMPUTE_VMID (16)
1893 static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
1896 uint32_t sh_mem_config;
1897 uint32_t sh_mem_bases;
1900 * Configure apertures:
1901 * LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB)
1902 * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB)
1903 * GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB)
1905 sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
1907 sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
1908 SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
1909 SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
1911 mutex_lock(&adev->srbm_mutex);
1912 for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
1913 soc15_grbm_select(adev, 0, 0, 0, i);
1914 /* CP and shaders */
1915 WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
1916 WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
1918 soc15_grbm_select(adev, 0, 0, 0, 0);
1919 mutex_unlock(&adev->srbm_mutex);
1922 static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
1927 WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
1929 gfx_v9_0_tiling_mode_table_init(adev);
1931 gfx_v9_0_setup_rb(adev);
1932 gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
1933 adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
1935 /* XXX SH_MEM regs */
1936 /* where to put LDS, scratch, GPUVM in FSA64 space */
1937 mutex_lock(&adev->srbm_mutex);
1938 for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids; i++) {
1939 soc15_grbm_select(adev, 0, 0, 0, i);
1940 /* CP and shaders */
1942 tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
1943 SH_MEM_ALIGNMENT_MODE_UNALIGNED);
1944 WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
1945 WREG32_SOC15(GC, 0, mmSH_MEM_BASES, 0);
1947 tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
1948 SH_MEM_ALIGNMENT_MODE_UNALIGNED);
1949 WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
1950 tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
1951 (adev->gmc.private_aperture_start >> 48));
1952 tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
1953 (adev->gmc.shared_aperture_start >> 48));
1954 WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp);
1957 soc15_grbm_select(adev, 0, 0, 0, 0);
1959 mutex_unlock(&adev->srbm_mutex);
1961 gfx_v9_0_init_compute_vmid(adev);
1963 mutex_lock(&adev->grbm_idx_mutex);
1965 * making sure that the following register writes will be broadcast
1966 * to all the shaders
1968 gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1970 WREG32_SOC15(GC, 0, mmPA_SC_FIFO_SIZE,
1971 (adev->gfx.config.sc_prim_fifo_size_frontend <<
1972 PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
1973 (adev->gfx.config.sc_prim_fifo_size_backend <<
1974 PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
1975 (adev->gfx.config.sc_hiz_tile_fifo_size <<
1976 PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
1977 (adev->gfx.config.sc_earlyz_tile_fifo_size <<
1978 PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));
1979 mutex_unlock(&adev->grbm_idx_mutex);
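/*
 * Spin until the per-CU and non-CU RLC serdes masters report idle on every
 * SE/SH, so that RLC state is quiescent before the RLC is halted or reset.
 */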
1983 static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
1988 mutex_lock(&adev->grbm_idx_mutex);
1989 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1990 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1991 gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
1992 for (k = 0; k < adev->usec_timeout; k++) {
1993 if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
1997 if (k == adev->usec_timeout) {
1998 gfx_v9_0_select_se_sh(adev, 0xffffffff,
1999 0xffffffff, 0xffffffff);
2000 mutex_unlock(&adev->grbm_idx_mutex);
2001 DRM_INFO("Timeout waiting for RLC serdes %u,%u\n",
2007 gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2008 mutex_unlock(&adev->grbm_idx_mutex);
2010 mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
2011 RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
2012 RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
2013 RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
2014 for (k = 0; k < adev->usec_timeout; k++) {
2015 if ((RREG32_SOC15(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
2021 static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
2024 u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
2026 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
2027 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
2028 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
2029 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
2031 WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
2034 static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
2037 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
2038 adev->gfx.rlc.clear_state_gpu_addr >> 32);
2039 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
2040 adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
2041 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
2042 adev->gfx.rlc.clear_state_size);
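/*
 * The RLC register list format blob consists of a direct portion followed
 * by indirect batches of (register, value) pairs, each batch terminated by
 * 0xFFFFFFFF. This helper walks the indirect portion, recording each
 * batch's start offset and collecting the set of unique indirect register
 * offsets the list references.
 */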
2045 static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
2046 int indirect_offset,
2048 int *unique_indirect_regs,
2049 int unique_indirect_reg_count,
2050 int *indirect_start_offsets,
2051 int *indirect_start_offsets_count,
2052 int max_start_offsets_count)
2056 for (; indirect_offset < list_size; indirect_offset++) {
2057 WARN_ON(*indirect_start_offsets_count >= max_start_offsets_count);
2058 indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
2059 *indirect_start_offsets_count = *indirect_start_offsets_count + 1;
2061 while (register_list_format[indirect_offset] != 0xFFFFFFFF) {
2062 indirect_offset += 2;
2064 /* look for the matching index */
2065 for (idx = 0; idx < unique_indirect_reg_count; idx++) {
2066 if (unique_indirect_regs[idx] ==
2067 register_list_format[indirect_offset] ||
2068 !unique_indirect_regs[idx])
2072 BUG_ON(idx >= unique_indirect_reg_count);
2074 if (!unique_indirect_regs[idx])
2075 unique_indirect_regs[idx] = register_list_format[indirect_offset];
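/*
 * Program the RLC save/restore machine: upload the register restore table
 * into SRM ARAM, mirror the register list format (direct plus indirect
 * portions) into RLC GPM scratch, then program the list size, the batch
 * start offsets, and the unique indirect register index/data pairs.
 */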
2082 static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev)
2084 int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2085 int unique_indirect_reg_count = 0;
2087 int indirect_start_offsets[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2088 int indirect_start_offsets_count = 0;
2094 u32 *register_list_format =
2095 kmalloc(adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
2096 if (!register_list_format)
2098 memcpy(register_list_format, adev->gfx.rlc.register_list_format,
2099 adev->gfx.rlc.reg_list_format_size_bytes);
2101 /* setup unique_indirect_regs array and indirect_start_offsets array */
2102 unique_indirect_reg_count = ARRAY_SIZE(unique_indirect_regs);
2103 gfx_v9_1_parse_ind_reg_list(register_list_format,
2104 adev->gfx.rlc.reg_list_format_direct_reg_list_length,
2105 adev->gfx.rlc.reg_list_format_size_bytes >> 2,
2106 unique_indirect_regs,
2107 unique_indirect_reg_count,
2108 indirect_start_offsets,
2109 &indirect_start_offsets_count,
2110 ARRAY_SIZE(indirect_start_offsets));
2112 /* enable auto inc in case it is disabled */
2113 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
2114 tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
2115 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
2117 /* write register_restore table to offset 0x0 using RLC_SRM_ARAM_ADDR/DATA */
2118 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR),
2119 RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET);
2120 for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
2121 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
2122 adev->gfx.rlc.register_restore[i]);
2124 /* load indirect register */
2125 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2126 adev->gfx.rlc.reg_list_format_start);
2128 /* direct register portion */
2129 for (i = 0; i < adev->gfx.rlc.reg_list_format_direct_reg_list_length; i++)
2130 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2131 register_list_format[i]);
2133 /* indirect register portion */
2134 while (i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2)) {
2135 if (register_list_format[i] == 0xFFFFFFFF) {
2136 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2140 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2141 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2143 for (j = 0; j < unique_indirect_reg_count; j++) {
2144 if (register_list_format[i] == unique_indirect_regs[j]) {
2145 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, j);
2150 BUG_ON(j >= unique_indirect_reg_count);
2155 /* set save/restore list size */
2156 list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
2157 list_size = list_size >> 1;
2158 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2159 adev->gfx.rlc.reg_restore_list_size);
2160 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), list_size);
2162 /* write the starting offsets to RLC scratch ram */
2163 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2164 adev->gfx.rlc.starting_offsets_start);
2165 for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
2166 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2167 indirect_start_offsets[i]);
2169 /* load unique indirect regs */
2170 for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
2171 if (unique_indirect_regs[i] != 0) {
2172 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0)
2173 + GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[i],
2174 unique_indirect_regs[i] & 0x3FFFF);
2176 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0)
2177 + GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[i],
2178 unique_indirect_regs[i] >> 20);
2182 kfree(register_list_format);
2186 static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
2188 WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1);
2191 static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
2195 uint32_t default_data = 0;
2197 default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
2198 if (enable) {
2199 /* enable GFXIP control over CGPG */
2200 data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2201 if (default_data != data)
2202 WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2205 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK;
2206 data |= (2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT);
2207 if (default_data != data)
2208 WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2210 /* restore GFXIP control over CGPG */
2211 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2212 if (default_data != data)
2213 WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
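/*
 * One-time GFX powergating setup: idle poll count, RLC power up/down and
 * serdes command delays, the CGCG-active-before-CGPG hysteresis, and the
 * GRBM register save idle threshold, then hand CGPG control to GFXIP.
 */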
2217 static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)
2221 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2222 AMD_PG_SUPPORT_GFX_SMG |
2223 AMD_PG_SUPPORT_GFX_DMG)) {
2224 /* init IDLE_POLL_COUNT = 60 */
2225 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL));
2226 data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
2227 data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
2228 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL), data);
2230 /* init RLC PG Delay */
2232 data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
2233 data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
2234 data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
2235 data |= (0x40 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
2236 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY), data);
2238 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2));
2239 data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
2240 data |= (0x4 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
2241 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2), data);
2243 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3));
2244 data &= ~RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK;
2245 data |= (0xff << RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT);
2246 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3), data);
2248 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL));
2249 data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
2251 /* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
2252 data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
2253 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);
2255 pwr_10_0_gfxip_control_over_cgpg(adev, true);
2259 static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
2263 uint32_t default_data = 0;
2265 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2266 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2267 SMU_CLK_SLOWDOWN_ON_PU_ENABLE,
2269 if (default_data != data)
2270 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2273 static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
2277 uint32_t default_data = 0;
2279 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2280 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2281 SMU_CLK_SLOWDOWN_ON_PD_ENABLE,
2283 if (default_data != data)
2284 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2287 static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
2291 uint32_t default_data = 0;
2293 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2294 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2297 if (default_data != data)
2298 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2301 static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
2304 uint32_t data, default_data;
2306 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2307 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2308 GFX_POWER_GATING_ENABLE,
2310 if (default_data != data)
2311 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2314 static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
2317 uint32_t data, default_data;
2319 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2320 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2321 GFX_PIPELINE_PG_ENABLE,
2323 if (default_data != data)
2324 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2327 /* read any GFX register to wake up GFX */
2328 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_RENDER_CONTROL));
2331 static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
2334 uint32_t data, default_data;
2336 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2337 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2338 STATIC_PER_CU_PG_ENABLE,
2340 if (default_data != data)
2341 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2344 static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
2347 uint32_t data, default_data;
2349 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2350 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2351 DYN_PER_CU_PG_ENABLE,
2353 if (default_data != data)
2354 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2357 static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
2359 gfx_v9_0_init_csb(adev);
2362 * The RLC save/restore list is supported since RLC v2_1
2363 * and is needed by the gfxoff feature.
2365 if (adev->gfx.rlc.is_rlc_v2_1) {
2366 gfx_v9_1_init_rlc_save_restore_list(adev);
2367 gfx_v9_0_enable_save_restore_machine(adev);
2370 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2371 AMD_PG_SUPPORT_GFX_SMG |
2372 AMD_PG_SUPPORT_GFX_DMG |
2374 AMD_PG_SUPPORT_GDS |
2375 AMD_PG_SUPPORT_RLC_SMU_HS)) {
2376 WREG32(mmRLC_JUMP_TABLE_RESTORE,
2377 adev->gfx.rlc.cp_table_gpu_addr >> 8);
2378 gfx_v9_0_init_gfx_power_gating(adev);
2382 void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
2384 WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0);
2385 gfx_v9_0_enable_gui_idle_interrupt(adev, false);
2386 gfx_v9_0_wait_for_rlc_serdes(adev);
2389 static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
2391 WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2393 WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
2397 static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
2399 #ifdef AMDGPU_RLC_DEBUG_RETRY
2403 WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
2406 /* on APUs (e.g. Carrizo) the CP interrupt is only enabled after CP init */
2407 if (!(adev->flags & AMD_IS_APU)) {
2408 gfx_v9_0_enable_gui_idle_interrupt(adev, true);
2412 #ifdef AMDGPU_RLC_DEBUG_RETRY
2413 /* RLC_GPM_GENERAL_6 : RLC Ucode version */
2414 rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
2415 if (rlc_ucode_ver == 0x108) {
2416 DRM_INFO("Using RLC debug ucode. mmRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
2417 rlc_ucode_ver, adev->gfx.rlc_fw_version);
2418 /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
2419 * default is 0x9C4 to create a 100us interval */
2420 WREG32_SOC15(GC, 0, mmRLC_GPM_TIMER_INT_3, 0x9C4);
2421 /* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
2422 * to disable the page fault retry interrupts, default is
2424 WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_12, 0x100);
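/*
 * Legacy (non-PSP) RLC microcode load: stream the ucode words through the
 * RLC_GPM_UCODE_ADDR/DATA register pair starting at the RLCG load address,
 * then write the firmware version back to the ADDR register, matching the
 * other legacy ucode loaders in this file.
 */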
2429 static int gfx_v9_0_rlc_load_microcode(struct amdgpu_device *adev)
2431 const struct rlc_firmware_header_v2_0 *hdr;
2432 const __le32 *fw_data;
2433 unsigned i, fw_size;
2435 if (!adev->gfx.rlc_fw)
2438 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
2439 amdgpu_ucode_print_rlc_hdr(&hdr->header);
2441 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2442 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2443 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
2445 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
2446 RLCG_UCODE_LOADING_START_ADDRESS);
2447 for (i = 0; i < fw_size; i++)
2448 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
2449 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
2454 static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
2458 if (amdgpu_sriov_vf(adev)) {
2459 gfx_v9_0_init_csb(adev);
2463 adev->gfx.rlc.funcs->stop(adev);
2466 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
2468 gfx_v9_0_init_pg(adev);
2470 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
2471 /* legacy rlc firmware loading */
2472 r = gfx_v9_0_rlc_load_microcode(adev);
2477 switch (adev->asic_type) {
2479 if (amdgpu_lbpw == 0)
2480 gfx_v9_0_enable_lbpw(adev, false);
2482 gfx_v9_0_enable_lbpw(adev, true);
2485 if (amdgpu_lbpw > 0)
2486 gfx_v9_0_enable_lbpw(adev, true);
2488 gfx_v9_0_enable_lbpw(adev, false);
2494 adev->gfx.rlc.funcs->start(adev);
2499 static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2502 u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
2504 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
2505 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
2506 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
2508 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2509 adev->gfx.gfx_ring[i].sched.ready = false;
2511 WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
2515 static int gfx_v9_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
2517 const struct gfx_firmware_header_v1_0 *pfp_hdr;
2518 const struct gfx_firmware_header_v1_0 *ce_hdr;
2519 const struct gfx_firmware_header_v1_0 *me_hdr;
2520 const __le32 *fw_data;
2521 unsigned i, fw_size;
2523 if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
2526 pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
2527 adev->gfx.pfp_fw->data;
2528 ce_hdr = (const struct gfx_firmware_header_v1_0 *)
2529 adev->gfx.ce_fw->data;
2530 me_hdr = (const struct gfx_firmware_header_v1_0 *)
2531 adev->gfx.me_fw->data;
2533 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2534 amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
2535 amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2537 gfx_v9_0_cp_gfx_enable(adev, false);
2540 fw_data = (const __le32 *)
2541 (adev->gfx.pfp_fw->data +
2542 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
2543 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
2544 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, 0);
2545 for (i = 0; i < fw_size; i++)
2546 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
2547 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
2550 fw_data = (const __le32 *)
2551 (adev->gfx.ce_fw->data +
2552 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
2553 fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
2554 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, 0);
2555 for (i = 0; i < fw_size; i++)
2556 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
2557 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
2560 fw_data = (const __le32 *)
2561 (adev->gfx.me_fw->data +
2562 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
2563 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
2564 WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, 0);
2565 for (i = 0; i < fw_size; i++)
2566 WREG32_SOC15(GC, 0, mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
2567 WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
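/*
 * Bring up the gfx ring: program the CP context limits, un-halt the
 * ME/PFP/CE, then submit the clear-state setup (PREAMBLE begin/end, the
 * clearstate_gfx9 context register extents, CLEAR_STATE, and the CE
 * partition bases) so that context 0 starts from a known state.
 */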
2572 static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
2574 struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
2575 const struct cs_section_def *sect = NULL;
2576 const struct cs_extent_def *ext = NULL;
2580 WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
2581 WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);
2583 gfx_v9_0_cp_gfx_enable(adev, true);
2585 r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
2587 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
2591 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2592 amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
2594 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2595 amdgpu_ring_write(ring, 0x80000000);
2596 amdgpu_ring_write(ring, 0x80000000);
2598 for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
2599 for (ext = sect->section; ext->extent != NULL; ++ext) {
2600 if (sect->id == SECT_CONTEXT) {
2601 amdgpu_ring_write(ring,
2602 PACKET3(PACKET3_SET_CONTEXT_REG,
2604 amdgpu_ring_write(ring,
2605 ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
2606 for (i = 0; i < ext->reg_count; i++)
2607 amdgpu_ring_write(ring, ext->extent[i]);
2612 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2613 amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
2615 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
2616 amdgpu_ring_write(ring, 0);
2618 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
2619 amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
2620 amdgpu_ring_write(ring, 0x8000);
2621 amdgpu_ring_write(ring, 0x8000);
2623 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
2624 tmp = (PACKET3_SET_UCONFIG_REG_INDEX_TYPE |
2625 (SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) - PACKET3_SET_UCONFIG_REG_START));
2626 amdgpu_ring_write(ring, tmp);
2627 amdgpu_ring_write(ring, 0);
2629 amdgpu_ring_commit(ring);
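/*
 * Program the RB0 ring buffer registers from the gfx ring state: buffer
 * and block size, read/write pointers, rptr/wptr writeback addresses, ring
 * base, and the doorbell offset and range, then kick off the ring with the
 * clear-state submission above.
 */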
2634 static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
2636 struct amdgpu_ring *ring;
2639 u64 rb_addr, rptr_addr, wptr_gpu_addr;
2641 /* Set the write pointer delay */
2642 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);
2644 /* set the RB to use vmid 0 */
2645 WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);
2647 /* Set ring buffer size */
2648 ring = &adev->gfx.gfx_ring[0];
2649 rb_bufsz = order_base_2(ring->ring_size / 8);
2650 tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
2651 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
2653 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
2655 WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
2657 /* Initialize the ring buffer's write pointers */
2659 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
2660 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
2662 /* set the wb address whether it's enabled or not */
2663 rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2664 WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
2665 WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
2667 wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2668 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
2669 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
2672 WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
2674 rb_addr = ring->gpu_addr >> 8;
2675 WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
2676 WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
2678 tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
2679 if (ring->use_doorbell) {
2680 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2681 DOORBELL_OFFSET, ring->doorbell_index);
2682 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2685 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
2687 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
2689 tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
2690 DOORBELL_RANGE_LOWER, ring->doorbell_index);
2691 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
2693 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
2694 CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
2697 /* start the ring */
2698 gfx_v9_0_cp_gfx_start(adev);
2699 ring->sched.ready = true;
2704 static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
2709 WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
2711 WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
2712 (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
2713 for (i = 0; i < adev->gfx.num_compute_rings; i++)
2714 adev->gfx.compute_ring[i].sched.ready = false;
2715 adev->gfx.kiq.ring.sched.ready = false;
2720 static int gfx_v9_0_cp_compute_load_microcode(struct amdgpu_device *adev)
2722 const struct gfx_firmware_header_v1_0 *mec_hdr;
2723 const __le32 *fw_data;
2727 if (!adev->gfx.mec_fw)
2730 gfx_v9_0_cp_compute_enable(adev, false);
2732 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
2733 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
2735 fw_data = (const __le32 *)
2736 (adev->gfx.mec_fw->data +
2737 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
2739 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
2740 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2741 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);
2743 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
2744 adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
2745 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
2746 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
2749 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
2750 mec_hdr->jt_offset);
2751 for (i = 0; i < mec_hdr->jt_size; i++)
2752 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
2753 le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
2755 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
2756 adev->gfx.mec_fw_version);
2757 /* TODO: loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
2763 static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
2766 struct amdgpu_device *adev = ring->adev;
2768 /* tell RLC which queue is the KIQ */
2769 tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
2771 tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
2772 WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
2774 WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
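/*
 * Use the KIQ to bring the compute queues online: a SET_RESOURCES packet
 * hands the enabled queue bitmap to the hardware scheduler, then one
 * MAP_QUEUES packet per KCQ points the scheduler at that ring's MQD and
 * wptr writeback slot.
 */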
2777 static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
2779 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
2780 uint64_t queue_mask = 0;
2783 for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
2784 if (!test_bit(i, adev->gfx.mec.queue_bitmap))
2787 /* This situation may be hit in the future if a new HW
2788 * generation exposes more than 64 queues. If so, the
2789 * definition of queue_mask needs updating */
2790 if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
2791 DRM_ERROR("Invalid KCQ enabled: %d\n", i);
2795 queue_mask |= (1ull << i);
2798 r = amdgpu_ring_alloc(kiq_ring, (7 * adev->gfx.num_compute_rings) + 8);
2800 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
2805 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
2806 amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
2807 PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */
2808 amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */
2809 amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */
2810 amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
2811 amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
2812 amdgpu_ring_write(kiq_ring, 0); /* oac mask */
2813 amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
2814 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2815 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
2816 uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
2817 uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2819 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
2821 amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, vidmem: 1, engine: 0, num_Q: 1 */
2822 PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
2823 PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
2824 PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
2825 PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
2826 PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
2827 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
2828 PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
2829 PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */
2830 PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
2831 amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
2832 amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
2833 amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
2834 amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
2835 amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
2838 r = amdgpu_ring_test_helper(kiq_ring);
2840 DRM_ERROR("KCQ enable failed\n");
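/*
 * Fill the memory queue descriptor (MQD) for a compute ring. The MQD
 * mirrors the CP_HQD_* register block (EOP buffer, doorbell control, PQ
 * base and size, rptr/wptr writeback addresses), so the CP or the KIQ can
 * load the entire hardware queue state from memory.
 */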
2845 static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
2847 struct amdgpu_device *adev = ring->adev;
2848 struct v9_mqd *mqd = ring->mqd_ptr;
2849 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
2852 mqd->header = 0xC0310800;
2853 mqd->compute_pipelinestat_enable = 0x00000001;
2854 mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
2855 mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
2856 mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
2857 mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
2858 mqd->compute_misc_reserved = 0x00000003;
2860 mqd->dynamic_cu_mask_addr_lo =
2861 lower_32_bits(ring->mqd_gpu_addr
2862 + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
2863 mqd->dynamic_cu_mask_addr_hi =
2864 upper_32_bits(ring->mqd_gpu_addr
2865 + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
2867 eop_base_addr = ring->eop_gpu_addr >> 8;
2868 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
2869 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
2871 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2872 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
2873 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
2874 (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
2876 mqd->cp_hqd_eop_control = tmp;
2878 /* enable doorbell? */
2879 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
2881 if (ring->use_doorbell) {
2882 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2883 DOORBELL_OFFSET, ring->doorbell_index);
2884 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2886 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2887 DOORBELL_SOURCE, 0);
2888 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2891 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2895 mqd->cp_hqd_pq_doorbell_control = tmp;
2897 /* disable the queue if it's active */
2899 mqd->cp_hqd_dequeue_request = 0;
2900 mqd->cp_hqd_pq_rptr = 0;
2901 mqd->cp_hqd_pq_wptr_lo = 0;
2902 mqd->cp_hqd_pq_wptr_hi = 0;
2904 /* set the pointer to the MQD */
2905 mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
2906 mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
2908 /* set MQD vmid to 0 */
2909 tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
2910 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
2911 mqd->cp_mqd_control = tmp;
2913 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
2914 hqd_gpu_addr = ring->gpu_addr >> 8;
2915 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
2916 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
2918 /* set up the HQD, this is similar to CP_RB0_CNTL */
2919 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
2920 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
2921 (order_base_2(ring->ring_size / 4) - 1));
2922 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
2923 ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
2925 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
2927 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
2928 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
2929 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
2930 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
2931 mqd->cp_hqd_pq_control = tmp;
2933 /* set the wb address whether it's enabled or not */
2934 wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2935 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
2936 mqd->cp_hqd_pq_rptr_report_addr_hi =
2937 upper_32_bits(wb_gpu_addr) & 0xffff;
2939 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
2940 wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2941 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
2942 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
2945 /* enable the doorbell if requested */
2946 if (ring->use_doorbell) {
2947 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
2948 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2949 DOORBELL_OFFSET, ring->doorbell_index);
2951 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2953 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2954 DOORBELL_SOURCE, 0);
2955 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2959 mqd->cp_hqd_pq_doorbell_control = tmp;
2961 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2963 mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
2965 /* set the vmid for the queue */
2966 mqd->cp_hqd_vmid = 0;
2968 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
2969 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
2970 mqd->cp_hqd_persistent_state = tmp;
2972 /* set MIN_IB_AVAIL_SIZE */
2973 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
2974 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
2975 mqd->cp_hqd_ib_control = tmp;
2977 /* activate the queue */
2978 mqd->cp_hqd_active = 1;
2983 static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
2985 struct amdgpu_device *adev = ring->adev;
2986 struct v9_mqd *mqd = ring->mqd_ptr;
2989 /* disable wptr polling */
2990 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
2992 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
2993 mqd->cp_hqd_eop_base_addr_lo);
2994 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
2995 mqd->cp_hqd_eop_base_addr_hi);
2997 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2998 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL,
2999 mqd->cp_hqd_eop_control);
3001 /* enable doorbell? */
3002 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3003 mqd->cp_hqd_pq_doorbell_control);
3005 /* disable the queue if it's active */
3006 if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3007 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3008 for (j = 0; j < adev->usec_timeout; j++) {
3009 if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3013 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3014 mqd->cp_hqd_dequeue_request);
3015 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR,
3016 mqd->cp_hqd_pq_rptr);
3017 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3018 mqd->cp_hqd_pq_wptr_lo);
3019 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3020 mqd->cp_hqd_pq_wptr_hi);
3023 /* set the pointer to the MQD */
3024 WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR,
3025 mqd->cp_mqd_base_addr_lo);
3026 WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI,
3027 mqd->cp_mqd_base_addr_hi);
3029 /* set MQD vmid to 0 */
3030 WREG32_SOC15(GC, 0, mmCP_MQD_CONTROL,
3031 mqd->cp_mqd_control);
3033 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3034 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE,
3035 mqd->cp_hqd_pq_base_lo);
3036 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI,
3037 mqd->cp_hqd_pq_base_hi);
3039 /* set up the HQD, this is similar to CP_RB0_CNTL */
3040 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL,
3041 mqd->cp_hqd_pq_control);
3043 /* set the wb address whether it's enabled or not */
3044 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
3045 mqd->cp_hqd_pq_rptr_report_addr_lo);
3046 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
3047 mqd->cp_hqd_pq_rptr_report_addr_hi);
3049 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3050 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
3051 mqd->cp_hqd_pq_wptr_poll_addr_lo);
3052 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
3053 mqd->cp_hqd_pq_wptr_poll_addr_hi);
3055 /* enable the doorbell if requested */
3056 if (ring->use_doorbell) {
3057 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
3058 (adev->doorbell_index.kiq * 2) << 2);
3059 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
3060 (adev->doorbell_index.userqueue_end * 2) << 2);
3063 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3064 mqd->cp_hqd_pq_doorbell_control);
3066 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3067 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3068 mqd->cp_hqd_pq_wptr_lo);
3069 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3070 mqd->cp_hqd_pq_wptr_hi);
3072 /* set the vmid for the queue */
3073 WREG32_SOC15(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
3075 WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE,
3076 mqd->cp_hqd_persistent_state);
3078 /* activate the queue */
3079 WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE,
3080 mqd->cp_hqd_active);
3082 if (ring->use_doorbell)
3083 WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
3088 static int gfx_v9_0_kiq_fini_register(struct amdgpu_ring *ring)
3090 struct amdgpu_device *adev = ring->adev;
3093 /* disable the queue if it's active */
3094 if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3096 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3098 for (j = 0; j < adev->usec_timeout; j++) {
3099 if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3104 if (j == adev->usec_timeout) {
3105 DRM_DEBUG("KIQ dequeue request failed.\n");
3107 /* Manual disable if dequeue request times out */
3108 WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, 0);
3111 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3115 WREG32_SOC15(GC, 0, mmCP_HQD_IQ_TIMER, 0);
3116 WREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL, 0);
3117 WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0);
3118 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
3119 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
3120 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR, 0);
3121 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0);
3122 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0);
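/*
 * KIQ queue init differs from the KCQ path below: the KIQ's HQD registers
 * are written directly via MMIO under srbm_mutex, while ordinary compute
 * queues only initialize their MQD here and are mapped later through KIQ
 * MAP_QUEUES packets. On GPU reset the saved MQD backup is restored
 * instead of being rebuilt from scratch.
 */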
3127 static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
3129 struct amdgpu_device *adev = ring->adev;
3130 struct v9_mqd *mqd = ring->mqd_ptr;
3131 int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
3133 gfx_v9_0_kiq_setting(ring);
3135 if (adev->in_gpu_reset) { /* for GPU_RESET case */
3136 /* reset MQD to a clean status */
3137 if (adev->gfx.mec.mqd_backup[mqd_idx])
3138 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
3140 /* reset ring buffer */
3142 amdgpu_ring_clear_ring(ring);
3144 mutex_lock(&adev->srbm_mutex);
3145 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3146 gfx_v9_0_kiq_init_register(ring);
3147 soc15_grbm_select(adev, 0, 0, 0, 0);
3148 mutex_unlock(&adev->srbm_mutex);
3150 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3151 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3152 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3153 mutex_lock(&adev->srbm_mutex);
3154 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3155 gfx_v9_0_mqd_init(ring);
3156 gfx_v9_0_kiq_init_register(ring);
3157 soc15_grbm_select(adev, 0, 0, 0, 0);
3158 mutex_unlock(&adev->srbm_mutex);
3160 if (adev->gfx.mec.mqd_backup[mqd_idx])
3161 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3167 static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
3169 struct amdgpu_device *adev = ring->adev;
3170 struct v9_mqd *mqd = ring->mqd_ptr;
3171 int mqd_idx = ring - &adev->gfx.compute_ring[0];
3173 if (!adev->in_gpu_reset && !adev->in_suspend) {
3174 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3175 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3176 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3177 mutex_lock(&adev->srbm_mutex);
3178 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3179 gfx_v9_0_mqd_init(ring);
3180 soc15_grbm_select(adev, 0, 0, 0, 0);
3181 mutex_unlock(&adev->srbm_mutex);
3183 if (adev->gfx.mec.mqd_backup[mqd_idx])
3184 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3185 } else if (adev->in_gpu_reset) { /* for GPU_RESET case */
3186 /* reset MQD to a clean status */
3187 if (adev->gfx.mec.mqd_backup[mqd_idx])
3188 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
3190 /* reset ring buffer */
3192 amdgpu_ring_clear_ring(ring);
3194 amdgpu_ring_clear_ring(ring);
3200 static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
3202 struct amdgpu_ring *ring;
3205 ring = &adev->gfx.kiq.ring;
3207 r = amdgpu_bo_reserve(ring->mqd_obj, false);
3208 if (unlikely(r != 0))
3211 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3212 if (unlikely(r != 0))
3215 gfx_v9_0_kiq_init_queue(ring);
3216 amdgpu_bo_kunmap(ring->mqd_obj);
3217 ring->mqd_ptr = NULL;
3218 amdgpu_bo_unreserve(ring->mqd_obj);
3219 ring->sched.ready = true;
3223 static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
3225 struct amdgpu_ring *ring = NULL;
3228 gfx_v9_0_cp_compute_enable(adev, true);
3230 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3231 ring = &adev->gfx.compute_ring[i];
3233 r = amdgpu_bo_reserve(ring->mqd_obj, false);
3234 if (unlikely(r != 0))
3236 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3238 r = gfx_v9_0_kcq_init_queue(ring);
3239 amdgpu_bo_kunmap(ring->mqd_obj);
3240 ring->mqd_ptr = NULL;
3242 amdgpu_bo_unreserve(ring->mqd_obj);
3247 r = gfx_v9_0_kiq_kcq_enable(adev);
3252 static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
3255 struct amdgpu_ring *ring;
3257 if (!(adev->flags & AMD_IS_APU))
3258 gfx_v9_0_enable_gui_idle_interrupt(adev, false);
3260 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
3261 /* legacy firmware loading */
3262 r = gfx_v9_0_cp_gfx_load_microcode(adev);
3266 r = gfx_v9_0_cp_compute_load_microcode(adev);
3271 r = gfx_v9_0_kiq_resume(adev);
3275 r = gfx_v9_0_cp_gfx_resume(adev);
3279 r = gfx_v9_0_kcq_resume(adev);
3283 ring = &adev->gfx.gfx_ring[0];
3284 r = amdgpu_ring_test_helper(ring);
3288 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3289 ring = &adev->gfx.compute_ring[i];
3290 amdgpu_ring_test_helper(ring);
3293 gfx_v9_0_enable_gui_idle_interrupt(adev, true);
3298 static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
3300 gfx_v9_0_cp_gfx_enable(adev, enable);
3301 gfx_v9_0_cp_compute_enable(adev, enable);
3304 static int gfx_v9_0_hw_init(void *handle)
3307 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3309 gfx_v9_0_init_golden_registers(adev);
3311 gfx_v9_0_constants_init(adev);
3313 r = gfx_v9_0_csb_vram_pin(adev);
3317 r = adev->gfx.rlc.funcs->resume(adev);
3321 r = gfx_v9_0_cp_resume(adev);
3325 r = gfx_v9_0_ngg_en(adev);
3332 static int gfx_v9_0_kcq_disable(struct amdgpu_device *adev)
3335 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
3337 r = amdgpu_ring_alloc(kiq_ring, 6 * adev->gfx.num_compute_rings);
3339 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
3341 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3342 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
3344 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
3345 amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
3346 PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
3347 PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
3348 PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
3349 PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
3350 amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
3351 amdgpu_ring_write(kiq_ring, 0);
3352 amdgpu_ring_write(kiq_ring, 0);
3353 amdgpu_ring_write(kiq_ring, 0);
3355 r = amdgpu_ring_test_helper(kiq_ring);
3357 DRM_ERROR("KCQ disable failed\n");
3362 static int gfx_v9_0_hw_fini(void *handle)
3364 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3366 amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
3367 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
3368 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
3370 /* disable the KCQs to avoid the CPC touching memory that is no longer valid */
3371 gfx_v9_0_kcq_disable(adev);
3373 if (amdgpu_sriov_vf(adev)) {
3374 gfx_v9_0_cp_gfx_enable(adev, false);
3375 /* must disable polling for SRIOV when hw is finished, otherwise the
3376 * CPC engine may keep fetching a WB address which is already invalid
3377 * after sw fini and trigger a DMAR read error in the hypervisor.
3380 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3384 /* Use the deinitialize sequence from CAIL when unbinding the device from
3385 * the driver, otherwise KIQ hangs when binding back
3387 if (!adev->in_gpu_reset && !adev->in_suspend) {
3388 mutex_lock(&adev->srbm_mutex);
3389 soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
3390 adev->gfx.kiq.ring.pipe,
3391 adev->gfx.kiq.ring.queue, 0);
3392 gfx_v9_0_kiq_fini_register(&adev->gfx.kiq.ring);
3393 soc15_grbm_select(adev, 0, 0, 0, 0);
3394 mutex_unlock(&adev->srbm_mutex);
3397 gfx_v9_0_cp_enable(adev, false);
3398 adev->gfx.rlc.funcs->stop(adev);
3400 gfx_v9_0_csb_vram_unpin(adev);
3405 static int gfx_v9_0_suspend(void *handle)
3407 return gfx_v9_0_hw_fini(handle);
3410 static int gfx_v9_0_resume(void *handle)
3412 return gfx_v9_0_hw_init(handle);
3415 static bool gfx_v9_0_is_idle(void *handle)
3417 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3419 if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
3420 GRBM_STATUS, GUI_ACTIVE))
3426 static int gfx_v9_0_wait_for_idle(void *handle)
3429 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3431 for (i = 0; i < adev->usec_timeout; i++) {
3432 if (gfx_v9_0_is_idle(handle))
3439 static int gfx_v9_0_soft_reset(void *handle)
3441 u32 grbm_soft_reset = 0;
3443 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3446 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
3447 if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
3448 GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
3449 GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
3450 GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
3451 GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
3452 GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
3453 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3454 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
3455 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3456 GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
3459 if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
3460 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3461 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
3465 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
3466 if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
3467 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3468 GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
3471 if (grbm_soft_reset) {
3473 adev->gfx.rlc.funcs->stop(adev);
3475 /* Disable GFX parsing/prefetching */
3476 gfx_v9_0_cp_gfx_enable(adev, false);
3478 /* Disable MEC parsing/prefetching */
3479 gfx_v9_0_cp_compute_enable(adev, false);
3481 if (grbm_soft_reset) {
3482 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3483 tmp |= grbm_soft_reset;
3484 dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
3485 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
3486 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3490 tmp &= ~grbm_soft_reset;
3491 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
3492 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3495 /* Wait a little for things to settle down */
3501 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
3505 mutex_lock(&adev->gfx.gpu_clock_mutex);
3506 WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
3507 clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
3508 ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
3509 mutex_unlock(&adev->gfx.gpu_clock_mutex);
3513 static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
3515 uint32_t gds_base, uint32_t gds_size,
3516 uint32_t gws_base, uint32_t gws_size,
3517 uint32_t oa_base, uint32_t oa_size)
3519 struct amdgpu_device *adev = ring->adev;
3522 gfx_v9_0_write_data_to_reg(ring, 0, false,
3523 SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid,
3527 gfx_v9_0_write_data_to_reg(ring, 0, false,
3528 SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid,
3532 gfx_v9_0_write_data_to_reg(ring, 0, false,
3533 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid,
3534 gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
3537 gfx_v9_0_write_data_to_reg(ring, 0, false,
3538 SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid,
3539 (1 << (oa_size + oa_base)) - (1 << oa_base));
3542 static int gfx_v9_0_early_init(void *handle)
3544 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3546 adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
3547 adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
3548 gfx_v9_0_set_ring_funcs(adev);
3549 gfx_v9_0_set_irq_funcs(adev);
3550 gfx_v9_0_set_gds_init(adev);
3551 gfx_v9_0_set_rlc_funcs(adev);
3556 static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
3557 struct amdgpu_iv_entry *entry);
static int gfx_v9_0_ecc_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ras_common_if **ras_if = &adev->gfx.ras_if;
	struct ras_ih_if ih_info = {
		.cb = gfx_v9_0_process_ras_data_cb,
	};
	struct ras_fs_if fs_info = {
		.sysfs_name = "gfx_err_count",
		.debugfs_name = "gfx_err_inject",
	};
	struct ras_common_if ras_block = {
		.block = AMDGPU_RAS_BLOCK__GFX,
		.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
		.sub_block_index = 0,
	};
	int r;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
		amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0);
		return 0;
	}

	if (*ras_if)
		goto resume;

	*ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
	if (!*ras_if)
		return -ENOMEM;

	**ras_if = ras_block;

	r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
	if (r)
		goto feature;

	ih_info.head = **ras_if;
	fs_info.head = **ras_if;

	r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
	if (r)
		goto interrupt;

	r = amdgpu_ras_debugfs_create(adev, &fs_info);
	if (r)
		goto debugfs;

	r = amdgpu_ras_sysfs_create(adev, &fs_info);
	if (r)
		goto sysfs;
resume:
	r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
	if (r)
		goto irq;

	return 0;
irq:
	amdgpu_ras_sysfs_remove(adev, *ras_if);
sysfs:
	amdgpu_ras_debugfs_remove(adev, *ras_if);
debugfs:
	amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
interrupt:
	amdgpu_ras_feature_enable(adev, *ras_if, 0);
feature:
	kfree(*ras_if);
	*ras_if = NULL;
	return r;
}
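/*
 * Editor's note: the labels above form the usual kernel goto-unwind
 * ladder. Each setup step that fails jumps to the label that tears down
 * only the steps that already succeeded, in reverse order of
 * initialization, so no cleanup runs for a step that never completed.
 */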
static int gfx_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
	if (r)
		return r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
	if (r)
		return r;

	r = gfx_v9_0_ecc_late_init(handle);
	if (r)
		return r;

	return 0;
}
static bool gfx_v9_0_is_rlc_enabled(struct amdgpu_device *adev)
{
	uint32_t rlc_setting;

	/* if RLC is not enabled, do nothing */
	rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
		return false;

	return true;
}

static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev)
{
	uint32_t data;
	unsigned i;

	data = RLC_SAFE_MODE__CMD_MASK;
	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
	WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);

	/* wait for RLC_SAFE_MODE */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
			break;
		udelay(1);
	}
}

static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev)
{
	uint32_t data;

	data = RLC_SAFE_MODE__CMD_MASK;
	WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
}
static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
						bool enable)
{
	amdgpu_gfx_rlc_enter_safe_mode(adev);

	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
		gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
			gfx_v9_0_enable_gfx_pipeline_powergating(adev, true);
	} else {
		gfx_v9_0_enable_gfx_cg_power_gating(adev, false);
		gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
	}

	amdgpu_gfx_rlc_exit_safe_mode(adev);
}

static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
						bool enable)
{
	/* TODO: double check if we need to perform under safe mode */
	/* gfx_v9_0_enter_rlc_safe_mode(adev); */

	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
		gfx_v9_0_enable_gfx_static_mg_power_gating(adev, true);
	else
		gfx_v9_0_enable_gfx_static_mg_power_gating(adev, false);

	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
		gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, true);
	else
		gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, false);

	/* gfx_v9_0_exit_rlc_safe_mode(adev); */
}
static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t data, def;

	amdgpu_gfx_rlc_enter_safe_mode(adev);

	/* It is disabled by HW by default */
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
		/* 1 - RLC_CGTT_MGCG_OVERRIDE */
		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);

		if (adev->asic_type != CHIP_VEGA12)
			data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;

		data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);

		/* only for Vega10 & Raven1 */
		data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;

		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);

		/* MGLS is a global flag to control all MGLS in GFX */
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
			/* 2 - RLC memory Light sleep */
			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
				def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
				data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
				if (def != data)
					WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
			}
			/* 3 - CP memory Light sleep */
			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
				def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
				if (def != data)
					WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
			}
		}
	} else {
		/* 1 - MGCG_OVERRIDE */
		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);

		if (adev->asic_type != CHIP_VEGA12)
			data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;

		data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
			 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);

		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);

		/* 2 - disable MGLS in RLC */
		data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
			WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
		}

		/* 3 - disable MGLS in CP */
		data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
			WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
		}
	}

	amdgpu_gfx_rlc_exit_safe_mode(adev);
}
static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
					    bool enable)
{
	uint32_t data, def;

	amdgpu_gfx_rlc_enter_safe_mode(adev);

	/* Enable 3D CGCG/CGLS */
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
		/* write cmd to clear cgcg/cgls ov */
		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
		/* unset CGCG override */
		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
		/* update CGCG and CGLS override bits */
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);

		/* enable 3Dcgcg FSM(0x0000363f) */
		def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);

		data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
			RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
			data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
				RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);

		/* set IDLE_POLL_COUNT(0x00900100) */
		def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
		data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
		if (def != data)
			WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
	} else {
		/* Disable CGCG/CGLS */
		def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
		/* disable cgcg, cgls should be disabled */
		data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
			  RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
		/* disable cgcg and cgls in FSM */
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
	}

	amdgpu_gfx_rlc_exit_safe_mode(adev);
}
static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t def, data;

	amdgpu_gfx_rlc_enter_safe_mode(adev);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
		/* unset CGCG override */
		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
		else
			data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
		/* update CGCG and CGLS override bits */
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);

		/* enable cgcg FSM(0x0000363F) */
		def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);

		data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
			RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
			data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
				RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);

		/* set IDLE_POLL_COUNT(0x00900100) */
		def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
		data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
		if (def != data)
			WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
	} else {
		def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
		/* reset CGCG/CGLS bits */
		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
		/* disable cgcg and cgls in FSM */
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
	}

	amdgpu_gfx_rlc_exit_safe_mode(adev);
}
static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
					    bool enable)
{
	if (enable) {
		/* CGCG/CGLS should be enabled after MGCG/MGLS
		 * ===  MGCG + MGLS ===
		 */
		gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
		/* ===  CGCG/CGLS for GFX 3D Only === */
		gfx_v9_0_update_3d_clock_gating(adev, enable);
		/* ===  CGCG + CGLS === */
		gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
	} else {
		/* CGCG/CGLS should be disabled before MGCG/MGLS
		 * ===  CGCG + CGLS ===
		 */
		gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
		/* ===  CGCG/CGLS for GFX 3D Only === */
		gfx_v9_0_update_3d_clock_gating(adev, enable);
		/* ===  MGCG + MGLS === */
		gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
	}
	return 0;
}
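/*
 * Editor's note: the enable path works from fine to coarse (MGCG/MGLS,
 * then 3D CGCG/CGLS, then CGCG/CGLS) and the disable path mirrors it in
 * reverse, so the coarse-grain FSMs are never left enabled while the
 * finer-grained gating underneath them is being reconfigured. That "why"
 * is inferred from the ordering comments above, not a documented hardware
 * requirement.
 */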
static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
	.is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
	.set_safe_mode = gfx_v9_0_set_safe_mode,
	.unset_safe_mode = gfx_v9_0_unset_safe_mode,
	.init = gfx_v9_0_rlc_init,
	.get_csb_size = gfx_v9_0_get_csb_size,
	.get_csb_buffer = gfx_v9_0_get_csb_buffer,
	.get_cp_table_num = gfx_v9_0_cp_jump_table_num,
	.resume = gfx_v9_0_rlc_resume,
	.stop = gfx_v9_0_rlc_stop,
	.reset = gfx_v9_0_rlc_reset,
	.start = gfx_v9_0_rlc_start
};
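/*
 * Editor's note: this table is the RLC vtable consumed by the generic
 * amdgpu_gfx_rlc_* helpers; amdgpu_gfx_rlc_enter_safe_mode(), for example,
 * checks is_rlc_enabled() and then calls set_safe_mode() through it (based
 * on the helper implementation in amdgpu_rlc.c of the same tree).
 */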
static int gfx_v9_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_PG_STATE_GATE) ? true : false;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (!enable) {
			amdgpu_gfx_off_ctrl(adev, false);
			cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
		}
		if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
			gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
			gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
		} else {
			gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
			gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
		}

		if (adev->pg_flags & AMD_PG_SUPPORT_CP)
			gfx_v9_0_enable_cp_power_gating(adev, true);
		else
			gfx_v9_0_enable_cp_power_gating(adev, false);

		/* update gfx cgpg state */
		gfx_v9_0_update_gfx_cg_power_gating(adev, enable);

		/* update mgcg state */
		gfx_v9_0_update_gfx_mg_power_gating(adev, enable);

		if (enable)
			amdgpu_gfx_off_ctrl(adev, true);
		break;
	case CHIP_VEGA12:
		if (!enable) {
			amdgpu_gfx_off_ctrl(adev, false);
			cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
		} else {
			amdgpu_gfx_off_ctrl(adev, true);
		}
		break;
	default:
		break;
	}

	return 0;
}
static int gfx_v9_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
		gfx_v9_0_update_gfx_clock_gating(adev,
						 state == AMD_CG_STATE_GATE ? true : false);
		break;
	default:
		break;
	}
	return 0;
}
static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_GFX_MGCG */
	data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
		*flags |= AMD_CG_SUPPORT_GFX_MGCG;

	/* AMD_CG_SUPPORT_GFX_CGCG */
	data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CGCG;

	/* AMD_CG_SUPPORT_GFX_CGLS */
	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CGLS;

	/* AMD_CG_SUPPORT_GFX_RLC_LS */
	data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
	if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;

	/* AMD_CG_SUPPORT_GFX_CP_LS */
	data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
	if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;

	/* AMD_CG_SUPPORT_GFX_3D_CGCG */
	data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
	if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;

	/* AMD_CG_SUPPORT_GFX_3D_CGLS */
	if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
}
static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
{
	return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 is 32bit rptr*/
}

static u64 gfx_v9_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u64 wptr;

	/* XXX check if swapping is necessary on BE */
	if (ring->use_doorbell) {
		wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]);
	} else {
		wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
		wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
	}

	return wptr;
}

static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
		WDOORBELL64(ring->doorbell_index, ring->wptr);
	} else {
		WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
	}
}
static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 ref_and_mask, reg_mem_engine;
	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;

	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
		switch (ring->me) {
		case 1:
			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
			break;
		case 2:
			ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
			break;
		default:
			return;
		}
		reg_mem_engine = 0;
	} else {
		ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
		reg_mem_engine = 1; /* pfp */
	}

	gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
			      adev->nbio_funcs->get_hdp_flush_req_offset(adev),
			      adev->nbio_funcs->get_hdp_flush_done_offset(adev),
			      ref_and_mask, ref_and_mask, 0x20);
}
static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
				      struct amdgpu_job *job,
				      struct amdgpu_ib *ib,
				      uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
	u32 header, control = 0;

	if (ib->flags & AMDGPU_IB_FLAG_CE)
		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
	else
		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);

	control |= ib->length_dw | (vmid << 24);

	if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
		control |= INDIRECT_BUFFER_PRE_ENB(1);

		if (!(ib->flags & AMDGPU_IB_FLAG_CE))
			gfx_v9_0_ring_emit_de_meta(ring);
	}

	amdgpu_ring_write(ring, header);
	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
		(2 << 0) |
#endif
		lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, control);
}
static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
					  struct amdgpu_job *job,
					  struct amdgpu_ib *ib,
					  uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);

	/* Currently, there is a high possibility to get wave ID mismatch
	 * between ME and GDS, leading to a hw deadlock, because ME generates
	 * different wave IDs than the GDS expects. This situation happens
	 * randomly when at least 5 compute pipes use GDS ordered append.
	 * The wave IDs generated by ME are also wrong after suspend/resume.
	 * Those are probably bugs somewhere else in the kernel driver.
	 *
	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
	 * GDS to 0 for this ring (me/pipe).
	 */
	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID);
		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
		(2 << 0) |
#endif
		lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, control);
}
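/*
 * Editor's sketch of the packet just emitted: an INDIRECT_BUFFER packet is
 * four dwords - header, ib_base_lo (dword-aligned, hence the BUG_ON),
 * ib_base_hi, and a control word carrying the IB length in dwords with the
 * VMID at bits 24+. With the hypothetical values ib->length_dw = 16 and
 * vmid = 3, control = INDIRECT_BUFFER_VALID | 16 | (3 << 24).
 */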
static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
				     u64 seq, unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
	bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;

	/* RELEASE_MEM - flush caches, send int */
	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
	amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
					       EOP_TC_NC_ACTION_EN) :
					      (EOP_TCL1_ACTION_EN |
					       EOP_TC_ACTION_EN |
					       EOP_TC_WB_ACTION_EN |
					       EOP_TC_MD_ACTION_EN)) |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));

	/*
	 * the address should be Qword aligned if 64bit write, Dword
	 * aligned if only send 32bit data low (discard data high)
	 */
	if (write64bit)
		BUG_ON(addr & 0x7);
	else
		BUG_ON(addr & 0x3);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));
	amdgpu_ring_write(ring, upper_32_bits(seq));
	amdgpu_ring_write(ring, 0);
}
static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	gfx_v9_0_wait_reg_mem(ring, usepfp, 1, 0,
			      lower_32_bits(addr), upper_32_bits(addr),
			      seq, 0xffffffff, 4);
}

static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* compute doesn't have PFP */
	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
		/* sync PFP to ME, otherwise we might get invalid PFP reads */
		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
		amdgpu_ring_write(ring, 0x0);
	}
}
static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
{
	return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */
}

static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
{
	u64 wptr;

	/* XXX check if swapping is necessary on BE */
	if (ring->use_doorbell)
		wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
	else
		BUG();
	return wptr;
}

static void gfx_v9_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
					   bool acquire)
{
	struct amdgpu_device *adev = ring->adev;
	int pipe_num, tmp, reg;
	int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;

	pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;

	/* first me only has 2 entries, GFX and HP3D */
	if (ring->me > 0)
		pipe_num -= 2;

	reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX) + pipe_num;
	tmp = RREG32(reg);
	tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
	WREG32(reg, tmp);
}
static void gfx_v9_0_pipe_reserve_resources(struct amdgpu_device *adev,
					    struct amdgpu_ring *ring,
					    bool acquire)
{
	int i, pipe;
	bool reserve;
	struct amdgpu_ring *iring;

	mutex_lock(&adev->gfx.pipe_reserve_mutex);
	pipe = amdgpu_gfx_queue_to_bit(adev, ring->me, ring->pipe, 0);
	if (acquire)
		set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
	else
		clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);

	if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
		/* Clear all reservations - everyone reacquires all resources */
		for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
			gfx_v9_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
						       true);

		for (i = 0; i < adev->gfx.num_compute_rings; ++i)
			gfx_v9_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
						       true);
	} else {
		/* Lower all pipes without a current reservation */
		for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
			iring = &adev->gfx.gfx_ring[i];
			pipe = amdgpu_gfx_queue_to_bit(adev,
						       iring->me,
						       iring->pipe,
						       0);
			reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
			gfx_v9_0_ring_set_pipe_percent(iring, reserve);
		}

		for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
			iring = &adev->gfx.compute_ring[i];
			pipe = amdgpu_gfx_queue_to_bit(adev,
						       iring->me,
						       iring->pipe,
						       0);
			reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
			gfx_v9_0_ring_set_pipe_percent(iring, reserve);
		}
	}

	mutex_unlock(&adev->gfx.pipe_reserve_mutex);
}
static void gfx_v9_0_hqd_set_priority(struct amdgpu_device *adev,
				      struct amdgpu_ring *ring,
				      bool acquire)
{
	uint32_t pipe_priority = acquire ? 0x2 : 0x0;
	uint32_t queue_priority = acquire ? 0xf : 0x0;

	mutex_lock(&adev->srbm_mutex);
	soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);

	WREG32_SOC15(GC, 0, mmCP_HQD_PIPE_PRIORITY, pipe_priority);
	WREG32_SOC15(GC, 0, mmCP_HQD_QUEUE_PRIORITY, queue_priority);

	soc15_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static void gfx_v9_0_ring_set_priority_compute(struct amdgpu_ring *ring,
					       enum drm_sched_priority priority)
{
	struct amdgpu_device *adev = ring->adev;
	bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW;

	if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
		return;

	gfx_v9_0_hqd_set_priority(adev, ring, acquire);
	gfx_v9_0_pipe_reserve_resources(adev, ring, acquire);
}
static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* XXX check if swapping is necessary on BE */
	if (ring->use_doorbell) {
		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
		WDOORBELL64(ring->doorbell_index, ring->wptr);
	} else {
		BUG(); /* only DOORBELL method supported on gfx9 now */
	}
}
static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned int flags)
{
	struct amdgpu_device *adev = ring->adev;

	/* we only allocate 32bit for each seq wb address */
	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	/* write fence seq to the "addr" */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	if (flags & AMDGPU_FENCE_FLAG_INT) {
		/* set register to trigger INT */
		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
	}
}

static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
	amdgpu_ring_write(ring, 0);
}
static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
{
	struct v9_ce_ib_state ce_payload = {0};
	uint64_t csa_addr;
	int cnt;

	cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
	csa_addr = amdgpu_csa_vaddr(ring->adev);

	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
				 WRITE_DATA_DST_SEL(8) |
				 WR_CONFIRM) |
				 WRITE_DATA_CACHE_POLICY(0));
	amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
	amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
	amdgpu_ring_write_multiple(ring, (void *)&ce_payload, sizeof(ce_payload) >> 2);
}

static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
{
	struct v9_de_ib_state de_payload = {0};
	uint64_t csa_addr, gds_addr;
	int cnt;

	csa_addr = amdgpu_csa_vaddr(ring->adev);
	gds_addr = csa_addr + 4096;
	de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
	de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);

	cnt = (sizeof(de_payload) >> 2) + 4 - 2;
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
				 WRITE_DATA_DST_SEL(8) |
				 WR_CONFIRM) |
				 WRITE_DATA_CACHE_POLICY(0));
	amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
	amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
	amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
}
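/*
 * Editor's note: both payloads above live in the per-context save area
 * (CSA). The destination address is csa_addr plus the offsetof() of the
 * matching member in struct v9_gfx_meta_data, and the DE path additionally
 * points the GDS backup at csa_addr + 4096, exactly as coded above.
 */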
static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
	amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */
}

static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
{
	uint32_t dw2 = 0;

	if (amdgpu_sriov_vf(ring->adev))
		gfx_v9_0_ring_emit_ce_meta(ring);

	gfx_v9_0_ring_emit_tmz(ring, true);

	dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
		/* set load_global_config & load_global_uconfig */
		dw2 |= 0x8001;
		/* set load_cs_sh_regs */
		dw2 |= 0x01000000;
		/* set load_per_context_state & load_gfx_sh_regs for GFX */
		dw2 |= 0x10002;

		/* set load_ce_ram if preamble presented */
		if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
			dw2 |= 0x10000000;
	} else {
		/* still load_ce_ram if this is the first time preamble presented
		 * although there is no context switch happens.
		 */
		if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
			dw2 |= 0x10000000;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, dw2);
	amdgpu_ring_write(ring, 0);
}
static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
{
	unsigned ret;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */
	ret = ring->wptr & ring->buf_mask;
	amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
	return ret;
}

static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
{
	unsigned cur;

	BUG_ON(offset > ring->buf_mask);
	BUG_ON(ring->ring[offset] != 0x55aa55aa);

	cur = (ring->wptr & ring->buf_mask) - 1;
	if (likely(cur > offset))
		ring->ring[offset] = cur - offset;
	else
		ring->ring[offset] = (ring->ring_size >> 2) - offset + cur;
}
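/*
 * Worked example for the patch arithmetic above (editor's sketch, ring of
 * 1024 dwords, so ring_size >> 2 == 1024 and buf_mask == 1023): if the
 * COND_EXEC count slot was written at offset 1020 and the write pointer
 * has since wrapped so that cur == 3, the non-wrapped branch would
 * underflow, and the else branch yields 1024 - 1020 + 3 = 7 dwords to
 * skip. Without a wrap (offset = 10, cur = 20), cur - offset = 10.
 */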
static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
	amdgpu_ring_write(ring, 0 |	/* src: register*/
				(5 << 8) |	/* dst: memory */
				(1 << 20));	/* write confirm */
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
				adev->virt.reg_val_offs * 4));
	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
				adev->virt.reg_val_offs * 4));
}

static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				    uint32_t val)
{
	uint32_t cmd = 0;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
		break;
	case AMDGPU_RING_TYPE_KIQ:
		cmd = (1 << 16); /* no inc addr */
		break;
	default:
		cmd = WR_CONFIRM;
		break;
	}
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, cmd);
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}
static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					uint32_t val, uint32_t mask)
{
	gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
}

static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
						  uint32_t reg0, uint32_t reg1,
						  uint32_t ref, uint32_t mask)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
	struct amdgpu_device *adev = ring->adev;
	bool fw_version_ok = (ring->funcs->type == AMDGPU_RING_TYPE_GFX) ?
			      adev->gfx.me_fw_write_wait : adev->gfx.mec_fw_write_wait;

	if (fw_version_ok)
		gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
				      ref, mask, 0x20);
	else
		amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
							   ref, mask);
}

static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t value = 0;

	value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
	WREG32(mmSQ_CMD, value);
}
static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
						 enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       TIME_STAMP_INT_ENABLE,
			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}
}
static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
						     int me, int pipe,
						     enum amdgpu_interrupt_state state)
{
	u32 mec_int_cntl, mec_int_cntl_reg;

	/*
	 * amdgpu controls only the first MEC. That's why this function only
	 * handles the setting of interrupts for this specific MEC. All other
	 * pipes' interrupts are set by amdkfd.
	 */
	if (me == 1) {
		switch (pipe) {
		case 0:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
			break;
		case 1:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
			break;
		case 2:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
			break;
		case 3:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
			break;
		default:
			DRM_DEBUG("invalid pipe %d\n", pipe);
			return;
		}
	} else {
		DRM_DEBUG("invalid me %d\n", me);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		mec_int_cntl = RREG32(mec_int_cntl_reg);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     TIME_STAMP_INT_ENABLE, 0);
		WREG32(mec_int_cntl_reg, mec_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		mec_int_cntl = RREG32(mec_int_cntl_reg);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     TIME_STAMP_INT_ENABLE, 1);
		WREG32(mec_int_cntl_reg, mec_int_cntl);
		break;
	default:
		break;
	}
}
static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *source,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       PRIV_REG_INT_ENABLE,
			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}

	return 0;
}

static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      unsigned type,
					      enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       PRIV_INSTR_INT_ENABLE,
			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}

	return 0;
}
#define ENABLE_ECC_ON_ME_PIPE(me, pipe)				\
	WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
			CP_ECC_ERROR_INT_ENABLE, 1)

#define DISABLE_ECC_ON_ME_PIPE(me, pipe)			\
	WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
			CP_ECC_ERROR_INT_ENABLE, 0)

static int gfx_v9_0_set_cp_ecc_error_state(struct amdgpu_device *adev,
					   struct amdgpu_irq_src *source,
					   unsigned type,
					   enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       CP_ECC_ERROR_INT_ENABLE, 0);
		DISABLE_ECC_ON_ME_PIPE(1, 0);
		DISABLE_ECC_ON_ME_PIPE(1, 1);
		DISABLE_ECC_ON_ME_PIPE(1, 2);
		DISABLE_ECC_ON_ME_PIPE(1, 3);
		break;

	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       CP_ECC_ERROR_INT_ENABLE, 1);
		ENABLE_ECC_ON_ME_PIPE(1, 0);
		ENABLE_ECC_ON_ME_PIPE(1, 1);
		ENABLE_ECC_ON_ME_PIPE(1, 2);
		ENABLE_ECC_ON_ME_PIPE(1, 3);
		break;
	default:
		break;
	}

	return 0;
}
static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CP_IRQ_GFX_EOP:
		gfx_v9_0_set_gfx_eop_interrupt_state(adev, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
		break;
	default:
		break;
	}
	return 0;
}
static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	int i;
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;

	DRM_DEBUG("IH: CP EOP\n");
	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	switch (me_id) {
	case 0:
		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			/* Per-queue interrupt is supported for MEC starting from VI.
			 * The interrupt can only be enabled/disabled per pipe instead of per queue.
			 */
			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
				amdgpu_fence_process(ring);
		}
		break;
	}
	return 0;
}
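/*
 * Decoding example for the ring_id fields above (editor's sketch): for a
 * hypothetical entry->ring_id of 0x25, me_id = (0x25 & 0x0c) >> 2 = 1,
 * pipe_id = 0x25 & 0x03 = 1 and queue_id = (0x25 & 0x70) >> 4 = 2, i.e.
 * the EOP came from MEC1, pipe 1, queue 2.
 */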
static void gfx_v9_0_fault(struct amdgpu_device *adev,
			   struct amdgpu_iv_entry *entry)
{
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;
	int i;

	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	switch (me_id) {
	case 0:
		drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			if (ring->me == me_id && ring->pipe == pipe_id &&
			    ring->queue == queue_id)
				drm_sched_fault(&ring->sched);
		}
		break;
	}
}

static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal register access in command stream\n");
	gfx_v9_0_fault(adev, entry);
	return 0;
}

static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in command stream\n");
	gfx_v9_0_fault(adev, entry);
	return 0;
}
static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
					struct amdgpu_iv_entry *entry)
{
	/* TODO ue will trigger an interrupt. */
	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
	amdgpu_ras_reset_gpu(adev, 0);
	return AMDGPU_RAS_UE;
}

static int gfx_v9_0_cp_ecc_error_irq(struct amdgpu_device *adev,
				     struct amdgpu_irq_src *source,
				     struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->gfx.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;

	DRM_ERROR("CP ECC ERROR IRQ\n");
	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
}
static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
	.name = "gfx_v9_0",
	.early_init = gfx_v9_0_early_init,
	.late_init = gfx_v9_0_late_init,
	.sw_init = gfx_v9_0_sw_init,
	.sw_fini = gfx_v9_0_sw_fini,
	.hw_init = gfx_v9_0_hw_init,
	.hw_fini = gfx_v9_0_hw_fini,
	.suspend = gfx_v9_0_suspend,
	.resume = gfx_v9_0_resume,
	.is_idle = gfx_v9_0_is_idle,
	.wait_for_idle = gfx_v9_0_wait_for_idle,
	.soft_reset = gfx_v9_0_soft_reset,
	.set_clockgating_state = gfx_v9_0_set_clockgating_state,
	.set_powergating_state = gfx_v9_0_set_powergating_state,
	.get_clockgating_state = gfx_v9_0_get_clockgating_state,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
	.type = AMDGPU_RING_TYPE_GFX,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB,
	.get_rptr = gfx_v9_0_ring_get_rptr_gfx,
	.get_wptr = gfx_v9_0_ring_get_wptr_gfx,
	.set_wptr = gfx_v9_0_ring_set_wptr_gfx,
	.emit_frame_size = /* totally 242 maximum if 16 IBs */
		5 +  /* COND_EXEC */
		7 +  /* PIPELINE_SYNC */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* VM_FLUSH */
		8 +  /* FENCE for VM_FLUSH */
		20 + /* GDS switch */
		4 + /* double SWITCH_BUFFER,
		     * the first COND_EXEC jump to the place just
		     * prior to this double SWITCH_BUFFER
		     */
		5 + /* COND_EXEC */
		7 +  /* HDP_flush */
		4 +  /* VGT_flush */
		14 + /* CE_META */
		31 + /* DE_META */
		3 + /* CNTX_CTRL */
		5 + /* HDP_INVL */
		8 + 8 + /* FENCE x2 */
		2, /* SWITCH_BUFFER */
	.emit_ib_size =	4, /* gfx_v9_0_ring_emit_ib_gfx */
	.emit_ib = gfx_v9_0_ring_emit_ib_gfx,
	.emit_fence = gfx_v9_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
	.test_ring = gfx_v9_0_ring_test_ring,
	.test_ib = gfx_v9_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_switch_buffer = gfx_v9_ring_emit_sb,
	.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
	.init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
	.patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
	.emit_tmz = gfx_v9_0_ring_emit_tmz,
	.emit_wreg = gfx_v9_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
	.soft_recovery = gfx_v9_0_ring_soft_recovery,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
	.type = AMDGPU_RING_TYPE_COMPUTE,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB,
	.get_rptr = gfx_v9_0_ring_get_rptr_compute,
	.get_wptr = gfx_v9_0_ring_get_wptr_compute,
	.set_wptr = gfx_v9_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v9_0_ring_emit_gds_switch */
		7 + /* gfx_v9_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v9_0_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v9_0_ring_emit_vm_flush */
		8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size =	7, /* gfx_v9_0_ring_emit_ib_compute */
	.emit_ib = gfx_v9_0_ring_emit_ib_compute,
	.emit_fence = gfx_v9_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
	.test_ring = gfx_v9_0_ring_test_ring,
	.test_ib = gfx_v9_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.set_priority = gfx_v9_0_ring_set_priority_compute,
	.emit_wreg = gfx_v9_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
	.type = AMDGPU_RING_TYPE_KIQ,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB,
	.get_rptr = gfx_v9_0_ring_get_rptr_compute,
	.get_wptr = gfx_v9_0_ring_get_wptr_compute,
	.set_wptr = gfx_v9_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v9_0_ring_emit_gds_switch */
		7 + /* gfx_v9_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v9_0_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v9_0_ring_emit_vm_flush */
		8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
	.emit_ib_size =	7, /* gfx_v9_0_ring_emit_ib_compute */
	.emit_fence = gfx_v9_0_ring_emit_fence_kiq,
	.test_ring = gfx_v9_0_ring_test_ring,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_rreg = gfx_v9_0_ring_emit_rreg,
	.emit_wreg = gfx_v9_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
};
static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	adev->gfx.kiq.ring.funcs = &gfx_v9_0_ring_funcs_kiq;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;

	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
}
static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = {
	.set = gfx_v9_0_set_eop_interrupt_state,
	.process = gfx_v9_0_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = {
	.set = gfx_v9_0_set_priv_reg_fault_state,
	.process = gfx_v9_0_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
	.set = gfx_v9_0_set_priv_inst_fault_state,
	.process = gfx_v9_0_priv_inst_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_cp_ecc_error_irq_funcs = {
	.set = gfx_v9_0_set_cp_ecc_error_state,
	.process = gfx_v9_0_cp_ecc_error_irq,
};
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v9_0_eop_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;

	adev->gfx.cp_ecc_error_irq.num_types = 2; /*C5 ECC error and C9 FUE error*/
	adev->gfx.cp_ecc_error_irq.funcs = &gfx_v9_0_cp_ecc_error_irq_funcs;
}
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
		adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
		break;
	default:
		break;
	}
}
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
{
	/* init asic gds info */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->gds.mem.total_size = 0x10000;
		break;
	case CHIP_RAVEN:
		adev->gds.mem.total_size = 0x1000;
		break;
	default:
		adev->gds.mem.total_size = 0x10000;
		break;
	}

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA20:
		adev->gds.gds_compute_max_wave_id = 0x7ff;
		break;
	case CHIP_VEGA12:
		adev->gds.gds_compute_max_wave_id = 0x27f;
		break;
	case CHIP_RAVEN:
		if (adev->rev_id >= 0x8)
			adev->gds.gds_compute_max_wave_id = 0x77; /* raven2 */
		else
			adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */
		break;
	default:
		/* this really depends on the chip */
		adev->gds.gds_compute_max_wave_id = 0x7ff;
		break;
	}

	adev->gds.gws.total_size = 64;
	adev->gds.oa.total_size = 16;

	if (adev->gds.mem.total_size == 64 * 1024) {
		adev->gds.mem.gfx_partition_size = 4096;
		adev->gds.mem.cs_partition_size = 4096;

		adev->gds.gws.gfx_partition_size = 4;
		adev->gds.gws.cs_partition_size = 4;

		adev->gds.oa.gfx_partition_size = 4;
		adev->gds.oa.cs_partition_size = 1;
	} else {
		adev->gds.mem.gfx_partition_size = 1024;
		adev->gds.mem.cs_partition_size = 1024;

		adev->gds.gws.gfx_partition_size = 16;
		adev->gds.gws.cs_partition_size = 16;

		adev->gds.oa.gfx_partition_size = 4;
		adev->gds.oa.cs_partition_size = 4;
	}
}
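/*
 * Editor's summary of the partitioning above: a 64KB GDS (the Vega parts)
 * is split into 4KB gfx/CS memory partitions with 4 GWS slots each and
 * 4/1 OA slots, while the smaller 4KB Raven GDS gets 1KB memory
 * partitions, 16 GWS slots each and 4/4 OA slots.
 */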
static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
						 u32 bitmap)
{
	u32 data;

	if (!bitmap)
		return;

	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;

	WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
}

static u32 gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
	data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);

	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);

	return (~data) & mask;
}
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
				struct amdgpu_cu_info *cu_info)
{
	int i, j, k, counter, active_cu_number = 0;
	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
	unsigned disable_masks[4 * 2];

	if (!adev || !cu_info)
		return -EINVAL;

	amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			mask = 1;
			ao_bitmap = 0;
			counter = 0;
			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
			if (i < 4 && j < 2)
				gfx_v9_0_set_user_cu_inactive_bitmap(
					adev, disable_masks[i * 2 + j]);
			bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
			cu_info->bitmap[i][j] = bitmap;

			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
				if (bitmap & mask) {
					if (counter < adev->gfx.config.max_cu_per_sh)
						ao_bitmap |= mask;
					counter++;
				}
				mask <<= 1;
			}
			active_cu_number += counter;
			if (i < 2 && j < 2)
				ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
			cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
		}
	}
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->ao_cu_mask = ao_cu_mask;
	cu_info->simd_per_cu = NUM_SIMD_PER_CU;

	return 0;
}
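/*
 * Worked example for the counting loop above (editor's sketch): a shader
 * array with bitmap = 0b1011 has CUs 0, 1 and 3 active, so counter ends at
 * 3 and, since counter stays below max_cu_per_sh, ao_bitmap also ends up
 * as 0b1011; the per-array ao_bitmap is then packed into ao_cu_mask at
 * bit position (i * 16 + j * 8).
 */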
const struct amdgpu_ip_block_version gfx_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gfx_v9_0_ip_funcs,
};