/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_atomfirmware.h"

#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "vega10_enum.h"
#include "hdp/hdp_4_0_offset.h"

#include "soc15_common.h"
#include "clearstate_gfx9.h"
#include "v9_structs.h"

#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"

#include "amdgpu_ras.h"

#define GFX9_NUM_GFX_RINGS     1
#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L

#define mmPWR_MISC_CNTL_STATUS                                  0x0183
#define mmPWR_MISC_CNTL_STATUS_BASE_IDX                         0
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT        0x0
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT          0x1
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK          0x00000001L
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK            0x00000006L

MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega10_me.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega10_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega12_ce.bin");
MODULE_FIRMWARE("amdgpu/vega12_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega12_me.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega12_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega20_ce.bin");
MODULE_FIRMWARE("amdgpu/vega20_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega20_me.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega20_rlc.bin");

MODULE_FIRMWARE("amdgpu/raven_ce.bin");
MODULE_FIRMWARE("amdgpu/raven_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven_me.bin");
MODULE_FIRMWARE("amdgpu/raven_mec.bin");
MODULE_FIRMWARE("amdgpu/raven_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven_rlc.bin");

MODULE_FIRMWARE("amdgpu/picasso_ce.bin");
MODULE_FIRMWARE("amdgpu/picasso_pfp.bin");
MODULE_FIRMWARE("amdgpu/picasso_me.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec2.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc_am4.bin");

MODULE_FIRMWARE("amdgpu/raven2_ce.bin");
MODULE_FIRMWARE("amdgpu/raven2_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven2_me.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven2_rlc.bin");

static const struct soc15_reg_golden golden_settings_gc_9_0[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x80000000, 0x80000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x0f000080, 0x04000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x22014042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0x00003e00, 0x00000400),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xff840000, 0x04040000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00030000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff010f, 0x01000107),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x000b0000, 0x000b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01000000, 0x01000000)
};

static const struct soc15_reg_golden golden_settings_gc_9_1[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24000042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24000042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04048000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_MODE_CNTL_1, 0x06000000, 0x06000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x00000800)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv2[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0xff7fffff, 0x04000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x7f0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0xff8fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x7f8fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x26013041),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x26013041),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0xff0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0xff0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0xff0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0xff0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0xff0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
};

static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0xffffffff, 0x000001ff),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x00000080, 0x04000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24104041),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24104041),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
};

static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
{
        mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_1 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_2 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_3 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_4 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_5 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_6 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_7 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
};

static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
{
        mmRLC_SRM_INDEX_CNTL_DATA_0 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_1 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_2 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_3 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_4 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_5 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_6 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
};

#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
#define RAVEN2_GB_ADDR_CONFIG_GOLDEN 0x26013041

static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
                                struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);

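/*
 * Program the "golden" register settings: a per-family sequence plus a
 * chip-specific delta selected by asic_type, followed by the settings
 * common to all GC 9.x parts.
 */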
static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
        switch (adev->asic_type) {
        case CHIP_VEGA10:
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_0,
                                                ARRAY_SIZE(golden_settings_gc_9_0));
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_0_vg10,
                                                ARRAY_SIZE(golden_settings_gc_9_0_vg10));
                break;
        case CHIP_VEGA12:
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_2_1,
                                                ARRAY_SIZE(golden_settings_gc_9_2_1));
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_2_1_vg12,
                                                ARRAY_SIZE(golden_settings_gc_9_2_1_vg12));
                break;
        case CHIP_VEGA20:
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_0,
                                                ARRAY_SIZE(golden_settings_gc_9_0));
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_0_vg20,
                                                ARRAY_SIZE(golden_settings_gc_9_0_vg20));
                break;
        case CHIP_RAVEN:
                soc15_program_register_sequence(adev, golden_settings_gc_9_1,
                                                ARRAY_SIZE(golden_settings_gc_9_1));
                if (adev->rev_id >= 8)
                        soc15_program_register_sequence(adev,
                                                        golden_settings_gc_9_1_rv2,
                                                        ARRAY_SIZE(golden_settings_gc_9_1_rv2));
                else
                        soc15_program_register_sequence(adev,
                                                        golden_settings_gc_9_1_rv1,
                                                        ARRAY_SIZE(golden_settings_gc_9_1_rv1));
                break;
        default:
                break;
        }

        soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
                                        (const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}

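/* Expose eight SCRATCH_REG* registers for the ring and IB tests below. */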
static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
{
        adev->gfx.scratch.num_reg = 8;
        adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
        adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}

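/*
 * Emit a WRITE_DATA packet that writes @val to the register @reg,
 * optionally requesting a write confirmation (@wc).
 */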
static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
                                       bool wc, uint32_t reg, uint32_t val)
{
        amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
        amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
                                WRITE_DATA_DST_SEL(0) |
                                (wc ? WR_CONFIRM : 0));
        amdgpu_ring_write(ring, reg);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, val);
}

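/*
 * Emit a WAIT_REG_MEM packet: poll a register or memory location
 * (selected by @mem_space) until (value & @mask) == @ref, rechecking
 * every @inv wait intervals.
 */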
static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
                                  int mem_space, int opt, uint32_t addr0,
                                  uint32_t addr1, uint32_t ref, uint32_t mask,
                                  uint32_t inv)
{
        amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
        amdgpu_ring_write(ring,
                          /* memory (1) or register (0) */
                          (WAIT_REG_MEM_MEM_SPACE(mem_space) |
                           WAIT_REG_MEM_OPERATION(opt) | /* wait */
                           WAIT_REG_MEM_FUNCTION(3) |  /* equal */
                           WAIT_REG_MEM_ENGINE(eng_sel)));

        if (mem_space)
                BUG_ON(addr0 & 0x3); /* Dword align */
        amdgpu_ring_write(ring, addr0);
        amdgpu_ring_write(ring, addr1);
        amdgpu_ring_write(ring, ref);
        amdgpu_ring_write(ring, mask);
        amdgpu_ring_write(ring, inv); /* poll interval */
}

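/*
 * Basic ring test: push 0xDEADBEEF into a scratch register through the
 * ring and poll until the value lands or the timeout expires.
 */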
static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t scratch;
        uint32_t tmp = 0;
        unsigned i;
        int r;

        r = amdgpu_gfx_scratch_get(adev, &scratch);
        if (r)
                return r;

        WREG32(scratch, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r)
                goto error_free_scratch;

        amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
        amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);

        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(scratch);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }

        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;

error_free_scratch:
        amdgpu_gfx_scratch_free(adev, scratch);
        return r;
}

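/*
 * Basic IB test: submit an indirect buffer that writes 0xDEADBEEF to a
 * writeback slot, wait on the fence, then verify the value arrived.
 */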
static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ib ib;
        struct dma_fence *f = NULL;
        unsigned index;
        uint64_t gpu_addr;
        uint32_t tmp;
        long r;

        r = amdgpu_device_wb_get(adev, &index);
        if (r)
                return r;

        gpu_addr = adev->wb.gpu_addr + (index * 4);
        adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
        memset(&ib, 0, sizeof(ib));
        r = amdgpu_ib_get(adev, NULL, 16, &ib);
        if (r)
                goto err1;

        ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
        ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
        ib.ptr[2] = lower_32_bits(gpu_addr);
        ib.ptr[3] = upper_32_bits(gpu_addr);
        ib.ptr[4] = 0xDEADBEEF;
        ib.length_dw = 5;

        r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
        if (r)
                goto err2;

        r = dma_fence_wait_timeout(f, false, timeout);
        if (r == 0) {
                r = -ETIMEDOUT;
                goto err2;
        } else if (r < 0) {
                goto err2;
        }

        tmp = adev->wb.wb[index];
        if (tmp == 0xDEADBEEF)
                r = 0;
        else
                r = -EINVAL;

err2:
        amdgpu_ib_free(adev, &ib, NULL);
        dma_fence_put(f);
err1:
        amdgpu_device_wb_free(adev, index);
        return r;
}

static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
{
        release_firmware(adev->gfx.pfp_fw);
        adev->gfx.pfp_fw = NULL;
        release_firmware(adev->gfx.me_fw);
        adev->gfx.me_fw = NULL;
        release_firmware(adev->gfx.ce_fw);
        adev->gfx.ce_fw = NULL;
        release_firmware(adev->gfx.rlc_fw);
        adev->gfx.rlc_fw = NULL;
        release_firmware(adev->gfx.mec_fw);
        adev->gfx.mec_fw = NULL;
        release_firmware(adev->gfx.mec2_fw);
        adev->gfx.mec2_fw = NULL;

        kfree(adev->gfx.rlc.register_list_format);
}

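/*
 * Parse the extra save/restore list sections (CNTL, GPM and SRM)
 * carried by v2.1 RLC firmware headers.
 */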
static void gfx_v9_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
{
        const struct rlc_firmware_header_v2_1 *rlc_hdr;

        rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
        adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
        adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
        adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
        adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
        adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
        adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
        adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
        adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
        adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
        adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
        adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
        adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
        adev->gfx.rlc.reg_list_format_direct_reg_list_length =
                        le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
}

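/*
 * Record whether the installed ME/PFP/MEC firmware is recent enough to
 * handle register write-and-wait packets; the minimum ucode and feature
 * versions differ per ASIC.
 */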
static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
{
        adev->gfx.me_fw_write_wait = false;
        adev->gfx.mec_fw_write_wait = false;

        switch (adev->asic_type) {
        case CHIP_VEGA10:
                if ((adev->gfx.me_fw_version >= 0x0000009c) &&
                    (adev->gfx.me_feature_version >= 42) &&
                    (adev->gfx.pfp_fw_version >= 0x000000b1) &&
                    (adev->gfx.pfp_feature_version >= 42))
                        adev->gfx.me_fw_write_wait = true;

                if ((adev->gfx.mec_fw_version >= 0x00000193) &&
                    (adev->gfx.mec_feature_version >= 42))
                        adev->gfx.mec_fw_write_wait = true;
                break;
        case CHIP_VEGA12:
                if ((adev->gfx.me_fw_version >= 0x0000009c) &&
                    (adev->gfx.me_feature_version >= 44) &&
                    (adev->gfx.pfp_fw_version >= 0x000000b2) &&
                    (adev->gfx.pfp_feature_version >= 44))
                        adev->gfx.me_fw_write_wait = true;

                if ((adev->gfx.mec_fw_version >= 0x00000196) &&
                    (adev->gfx.mec_feature_version >= 44))
                        adev->gfx.mec_fw_write_wait = true;
                break;
        case CHIP_VEGA20:
                if ((adev->gfx.me_fw_version >= 0x0000009c) &&
                    (adev->gfx.me_feature_version >= 44) &&
                    (adev->gfx.pfp_fw_version >= 0x000000b2) &&
                    (adev->gfx.pfp_feature_version >= 44))
                        adev->gfx.me_fw_write_wait = true;

                if ((adev->gfx.mec_fw_version >= 0x00000197) &&
                    (adev->gfx.mec_feature_version >= 44))
                        adev->gfx.mec_fw_write_wait = true;
                break;
        case CHIP_RAVEN:
                if ((adev->gfx.me_fw_version >= 0x0000009c) &&
                    (adev->gfx.me_feature_version >= 42) &&
                    (adev->gfx.pfp_fw_version >= 0x000000b1) &&
                    (adev->gfx.pfp_feature_version >= 42))
                        adev->gfx.me_fw_write_wait = true;

                if ((adev->gfx.mec_fw_version >= 0x00000192) &&
                    (adev->gfx.mec_feature_version >= 42))
                        adev->gfx.mec_fw_write_wait = true;
                break;
        default:
                break;
        }
}

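/*
 * GFXOFF support: on original Raven, clear PP_GFXOFF_MASK when the RLC
 * firmware is too old (or is the known-bad 53815 build) to support it.
 */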
static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
{
        switch (adev->asic_type) {
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
                break;
        case CHIP_RAVEN:
                if (adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
                        break;
                if ((adev->gfx.rlc_fw_version < 531) ||
                    (adev->gfx.rlc_fw_version == 53815) ||
                    (adev->gfx.rlc_feature_version < 1) ||
                    !adev->gfx.rlc.is_rlc_v2_1)
                        adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
                break;
        default:
                break;
        }
}

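/*
 * Fetch and validate all GFX microcode (PFP, ME, CE, RLC, MEC and the
 * optional MEC2) for the detected chip, then register the images with
 * the PSP front-door loader when that load path is in use.
 */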
static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
{
        const char *chip_name;
        char fw_name[30];
        int err;
        struct amdgpu_firmware_info *info = NULL;
        const struct common_firmware_header *header = NULL;
        const struct gfx_firmware_header_v1_0 *cp_hdr;
        const struct rlc_firmware_header_v2_0 *rlc_hdr;
        unsigned int *tmp = NULL;
        unsigned int i = 0;
        uint16_t version_major;
        uint16_t version_minor;

        DRM_DEBUG("\n");

        switch (adev->asic_type) {
        case CHIP_VEGA10:
                chip_name = "vega10";
                break;
        case CHIP_VEGA12:
                chip_name = "vega12";
                break;
        case CHIP_VEGA20:
                chip_name = "vega20";
                break;
        case CHIP_RAVEN:
                if (adev->rev_id >= 8)
                        chip_name = "raven2";
                else if (adev->pdev->device == 0x15d8)
                        chip_name = "picasso";
                else
                        chip_name = "raven";
                break;
        default:
                BUG();
        }

        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
        err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
        if (err)
                goto out;
        err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
        if (err)
                goto out;
        cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
        adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
        adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
        err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
        if (err)
                goto out;
        err = amdgpu_ucode_validate(adev->gfx.me_fw);
        if (err)
                goto out;
        cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
        adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
        adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
        err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
        if (err)
                goto out;
        err = amdgpu_ucode_validate(adev->gfx.ce_fw);
        if (err)
                goto out;
        cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
        adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
        adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

        /*
         * For Picasso on an AM4 socket board, use picasso_rlc_am4.bin
         * instead of picasso_rlc.bin. PCO AM4 is identified by a PCI
         * revision in 0xC8-0xCF or 0xD8-0xDF; anything else is PCO FP5.
         */
        if (!strcmp(chip_name, "picasso") &&
                (((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
                ((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
                snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc_am4.bin", chip_name);
        else
                snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
        err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
        if (err)
                goto out;
        err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
        if (err)
                goto out;
        rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;

        version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
        version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
        if (version_major == 2 && version_minor == 1)
                adev->gfx.rlc.is_rlc_v2_1 = true;

        adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
        adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
        adev->gfx.rlc.save_and_restore_offset =
                        le32_to_cpu(rlc_hdr->save_and_restore_offset);
        adev->gfx.rlc.clear_state_descriptor_offset =
                        le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
        adev->gfx.rlc.avail_scratch_ram_locations =
                        le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
        adev->gfx.rlc.reg_restore_list_size =
                        le32_to_cpu(rlc_hdr->reg_restore_list_size);
        adev->gfx.rlc.reg_list_format_start =
                        le32_to_cpu(rlc_hdr->reg_list_format_start);
        adev->gfx.rlc.reg_list_format_separate_start =
                        le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
        adev->gfx.rlc.starting_offsets_start =
                        le32_to_cpu(rlc_hdr->starting_offsets_start);
        adev->gfx.rlc.reg_list_format_size_bytes =
                        le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
        adev->gfx.rlc.reg_list_size_bytes =
                        le32_to_cpu(rlc_hdr->reg_list_size_bytes);
        adev->gfx.rlc.register_list_format =
                        kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
                                adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
        if (!adev->gfx.rlc.register_list_format) {
                err = -ENOMEM;
                goto out;
        }

        tmp = (unsigned int *)((uintptr_t)rlc_hdr +
                        le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
        for (i = 0 ; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++)
                adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);

        adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;

        tmp = (unsigned int *)((uintptr_t)rlc_hdr +
                        le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
        for (i = 0 ; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++)
                adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);

        if (adev->gfx.rlc.is_rlc_v2_1)
                gfx_v9_0_init_rlc_ext_microcode(adev);

        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
        err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
        if (err)
                goto out;
        err = amdgpu_ucode_validate(adev->gfx.mec_fw);
        if (err)
                goto out;
        cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
        adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
        adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
        err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
        if (!err) {
                err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
                if (err)
                        goto out;
                cp_hdr = (const struct gfx_firmware_header_v1_0 *)
                        adev->gfx.mec2_fw->data;
                adev->gfx.mec2_fw_version =
                        le32_to_cpu(cp_hdr->header.ucode_version);
                adev->gfx.mec2_feature_version =
                        le32_to_cpu(cp_hdr->ucode_feature_version);
        } else {
                err = 0;
                adev->gfx.mec2_fw = NULL;
        }

        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
                info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
                info->fw = adev->gfx.pfp_fw;
                header = (const struct common_firmware_header *)info->fw->data;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

                info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
                info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
                info->fw = adev->gfx.me_fw;
                header = (const struct common_firmware_header *)info->fw->data;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

                info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
                info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
                info->fw = adev->gfx.ce_fw;
                header = (const struct common_firmware_header *)info->fw->data;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

                info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
                info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
                info->fw = adev->gfx.rlc_fw;
                header = (const struct common_firmware_header *)info->fw->data;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

                if (adev->gfx.rlc.is_rlc_v2_1 &&
                    adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
                    adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
                    adev->gfx.rlc.save_restore_list_srm_size_bytes) {
                        info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
                        info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
                        info->fw = adev->gfx.rlc_fw;
                        adev->firmware.fw_size +=
                                ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);

                        info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
                        info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
                        info->fw = adev->gfx.rlc_fw;
                        adev->firmware.fw_size +=
                                ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);

                        info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
                        info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
                        info->fw = adev->gfx.rlc_fw;
                        adev->firmware.fw_size +=
                                ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
                }

                info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
                info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
                info->fw = adev->gfx.mec_fw;
                header = (const struct common_firmware_header *)info->fw->data;
                cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

                info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
                info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
                info->fw = adev->gfx.mec_fw;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

                if (adev->gfx.mec2_fw) {
                        info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
                        info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
                        info->fw = adev->gfx.mec2_fw;
                        header = (const struct common_firmware_header *)info->fw->data;
                        cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
                        adev->firmware.fw_size +=
                                ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
                        info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
                        info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
                        info->fw = adev->gfx.mec2_fw;
                        adev->firmware.fw_size +=
                                ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
                }
        }

out:
        gfx_v9_0_check_if_need_gfxoff(adev);
        gfx_v9_0_check_fw_write_wait(adev);
        if (err) {
                dev_err(adev->dev,
                        "gfx9: Failed to load firmware \"%s\"\n",
                        fw_name);
                release_firmware(adev->gfx.pfp_fw);
                adev->gfx.pfp_fw = NULL;
                release_firmware(adev->gfx.me_fw);
                adev->gfx.me_fw = NULL;
                release_firmware(adev->gfx.ce_fw);
                adev->gfx.ce_fw = NULL;
                release_firmware(adev->gfx.rlc_fw);
                adev->gfx.rlc_fw = NULL;
                release_firmware(adev->gfx.mec_fw);
                adev->gfx.mec_fw = NULL;
                release_firmware(adev->gfx.mec2_fw);
                adev->gfx.mec2_fw = NULL;
        }
        return err;
}

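/*
 * Size, in dwords, of the clear-state buffer built by
 * gfx_v9_0_get_csb_buffer(): preamble, context control, the
 * SECT_CONTEXT register extents and the trailing clear-state packets.
 */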
static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
{
        u32 count = 0;
        const struct cs_section_def *sect = NULL;
        const struct cs_extent_def *ext = NULL;

        /* begin clear state */
        count += 2;
        /* context control state */
        count += 3;

        for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
                for (ext = sect->section; ext->extent != NULL; ++ext) {
                        if (sect->id == SECT_CONTEXT)
                                count += 2 + ext->reg_count;
                        else
                                return 0;
                }
        }

        /* end clear state */
        count += 2;
        /* clear state */
        count += 2;

        return count;
}

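/* Build the clear-state indirect buffer from the gfx9 cs_data table. */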
static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
                                    volatile u32 *buffer)
{
        u32 count = 0, i;
        const struct cs_section_def *sect = NULL;
        const struct cs_extent_def *ext = NULL;

        if (adev->gfx.rlc.cs_data == NULL)
                return;
        if (buffer == NULL)
                return;

        buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
        buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

        buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
        buffer[count++] = cpu_to_le32(0x80000000);
        buffer[count++] = cpu_to_le32(0x80000000);

        for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
                for (ext = sect->section; ext->extent != NULL; ++ext) {
                        if (sect->id == SECT_CONTEXT) {
                                buffer[count++] =
                                        cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
                                buffer[count++] = cpu_to_le32(ext->reg_index -
                                                PACKET3_SET_CONTEXT_REG_START);
                                for (i = 0; i < ext->reg_count; i++)
                                        buffer[count++] = cpu_to_le32(ext->extent[i]);
                        } else {
                                return;
                        }
                }
        }

        buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
        buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

        buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
        buffer[count++] = cpu_to_le32(0);
}

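/*
 * Select the CUs that must stay powered per SE/SH and program the RLC
 * always-active and PG always-on CU masks: APUs keep 4 CUs on, Vega12
 * keeps 8, other dGPUs keep 12; the first 2 also stay on across
 * powergating.
 */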
static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
{
        struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
        uint32_t pg_always_on_cu_num = 2;
        uint32_t always_on_cu_num;
        uint32_t i, j, k;
        uint32_t mask, cu_bitmap, counter;

        if (adev->flags & AMD_IS_APU)
                always_on_cu_num = 4;
        else if (adev->asic_type == CHIP_VEGA12)
                always_on_cu_num = 8;
        else
                always_on_cu_num = 12;

        mutex_lock(&adev->grbm_idx_mutex);
        for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
                for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
                        mask = 1;
                        cu_bitmap = 0;
                        counter = 0;
                        gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);

                        for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
                                if (cu_info->bitmap[i][j] & mask) {
                                        if (counter == pg_always_on_cu_num)
                                                WREG32_SOC15(GC, 0, mmRLC_PG_ALWAYS_ON_CU_MASK, cu_bitmap);
                                        if (counter < always_on_cu_num)
                                                cu_bitmap |= mask;
                                        else
                                                break;
                                        counter++;
                                }
                                mask <<= 1;
                        }

                        WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, cu_bitmap);
                        cu_info->ao_cu_bitmap[i][j] = cu_bitmap;
                }
        }
        gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
}

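/*
 * Program the RLC load-balancing-per-watt (LBPW) thresholds, counters
 * and CU masks; used on Raven (see gfx_v9_0_rlc_init()).
 */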
static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
{
        uint32_t data;

        /* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
        WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
        WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x0333A5A7);
        WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
        WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x30 | 0x40 << 8 | 0x02FA << 16));

        /* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
        WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);

        /* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
        WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000500);

        mutex_lock(&adev->grbm_idx_mutex);
        /* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
        gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);

        /* set mmRLC_LB_PARAMS = 0x003F_1006 */
        data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
        data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
        data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
        WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);

        /* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
        data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
        data &= 0x0000FFFF;
        data |= 0x00C00000;
        WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);

        /*
         * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xF (4 CUs AON for Raven),
         * programmed in gfx_v9_0_init_always_on_cu_mask()
         */

        /* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved but is
         * used here as part of the RLC_LB_CNTL configuration */
        data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
        data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
        data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
        WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
        mutex_unlock(&adev->grbm_idx_mutex);

        gfx_v9_0_init_always_on_cu_mask(adev);
}

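/* Vega20 variant of the LBPW setup with its own threshold values. */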
static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev)
{
        uint32_t data;

        /* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
        WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
        WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x033388F8);
        WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
        WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x10 | 0x27 << 8 | 0x02FA << 16));

        /* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
        WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);

        /* set mmRLC_LB_CNTR_MAX = 0x0000_0800 */
        WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000800);

        mutex_lock(&adev->grbm_idx_mutex);
        /* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
        gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);

        /* set mmRLC_LB_PARAMS = 0x003F_1006 */
        data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
        data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
        data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
        WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);

        /* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
        data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
        data &= 0x0000FFFF;
        data |= 0x00C00000;
        WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);

        /*
         * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF (12 CUs AON),
         * programmed in gfx_v9_0_init_always_on_cu_mask()
         */

        /* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved but is
         * used here as part of the RLC_LB_CNTL configuration */
        data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
        data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
        data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
        WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
        mutex_unlock(&adev->grbm_idx_mutex);

        gfx_v9_0_init_always_on_cu_mask(adev);
}

static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
{
        WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
}

static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev)
{
        return 5;
}

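/*
 * RLC bring-up: create the clear-state buffer, allocate the cp jump
 * table on Raven, and program LBPW where supported.
 */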
1097 static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
1098 {
1099         const struct cs_section_def *cs_data;
1100         int r;
1101
1102         adev->gfx.rlc.cs_data = gfx9_cs_data;
1103
1104         cs_data = adev->gfx.rlc.cs_data;
1105
1106         if (cs_data) {
1107                 /* init clear state block */
1108                 r = amdgpu_gfx_rlc_init_csb(adev);
1109                 if (r)
1110                         return r;
1111         }
1112
1113         if (adev->asic_type == CHIP_RAVEN) {
1114                 /* TODO: double check the cp_table_size for RV */
1115                 adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
1116                 r = amdgpu_gfx_rlc_init_cpt(adev);
1117                 if (r)
1118                         return r;
1119         }
1120
1121         switch (adev->asic_type) {
1122         case CHIP_RAVEN:
1123                 gfx_v9_0_init_lbpw(adev);
1124                 break;
1125         case CHIP_VEGA20:
1126                 gfx_v9_4_init_lbpw(adev);
1127                 break;
1128         default:
1129                 break;
1130         }
1131
1132         return 0;
1133 }
1134
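/*
 * Pin the clear-state BO into VRAM and cache its GPU address; the
 * CSIB registers are later pointed at this address (see
 * gfx_v9_0_init_csb()).
 */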
1135 static int gfx_v9_0_csb_vram_pin(struct amdgpu_device *adev)
1136 {
1137         int r;
1138
1139         r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
1140         if (unlikely(r != 0))
1141                 return r;
1142
1143         r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
1144                         AMDGPU_GEM_DOMAIN_VRAM);
1145         if (!r)
1146                 adev->gfx.rlc.clear_state_gpu_addr =
1147                         amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);
1148
1149         amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
1150
1151         return r;
1152 }
1153
1154 static void gfx_v9_0_csb_vram_unpin(struct amdgpu_device *adev)
1155 {
1156         int r;
1157
1158         if (!adev->gfx.rlc.clear_state_obj)
1159                 return;
1160
1161         r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
1162         if (likely(r == 0)) {
1163                 amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
1164                 amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
1165         }
1166 }
1167
1168 static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
1169 {
1170         amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
1171         amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
1172 }
1173
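/*
 * Allocate MEC resources: one GFX9_MEC_HPD_SIZE HPD/EOP slot per
 * acquired compute ring in VRAM, plus a GTT BO holding a copy of the
 * MEC ucode (presumably fetched via the CPC instruction cache once
 * its base registers are pointed at mec_fw_gpu_addr during CP
 * bring-up).
 */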
1174 static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
1175 {
1176         int r;
1177         u32 *hpd;
1178         const __le32 *fw_data;
1179         unsigned fw_size;
1180         u32 *fw;
1181         size_t mec_hpd_size;
1182
1183         const struct gfx_firmware_header_v1_0 *mec_hdr;
1184
1185         bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
1186
1187         /* take ownership of the relevant compute queues */
1188         amdgpu_gfx_compute_queue_acquire(adev);
1189         mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
1190
1191         r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
1192                                       AMDGPU_GEM_DOMAIN_VRAM,
1193                                       &adev->gfx.mec.hpd_eop_obj,
1194                                       &adev->gfx.mec.hpd_eop_gpu_addr,
1195                                       (void **)&hpd);
1196         if (r) {
1197                 dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
1198                 gfx_v9_0_mec_fini(adev);
1199                 return r;
1200         }
1201
1202         memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size);
1203
1204         amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
1205         amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
1206
1207         mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1208
1209         fw_data = (const __le32 *)
1210                 (adev->gfx.mec_fw->data +
1211                  le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
1212         fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);
1213
1214         r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
1215                                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1216                                       &adev->gfx.mec.mec_fw_obj,
1217                                       &adev->gfx.mec.mec_fw_gpu_addr,
1218                                       (void **)&fw);
1219         if (r) {
1220                 dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
1221                 gfx_v9_0_mec_fini(adev);
1222                 return r;
1223         }
1224
1225         memcpy(fw, fw_data, fw_size);
1226
1227         amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
1228         amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
1229
1230         return 0;
1231 }
1232
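/*
 * Indexed SQ register access: select the target wave/SIMD and
 * register offset through SQ_IND_INDEX (with FORCE_READ set), then
 * read the value back through SQ_IND_DATA.
 */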
1233 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
1234 {
1235         WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
1236                 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
1237                 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
1238                 (address << SQ_IND_INDEX__INDEX__SHIFT) |
1239                 (SQ_IND_INDEX__FORCE_READ_MASK));
1240         return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
1241 }
1242
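/*
 * Like wave_read_ind(), but with AUTO_INCR set so that back-to-back
 * reads of SQ_IND_DATA return consecutive registers; used for the
 * bulk SGPR/VGPR dumps below.
 */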
1243 static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
1244                            uint32_t wave, uint32_t thread,
1245                            uint32_t regno, uint32_t num, uint32_t *out)
1246 {
1247         WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
1248                 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
1249                 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
1250                 (regno << SQ_IND_INDEX__INDEX__SHIFT) |
1251                 (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
1252                 (SQ_IND_INDEX__FORCE_READ_MASK) |
1253                 (SQ_IND_INDEX__AUTO_INCR_MASK));
1254         while (num--)
1255                 *(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
1256 }
1257
1258 static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
1259 {
1260         /* type 1 wave data */
1261         dst[(*no_fields)++] = 1;
1262         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
1263         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
1264         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
1265         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
1266         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
1267         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
1268         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
1269         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
1270         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
1271         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
1272         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
1273         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
1274         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
1275         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
1276 }
1277
1278 static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
1279                                      uint32_t wave, uint32_t start,
1280                                      uint32_t size, uint32_t *dst)
1281 {
1282         wave_read_regs(
1283                 adev, simd, wave, 0,
1284                 start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
1285 }
1286
1287 static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
1288                                      uint32_t wave, uint32_t thread,
1289                                      uint32_t start, uint32_t size,
1290                                      uint32_t *dst)
1291 {
1292         wave_read_regs(
1293                 adev, simd, wave, thread,
1294                 start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
1295 }
1296
1297 static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
1298                                   u32 me, u32 pipe, u32 q)
1299 {
1300         soc15_grbm_select(adev, me, pipe, q, 0);
1301 }
1302
1303 static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
1304         .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
1305         .select_se_sh = &gfx_v9_0_select_se_sh,
1306         .read_wave_data = &gfx_v9_0_read_wave_data,
1307         .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
1308         .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
1309         .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q
1310 };
1311
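/*
 * Pick the golden GB_ADDR_CONFIG per ASIC and decode its fields.
 * The fields are log2-encoded, hence the 1 << REG_GET_FIELD()
 * expansions below; PIPE_INTERLEAVE_SIZE carries an extra bias of 8,
 * so a field value of 0 means a 256-byte interleave.
 */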
1312 static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
1313 {
1314         u32 gb_addr_config;
1315         int err;
1316
1317         adev->gfx.funcs = &gfx_v9_0_gfx_funcs;
1318
1319         switch (adev->asic_type) {
1320         case CHIP_VEGA10:
1321                 adev->gfx.config.max_hw_contexts = 8;
1322                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1323                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1324                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1325                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
1326                 gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
1327                 break;
1328         case CHIP_VEGA12:
1329                 adev->gfx.config.max_hw_contexts = 8;
1330                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1331                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1332                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1333                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
1334                 gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
1335                 DRM_INFO("fix gfx.config for vega12\n");
1336                 break;
1337         case CHIP_VEGA20:
1338                 adev->gfx.config.max_hw_contexts = 8;
1339                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1340                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1341                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1342                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
1343                 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
1344                 gb_addr_config &= ~0xf3e777ff;
1345                 gb_addr_config |= 0x22014042;
1346                 /* check vbios table if gpu info is not available */
1347                 err = amdgpu_atomfirmware_get_gfx_info(adev);
1348                 if (err)
1349                         return err;
1350                 break;
1351         case CHIP_RAVEN:
1352                 adev->gfx.config.max_hw_contexts = 8;
1353                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1354                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1355                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1356                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
1357                 if (adev->rev_id >= 8)
1358                         gb_addr_config = RAVEN2_GB_ADDR_CONFIG_GOLDEN;
1359                 else
1360                         gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
1361                 break;
1362         default:
1363                 BUG();
1364                 break;
1365         }
1366
1367         adev->gfx.config.gb_addr_config = gb_addr_config;
1368
1369         adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
1370                         REG_GET_FIELD(
1371                                         adev->gfx.config.gb_addr_config,
1372                                         GB_ADDR_CONFIG,
1373                                         NUM_PIPES);
1374
1375         adev->gfx.config.max_tile_pipes =
1376                 adev->gfx.config.gb_addr_config_fields.num_pipes;
1377
1378         adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
1379                         REG_GET_FIELD(
1380                                         adev->gfx.config.gb_addr_config,
1381                                         GB_ADDR_CONFIG,
1382                                         NUM_BANKS);
1383         adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
1384                         REG_GET_FIELD(
1385                                         adev->gfx.config.gb_addr_config,
1386                                         GB_ADDR_CONFIG,
1387                                         MAX_COMPRESSED_FRAGS);
1388         adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
1389                         REG_GET_FIELD(
1390                                         adev->gfx.config.gb_addr_config,
1391                                         GB_ADDR_CONFIG,
1392                                         NUM_RB_PER_SE);
1393         adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
1394                         REG_GET_FIELD(
1395                                         adev->gfx.config.gb_addr_config,
1396                                         GB_ADDR_CONFIG,
1397                                         NUM_SHADER_ENGINES);
1398         adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
1399                         REG_GET_FIELD(
1400                                         adev->gfx.config.gb_addr_config,
1401                                         GB_ADDR_CONFIG,
1402                                         PIPE_INTERLEAVE_SIZE));
1403
1404         return 0;
1405 }
1406
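/*
 * NGG buffers are sized per shader engine: a size_se of 0 selects the
 * caller-supplied default, and the total allocation is size_se scaled
 * by the number of shader engines.
 */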
1407 static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev,
1408                                    struct amdgpu_ngg_buf *ngg_buf,
1409                                    int size_se,
1410                                    int default_size_se)
1411 {
1412         int r;
1413
1414         if (size_se < 0) {
1415                 dev_err(adev->dev, "Buffer size is invalid: %d\n", size_se);
1416                 return -EINVAL;
1417         }
1418         size_se = size_se ? size_se : default_size_se;
1419
1420         ngg_buf->size = size_se * adev->gfx.config.max_shader_engines;
1421         r = amdgpu_bo_create_kernel(adev, ngg_buf->size,
1422                                     PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
1423                                     &ngg_buf->bo,
1424                                     &ngg_buf->gpu_addr,
1425                                     NULL);
1426         if (r) {
1427                 dev_err(adev->dev, "(%d) failed to create NGG buffer\n", r);
1428                 return r;
1429         }
1430         ngg_buf->bo_size = amdgpu_bo_size(ngg_buf->bo);
1431
1432         return r;
1433 }
1434
1435 static int gfx_v9_0_ngg_fini(struct amdgpu_device *adev)
1436 {
1437         int i;
1438
1439         for (i = 0; i < NGG_BUF_MAX; i++)
1440                 amdgpu_bo_free_kernel(&adev->gfx.ngg.buf[i].bo,
1441                                       &adev->gfx.ngg.buf[i].gpu_addr,
1442                                       NULL);
1443
1444         memset(&adev->gfx.ngg.buf[0], 0,
1445                         sizeof(struct amdgpu_ngg_buf) * NGG_BUF_MAX);
1446
1447         adev->gfx.ngg.init = false;
1448
1449         return 0;
1450 }
1451
1452 static int gfx_v9_0_ngg_init(struct amdgpu_device *adev)
1453 {
1454         int r;
1455
1456         if (!amdgpu_ngg || adev->gfx.ngg.init)
1457                 return 0;
1458
1459         /* GDS reserve memory: 64 bytes alignment */
1460         adev->gfx.ngg.gds_reserve_size = ALIGN(5 * 4, 0x40);
1461         adev->gds.mem.total_size -= adev->gfx.ngg.gds_reserve_size;
1462         adev->gds.mem.gfx_partition_size -= adev->gfx.ngg.gds_reserve_size;
1463         adev->gfx.ngg.gds_reserve_addr = RREG32_SOC15(GC, 0, mmGDS_VMID0_BASE);
1464         adev->gfx.ngg.gds_reserve_addr += RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE);
1465
1466         /* Primitive Buffer */
1467         r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PRIM],
1468                                     amdgpu_prim_buf_per_se,
1469                                     64 * 1024);
1470         if (r) {
1471                 dev_err(adev->dev, "Failed to create Primitive Buffer\n");
1472                 goto err;
1473         }
1474
1475         /* Position Buffer */
1476         r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_POS],
1477                                     amdgpu_pos_buf_per_se,
1478                                     256 * 1024);
1479         if (r) {
1480                 dev_err(adev->dev, "Failed to create Position Buffer\n");
1481                 goto err;
1482         }
1483
1484         /* Control Sideband */
1485         r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_CNTL],
1486                                     amdgpu_cntl_sb_buf_per_se,
1487                                     256);
1488         if (r) {
1489                 dev_err(adev->dev, "Failed to create Control Sideband Buffer\n");
1490                 goto err;
1491         }
1492
1493         /* Parameter Cache, not created by default */
1494         if (amdgpu_param_buf_per_se <= 0)
1495                 goto out;
1496
1497         r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PARAM],
1498                                     amdgpu_param_buf_per_se,
1499                                     512 * 1024);
1500         if (r) {
1501                 dev_err(adev->dev, "Failed to create Parameter Cache\n");
1502                 goto err;
1503         }
1504
1505 out:
1506         adev->gfx.ngg.init = true;
1507         return 0;
1508 err:
1509         gfx_v9_0_ngg_fini(adev);
1510         return r;
1511 }
1512
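/*
 * Program the work distributor with the NGG buffer sizes and base
 * addresses. Judging by the shifts below, sizes are expressed in
 * 256-byte units (>> 8) except the parameter cache, which uses 1KB
 * units (>> 10). The GDS reserve window is then cleared with a
 * DMA_DATA packet, bracketed by GDS_VMID0_SIZE reprogramming.
 */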
1513 static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
1514 {
1515         struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
1516         int r;
1517         u32 data, base;
1518
1519         if (!amdgpu_ngg)
1520                 return 0;
1521
1522         /* Program buffer size */
1523         data = REG_SET_FIELD(0, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE,
1524                              adev->gfx.ngg.buf[NGG_PRIM].size >> 8);
1525         data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE,
1526                              adev->gfx.ngg.buf[NGG_POS].size >> 8);
1527         WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_1, data);
1528
1529         data = REG_SET_FIELD(0, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE,
1530                              adev->gfx.ngg.buf[NGG_CNTL].size >> 8);
1531         data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE,
1532                              adev->gfx.ngg.buf[NGG_PARAM].size >> 10);
1533         WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_2, data);
1534
1535         /* Program buffer base address */
1536         base = lower_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
1537         data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE, BASE, base);
1538         WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE, data);
1539
1540         base = upper_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
1541         data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE_HI, BASE_HI, base);
1542         WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE_HI, data);
1543
1544         base = lower_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
1545         data = REG_SET_FIELD(0, WD_POS_BUF_BASE, BASE, base);
1546         WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE, data);
1547
1548         base = upper_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
1549         data = REG_SET_FIELD(0, WD_POS_BUF_BASE_HI, BASE_HI, base);
1550         WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE_HI, data);
1551
1552         base = lower_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
1553         data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE, BASE, base);
1554         WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE, data);
1555
1556         base = upper_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
1557         data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE_HI, BASE_HI, base);
1558         WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE_HI, data);
1559
1560         /* Clear GDS reserved memory */
1561         r = amdgpu_ring_alloc(ring, 17);
1562         if (r) {
1563                 DRM_ERROR("amdgpu: NGG failed to lock ring %s (%d).\n",
1564                           ring->name, r);
1565                 return r;
1566         }
1567
1568         gfx_v9_0_write_data_to_reg(ring, 0, false,
1569                                    SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
1570                                    (adev->gds.mem.total_size +
1571                                     adev->gfx.ngg.gds_reserve_size));
1572
1573         amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
1574         amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
1575                                 PACKET3_DMA_DATA_DST_SEL(1) |
1576                                 PACKET3_DMA_DATA_SRC_SEL(2)));
1577         amdgpu_ring_write(ring, 0);
1578         amdgpu_ring_write(ring, 0);
1579         amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_addr);
1580         amdgpu_ring_write(ring, 0);
1581         amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
1582                                 adev->gfx.ngg.gds_reserve_size);
1583
1584         gfx_v9_0_write_data_to_reg(ring, 0, false,
1585                                    SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE), 0);
1586
1587         amdgpu_ring_commit(ring);
1588
1589         return 0;
1590 }
1591
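/*
 * Set up a single compute ring: MEC numbering is offset by one from
 * ME numbering (mec0 is me1), each ring gets its own HPD/EOP slot in
 * the BO allocated by gfx_v9_0_mec_init(), and the doorbell index is
 * doubled (likely because each doorbell slot is 64 bits wide on
 * SOC15 parts).
 */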
1592 static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
1593                                       int mec, int pipe, int queue)
1594 {
1595         int r;
1596         unsigned irq_type;
1597         struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
1598
1601         /* mec0 is me1 */
1602         ring->me = mec + 1;
1603         ring->pipe = pipe;
1604         ring->queue = queue;
1605
1606         ring->ring_obj = NULL;
1607         ring->use_doorbell = true;
1608         ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
1609         ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
1610                                 + (ring_id * GFX9_MEC_HPD_SIZE);
1611         sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
1612
1613         irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
1614                 + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
1615                 + ring->pipe;
1616
1617         /* type-2 packets are deprecated on MEC, use type-3 instead */
1618         r = amdgpu_ring_init(adev, ring, 1024,
1619                              &adev->gfx.eop_irq, irq_type);
1620         if (r)
1621                 return r;
1622
1624         return 0;
1625 }
1626
1627 static int gfx_v9_0_sw_init(void *handle)
1628 {
1629         int i, j, k, r, ring_id;
1630         struct amdgpu_ring *ring;
1631         struct amdgpu_kiq *kiq;
1632         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1633
1634         switch (adev->asic_type) {
1635         case CHIP_VEGA10:
1636         case CHIP_VEGA12:
1637         case CHIP_VEGA20:
1638         case CHIP_RAVEN:
1639                 adev->gfx.mec.num_mec = 2;
1640                 break;
1641         default:
1642                 adev->gfx.mec.num_mec = 1;
1643                 break;
1644         }
1645
1646         adev->gfx.mec.num_pipe_per_mec = 4;
1647         adev->gfx.mec.num_queue_per_pipe = 8;
1648
1649         /* EOP Event */
1650         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
1651         if (r)
1652                 return r;
1653
1654         /* Privileged reg */
1655         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
1656                               &adev->gfx.priv_reg_irq);
1657         if (r)
1658                 return r;
1659
1660         /* Privileged inst */
1661         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
1662                               &adev->gfx.priv_inst_irq);
1663         if (r)
1664                 return r;
1665
1666         /* ECC error */
1667         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_ECC_ERROR,
1668                               &adev->gfx.cp_ecc_error_irq);
1669         if (r)
1670                 return r;
1671
1672         /* FUE error */
1673         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_FUE_ERROR,
1674                               &adev->gfx.cp_ecc_error_irq);
1675         if (r)
1676                 return r;
1677
1678         adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
1679
1680         gfx_v9_0_scratch_init(adev);
1681
1682         r = gfx_v9_0_init_microcode(adev);
1683         if (r) {
1684                 DRM_ERROR("Failed to load gfx firmware!\n");
1685                 return r;
1686         }
1687
1688         r = adev->gfx.rlc.funcs->init(adev);
1689         if (r) {
1690                 DRM_ERROR("Failed to init rlc BOs!\n");
1691                 return r;
1692         }
1693
1694         r = gfx_v9_0_mec_init(adev);
1695         if (r) {
1696                 DRM_ERROR("Failed to init MEC BOs!\n");
1697                 return r;
1698         }
1699
1700         /* set up the gfx ring */
1701         for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
1702                 ring = &adev->gfx.gfx_ring[i];
1703                 ring->ring_obj = NULL;
1704                 if (!i)
1705                         sprintf(ring->name, "gfx");
1706                 else
1707                         sprintf(ring->name, "gfx_%d", i);
1708                 ring->use_doorbell = true;
1709                 ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
1710                 r = amdgpu_ring_init(adev, ring, 1024,
1711                                      &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
1712                 if (r)
1713                         return r;
1714         }
1715
1716         /* set up the compute queues - allocate horizontally across pipes */
1717         ring_id = 0;
1718         for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
1719                 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
1720                         for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
1721                                 if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
1722                                         continue;
1723
1724                                 r = gfx_v9_0_compute_ring_init(adev,
1725                                                                ring_id,
1726                                                                i, k, j);
1727                                 if (r)
1728                                         return r;
1729
1730                                 ring_id++;
1731                         }
1732                 }
1733         }
1734
1735         r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE);
1736         if (r) {
1737                 DRM_ERROR("Failed to init KIQ BOs!\n");
1738                 return r;
1739         }
1740
1741         kiq = &adev->gfx.kiq;
1742         r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
1743         if (r)
1744                 return r;
1745
1746         /* create MQD for all compute queues as well as KIQ for SRIOV case */
1747         r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation));
1748         if (r)
1749                 return r;
1750
1751         adev->gfx.ce_ram_size = 0x8000;
1752
1753         r = gfx_v9_0_gpu_early_init(adev);
1754         if (r)
1755                 return r;
1756
1757         r = gfx_v9_0_ngg_init(adev);
1758         if (r)
1759                 return r;
1760
1761         return 0;
1762 }
1763
1765 static int gfx_v9_0_sw_fini(void *handle)
1766 {
1767         int i;
1768         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1769
1770         if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) &&
1771                         adev->gfx.ras_if) {
1772                 struct ras_common_if *ras_if = adev->gfx.ras_if;
1773                 struct ras_ih_if ih_info = {
1774                         .head = *ras_if,
1775                 };
1776
1777                 amdgpu_ras_debugfs_remove(adev, ras_if);
1778                 amdgpu_ras_sysfs_remove(adev, ras_if);
1779                 amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
1780                 amdgpu_ras_feature_enable(adev, ras_if, 0);
1781                 kfree(ras_if);
1782         }
1783
1784         amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
1785         amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
1786         amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
1787
1788         for (i = 0; i < adev->gfx.num_gfx_rings; i++)
1789                 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
1790         for (i = 0; i < adev->gfx.num_compute_rings; i++)
1791                 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
1792
1793         amdgpu_gfx_compute_mqd_sw_fini(adev);
1794         amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
1795         amdgpu_gfx_kiq_fini(adev);
1796
1797         gfx_v9_0_mec_fini(adev);
1798         gfx_v9_0_ngg_fini(adev);
1799         amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
1800                                 &adev->gfx.rlc.clear_state_gpu_addr,
1801                                 (void **)&adev->gfx.rlc.cs_ptr);
1802         if (adev->asic_type == CHIP_RAVEN) {
1803                 amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
1804                                 &adev->gfx.rlc.cp_table_gpu_addr,
1805                                 (void **)&adev->gfx.rlc.cp_table_ptr);
1806         }
1807         gfx_v9_0_free_microcode(adev);
1808
1809         return 0;
1810 }
1811
1813 static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
1814 {
1815         /* TODO */
1816 }
1817
1818 static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance)
1819 {
1820         u32 data;
1821
1822         if (instance == 0xffffffff)
1823                 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
1824         else
1825                 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
1826
1827         if (se_num == 0xffffffff)
1828                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
1829         else
1830                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
1831
1832         if (sh_num == 0xffffffff)
1833                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
1834         else
1835                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
1836
1837         WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
1838 }
1839
1840 static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev)
1841 {
1842         u32 data, mask;
1843
1844         data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
1845         data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);
1846
1847         data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
1848         data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
1849
1850         mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
1851                                          adev->gfx.config.max_sh_per_se);
1852
1853         return (~data) & mask;
1854 }
1855
1856 static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
1857 {
1858         int i, j;
1859         u32 data;
1860         u32 active_rbs = 0;
1861         u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
1862                                         adev->gfx.config.max_sh_per_se;
1863
1864         mutex_lock(&adev->grbm_idx_mutex);
1865         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1866                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1867                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
1868                         data = gfx_v9_0_get_rb_active_bitmap(adev);
1869                         active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
1870                                                rb_bitmap_width_per_sh);
1871                 }
1872         }
1873         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1874         mutex_unlock(&adev->grbm_idx_mutex);
1875
1876         adev->gfx.config.backend_enable_mask = active_rbs;
1877         adev->gfx.config.num_rbs = hweight32(active_rbs);
1878 }
1879
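/*
 * Compute VMIDs 8..15 get fixed apertures. DEFAULT_SH_MEM_BASES
 * (0x6000) is replicated into both halves of SH_MEM_BASES, placing
 * the private and shared bases at 0x6000_0000'0000_0000 as laid out
 * in the comment inside gfx_v9_0_init_compute_vmid() below.
 */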
1880 #define DEFAULT_SH_MEM_BASES    (0x6000)
1881 #define FIRST_COMPUTE_VMID      (8)
1882 #define LAST_COMPUTE_VMID       (16)
1883 static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
1884 {
1885         int i;
1886         uint32_t sh_mem_config;
1887         uint32_t sh_mem_bases;
1888
1889         /*
1890          * Configure apertures:
1891          * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
1892          * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
1893          * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
1894          */
1895         sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
1896
1897         sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
1898                         SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
1899                         SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
1900
1901         mutex_lock(&adev->srbm_mutex);
1902         for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
1903                 soc15_grbm_select(adev, 0, 0, 0, i);
1904                 /* CP and shaders */
1905                 WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
1906                 WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
1907         }
1908         soc15_grbm_select(adev, 0, 0, 0, 0);
1909         mutex_unlock(&adev->srbm_mutex);
1910 }
1911
1912 static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
1913 {
1914         u32 tmp;
1915         int i;
1916
1917         WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
1918
1919         gfx_v9_0_tiling_mode_table_init(adev);
1920
1921         gfx_v9_0_setup_rb(adev);
1922         gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
1923         adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
1924
1925         /* XXX SH_MEM regs */
1926         /* where to put LDS, scratch, GPUVM in FSA64 space */
1927         mutex_lock(&adev->srbm_mutex);
1928         for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids; i++) {
1929                 soc15_grbm_select(adev, 0, 0, 0, i);
1930                 /* CP and shaders */
1931                 if (i == 0) {
1932                         tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
1933                                             SH_MEM_ALIGNMENT_MODE_UNALIGNED);
1934                         WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
1935                         WREG32_SOC15(GC, 0, mmSH_MEM_BASES, 0);
1936                 } else {
1937                         tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
1938                                             SH_MEM_ALIGNMENT_MODE_UNALIGNED);
1939                         WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
1940                         tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
1941                                 (adev->gmc.private_aperture_start >> 48));
1942                         tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
1943                                 (adev->gmc.shared_aperture_start >> 48));
1944                         WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp);
1945                 }
1946         }
1947         soc15_grbm_select(adev, 0, 0, 0, 0);
1948
1949         mutex_unlock(&adev->srbm_mutex);
1950
1951         gfx_v9_0_init_compute_vmid(adev);
1952
1953         mutex_lock(&adev->grbm_idx_mutex);
1954         /*
1955          * making sure that the following register writes will be broadcasted
1956          * to all the shaders
1957          */
1958         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1959
1960         WREG32_SOC15(GC, 0, mmPA_SC_FIFO_SIZE,
1961                    (adev->gfx.config.sc_prim_fifo_size_frontend <<
1962                         PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
1963                    (adev->gfx.config.sc_prim_fifo_size_backend <<
1964                         PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
1965                    (adev->gfx.config.sc_hiz_tile_fifo_size <<
1966                         PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
1967                    (adev->gfx.config.sc_earlyz_tile_fifo_size <<
1968                         PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));
1969         mutex_unlock(&adev->grbm_idx_mutex);
1971 }
1972
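/*
 * Wait for the RLC serdes to go idle: first poll the CU master busy
 * register for every SE/SH combination (steered via GRBM_GFX_INDEX),
 * then poll the non-CU masters (SE/GC/TC0/TC1) once globally. Each
 * poll is bounded by adev->usec_timeout.
 */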
1973 static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
1974 {
1975         u32 i, j, k;
1976         u32 mask;
1977
1978         mutex_lock(&adev->grbm_idx_mutex);
1979         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1980                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1981                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
1982                         for (k = 0; k < adev->usec_timeout; k++) {
1983                                 if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
1984                                         break;
1985                                 udelay(1);
1986                         }
1987                         if (k == adev->usec_timeout) {
1988                                 gfx_v9_0_select_se_sh(adev, 0xffffffff,
1989                                                       0xffffffff, 0xffffffff);
1990                                 mutex_unlock(&adev->grbm_idx_mutex);
1991                                 DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
1992                                          i, j);
1993                                 return;
1994                         }
1995                 }
1996         }
1997         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1998         mutex_unlock(&adev->grbm_idx_mutex);
1999
2000         mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
2001                 RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
2002                 RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
2003                 RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
2004         for (k = 0; k < adev->usec_timeout; k++) {
2005                 if ((RREG32_SOC15(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
2006                         break;
2007                 udelay(1);
2008         }
2009 }
2010
2011 static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
2012                                                bool enable)
2013 {
2014         u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
2015
2016         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
2017         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
2018         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
2019         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
2020
2021         WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
2022 }
2023
2024 static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
2025 {
2026         /* csib */
2027         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
2028                         adev->gfx.rlc.clear_state_gpu_addr >> 32);
2029         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
2030                         adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
2031         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
2032                         adev->gfx.rlc.clear_state_size);
2033 }
2034
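/*
 * Walk the RLC register-list-format blob past its direct-register
 * prefix. Each indirect run appears to consist of three-word entries
 * whose third word is an indirect register offset, terminated by a
 * 0xFFFFFFFF sentinel; the routine records where each run starts and
 * collects the set of unique indirect register offsets.
 */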
2035 static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
2036                                 int indirect_offset,
2037                                 int list_size,
2038                                 int *unique_indirect_regs,
2039                                 int unique_indirect_reg_count,
2040                                 int *indirect_start_offsets,
2041                                 int *indirect_start_offsets_count,
2042                                 int max_start_offsets_count)
2043 {
2044         int idx;
2045
2046         for (; indirect_offset < list_size; indirect_offset++) {
2047                 WARN_ON(*indirect_start_offsets_count >= max_start_offsets_count);
2048                 indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
2049                 *indirect_start_offsets_count = *indirect_start_offsets_count + 1;
2050
2051                 while (register_list_format[indirect_offset] != 0xFFFFFFFF) {
2052                         indirect_offset += 2;
2053
2054                         /* look for the matching index */
2055                         for (idx = 0; idx < unique_indirect_reg_count; idx++) {
2056                                 if (unique_indirect_regs[idx] ==
2057                                         register_list_format[indirect_offset] ||
2058                                         !unique_indirect_regs[idx])
2059                                         break;
2060                         }
2061
2062                         BUG_ON(idx >= unique_indirect_reg_count);
2063
2064                         if (!unique_indirect_regs[idx])
2065                                 unique_indirect_regs[idx] = register_list_format[indirect_offset];
2066
2067                         indirect_offset++;
2068                 }
2069         }
2070 }
2071
2072 static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev)
2073 {
2074         int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2075         int unique_indirect_reg_count = 0;
2076
2077         int indirect_start_offsets[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2078         int indirect_start_offsets_count = 0;
2079
2080         int list_size = 0;
2081         int i = 0, j = 0;
2082         u32 tmp = 0;
2083
2084         u32 *register_list_format =
2085                 kmalloc(adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
2086         if (!register_list_format)
2087                 return -ENOMEM;
2088         memcpy(register_list_format, adev->gfx.rlc.register_list_format,
2089                 adev->gfx.rlc.reg_list_format_size_bytes);
2090
2091         /* setup unique_indirect_regs array and indirect_start_offsets array */
2092         unique_indirect_reg_count = ARRAY_SIZE(unique_indirect_regs);
2093         gfx_v9_1_parse_ind_reg_list(register_list_format,
2094                                     adev->gfx.rlc.reg_list_format_direct_reg_list_length,
2095                                     adev->gfx.rlc.reg_list_format_size_bytes >> 2,
2096                                     unique_indirect_regs,
2097                                     unique_indirect_reg_count,
2098                                     indirect_start_offsets,
2099                                     &indirect_start_offsets_count,
2100                                     ARRAY_SIZE(indirect_start_offsets));
2101
2102         /* enable auto inc in case it is disabled */
2103         tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
2104         tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
2105         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
2106
2107         /* write register_restore table to offset 0x0 using RLC_SRM_ARAM_ADDR/DATA */
2108         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR),
2109                 RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET);
2110         for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
2111                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
2112                         adev->gfx.rlc.register_restore[i]);
2113
2114         /* load indirect register */
2115         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2116                 adev->gfx.rlc.reg_list_format_start);
2117
2118         /* direct register portion */
2119         for (i = 0; i < adev->gfx.rlc.reg_list_format_direct_reg_list_length; i++)
2120                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2121                         register_list_format[i]);
2122
2123         /* indirect register portion */
2124         while (i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2)) {
2125                 if (register_list_format[i] == 0xFFFFFFFF) {
2126                         WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2127                         continue;
2128                 }
2129
2130                 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2131                 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2132
2133                 for (j = 0; j < unique_indirect_reg_count; j++) {
2134                         if (register_list_format[i] == unique_indirect_regs[j]) {
2135                                 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, j);
2136                                 break;
2137                         }
2138                 }
2139
2140                 BUG_ON(j >= unique_indirect_reg_count);
2141
2142                 i++;
2143         }
2144
2145         /* set save/restore list size */
2146         list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
2147         list_size = list_size >> 1;
2148         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2149                 adev->gfx.rlc.reg_restore_list_size);
2150         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), list_size);
2151
2152         /* write the starting offsets to RLC scratch ram */
2153         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2154                 adev->gfx.rlc.starting_offsets_start);
2155         for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
2156                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2157                        indirect_start_offsets[i]);
2158
2159         /* load unique indirect regs */
2160         for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
2161                 if (unique_indirect_regs[i] != 0) {
2162                         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0)
2163                                + GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[i],
2164                                unique_indirect_regs[i] & 0x3FFFF);
2165
2166                         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0)
2167                                + GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[i],
2168                                unique_indirect_regs[i] >> 20);
2169                 }
2170         }
2171
2172         kfree(register_list_format);
2173         return 0;
2174 }
2175
2176 static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
2177 {
2178         WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1);
2179 }
2180
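/*
 * Hand CGPG control to the RLC (GFXIP) or take it back. When
 * enabling, the GFXOFF status field is additionally forced to 2 (the
 * exact status encoding is not documented here). Register writes are
 * skipped when the value is already in place.
 */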
2181 static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
2182                                              bool enable)
2183 {
2184         uint32_t data = 0;
2185         uint32_t default_data = 0;
2186
2187         default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
2188         if (enable) {
2189                 /* enable GFXIP control over CGPG */
2190                 data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2191                 if (default_data != data)
2192                         WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2193
2194                 /* update status */
2195                 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK;
2196                 data |= (2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT);
2197                 if (default_data != data)
2198                         WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2199         } else {
2200                 /* restore GFXIP control over CGPG */
2201                 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2202                 if (default_data != data)
2203                         WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2204         }
2205 }
2206
2207 static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)
2208 {
2209         uint32_t data = 0;
2210
2211         if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2212                               AMD_PG_SUPPORT_GFX_SMG |
2213                               AMD_PG_SUPPORT_GFX_DMG)) {
2214                 /* init IDLE_POLL_COUNT = 0x60 */
2215                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL));
2216                 data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
2217                 data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
2218                 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL), data);
2219
2220                 /* init RLC PG Delay */
2221                 data = 0;
2222                 data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
2223                 data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
2224                 data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
2225                 data |= (0x40 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
2226                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY), data);
2227
2228                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2));
2229                 data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
2230                 data |= (0x4 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
2231                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2), data);
2232
2233                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3));
2234                 data &= ~RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK;
2235                 data |= (0xff << RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT);
2236                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3), data);
2237
2238                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL));
2239                 data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
2240
2241                 /* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
2242                 data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
2243                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);
2244
2245                 pwr_10_0_gfxip_control_over_cgpg(adev, true);
2246         }
2247 }
2248
2249 static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
2250                                                 bool enable)
2251 {
2252         uint32_t data = 0;
2253         uint32_t default_data = 0;
2254
2255         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2256         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2257                              SMU_CLK_SLOWDOWN_ON_PU_ENABLE,
2258                              enable ? 1 : 0);
2259         if (default_data != data)
2260                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2261 }
2262
2263 static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
2264                                                 bool enable)
2265 {
2266         uint32_t data = 0;
2267         uint32_t default_data = 0;
2268
2269         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2270         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2271                              SMU_CLK_SLOWDOWN_ON_PD_ENABLE,
2272                              enable ? 1 : 0);
2273         if (default_data != data)
2274                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2275 }
2276
2277 static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
2278                                         bool enable)
2279 {
2280         uint32_t data = 0;
2281         uint32_t default_data = 0;
2282
2283         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2284         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2285                              CP_PG_DISABLE,
2286                              enable ? 0 : 1);
2287         if (default_data != data)
2288                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2289 }
2290
2291 static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
2292                                                 bool enable)
2293 {
2294         uint32_t data, default_data;
2295
2296         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2297         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2298                              GFX_POWER_GATING_ENABLE,
2299                              enable ? 1 : 0);
2300         if (default_data != data)
2301                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2302 }
2303
2304 static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
2305                                                 bool enable)
2306 {
2307         uint32_t data, default_data;
2308
2309         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2310         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2311                              GFX_PIPELINE_PG_ENABLE,
2312                              enable ? 1 : 0);
2313         if (default_data != data)
2314                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2315
2316         if (!enable)
2317                 /* read any GFX register to wake up GFX */
2318                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_RENDER_CONTROL));
2319 }
2320
2321 static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
2322                                                        bool enable)
2323 {
2324         uint32_t data, default_data;
2325
2326         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2327         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2328                              STATIC_PER_CU_PG_ENABLE,
2329                              enable ? 1 : 0);
2330         if (default_data != data)
2331                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2332 }
2333
2334 static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
2335                                                 bool enable)
2336 {
2337         uint32_t data, default_data;
2338
2339         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2340         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2341                              DYN_PER_CU_PG_ENABLE,
2342                              enable ? 1 : 0);
2343         if (default_data != data)
2344                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2345 }
2346
2347 static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
2348 {
2349         gfx_v9_0_init_csb(adev);
2350
2351         /*
2352          * The RLC save/restore list is supported from RLC v2_1 on,
2353          * and is required by the gfxoff feature.
2354          */
2355         if (adev->gfx.rlc.is_rlc_v2_1) {
2356                 gfx_v9_1_init_rlc_save_restore_list(adev);
2357                 gfx_v9_0_enable_save_restore_machine(adev);
2358         }
2359
2360         if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2361                               AMD_PG_SUPPORT_GFX_SMG |
2362                               AMD_PG_SUPPORT_GFX_DMG |
2363                               AMD_PG_SUPPORT_CP |
2364                               AMD_PG_SUPPORT_GDS |
2365                               AMD_PG_SUPPORT_RLC_SMU_HS)) {
2366                 WREG32(mmRLC_JUMP_TABLE_RESTORE,
2367                        adev->gfx.rlc.cp_table_gpu_addr >> 8);
2368                 gfx_v9_0_init_gfx_power_gating(adev);
2369         }
2370 }
2371
2372 static void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
2373 {
2374         WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0);
2375         gfx_v9_0_enable_gui_idle_interrupt(adev, false);
2376         gfx_v9_0_wait_for_rlc_serdes(adev);
2377 }
2378
2379 static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
2380 {
2381         WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2382         udelay(50);
2383         WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
2384         udelay(50);
2385 }
2386
2387 static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
2388 {
2389 #ifdef AMDGPU_RLC_DEBUG_RETRY
2390         u32 rlc_ucode_ver;
2391 #endif
2392
2393         WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
2394         udelay(50);
2395
2396         /* APUs (e.g. carrizo) enable the CP interrupt only after CP init */
2397         if (!(adev->flags & AMD_IS_APU)) {
2398                 gfx_v9_0_enable_gui_idle_interrupt(adev, true);
2399                 udelay(50);
2400         }
2401
2402 #ifdef AMDGPU_RLC_DEBUG_RETRY
2403         /* RLC_GPM_GENERAL_6 : RLC Ucode version */
2404         rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
2405         if(rlc_ucode_ver == 0x108) {
2406                 DRM_INFO("Using rlc debug ucode. mmRLC_GPM_GENERAL_6 ==0x08%x / fw_ver == %i \n",
2407                                 rlc_ucode_ver, adev->gfx.rlc_fw_version);
2408                 /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
2409                  * default is 0x9C4 to create a 100us interval */
2410                 WREG32_SOC15(GC, 0, mmRLC_GPM_TIMER_INT_3, 0x9C4);
2411                 /* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
2412                  * to disable the page fault retry interrupts, default is
2413                  * 0x100 (256) */
2414                 WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_12, 0x100);
2415         }
2416 #endif
2417 }
2418
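/* Legacy (non-PSP) RLC microcode load: stream the ucode image through
 * the RLC_GPM_UCODE_DATA port and leave the firmware version in the
 * address register when done. */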
static int gfx_v9_0_rlc_load_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_0 *hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;

	if (!adev->gfx.rlc_fw)
		return -EINVAL;

	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	amdgpu_ucode_print_rlc_hdr(&hdr->header);

	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
			   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;

	WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
			RLCG_UCODE_LOADING_START_ADDRESS);
	for (i = 0; i < fw_size; i++)
		WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
	WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);

	return 0;
}

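/* Full RLC bring-up: stop, disable CGCG/CGLS, soft reset, redo the PG
 * init, load microcode on the legacy path, set LBPW per ASIC and
 * restart. SR-IOV VFs only need the clear-state buffer reinitialized. */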
static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
{
	int r;

	if (amdgpu_sriov_vf(adev)) {
		gfx_v9_0_init_csb(adev);
		return 0;
	}

	adev->gfx.rlc.funcs->stop(adev);

	/* disable CG */
	WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);

	adev->gfx.rlc.funcs->reset(adev);

	gfx_v9_0_init_pg(adev);

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		/* legacy rlc firmware loading */
		r = gfx_v9_0_rlc_load_microcode(adev);
		if (r)
			return r;
	}

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (amdgpu_lbpw == 0)
			gfx_v9_0_enable_lbpw(adev, false);
		else
			gfx_v9_0_enable_lbpw(adev, true);
		break;
	case CHIP_VEGA20:
		if (amdgpu_lbpw > 0)
			gfx_v9_0_enable_lbpw(adev, true);
		else
			gfx_v9_0_enable_lbpw(adev, false);
		break;
	default:
		break;
	}

	adev->gfx.rlc.funcs->start(adev);

	return 0;
}

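/* Unhalt or halt the gfx micro engines (ME/PFP/CE); when halting, the
 * gfx rings are also flagged as not schedulable. */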
static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
	int i;
	u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);

	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
	if (!enable) {
		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
			adev->gfx.gfx_ring[i].sched.ready = false;
	}
	WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
	udelay(50);
}

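/* Legacy CP gfx microcode load: halt the CP, then stream the PFP, CE
 * and ME images through their respective UCODE_DATA ports. */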
static int gfx_v9_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
{
	const struct gfx_firmware_header_v1_0 *pfp_hdr;
	const struct gfx_firmware_header_v1_0 *ce_hdr;
	const struct gfx_firmware_header_v1_0 *me_hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;

	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
		return -EINVAL;

	pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.pfp_fw->data;
	ce_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.ce_fw->data;
	me_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.me_fw->data;

	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
	amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);

	gfx_v9_0_cp_gfx_enable(adev, false);

	/* PFP */
	fw_data = (const __le32 *)
		(adev->gfx.pfp_fw->data +
		 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
	WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < fw_size; i++)
		WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
	WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);

	/* CE */
	fw_data = (const __le32 *)
		(adev->gfx.ce_fw->data +
		 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
	WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, 0);
	for (i = 0; i < fw_size; i++)
		WREG32_SOC15(GC, 0, mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
	WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);

	/* ME */
	fw_data = (const __le32 *)
		(adev->gfx.me_fw->data +
		 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
	WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, 0);
	for (i = 0; i < fw_size; i++)
		WREG32_SOC15(GC, 0, mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
	WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);

	return 0;
}

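/* Prime the gfx ring with the clear-state preamble: CONTEXT_CONTROL,
 * the SECT_CONTEXT extents from gfx9_cs_data, CLEAR_STATE and the CE
 * partition bases, so context 0 starts from a known state. */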
static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;
	int r, i, tmp;

	/* init the CP */
	WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
	WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);

	gfx_v9_0_cp_gfx_enable(adev, true);

	r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
		return r;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, 0x80000000);
	amdgpu_ring_write(ring, 0x80000000);

	for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				amdgpu_ring_write(ring,
				       PACKET3(PACKET3_SET_CONTEXT_REG,
					       ext->reg_count));
				amdgpu_ring_write(ring,
				       ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					amdgpu_ring_write(ring, ext->extent[i]);
			}
		}
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
	amdgpu_ring_write(ring, 0x8000);
	amdgpu_ring_write(ring, 0x8000);

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	tmp = (PACKET3_SET_UCONFIG_REG_INDEX_TYPE |
		(SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) - PACKET3_SET_UCONFIG_REG_START));
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_commit(ring);

	return 0;
}

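/* Program the gfx ring buffer: size, rptr/wptr writeback addresses,
 * base address and doorbell, then start the ring and mark it ready. */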
static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 tmp;
	u32 rb_bufsz;
	u64 rb_addr, rptr_addr, wptr_gpu_addr;

	/* Set the write pointer delay */
	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);

	/* set the RB to use vmid 0 */
	WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);

	/* Set ring buffer size */
	ring = &adev->gfx.gfx_ring[0];
	rb_bufsz = order_base_2(ring->ring_size / 8);
	tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
#ifdef __BIG_ENDIAN
	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
#endif
	WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's write pointers */
	ring->wptr = 0;
	WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));

	/* set the wb address whether it's enabled or not */
	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
	WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
	WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);

	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));

	mdelay(1);
	WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);

	rb_addr = ring->gpu_addr >> 8;
	WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
	WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));

	tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
	if (ring->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, ring->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
	} else {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
	}
	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);

	tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
			DOORBELL_RANGE_LOWER, ring->doorbell_index);
	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);

	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
		       CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);

	/* start the ring */
	gfx_v9_0_cp_gfx_start(adev);
	ring->sched.ready = true;

	return 0;
}

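/* Unhalt or halt both MEC micro engines; when halting, the compute and
 * KIQ rings are flagged as not schedulable. */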
static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
{
	int i;

	if (enable) {
		WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
	} else {
		WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
			(CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
		for (i = 0; i < adev->gfx.num_compute_rings; i++)
			adev->gfx.compute_ring[i].sched.ready = false;
		adev->gfx.kiq.ring.sched.ready = false;
	}
	udelay(50);
}

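/* Legacy MEC microcode load: point the CPC instruction cache at the
 * ucode BO and copy the MEC1 jump table in through the UCODE_DATA
 * port. */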
static int gfx_v9_0_cp_compute_load_microcode(struct amdgpu_device *adev)
{
	const struct gfx_firmware_header_v1_0 *mec_hdr;
	const __le32 *fw_data;
	unsigned i;
	u32 tmp;

	if (!adev->gfx.mec_fw)
		return -EINVAL;

	gfx_v9_0_cp_compute_enable(adev, false);

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);

	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	tmp = 0;
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);

	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
		adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
		upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));

	/* MEC1 */
	WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
			 mec_hdr->jt_offset);
	for (i = 0; i < mec_hdr->jt_size; i++)
		WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
			le32_to_cpup(fw_data + mec_hdr->jt_offset + i));

	WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
			adev->gfx.mec_fw_version);
	/* TODO: loading MEC2 firmware is only necessary if MEC2 needs to
	 * run different microcode than MEC1. */

	return 0;
}

/* KIQ functions */
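/* Identify the KIQ to the RLC via RLC_CP_SCHEDULERS: the low byte
 * encodes me/pipe/queue, and a second write sets bit 0x80 on top. */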
static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
{
	uint32_t tmp;
	struct amdgpu_device *adev = ring->adev;

	/* tell RLC which queue is the KIQ */
	tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
	tmp &= 0xffffff00;
	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
	WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
	tmp |= 0x80;
	WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
}

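/* Map all kernel compute queues (KCQs) through the KIQ: one
 * SET_RESOURCES packet advertising the usable queue mask, followed by
 * a MAP_QUEUES packet per compute ring. */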
static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
{
	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
	uint64_t queue_mask = 0;
	int r, i;

	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
		if (!test_bit(i, adev->gfx.mec.queue_bitmap))
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of queue_mask needs updating */
		if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
			DRM_ERROR("Invalid KCQ enabled: %d\n", i);
			break;
		}

		queue_mask |= (1ull << i);
	}

	r = amdgpu_ring_alloc(kiq_ring, (7 * adev->gfx.num_compute_rings) + 8);
	if (r) {
		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
		return r;
	}

	/* set resources */
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */
	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */
	amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
	amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
	amdgpu_ring_write(kiq_ring, 0); /* oac mask */
	amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
		uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
		uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);

		amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
		amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, vidmem: 1, engine: 0, num_Q: 1 */
				  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
				  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
				  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
				  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
				  PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
				  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /* queue_type: normal compute queue */
				  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
				  PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */
				  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
		amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
		amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
		amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
	}

	r = amdgpu_ring_test_helper(kiq_ring);
	if (r)
		DRM_ERROR("KCQ enable failed\n");

	return r;
}

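/* Fill a compute ring's memory queue descriptor (MQD); the HQD
 * registers are later programmed from this image. */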
static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct v9_mqd *mqd = ring->mqd_ptr;
	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
	uint32_t tmp;

	mqd->header = 0xC0310800;
	mqd->compute_pipelinestat_enable = 0x00000001;
	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
	mqd->compute_misc_reserved = 0x00000003;

	mqd->dynamic_cu_mask_addr_lo =
		lower_32_bits(ring->mqd_gpu_addr
			      + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
	mqd->dynamic_cu_mask_addr_hi =
		upper_32_bits(ring->mqd_gpu_addr
			      + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));

	eop_base_addr = ring->eop_gpu_addr >> 8;
	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);

	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
			(order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));

	mqd->cp_hqd_eop_control = tmp;

	/* enable doorbell? */
	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);

	if (ring->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, ring->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_SOURCE, 0);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_HIT, 0);
	} else {
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
					 DOORBELL_EN, 0);
	}

	mqd->cp_hqd_pq_doorbell_control = tmp;

	/* disable the queue if it's active */
	ring->wptr = 0;
	mqd->cp_hqd_dequeue_request = 0;
	mqd->cp_hqd_pq_rptr = 0;
	mqd->cp_hqd_pq_wptr_lo = 0;
	mqd->cp_hqd_pq_wptr_hi = 0;

	/* set the pointer to the MQD */
	mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);

	/* set MQD vmid to 0 */
	tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
	mqd->cp_mqd_control = tmp;

	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
	hqd_gpu_addr = ring->gpu_addr >> 8;
	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);

	/* set up the HQD, this is similar to CP_RB0_CNTL */
	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
			    (order_base_2(ring->ring_size / 4) - 1));
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
			((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
#ifdef __BIG_ENDIAN
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
#endif
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
	mqd->cp_hqd_pq_control = tmp;

	/* set the wb address whether it's enabled or not */
	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_hqd_pq_rptr_report_addr_hi =
		upper_32_bits(wb_gpu_addr) & 0xffff;

	/* only used if CP_PQ_WPTR_POLL_CNTL.EN=1 */
	wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;

	tmp = 0;
	/* enable the doorbell if requested */
	if (ring->use_doorbell) {
		tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				DOORBELL_OFFSET, ring->doorbell_index);

		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
					 DOORBELL_EN, 1);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
					 DOORBELL_SOURCE, 0);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
					 DOORBELL_HIT, 0);
	}

	mqd->cp_hqd_pq_doorbell_control = tmp;

	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
	ring->wptr = 0;
	mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);

	/* set the vmid for the queue */
	mqd->cp_hqd_vmid = 0;

	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
	mqd->cp_hqd_persistent_state = tmp;

	/* set MIN_IB_AVAIL_SIZE */
	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
	mqd->cp_hqd_ib_control = tmp;

	/* activate the queue */
	mqd->cp_hqd_active = 1;

	return 0;
}

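/* Program the HQD registers directly from the KIQ's MQD image. */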
static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct v9_mqd *mqd = ring->mqd_ptr;
	int j;

	/* disable wptr polling */
	WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);

	WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
	       mqd->cp_hqd_eop_base_addr_lo);
	WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
	       mqd->cp_hqd_eop_base_addr_hi);

	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
	WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL,
	       mqd->cp_hqd_eop_control);
