/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "umc_v8_10.h"
#include "amdgpu_ras.h"
#include "amdgpu_umc.h"
#include "amdgpu.h"
#include "umc/umc_8_10_0_offset.h"
#include "umc/umc_8_10_0_sh_mask.h"

#define UMC_8_NODE_DIST   0x800000
#define UMC_8_INST_DIST   0x4000

struct channelnum_map_colbit {
	uint32_t channel_num;
	uint32_t col_bit;
};

const struct channelnum_map_colbit umc_v8_10_channelnum_map_colbit_table[] = {
	{24, 13},
	{20, 13},
	{16, 12},
	{14, 12},
	{12, 12},
	{10, 12},
	{6,  11},
};

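/*
 * Channel index tables: each entry maps a (node instance, UMC instance,
 * channel instance) triple to the logical channel index used when filling
 * RAS error records and converting normal addresses to soc physical
 * addresses below.
 */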
const uint32_t
	umc_v8_10_channel_idx_tbl_ext0[]
				[UMC_V8_10_UMC_INSTANCE_NUM]
				[UMC_V8_10_CHANNEL_INSTANCE_NUM] = {
	   {{1,   5}, {7,  3}},
	   {{14, 15}, {13, 12}},
	   {{10, 11}, {9,  8}},
	   {{6,   2}, {0,  4}}
	};

const uint32_t
	umc_v8_10_channel_idx_tbl[]
				[UMC_V8_10_UMC_INSTANCE_NUM]
				[UMC_V8_10_CHANNEL_INSTANCE_NUM] = {
	   {{16, 18}, {17, 19}},
	   {{15, 11}, {3,   7}},
	   {{1,   5}, {13,  9}},
	   {{23, 21}, {22, 20}},
	   {{0,   4}, {12,  8}},
	   {{14, 10}, {2,   6}}
	};

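/*
 * Compute the register offset for a given channel: channels within a UMC
 * instance are spaced by adev->umc.channel_offs, UMC instances within a
 * node by UMC_8_INST_DIST, and memory nodes by UMC_8_NODE_DIST.
 */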
static inline uint32_t get_umc_v8_10_reg_offset(struct amdgpu_device *adev,
					    uint32_t node_inst,
					    uint32_t umc_inst,
					    uint32_t ch_inst)
{
	return adev->umc.channel_offs * ch_inst + UMC_8_INST_DIST * umc_inst +
		UMC_8_NODE_DIST * node_inst;
}

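/* Reset the GeccErrCnt register of one channel to its initial count. */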
static int umc_v8_10_clear_error_count_per_channel(struct amdgpu_device *adev,
					uint32_t node_inst, uint32_t umc_inst,
					uint32_t ch_inst, void *data)
{
	uint32_t ecc_err_cnt_addr;
	uint32_t umc_reg_offset =
		get_umc_v8_10_reg_offset(adev, node_inst, umc_inst, ch_inst);

	ecc_err_cnt_addr =
		SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_GeccErrCnt);

	/* clear error count */
	WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
			UMC_V8_10_CE_CNT_INIT);

	return 0;
}

static void umc_v8_10_clear_error_count(struct amdgpu_device *adev)
{
	amdgpu_umc_loop_channels(adev,
		umc_v8_10_clear_error_count_per_channel, NULL);
}

static void umc_v8_10_query_correctable_error_count(struct amdgpu_device *adev,
						   uint32_t umc_reg_offset,
						   unsigned long *error_count)
{
	uint64_t mc_umc_status;
	uint32_t mc_umc_status_addr;

	/* UMC 8_10 registers */
	mc_umc_status_addr =
		SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);

	/* Rely on MCUMC_STATUS for correctable error counter
	 * MCUMC_STATUS is a 64 bit register
	 */
	mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
	if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
		*error_count += 1;
}

static void umc_v8_10_query_uncorrectable_error_count(struct amdgpu_device *adev,
						      uint32_t umc_reg_offset,
						      unsigned long *error_count)
{
	uint64_t mc_umc_status;
	uint32_t mc_umc_status_addr;

	mc_umc_status_addr = SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);

	/* Check the MCUMC_STATUS. */
	mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
	if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
	    (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
		*error_count += 1;
}

static int umc_v8_10_query_ecc_error_count(struct amdgpu_device *adev,
					uint32_t node_inst, uint32_t umc_inst,
					uint32_t ch_inst, void *data)
{
	struct ras_err_data *err_data = (struct ras_err_data *)data;
	uint32_t umc_reg_offset =
		get_umc_v8_10_reg_offset(adev, node_inst, umc_inst, ch_inst);

	umc_v8_10_query_correctable_error_count(adev,
					umc_reg_offset,
					&(err_data->ce_count));
	umc_v8_10_query_uncorrectable_error_count(adev,
					umc_reg_offset,
					&(err_data->ue_count));

	return 0;
}

static void umc_v8_10_query_ras_error_count(struct amdgpu_device *adev,
					   void *ras_error_status)
{
	amdgpu_umc_loop_channels(adev,
		umc_v8_10_query_ecc_error_count, ras_error_status);

	umc_v8_10_clear_error_count(adev);
}

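/*
 * Look up the column bit position for the given total channel count in
 * umc_v8_10_channelnum_map_colbit_table; return U32_MAX if the channel
 * count is not in the table.
 */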
static uint32_t umc_v8_10_get_col_bit(uint32_t channel_num)
{
	uint32_t t = 0;

	for (t = 0; t < ARRAY_SIZE(umc_v8_10_channelnum_map_colbit_table); t++)
		if (channel_num == umc_v8_10_channelnum_map_colbit_table[t].channel_num)
			return umc_v8_10_channelnum_map_colbit_table[t].col_bit;

	/* Failed to get col_bit. */
	return U32_MAX;
}

/*
 * Mapping normal address to soc physical address in swizzle mode.
 */
static int umc_v8_10_swizzle_mode_na_to_pa(struct amdgpu_device *adev,
					uint32_t channel_idx,
					uint64_t na, uint64_t *soc_pa)
{
	uint32_t channel_num = UMC_V8_10_TOTAL_CHANNEL_NUM(adev);
	uint32_t col_bit = umc_v8_10_get_col_bit(channel_num);
	uint64_t tmp_addr;

	if (col_bit == U32_MAX)
		return -1;

	tmp_addr = SWIZZLE_MODE_TMP_ADDR(na, channel_num, channel_idx);
	*soc_pa = SWIZZLE_MODE_ADDR_HI(tmp_addr, col_bit) |
		SWIZZLE_MODE_ADDR_MID(na, col_bit) |
		SWIZZLE_MODE_ADDR_LOW(tmp_addr, col_bit) |
		SWIZZLE_MODE_ADDR_LSB(na);

	return 0;
}

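/*
 * Translate a raw UMC error address into retired page addresses: mask off
 * the invalid low bits reported in AddrLsb, then iterate over all possible
 * values of the [C6 C5] column bits, convert each candidate normal address
 * to a soc physical address in swizzle mode, and record it as a retired
 * page in the error data.
 */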
static void umc_v8_10_convert_error_address(struct amdgpu_device *adev,
					    struct ras_err_data *err_data, uint64_t err_addr,
					    uint32_t ch_inst, uint32_t umc_inst,
					    uint32_t node_inst, uint64_t mc_umc_status)
{
	uint64_t na_err_addr_base;
	uint64_t na_err_addr, retired_page_addr;
	uint32_t channel_index, addr_lsb, col = 0;
	int ret = 0;

	channel_index =
		adev->umc.channel_idx_tbl[node_inst * adev->umc.umc_inst_num *
					adev->umc.channel_inst_num +
					umc_inst * adev->umc.channel_inst_num +
					ch_inst];

	/* the lowest lsb bits should be ignored */
	addr_lsb = REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrLsb);
	err_addr &= ~((0x1ULL << addr_lsb) - 1);
	na_err_addr_base = err_addr & ~(0x3ULL << UMC_V8_10_NA_C5_BIT);

	/* loop for all possibilities of [C6 C5] in normal address. */
	for (col = 0; col < UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM; col++) {
		na_err_addr = na_err_addr_base | (col << UMC_V8_10_NA_C5_BIT);

		/* Mapping normal error address to retired soc physical address. */
		ret = umc_v8_10_swizzle_mode_na_to_pa(adev, channel_index,
						na_err_addr, &retired_page_addr);
		if (ret) {
			dev_err(adev->dev, "Failed to map pa from umc na.\n");
			break;
		}
		dev_info(adev->dev, "Error Address(PA): 0x%llx\n",
			retired_page_addr);
		amdgpu_umc_fill_error_record(err_data, na_err_addr,
				retired_page_addr, channel_index, umc_inst);
	}
}

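/*
 * Read MCA_UMC_UMC0_MCUMC_STATUST0 for one channel; if a valid uncorrectable
 * ECC error with a valid address is reported, fetch the error address from
 * MCA_UMC_UMC0_MCUMC_ADDRT0, convert it to retired pages, and finally clear
 * the status register.
 */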
static int umc_v8_10_query_error_address(struct amdgpu_device *adev,
					uint32_t node_inst, uint32_t umc_inst,
					uint32_t ch_inst, void *data)
{
	uint64_t mc_umc_status_addr;
	uint64_t mc_umc_status, err_addr;
	uint64_t mc_umc_addrt0;
	struct ras_err_data *err_data = (struct ras_err_data *)data;
	uint32_t umc_reg_offset =
		get_umc_v8_10_reg_offset(adev, node_inst, umc_inst, ch_inst);

	mc_umc_status_addr =
		SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
	mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);

	if (mc_umc_status == 0)
		return 0;

	if (!err_data->err_addr) {
		/* clear umc status */
		WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
		return 0;
	}

	/* calculate error address if ue error is detected */
	if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrV) == 1 &&
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {

		mc_umc_addrt0 = SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);
		err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
		err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);

		umc_v8_10_convert_error_address(adev, err_data, err_addr,
					ch_inst, umc_inst, node_inst, mc_umc_status);
	}

	/* clear umc status */
	WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);

	return 0;
}

static void umc_v8_10_query_ras_error_address(struct amdgpu_device *adev,
					     void *ras_error_status)
{
	amdgpu_umc_loop_channels(adev,
		umc_v8_10_query_error_address, ras_error_status);
}

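/*
 * Per-channel error counter init: route correctable error reporting to an
 * APIC based interrupt via GeccErrCntSel and reset the GeccErrCnt counter
 * to its initial value.
 */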
static int umc_v8_10_err_cnt_init_per_channel(struct amdgpu_device *adev,
					uint32_t node_inst, uint32_t umc_inst,
					uint32_t ch_inst, void *data)
{
	uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
	uint32_t ecc_err_cnt_addr;
	uint32_t umc_reg_offset =
		get_umc_v8_10_reg_offset(adev, node_inst, umc_inst, ch_inst);

	ecc_err_cnt_sel_addr =
		SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_GeccErrCntSel);
	ecc_err_cnt_addr =
		SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_GeccErrCnt);

	ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4);

	/* set ce error interrupt type to APIC based interrupt */
	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_GeccErrCntSel,
					GeccErrInt, 0x1);
	WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
	/* set error count to initial value */
	WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V8_10_CE_CNT_INIT);

	return 0;
}

static void umc_v8_10_err_cnt_init(struct amdgpu_device *adev)
{
	amdgpu_umc_loop_channels(adev,
		umc_v8_10_err_cnt_init_per_channel, NULL);
}

static bool umc_v8_10_query_ras_poison_mode(struct amdgpu_device *adev)
{
	/*
	 * Force return true, because UMCCH0_0_GeccCtrl
	 * is not accessible from host side
	 */
	return true;
}

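/*
 * The ecc_info_* variants below read error status from the ECC info table
 * cached in the RAS context (ras->umc_ecc) rather than from the UMC
 * registers directly.
 */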
static void umc_v8_10_ecc_info_query_correctable_error_count(struct amdgpu_device *adev,
				      uint32_t node_inst, uint32_t umc_inst, uint32_t ch_inst,
				      unsigned long *error_count)
{
	uint64_t mc_umc_status;
	uint32_t eccinfo_table_idx;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	eccinfo_table_idx = node_inst * adev->umc.umc_inst_num *
				  adev->umc.channel_inst_num +
				  umc_inst * adev->umc.channel_inst_num +
				  ch_inst;

	/* check the MCUMC_STATUS */
	mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
	if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1) {
		*error_count += 1;
	}
}

static void umc_v8_10_ecc_info_query_uncorrectable_error_count(struct amdgpu_device *adev,
				      uint32_t node_inst, uint32_t umc_inst, uint32_t ch_inst,
				      unsigned long *error_count)
{
	uint64_t mc_umc_status;
	uint32_t eccinfo_table_idx;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	eccinfo_table_idx = node_inst * adev->umc.umc_inst_num *
				  adev->umc.channel_inst_num +
				  umc_inst * adev->umc.channel_inst_num +
				  ch_inst;

	/* check the MCUMC_STATUS */
	mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
	if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
	    (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1)) {
		*error_count += 1;
	}
}

static int umc_v8_10_ecc_info_query_ecc_error_count(struct amdgpu_device *adev,
					uint32_t node_inst, uint32_t umc_inst,
					uint32_t ch_inst, void *data)
{
	struct ras_err_data *err_data = (struct ras_err_data *)data;

	umc_v8_10_ecc_info_query_correctable_error_count(adev,
					node_inst, umc_inst, ch_inst,
					&(err_data->ce_count));
	umc_v8_10_ecc_info_query_uncorrectable_error_count(adev,
					node_inst, umc_inst, ch_inst,
					&(err_data->ue_count));
	return 0;
}

static void umc_v8_10_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
					void *ras_error_status)
{
	amdgpu_umc_loop_channels(adev,
		umc_v8_10_ecc_info_query_ecc_error_count, ras_error_status);
}

static int umc_v8_10_ecc_info_query_error_address(struct amdgpu_device *adev,
					uint32_t node_inst, uint32_t umc_inst,
					uint32_t ch_inst, void *data)
{
	uint32_t eccinfo_table_idx;
	uint64_t mc_umc_status, err_addr;
	struct ras_err_data *err_data = (struct ras_err_data *)data;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	eccinfo_table_idx = node_inst * adev->umc.umc_inst_num *
				  adev->umc.channel_inst_num +
				  umc_inst * adev->umc.channel_inst_num +
				  ch_inst;

	mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;

	if (mc_umc_status == 0)
		return 0;

	if (!err_data->err_addr)
		return 0;

	/* calculate error address if ue error is detected */
	if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrV) == 1 &&
	    (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1)) {

		err_addr = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_addr;
		err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);

		umc_v8_10_convert_error_address(adev, err_data, err_addr,
					ch_inst, umc_inst, node_inst, mc_umc_status);
	}

	return 0;
}

static void umc_v8_10_ecc_info_query_ras_error_address(struct amdgpu_device *adev,
					void *ras_error_status)
{
	amdgpu_umc_loop_channels(adev,
		umc_v8_10_ecc_info_query_error_address, ras_error_status);
}

static void umc_v8_10_set_eeprom_table_version(struct amdgpu_ras_eeprom_table_header *hdr)
{
	hdr->version = RAS_TABLE_VER_V2_1;
}

const struct amdgpu_ras_block_hw_ops umc_v8_10_ras_hw_ops = {
	.query_ras_error_count = umc_v8_10_query_ras_error_count,
	.query_ras_error_address = umc_v8_10_query_ras_error_address,
};

struct amdgpu_umc_ras umc_v8_10_ras = {
	.ras_block = {
		.hw_ops = &umc_v8_10_ras_hw_ops,
	},
	.err_cnt_init = umc_v8_10_err_cnt_init,
	.query_ras_poison_mode = umc_v8_10_query_ras_poison_mode,
	.ecc_info_query_ras_error_count = umc_v8_10_ecc_info_query_ras_error_count,
	.ecc_info_query_ras_error_address = umc_v8_10_ecc_info_query_ras_error_address,
	.set_eeprom_table_version = umc_v8_10_set_eeprom_table_version,
};