/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "umc_v6_7.h"
#include "amdgpu_ras.h"
#include "amdgpu_umc.h"
#include "amdgpu.h"
#include "umc/umc_6_7_0_offset.h"
#include "umc/umc_6_7_0_sh_mask.h"
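/* Two channel index remap tables: each row maps the channels of one UMC
 * instance to the physical channel index used during address decode.
 * Which table applies to a given board is decided outside this file,
 * where adev->umc.channel_idx_tbl is set up at init time.
 */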
const uint32_t
	umc_v6_7_channel_idx_tbl_second[UMC_V6_7_UMC_INSTANCE_NUM][UMC_V6_7_CHANNEL_INSTANCE_NUM] = {
		{28, 20, 24, 16, 12, 4, 8, 0},
		{6, 30, 2, 26, 22, 14, 18, 10},
		{19, 11, 15, 7, 3, 27, 31, 23},
		{9, 1, 5, 29, 25, 17, 21, 13}
};
const uint32_t
	umc_v6_7_channel_idx_tbl_first[UMC_V6_7_UMC_INSTANCE_NUM][UMC_V6_7_CHANNEL_INSTANCE_NUM] = {
		{19, 11, 15, 7, 3, 27, 31, 23},
		{9, 1, 5, 29, 25, 17, 21, 13},
		{28, 20, 24, 16, 12, 4, 8, 0},
		{6, 30, 2, 26, 22, 14, 18, 10},
};
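/* Compute the dword offset of one channel's register block, to be added
 * to a SOC15_REG_OFFSET() base before the "* 4" byte-address conversion
 * used by the PCIE register accessors below.
 */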
static inline uint32_t get_umc_v6_7_reg_offset(struct amdgpu_device *adev,
					      uint32_t umc_inst,
					      uint32_t ch_inst)
{
	uint32_t index = umc_inst * adev->umc.channel_inst_num + ch_inst;

	/* adjust umc and channel index offset,
	 * the register address is not linear on each umc instance */
	umc_inst = index / 4;
	ch_inst = index % 4;

	return adev->umc.channel_offs * ch_inst + UMC_V6_7_INST_DIST * umc_inst;
}
static inline uint32_t get_umc_v6_7_channel_index(struct amdgpu_device *adev,
					      uint32_t umc_inst, uint32_t ch_inst)
{
	return adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
}
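/* Dump the raw MCA bank registers (STATUS/IPID/SYND/MISC0) for one
 * channel so the full error record lands in the kernel log; only
 * registers that carry a non-zero value are printed.
 */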
static void umc_v6_7_query_error_status_helper(struct amdgpu_device *adev,
						  uint64_t mc_umc_status, uint32_t umc_reg_offset)
{
	uint32_t mc_umc_addr;
	uint64_t reg_value;

	if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1)
		dev_info(adev->dev, "Deferred error, no user action is needed.\n");

	if (mc_umc_status)
		dev_info(adev->dev, "MCA STATUS 0x%llx, umc_reg_offset 0x%x\n", mc_umc_status, umc_reg_offset);

	/* print IPID registers value */
	mc_umc_addr =
		SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_IPIDT0);
	reg_value = RREG64_PCIE((mc_umc_addr + umc_reg_offset) * 4);
	if (reg_value)
		dev_info(adev->dev, "MCA IPID 0x%llx, umc_reg_offset 0x%x\n", reg_value, umc_reg_offset);

	/* print SYND registers value */
	mc_umc_addr =
		SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_SYNDT0);
	reg_value = RREG64_PCIE((mc_umc_addr + umc_reg_offset) * 4);
	if (reg_value)
		dev_info(adev->dev, "MCA SYND 0x%llx, umc_reg_offset 0x%x\n", reg_value, umc_reg_offset);

	/* print MISC0 registers value */
	mc_umc_addr =
		SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_MISC0T0);
	reg_value = RREG64_PCIE((mc_umc_addr + umc_reg_offset) * 4);
	if (reg_value)
		dev_info(adev->dev, "MCA MISC0 0x%llx, umc_reg_offset 0x%x\n", reg_value, umc_reg_offset);
}
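/* The ecc_info_* query paths below do not read UMC registers directly;
 * they decode the ECC status table that firmware reports into
 * ras->umc_ecc, which is the safe path when direct register access is
 * unavailable.
 */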
static void umc_v6_7_ecc_info_query_correctable_error_count(struct amdgpu_device *adev,
						   uint32_t umc_inst, uint32_t ch_inst,
						   unsigned long *error_count)
{
	uint64_t mc_umc_status;
	uint32_t eccinfo_table_idx;
	uint32_t umc_reg_offset;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	umc_reg_offset = get_umc_v6_7_reg_offset(adev,
						umc_inst, ch_inst);

	eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst;
	/* check for SRAM correctable error
	 * MCUMC_STATUS is a 64 bit register */
	mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
	if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1) {
		*error_count += 1;

		umc_v6_7_query_error_status_helper(adev, mc_umc_status, umc_reg_offset);

		if (ras->umc_ecc.record_ce_addr_supported) {
			uint64_t err_addr, soc_pa;
			uint32_t channel_index =
				adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];

			err_addr = ras->umc_ecc.ecc[eccinfo_table_idx].mca_ceumc_addr;
			err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
			/* translate umc channel address to soc pa, 3 parts are included */
			soc_pa = ADDR_OF_8KB_BLOCK(err_addr) |
					ADDR_OF_256B_BLOCK(channel_index) |
					OFFSET_IN_256B_BLOCK(err_addr);

			/* The umc channel bits are not original values, they are hashed */
			SET_CHANNEL_HASH(channel_index, soc_pa);

			dev_info(adev->dev, "Error Address(PA): 0x%llx\n", soc_pa);
		}
	}
}
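/* A bank counts as uncorrectable when it is valid and any of Deferred,
 * UECC, PCC, UC or TCC is set in MCA_UMC_UMC0_MCUMC_STATUST0.
 */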
static void umc_v6_7_ecc_info_querry_uncorrectable_error_count(struct amdgpu_device *adev,
						      uint32_t umc_inst, uint32_t ch_inst,
						      unsigned long *error_count)
{
	uint64_t mc_umc_status;
	uint32_t eccinfo_table_idx;
	uint32_t umc_reg_offset;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	umc_reg_offset = get_umc_v6_7_reg_offset(adev,
						umc_inst, ch_inst);

	eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst;
	/* check the MCUMC_STATUS */
	mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
	if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
	    (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1)) {
		*error_count += 1;

		umc_v6_7_query_error_status_helper(adev, mc_umc_status, umc_reg_offset);
	}
}
static void umc_v6_7_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
					void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;

	uint32_t umc_inst = 0;
	uint32_t ch_inst = 0;

	/*TODO: driver needs to toggle DF Cstate to ensure
	 * safe access of UMC registers. Will add the protection */
	LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
		umc_v6_7_ecc_info_query_correctable_error_count(adev,
						      umc_inst, ch_inst,
						      &(err_data->ce_count));
		umc_v6_7_ecc_info_querry_uncorrectable_error_count(adev,
							  umc_inst, ch_inst,
							  &(err_data->ue_count));
	}
}
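/* The reported address is normalized: column bits [C4 C3 C2] and row bit
 * R14 cannot be recovered from it. After rebuilding the SoC physical
 * address (8KB block | hashed channel bits | offset within 256B), every
 * combination of those bits is retired, so one error fans out to
 * UMC_V6_7_NA_MAP_PA_NUM column values, each with both R14 polarities.
 */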
static void umc_v6_7_convert_error_address(struct amdgpu_device *adev,
					    struct ras_err_data *err_data, uint64_t err_addr,
					    uint32_t ch_inst, uint32_t umc_inst)
{
	uint32_t channel_index;
	uint64_t soc_pa, retired_page, column;

	channel_index =
		adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
	/* translate umc channel address to soc pa, 3 parts are included */
	soc_pa = ADDR_OF_8KB_BLOCK(err_addr) |
			ADDR_OF_256B_BLOCK(channel_index) |
			OFFSET_IN_256B_BLOCK(err_addr);

	/* The umc channel bits are not original values, they are hashed */
	SET_CHANNEL_HASH(channel_index, soc_pa);

	/* clear [C4 C3 C2] in soc physical address */
	soc_pa &= ~(0x7ULL << UMC_V6_7_PA_C2_BIT);

	/* loop for all possibilities of [C4 C3 C2] */
	for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) {
		retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT);
		dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
		amdgpu_umc_fill_error_record(err_data, err_addr,
			retired_page, channel_index, umc_inst);

		/* shift R14 bit */
		retired_page ^= (0x1ULL << UMC_V6_7_PA_R14_BIT);
		dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
		amdgpu_umc_fill_error_record(err_data, err_addr,
			retired_page, channel_index, umc_inst);
	}
}
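/* Firmware-reported variant of the UE address query: the raw MCA address
 * comes from the cached ecc table instead of MCA_UMC_UMC0_MCUMC_ADDRT0.
 */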
static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev,
					 struct ras_err_data *err_data,
					 uint32_t ch_inst,
					 uint32_t umc_inst)
{
	uint64_t mc_umc_status, err_addr;
	uint32_t eccinfo_table_idx;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst;
	mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;

	if (mc_umc_status == 0)
		return;

	if (!err_data->err_addr)
		return;

	/* calculate error address if ue error is detected */
	if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {

		err_addr = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_addr;
		err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);

		umc_v6_7_convert_error_address(adev, err_data, err_addr,
					ch_inst, umc_inst);
	}
}
static void umc_v6_7_ecc_info_query_ras_error_address(struct amdgpu_device *adev,
					void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;

	uint32_t umc_inst = 0;
	uint32_t ch_inst = 0;

	/*TODO: driver needs to toggle DF Cstate to ensure
	 * safe access of UMC registers. Will add the protection
	 * when firmware interface is ready */
	LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
		umc_v6_7_ecc_info_query_error_address(adev,
						     err_data,
						     ch_inst,
						     umc_inst);
	}
}
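/* Each channel keeps separate correctable-error counters for the lower
 * and higher memory chip, selected via EccErrCntCsSel. The counters are
 * preloaded with UMC_V6_7_CE_CNT_INIT, so the real count is the value
 * read back minus that bias.
 */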
static void umc_v6_7_query_correctable_error_count(struct amdgpu_device *adev,
						   uint32_t umc_reg_offset,
						   unsigned long *error_count,
						   uint32_t ch_inst,
						   uint32_t umc_inst)
{
	uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
	uint32_t ecc_err_cnt, ecc_err_cnt_addr;
	uint64_t mc_umc_status;
	uint32_t mc_umc_status_addr;

	/* UMC 6_1_1 registers */
	ecc_err_cnt_sel_addr =
		SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_EccErrCntSel);
	ecc_err_cnt_addr =
		SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_EccErrCnt);
	mc_umc_status_addr =
		SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);

	/* select the lower chip and check the error count */
	ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4);
	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
					EccErrCntCsSel, 0);
	WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);

	ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
	*error_count +=
		(REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
		 UMC_V6_7_CE_CNT_INIT);

	/* select the higher chip and check the err counter */
	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
					EccErrCntCsSel, 1);
	WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);

	ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
	*error_count +=
		(REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
		 UMC_V6_7_CE_CNT_INIT);

	/* check for SRAM correctable error
	 * MCUMC_STATUS is a 64 bit register */
	mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
	if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1) {
		*error_count += 1;

		umc_v6_7_query_error_status_helper(adev, mc_umc_status, umc_reg_offset);
		{
			uint64_t err_addr, soc_pa;
			uint32_t mc_umc_addrt0;
			uint32_t channel_index;

			mc_umc_addrt0 =
				SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);

			channel_index =
				adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];

			err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
			err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);

			/* translate umc channel address to soc pa, 3 parts are included */
			soc_pa = ADDR_OF_8KB_BLOCK(err_addr) |
					ADDR_OF_256B_BLOCK(channel_index) |
					OFFSET_IN_256B_BLOCK(err_addr);

			/* The umc channel bits are not original values, they are hashed */
			SET_CHANNEL_HASH(channel_index, soc_pa);

			dev_info(adev->dev, "Error Address(PA): 0x%llx\n", soc_pa);
		}
	}
}
static void umc_v6_7_querry_uncorrectable_error_count(struct amdgpu_device *adev,
						      uint32_t umc_reg_offset,
						      unsigned long *error_count)
{
	uint64_t mc_umc_status;
	uint32_t mc_umc_status_addr;

	mc_umc_status_addr =
		SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);

	/* check the MCUMC_STATUS */
	mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
	if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
	    (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1)) {
		*error_count += 1;

		umc_v6_7_query_error_status_helper(adev, mc_umc_status, umc_reg_offset);
	}
}
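/* Rearm both per-chip counters by writing the initial bias back,
 * matching the UMC_V6_7_CE_CNT_INIT subtraction done on the read side.
 */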
static void umc_v6_7_reset_error_count_per_channel(struct amdgpu_device *adev,
					uint32_t umc_reg_offset)
{
	uint32_t ecc_err_cnt_addr;
	uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;

	ecc_err_cnt_sel_addr =
		SOC15_REG_OFFSET(UMC, 0,
				regUMCCH0_0_EccErrCntSel);
	ecc_err_cnt_addr =
		SOC15_REG_OFFSET(UMC, 0,
				regUMCCH0_0_EccErrCnt);

	/* select the lower chip */
	ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr +
					umc_reg_offset) * 4);
	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel,
					UMCCH0_0_EccErrCntSel,
					EccErrCntCsSel, 0);
	WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4,
			ecc_err_cnt_sel);

	/* clear lower chip error count */
	WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
			UMC_V6_7_CE_CNT_INIT);

	/* select the higher chip */
	ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr +
					umc_reg_offset) * 4);
	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel,
					UMCCH0_0_EccErrCntSel,
					EccErrCntCsSel, 1);
	WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4,
			ecc_err_cnt_sel);

	/* clear higher chip error count */
	WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
			UMC_V6_7_CE_CNT_INIT);
}
static void umc_v6_7_reset_error_count(struct amdgpu_device *adev)
{
	uint32_t umc_inst = 0;
	uint32_t ch_inst = 0;
	uint32_t umc_reg_offset = 0;

	LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
		umc_reg_offset = get_umc_v6_7_reg_offset(adev,
							 umc_inst, ch_inst);

		umc_v6_7_reset_error_count_per_channel(adev, umc_reg_offset);
	}
}
static void umc_v6_7_query_ras_error_count(struct amdgpu_device *adev,
					   void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;

	uint32_t umc_inst = 0;
	uint32_t ch_inst = 0;
	uint32_t umc_reg_offset = 0;

	/*TODO: driver needs to toggle DF Cstate to ensure
	 * safe access of UMC registers. Will add the protection */
	LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
		umc_reg_offset = get_umc_v6_7_reg_offset(adev,
							 umc_inst, ch_inst);
		umc_v6_7_query_correctable_error_count(adev,
						       umc_reg_offset,
						       &(err_data->ce_count),
						       ch_inst, umc_inst);
		umc_v6_7_querry_uncorrectable_error_count(adev,
							  umc_reg_offset,
							  &(err_data->ue_count));
	}

	umc_v6_7_reset_error_count(adev);
}
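/* Read and decode the error address for one channel. MCUMC_STATUST0 is
 * cleared on every exit path that consumed it, including the early
 * return when no error-address buffer is provided, so a stale status is
 * never reported twice.
 */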
static void umc_v6_7_query_error_address(struct amdgpu_device *adev,
					 struct ras_err_data *err_data,
					 uint32_t umc_reg_offset, uint32_t ch_inst,
					 uint32_t umc_inst)
{
	uint32_t mc_umc_status_addr;
	uint64_t mc_umc_status = 0, mc_umc_addrt0, err_addr;

	mc_umc_status_addr =
		SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
	mc_umc_addrt0 =
		SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);

	mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);

	if (mc_umc_status == 0)
		return;

	if (!err_data->err_addr) {
		/* clear umc status */
		WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
		return;
	}

	/* calculate error address if ue error is detected */
	if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
		err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
		err_addr =
			REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);

		umc_v6_7_convert_error_address(adev, err_data, err_addr,
					ch_inst, umc_inst);
	}

	/* clear umc status */
	WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
}
static void umc_v6_7_query_ras_error_address(struct amdgpu_device *adev,
					     void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;

	uint32_t umc_inst = 0;
	uint32_t ch_inst = 0;
	uint32_t umc_reg_offset = 0;

	/*TODO: driver needs to toggle DF Cstate to ensure
	 * safe access of UMC registers. Will add the protection
	 * when firmware interface is ready */
	LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
		umc_reg_offset = get_umc_v6_7_reg_offset(adev,
							 umc_inst, ch_inst);
		umc_v6_7_query_error_address(adev,
					     err_data,
					     umc_reg_offset, ch_inst,
					     umc_inst);
	}
}
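/* Poison mode is inferred from UCFatalEn of instance 0 channel 0: if
 * fatal error reporting is enabled there, the UMC is treated as running
 * in fatal-error mode rather than poison mode, hence the inverted
 * return value.
 */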
static uint32_t umc_v6_7_query_ras_poison_mode_per_channel(
						struct amdgpu_device *adev,
						uint32_t umc_reg_offset)
{
	uint32_t ecc_ctrl_addr, ecc_ctrl;

	ecc_ctrl_addr =
		SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_EccCtrl);
	ecc_ctrl = RREG32_PCIE((ecc_ctrl_addr +
					umc_reg_offset) * 4);

	return REG_GET_FIELD(ecc_ctrl, UMCCH0_0_EccCtrl, UCFatalEn);
}
static bool umc_v6_7_query_ras_poison_mode(struct amdgpu_device *adev)
{
	uint32_t umc_reg_offset = 0;

	/* Enabling fatal error in umc instance0 channel0 will be
	 * considered as fatal error mode
	 */
	umc_reg_offset = get_umc_v6_7_reg_offset(adev, 0, 0);
	return !umc_v6_7_query_ras_poison_mode_per_channel(adev, umc_reg_offset);
}
const struct amdgpu_ras_block_hw_ops umc_v6_7_ras_hw_ops = {
	.query_ras_error_count = umc_v6_7_query_ras_error_count,
	.query_ras_error_address = umc_v6_7_query_ras_error_address,
};
struct amdgpu_umc_ras umc_v6_7_ras = {
	.ras_block = {
		.hw_ops = &umc_v6_7_ras_hw_ops,
	},
	.query_ras_poison_mode = umc_v6_7_query_ras_poison_mode,
	.ecc_info_query_ras_error_count = umc_v6_7_ecc_info_query_ras_error_count,
	.ecc_info_query_ras_error_address = umc_v6_7_ecc_info_query_ras_error_address,
	.convert_ras_error_address = umc_v6_7_convert_error_address,
};