Merge tag 'v5.3-rc3' into drm-next-5.4
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 1cf639a511783c9cb1280f86a6ddbc3a5577c349..52a6fd12e26655fd50731d864c0d8101113ceeff 100644 (file)
@@ -104,6 +104,390 @@ MODULE_FIRMWARE("amdgpu/raven2_mec2.bin");
 MODULE_FIRMWARE("amdgpu/raven2_rlc.bin");
 MODULE_FIRMWARE("amdgpu/raven_kicker_rlc.bin");
 
+MODULE_FIRMWARE("amdgpu/arcturus_mec.bin");
+MODULE_FIRMWARE("amdgpu/arcturus_mec2.bin");
+MODULE_FIRMWARE("amdgpu/arcturus_rlc.bin");
+
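+/* Local definitions for the Arcturus TCP channel-steering registers,
+ * presumably because they are not yet in the generated GC register
+ * headers at this point in the tree. The offsets are dword offsets
+ * within the GC block; *_BASE_IDX selects the address-space segment
+ * that the SOC15 register helpers add to the offset.
+ */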
+#define mmTCP_CHAN_STEER_0_ARCT                                0x0b03
+#define mmTCP_CHAN_STEER_0_ARCT_BASE_IDX                       0
+#define mmTCP_CHAN_STEER_1_ARCT                                0x0b04
+#define mmTCP_CHAN_STEER_1_ARCT_BASE_IDX                       0
+#define mmTCP_CHAN_STEER_2_ARCT                                0x0b09
+#define mmTCP_CHAN_STEER_2_ARCT_BASE_IDX                       0
+#define mmTCP_CHAN_STEER_3_ARCT                                0x0b0a
+#define mmTCP_CHAN_STEER_3_ARCT_BASE_IDX                       0
+#define mmTCP_CHAN_STEER_4_ARCT                                0x0b0b
+#define mmTCP_CHAN_STEER_4_ARCT_BASE_IDX                       0
+#define mmTCP_CHAN_STEER_5_ARCT                                0x0b0c
+#define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX                       0
+
+enum ta_ras_gfx_subblock {
+       /*CPC*/
+       TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
+       TA_RAS_BLOCK__GFX_CPC_SCRATCH = TA_RAS_BLOCK__GFX_CPC_INDEX_START,
+       TA_RAS_BLOCK__GFX_CPC_UCODE,
+       TA_RAS_BLOCK__GFX_DC_STATE_ME1,
+       TA_RAS_BLOCK__GFX_DC_CSINVOC_ME1,
+       TA_RAS_BLOCK__GFX_DC_RESTORE_ME1,
+       TA_RAS_BLOCK__GFX_DC_STATE_ME2,
+       TA_RAS_BLOCK__GFX_DC_CSINVOC_ME2,
+       TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
+       TA_RAS_BLOCK__GFX_CPC_INDEX_END = TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
+       /* CPF*/
+       TA_RAS_BLOCK__GFX_CPF_INDEX_START,
+       TA_RAS_BLOCK__GFX_CPF_ROQ_ME2 = TA_RAS_BLOCK__GFX_CPF_INDEX_START,
+       TA_RAS_BLOCK__GFX_CPF_ROQ_ME1,
+       TA_RAS_BLOCK__GFX_CPF_TAG,
+       TA_RAS_BLOCK__GFX_CPF_INDEX_END = TA_RAS_BLOCK__GFX_CPF_TAG,
+       /* CPG*/
+       TA_RAS_BLOCK__GFX_CPG_INDEX_START,
+       TA_RAS_BLOCK__GFX_CPG_DMA_ROQ = TA_RAS_BLOCK__GFX_CPG_INDEX_START,
+       TA_RAS_BLOCK__GFX_CPG_DMA_TAG,
+       TA_RAS_BLOCK__GFX_CPG_TAG,
+       TA_RAS_BLOCK__GFX_CPG_INDEX_END = TA_RAS_BLOCK__GFX_CPG_TAG,
+       /* GDS*/
+       TA_RAS_BLOCK__GFX_GDS_INDEX_START,
+       TA_RAS_BLOCK__GFX_GDS_MEM = TA_RAS_BLOCK__GFX_GDS_INDEX_START,
+       TA_RAS_BLOCK__GFX_GDS_INPUT_QUEUE,
+       TA_RAS_BLOCK__GFX_GDS_OA_PHY_CMD_RAM_MEM,
+       TA_RAS_BLOCK__GFX_GDS_OA_PHY_DATA_RAM_MEM,
+       TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
+       TA_RAS_BLOCK__GFX_GDS_INDEX_END = TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
+       /* SPI*/
+       TA_RAS_BLOCK__GFX_SPI_SR_MEM,
+       /* SQ*/
+       TA_RAS_BLOCK__GFX_SQ_INDEX_START,
+       TA_RAS_BLOCK__GFX_SQ_SGPR = TA_RAS_BLOCK__GFX_SQ_INDEX_START,
+       TA_RAS_BLOCK__GFX_SQ_LDS_D,
+       TA_RAS_BLOCK__GFX_SQ_LDS_I,
+       TA_RAS_BLOCK__GFX_SQ_VGPR, /* VGPR = SP*/
+       TA_RAS_BLOCK__GFX_SQ_INDEX_END = TA_RAS_BLOCK__GFX_SQ_VGPR,
+       /* SQC (3 ranges)*/
+       TA_RAS_BLOCK__GFX_SQC_INDEX_START,
+       /* SQC range 0*/
+       TA_RAS_BLOCK__GFX_SQC_INDEX0_START = TA_RAS_BLOCK__GFX_SQC_INDEX_START,
+       TA_RAS_BLOCK__GFX_SQC_INST_UTCL1_LFIFO =
+               TA_RAS_BLOCK__GFX_SQC_INDEX0_START,
+       TA_RAS_BLOCK__GFX_SQC_DATA_CU0_WRITE_DATA_BUF,
+       TA_RAS_BLOCK__GFX_SQC_DATA_CU0_UTCL1_LFIFO,
+       TA_RAS_BLOCK__GFX_SQC_DATA_CU1_WRITE_DATA_BUF,
+       TA_RAS_BLOCK__GFX_SQC_DATA_CU1_UTCL1_LFIFO,
+       TA_RAS_BLOCK__GFX_SQC_DATA_CU2_WRITE_DATA_BUF,
+       TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
+       TA_RAS_BLOCK__GFX_SQC_INDEX0_END =
+               TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
+       /* SQC range 1*/
+       TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
+       TA_RAS_BLOCK__GFX_SQC_INST_BANKA_TAG_RAM =
+               TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
+       TA_RAS_BLOCK__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO,
+       TA_RAS_BLOCK__GFX_SQC_INST_BANKA_MISS_FIFO,
+       TA_RAS_BLOCK__GFX_SQC_INST_BANKA_BANK_RAM,
+       TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_TAG_RAM,
+       TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_HIT_FIFO,
+       TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_MISS_FIFO,
+       TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM,
+       TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
+       TA_RAS_BLOCK__GFX_SQC_INDEX1_END =
+               TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
+       /* SQC range 2*/
+       TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
+       TA_RAS_BLOCK__GFX_SQC_INST_BANKB_TAG_RAM =
+               TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
+       TA_RAS_BLOCK__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO,
+       TA_RAS_BLOCK__GFX_SQC_INST_BANKB_MISS_FIFO,
+       TA_RAS_BLOCK__GFX_SQC_INST_BANKB_BANK_RAM,
+       TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_TAG_RAM,
+       TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_HIT_FIFO,
+       TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_MISS_FIFO,
+       TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM,
+       TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
+       TA_RAS_BLOCK__GFX_SQC_INDEX2_END =
+               TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
+       TA_RAS_BLOCK__GFX_SQC_INDEX_END = TA_RAS_BLOCK__GFX_SQC_INDEX2_END,
+       /* TA*/
+       TA_RAS_BLOCK__GFX_TA_INDEX_START,
+       TA_RAS_BLOCK__GFX_TA_FS_DFIFO = TA_RAS_BLOCK__GFX_TA_INDEX_START,
+       TA_RAS_BLOCK__GFX_TA_FS_AFIFO,
+       TA_RAS_BLOCK__GFX_TA_FL_LFIFO,
+       TA_RAS_BLOCK__GFX_TA_FX_LFIFO,
+       TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
+       TA_RAS_BLOCK__GFX_TA_INDEX_END = TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
+       /* TCA*/
+       TA_RAS_BLOCK__GFX_TCA_INDEX_START,
+       TA_RAS_BLOCK__GFX_TCA_HOLE_FIFO = TA_RAS_BLOCK__GFX_TCA_INDEX_START,
+       TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
+       TA_RAS_BLOCK__GFX_TCA_INDEX_END = TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
+       /* TCC (5 sub-ranges)*/
+       TA_RAS_BLOCK__GFX_TCC_INDEX_START,
+       /* TCC range 0*/
+       TA_RAS_BLOCK__GFX_TCC_INDEX0_START = TA_RAS_BLOCK__GFX_TCC_INDEX_START,
+       TA_RAS_BLOCK__GFX_TCC_CACHE_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX0_START,
+       TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_0_1,
+       TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_0,
+       TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_1,
+       TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_0,
+       TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_1,
+       TA_RAS_BLOCK__GFX_TCC_HIGH_RATE_TAG,
+       TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
+       TA_RAS_BLOCK__GFX_TCC_INDEX0_END = TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
+       /* TCC range 1*/
+       TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
+       TA_RAS_BLOCK__GFX_TCC_IN_USE_DEC = TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
+       TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
+       TA_RAS_BLOCK__GFX_TCC_INDEX1_END =
+               TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
+       /* TCC range 2*/
+       TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
+       TA_RAS_BLOCK__GFX_TCC_RETURN_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
+       TA_RAS_BLOCK__GFX_TCC_RETURN_CONTROL,
+       TA_RAS_BLOCK__GFX_TCC_UC_ATOMIC_FIFO,
+       TA_RAS_BLOCK__GFX_TCC_WRITE_RETURN,
+       TA_RAS_BLOCK__GFX_TCC_WRITE_CACHE_READ,
+       TA_RAS_BLOCK__GFX_TCC_SRC_FIFO,
+       TA_RAS_BLOCK__GFX_TCC_SRC_FIFO_NEXT_RAM,
+       TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
+       TA_RAS_BLOCK__GFX_TCC_INDEX2_END =
+               TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
+       /* TCC range 3*/
+       TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
+       TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO = TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
+       TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
+       TA_RAS_BLOCK__GFX_TCC_INDEX3_END =
+               TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
+       /* TCC range 4*/
+       TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
+       TA_RAS_BLOCK__GFX_TCC_WRRET_TAG_WRITE_RETURN =
+               TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
+       TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
+       TA_RAS_BLOCK__GFX_TCC_INDEX4_END =
+               TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
+       TA_RAS_BLOCK__GFX_TCC_INDEX_END = TA_RAS_BLOCK__GFX_TCC_INDEX4_END,
+       /* TCI*/
+       TA_RAS_BLOCK__GFX_TCI_WRITE_RAM,
+       /* TCP*/
+       TA_RAS_BLOCK__GFX_TCP_INDEX_START,
+       TA_RAS_BLOCK__GFX_TCP_CACHE_RAM = TA_RAS_BLOCK__GFX_TCP_INDEX_START,
+       TA_RAS_BLOCK__GFX_TCP_LFIFO_RAM,
+       TA_RAS_BLOCK__GFX_TCP_CMD_FIFO,
+       TA_RAS_BLOCK__GFX_TCP_VM_FIFO,
+       TA_RAS_BLOCK__GFX_TCP_DB_RAM,
+       TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO0,
+       TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
+       TA_RAS_BLOCK__GFX_TCP_INDEX_END = TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
+       /* TD*/
+       TA_RAS_BLOCK__GFX_TD_INDEX_START,
+       TA_RAS_BLOCK__GFX_TD_SS_FIFO_LO = TA_RAS_BLOCK__GFX_TD_INDEX_START,
+       TA_RAS_BLOCK__GFX_TD_SS_FIFO_HI,
+       TA_RAS_BLOCK__GFX_TD_CS_FIFO,
+       TA_RAS_BLOCK__GFX_TD_INDEX_END = TA_RAS_BLOCK__GFX_TD_CS_FIFO,
+       /* EA (3 sub-ranges)*/
+       TA_RAS_BLOCK__GFX_EA_INDEX_START,
+       /* EA range 0*/
+       TA_RAS_BLOCK__GFX_EA_INDEX0_START = TA_RAS_BLOCK__GFX_EA_INDEX_START,
+       TA_RAS_BLOCK__GFX_EA_DRAMRD_CMDMEM = TA_RAS_BLOCK__GFX_EA_INDEX0_START,
+       TA_RAS_BLOCK__GFX_EA_DRAMWR_CMDMEM,
+       TA_RAS_BLOCK__GFX_EA_DRAMWR_DATAMEM,
+       TA_RAS_BLOCK__GFX_EA_RRET_TAGMEM,
+       TA_RAS_BLOCK__GFX_EA_WRET_TAGMEM,
+       TA_RAS_BLOCK__GFX_EA_GMIRD_CMDMEM,
+       TA_RAS_BLOCK__GFX_EA_GMIWR_CMDMEM,
+       TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
+       TA_RAS_BLOCK__GFX_EA_INDEX0_END = TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
+       /* EA range 1*/
+       TA_RAS_BLOCK__GFX_EA_INDEX1_START,
+       TA_RAS_BLOCK__GFX_EA_DRAMRD_PAGEMEM = TA_RAS_BLOCK__GFX_EA_INDEX1_START,
+       TA_RAS_BLOCK__GFX_EA_DRAMWR_PAGEMEM,
+       TA_RAS_BLOCK__GFX_EA_IORD_CMDMEM,
+       TA_RAS_BLOCK__GFX_EA_IOWR_CMDMEM,
+       TA_RAS_BLOCK__GFX_EA_IOWR_DATAMEM,
+       TA_RAS_BLOCK__GFX_EA_GMIRD_PAGEMEM,
+       TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
+       TA_RAS_BLOCK__GFX_EA_INDEX1_END = TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
+       /* EA range 2*/
+       TA_RAS_BLOCK__GFX_EA_INDEX2_START,
+       TA_RAS_BLOCK__GFX_EA_MAM_D0MEM = TA_RAS_BLOCK__GFX_EA_INDEX2_START,
+       TA_RAS_BLOCK__GFX_EA_MAM_D1MEM,
+       TA_RAS_BLOCK__GFX_EA_MAM_D2MEM,
+       TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
+       TA_RAS_BLOCK__GFX_EA_INDEX2_END = TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
+       TA_RAS_BLOCK__GFX_EA_INDEX_END = TA_RAS_BLOCK__GFX_EA_INDEX2_END,
+       /* UTC VM L2 bank*/
+       TA_RAS_BLOCK__UTC_VML2_BANK_CACHE,
+       /* UTC VM walker*/
+       TA_RAS_BLOCK__UTC_VML2_WALKER,
+       /* UTC ATC L2 2MB cache*/
+       TA_RAS_BLOCK__UTC_ATCL2_CACHE_2M_BANK,
+       /* UTC ATC L2 4KB cache*/
+       TA_RAS_BLOCK__UTC_ATCL2_CACHE_4K_BANK,
+       TA_RAS_BLOCK__GFX_MAX
+};
+
+struct ras_gfx_subblock {
+       unsigned char *name;
+       int ta_subblock;
+       int hw_supported_error_type;
+       int sw_supported_error_type;
+};
+
+#define AMDGPU_RAS_SUB_BLOCK(subblock, a, b, c, d, e, f, g, h)                             \
+       [AMDGPU_RAS_BLOCK__##subblock] = {                                     \
+               #subblock,                                                     \
+               TA_RAS_BLOCK__##subblock,                                      \
+               ((a) | ((b) << 1) | ((c) << 2) | ((d) << 3)),                  \
+               (((e) << 1) | ((f) << 3) | (g) | ((h) << 2)),                  \
+       }
+
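+/* A worked expansion of the macro above (illustrative only, not part of
+ * the upstream change): the a..d arguments pack hw_supported_error_type
+ * as bits 0..3, and e..h pack sw_supported_error_type with e at bit 1,
+ * f at bit 3, g at bit 0 and h at bit 2. So the first entry below,
+ *
+ *   AMDGPU_RAS_SUB_BLOCK(GFX_CPC_SCRATCH, 0, 1, 1, 1, 1, 0, 0, 1),
+ *
+ * becomes:
+ *
+ *   [AMDGPU_RAS_BLOCK__GFX_CPC_SCRATCH] = {
+ *           "GFX_CPC_SCRATCH",
+ *           TA_RAS_BLOCK__GFX_CPC_SCRATCH,
+ *           0xe,    // 0 | (1 << 1) | (1 << 2) | (1 << 3)
+ *           0x6,    // (1 << 1) | (0 << 3) | 0 | (1 << 2)
+ *   },
+ */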
+static const struct ras_gfx_subblock ras_gfx_subblocks[] = {
+       AMDGPU_RAS_SUB_BLOCK(GFX_CPC_SCRATCH, 0, 1, 1, 1, 1, 0, 0, 1),
+       AMDGPU_RAS_SUB_BLOCK(GFX_CPC_UCODE, 0, 1, 1, 1, 1, 0, 0, 1),
+       AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_CPF_TAG, 0, 1, 1, 1, 1, 0, 0, 1),
+       AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_ROQ, 1, 0, 0, 1, 0, 0, 1, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_TAG, 0, 1, 1, 1, 0, 1, 0, 1),
+       AMDGPU_RAS_SUB_BLOCK(GFX_CPG_TAG, 0, 1, 1, 1, 1, 1, 0, 1),
+       AMDGPU_RAS_SUB_BLOCK(GFX_GDS_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_GDS_INPUT_QUEUE, 1, 0, 0, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_CMD_RAM_MEM, 0, 1, 1, 1, 0, 0, 0,
+                            0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_DATA_RAM_MEM, 1, 0, 0, 1, 0, 0, 0,
+                            0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PIPE_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_SPI_SR_MEM, 1, 0, 0, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_SQ_SGPR, 0, 1, 1, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_D, 0, 1, 1, 1, 1, 0, 0, 1),
+       AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_I, 0, 1, 1, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_SQ_VGPR, 0, 1, 1, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0, 1),
+       AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
+                            0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
+                            0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
+                            0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_UTCL1_LFIFO, 0, 1, 1, 1, 1, 0, 0,
+                            0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
+                            0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
+                            0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
+                            1),
+       AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
+                            0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
+                            0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
+                            0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
+                            0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
+                            0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
+                            0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
+                            0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
+                            0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
+                            0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
+                            0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
+                            0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
+                            0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
+                            0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
+                            0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
+                            0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
+                            0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
+                            0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_DFIFO, 0, 1, 1, 1, 1, 0, 0, 1),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_AFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TA_FL_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TA_FX_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_CFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TCA_HOLE_FIFO, 1, 0, 0, 1, 0, 1, 1, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TCA_REQ_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA, 0, 1, 1, 1, 1, 0, 0, 1),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_0_1, 0, 1, 1, 1, 1, 0, 0,
+                            1),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_0, 0, 1, 1, 1, 1, 0, 0,
+                            1),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_1, 0, 1, 1, 1, 1, 0, 0,
+                            1),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_0, 0, 1, 1, 1, 0, 0, 0,
+                            0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_1, 0, 1, 1, 1, 0, 0, 0,
+                            0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TCC_HIGH_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LOW_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_DEC, 1, 0, 0, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_TRANSFER, 1, 0, 0, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_DATA, 1, 0, 0, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_CONTROL, 1, 0, 0, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TCC_UC_ATOMIC_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_RETURN, 1, 0, 0, 1, 0, 1, 1, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_CACHE_READ, 1, 0, 0, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 1, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_TAG_PROBE_FIFO, 1, 0, 0, 1, 0, 0, 0,
+                            0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 0,
+                            0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRRET_TAG_WRITE_RETURN, 1, 0, 0, 1, 0, 0,
+                            0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TCC_ATOMIC_RETURN_BUFFER, 1, 0, 0, 1, 0, 0, 0,
+                            0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TCI_WRITE_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CACHE_RAM, 0, 1, 1, 1, 1, 0, 0, 1),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TCP_LFIFO_RAM, 0, 1, 1, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CMD_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TCP_VM_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TCP_DB_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO0, 0, 1, 1, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO1, 0, 1, 1, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_LO, 0, 1, 1, 1, 1, 0, 0, 1),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_HI, 0, 1, 1, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_TD_CS_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_CMDMEM, 0, 1, 1, 1, 1, 0, 0, 1),
+       AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_EA_RRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_EA_WRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_EA_IORD_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_DATAMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D0MEM, 1, 0, 0, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D1MEM, 1, 0, 0, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D2MEM, 1, 0, 0, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D3MEM, 1, 0, 0, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(UTC_VML2_BANK_CACHE, 0, 1, 1, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(UTC_VML2_WALKER, 0, 1, 1, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_2M_BANK, 1, 0, 0, 1, 0, 0, 0, 0),
+       AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_4K_BANK, 0, 1, 1, 1, 0, 0, 0, 0),
+};
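+/* Because the initializers above are designated by AMDGPU_RAS_BLOCK__*,
+ * this table can be indexed directly by the driver-visible subblock id;
+ * the inject/query paths added by this patch can then translate it into
+ * the matching TA_RAS_BLOCK__* id and consult the supported-error-type
+ * masks first (inferred usage; the consumers sit outside this hunk).
+ */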
+
 static const struct soc15_reg_golden golden_settings_gc_9_0[] =
 {
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
@@ -271,6 +655,18 @@ static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
 };
 
+static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
+{
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x10b0000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_0_ARCT, 0x3fffffff, 0x346f0a4e),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_1_ARCT, 0x3fffffff, 0x1c642ca),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_2_ARCT, 0x3fffffff, 0x26f45098),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_3_ARCT, 0x3fffffff, 0x2ebd9fe3),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_4_ARCT, 0x3fffffff, 0xb90f5b1),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135),
+};
+
 static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
 {
        mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
@@ -310,19 +706,21 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
 static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
 static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
+static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
+                                         void *ras_error_status);
+static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
+                                    void *inject_if);
 
 static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
 {
        switch (adev->asic_type) {
        case CHIP_VEGA10:
-               if (!amdgpu_virt_support_skip_setting(adev)) {
-                       soc15_program_register_sequence(adev,
-                                                        golden_settings_gc_9_0,
-                                                        ARRAY_SIZE(golden_settings_gc_9_0));
-                       soc15_program_register_sequence(adev,
-                                                        golden_settings_gc_9_0_vg10,
-                                                        ARRAY_SIZE(golden_settings_gc_9_0_vg10));
-               }
+               soc15_program_register_sequence(adev,
+                                               golden_settings_gc_9_0,
+                                               ARRAY_SIZE(golden_settings_gc_9_0));
+               soc15_program_register_sequence(adev,
+                                               golden_settings_gc_9_0_vg10,
+                                               ARRAY_SIZE(golden_settings_gc_9_0_vg10));
                break;
        case CHIP_VEGA12:
                soc15_program_register_sequence(adev,
@@ -340,6 +738,11 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
                                                golden_settings_gc_9_0_vg20,
                                                ARRAY_SIZE(golden_settings_gc_9_0_vg20));
                break;
+       case CHIP_ARCTURUS:
+               soc15_program_register_sequence(adev,
+                                               golden_settings_gc_9_4_1_arct,
+                                               ARRAY_SIZE(golden_settings_gc_9_4_1_arct));
+               break;
        case CHIP_RAVEN:
                soc15_program_register_sequence(adev, golden_settings_gc_9_1,
                                                ARRAY_SIZE(golden_settings_gc_9_1));
@@ -356,8 +759,9 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
                break;
        }
 
-       soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
-                                       (const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
+       if (adev->asic_type != CHIP_ARCTURUS)
+               soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
+                                               (const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
 }
 
 static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
@@ -610,44 +1014,14 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
        }
 }
 
-static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
+static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
+                                         const char *chip_name)
 {
-       const char *chip_name;
        char fw_name[30];
        int err;
        struct amdgpu_firmware_info *info = NULL;
        const struct common_firmware_header *header = NULL;
        const struct gfx_firmware_header_v1_0 *cp_hdr;
-       const struct rlc_firmware_header_v2_0 *rlc_hdr;
-       unsigned int *tmp = NULL;
-       unsigned int i = 0;
-       uint16_t version_major;
-       uint16_t version_minor;
-       uint32_t smu_version;
-
-       DRM_DEBUG("\n");
-
-       switch (adev->asic_type) {
-       case CHIP_VEGA10:
-               chip_name = "vega10";
-               break;
-       case CHIP_VEGA12:
-               chip_name = "vega12";
-               break;
-       case CHIP_VEGA20:
-               chip_name = "vega20";
-               break;
-       case CHIP_RAVEN:
-               if (adev->rev_id >= 8)
-                       chip_name = "raven2";
-               else if (adev->pdev->device == 0x15d8)
-                       chip_name = "picasso";
-               else
-                       chip_name = "raven";
-               break;
-       default:
-               BUG();
-       }
 
        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
        err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
@@ -682,6 +1056,58 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
        adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
        adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
 
+       if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+               info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
+               info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
+               info->fw = adev->gfx.pfp_fw;
+               header = (const struct common_firmware_header *)info->fw->data;
+               adev->firmware.fw_size +=
+                       ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+
+               info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
+               info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
+               info->fw = adev->gfx.me_fw;
+               header = (const struct common_firmware_header *)info->fw->data;
+               adev->firmware.fw_size +=
+                       ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+
+               info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
+               info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
+               info->fw = adev->gfx.ce_fw;
+               header = (const struct common_firmware_header *)info->fw->data;
+               adev->firmware.fw_size +=
+                       ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+       }
+
+out:
+       if (err) {
+               dev_err(adev->dev,
+                       "gfx9: Failed to load firmware \"%s\"\n",
+                       fw_name);
+               release_firmware(adev->gfx.pfp_fw);
+               adev->gfx.pfp_fw = NULL;
+               release_firmware(adev->gfx.me_fw);
+               adev->gfx.me_fw = NULL;
+               release_firmware(adev->gfx.ce_fw);
+               adev->gfx.ce_fw = NULL;
+       }
+       return err;
+}
+
+static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
+                                       const char *chip_name)
+{
+       char fw_name[30];
+       int err;
+       struct amdgpu_firmware_info *info = NULL;
+       const struct common_firmware_header *header = NULL;
+       const struct rlc_firmware_header_v2_0 *rlc_hdr;
+       unsigned int *tmp = NULL;
+       unsigned int i = 0;
+       uint16_t version_major;
+       uint16_t version_minor;
+       uint32_t smu_version;
+
        /*
 	 * For Picasso on an AM4-socket board, we use picasso_rlc_am4.bin
         * instead of picasso_rlc.bin.
@@ -756,57 +1182,7 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
        if (adev->gfx.rlc.is_rlc_v2_1)
                gfx_v9_0_init_rlc_ext_microcode(adev);
 
-       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
-       err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
-       if (err)
-               goto out;
-       err = amdgpu_ucode_validate(adev->gfx.mec_fw);
-       if (err)
-               goto out;
-       cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
-       adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
-       adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
-
-
-       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
-       err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
-       if (!err) {
-               err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
-               if (err)
-                       goto out;
-               cp_hdr = (const struct gfx_firmware_header_v1_0 *)
-               adev->gfx.mec2_fw->data;
-               adev->gfx.mec2_fw_version =
-               le32_to_cpu(cp_hdr->header.ucode_version);
-               adev->gfx.mec2_feature_version =
-               le32_to_cpu(cp_hdr->ucode_feature_version);
-       } else {
-               err = 0;
-               adev->gfx.mec2_fw = NULL;
-       }
-
        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
-               info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
-               info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
-               info->fw = adev->gfx.pfp_fw;
-               header = (const struct common_firmware_header *)info->fw->data;
-               adev->firmware.fw_size +=
-                       ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
-               info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
-               info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
-               info->fw = adev->gfx.me_fw;
-               header = (const struct common_firmware_header *)info->fw->data;
-               adev->firmware.fw_size +=
-                       ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
-               info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
-               info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
-               info->fw = adev->gfx.ce_fw;
-               header = (const struct common_firmware_header *)info->fw->data;
-               adev->firmware.fw_size +=
-                       ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
                info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
                info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
                info->fw = adev->gfx.rlc_fw;
@@ -836,7 +1212,58 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
                        adev->firmware.fw_size +=
                                ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
                }
+       }
 
+out:
+       if (err) {
+               dev_err(adev->dev,
+                       "gfx9: Failed to load firmware \"%s\"\n",
+                       fw_name);
+               release_firmware(adev->gfx.rlc_fw);
+               adev->gfx.rlc_fw = NULL;
+       }
+       return err;
+}
+
+static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
+                                              const char *chip_name)
+{
+       char fw_name[30];
+       int err;
+       struct amdgpu_firmware_info *info = NULL;
+       const struct common_firmware_header *header = NULL;
+       const struct gfx_firmware_header_v1_0 *cp_hdr;
+
+       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
+       err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
+       if (err)
+               goto out;
+       err = amdgpu_ucode_validate(adev->gfx.mec_fw);
+       if (err)
+               goto out;
+       cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+       adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+       adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+
+
+       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
+       err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+       if (!err) {
+               err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
+               if (err)
+                       goto out;
+               cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+                               adev->gfx.mec2_fw->data;
+               adev->gfx.mec2_fw_version =
+                               le32_to_cpu(cp_hdr->header.ucode_version);
+               adev->gfx.mec2_feature_version =
+                               le32_to_cpu(cp_hdr->ucode_feature_version);
+       } else {
+               err = 0;
+               adev->gfx.mec2_fw = NULL;
+       }
+
+       if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
                info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
                info->fw = adev->gfx.mec_fw;
@@ -859,13 +1286,18 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
                        cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
                        adev->firmware.fw_size +=
                                ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
-                       info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
-                       info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
-                       info->fw = adev->gfx.mec2_fw;
-                       adev->firmware.fw_size +=
-                               ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
-               }
 
+                       /* TODO: Determine whether MEC2 JT FW loading can be
+                        * removed for all GFX v9 ASICs and above.
+                        */
+                       if (adev->asic_type != CHIP_ARCTURUS) {
+                               info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
+                               info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
+                               info->fw = adev->gfx.mec2_fw;
+                               adev->firmware.fw_size +=
+                                       ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4,
+                                       PAGE_SIZE);
+                       }
+               }
        }
 
 out:
@@ -875,14 +1307,6 @@ out:
                dev_err(adev->dev,
                        "gfx9: Failed to load firmware \"%s\"\n",
                        fw_name);
-               release_firmware(adev->gfx.pfp_fw);
-               adev->gfx.pfp_fw = NULL;
-               release_firmware(adev->gfx.me_fw);
-               adev->gfx.me_fw = NULL;
-               release_firmware(adev->gfx.ce_fw);
-               adev->gfx.ce_fw = NULL;
-               release_firmware(adev->gfx.rlc_fw);
-               adev->gfx.rlc_fw = NULL;
                release_firmware(adev->gfx.mec_fw);
                adev->gfx.mec_fw = NULL;
                release_firmware(adev->gfx.mec2_fw);
@@ -891,6 +1315,56 @@ out:
        return err;
 }
 
+static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
+{
+       const char *chip_name;
+       int r;
+
+       DRM_DEBUG("\n");
+
+       switch (adev->asic_type) {
+       case CHIP_VEGA10:
+               chip_name = "vega10";
+               break;
+       case CHIP_VEGA12:
+               chip_name = "vega12";
+               break;
+       case CHIP_VEGA20:
+               chip_name = "vega20";
+               break;
+       case CHIP_RAVEN:
+               if (adev->rev_id >= 8)
+                       chip_name = "raven2";
+               else if (adev->pdev->device == 0x15d8)
+                       chip_name = "picasso";
+               else
+                       chip_name = "raven";
+               break;
+       case CHIP_ARCTURUS:
+               chip_name = "arcturus";
+               break;
+       default:
+               BUG();
+       }
+
+       /* No CPG in Arcturus */
+       if (adev->asic_type != CHIP_ARCTURUS) {
+               r = gfx_v9_0_init_cp_gfx_microcode(adev, chip_name);
+               if (r)
+                       return r;
+       }
+
+       r = gfx_v9_0_init_rlc_microcode(adev, chip_name);
+       if (r)
+               return r;
+
+       r = gfx_v9_0_init_cp_compute_microcode(adev, chip_name);
+       if (r)
+               return r;
+
+       return 0;
+}
+
 static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
 {
        u32 count = 0;
@@ -1324,7 +1798,9 @@ static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
        .read_wave_data = &gfx_v9_0_read_wave_data,
        .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
        .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
-       .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q
+       .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
+       .ras_error_inject = &gfx_v9_0_ras_error_inject,
+       .query_ras_error_count = &gfx_v9_0_query_ras_error_count
 };
 
 static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
@@ -1377,6 +1853,16 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
                else
                        gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
                break;
+       case CHIP_ARCTURUS:
+               adev->gfx.config.max_hw_contexts = 8;
+               adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+               adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+               adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+               adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
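+               /* Read the fused GB_ADDR_CONFIG, clear the bits under the
+                * mask and force the golden value, mirroring the and/or
+                * pattern that SOC15_REG_GOLDEN_VALUE() applies for the
+                * other ASICs via their golden-register tables.
+                */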
+               gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
+               gb_addr_config &= ~0xf3e777ff;
+               gb_addr_config |= 0x22014042;
+               break;
        default:
                BUG();
                break;
@@ -1653,6 +2139,7 @@ static int gfx_v9_0_sw_init(void *handle)
        case CHIP_VEGA12:
        case CHIP_VEGA20:
        case CHIP_RAVEN:
+       case CHIP_ARCTURUS:
                adev->gfx.mec.num_mec = 2;
                break;
        default:
@@ -1929,6 +2416,24 @@ static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
        }
 }
 
+static void gfx_v9_0_init_gds_vmid(struct amdgpu_device *adev)
+{
+       int vmid;
+
+       /*
+        * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
+        * access. Compute VMIDs should be enabled by FW for target VMIDs,
+        * the driver can enable them for graphics. VMID0 should maintain
+        * access so that HWS firmware can save/restore entries.
+        */
+       for (vmid = 1; vmid < 16; vmid++) {
+               WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * vmid, 0);
+               WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * vmid, 0);
+               WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, vmid, 0);
+               WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, vmid, 0);
+       }
+}
+
 static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
 {
        u32 tmp;
@@ -1945,7 +2450,7 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
        /* XXX SH_MEM regs */
        /* where to put LDS, scratch, GPUVM in FSA64 space */
        mutex_lock(&adev->srbm_mutex);
-       for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids; i++) {
+       for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) {
                soc15_grbm_select(adev, 0, 0, 0, i);
                /* CP and shaders */
                if (i == 0) {
@@ -1973,6 +2478,7 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
        mutex_unlock(&adev->srbm_mutex);
 
        gfx_v9_0_init_compute_vmid(adev);
+       gfx_v9_0_init_gds_vmid(adev);
 }
 
 static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
@@ -2849,6 +3355,10 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
        mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
        mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
        mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
+       mqd->compute_static_thread_mgmt_se4 = 0xffffffff;
+       mqd->compute_static_thread_mgmt_se5 = 0xffffffff;
+       mqd->compute_static_thread_mgmt_se6 = 0xffffffff;
+       mqd->compute_static_thread_mgmt_se7 = 0xffffffff;
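+       /* The extra se4..se7 masks exist in the updated MQD layout,
+        * presumably to cover parts with more than four shader engines;
+        * all-ones leaves every CU enabled, matching se0..se3 above.
+        */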
        mqd->compute_misc_reserved = 0x00000003;
 
        mqd->dynamic_cu_mask_addr_lo =
@@ -3252,10 +3762,12 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
                gfx_v9_0_enable_gui_idle_interrupt(adev, false);
 
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
-               /* legacy firmware loading */
-               r = gfx_v9_0_cp_gfx_load_microcode(adev);
-               if (r)
-                       return r;
+               if (adev->asic_type != CHIP_ARCTURUS) {
+                       /* legacy firmware loading */
+                       r = gfx_v9_0_cp_gfx_load_microcode(adev);
+                       if (r)
+                               return r;
+               }
 
                r = gfx_v9_0_cp_compute_load_microcode(adev);
                if (r)
@@ -3266,18 +3778,22 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
        if (r)
                return r;
 
-       r = gfx_v9_0_cp_gfx_resume(adev);
-       if (r)
-               return r;
+       if (adev->asic_type != CHIP_ARCTURUS) {
+               r = gfx_v9_0_cp_gfx_resume(adev);
+               if (r)
+                       return r;
+       }
 
        r = gfx_v9_0_kcq_resume(adev);
        if (r)
                return r;
 
-       ring = &adev->gfx.gfx_ring[0];
-       r = amdgpu_ring_test_helper(ring);
-       if (r)
-               return r;
+       if (adev->asic_type != CHIP_ARCTURUS) {
+               ring = &adev->gfx.gfx_ring[0];
+               r = amdgpu_ring_test_helper(ring);
+               if (r)
+                       return r;
+       }
 
        for (i = 0; i < adev->gfx.num_compute_rings; i++) {
                ring = &adev->gfx.compute_ring[i];
@@ -3291,7 +3807,8 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
 
 static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
 {
-       gfx_v9_0_cp_gfx_enable(adev, enable);
+       if (adev->asic_type != CHIP_ARCTURUS)
+               gfx_v9_0_cp_gfx_enable(adev, enable);
        gfx_v9_0_cp_compute_enable(adev, enable);
 }
 
@@ -3300,7 +3817,8 @@ static int gfx_v9_0_hw_init(void *handle)
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       gfx_v9_0_init_golden_registers(adev);
+       if (!amdgpu_sriov_vf(adev))
+               gfx_v9_0_init_golden_registers(adev);
 
        gfx_v9_0_constants_init(adev);
 
@@ -3316,9 +3834,11 @@ static int gfx_v9_0_hw_init(void *handle)
        if (r)
                return r;
 
-       r = gfx_v9_0_ngg_en(adev);
-       if (r)
-               return r;
+       if (adev->asic_type != CHIP_ARCTURUS) {
+               r = gfx_v9_0_ngg_en(adev);
+               if (r)
+                       return r;
+       }
 
        return r;
 }
@@ -3466,8 +3986,9 @@ static int gfx_v9_0_soft_reset(void *handle)
                /* stop the rlc */
                adev->gfx.rlc.funcs->stop(adev);
 
-               /* Disable GFX parsing/prefetching */
-               gfx_v9_0_cp_gfx_enable(adev, false);
+               if (adev->asic_type != CHIP_ARCTURUS)
+                       /* Disable GFX parsing/prefetching */
+                       gfx_v9_0_cp_gfx_enable(adev, false);
 
                /* Disable MEC parsing/prefetching */
                gfx_v9_0_cp_compute_enable(adev, false);
@@ -3810,7 +4331,10 @@ static int gfx_v9_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
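+       /* Arcturus has no graphics pipeline (no CPG), so it exposes no
+        * gfx rings; only the compute rings below are created.
+        */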
+       if (adev->asic_type == CHIP_ARCTURUS)
+               adev->gfx.num_gfx_rings = 0;
+       else
+               adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
        adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
        gfx_v9_0_set_ring_funcs(adev);
        gfx_v9_0_set_irq_funcs(adev);
@@ -3821,6 +4345,7 @@ static int gfx_v9_0_early_init(void *handle)
 }
 
 static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
+               struct ras_err_data *err_data,
                struct amdgpu_iv_entry *entry);
 
 static int gfx_v9_0_ecc_late_init(void *handle)
@@ -4330,14 +4855,16 @@ static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
        if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
                *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
 
-       /* AMD_CG_SUPPORT_GFX_3D_CGCG */
-       data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
-       if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
-               *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
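+       /* With no gfx pipe on Arcturus, the 3D CGCG/CGLS flags do not
+        * apply, so RLC_CGCG_CGLS_CTRL_3D is only read on other ASICs.
+        */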
+       if (adev->asic_type != CHIP_ARCTURUS) {
+               /* AMD_CG_SUPPORT_GFX_3D_CGCG */
+               data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
+               if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
+                       *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
 
-       /* AMD_CG_SUPPORT_GFX_3D_CGLS */
-       if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
-               *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
+               /* AMD_CG_SUPPORT_GFX_3D_CGLS */
+               if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
+                       *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
+       }
 }
 
 static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
@@ -5133,12 +5660,420 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
 }
 
 static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
+               struct ras_err_data *err_data,
                struct amdgpu_iv_entry *entry)
 {
        /* TODO ue will trigger an interrupt. */
        kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+       if (adev->gfx.funcs->query_ras_error_count)
+               adev->gfx.funcs->query_ras_error_count(adev, err_data);
        amdgpu_ras_reset_gpu(adev, 0);
-       return AMDGPU_RAS_UE;
+       return AMDGPU_RAS_SUCCESS;
+}
+
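+/* Each row names one EDC counter: SOC15_REG_ENTRY() supplies the
+ * ip/inst/seg/reg_offset quadruple, per_se_instance says whether the
+ * instances are walked per shader engine, num_instance is how many
+ * instances to read, and the two masks extract the SEC/DED fields.
+ * The consumer (outside this hunk) is presumably a loop of the shape:
+ *
+ *   for (i = 0; i < ARRAY_SIZE(gfx_ras_edc_regs); i++) {
+ *           data = RREG32(<SOC15 offset of gfx_ras_edc_regs[i]>);
+ *           sec_count = data & gfx_ras_edc_regs[i].sec_count_mask;
+ *           ...
+ *   }
+ */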
+static const struct {
+       const char *name;
+       uint32_t ip;
+       uint32_t inst;
+       uint32_t seg;
+       uint32_t reg_offset;
+       uint32_t per_se_instance;
+       int32_t num_instance;
+       uint32_t sec_count_mask;
+       uint32_t ded_count_mask;
+} gfx_ras_edc_regs[] = {
+       { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1,
+         REG_FIELD_MASK(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
+         REG_FIELD_MASK(CPC_EDC_SCRATCH_CNT, DED_COUNT) },
+       { "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1,
+         REG_FIELD_MASK(CPC_EDC_UCODE_CNT, SEC_COUNT),
+         REG_FIELD_MASK(CPC_EDC_UCODE_CNT, DED_COUNT) },
+       { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1,
+         REG_FIELD_MASK(CPF_EDC_ROQ_CNT, COUNT_ME1), 0 },
+       { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1,
+         REG_FIELD_MASK(CPF_EDC_ROQ_CNT, COUNT_ME2), 0 },
+       { "CPF_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1,
+         REG_FIELD_MASK(CPF_EDC_TAG_CNT, SEC_COUNT),
+         REG_FIELD_MASK(CPF_EDC_TAG_CNT, DED_COUNT) },
+       { "CPG_DMA_ROQ", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1,
+         REG_FIELD_MASK(CPG_EDC_DMA_CNT, ROQ_COUNT), 0 },
+       { "CPG_DMA_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1,
+         REG_FIELD_MASK(CPG_EDC_DMA_CNT, TAG_SEC_COUNT),
+         REG_FIELD_MASK(CPG_EDC_DMA_CNT, TAG_DED_COUNT) },
+       { "CPG_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT), 0, 1,
+         REG_FIELD_MASK(CPG_EDC_TAG_CNT, SEC_COUNT),
+         REG_FIELD_MASK(CPG_EDC_TAG_CNT, DED_COUNT) },
+       { "DC_CSINVOC", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1,
+         REG_FIELD_MASK(DC_EDC_CSINVOC_CNT, COUNT_ME1), 0 },
+       { "DC_RESTORE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1,
+         REG_FIELD_MASK(DC_EDC_RESTORE_CNT, COUNT_ME1), 0 },
+       { "DC_STATE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1,
+         REG_FIELD_MASK(DC_EDC_STATE_CNT, COUNT_ME1), 0 },
+       { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1,
+         REG_FIELD_MASK(GDS_EDC_CNT, GDS_MEM_SEC),
+         REG_FIELD_MASK(GDS_EDC_CNT, GDS_MEM_DED) },
+       { "GDS_INPUT_QUEUE", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1,
+         REG_FIELD_MASK(GDS_EDC_CNT, GDS_INPUT_QUEUE_SED), 0 },
+       { "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
+         0, 1, REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC),
+         REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED) },
+       { "GDS_OA_PHY_PHY_CMD_RAM_MEM",
+         SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1,
+         REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC),
+         REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED) },
+       { "GDS_OA_PHY_PHY_DATA_RAM_MEM",
+         SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1,
+         REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SED), 0 },
+       { "GDS_OA_PIPE_ME1_PIPE0_PIPE_MEM",
+         SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1,
+         REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC),
+         REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED) },
+       { "GDS_OA_PIPE_ME1_PIPE1_PIPE_MEM",
+         SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1,
+         REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC),
+         REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED) },
+       { "GDS_OA_PIPE_ME1_PIPE2_PIPE_MEM",
+         SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1,
+         REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC),
+         REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED) },
+       { "GDS_OA_PIPE_ME1_PIPE3_PIPE_MEM",
+         SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1,
+         REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC),
+         REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED) },
+       { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 1, 1,
+         REG_FIELD_MASK(SPI_EDC_CNT, SPI_SR_MEM_SED_COUNT), 0 },
+       { "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16,
+         REG_FIELD_MASK(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT),
+         REG_FIELD_MASK(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT) },
+       { "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16,
+         REG_FIELD_MASK(TA_EDC_CNT, TA_FS_AFIFO_SED_COUNT), 0 },
+       { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16,
+         REG_FIELD_MASK(TA_EDC_CNT, TA_FL_LFIFO_SED_COUNT), 0 },
+       { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16,
+         REG_FIELD_MASK(TA_EDC_CNT, TA_FX_LFIFO_SED_COUNT), 0 },
+       { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16,
+         REG_FIELD_MASK(TA_EDC_CNT, TA_FS_CFIFO_SED_COUNT), 0 },
+       { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 2,
+         REG_FIELD_MASK(TCA_EDC_CNT, HOLE_FIFO_SED_COUNT), 0 },
+       { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 2,
+         REG_FIELD_MASK(TCA_EDC_CNT, REQ_FIFO_SED_COUNT), 0 },
+       { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
+         REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT),
+         REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DATA_DED_COUNT) },
+       { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
+         REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT),
+         REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT) },
+       { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
+         REG_FIELD_MASK(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT),
+         REG_FIELD_MASK(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT) },
+       { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
+         REG_FIELD_MASK(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT),
+         REG_FIELD_MASK(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT) },
+       { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
+         REG_FIELD_MASK(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT),
+         REG_FIELD_MASK(TCC_EDC_CNT, SRC_FIFO_DED_COUNT) },
+       { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
+         REG_FIELD_MASK(TCC_EDC_CNT, IN_USE_DEC_SED_COUNT), 0 },
+       { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
+         REG_FIELD_MASK(TCC_EDC_CNT, IN_USE_TRANSFER_SED_COUNT), 0 },
+       { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
+         REG_FIELD_MASK(TCC_EDC_CNT, LATENCY_FIFO_SED_COUNT), 0 },
+       { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
+         REG_FIELD_MASK(TCC_EDC_CNT, RETURN_DATA_SED_COUNT), 0 },
+       { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
+         REG_FIELD_MASK(TCC_EDC_CNT, RETURN_CONTROL_SED_COUNT), 0 },
+       { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
+         REG_FIELD_MASK(TCC_EDC_CNT, UC_ATOMIC_FIFO_SED_COUNT), 0 },
+       { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 16,
+         REG_FIELD_MASK(TCC_EDC_CNT2, WRITE_RETURN_SED_COUNT), 0 },
+       { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 16,
+         REG_FIELD_MASK(TCC_EDC_CNT2, WRITE_CACHE_READ_SED_COUNT), 0 },
+       { "TCC_SRC_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0,
+         16, REG_FIELD_MASK(TCC_EDC_CNT2, SRC_FIFO_NEXT_RAM_SED_COUNT), 0 },
+       { "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+         0, 16, REG_FIELD_MASK(TCC_EDC_CNT2, LATENCY_FIFO_NEXT_RAM_SED_COUNT),
+         0 },
+       { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0,
+         16, REG_FIELD_MASK(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SED_COUNT), 0 },
+       { "TCC_WRRET_TAG_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+         0, 16, REG_FIELD_MASK(TCC_EDC_CNT2, WRRET_TAG_WRITE_RETURN_SED_COUNT),
+         0 },
+       { "TCC_ATOMIC_RETURN_BUFFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0,
+         16, REG_FIELD_MASK(TCC_EDC_CNT2, ATOMIC_RETURN_BUFFER_SED_COUNT), 0 },
+       { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 72,
+         REG_FIELD_MASK(TCI_EDC_CNT, WRITE_RAM_SED_COUNT), 0 },
+       { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
+         REG_FIELD_MASK(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT),
+         REG_FIELD_MASK(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT) },
+       { "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
+         REG_FIELD_MASK(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT),
+         REG_FIELD_MASK(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT) },
+       { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
+         REG_FIELD_MASK(TCP_EDC_CNT_NEW, CMD_FIFO_SED_COUNT), 0 },
+       { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
+         REG_FIELD_MASK(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT), 0 },
+       { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
+         REG_FIELD_MASK(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT), 0 },
+       { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
+         REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT),
+         REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT) },
+       { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
+         REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT),
+         REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT) },
+       { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 1, 16,
+         REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT),
+         REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT) },
+       { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 1, 16,
+         REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT),
+         REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT) },
+       { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 1, 16,
+         REG_FIELD_MASK(TD_EDC_CNT, CS_FIFO_SED_COUNT), 0 },
+       { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
+         REG_FIELD_MASK(SQ_EDC_CNT, LDS_D_SEC_COUNT),
+         REG_FIELD_MASK(SQ_EDC_CNT, LDS_D_DED_COUNT) },
+       { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
+         REG_FIELD_MASK(SQ_EDC_CNT, LDS_I_SEC_COUNT),
+         REG_FIELD_MASK(SQ_EDC_CNT, LDS_I_DED_COUNT) },
+       { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
+         REG_FIELD_MASK(SQ_EDC_CNT, SGPR_SEC_COUNT),
+         REG_FIELD_MASK(SQ_EDC_CNT, SGPR_DED_COUNT) },
+       { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
+         REG_FIELD_MASK(SQ_EDC_CNT, VGPR0_SEC_COUNT),
+         REG_FIELD_MASK(SQ_EDC_CNT, VGPR0_DED_COUNT) },
+       { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
+         REG_FIELD_MASK(SQ_EDC_CNT, VGPR1_SEC_COUNT),
+         REG_FIELD_MASK(SQ_EDC_CNT, VGPR1_DED_COUNT) },
+       { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
+         REG_FIELD_MASK(SQ_EDC_CNT, VGPR2_SEC_COUNT),
+         REG_FIELD_MASK(SQ_EDC_CNT, VGPR2_DED_COUNT) },
+       { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
+         REG_FIELD_MASK(SQ_EDC_CNT, VGPR3_SEC_COUNT),
+         REG_FIELD_MASK(SQ_EDC_CNT, VGPR3_DED_COUNT) },
+       { "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+         1, 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT),
+         REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT) },
+       { "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 1,
+         6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT),
+         REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT) },
+       { "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+         1, 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT),
+         REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT) },
+       { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 1,
+         6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT),
+         REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT) },
+       { "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+         1, 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT),
+         REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT) },
+       { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 1,
+         6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT),
+         REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT) },
+       { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
+         6, REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT),
+         REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT) },
+       { "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
+         6, REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT),
+         REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT) },
+       { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
+         6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT),
+         REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT) },
+       { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
+         6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT),
+         REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT) },
+       { "SQC_INST_BANKA_UTCL1_MISS_FIFO",
+         SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, 6,
+         REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT),
+         0 },
+       { "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
+         6, REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_MISS_FIFO_SED_COUNT), 0 },
+       { "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
+         6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_HIT_FIFO_SED_COUNT), 0 },
+       { "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
+         6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_MISS_FIFO_SED_COUNT), 0 },
+       { "SQC_DATA_BANKA_DIRTY_BIT_RAM",
+         SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, 6,
+         REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT), 0 },
+       { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, 6,
+         REG_FIELD_MASK(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT),
+         REG_FIELD_MASK(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT) },
+       { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
+         6, REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT),
+         REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT) },
+       { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
+         6, REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT),
+         REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT) },
+       { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
+         6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT),
+         REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT) },
+       { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
+         6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT),
+         REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT) },
+       { "SQC_INST_BANKB_UTCL1_MISS_FIFO",
+         SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, 6,
+         REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT),
+         0 },
+       { "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
+         6, REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_MISS_FIFO_SED_COUNT), 0 },
+       { "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
+         6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_HIT_FIFO_SED_COUNT), 0 },
+       { "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
+         6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_MISS_FIFO_SED_COUNT), 0 },
+       { "SQC_DATA_BANKB_DIRTY_BIT_RAM",
+         SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, 6,
+         REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT), 0 },
+       { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
+         REG_FIELD_MASK(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+         REG_FIELD_MASK(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT) },
+       { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
+         REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+         REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT) },
+       { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
+         REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+         REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT) },
+       { "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
+         REG_FIELD_MASK(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+         REG_FIELD_MASK(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT) },
+       { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
+         REG_FIELD_MASK(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+         REG_FIELD_MASK(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT) },
+       { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
+         REG_FIELD_MASK(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), 0 },
+       { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
+         REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), 0 },
+       { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
+         REG_FIELD_MASK(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT), 0 },
+       { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
+         REG_FIELD_MASK(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT), 0 },
+       { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
+         REG_FIELD_MASK(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT), 0 },
+       { "GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
+         REG_FIELD_MASK(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+         REG_FIELD_MASK(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT) },
+       { "GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
+         REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+         REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT) },
+       { "GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
+         REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+         REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT) },
+       { "GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
+         REG_FIELD_MASK(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), 0 },
+       { "GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
+         REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), 0 },
+       { "MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
+         REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT), 0 },
+       { "MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
+         REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT), 0 },
+       { "MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
+         REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT), 0 },
+       { "MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
+         REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT), 0 },
+};
+
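+/*
+ * Inject a RAS error into one of the GFX sub-blocks described in
+ * ras_gfx_subblocks.  The request is validated against the hardware- and
+ * driver-supported error types for that sub-block, then handed to the PSP
+ * RAS trusted application, which performs the actual injection.
+ */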
+static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
+                                    void *inject_if)
+{
+       struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
+       int ret;
+       struct ta_ras_trigger_error_input block_info = { 0 };
+
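+       /* RAS error injection is currently only wired up for Vega20 */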
+       if (adev->asic_type != CHIP_VEGA20)
+               return -EINVAL;
+
+       if (info->head.sub_block_index >= ARRAY_SIZE(ras_gfx_subblocks) ||
+           !ras_gfx_subblocks[info->head.sub_block_index].name)
+               return -EPERM;
+
+       if (!(ras_gfx_subblocks[info->head.sub_block_index].hw_supported_error_type &
+             info->head.type)) {
+               DRM_ERROR("GFX Subblock %s, hardware do not support type 0x%x\n",
+                       ras_gfx_subblocks[info->head.sub_block_index].name,
+                       info->head.type);
+               return -EPERM;
+       }
+
+       if (!(ras_gfx_subblocks[info->head.sub_block_index].sw_supported_error_type &
+             info->head.type)) {
+               DRM_ERROR("GFX Subblock %s, driver do not support type 0x%x\n",
+                       ras_gfx_subblocks[info->head.sub_block_index].name,
+                       info->head.type);
+               return -EPERM;
+       }
+
+       block_info.block_id = amdgpu_ras_block_to_ta(info->head.block);
+       block_info.sub_block_index =
+               ras_gfx_subblocks[info->head.sub_block_index].ta_subblock;
+       block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type);
+       block_info.address = info->address;
+       block_info.value = info->value;
+
+       mutex_lock(&adev->grbm_idx_mutex);
+       ret = psp_ras_trigger_error(&adev->psp, &block_info);
+       mutex_unlock(&adev->grbm_idx_mutex);
+
+       return ret;
+}
+
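+/*
+ * Walk gfx_ras_edc_regs and read every EDC counter instance.  Each table
+ * entry names a sub-block, its counter register, whether that counter is
+ * instanced per shader engine, the number of instances, and the SEC/DED
+ * count field masks (a zero ded_count_mask means the block only reports
+ * single-bit errors).  Non-zero SEC fields are counted as correctable
+ * errors, non-zero DED fields as uncorrectable ones.
+ */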
+static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
+                                         void *ras_error_status)
+{
+       struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+       uint32_t sec_count, ded_count;
+       uint32_t i;
+       uint32_t reg_value;
+       uint32_t se_id, instance_id;
+
+       if (adev->asic_type != CHIP_VEGA20)
+               return -EINVAL;
+
+       err_data->ue_count = 0;
+       err_data->ce_count = 0;
+
+       mutex_lock(&adev->grbm_idx_mutex);
+       for (se_id = 0; se_id < adev->gfx.config.max_shader_engines; se_id++) {
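+               /*
+                * 256 is an upper bound on num_instance across the table;
+                * instances a given block does not implement are skipped by
+                * the per-entry checks below.
+                */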
+               for (instance_id = 0; instance_id < 256; instance_id++) {
+                       for (i = 0; i < ARRAY_SIZE(gfx_ras_edc_regs); i++) {
+                               if (se_id != 0 &&
+                                   !gfx_ras_edc_regs[i].per_se_instance)
+                                       continue;
+                               if (instance_id >= gfx_ras_edc_regs[i].num_instance)
+                                       continue;
+
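+                               /*
+                                * Steer the GRBM index to this SE/instance so
+                                * the indexed EDC registers below read from
+                                * the right unit.
+                                */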
+                               gfx_v9_0_select_se_sh(adev, se_id, 0,
+                                                     instance_id);
+
+                               reg_value = RREG32(
+                                       adev->reg_offset[gfx_ras_edc_regs[i].ip]
+                                                       [gfx_ras_edc_regs[i].inst]
+                                                       [gfx_ras_edc_regs[i].seg] +
+                                       gfx_ras_edc_regs[i].reg_offset);
+                               sec_count = reg_value &
+                                           gfx_ras_edc_regs[i].sec_count_mask;
+                               ded_count = reg_value &
+                                           gfx_ras_edc_regs[i].ded_count_mask;
+                               if (sec_count) {
+                                       DRM_INFO(
+                                               "Instance[%d][%d]: SubBlock %s, SEC %d\n",
+                                               se_id, instance_id,
+                                               gfx_ras_edc_regs[i].name,
+                                               sec_count);
+                                       err_data->ce_count++;
+                               }
+
+                               if (ded_count) {
+                                       DRM_INFO(
+                                               "Instance[%d][%d]: SubBlock %s, DED %d\n",
+                                               se_id, instance_id,
+                                               gfx_ras_edc_regs[i].name,
+                                               ded_count);
+                                       err_data->ue_count++;
+                               }
+                       }
+               }
+       }
+       gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+       mutex_unlock(&adev->grbm_idx_mutex);
+
+       return 0;
 }
 
 static int gfx_v9_0_cp_ecc_error_irq(struct amdgpu_device *adev,
@@ -5183,7 +6118,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
        .align_mask = 0xff,
        .nop = PACKET3(PACKET3_NOP, 0x3FFF),
        .support_64bit_ptrs = true,
-       .vmhub = AMDGPU_GFXHUB,
+       .vmhub = AMDGPU_GFXHUB_0,
        .get_rptr = gfx_v9_0_ring_get_rptr_gfx,
        .get_wptr = gfx_v9_0_ring_get_wptr_gfx,
        .set_wptr = gfx_v9_0_ring_set_wptr_gfx,
@@ -5234,7 +6169,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
        .align_mask = 0xff,
        .nop = PACKET3(PACKET3_NOP, 0x3FFF),
        .support_64bit_ptrs = true,
-       .vmhub = AMDGPU_GFXHUB,
+       .vmhub = AMDGPU_GFXHUB_0,
        .get_rptr = gfx_v9_0_ring_get_rptr_compute,
        .get_wptr = gfx_v9_0_ring_get_wptr_compute,
        .set_wptr = gfx_v9_0_ring_set_wptr_compute,
@@ -5269,7 +6204,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
        .align_mask = 0xff,
        .nop = PACKET3(PACKET3_NOP, 0x3FFF),
        .support_64bit_ptrs = true,
-       .vmhub = AMDGPU_GFXHUB,
+       .vmhub = AMDGPU_GFXHUB_0,
        .get_rptr = gfx_v9_0_ring_get_rptr_compute,
        .get_wptr = gfx_v9_0_ring_get_wptr_compute,
        .set_wptr = gfx_v9_0_ring_set_wptr_compute,
@@ -5349,6 +6284,7 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
        case CHIP_VEGA12:
        case CHIP_VEGA20:
        case CHIP_RAVEN:
+       case CHIP_ARCTURUS:
                adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
                break;
        default:
@@ -5366,6 +6302,7 @@ static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
                adev->gds.gds_size = 0x10000;
                break;
        case CHIP_RAVEN:
+       case CHIP_ARCTURUS:
                adev->gds.gds_size = 0x1000;
                break;
        default:
@@ -5387,6 +6324,9 @@ static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
                else
                        adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */
                break;
+       case CHIP_ARCTURUS:
+               adev->gds.gds_compute_max_wave_id = 0xfff;
+               break;
        default:
                /* this really depends on the chip */
                adev->gds.gds_compute_max_wave_id = 0x7ff;
@@ -5431,12 +6371,21 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
 {
        int i, j, k, counter, active_cu_number = 0;
        u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
-       unsigned disable_masks[4 * 2];
+       unsigned disable_masks[4 * 4];
 
        if (!adev || !cu_info)
                return -EINVAL;
 
-       amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
+       /*
+        * The bitmap array in cu_info is 4*4 (16 entries), which is enough to
+        * cover every gfx9 SE/SH configuration.
+        */
+       if (adev->gfx.config.max_shader_engines *
+           adev->gfx.config.max_sh_per_se > 16)
+               return -EINVAL;
+
+       amdgpu_gfx_parse_disable_cu(disable_masks,
+                                   adev->gfx.config.max_shader_engines,
+                                   adev->gfx.config.max_sh_per_se);
 
        mutex_lock(&adev->grbm_idx_mutex);
        for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
@@ -5445,11 +6394,23 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
                        ao_bitmap = 0;
                        counter = 0;
                        gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
-                       if (i < 4 && j < 2)
-                               gfx_v9_0_set_user_cu_inactive_bitmap(
-                                       adev, disable_masks[i * 2 + j]);
+                       gfx_v9_0_set_user_cu_inactive_bitmap(
+                               adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]);
                        bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
-                       cu_info->bitmap[i][j] = bitmap;
+
+                       /*
+                        * The bitmap (and ao_cu_bitmap) in the cu_info
+                        * structure is a 4x4 array, which suits Vega ASICs
+                        * with their 4*2 SE/SH layout.  Arcturus, however,
+                        * uses an 8*1 layout.  To keep the existing array
+                        * format, fold the four extra shader engines into
+                        * the otherwise unused second column:
+                        *    SE4,SH0 --> bitmap[0][1]
+                        *    SE5,SH0 --> bitmap[1][1]
+                        *    SE6,SH0 --> bitmap[2][1]
+                        *    SE7,SH0 --> bitmap[3][1]
+                        */
+                       cu_info->bitmap[i % 4][j + i / 4] = bitmap;
 
                        for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
                                if (bitmap & mask) {
@@ -5462,7 +6423,7 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
                        active_cu_number += counter;
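+                       /*
+                        * ao_cu_mask packs one byte of always-on CUs per
+                        * SE/SH pair of the first two shader engines:
+                        * SE0/SH0 -> bits 0-7,  SE0/SH1 -> bits 8-15,
+                        * SE1/SH0 -> bits 16-23, SE1/SH1 -> bits 24-31.
+                        */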
                        if (i < 2 && j < 2)
                                ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
-                       cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
+                       cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap;
                }
        }
        gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);