/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include "amdgpu.h"
#include "gmc_v9_0.h"
#include "amdgpu_atomfirmware.h"

#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "gc/gc_9_0_sh_mask.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
#include "vega10_enum.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "athub/athub_1_0_offset.h"

#include "soc15_common.h"
#include "umc/umc_6_0_sh_mask.h"

#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"

#define mmDF_CS_AON0_DramBaseAddress0                                  0x0044
#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX                         0
//DF_CS_AON0_DramBaseAddress0
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT                 0x0
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT             0x1
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT               0x4
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT               0x8
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT               0xc
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK                   0x00000001L
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK               0x00000002L
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK                 0x000000F0L
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK                 0x00000700L
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK                 0xFFFFF000L
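/*
 * IntLvNumChan encodes how many DRAM channels the data fabric interleaves
 * across; gmc_v9_0_mc_init() below decodes it into an actual channel count
 * when the VBIOS does not report a usable VRAM width.
 */
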
/* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/
#define AMDGPU_NUM_OF_VMIDS			8

static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};

static const u32 golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_OFFSET(MMHUB, 0, mmDAGB1_WRCLI2), 0x00000007, 0xfe5fe0fa,
	SOC15_REG_OFFSET(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0), 0x00000030, 0x55555565
};

static const u32 golden_settings_athub_1_0_0[] =
{
	SOC15_REG_OFFSET(ATHUB, 0, mmRPB_ARB_CNTL), 0x0000ff00, 0x00000800,
	SOC15_REG_OFFSET(ATHUB, 0, mmRPB_ARB_CNTL2), 0x00ff00ff, 0x00080008
};

/* Ecc related register addresses, (BASE + reg offset) */
/* Universal Memory Controller caps (may be fused). */
/* UMCCH:UmcLocalCap */
#define UMCLOCALCAPS_ADDR0	(0x00014306 + 0x00000000)
#define UMCLOCALCAPS_ADDR1	(0x00014306 + 0x00000800)
#define UMCLOCALCAPS_ADDR2	(0x00014306 + 0x00001000)
#define UMCLOCALCAPS_ADDR3	(0x00014306 + 0x00001800)
#define UMCLOCALCAPS_ADDR4	(0x00054306 + 0x00000000)
#define UMCLOCALCAPS_ADDR5	(0x00054306 + 0x00000800)
#define UMCLOCALCAPS_ADDR6	(0x00054306 + 0x00001000)
#define UMCLOCALCAPS_ADDR7	(0x00054306 + 0x00001800)
#define UMCLOCALCAPS_ADDR8	(0x00094306 + 0x00000000)
#define UMCLOCALCAPS_ADDR9	(0x00094306 + 0x00000800)
#define UMCLOCALCAPS_ADDR10	(0x00094306 + 0x00001000)
#define UMCLOCALCAPS_ADDR11	(0x00094306 + 0x00001800)
#define UMCLOCALCAPS_ADDR12	(0x000d4306 + 0x00000000)
#define UMCLOCALCAPS_ADDR13	(0x000d4306 + 0x00000800)
#define UMCLOCALCAPS_ADDR14	(0x000d4306 + 0x00001000)
#define UMCLOCALCAPS_ADDR15	(0x000d4306 + 0x00001800)

/* Universal Memory Controller Channel config. */
/* UMCCH:UMC_CONFIG */
#define UMCCH_UMC_CONFIG_ADDR0	(0x00014040 + 0x00000000)
#define UMCCH_UMC_CONFIG_ADDR1	(0x00014040 + 0x00000800)
#define UMCCH_UMC_CONFIG_ADDR2	(0x00014040 + 0x00001000)
#define UMCCH_UMC_CONFIG_ADDR3	(0x00014040 + 0x00001800)
#define UMCCH_UMC_CONFIG_ADDR4	(0x00054040 + 0x00000000)
#define UMCCH_UMC_CONFIG_ADDR5	(0x00054040 + 0x00000800)
#define UMCCH_UMC_CONFIG_ADDR6	(0x00054040 + 0x00001000)
#define UMCCH_UMC_CONFIG_ADDR7	(0x00054040 + 0x00001800)
#define UMCCH_UMC_CONFIG_ADDR8	(0x00094040 + 0x00000000)
#define UMCCH_UMC_CONFIG_ADDR9	(0x00094040 + 0x00000800)
#define UMCCH_UMC_CONFIG_ADDR10	(0x00094040 + 0x00001000)
#define UMCCH_UMC_CONFIG_ADDR11	(0x00094040 + 0x00001800)
#define UMCCH_UMC_CONFIG_ADDR12	(0x000d4040 + 0x00000000)
#define UMCCH_UMC_CONFIG_ADDR13	(0x000d4040 + 0x00000800)
#define UMCCH_UMC_CONFIG_ADDR14	(0x000d4040 + 0x00001000)
#define UMCCH_UMC_CONFIG_ADDR15	(0x000d4040 + 0x00001800)

/* Universal Memory Controller Channel Ecc config. */
/* UMCCH:EccCtrl */
#define UMCCH_ECCCTRL_ADDR0	(0x00014053 + 0x00000000)
#define UMCCH_ECCCTRL_ADDR1	(0x00014053 + 0x00000800)
#define UMCCH_ECCCTRL_ADDR2	(0x00014053 + 0x00001000)
#define UMCCH_ECCCTRL_ADDR3	(0x00014053 + 0x00001800)
#define UMCCH_ECCCTRL_ADDR4	(0x00054053 + 0x00000000)
#define UMCCH_ECCCTRL_ADDR5	(0x00054053 + 0x00000800)
#define UMCCH_ECCCTRL_ADDR6	(0x00054053 + 0x00001000)
#define UMCCH_ECCCTRL_ADDR7	(0x00054053 + 0x00001800)
#define UMCCH_ECCCTRL_ADDR8	(0x00094053 + 0x00000000)
#define UMCCH_ECCCTRL_ADDR9	(0x00094053 + 0x00000800)
#define UMCCH_ECCCTRL_ADDR10	(0x00094053 + 0x00001000)
#define UMCCH_ECCCTRL_ADDR11	(0x00094053 + 0x00001800)
#define UMCCH_ECCCTRL_ADDR12	(0x000d4053 + 0x00000000)
#define UMCCH_ECCCTRL_ADDR13	(0x000d4053 + 0x00000800)
#define UMCCH_ECCCTRL_ADDR14	(0x000d4053 + 0x00001000)
#define UMCCH_ECCCTRL_ADDR15	(0x000d4053 + 0x00001800)
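/*
 * The address arithmetic above is regular: channels within a UMC instance
 * sit 0x800 apart, and the four UMC instances sit 0x40000 apart, yielding
 * the 16 per-channel register copies enumerated in the tables below.
 */
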
static const uint32_t ecc_umclocalcap_addrs[] = {
	UMCLOCALCAPS_ADDR0,
	UMCLOCALCAPS_ADDR1,
	UMCLOCALCAPS_ADDR2,
	UMCLOCALCAPS_ADDR3,
	UMCLOCALCAPS_ADDR4,
	UMCLOCALCAPS_ADDR5,
	UMCLOCALCAPS_ADDR6,
	UMCLOCALCAPS_ADDR7,
	UMCLOCALCAPS_ADDR8,
	UMCLOCALCAPS_ADDR9,
	UMCLOCALCAPS_ADDR10,
	UMCLOCALCAPS_ADDR11,
	UMCLOCALCAPS_ADDR12,
	UMCLOCALCAPS_ADDR13,
	UMCLOCALCAPS_ADDR14,
	UMCLOCALCAPS_ADDR15,
};

static const uint32_t ecc_umcch_umc_config_addrs[] = {
	UMCCH_UMC_CONFIG_ADDR0,
	UMCCH_UMC_CONFIG_ADDR1,
	UMCCH_UMC_CONFIG_ADDR2,
	UMCCH_UMC_CONFIG_ADDR3,
	UMCCH_UMC_CONFIG_ADDR4,
	UMCCH_UMC_CONFIG_ADDR5,
	UMCCH_UMC_CONFIG_ADDR6,
	UMCCH_UMC_CONFIG_ADDR7,
	UMCCH_UMC_CONFIG_ADDR8,
	UMCCH_UMC_CONFIG_ADDR9,
	UMCCH_UMC_CONFIG_ADDR10,
	UMCCH_UMC_CONFIG_ADDR11,
	UMCCH_UMC_CONFIG_ADDR12,
	UMCCH_UMC_CONFIG_ADDR13,
	UMCCH_UMC_CONFIG_ADDR14,
	UMCCH_UMC_CONFIG_ADDR15,
};

static const uint32_t ecc_umcch_eccctrl_addrs[] = {
	UMCCH_ECCCTRL_ADDR0,
	UMCCH_ECCCTRL_ADDR1,
	UMCCH_ECCCTRL_ADDR2,
	UMCCH_ECCCTRL_ADDR3,
	UMCCH_ECCCTRL_ADDR4,
	UMCCH_ECCCTRL_ADDR5,
	UMCCH_ECCCTRL_ADDR6,
	UMCCH_ECCCTRL_ADDR7,
	UMCCH_ECCCTRL_ADDR8,
	UMCCH_ECCCTRL_ADDR9,
	UMCCH_ECCCTRL_ADDR10,
	UMCCH_ECCCTRL_ADDR11,
	UMCCH_ECCCTRL_ADDR12,
	UMCCH_ECCCTRL_ADDR13,
	UMCCH_ECCCTRL_ADDR14,
	UMCCH_ECCCTRL_ADDR15,
};
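/*
 * These three parallel tables are walked by gmc_v9_0_ecc_available() to
 * verify, channel by channel, that ECC is not fused off (UmcLocalCap),
 * that DRAM is ready (UMC_CONFIG), and that both write and read ECC are
 * enabled (EccCtrl).
 */
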
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits, i, j;

	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp &= ~bits;
				WREG32(reg, tmp);
			}
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp |= bits;
				WREG32(reg, tmp);
			}
		}
		break;
	default:
		break;
	}

	return 0;
}
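/*
 * The IH ring entry for a VM fault carries the faulting page address in
 * two pieces: src_data[0] holds bits 43:12 and the low nibble of
 * src_data[1] holds bits 47:44. gmc_v9_0_process_interrupt() below
 * reassembles the 48-bit address before logging the fault.
 */
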
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vm_id_src];
	uint32_t status = 0;
	u64 addr;

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (!amdgpu_sriov_vf(adev)) {
		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		dev_err(adev->dev,
			"[%s] VMC page fault (src_id:%u ring:%u vm_id:%u pas_id:%u)\n",
			entry->vm_id_src ? "mmhub" : "gfxhub",
			entry->src_id, entry->ring_id, entry->vm_id,
			entry->pas_id);
		dev_err(adev->dev, "  at page 0x%016llx from %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev))
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
}

static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vm_id)
{
	u32 req = 0;

	/* invalidate using legacy mode on vm_id*/
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vm_id);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}
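/*
 * As an example, a legacy flush of VMID 8 yields a request word with
 * PER_VMID_INVALIDATE_REQ = 1 << 8 and every INVALIDATE_L1/L2 field set,
 * dropping all cached PTEs and PDEs for that VMID in a single request.
 */
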
/*
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_gart_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* Use register 17 for GART */
	const unsigned eng = 17;
	unsigned i, j;

	/* flush hdp cache */
	if (adev->flags & AMD_IS_APU)
		nbio_v7_0_hdp_flush(adev);
	else
		nbio_v6_1_hdp_flush(adev);

	spin_lock(&adev->mc.invalidate_lock);

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmhub *hub = &adev->vmhub[i];
		u32 tmp = gmc_v9_0_get_invalidate_req(vmid);

		WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);

		/* Busy wait for ACK.*/
		for (j = 0; j < 100; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;
			if (tmp)
				break;
			cpu_relax();
		}
		if (j < 100)
			continue;

		/* Wait for ACK with a delay.*/
		for (j = 0; j < adev->usec_timeout; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;
			if (tmp)
				break;
			udelay(1);
		}
		if (j < adev->usec_timeout)
			continue;

		DRM_ERROR("Timeout waiting for VM flush ACK!\n");
	}

	spin_unlock(&adev->mc.invalidate_lock);
}
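/*
 * The two-stage ACK wait above is a deliberate trade-off: a bounded
 * busy-wait catches the common case where the hub acknowledges almost
 * immediately, and only then does the code fall back to a udelay(1)
 * poll capped by adev->usec_timeout.
 */
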
/**
 * gmc_v9_0_gart_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v9_0_gart_set_pte_pde(struct amdgpu_device *adev,
				     void *cpu_pt_addr,
				     uint32_t gpu_page_idx,
				     uint64_t addr,
				     uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * PTE format on VEGA 10:
	 * 63:59 reserved
	 * 58:57 mtype
	 * 56 F, 55 L, 54 P, 53 SW, 52 T
	 * 50:48 reserved
	 * 47:12 4k physical page base address
	 * 11:7 fragment
	 * 6 write, 5 read, 4 exe
	 * 3 Z, 2 snooped, 1 system, 0 valid
	 *
	 * PDE format on VEGA 10:
	 * 63:59 block fragment size
	 * 58:55 reserved
	 * 54 P
	 * 53:48 reserved
	 * 47:6 physical base address of PD or PTE
	 * 5:3 reserved
	 * 2 C, 1 system, 0 valid
	 */

	/*
	 * The following is for PTE only. GART does not have PDEs.
	 */
	value = addr & 0x0000FFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));
	return 0;
}
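/*
 * Worked example of the PTE layout above, assuming the generic amdgpu
 * flag encodings: mapping a 4K page at physical address 0x1234567000
 * uncached and executable produces
 *   (0x1234567000 & 0x0000FFFFFFFFF000) | AMDGPU_PTE_MTYPE(MTYPE_UC) |
 *   AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_VALID
 * plus whatever read/write/system bits the caller requests; MTYPE_UC and
 * AMDGPU_PTE_EXECUTABLE are exactly what gmc_v9_0_gart_init() selects for
 * GART entries.
 */
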
static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
					  uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;

	switch (flags & AMDGPU_VM_MTYPE_MASK) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_NC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_WC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_WC);
		break;
	case AMDGPU_VM_MTYPE_CC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_CC);
		break;
	case AMDGPU_VM_MTYPE_UC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_UC);
		break;
	default:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	}

	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	return pte_flag;
}

static u64 gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, u64 addr)
{
	addr = adev->vm_manager.vram_base_offset + addr - adev->mc.vram_start;
	BUG_ON(addr & 0xFFFF00000000003FULL);
	return addr;
}

static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v9_0_gart_set_pte_pde,
	.get_invalidate_req = gmc_v9_0_get_invalidate_req,
	.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
	.get_vm_pde = gmc_v9_0_get_vm_pde
};

static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
{
	if (adev->gart.gart_funcs == NULL)
		adev->gart.gart_funcs = &gmc_v9_0_gart_funcs;
}

static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_set_gart_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);

	return 0;
}

static int gmc_v9_0_ecc_available(struct amdgpu_device *adev)
{
	uint32_t reg_val;
	uint32_t reg_addr;
	uint32_t field_val;
	size_t i;
	uint32_t fv2;
	size_t lost_sheep;

	DRM_DEBUG("ecc: gmc_v9_0_ecc_available()\n");

	lost_sheep = 0;
	for (i = 0; i < ARRAY_SIZE(ecc_umclocalcap_addrs); ++i) {
		reg_addr = ecc_umclocalcap_addrs[i];
		DRM_DEBUG("ecc: "
			  "UMCCH_UmcLocalCap[%zu]: reg_addr: 0x%08x\n",
			  i, reg_addr);
		reg_val = RREG32(reg_addr);
		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UmcLocalCap,
					  EccDis);
		DRM_DEBUG("ecc: reg_val: 0x%08x, EccDis: 0x%08x, ",
			  reg_val, field_val);
		if (field_val) {
			DRM_ERROR("ecc: UmcLocalCap:EccDis is set.\n");
			++lost_sheep;
		}
	}

	for (i = 0; i < ARRAY_SIZE(ecc_umcch_umc_config_addrs); ++i) {
		reg_addr = ecc_umcch_umc_config_addrs[i];
		DRM_DEBUG("ecc: "
			  "UMCCH0_0_UMC_CONFIG[%zu]: reg_addr: 0x%08x",
			  i, reg_addr);
		reg_val = RREG32(reg_addr);
		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UMC_CONFIG,
					  DramReady);
		DRM_DEBUG("ecc: reg_val: 0x%08x, "
			  "DramReady: 0x%08x\n",
			  reg_val, field_val);
		if (!field_val) {
			DRM_ERROR("ecc: UMC_CONFIG:DramReady is not set.\n");
			++lost_sheep;
		}
	}

	for (i = 0; i < ARRAY_SIZE(ecc_umcch_eccctrl_addrs); ++i) {
		reg_addr = ecc_umcch_eccctrl_addrs[i];
		DRM_DEBUG("ecc: "
			  "UMCCH_EccCtrl[%zu]: reg_addr: 0x%08x, ",
			  i, reg_addr);
		reg_val = RREG32(reg_addr);
		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
					  WrEccEn);
		fv2 = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
				    RdEccEn);
		DRM_DEBUG("ecc: reg_val: 0x%08x, WrEccEn: 0x%08x, RdEccEn: 0x%08x\n",
			  reg_val, field_val, fv2);
		if (!field_val) {
			DRM_DEBUG("ecc: WrEccEn is not set\n");
			++lost_sheep;
		}
		if (!fv2) {
			DRM_DEBUG("ecc: RdEccEn is not set\n");
			++lost_sheep;
		}
	}

	DRM_DEBUG("ecc: lost_sheep: %zu\n", lost_sheep);
	return lost_sheep == 0;
}

static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/*
	 * The latest engine allocation on gfx9 is:
	 * Engine 0, 1: idle
	 * Engine 2, 3: firmware
	 * Engine 4~13: amdgpu ring, subject to change when ring number changes
	 * Engine 14~15: idle
	 * Engine 16: kfd tlb invalidation
	 * Engine 17: Gart flushes
	 */
	unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
	unsigned i;
	int r;

	for (i = 0; i < adev->num_rings; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		unsigned vmhub = ring->funcs->vmhub;

		ring->vm_inv_eng = vm_inv_eng[vmhub]++;
		dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n",
			 ring->idx, ring->name, ring->vm_inv_eng,
			 ring->funcs->vmhub);
	}

	/* Engine 16 is used for KFD and 17 for GART flushes */
	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
		BUG_ON(vm_inv_eng[i] > 16);

	r = gmc_v9_0_ecc_available(adev);
	if (r == 1) {
		DRM_INFO("ECC is active.\n");
	} else if (r == 0) {
		DRM_INFO("ECC is not present.\n");
	} else {
		DRM_ERROR("gmc_v9_0_ecc_available() failed. r: %d\n", r);
		return r;
	}

	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}
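/*
 * Note the tri-state convention above: gmc_v9_0_ecc_available() returns
 * 1 when every channel passed its checks and 0 when ECC is absent or
 * disabled; the final else branch catches negative error codes should
 * the probe ever be able to fail outright.
 */
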
static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_mc *mc)
{
	u64 base = 0;

	if (!amdgpu_sriov_vf(adev))
		base = mmhub_v1_0_get_fb_location(adev);
	amdgpu_vram_location(adev, &adev->mc, base);
	amdgpu_gart_location(adev, mc);
	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU)
		adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
	else
		adev->vm_manager.vram_base_offset = 0;
}

/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int chansize, numchan;
	int r;

	adev->mc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
	if (!adev->mc.vram_width) {
		/* hbm memory channel size */
		chansize = 128;

		tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
		tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
		tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
		/* decode the IntLvNumChan encoding into a channel count */
		switch (tmp) {
		case 0:
		default:
			numchan = 1;
			break;
		case 1:
			numchan = 2;
			break;
		case 2:
			numchan = 0;
			break;
		case 3:
			numchan = 4;
			break;
		case 4:
			numchan = 0;
			break;
		case 5:
			numchan = 8;
			break;
		case 6:
			numchan = 0;
			break;
		case 7:
			numchan = 16;
			break;
		case 8:
			numchan = 2;
			break;
		}
		adev->mc.vram_width = numchan * chansize;
	}

	/* size in MB on si */
	adev->mc.mc_vram_size =
		((adev->flags & AMD_IS_APU) ? nbio_v7_0_get_memsize(adev) :
		 nbio_v6_1_get_memsize(adev)) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = adev->mc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->mc.visible_vram_size = adev->mc.aper_size;
	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
		adev->mc.visible_vram_size = adev->mc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:  /* all engines support GPUVM */
		default:
			adev->mc.gart_size = 256ULL << 20;
			break;
		case CHIP_RAVEN:   /* DCE SG support */
			adev->mc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->mc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v9_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}
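/*
 * amdgpu_device_resize_fb_bar() above tries to grow BAR0 to cover all of
 * VRAM; where that is impossible, the visible_vram_size clamp ensures the
 * driver never claims more CPU-visible VRAM than the BAR actually exposes.
 */
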
static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
				    AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}

static int gmc_v9_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfxhub_v1_0_init(adev);
	mmhub_v1_0_init(adev);

	spin_lock_init(&adev->mc.invalidate_lock);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
		if (adev->rev_id == 0x0 || adev->rev_id == 0x1)
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3);
		else
			/* vm_size is 64GB for legacy 2-level page support */
			amdgpu_vm_adjust_size(adev, 64, 9, 1);
		break;
	case CHIP_VEGA10:
		/* XXX Don't know how to get VRAM type yet. */
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_HBM;
		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3);
		break;
	default:
		break;
	}

	/* This interrupt is VMC page fault.*/
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0,
				&adev->mc.vm_fault);
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UTCL2, 0,
				&adev->mc.vm_fault);
	if (r)
		return r;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->mc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	/*
	 * It needs to reserve 8M stolen memory for vega10
	 * TODO: Figure out how to avoid that...
	 */
	adev->mc.stolen_size = 8 * 1024 * 1024;

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 44-bits.
	 * IGP - can handle 44-bits
	 * PCI - dma32 for legacy pci gart, 44 bits on vega10
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 44;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
	}

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;

	amdgpu_vm_manager_init(adev);

	return 0;
}

/**
 * gmc_v9_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v9_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}

static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	gmc_v9_0_gart_fini(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		amdgpu_program_register_sequence(adev,
						 golden_settings_mmhub_1_0_0,
						 ARRAY_SIZE(golden_settings_mmhub_1_0_0));
		amdgpu_program_register_sequence(adev,
						 golden_settings_athub_1_0_0,
						 ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	case CHIP_RAVEN:
		amdgpu_program_register_sequence(adev,
						 golden_settings_athub_1_0_0,
						 ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	default:
		break;
	}
}

/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;
	u32 tmp;

	amdgpu_program_register_sequence(adev,
					 golden_settings_vega10_hdp,
					 ARRAY_SIZE(golden_settings_vega10_hdp));

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		mmhub_v1_0_initialize_power_gating(adev);
		mmhub_v1_0_update_power_gating(adev, true);
		break;
	default:
		break;
	}

	r = gfxhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	r = mmhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);

	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

	/* After HDP is initialized, flush HDP.*/
	if (adev->flags & AMD_IS_APU)
		nbio_v7_0_hdp_flush(adev);
	else
		nbio_v6_1_hdp_flush(adev);

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	gfxhub_v1_0_set_fault_enable_default(adev, value);
	mmhub_v1_0_set_fault_enable_default(adev, value);
	gmc_v9_0_gart_flush_gpu_tlb(adev, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gart_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}

static int gmc_v9_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters.*/
	gmc_v9_0_init_golden_registers(adev);

	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture*/
		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);

		/* disable VGA render */
		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	}

	r = gmc_v9_0_gart_enable(adev);

	return r;
}

/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	gfxhub_v1_0_gart_disable(adev);
	mmhub_v1_0_gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}

static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}

static int gmc_v9_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return gmc_v9_0_hw_fini(adev);
}

static int gmc_v9_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v9_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vm_reset_all_ids(adev);

	return 0;
}

static bool gmc_v9_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v9.*/
	return true;
}

static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v9.*/
	return 0;
}

static int gmc_v9_0_soft_reset(void *handle)
{
	/* XXX for emulation.*/
	return 0;
}

static int gmc_v9_0_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return mmhub_v1_0_set_clockgating(adev, state);
}

static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	mmhub_v1_0_get_clockgating(adev, flags);
}

static int gmc_v9_0_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};