/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "amdgpu.h"
#include "gmc_v9_0.h"
#include "amdgpu_atomfirmware.h"

#include "vega10/soc15ip.h"
#include "vega10/HDP/hdp_4_0_offset.h"
#include "vega10/HDP/hdp_4_0_sh_mask.h"
#include "vega10/GC/gc_9_0_sh_mask.h"
#include "vega10/DC/dce_12_0_offset.h"
#include "vega10/DC/dce_12_0_sh_mask.h"
#include "vega10/vega10_enum.h"
#include "vega10/MMHUB/mmhub_1_0_offset.h"
#include "vega10/ATHUB/athub_1_0_offset.h"

#include "soc15_common.h"

#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"

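/* DF register and field definitions, used below in gmc_v9_0_mc_init() to
 * derive the VRAM width from the memory channel interleave configuration.
 */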
#define mmDF_CS_AON0_DramBaseAddress0						0x0044
#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX					0
//DF_CS_AON0_DramBaseAddress0
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT				0x0
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT			0x1
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT			0x4
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT			0x8
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT			0xc
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK				0x00000001L
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK			0x00000002L
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK				0x000000F0L
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK				0x00000700L
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK				0xFFFFF000L

/* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/
#define AMDGPU_NUM_OF_VMIDS			8

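/* The golden settings below are {offset, and_mask, or_value} triples
 * consumed by amdgpu_program_register_sequence().
 */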
static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};
static const u32 golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_OFFSET(MMHUB, 0, mmDAGB1_WRCLI2), 0x00000007, 0xfe5fe0fa,
	SOC15_REG_OFFSET(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0), 0x00000030, 0x55555565
};
static const u32 golden_settings_athub_1_0_0[] =
{
	SOC15_REG_OFFSET(ATHUB, 0, mmRPB_ARB_CNTL), 0x0000ff00, 0x00000800,
	SOC15_REG_OFFSET(ATHUB, 0, mmRPB_ARB_CNTL2), 0x00ff00ff, 0x00080008
};

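/* Enable or disable the VM fault interrupt source: sets or clears the
 * protection fault interrupt enable bits in VM_CONTEXT0..15_CNTL on every
 * VM hub.
 */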
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits, i, j;

	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp &= ~bits;
				WREG32(reg, tmp);
			}
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp |= bits;
				WREG32(reg, tmp);
			}
		}
		break;
	default:
		break;
	}

	return 0;
}

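/* Log a VM protection fault reported through the interrupt ring and, on
 * bare metal, read and clear the hub's L2 protection fault status.
 */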
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vm_id_src];
	uint32_t status = 0;
	u64 addr;

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (!amdgpu_sriov_vf(adev)) {
		status = RREG32(hub->vm_l2_pro_fault_status);
		/* writing 1 to bit 0 clears the fault status */
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		dev_err(adev->dev,
			"[%s] VMC page fault (src_id:%u ring:%u vm_id:%u pas_id:%u)\n",
			entry->vm_id_src ? "mmhub" : "gfxhub",
			entry->src_id, entry->ring_id, entry->vm_id,
			entry->pas_id);
		dev_err(adev->dev, " at page 0x%016llx from %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev))
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
}

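/* Build a VM_INVALIDATE_ENG*_REQ value that flushes all page table levels
 * (L1 PTEs, L2 PTEs and PDE0/1/2) for the given VMID in legacy mode.
 */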
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vm_id)
{
	u32 req = 0;

	/* invalidate using legacy mode on vm_id*/
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vm_id);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_gart_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* Use register 17 for GART */
	const unsigned eng = 17;
	unsigned i, j;

	/* flush hdp cache */
	if (adev->flags & AMD_IS_APU)
		nbio_v7_0_hdp_flush(adev);
	else
		nbio_v6_1_hdp_flush(adev);

	spin_lock(&adev->mc.invalidate_lock);

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmhub *hub = &adev->vmhub[i];
		u32 tmp = gmc_v9_0_get_invalidate_req(vmid);

		WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);

		/* Busy wait for ACK.*/
		for (j = 0; j < 100; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			if (tmp & (1 << vmid))
				break;
			cpu_relax();
		}

		if (j < 100)
			continue;

		/* Wait for ACK with a delay.*/
		for (j = 0; j < adev->usec_timeout; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			if (tmp & (1 << vmid))
				break;
			udelay(1);
		}

		if (j < adev->usec_timeout)
			continue;

		DRM_ERROR("Timeout waiting for VM flush ACK!\n");
	}

	spin_unlock(&adev->mc.invalidate_lock);
}

/**
 * gmc_v9_0_gart_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v9_0_gart_set_pte_pde(struct amdgpu_device *adev,
					void *cpu_pt_addr,
					uint32_t gpu_page_idx,
					uint64_t addr,
					uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * PTE format on VEGA 10:
	 * 63:59 reserved
	 * 58:57 mtype
	 * 56 F
	 * 55 L
	 * 54 P
	 * 53 SW
	 * 52 T
	 * 50:48 reserved
	 * 47:12 4k physical page base address
	 * 11:7 fragment
	 * 6 write
	 * 5 read
	 * 4 exe
	 * 3 Z
	 * 2 snooped
	 * 1 system
	 * 0 valid
	 *
	 * PDE format on VEGA 10:
	 * 63:59 block fragment size
	 * 58:55 reserved
	 * 54 P
	 * 53:48 reserved
	 * 47:6 physical base address of PD or PTE
	 * 5:3 reserved
	 * 2 C
	 * 1 system
	 * 0 valid
	 */

	/*
	 * The following is for PTE only. GART does not have PDEs.
	 */
	value = addr & 0x0000FFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));
	return 0;
}

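/* Translate the driver's AMDGPU_VM_PAGE_* and AMDGPU_VM_MTYPE_* mapping
 * flags into VEGA 10 hardware PTE bits.
 */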
static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
					  uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;

	switch (flags & AMDGPU_VM_MTYPE_MASK) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_NC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_WC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_WC);
		break;
	case AMDGPU_VM_MTYPE_CC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_CC);
		break;
	case AMDGPU_VM_MTYPE_UC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_UC);
		break;
	default:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	}

	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	return pte_flag;
}

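/* Turn a VRAM MC address into the physical address stored in a PDE; the
 * result must be 64 byte aligned and fit in the 48 bit address space.
 */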
static u64 gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, u64 addr)
{
	addr = adev->vm_manager.vram_base_offset + addr - adev->mc.vram_start;
	BUG_ON(addr & 0xFFFF00000000003FULL);
	return addr;
}

static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v9_0_gart_set_pte_pde,
	.get_invalidate_req = gmc_v9_0_get_invalidate_req,
	.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
	.get_vm_pde = gmc_v9_0_get_vm_pde
};

static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
{
	if (adev->gart.gart_funcs == NULL)
		adev->gart.gart_funcs = &gmc_v9_0_gart_funcs;
}

static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_set_gart_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);

	return 0;
}

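/* Distribute VM invalidation engines to the rings on each hub, starting at
 * engine 3; engine 17 is reserved for GART flushes (see
 * gmc_v9_0_gart_flush_gpu_tlb).
 */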
static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 3, 3 };
	unsigned i;

	for(i = 0; i < adev->num_rings; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		unsigned vmhub = ring->funcs->vmhub;

		ring->vm_inv_eng = vm_inv_eng[vmhub]++;
		dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n",
			 ring->idx, ring->name, ring->vm_inv_eng,
			 ring->funcs->vmhub);
	}

	/* Engine 17 is used for GART flushes */
	for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
		BUG_ON(vm_inv_eng[i] > 17);

	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}

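/* Place VRAM and GART in the GPU's internal address space. On bare metal
 * the VRAM base comes from the MMHUB FB location; under SR-IOV it stays
 * at 0.
 */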
static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_mc *mc)
{
	u64 base = 0;

	if (!amdgpu_sriov_vf(adev))
		base = mmhub_v1_0_get_fb_location(adev);
	amdgpu_vram_location(adev, &adev->mc, base);
	amdgpu_gart_location(adev, mc);
	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU)
		adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
	else
		adev->vm_manager.vram_base_offset = 0;
}

/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int chansize, numchan;

	adev->mc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
	if (!adev->mc.vram_width) {
		/* hbm memory channel size */
		chansize = 128;

		/* derive the channel count from the DF interleave setting */
		tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
		tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
		tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
		switch (tmp) {
		case 0:
		default:
			numchan = 1;
			break;
		case 1:
			numchan = 2;
			break;
		case 2:
			numchan = 0;
			break;
		case 3:
			numchan = 4;
			break;
		case 4:
			numchan = 0;
			break;
		case 5:
			numchan = 8;
			break;
		case 6:
			numchan = 0;
			break;
		case 7:
			numchan = 16;
			break;
		case 8:
			numchan = 2;
			break;
		}
		adev->mc.vram_width = numchan * chansize;
	}

	/* Could aper size report 0 ? */
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
	/* size in MB */
	adev->mc.mc_vram_size =
		((adev->flags & AMD_IS_APU) ? nbio_v7_0_get_memsize(adev) :
		 nbio_v6_1_get_memsize(adev)) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = adev->mc.mc_vram_size;
	adev->mc.visible_vram_size = adev->mc.aper_size;

	/* In case the PCI BAR is larger than the actual amount of vram */
	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
		adev->mc.visible_vram_size = adev->mc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:  /* all engines support GPUVM */
		default:
			adev->mc.gart_size = 256ULL << 20;
			break;
		case CHIP_RAVEN:   /* DCE SG support */
			adev->mc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->mc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v9_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}

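/* Create the software GART structures and allocate the GART page table in
 * VRAM; GART PTEs are mapped uncached (MTYPE_UC) and executable.
 */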
static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
				 AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}

static int gmc_v9_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfxhub_v1_0_init(adev);
	mmhub_v1_0_init(adev);

	spin_lock_init(&adev->mc.invalidate_lock);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			adev->vm_manager.vm_size = 1U << 18;
			adev->vm_manager.block_size = 9;
			adev->vm_manager.num_level = 3;
			amdgpu_vm_set_fragment_size(adev, 9);
		} else {
			/* vm_size is 64GB for legacy 2-level page support */
			amdgpu_vm_adjust_size(adev, 64, 9);
			adev->vm_manager.num_level = 1;
		}
		break;
	case CHIP_VEGA10:
		/* XXX Don't know how to get VRAM type yet. */
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_HBM;
		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */
		adev->vm_manager.vm_size = 1U << 18;
		adev->vm_manager.block_size = 9;
		adev->vm_manager.num_level = 3;
		amdgpu_vm_set_fragment_size(adev, 9);
		break;
	default:
		break;
	}

	DRM_INFO("vm size is %llu GB, block size is %u-bit, fragment size is %u-bit\n",
		 adev->vm_manager.vm_size,
		 adev->vm_manager.block_size,
		 adev->vm_manager.fragment_size);

	/* This interrupt is VMC page fault.*/
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0,
				&adev->mc.vm_fault);
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UTCL2, 0,
				&adev->mc.vm_fault);

	if (r)
		return r;

	adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->mc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	/*
	 * It needs to reserve 8M stolen memory for vega10
	 * TODO: Figure out how to avoid that...
	 */
	adev->mc.stolen_size = 8 * 1024 * 1024;

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 44-bits.
	 * IGP - can handle 44-bits
	 * PCI - dma32 for legacy pci gart, 44 bits on vega10
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 44;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
	}

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;

	amdgpu_vm_manager_init(adev);

	return 0;
}

/**
 * gmc_v9_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v9_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}

static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_vm_manager_fini(adev);
	gmc_v9_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

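/* Program the golden register settings for the ASIC; called from hw_init
 * before the GART is enabled.
 */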
static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		amdgpu_program_register_sequence(adev,
						golden_settings_mmhub_1_0_0,
						(const u32)ARRAY_SIZE(golden_settings_mmhub_1_0_0));
		amdgpu_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						(const u32)ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	case CHIP_RAVEN:
		amdgpu_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						(const u32)ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	default:
		break;
	}
}

/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;
	u32 tmp;

	amdgpu_program_register_sequence(adev,
					golden_settings_vega10_hdp,
					(const u32)ARRAY_SIZE(golden_settings_vega10_hdp));

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		mmhub_v1_0_initialize_power_gating(adev);
		mmhub_v1_0_update_power_gating(adev, true);
		break;
	default:
		break;
	}

	r = gfxhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	r = mmhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);

	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

	/* After HDP is initialized, flush HDP.*/
	if (adev->flags & AMD_IS_APU)
		nbio_v7_0_hdp_flush(adev);
	else
		nbio_v6_1_hdp_flush(adev);

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	gfxhub_v1_0_set_fault_enable_default(adev, value);
	mmhub_v1_0_set_fault_enable_default(adev, value);
	gmc_v9_0_gart_flush_gpu_tlb(adev, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gart_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}

static int gmc_v9_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters.*/
	gmc_v9_0_init_golden_registers(adev);

	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture*/
		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);

		/* disable VGA render */
		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	}

	r = gmc_v9_0_gart_enable(adev);

	return r;
}

/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	gfxhub_v1_0_gart_disable(adev);
	mmhub_v1_0_gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}

static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}

static int gmc_v9_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return gmc_v9_0_hw_fini(adev);
}

static int gmc_v9_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v9_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vm_reset_all_ids(adev);

	return 0;
}

static bool gmc_v9_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v9.*/
	return true;
}

static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v9.*/
	return 0;
}

static int gmc_v9_0_soft_reset(void *handle)
{
	/* XXX for emulation.*/
	return 0;
}

static int gmc_v9_0_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return mmhub_v1_0_set_clockgating(adev, state);
}

static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	mmhub_v1_0_get_clockgating(adev, flags);
}

static int gmc_v9_0_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};