2 * Copyright 2016 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/firmware.h>
25 #include <linux/pci.h>
27 #include <drm/drm_cache.h>
31 #include "amdgpu_atomfirmware.h"
32 #include "amdgpu_gem.h"
34 #include "gc/gc_9_0_sh_mask.h"
35 #include "dce/dce_12_0_offset.h"
36 #include "dce/dce_12_0_sh_mask.h"
37 #include "vega10_enum.h"
38 #include "mmhub/mmhub_1_0_offset.h"
39 #include "athub/athub_1_0_sh_mask.h"
40 #include "athub/athub_1_0_offset.h"
41 #include "oss/osssys_4_0_offset.h"
45 #include "soc15_common.h"
46 #include "umc/umc_6_0_sh_mask.h"
48 #include "gfxhub_v1_0.h"
49 #include "mmhub_v1_0.h"
50 #include "athub_v1_0.h"
51 #include "gfxhub_v1_1.h"
52 #include "mmhub_v9_4.h"
53 #include "mmhub_v1_7.h"
60 #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
62 #include "amdgpu_ras.h"
63 #include "amdgpu_xgmi.h"
65 #include "amdgpu_reset.h"
67 /* add these here since we already include dce12 headers and these are for DCN */
68 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d
69 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
70 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
71 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
72 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
73 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
74 #define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0 0x049d
75 #define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX 2
77 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2 0x05ea
78 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2_BASE_IDX 2
81 static const char *gfxhub_client_ids[] = {
97 static const char *mmhub_client_ids_raven[][2] = {
122 static const char *mmhub_client_ids_renoir[][2] = {
150 static const char *mmhub_client_ids_vega10[][2] = {
163 [32+14][0] = "SDMA0",
176 [32+4][1] = "DCEDWB",
179 [32+14][1] = "SDMA1",
182 static const char *mmhub_client_ids_vega12[][2] = {
195 [32+15][0] = "SDMA0",
205 [32+1][1] = "DCEDWB",
211 [32+15][1] = "SDMA1",
214 static const char *mmhub_client_ids_vega20[][2] = {
228 [32+12][0] = "UTCL2",
229 [32+14][0] = "SDMA1",
247 [32+14][1] = "SDMA1",
250 static const char *mmhub_client_ids_arcturus[][2] = {
291 static const char *mmhub_client_ids_aldebaran[][2] = {
294 [32+1][0] = "DBGU_IO0",
295 [32+2][0] = "DBGU_IO2",
297 [96+11][0] = "JPEG0",
299 [96+13][0] = "VCNU0",
300 [128+11][0] = "JPEG1",
301 [128+12][0] = "VCN1",
302 [128+13][0] = "VCNU1",
305 [256+0][0] = "SDMA0",
306 [256+1][0] = "SDMA1",
307 [256+2][0] = "SDMA2",
308 [256+3][0] = "SDMA3",
309 [256+4][0] = "SDMA4",
313 [32+1][1] = "DBGU_IO0",
314 [32+2][1] = "DBGU_IO2",
316 [96+11][1] = "JPEG0",
318 [96+13][1] = "VCNU0",
319 [128+11][1] = "JPEG1",
320 [128+12][1] = "VCN1",
321 [128+13][1] = "VCNU1",
324 [256+0][1] = "SDMA0",
325 [256+1][1] = "SDMA1",
326 [256+2][1] = "SDMA2",
327 [256+3][1] = "SDMA3",
328 [256+4][1] = "SDMA4",
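/* Golden register overrides, applied at hw_init time by
 * gmc_v9_0_init_golden_registers() below via
 * soc15_program_register_sequence().
 */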
332 static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
334 SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
335 SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
338 static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
340 SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
341 SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
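/* Absolute offsets of the per-channel UMC ECC control registers used by
 * gmc_v9_0_ecc_interrupt_state() below: four channels per group, 0x800
 * apart, with successive groups 0x40000 apart.
 */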
344 static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
345 (0x000143c0 + 0x00000000),
346 (0x000143c0 + 0x00000800),
347 (0x000143c0 + 0x00001000),
348 (0x000143c0 + 0x00001800),
349 (0x000543c0 + 0x00000000),
350 (0x000543c0 + 0x00000800),
351 (0x000543c0 + 0x00001000),
352 (0x000543c0 + 0x00001800),
353 (0x000943c0 + 0x00000000),
354 (0x000943c0 + 0x00000800),
355 (0x000943c0 + 0x00001000),
356 (0x000943c0 + 0x00001800),
357 (0x000d43c0 + 0x00000000),
358 (0x000d43c0 + 0x00000800),
359 (0x000d43c0 + 0x00001000),
360 (0x000d43c0 + 0x00001800),
361 (0x001143c0 + 0x00000000),
362 (0x001143c0 + 0x00000800),
363 (0x001143c0 + 0x00001000),
364 (0x001143c0 + 0x00001800),
365 (0x001543c0 + 0x00000000),
366 (0x001543c0 + 0x00000800),
367 (0x001543c0 + 0x00001000),
368 (0x001543c0 + 0x00001800),
369 (0x001943c0 + 0x00000000),
370 (0x001943c0 + 0x00000800),
371 (0x001943c0 + 0x00001000),
372 (0x001943c0 + 0x00001800),
373 (0x001d43c0 + 0x00000000),
374 (0x001d43c0 + 0x00000800),
375 (0x001d43c0 + 0x00001000),
376 (0x001d43c0 + 0x00001800),
379 static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
380 (0x000143e0 + 0x00000000),
381 (0x000143e0 + 0x00000800),
382 (0x000143e0 + 0x00001000),
383 (0x000143e0 + 0x00001800),
384 (0x000543e0 + 0x00000000),
385 (0x000543e0 + 0x00000800),
386 (0x000543e0 + 0x00001000),
387 (0x000543e0 + 0x00001800),
388 (0x000943e0 + 0x00000000),
389 (0x000943e0 + 0x00000800),
390 (0x000943e0 + 0x00001000),
391 (0x000943e0 + 0x00001800),
392 (0x000d43e0 + 0x00000000),
393 (0x000d43e0 + 0x00000800),
394 (0x000d43e0 + 0x00001000),
395 (0x000d43e0 + 0x00001800),
396 (0x001143e0 + 0x00000000),
397 (0x001143e0 + 0x00000800),
398 (0x001143e0 + 0x00001000),
399 (0x001143e0 + 0x00001800),
400 (0x001543e0 + 0x00000000),
401 (0x001543e0 + 0x00000800),
402 (0x001543e0 + 0x00001000),
403 (0x001543e0 + 0x00001800),
404 (0x001943e0 + 0x00000000),
405 (0x001943e0 + 0x00000800),
406 (0x001943e0 + 0x00001000),
407 (0x001943e0 + 0x00001800),
408 (0x001d43e0 + 0x00000000),
409 (0x001d43e0 + 0x00000800),
410 (0x001d43e0 + 0x00001000),
411 (0x001d43e0 + 0x00001800),
414 static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
415 struct amdgpu_irq_src *src,
417 enum amdgpu_interrupt_state state)
419 u32 bits, i, tmp, reg;
421 /* Devices newer than VEGA10/12 shall have these programming
422 sequences performed by the PSP BL */
423 if (adev->asic_type >= CHIP_VEGA20)
429 case AMDGPU_IRQ_STATE_DISABLE:
430 for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
431 reg = ecc_umc_mcumc_ctrl_addrs[i];
436 for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
437 reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
443 case AMDGPU_IRQ_STATE_ENABLE:
444 for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
445 reg = ecc_umc_mcumc_ctrl_addrs[i];
450 for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
451 reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
464 static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
465 struct amdgpu_irq_src *src,
467 enum amdgpu_interrupt_state state)
469 struct amdgpu_vmhub *hub;
470 u32 tmp, reg, bits, i, j;
472 bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
473 VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
474 VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
475 VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
476 VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
477 VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
478 VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
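/* Toggle the protection-fault interrupt enable bits in
 * VM_CONTEXT0..15_CNTL on every VM hub; the GC and MMHUB hubs are
 * accessed through different register paths, hence the
 * AMDGPU_GFXHUB_0 special case below.
 */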
481 case AMDGPU_IRQ_STATE_DISABLE:
482 for (j = 0; j < adev->num_vmhubs; j++) {
483 hub = &adev->vmhub[j];
484 for (i = 0; i < 16; i++) {
485 reg = hub->vm_context0_cntl + i;
487 if (j == AMDGPU_GFXHUB_0)
488 tmp = RREG32_SOC15_IP(GC, reg);
490 tmp = RREG32_SOC15_IP(MMHUB, reg);
494 if (j == AMDGPU_GFXHUB_0)
495 WREG32_SOC15_IP(GC, reg, tmp);
497 WREG32_SOC15_IP(MMHUB, reg, tmp);
501 case AMDGPU_IRQ_STATE_ENABLE:
502 for (j = 0; j < adev->num_vmhubs; j++) {
503 hub = &adev->vmhub[j];
504 for (i = 0; i < 16; i++) {
505 reg = hub->vm_context0_cntl + i;
507 if (j == AMDGPU_GFXHUB_0)
508 tmp = RREG32_SOC15_IP(GC, reg);
510 tmp = RREG32_SOC15_IP(MMHUB, reg);
514 if (j == AMDGPU_GFXHUB_0)
515 WREG32_SOC15_IP(GC, reg, tmp);
517 WREG32_SOC15_IP(MMHUB, reg, tmp);
528 static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
529 struct amdgpu_irq_src *source,
530 struct amdgpu_iv_entry *entry)
532 bool retry_fault = !!(entry->src_data[1] & 0x80);
533 bool write_fault = !!(entry->src_data[1] & 0x20);
534 uint32_t status = 0, cid = 0, rw = 0;
535 struct amdgpu_task_info task_info;
536 struct amdgpu_vmhub *hub;
537 const char *mmhub_cid;
538 const char *hub_name;
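/* The IV entry packs the page-aligned fault address as bits 43:12 in
 * src_data[0] and bits 47:44 in the low nibble of src_data[1];
 * reassemble it below.
 */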
541 addr = (u64)entry->src_data[0] << 12;
542 addr |= ((u64)entry->src_data[1] & 0xf) << 44;
545 /* Returning 1 here also prevents sending the IV to the KFD */
547 /* Process it only if it's the first fault for this address */
548 if (entry->ih != &adev->irq.ih_soft &&
549 amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
553 /* Delegate it to a different ring if the hardware hasn't
556 if (entry->ih == &adev->irq.ih) {
557 amdgpu_irq_delegate(adev, entry, 8);
561 /* Try to handle the recoverable page faults by filling page
564 if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault))
568 if (!printk_ratelimit())
571 if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
573 hub = &adev->vmhub[AMDGPU_MMHUB_0];
574 } else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
576 hub = &adev->vmhub[AMDGPU_MMHUB_1];
578 hub_name = "gfxhub0";
579 hub = &adev->vmhub[AMDGPU_GFXHUB_0];
582 memset(&task_info, 0, sizeof(struct amdgpu_task_info));
583 amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
586 "[%s] %s page fault (src_id:%u ring:%u vmid:%u "
587 "pasid:%u, for process %s pid %d thread %s pid %d)\n",
588 hub_name, retry_fault ? "retry" : "no-retry",
589 entry->src_id, entry->ring_id, entry->vmid,
590 entry->pasid, task_info.process_name, task_info.tgid,
591 task_info.task_name, task_info.pid);
592 dev_err(adev->dev, " in page starting at address 0x%016llx from IH client 0x%x (%s)\n",
593 addr, entry->client_id,
594 soc15_ih_clientid_name[entry->client_id]);
596 if (amdgpu_sriov_vf(adev))
600 * Issue a dummy read to wait for the status register to
601 * be updated to avoid reading an incorrect value due to
602 * the new fast GRBM interface.
604 if ((entry->vmid_src == AMDGPU_GFXHUB_0) &&
605 (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2)))
606 RREG32(hub->vm_l2_pro_fault_status);
608 status = RREG32(hub->vm_l2_pro_fault_status);
609 cid = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, CID);
610 rw = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, RW);
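/* write bit 0 of VM_L2_PROTECTION_FAULT_CNTL to clear the latched
 * fault information so the next fault can be captured
 */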
611 WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
615 "VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
617 if (hub == &adev->vmhub[AMDGPU_GFXHUB_0]) {
618 dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
619 cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" :
620 gfxhub_client_ids[cid],
623 switch (adev->ip_versions[MMHUB_HWIP][0]) {
624 case IP_VERSION(9, 0, 0):
625 mmhub_cid = mmhub_client_ids_vega10[cid][rw];
627 case IP_VERSION(9, 3, 0):
628 mmhub_cid = mmhub_client_ids_vega12[cid][rw];
630 case IP_VERSION(9, 4, 0):
631 mmhub_cid = mmhub_client_ids_vega20[cid][rw];
633 case IP_VERSION(9, 4, 1):
634 mmhub_cid = mmhub_client_ids_arcturus[cid][rw];
636 case IP_VERSION(9, 1, 0):
637 case IP_VERSION(9, 2, 0):
638 mmhub_cid = mmhub_client_ids_raven[cid][rw];
640 case IP_VERSION(1, 5, 0):
641 case IP_VERSION(2, 4, 0):
642 mmhub_cid = mmhub_client_ids_renoir[cid][rw];
644 case IP_VERSION(9, 4, 2):
645 mmhub_cid = mmhub_client_ids_aldebaran[cid][rw];
651 dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
652 mmhub_cid ? mmhub_cid : "unknown", cid);
654 dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
655 REG_GET_FIELD(status,
656 VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
657 dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
658 REG_GET_FIELD(status,
659 VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
660 dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
661 REG_GET_FIELD(status,
662 VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
663 dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
664 REG_GET_FIELD(status,
665 VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
666 dev_err(adev->dev, "\t RW: 0x%x\n", rw);
670 static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
671 .set = gmc_v9_0_vm_fault_interrupt_state,
672 .process = gmc_v9_0_process_interrupt,
676 static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
677 .set = gmc_v9_0_ecc_interrupt_state,
678 .process = amdgpu_umc_process_ecc_irq,
681 static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
683 adev->gmc.vm_fault.num_types = 1;
684 adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
686 if (!amdgpu_sriov_vf(adev) &&
687 !adev->gmc.xgmi.connected_to_cpu) {
688 adev->gmc.ecc_irq.num_types = 1;
689 adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
693 static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
698 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
699 PER_VMID_INVALIDATE_REQ, 1 << vmid);
700 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
701 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
702 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
703 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
704 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
705 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
706 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
707 CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
713 * gmc_v9_0_use_invalidate_semaphore - decide whether to use the invalidation semaphore
715 * @adev: amdgpu_device pointer
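 *
 * The semaphore is only used for the MMHUB(s) on bare-metal parts; it is
 * skipped under SRIOV, on GC 9.4.2 and on Picasso APUs without the Raven2
 * flag (see the checks below).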
719 static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
722 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
725 return ((vmhub == AMDGPU_MMHUB_0 ||
726 vmhub == AMDGPU_MMHUB_1) &&
727 (!amdgpu_sriov_vf(adev)) &&
728 (!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
729 (adev->apu_flags & AMD_APU_IS_PICASSO))));
732 static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
733 uint8_t vmid, uint16_t *p_pasid)
737 value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
739 *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
741 return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
746 * VMID 0 maps the physical GPU address space as used by the kernel.
747 * VMIDs 1-15 are used for userspace clients and are handled
748 * by the amdgpu vm/hsa code.
752 * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
754 * @adev: amdgpu_device pointer
755 * @vmid: vm instance to flush
756 * @vmhub: which hub to flush
757 * @flush_type: the flush type
759 * Flush the TLB for the requested page table using the given flush type.
761 static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
762 uint32_t vmhub, uint32_t flush_type)
764 bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
765 const unsigned eng = 17;
766 u32 j, inv_req, inv_req2, tmp;
767 struct amdgpu_vmhub *hub;
769 BUG_ON(vmhub >= adev->num_vmhubs);
771 hub = &adev->vmhub[vmhub];
772 if (adev->gmc.xgmi.num_physical_nodes &&
773 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0)) {
774 /* Vega20+XGMI caches PTEs in TC and TLB. Add a
775 * heavy-weight TLB flush (type 2), which flushes
776 * both. Due to a race condition with concurrent
777 * memory accesses using the same TLB cache line, we
778 * still need a second TLB flush after this.
780 inv_req = gmc_v9_0_get_invalidate_req(vmid, 2);
781 inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type);
783 inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
787 /* This is necessary for a HW workaround under SRIOV as well
788 * as GFXOFF under bare metal
790 if (adev->gfx.kiq.ring.sched.ready &&
791 (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
792 down_read_trylock(&adev->reset_domain->sem)) {
793 uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
794 uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
796 amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
798 up_read(&adev->reset_domain->sem);
802 spin_lock(&adev->gmc.invalidate_lock);
805 * The hub may lose the gpuvm invalidate acknowledge state across a
806 * power-gating off cycle; add a semaphore acquire before invalidation and
807 * a semaphore release after it to avoid entering a power-gated state
811 /* TODO: Semaphore handling for GFXHUB still needs further debugging. */
813 for (j = 0; j < adev->usec_timeout; j++) {
814 /* a read return value of 1 means the semaphore was acquired */
815 if (vmhub == AMDGPU_GFXHUB_0)
816 tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng);
818 tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng);
825 if (j >= adev->usec_timeout)
826 DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
830 if (vmhub == AMDGPU_GFXHUB_0)
831 WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);
833 WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);
836 * Issue a dummy read to wait for the ACK register to
837 * be cleared to avoid a false ACK due to the new fast
840 if ((vmhub == AMDGPU_GFXHUB_0) &&
841 (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2)))
842 RREG32_NO_KIQ(hub->vm_inv_eng0_req +
843 hub->eng_distance * eng);
845 for (j = 0; j < adev->usec_timeout; j++) {
846 if (vmhub == AMDGPU_GFXHUB_0)
847 tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_ack + hub->eng_distance * eng);
849 tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_ack + hub->eng_distance * eng);
851 if (tmp & (1 << vmid))
860 /* TODO: Semaphore handling for GFXHUB still needs further debugging. */
863 * add a semaphore release after invalidation;
864 * writing 0 releases the semaphore
866 if (vmhub == AMDGPU_GFXHUB_0)
867 WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0);
869 WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0);
872 spin_unlock(&adev->gmc.invalidate_lock);
874 if (j < adev->usec_timeout)
877 DRM_ERROR("Timeout waiting for VM flush ACK!\n");
881 * gmc_v9_0_flush_gpu_tlb_pasid - tlb flush via pasid
883 * @adev: amdgpu_device pointer
884 * @pasid: pasid to be flushed
885 * @flush_type: the flush type
886 * @all_hub: flush all hubs
888 * Flush the TLB for the requested pasid.
890 static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
891 uint16_t pasid, uint32_t flush_type,
897 uint16_t queried_pasid;
899 u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
900 struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
901 struct amdgpu_kiq *kiq = &adev->gfx.kiq;
903 if (amdgpu_in_reset(adev))
906 if (ring->sched.ready && down_read_trylock(&adev->reset_domain->sem)) {
907 /* Vega20+XGMI caches PTEs in TC and TLB. Add a
908 * heavy-weight TLB flush (type 2), which flushes
909 * both. Due to a race condition with concurrent
910 * memory accesses using the same TLB cache line, we
911 * still need a second TLB flush after this.
913 bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes &&
914 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0));
915 /* 2 dwords flush + 8 dwords fence */
916 unsigned int ndw = kiq->pmf->invalidate_tlbs_size + 8;
919 ndw += kiq->pmf->invalidate_tlbs_size;
921 spin_lock(&adev->gfx.kiq.ring_lock);
922 /* 2 dwords flush + 8 dwords fence */
923 amdgpu_ring_alloc(ring, ndw);
925 kiq->pmf->kiq_invalidate_tlbs(ring,
927 kiq->pmf->kiq_invalidate_tlbs(ring,
928 pasid, flush_type, all_hub);
929 r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
931 amdgpu_ring_undo(ring);
932 spin_unlock(&adev->gfx.kiq.ring_lock);
933 up_read(&adev->reset_domain->sem);
937 amdgpu_ring_commit(ring);
938 spin_unlock(&adev->gfx.kiq.ring_lock);
939 r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
941 dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
942 up_read(&adev->reset_domain->sem);
945 up_read(&adev->reset_domain->sem);
949 for (vmid = 1; vmid < 16; vmid++) {
951 ret = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
953 if (ret && queried_pasid == pasid) {
955 for (i = 0; i < adev->num_vmhubs; i++)
956 gmc_v9_0_flush_gpu_tlb(adev, vmid,
959 gmc_v9_0_flush_gpu_tlb(adev, vmid,
960 AMDGPU_GFXHUB_0, flush_type);
970 static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
971 unsigned vmid, uint64_t pd_addr)
973 bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
974 struct amdgpu_device *adev = ring->adev;
975 struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
976 uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
977 unsigned eng = ring->vm_inv_eng;
980 * The hub may lose the gpuvm invalidate acknowledge state across a
981 * power-gating off cycle; add a semaphore acquire before invalidation and
982 * a semaphore release after it to avoid entering a power-gated state
986 /* TODO: Semaphore handling for GFXHUB still needs further debugging. */
988 /* a read return value of 1 means the semaphore was acquired */
989 amdgpu_ring_emit_reg_wait(ring,
990 hub->vm_inv_eng0_sem +
991 hub->eng_distance * eng, 0x1, 0x1);
993 amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
994 (hub->ctx_addr_distance * vmid),
995 lower_32_bits(pd_addr));
997 amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
998 (hub->ctx_addr_distance * vmid),
999 upper_32_bits(pd_addr));
1001 amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
1002 hub->eng_distance * eng,
1003 hub->vm_inv_eng0_ack +
1004 hub->eng_distance * eng,
1007 /* TODO: Semaphore handling for GFXHUB still needs further debugging. */
1010 * add a semaphore release after invalidation;
1011 * writing 0 releases the semaphore
1013 amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
1014 hub->eng_distance * eng, 0);
1019 static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
1022 struct amdgpu_device *adev = ring->adev;
1025 /* Do nothing because there's no lut register for mmhub1. */
1026 if (ring->funcs->vmhub == AMDGPU_MMHUB_1)
1029 if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
1030 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
1032 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
1034 amdgpu_ring_emit_wreg(ring, reg, pasid);
1038 * PTE format on VEGA 10:
1047 * 47:12 4k physical page base address
1057 * PDE format on VEGA 10:
1058 * 63:59 block fragment size
1062 * 47:6 physical base address of PD or PTE
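 *
 * Illustrative sketch only (not driver code): using the flag macros this
 * file already relies on, a 4K PTE for cacheable system memory could be
 * assembled from the fields above roughly as
 *
 *   pte  = page_addr & 0x0000FFFFFFFFF000ULL;   (bits 47:12)
 *   pte |= AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE;
 *   pte |= AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED;
 *   pte |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);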
1069 static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
1073 case AMDGPU_VM_MTYPE_DEFAULT:
1074 return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
1075 case AMDGPU_VM_MTYPE_NC:
1076 return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
1077 case AMDGPU_VM_MTYPE_WC:
1078 return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
1079 case AMDGPU_VM_MTYPE_RW:
1080 return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
1081 case AMDGPU_VM_MTYPE_CC:
1082 return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
1083 case AMDGPU_VM_MTYPE_UC:
1084 return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
1086 return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
1090 static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
1091 uint64_t *addr, uint64_t *flags)
1093 if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
1094 *addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
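/* PD/PT base addresses must be 64-byte aligned and fit in the 48-bit
 * GPU physical address space, hence the mask below.
 */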
1095 BUG_ON(*addr & 0xFFFF00000000003FULL);
1097 if (!adev->gmc.translate_further)
1100 if (level == AMDGPU_VM_PDB1) {
1101 /* Set the block fragment size */
1102 if (!(*flags & AMDGPU_PDE_PTE))
1103 *flags |= AMDGPU_PDE_BFS(0x9);
1105 } else if (level == AMDGPU_VM_PDB0) {
1106 if (*flags & AMDGPU_PDE_PTE) {
1107 *flags &= ~AMDGPU_PDE_PTE;
1108 if (!(*flags & AMDGPU_PTE_VALID))
1109 *addr |= 1 << PAGE_SHIFT;
1111 *flags |= AMDGPU_PTE_TF;
1116 static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
1117 struct amdgpu_bo *bo,
1118 struct amdgpu_bo_va_mapping *mapping,
1121 struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
1122 bool is_vram = bo->tbo.resource->mem_type == TTM_PL_VRAM;
1123 bool coherent = bo->flags & AMDGPU_GEM_CREATE_COHERENT;
1124 bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED;
1128 switch (adev->ip_versions[GC_HWIP][0]) {
1129 case IP_VERSION(9, 4, 1):
1130 case IP_VERSION(9, 4, 2):
1132 if (bo_adev == adev) {
1139 /* FIXME: is this still needed? Or does
1140 * amdgpu_ttm_tt_pde_flags already handle this?
1142 if (adev->ip_versions[GC_HWIP][0] ==
1143 IP_VERSION(9, 4, 2) &&
1144 adev->gmc.xgmi.connected_to_cpu)
1147 if (uncached || coherent)
1151 if (mapping->bo_va->is_xgmi)
1155 if (uncached || coherent)
1159 /* FIXME: is this still needed? Or does
1160 * amdgpu_ttm_tt_pde_flags already handle this?
1166 if (uncached || coherent)
1171 /* FIXME: is this still needed? Or does
1172 * amdgpu_ttm_tt_pde_flags already handle this?
1178 if (mtype != MTYPE_NC)
1179 *flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) |
1180 AMDGPU_PTE_MTYPE_VG10(mtype);
1181 *flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
1184 static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
1185 struct amdgpu_bo_va_mapping *mapping,
1188 struct amdgpu_bo *bo = mapping->bo_va->base.bo;
1190 *flags &= ~AMDGPU_PTE_EXECUTABLE;
1191 *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
1193 *flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
1194 *flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;
1196 if (mapping->flags & AMDGPU_PTE_PRT) {
1197 *flags |= AMDGPU_PTE_PRT;
1198 *flags &= ~AMDGPU_PTE_VALID;
1201 if (bo && bo->tbo.resource)
1202 gmc_v9_0_get_coherence_flags(adev, mapping->bo_va->base.bo,
1206 static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
1208 u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
1211 /* TODO move to DC so GMC doesn't need to hard-code DCN registers */
1213 if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
1214 size = AMDGPU_VBIOS_VGA_ALLOCATION;
1218 switch (adev->ip_versions[DCE_HWIP][0]) {
1219 case IP_VERSION(1, 0, 0):
1220 case IP_VERSION(1, 0, 1):
1221 viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
1222 size = (REG_GET_FIELD(viewport,
1223 HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
1224 REG_GET_FIELD(viewport,
1225 HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
1228 case IP_VERSION(2, 1, 0):
1229 viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2);
1230 size = (REG_GET_FIELD(viewport,
1231 HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
1232 REG_GET_FIELD(viewport,
1233 HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
1237 viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
1238 size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
1239 REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
1248 static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
1249 .flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
1250 .flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
1251 .emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
1252 .emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
1253 .map_mtype = gmc_v9_0_map_mtype,
1254 .get_vm_pde = gmc_v9_0_get_vm_pde,
1255 .get_vm_pte = gmc_v9_0_get_vm_pte,
1256 .get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,
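/* Illustrative use (an assumption, not part of this file): other amdgpu code
 * reaches these callbacks through the amdgpu_gmc_* wrappers, e.g.
 *
 *   amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB_0, 0);
 */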
1259 static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
1261 adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
1264 static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
1266 switch (adev->ip_versions[UMC_HWIP][0]) {
1267 case IP_VERSION(6, 0, 0):
1268 adev->umc.funcs = &umc_v6_0_funcs;
1270 case IP_VERSION(6, 1, 1):
1271 adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
1272 adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
1273 adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
1274 adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
1275 adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
1276 adev->umc.ras = &umc_v6_1_ras;
1278 case IP_VERSION(6, 1, 2):
1279 adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
1280 adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
1281 adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
1282 adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
1283 adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
1284 adev->umc.ras = &umc_v6_1_ras;
1286 case IP_VERSION(6, 7, 0):
1287 adev->umc.max_ras_err_cnt_per_query =
1288 UMC_V6_7_TOTAL_CHANNEL_NUM * UMC_V6_7_BAD_PAGE_NUM_PER_CHANNEL;
1289 adev->umc.channel_inst_num = UMC_V6_7_CHANNEL_INSTANCE_NUM;
1290 adev->umc.umc_inst_num = UMC_V6_7_UMC_INSTANCE_NUM;
1291 adev->umc.channel_offs = UMC_V6_7_PER_CHANNEL_OFFSET;
1292 if (!adev->gmc.xgmi.connected_to_cpu)
1293 adev->umc.ras = &umc_v6_7_ras;
1294 if (1 & adev->smuio.funcs->get_die_id(adev))
1295 adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_first[0][0];
1297 adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_second[0][0];
1303 if (adev->umc.ras) {
1304 amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block);
1306 strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
1307 adev->umc.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
1308 adev->umc.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
1309 adev->umc.ras_if = &adev->umc.ras->ras_block.ras_comm;
1311 /* If no dedicated ras_late_init function is defined, use the default ras_late_init */
1312 if (!adev->umc.ras->ras_block.ras_late_init)
1313 adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;
1315 /* If no dedicated ras_cb function is defined, use the default ras_cb */
1316 if (!adev->umc.ras->ras_block.ras_cb)
1317 adev->umc.ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;
1321 static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
1323 switch (adev->ip_versions[MMHUB_HWIP][0]) {
1324 case IP_VERSION(9, 4, 1):
1325 adev->mmhub.funcs = &mmhub_v9_4_funcs;
1327 case IP_VERSION(9, 4, 2):
1328 adev->mmhub.funcs = &mmhub_v1_7_funcs;
1331 adev->mmhub.funcs = &mmhub_v1_0_funcs;
1336 static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
1338 switch (adev->ip_versions[MMHUB_HWIP][0]) {
1339 case IP_VERSION(9, 4, 0):
1340 adev->mmhub.ras = &mmhub_v1_0_ras;
1342 case IP_VERSION(9, 4, 1):
1343 adev->mmhub.ras = &mmhub_v9_4_ras;
1345 case IP_VERSION(9, 4, 2):
1346 adev->mmhub.ras = &mmhub_v1_7_ras;
1349 /* mmhub ras is not available */
1353 if (adev->mmhub.ras) {
1354 amdgpu_ras_register_ras_block(adev, &adev->mmhub.ras->ras_block);
1356 strcpy(adev->mmhub.ras->ras_block.ras_comm.name, "mmhub");
1357 adev->mmhub.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MMHUB;
1358 adev->mmhub.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
1359 adev->mmhub.ras_if = &adev->mmhub.ras->ras_block.ras_comm;
1363 static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
1365 adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
1368 static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev)
1370 adev->hdp.ras = &hdp_v4_0_ras;
1371 amdgpu_ras_register_ras_block(adev, &adev->hdp.ras->ras_block);
1372 adev->hdp.ras_if = &adev->hdp.ras->ras_block.ras_comm;
1375 static void gmc_v9_0_set_mca_funcs(struct amdgpu_device *adev)
1377 /* is UMC the right IP to check for MCA? Maybe DF? */
1378 switch (adev->ip_versions[UMC_HWIP][0]) {
1379 case IP_VERSION(6, 7, 0):
1380 if (!adev->gmc.xgmi.connected_to_cpu)
1381 adev->mca.funcs = &mca_v3_0_funcs;
1388 static int gmc_v9_0_early_init(void *handle)
1391 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1393 /* ARCT and VEGA20 don't have XGMI defined in their IP discovery tables */
1394 if (adev->asic_type == CHIP_VEGA20 ||
1395 adev->asic_type == CHIP_ARCTURUS)
1396 adev->gmc.xgmi.supported = true;
1398 if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(6, 1, 0)) {
1399 adev->gmc.xgmi.supported = true;
1400 adev->gmc.xgmi.connected_to_cpu =
1401 adev->smuio.funcs->is_host_gpu_xgmi_supported(adev);
1404 gmc_v9_0_set_gmc_funcs(adev);
1405 gmc_v9_0_set_irq_funcs(adev);
1406 gmc_v9_0_set_umc_funcs(adev);
1407 gmc_v9_0_set_mmhub_funcs(adev);
1408 gmc_v9_0_set_mmhub_ras_funcs(adev);
1409 gmc_v9_0_set_gfxhub_funcs(adev);
1410 gmc_v9_0_set_hdp_ras_funcs(adev);
1411 gmc_v9_0_set_mca_funcs(adev);
1413 adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
1414 adev->gmc.shared_aperture_end =
1415 adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
1416 adev->gmc.private_aperture_start = 0x1000000000000000ULL;
1417 adev->gmc.private_aperture_end =
1418 adev->gmc.private_aperture_start + (4ULL << 30) - 1;
1420 r = amdgpu_gmc_ras_early_init(adev);
1427 static int gmc_v9_0_late_init(void *handle)
1429 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1432 r = amdgpu_gmc_allocate_vm_inv_eng(adev);
1437 * Workaround for a performance drop issue when the VBIOS enables partial
1438 * writes while disabling HBM ECC for vega10.
1440 if (!amdgpu_sriov_vf(adev) &&
1441 (adev->ip_versions[UMC_HWIP][0] == IP_VERSION(6, 0, 0))) {
1442 if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) {
1443 if (adev->df.funcs &&
1444 adev->df.funcs->enable_ecc_force_par_wr_rmw)
1445 adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
1449 if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
1450 if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
1451 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
1452 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
1454 if (adev->hdp.ras && adev->hdp.ras->ras_block.hw_ops &&
1455 adev->hdp.ras->ras_block.hw_ops->reset_ras_error_count)
1456 adev->hdp.ras->ras_block.hw_ops->reset_ras_error_count(adev);
1459 r = amdgpu_gmc_ras_late_init(adev);
1463 return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
1466 static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
1467 struct amdgpu_gmc *mc)
1469 u64 base = adev->mmhub.funcs->get_fb_location(adev);
1471 /* add the xgmi offset of the physical node */
1472 base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
1473 if (adev->gmc.xgmi.connected_to_cpu) {
1474 amdgpu_gmc_sysvm_location(adev, mc);
1476 amdgpu_gmc_vram_location(adev, mc, base);
1477 amdgpu_gmc_gart_location(adev, mc);
1478 amdgpu_gmc_agp_location(adev, mc);
1480 /* base offset of vram pages */
1481 adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);
1483 /* XXX: add the xgmi offset of the physical node? */
1484 adev->vm_manager.vram_base_offset +=
1485 adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
1489 * gmc_v9_0_mc_init - initialize the memory controller driver params
1491 * @adev: amdgpu_device pointer
1493 * Look up the amount of vram, vram width, and decide how to place
1494 * vram and gart within the GPU's physical address space.
1495 * Returns 0 for success.
1497 static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
1501 /* get_memsize() returns the VRAM size in MB; convert to bytes */
1502 adev->gmc.mc_vram_size =
1503 adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
1504 adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
1506 if (!(adev->flags & AMD_IS_APU) &&
1507 !adev->gmc.xgmi.connected_to_cpu) {
1508 r = amdgpu_device_resize_fb_bar(adev);
1512 adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
1513 adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
1515 #ifdef CONFIG_X86_64
1517 * An AMD Accelerated Processing Platform (APP) supporting the GPU-HOST xgmi
1518 * interface can use VRAM through here, as it appears as system-reserved
1519 * memory in the host address space.
1521 * For APUs, VRAM is just the stolen system memory and can be accessed
1524 * Otherwise, use the legacy Host Data Path (HDP) through PCIe BAR.
1527 /* check whether both host-gpu and gpu-gpu xgmi links exist */
1528 if (((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) ||
1529 (adev->gmc.xgmi.supported &&
1530 adev->gmc.xgmi.connected_to_cpu)) {
1531 adev->gmc.aper_base =
1532 adev->gfxhub.funcs->get_mc_fb_offset(adev) +
1533 adev->gmc.xgmi.physical_node_id *
1534 adev->gmc.xgmi.node_segment_size;
1535 adev->gmc.aper_size = adev->gmc.real_vram_size;
1539 /* In case the PCI BAR is larger than the actual amount of vram */
1540 adev->gmc.visible_vram_size = adev->gmc.aper_size;
1541 if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
1542 adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
1544 /* set the gart size */
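/* amdgpu_gart_size is the gartsize= module parameter in MB;
 * -1 keeps the per-ASIC defaults chosen below.
 */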
1545 if (amdgpu_gart_size == -1) {
1546 switch (adev->ip_versions[GC_HWIP][0]) {
1547 case IP_VERSION(9, 0, 1): /* all engines support GPUVM */
1548 case IP_VERSION(9, 2, 1): /* all engines support GPUVM */
1549 case IP_VERSION(9, 4, 0):
1550 case IP_VERSION(9, 4, 1):
1551 case IP_VERSION(9, 4, 2):
1553 adev->gmc.gart_size = 512ULL << 20;
1555 case IP_VERSION(9, 1, 0): /* DCE SG support */
1556 case IP_VERSION(9, 2, 2): /* DCE SG support */
1557 case IP_VERSION(9, 3, 0):
1558 adev->gmc.gart_size = 1024ULL << 20;
1562 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
1565 adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
1567 gmc_v9_0_vram_gtt_location(adev, &adev->gmc);
1572 static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
1576 if (adev->gart.bo) {
1577 WARN(1, "VEGA10 PCIE GART already initialized\n");
1581 if (adev->gmc.xgmi.connected_to_cpu) {
1582 adev->gmc.vmid0_page_table_depth = 1;
1583 adev->gmc.vmid0_page_table_block_size = 12;
1585 adev->gmc.vmid0_page_table_depth = 0;
1586 adev->gmc.vmid0_page_table_block_size = 0;
1589 /* Initialize common gart structure */
1590 r = amdgpu_gart_init(adev);
1593 adev->gart.table_size = adev->gart.num_gpu_pages * 8;
1594 adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
1595 AMDGPU_PTE_EXECUTABLE;
1597 r = amdgpu_gart_table_vram_alloc(adev);
1601 if (adev->gmc.xgmi.connected_to_cpu) {
1602 r = amdgpu_gmc_pdb0_alloc(adev);
1609 * gmc_v9_0_save_registers - saves regs
1611 * @adev: amdgpu_device pointer
1613 * This saves potential register values that should be
1614 * restored upon resume
1616 static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
1618 if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
1619 (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1)))
1620 adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
1623 static int gmc_v9_0_sw_init(void *handle)
1625 int r, vram_width = 0, vram_type = 0, vram_vendor = 0, dma_addr_bits;
1626 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1628 adev->gfxhub.funcs->init(adev);
1630 adev->mmhub.funcs->init(adev);
1631 if (adev->mca.funcs)
1632 adev->mca.funcs->init(adev);
1634 spin_lock_init(&adev->gmc.invalidate_lock);
1636 r = amdgpu_atomfirmware_get_vram_info(adev,
1637 &vram_width, &vram_type, &vram_vendor);
1638 if (amdgpu_sriov_vf(adev))
1639 /* For Vega10 SR-IOV, vram_width can't be read from ATOM as on RAVEN,
1640 * and the DF-related registers are not readable; hardcoding seems to be
1641 * the only way to set the correct vram_width
1643 adev->gmc.vram_width = 2048;
1644 else if (amdgpu_emu_mode != 1)
1645 adev->gmc.vram_width = vram_width;
1647 if (!adev->gmc.vram_width) {
1648 int chansize, numchan;
1650 /* hbm memory channel size */
1651 if (adev->flags & AMD_IS_APU)
1655 if (adev->df.funcs &&
1656 adev->df.funcs->get_hbm_channel_number) {
1657 numchan = adev->df.funcs->get_hbm_channel_number(adev);
1658 adev->gmc.vram_width = numchan * chansize;
1662 adev->gmc.vram_type = vram_type;
1663 adev->gmc.vram_vendor = vram_vendor;
1664 switch (adev->ip_versions[GC_HWIP][0]) {
1665 case IP_VERSION(9, 1, 0):
1666 case IP_VERSION(9, 2, 2):
1667 adev->num_vmhubs = 2;
1669 if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
1670 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
1672 /* vm_size is 128TB + 512GB for legacy 3-level page support */
1673 amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
1674 adev->gmc.translate_further =
1675 adev->vm_manager.num_level > 1;
1678 case IP_VERSION(9, 0, 1):
1679 case IP_VERSION(9, 2, 1):
1680 case IP_VERSION(9, 4, 0):
1681 case IP_VERSION(9, 3, 0):
1682 case IP_VERSION(9, 4, 2):
1683 adev->num_vmhubs = 2;
1687 * To fulfill 4-level page support,
1688 * vm size is 256TB (48 bit), the maximum size of Vega10,
1689 * block size 512 (9 bit)
1691 /* sriov restricts max_pfn below AMDGPU_GMC_HOLE */
1692 if (amdgpu_sriov_vf(adev))
1693 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
1695 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
1696 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
1697 adev->gmc.translate_further = adev->vm_manager.num_level > 1;
1699 case IP_VERSION(9, 4, 1):
1700 adev->num_vmhubs = 3;
1702 /* Keep the vm size the same as Vega20 */
1703 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
1704 adev->gmc.translate_further = adev->vm_manager.num_level > 1;
1710 /* This interrupt is for VMC page faults. */
1711 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
1712 &adev->gmc.vm_fault);
1716 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1)) {
1717 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
1718 &adev->gmc.vm_fault);
1723 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
1724 &adev->gmc.vm_fault);
1729 if (!amdgpu_sriov_vf(adev) &&
1730 !adev->gmc.xgmi.connected_to_cpu) {
1731 /* interrupt sent to DF. */
1732 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
1733 &adev->gmc.ecc_irq);
1738 /* Set the internal MC address mask
1739 * This is the max address of the GPU's
1740 * internal address space.
1742 adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
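/* GC 9.4.2 (Aldebaran) can use 48-bit DMA addresses; the rest of the
 * GMC v9 family is limited to 44 bits, hence the mask chosen below.
 */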
1744 dma_addr_bits = adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ? 48:44;
1745 r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(dma_addr_bits));
1747 printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
1750 adev->need_swiotlb = drm_need_swiotlb(dma_addr_bits);
1752 r = gmc_v9_0_mc_init(adev);
1756 amdgpu_gmc_get_vbios_allocations(adev);
1758 /* Memory manager */
1759 r = amdgpu_bo_init(adev);
1763 r = gmc_v9_0_gart_init(adev);
1769 * VMID 0 is reserved for System
1770 * amdgpu graphics/compute will use VMIDs 1..n-1
1771 * amdkfd will use VMIDs n..15
1773 * The first KFD VMID is 8 for GPUs with graphics, 3 for
1774 * compute-only GPUs. On compute-only GPUs that leaves 2 VMIDs
1775 * for video processing.
1777 adev->vm_manager.first_kfd_vmid =
1778 (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||
1779 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) ? 3 : 8;
1781 amdgpu_vm_manager_init(adev);
1783 gmc_v9_0_save_registers(adev);
1788 static int gmc_v9_0_sw_fini(void *handle)
1790 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1792 amdgpu_gmc_ras_fini(adev);
1793 amdgpu_gem_force_release(adev);
1794 amdgpu_vm_manager_fini(adev);
1795 amdgpu_gart_table_vram_free(adev);
1796 amdgpu_bo_free_kernel(&adev->gmc.pdb0_bo, NULL, &adev->gmc.ptr_pdb0);
1797 amdgpu_bo_fini(adev);
1802 static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
1805 switch (adev->ip_versions[MMHUB_HWIP][0]) {
1806 case IP_VERSION(9, 0, 0):
1807 if (amdgpu_sriov_vf(adev))
1810 case IP_VERSION(9, 4, 0):
1811 soc15_program_register_sequence(adev,
1812 golden_settings_mmhub_1_0_0,
1813 ARRAY_SIZE(golden_settings_mmhub_1_0_0));
1814 soc15_program_register_sequence(adev,
1815 golden_settings_athub_1_0_0,
1816 ARRAY_SIZE(golden_settings_athub_1_0_0));
1818 case IP_VERSION(9, 1, 0):
1819 case IP_VERSION(9, 2, 0):
1820 /* TODO for renoir */
1821 soc15_program_register_sequence(adev,
1822 golden_settings_athub_1_0_0,
1823 ARRAY_SIZE(golden_settings_athub_1_0_0));
1831 * gmc_v9_0_restore_registers - restores regs
1833 * @adev: amdgpu_device pointer
1835 * This restores register values, saved at suspend.
1837 void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
1839 if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
1840 (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) {
1841 WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
1842 WARN_ON(adev->gmc.sdpif_register !=
1843 RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0));
1848 * gmc_v9_0_gart_enable - gart enable
1850 * @adev: amdgpu_device pointer
1852 static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
1856 if (adev->gmc.xgmi.connected_to_cpu)
1857 amdgpu_gmc_init_pdb0(adev);
1859 if (adev->gart.bo == NULL) {
1860 dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
1864 amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
1865 r = adev->gfxhub.funcs->gart_enable(adev);
1869 r = adev->mmhub.funcs->gart_enable(adev);
1873 DRM_INFO("PCIE GART of %uM enabled.\n",
1874 (unsigned)(adev->gmc.gart_size >> 20));
1875 if (adev->gmc.pdb0_bo)
1876 DRM_INFO("PDB0 located at 0x%016llX\n",
1877 (unsigned long long)amdgpu_bo_gpu_offset(adev->gmc.pdb0_bo));
1878 DRM_INFO("PTB located at 0x%016llX\n",
1879 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
1884 static int gmc_v9_0_hw_init(void *handle)
1886 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1890 /* The sequence of these two function calls matters.*/
1891 gmc_v9_0_init_golden_registers(adev);
1893 if (adev->mode_info.num_crtc) {
1894 /* Lockout access through VGA aperture*/
1895 WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
1896 /* disable VGA render */
1897 WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
1900 if (adev->mmhub.funcs->update_power_gating)
1901 adev->mmhub.funcs->update_power_gating(adev, true);
1903 adev->hdp.funcs->init_registers(adev);
1905 /* After HDP is initialized, flush HDP.*/
1906 adev->hdp.funcs->flush_hdp(adev, NULL);
1908 if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
1913 if (!amdgpu_sriov_vf(adev)) {
1914 adev->gfxhub.funcs->set_fault_enable_default(adev, value);
1915 adev->mmhub.funcs->set_fault_enable_default(adev, value);
1917 for (i = 0; i < adev->num_vmhubs; ++i)
1918 gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
1920 if (adev->umc.funcs && adev->umc.funcs->init_registers)
1921 adev->umc.funcs->init_registers(adev);
1923 r = gmc_v9_0_gart_enable(adev);
1927 if (amdgpu_emu_mode == 1)
1928 return amdgpu_gmc_vram_checking(adev);
1934 * gmc_v9_0_gart_disable - gart disable
1936 * @adev: amdgpu_device pointer
1938 * This disables all VM page tables.
1940 static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
1942 adev->gfxhub.funcs->gart_disable(adev);
1943 adev->mmhub.funcs->gart_disable(adev);
1946 static int gmc_v9_0_hw_fini(void *handle)
1948 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1950 gmc_v9_0_gart_disable(adev);
1952 if (amdgpu_sriov_vf(adev)) {
1953 /* full access mode, so don't touch any GMC register */
1954 DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
1959 * Pair the operations done in gmc_v9_0_hw_init and thus maintain
1960 * a correct cached state for GMC. Otherwise, the "gate" again
1961 * operation on S3 resume will fail due to a wrong cached state.
1963 if (adev->mmhub.funcs->update_power_gating)
1964 adev->mmhub.funcs->update_power_gating(adev, false);
1966 amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
1967 amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
1972 static int gmc_v9_0_suspend(void *handle)
1974 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1976 return gmc_v9_0_hw_fini(adev);
1979 static int gmc_v9_0_resume(void *handle)
1982 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1984 r = gmc_v9_0_hw_init(adev);
1988 amdgpu_vmid_reset_all(adev);
1993 static bool gmc_v9_0_is_idle(void *handle)
1995 /* MC is always ready in GMC v9.*/
1999 static int gmc_v9_0_wait_for_idle(void *handle)
2001 /* There is no need to wait for MC idle in GMC v9.*/
2005 static int gmc_v9_0_soft_reset(void *handle)
2007 /* XXX for emulation.*/
2011 static int gmc_v9_0_set_clockgating_state(void *handle,
2012 enum amd_clockgating_state state)
2014 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2016 adev->mmhub.funcs->set_clockgating(adev, state);
2018 athub_v1_0_set_clockgating(adev, state);
2023 static void gmc_v9_0_get_clockgating_state(void *handle, u64 *flags)
2025 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2027 adev->mmhub.funcs->get_clockgating(adev, flags);
2029 athub_v1_0_get_clockgating(adev, flags);
2032 static int gmc_v9_0_set_powergating_state(void *handle,
2033 enum amd_powergating_state state)
2038 const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
2040 .early_init = gmc_v9_0_early_init,
2041 .late_init = gmc_v9_0_late_init,
2042 .sw_init = gmc_v9_0_sw_init,
2043 .sw_fini = gmc_v9_0_sw_fini,
2044 .hw_init = gmc_v9_0_hw_init,
2045 .hw_fini = gmc_v9_0_hw_fini,
2046 .suspend = gmc_v9_0_suspend,
2047 .resume = gmc_v9_0_resume,
2048 .is_idle = gmc_v9_0_is_idle,
2049 .wait_for_idle = gmc_v9_0_wait_for_idle,
2050 .soft_reset = gmc_v9_0_soft_reset,
2051 .set_clockgating_state = gmc_v9_0_set_clockgating_state,
2052 .set_powergating_state = gmc_v9_0_set_powergating_state,
2053 .get_clockgating_state = gmc_v9_0_get_clockgating_state,
2056 const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
2058 .type = AMD_IP_BLOCK_TYPE_GMC,
2062 .funcs = &gmc_v9_0_ip_funcs,