Merge tag 'drm-next-2022-12-23' of git://anongit.freedesktop.org/drm/drm
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 23 Dec 2022 19:09:44 +0000 (11:09 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 23 Dec 2022 19:09:44 +0000 (11:09 -0800)
Pull drm fixes from Dave Airlie:
 "Holiday fixes!

  Two batches of fixes from amd and one group of i915 changes.

  amdgpu:
   - Spelling fix
   - BO pin fix
   - Properly handle Polaris 10/11 overlapping ASICs
   - GMC9 fix
   - SR-IOV suspend fix
   - DCN 3.1.4 fix
   - KFD userptr locking fix
   - SMU 13.x fixes
   - GDS/GWS/OA handling fix
   - Reserved VMID handling fixes
   - FRU EEPROM fix
   - BO validation fixes
   - Avoid large variable on the stack
   - S0ix fixes
   - SMU 13.x fixes
   - VCN fix
   - Add missing fence reference

  amdkfd:
   - Fix init vm error handling
   - Fix double release of compute pasid

  i915:
   - Documentation fixes
   - OA-perf related fix
   - VLV/CHV HDMI/DP audio fix
   - Display DDI/Transcoder fix
   - Migrate fixes"

* tag 'drm-next-2022-12-23' of git://anongit.freedesktop.org/drm/drm: (39 commits)
  drm/amdgpu: grab extra fence reference for drm_sched_job_add_dependency
  drm/amdgpu: enable VCN DPG for GC IP v11.0.4
  drm/amdgpu: skip mes self test after s0i3 resume for MES IP v11.0
  drm/amd/pm: correct the fan speed retrieving in PWM for some SMU13 asics
  drm/amd/pm: bump SMU13.0.0 driver_if header to version 0x34
  drm/amdgpu: skip MES for S0ix as well since it's part of GFX
  drm/amd/pm: avoid large variable on kernel stack
  drm/amdkfd: Fix double release compute pasid
  drm/amdkfd: Fix kfd_process_device_init_vm error handling
  drm/amd/pm: update SMU13.0.0 reported maximum shader clock
  drm/amd/pm: correct SMU13.0.0 pstate profiling clock settings
  drm/amd/pm: enable GPO dynamic control support for SMU13.0.7
  drm/amd/pm: enable GPO dynamic control support for SMU13.0.0
  drm/amdgpu: revert "generally allow over-commit during BO allocation"
  drm/amdgpu: Remove unnecessary domain argument
  drm/amdgpu: Fix size validation for non-exclusive domains (v4)
  drm/amdgpu: Check if fru_addr is not NULL (v2)
  drm/i915/ttm: consider CCS for backup objects
  drm/i915/migrate: fix corner case in CCS aux copying
  drm/amdgpu: rework reserved VMID handling
  ...

47 files changed:
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
drivers/gpu/drm/amd/amdgpu/soc21.c
drivers/gpu/drm/amd/amdkfd/kfd_process.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
drivers/gpu/drm/i915/display/g4x_dp.c
drivers/gpu/drm/i915/display/g4x_hdmi.c
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/gem/i915_gem_object.c
drivers/gpu/drm/i915/gem/i915_gem_object_types.h
drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
drivers/gpu/drm/i915/gt/intel_gt_mcr.c
drivers/gpu/drm/i915/gt/intel_migrate.c
drivers/gpu/drm/i915/i915_perf.c
drivers/gpu/drm/i915/intel_uncore.c

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index f50e3ba4d7a58158e30aec142f9eaebb80bf745a..0040deaf8a83a31e1f7645abbe5499e6440b51c9 100644
@@ -29,6 +29,7 @@
 #include <linux/mm.h>
 #include <linux/kthread.h>
 #include <linux/workqueue.h>
+#include <linux/mmu_notifier.h>
 #include <kgd_kfd_interface.h>
 #include <drm/ttm/ttm_execbuf_util.h>
 #include "amdgpu_sync.h"
@@ -65,6 +66,7 @@ struct kgd_mem {
        struct mutex lock;
        struct amdgpu_bo *bo;
        struct dma_buf *dmabuf;
+       struct hmm_range *range;
        struct list_head attachments;
        /* protected by amdkfd_process_info.lock */
        struct ttm_validate_buffer validate_list;
@@ -75,7 +77,7 @@ struct kgd_mem {
 
        uint32_t alloc_flags;
 
-       atomic_t invalid;
+       uint32_t invalid;
        struct amdkfd_process_info *process_info;
 
        struct amdgpu_sync sync;
@@ -131,7 +133,8 @@ struct amdkfd_process_info {
        struct amdgpu_amdkfd_fence *eviction_fence;
 
        /* MMU-notifier related fields */
-       atomic_t evicted_bos;
+       struct mutex notifier_lock;
+       uint32_t evicted_bos;
        struct delayed_work restore_userptr_work;
        struct pid *pid;
        bool block_mmu_notifications;
@@ -180,7 +183,8 @@ int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data);
 bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
 struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
 int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
-int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm);
+int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
+                               unsigned long cur_seq, struct kgd_mem *mem);
 #else
 static inline
 bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
@@ -201,7 +205,8 @@ int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
 }
 
 static inline
-int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
+int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
+                               unsigned long cur_seq, struct kgd_mem *mem)
 {
        return 0;
 }
@@ -265,8 +270,10 @@ int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_
        (&((struct amdgpu_fpriv *)                                      \
                ((struct drm_file *)(drm_priv))->driver_priv)->vm)
 
+int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
+                                    struct file *filp, u32 pasid);
 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
-                                       struct file *filp, u32 pasid,
+                                       struct file *filp,
                                        void **process_info,
                                        struct dma_fence **ef);
 void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
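
The header changes above trade the lock-free atomic counters for plain integers serialized by the new per-process notifier_lock. A minimal sketch of that conversion pattern, using hypothetical names rather than the driver's real structures:

#include <linux/mutex.h>
#include <linux/types.h>

struct proc_info {
        struct mutex notifier_lock;     /* serializes with the MMU notifier */
        u32 evicted_bos;                /* was atomic_t before the rework */
};

/* Before: atomic_inc_return(&info->evicted_bos) == 1 picked the thread
 * that quiesces the queues.  After: the same decision happens under the
 * mutex, which the notifier also holds, so counter reads can no longer
 * race with an in-flight invalidation.
 */
static bool note_eviction(struct proc_info *info)
{
        bool first;

        mutex_lock(&info->notifier_lock);
        first = (++info->evicted_bos == 1);
        mutex_unlock(&info->notifier_lock);
        return first;
}
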
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 8782916e64a0423e5584447ed8ecef223bdf43a5..b15091d8310d9d11bbbbd6cedcc415e1e77cce9f 100644
@@ -964,7 +964,9 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
                 * later stage when it is scheduled by another ioctl called by
                 * CRIU master process for the target pid for restore.
                 */
-               atomic_inc(&mem->invalid);
+               mutex_lock(&process_info->notifier_lock);
+               mem->invalid++;
+               mutex_unlock(&process_info->notifier_lock);
                mutex_unlock(&process_info->lock);
                return 0;
        }
@@ -1301,6 +1303,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
                        return -ENOMEM;
 
                mutex_init(&info->lock);
+               mutex_init(&info->notifier_lock);
                INIT_LIST_HEAD(&info->vm_list_head);
                INIT_LIST_HEAD(&info->kfd_bo_list);
                INIT_LIST_HEAD(&info->userptr_valid_list);
@@ -1317,7 +1320,6 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
                }
 
                info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
-               atomic_set(&info->evicted_bos, 0);
                INIT_DELAYED_WORK(&info->restore_userptr_work,
                                  amdgpu_amdkfd_restore_userptr_worker);
 
@@ -1372,6 +1374,7 @@ reserve_pd_fail:
                put_pid(info->pid);
 create_evict_fence_fail:
                mutex_destroy(&info->lock);
+               mutex_destroy(&info->notifier_lock);
                kfree(info);
        }
        return ret;
@@ -1426,10 +1429,9 @@ static void amdgpu_amdkfd_gpuvm_unpin_bo(struct amdgpu_bo *bo)
        amdgpu_bo_unreserve(bo);
 }
 
-int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
-                                          struct file *filp, u32 pasid,
-                                          void **process_info,
-                                          struct dma_fence **ef)
+int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
+                                    struct file *filp, u32 pasid)
+
 {
        struct amdgpu_fpriv *drv_priv;
        struct amdgpu_vm *avm;
@@ -1440,10 +1442,6 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
                return ret;
        avm = &drv_priv->vm;
 
-       /* Already a compute VM? */
-       if (avm->process_info)
-               return -EINVAL;
-
        /* Free the original amdgpu allocated pasid,
         * will be replaced with kfd allocated pasid.
         */
@@ -1452,14 +1450,36 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
                amdgpu_vm_set_pasid(adev, avm, 0);
        }
 
-       /* Convert VM into a compute VM */
-       ret = amdgpu_vm_make_compute(adev, avm);
+       ret = amdgpu_vm_set_pasid(adev, avm, pasid);
        if (ret)
                return ret;
 
-       ret = amdgpu_vm_set_pasid(adev, avm, pasid);
+       return 0;
+}
+
+int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
+                                          struct file *filp,
+                                          void **process_info,
+                                          struct dma_fence **ef)
+{
+       struct amdgpu_fpriv *drv_priv;
+       struct amdgpu_vm *avm;
+       int ret;
+
+       ret = amdgpu_file_to_fpriv(filp, &drv_priv);
+       if (ret)
+               return ret;
+       avm = &drv_priv->vm;
+
+       /* Already a compute VM? */
+       if (avm->process_info)
+               return -EINVAL;
+
+       /* Convert VM into a compute VM */
+       ret = amdgpu_vm_make_compute(adev, avm);
        if (ret)
                return ret;
+
        /* Initialize KFD part of the VM and process info */
        ret = init_kfd_vm(avm, process_info, ef);
        if (ret)
@@ -1496,6 +1516,7 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
                cancel_delayed_work_sync(&process_info->restore_userptr_work);
                put_pid(process_info->pid);
                mutex_destroy(&process_info->lock);
+               mutex_destroy(&process_info->notifier_lock);
                kfree(process_info);
        }
 }
@@ -1548,7 +1569,9 @@ int amdgpu_amdkfd_criu_resume(void *p)
 
        mutex_lock(&pinfo->lock);
        pr_debug("scheduling work\n");
-       atomic_inc(&pinfo->evicted_bos);
+       mutex_lock(&pinfo->notifier_lock);
+       pinfo->evicted_bos++;
+       mutex_unlock(&pinfo->notifier_lock);
        if (!READ_ONCE(pinfo->block_mmu_notifications)) {
                ret = -EINVAL;
                goto out_unlock;
@@ -1773,8 +1796,13 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
        list_del(&bo_list_entry->head);
        mutex_unlock(&process_info->lock);
 
-       /* No more MMU notifiers */
-       amdgpu_hmm_unregister(mem->bo);
+       /* Cleanup user pages and MMU notifiers */
+       if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
+               amdgpu_hmm_unregister(mem->bo);
+               mutex_lock(&process_info->notifier_lock);
+               amdgpu_ttm_tt_discard_user_pages(mem->bo->tbo.ttm, mem->range);
+               mutex_unlock(&process_info->notifier_lock);
+       }
 
        ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
        if (unlikely(ret))
@@ -1864,6 +1892,16 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
         */
        mutex_lock(&mem->process_info->lock);
 
+       /* Take the notifier lock. If we find an invalid userptr BO, we can
+        * be sure that the MMU notifier is no longer running concurrently
+        * and the queues are actually stopped.
+        */
+       if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
+               mutex_lock(&mem->process_info->notifier_lock);
+               is_invalid_userptr = !!mem->invalid;
+               mutex_unlock(&mem->process_info->notifier_lock);
+       }
+
        mutex_lock(&mem->lock);
 
        domain = mem->domain;
@@ -2241,34 +2279,38 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
  *
  * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
  * cannot do any memory allocations, and cannot take any locks that
- * are held elsewhere while allocating memory. Therefore this is as
- * simple as possible, using atomic counters.
+ * are held elsewhere while allocating memory.
  *
  * It doesn't do anything to the BO itself. The real work happens in
  * restore, where we get updated page addresses. This function only
  * ensures that GPU access to the BO is stopped.
  */
-int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
-                               struct mm_struct *mm)
+int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
+                               unsigned long cur_seq, struct kgd_mem *mem)
 {
        struct amdkfd_process_info *process_info = mem->process_info;
-       int evicted_bos;
        int r = 0;
 
-       /* Do not process MMU notifications until stage-4 IOCTL is received */
+       /* Do not process MMU notifications during CRIU restore until
+        * KFD_CRIU_OP_RESUME IOCTL is received
+        */
        if (READ_ONCE(process_info->block_mmu_notifications))
                return 0;
 
-       atomic_inc(&mem->invalid);
-       evicted_bos = atomic_inc_return(&process_info->evicted_bos);
-       if (evicted_bos == 1) {
+       mutex_lock(&process_info->notifier_lock);
+       mmu_interval_set_seq(mni, cur_seq);
+
+       mem->invalid++;
+       if (++process_info->evicted_bos == 1) {
                /* First eviction, stop the queues */
-               r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_USERPTR);
+               r = kgd2kfd_quiesce_mm(mni->mm,
+                                      KFD_QUEUE_EVICTION_TRIGGER_USERPTR);
                if (r)
                        pr_err("Failed to quiesce KFD\n");
                schedule_delayed_work(&process_info->restore_userptr_work,
                        msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
        }
+       mutex_unlock(&process_info->notifier_lock);
 
        return r;
 }
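
For context, the reworked callback follows the standard mmu_interval_notifier contract: bail out of non-blockable invalidations, bump the interval sequence under the same driver lock the restore path takes, mark the state invalid, and defer the heavy lifting to a worker. A hedged sketch of that contract; the structure and lock names below are placeholders, not the driver's:

#include <linux/mmu_notifier.h>
#include <linux/mutex.h>

struct my_userptr {
        struct mmu_interval_notifier notifier;
        struct mutex *driver_lock;      /* same lock the restore path takes */
        bool invalid;
};

static bool my_invalidate(struct mmu_interval_notifier *mni,
                          const struct mmu_notifier_range *range,
                          unsigned long cur_seq)
{
        struct my_userptr *up = container_of(mni, struct my_userptr, notifier);

        if (!mmu_notifier_range_blockable(range))
                return false;   /* core will retry in a blockable context */

        mutex_lock(up->driver_lock);
        mmu_interval_set_seq(mni, cur_seq);     /* must be under the lock */
        up->invalid = true;     /* the real restore runs in a worker */
        mutex_unlock(up->driver_lock);

        return true;
}
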
@@ -2285,54 +2327,58 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
        struct kgd_mem *mem, *tmp_mem;
        struct amdgpu_bo *bo;
        struct ttm_operation_ctx ctx = { false, false };
-       int invalid, ret;
+       uint32_t invalid;
+       int ret = 0;
 
-       /* Move all invalidated BOs to the userptr_inval_list and
-        * release their user pages by migration to the CPU domain
-        */
+       mutex_lock(&process_info->notifier_lock);
+
+       /* Move all invalidated BOs to the userptr_inval_list */
        list_for_each_entry_safe(mem, tmp_mem,
                                 &process_info->userptr_valid_list,
-                                validate_list.head) {
-               if (!atomic_read(&mem->invalid))
-                       continue; /* BO is still valid */
-
-               bo = mem->bo;
-
-               if (amdgpu_bo_reserve(bo, true))
-                       return -EAGAIN;
-               amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
-               ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-               amdgpu_bo_unreserve(bo);
-               if (ret) {
-                       pr_err("%s: Failed to invalidate userptr BO\n",
-                              __func__);
-                       return -EAGAIN;
-               }
-
-               list_move_tail(&mem->validate_list.head,
-                              &process_info->userptr_inval_list);
-       }
-
-       if (list_empty(&process_info->userptr_inval_list))
-               return 0; /* All evicted userptr BOs were freed */
+                                validate_list.head)
+               if (mem->invalid)
+                       list_move_tail(&mem->validate_list.head,
+                                      &process_info->userptr_inval_list);
 
        /* Go through userptr_inval_list and update any invalid user_pages */
        list_for_each_entry(mem, &process_info->userptr_inval_list,
                            validate_list.head) {
-               struct hmm_range *range;
-
-               invalid = atomic_read(&mem->invalid);
+               invalid = mem->invalid;
                if (!invalid)
                        /* BO hasn't been invalidated since the last
-                        * revalidation attempt. Keep its BO list.
+                        * revalidation attempt. Keep its page list.
                         */
                        continue;
 
                bo = mem->bo;
 
+               amdgpu_ttm_tt_discard_user_pages(bo->tbo.ttm, mem->range);
+               mem->range = NULL;
+
+               /* BO reservations and getting user pages (hmm_range_fault)
+                * must happen outside the notifier lock
+                */
+               mutex_unlock(&process_info->notifier_lock);
+
+               /* Move the BO to system (CPU) domain if necessary to unmap
+                * and free the SG table
+                */
+               if (bo->tbo.resource->mem_type != TTM_PL_SYSTEM) {
+                       if (amdgpu_bo_reserve(bo, true))
+                               return -EAGAIN;
+                       amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+                       ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+                       amdgpu_bo_unreserve(bo);
+                       if (ret) {
+                               pr_err("%s: Failed to invalidate userptr BO\n",
+                                      __func__);
+                               return -EAGAIN;
+                       }
+               }
+
                /* Get updated user pages */
                ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
-                                                  &range);
+                                                  &mem->range);
                if (ret) {
                        pr_debug("Failed %d to get user pages\n", ret);
 
@@ -2345,30 +2391,32 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
                         */
                        if (ret != -EFAULT)
                                return ret;
-               } else {
 
-                       /*
-                        * FIXME: Cannot ignore the return code, must hold
-                        * notifier_lock
-                        */
-                       amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
+                       ret = 0;
                }
 
+               mutex_lock(&process_info->notifier_lock);
+
                /* Mark the BO as valid unless it was invalidated
                 * again concurrently.
                 */
-               if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
-                       return -EAGAIN;
+               if (mem->invalid != invalid) {
+                       ret = -EAGAIN;
+                       goto unlock_out;
+               }
+               mem->invalid = 0;
        }
 
-       return 0;
+unlock_out:
+       mutex_unlock(&process_info->notifier_lock);
+
+       return ret;
 }
 
 /* Validate invalid userptr BOs
  *
- * Validates BOs on the userptr_inval_list, and moves them back to the
- * userptr_valid_list. Also updates GPUVM page tables with new page
- * addresses and waits for the page table updates to complete.
+ * Validates BOs on the userptr_inval_list. Also updates GPUVM page tables
+ * with new page addresses and waits for the page table updates to complete.
  */
 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
 {
@@ -2439,9 +2487,6 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
                        }
                }
 
-               list_move_tail(&mem->validate_list.head,
-                              &process_info->userptr_valid_list);
-
                /* Update mapping. If the BO was not validated
                 * (because we couldn't get user pages), this will
                 * clear the page table entries, which will result in
@@ -2457,7 +2502,9 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
                        if (ret) {
                                pr_err("%s: update PTE failed\n", __func__);
                                /* make sure this gets validated again */
-                               atomic_inc(&mem->invalid);
+                               mutex_lock(&process_info->notifier_lock);
+                               mem->invalid++;
+                               mutex_unlock(&process_info->notifier_lock);
                                goto unreserve_out;
                        }
                }
@@ -2477,6 +2524,36 @@ out_no_mem:
        return ret;
 }
 
+/* Confirm that all user pages are valid while holding the notifier lock
+ *
+ * Moves valid BOs from the userptr_inval_list back to the userptr_valid_list.
+ */
+static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_info)
+{
+       struct kgd_mem *mem, *tmp_mem;
+       int ret = 0;
+
+       list_for_each_entry_safe(mem, tmp_mem,
+                                &process_info->userptr_inval_list,
+                                validate_list.head) {
+               bool valid = amdgpu_ttm_tt_get_user_pages_done(
+                               mem->bo->tbo.ttm, mem->range);
+
+               mem->range = NULL;
+               if (!valid) {
+                       WARN(!mem->invalid, "Invalid BO not marked invalid");
+                       ret = -EAGAIN;
+                       continue;
+               }
+               WARN(mem->invalid, "Valid BO is marked invalid");
+
+               list_move_tail(&mem->validate_list.head,
+                              &process_info->userptr_valid_list);
+       }
+
+       return ret;
+}
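
Together with update_invalid_user_pages() above, this confirmation step completes the usual hmm_range_fault() retry protocol: fault pages outside the driver lock, then verify under the lock that no invalidation raced in between. A self-contained sketch of that protocol, assuming a generic driver lock and a caller-prepared range:

#include <linux/hmm.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/mutex.h>

/* Caller pre-fills range->start, range->end and range->hmm_pfns.
 * Fault pages outside 'lock', then confirm under 'lock' that the range
 * was not invalidated while we were faulting.
 */
static int fault_and_confirm(struct mmu_interval_notifier *mni,
                             struct hmm_range *range, struct mutex *lock)
{
        int ret;

again:
        range->notifier = mni;
        range->notifier_seq = mmu_interval_read_begin(mni);

        mmap_read_lock(mni->mm);
        ret = hmm_range_fault(range);   /* may sleep, may allocate */
        mmap_read_unlock(mni->mm);
        if (ret) {
                if (ret == -EBUSY)
                        goto again;
                return ret;
        }

        mutex_lock(lock);
        if (mmu_interval_read_retry(mni, range->notifier_seq)) {
                mutex_unlock(lock);
                goto again;     /* invalidated while faulting */
        }
        /* range->hmm_pfns is now stable for as long as 'lock' is held */
        mutex_unlock(lock);
        return 0;
}
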
+
 /* Worker callback to restore evicted userptr BOs
  *
  * Tries to update and validate all userptr BOs. If successful and no
@@ -2491,9 +2568,11 @@ static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
                             restore_userptr_work);
        struct task_struct *usertask;
        struct mm_struct *mm;
-       int evicted_bos;
+       uint32_t evicted_bos;
 
-       evicted_bos = atomic_read(&process_info->evicted_bos);
+       mutex_lock(&process_info->notifier_lock);
+       evicted_bos = process_info->evicted_bos;
+       mutex_unlock(&process_info->notifier_lock);
        if (!evicted_bos)
                return;
 
@@ -2516,9 +2595,6 @@ static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
         * and we can just restart the queues.
         */
        if (!list_empty(&process_info->userptr_inval_list)) {
-               if (atomic_read(&process_info->evicted_bos) != evicted_bos)
-                       goto unlock_out; /* Concurrent eviction, try again */
-
                if (validate_invalid_user_pages(process_info))
                        goto unlock_out;
        }
@@ -2527,10 +2603,17 @@ static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
         * be a first eviction that calls quiesce_mm. The eviction
         * reference counting inside KFD will handle this case.
         */
-       if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
-           evicted_bos)
-               goto unlock_out;
-       evicted_bos = 0;
+       mutex_lock(&process_info->notifier_lock);
+       if (process_info->evicted_bos != evicted_bos)
+               goto unlock_notifier_out;
+
+       if (confirm_valid_user_pages_locked(process_info)) {
+               WARN(1, "User pages unexpectedly invalid");
+               goto unlock_notifier_out;
+       }
+
+       process_info->evicted_bos = evicted_bos = 0;
+
        if (kgd2kfd_resume_mm(mm)) {
                pr_err("%s: Failed to resume KFD\n", __func__);
                /* No recovery from this failure. Probably the CP is
@@ -2538,6 +2621,8 @@ static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
                 */
        }
 
+unlock_notifier_out:
+       mutex_unlock(&process_info->notifier_lock);
 unlock_out:
        mutex_unlock(&process_info->lock);
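
The restore-worker hunks above implement an optimistic, generation-checked commit: snapshot the eviction count, do the sleeping work unlocked, and only zero the count and resume the queues if it is unchanged once the lock is retaken. A compressed sketch of that shape; slow_restore() and resume_queues() are hypothetical stand-ins:

#include <linux/mutex.h>
#include <linux/types.h>

struct proc_info {
        struct mutex notifier_lock;
        u32 evicted_bos;
};

/* hypothetical helpers, assumed to exist elsewhere */
void slow_restore(struct proc_info *info);
void resume_queues(struct proc_info *info);

static void restore_worker(struct proc_info *info)
{
        u32 snapshot;

        mutex_lock(&info->notifier_lock);
        snapshot = info->evicted_bos;
        mutex_unlock(&info->notifier_lock);
        if (!snapshot)
                return;

        slow_restore(info);     /* may sleep and allocate, so unlocked */

        mutex_lock(&info->notifier_lock);
        if (info->evicted_bos == snapshot) {
                info->evicted_bos = 0;  /* commit: nothing raced with us */
                resume_queues(info);
        }
        /* else: the racing eviction reschedules this worker itself */
        mutex_unlock(&info->notifier_lock);
}
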
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index cfa411c12072421d348d91a2a1332b1c7c7a6596..afe6af9c013852d91a358c78132fa7f0adafae18 100644
@@ -3016,14 +3016,15 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
                        continue;
                }
 
-               /* skip suspend of gfx and psp for S0ix
+               /* skip suspend of gfx/mes and psp for S0ix
                 * gfx is in gfxoff state, so on resume it will exit gfxoff just
                 * like at runtime. PSP is also part of the always on hardware
                 * so no need to suspend it.
                 */
                if (adev->in_s0ix &&
                    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
-                    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
+                    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
+                    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
                        continue;
 
                /* XXX handle errors */
@@ -4112,6 +4113,11 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 
        adev->in_suspend = true;
 
+       /* Evict the majority of BOs before grabbing the full access */
+       r = amdgpu_device_evict_resources(adev);
+       if (r)
+               return r;
+
        if (amdgpu_sriov_vf(adev)) {
                amdgpu_virt_fini_data_exchange(adev);
                r = amdgpu_virt_request_full_gpu(adev, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 7383272c6a3a62fcee84eacc59c06f2f294d0803..b4f2d61ea0d53b1b5f24e37f6d0236281f6893b4 100644
@@ -2039,6 +2039,15 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
                         "See modparam exp_hw_support\n");
                return -ENODEV;
        }
+       /* differentiate between P10 and P11 asics with the same DID */
+       if (pdev->device == 0x67FF &&
+           (pdev->revision == 0xE3 ||
+            pdev->revision == 0xE7 ||
+            pdev->revision == 0xF3 ||
+            pdev->revision == 0xF7)) {
+               flags &= ~AMD_ASIC_MASK;
+               flags |= CHIP_POLARIS10;
+       }
 
        /* Due to hardware bugs, S/G Display on raven requires a 1:1 IOMMU mapping,
         * however, SME requires an indirect IOMMU mapping because the encryption
@@ -2108,12 +2117,12 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
 
        pci_set_drvdata(pdev, ddev);
 
-       ret = amdgpu_driver_load_kms(adev, ent->driver_data);
+       ret = amdgpu_driver_load_kms(adev, flags);
        if (ret)
                goto err_pci;
 
 retry_init:
-       ret = drm_dev_register(ddev, ent->driver_data);
+       ret = drm_dev_register(ddev, flags);
        if (ret == -EAGAIN && ++retry <= 3) {
                DRM_INFO("retry init %d\n", retry);
                /* Don't request EX mode too frequently which is attacking */
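
The probe fixup above exists because both Polaris variants ship with PCI device ID 0x67FF and only the revision distinguishes them; the rest of the hunk then passes the corrected flags, rather than the stale ent->driver_data, to device init. The fixup step in isolation, with illustrative mask and chip values rather than the driver's:

#include <linux/pci.h>

#define MY_ASIC_MASK    0x0000ffffUL    /* illustrative */
#define MY_CHIP_P10     0x00000020UL    /* illustrative */

static unsigned long fixup_asic_flags(struct pci_dev *pdev,
                                      unsigned long flags)
{
        /* same DID on two ASICs: tell them apart by revision */
        if (pdev->device == 0x67FF &&
            (pdev->revision == 0xE3 || pdev->revision == 0xE7 ||
             pdev->revision == 0xF3 || pdev->revision == 0xF7)) {
                flags &= ~MY_ASIC_MASK;
                flags |= MY_CHIP_P10;
        }
        return flags;
}
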
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
index 2c38ac7bc643d592fc0efbcf8635ab06d40d283e..4620c4712ce32af3e1fee6e7838700255c108d8e 100644
@@ -64,7 +64,8 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr)
                            sizeof(atom_ctx->vbios_version)) ||
                    strnstr(atom_ctx->vbios_version, "D163",
                            sizeof(atom_ctx->vbios_version))) {
-                       *fru_addr = FRU_EEPROM_MADDR_6;
+                       if (fru_addr)
+                               *fru_addr = FRU_EEPROM_MADDR_6;
                        return true;
                } else {
                        return false;
@@ -83,7 +84,8 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr)
                                    sizeof(atom_ctx->vbios_version))) {
                                return false;
                        } else {
-                               *fru_addr = FRU_EEPROM_MADDR_6;
+                               if (fru_addr)
+                                       *fru_addr = FRU_EEPROM_MADDR_6;
                                return true;
                        }
                } else {
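
Both hunks apply the same optional out-parameter guard: fru_addr may be NULL when a caller only wants the supported/unsupported answer, so every store through it is now conditional. The idiom in isolation; the names and the address value are placeholders:

#include <linux/types.h>

#define MY_FRU_ADDR 0x60000u            /* placeholder value */

static bool my_fru_supported(bool vbios_match, u32 *fru_addr)
{
        if (!vbios_match)
                return false;

        if (fru_addr)                   /* caller may pass NULL */
                *fru_addr = MY_FRU_ADDR;
        return true;
}
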
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index bee38c326537de308e0b3bdf47bcbb1884add21b..bb7350ea1d7594d486eff9b3f2baff582bde4a45 100644
@@ -113,7 +113,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
        bp.resv = resv;
        bp.preferred_domain = initial_domain;
        bp.flags = flags;
-       bp.domain = initial_domain | AMDGPU_GEM_DOMAIN_CPU;
+       bp.domain = initial_domain;
        bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 
        r = amdgpu_bo_create_user(adev, &bp, &ubo);
@@ -332,10 +332,20 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
        }
 
        initial_domain = (u32)(0xffffffff & args->in.domains);
+retry:
        r = amdgpu_gem_object_create(adev, size, args->in.alignment,
-                                    initial_domain, flags, ttm_bo_type_device,
-                                    resv, &gobj);
+                                    initial_domain,
+                                    flags, ttm_bo_type_device, resv, &gobj);
        if (r && r != -ERESTARTSYS) {
+               if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
+                       flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+                       goto retry;
+               }
+
+               if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
+                       initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
+                       goto retry;
+               }
                DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
                                size, initial_domain, args->in.alignment, r);
        }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
index 65715cb395d8386e3a87606dccddf2e892acfe02..2dadcfe43d03d253f8a5626770d869e731d2344e 100644
@@ -105,17 +105,11 @@ static bool amdgpu_hmm_invalidate_hsa(struct mmu_interval_notifier *mni,
                                      unsigned long cur_seq)
 {
        struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier);
-       struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 
        if (!mmu_notifier_range_blockable(range))
                return false;
 
-       mutex_lock(&adev->notifier_lock);
-
-       mmu_interval_set_seq(mni, cur_seq);
-
-       amdgpu_amdkfd_evict_userptr(bo->kfd_bo, bo->notifier.mm);
-       mutex_unlock(&adev->notifier_lock);
+       amdgpu_amdkfd_evict_userptr(mni, cur_seq, bo->kfd_bo);
 
        return true;
 }
@@ -244,9 +238,9 @@ out_free_range:
        return r;
 }
 
-int amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range)
+bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range)
 {
-       int r;
+       bool r;
 
        r = mmu_interval_read_retry(hmm_range->notifier,
                                    hmm_range->notifier_seq);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
index 13ed94d3b01b84e1f2b6e9796e5a2e4b14eec082..e2edcd010cccbf307f6b1d8c4d0cc9d24c4c70a5 100644
 #include <linux/rwsem.h>
 #include <linux/workqueue.h>
 #include <linux/interval_tree.h>
+#include <linux/mmu_notifier.h>
 
 int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
                               uint64_t start, uint64_t npages, bool readonly,
                               void *owner, struct page **pages,
                               struct hmm_range **phmm_range);
-int amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range);
+bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range);
 
 #if defined(CONFIG_HMM_MIRROR)
 int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 2a9a2593dc1834bafff6e50f766b4cdc1c7c0f6d..fcb711a11a5b6bc4d4cff56289c97df89d1a70d4 100644
@@ -165,6 +165,26 @@ bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
                atomic_read(&adev->gpu_reset_counter);
 }
 
+/* Check if we need to switch to another set of resources */
+static bool amdgpu_vmid_gds_switch_needed(struct amdgpu_vmid *id,
+                                         struct amdgpu_job *job)
+{
+       return id->gds_base != job->gds_base ||
+               id->gds_size != job->gds_size ||
+               id->gws_base != job->gws_base ||
+               id->gws_size != job->gws_size ||
+               id->oa_base != job->oa_base ||
+               id->oa_size != job->oa_size;
+}
+
+/* Check if the id is compatible with the job */
+static bool amdgpu_vmid_compatible(struct amdgpu_vmid *id,
+                                  struct amdgpu_job *job)
+{
+       return  id->pd_gpu_addr == job->vm_pd_addr &&
+               !amdgpu_vmid_gds_switch_needed(id, job);
+}
+
 /**
  * amdgpu_vmid_grab_idle - grab idle VMID
  *
@@ -258,14 +278,15 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 {
        struct amdgpu_device *adev = ring->adev;
        unsigned vmhub = ring->funcs->vmhub;
+       struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
        uint64_t fence_context = adev->fence_context + ring->idx;
        bool needs_flush = vm->use_cpu_for_update;
        uint64_t updates = amdgpu_vm_tlb_seq(vm);
        int r;
 
-       *id = vm->reserved_vmid[vmhub];
+       *id = id_mgr->reserved;
        if ((*id)->owner != vm->immediate.fence_context ||
-           (*id)->pd_gpu_addr != job->vm_pd_addr ||
+           !amdgpu_vmid_compatible(*id, job) ||
            (*id)->flushed_updates < updates ||
            !(*id)->last_flush ||
            ((*id)->last_flush->context != fence_context &&
@@ -294,8 +315,8 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
        if (r)
                return r;
 
-       (*id)->flushed_updates = updates;
        job->vm_needs_flush = needs_flush;
+       job->spm_update_needed = true;
        return 0;
 }
 
@@ -333,7 +354,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
                if ((*id)->owner != vm->immediate.fence_context)
                        continue;
 
-               if ((*id)->pd_gpu_addr != job->vm_pd_addr)
+               if (!amdgpu_vmid_compatible(*id, job))
                        continue;
 
                if (!(*id)->last_flush ||
@@ -355,7 +376,6 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
                if (r)
                        return r;
 
-               (*id)->flushed_updates = updates;
                job->vm_needs_flush |= needs_flush;
                return 0;
        }
@@ -408,22 +428,30 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                        if (r)
                                goto error;
 
-                       id->flushed_updates = amdgpu_vm_tlb_seq(vm);
                        job->vm_needs_flush = true;
                }
 
                list_move_tail(&id->list, &id_mgr->ids_lru);
        }
 
-       id->pd_gpu_addr = job->vm_pd_addr;
-       id->owner = vm->immediate.fence_context;
-
+       job->gds_switch_needed = amdgpu_vmid_gds_switch_needed(id, job);
        if (job->vm_needs_flush) {
+               id->flushed_updates = amdgpu_vm_tlb_seq(vm);
                dma_fence_put(id->last_flush);
                id->last_flush = NULL;
        }
        job->vmid = id - id_mgr->ids;
        job->pasid = vm->pasid;
+
+       id->gds_base = job->gds_base;
+       id->gds_size = job->gds_size;
+       id->gws_base = job->gws_base;
+       id->gws_size = job->gws_size;
+       id->oa_base = job->oa_base;
+       id->oa_size = job->oa_size;
+       id->pd_gpu_addr = job->vm_pd_addr;
+       id->owner = vm->immediate.fence_context;
+
        trace_amdgpu_vm_grab_id(vm, ring, job);
 
 error:
@@ -435,31 +463,27 @@ int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
                               struct amdgpu_vm *vm,
                               unsigned vmhub)
 {
-       struct amdgpu_vmid_mgr *id_mgr;
-       struct amdgpu_vmid *idle;
-       int r = 0;
+       struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 
-       id_mgr = &adev->vm_manager.id_mgr[vmhub];
        mutex_lock(&id_mgr->lock);
        if (vm->reserved_vmid[vmhub])
                goto unlock;
-       if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
-           AMDGPU_VM_MAX_RESERVED_VMID) {
-               DRM_ERROR("Over limitation of reserved vmid\n");
-               atomic_dec(&id_mgr->reserved_vmid_num);
-               r = -EINVAL;
-               goto unlock;
+
+       ++id_mgr->reserved_use_count;
+       if (!id_mgr->reserved) {
+               struct amdgpu_vmid *id;
+
+               id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid,
+                                     list);
+               /* Remove from normal round robin handling */
+               list_del_init(&id->list);
+               id_mgr->reserved = id;
        }
-       /* Select the first entry VMID */
-       idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
-       list_del_init(&idle->list);
-       vm->reserved_vmid[vmhub] = idle;
-       mutex_unlock(&id_mgr->lock);
+       vm->reserved_vmid[vmhub] = true;
 
-       return 0;
 unlock:
        mutex_unlock(&id_mgr->lock);
-       return r;
+       return 0;
 }
 
 void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
@@ -469,12 +493,12 @@ void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 
        mutex_lock(&id_mgr->lock);
-       if (vm->reserved_vmid[vmhub]) {
-               list_add(&vm->reserved_vmid[vmhub]->list,
-                       &id_mgr->ids_lru);
-               vm->reserved_vmid[vmhub] = NULL;
-               atomic_dec(&id_mgr->reserved_vmid_num);
+       if (vm->reserved_vmid[vmhub] &&
+           !--id_mgr->reserved_use_count) {
+               /* give the reserved ID back to normal round robin */
+               list_add(&id_mgr->reserved->list, &id_mgr->ids_lru);
        }
+       vm->reserved_vmid[vmhub] = false;
        mutex_unlock(&id_mgr->lock);
 }
 
@@ -541,7 +565,7 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
 
                mutex_init(&id_mgr->lock);
                INIT_LIST_HEAD(&id_mgr->ids_lru);
-               atomic_set(&id_mgr->reserved_vmid_num, 0);
+               id_mgr->reserved_use_count = 0;
 
                /* manage only VMIDs not used by KFD */
                id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;
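
The reserved-VMID rework above replaces per-VM ownership of a dedicated ID with one shared reserved ID per manager plus a use count: the first reserver pulls an ID out of the round-robin LRU, later reservers only bump the count, and the last unreserve returns the ID to the pool. A standalone sketch of that scheme with illustrative types:

#include <linux/list.h>
#include <linux/mutex.h>

struct my_vmid {
        struct list_head list;
};

struct my_vmid_mgr {
        struct mutex lock;
        struct list_head lru;           /* round-robin pool */
        struct my_vmid *reserved;
        unsigned int reserved_use_count;
};

static void my_reserve(struct my_vmid_mgr *m)
{
        mutex_lock(&m->lock);
        if (m->reserved_use_count++ == 0) {
                m->reserved = list_first_entry(&m->lru, struct my_vmid, list);
                list_del_init(&m->reserved->list);      /* leave round robin */
        }
        mutex_unlock(&m->lock);
}

static void my_unreserve(struct my_vmid_mgr *m)
{
        mutex_lock(&m->lock);
        if (--m->reserved_use_count == 0) {
                list_add(&m->reserved->list, &m->lru);  /* rejoin the pool */
                m->reserved = NULL;
        }
        mutex_unlock(&m->lock);
}
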
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
index 57efe61dceedc3e23878f04f7ae1714345bf761a..d1cc09b45da4a1783edfef7d9f182e247f4f3031 100644
@@ -67,7 +67,8 @@ struct amdgpu_vmid_mgr {
        unsigned                num_ids;
        struct list_head        ids_lru;
        struct amdgpu_vmid      ids[AMDGPU_NUM_VMID];
-       atomic_t                reserved_vmid_num;
+       struct amdgpu_vmid      *reserved;
+       unsigned int            reserved_use_count;
 };
 
 int amdgpu_pasid_alloc(unsigned int bits);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index a372802ea4e09fd6bfcff5664e96c61a4397c5d0..52f2e313ea17f299678b44ce17bfc36a48d2346b 100644
@@ -53,6 +53,8 @@ struct amdgpu_job {
        uint32_t                preamble_status;
        uint32_t                preemption_status;
        bool                    vm_needs_flush;
+       bool                    gds_switch_needed;
+       bool                    spm_update_needed;
        uint64_t                vm_pd_addr;
        unsigned                vmid;
        unsigned                pasid;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 919bbea2e3ac2a0117315c4eacb5e736582db202..4e684c2afc709f622a1bbbf1f6d74b1ff3371b76 100644
@@ -346,17 +346,16 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
  * @adev: amdgpu device object
  * @offset: offset of the BO
  * @size: size of the BO
- * @domain: where to place it
  * @bo_ptr:  used to initialize BOs in structures
  * @cpu_addr: optional CPU address mapping
  *
- * Creates a kernel BO at a specific offset in the address space of the domain.
+ * Creates a kernel BO at a specific offset in VRAM.
  *
  * Returns:
  * 0 on success, negative error code otherwise.
  */
 int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
-                              uint64_t offset, uint64_t size, uint32_t domain,
+                              uint64_t offset, uint64_t size,
                               struct amdgpu_bo **bo_ptr, void **cpu_addr)
 {
        struct ttm_operation_ctx ctx = { false, false };
@@ -366,8 +365,9 @@ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
        offset &= PAGE_MASK;
        size = ALIGN(size, PAGE_SIZE);
 
-       r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, domain, bo_ptr,
-                                     NULL, cpu_addr);
+       r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
+                                     AMDGPU_GEM_DOMAIN_VRAM, bo_ptr, NULL,
+                                     cpu_addr);
        if (r)
                return r;
 
@@ -422,6 +422,8 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
        if (*bo == NULL)
                return;
 
+       WARN_ON(amdgpu_ttm_adev((*bo)->tbo.bdev)->in_suspend);
+
        if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
                if (cpu_addr)
                        amdgpu_bo_kunmap(*bo);
@@ -446,27 +448,24 @@ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
 
        /*
         * If GTT is part of requested domains the check must succeed to
-        * allow fall back to GTT
+        * allow fall back to GTT.
         */
        if (domain & AMDGPU_GEM_DOMAIN_GTT) {
                man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
 
-               if (size < man->size)
+               if (man && size < man->size)
                        return true;
-               else
-                       goto fail;
-       }
-
-       if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
+               else if (!man)
+                       WARN_ON_ONCE("GTT domain requested but GTT mem manager uninitialized");
+               goto fail;
+       } else if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
                man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
 
-               if (size < man->size)
+               if (man && size < man->size)
                        return true;
-               else
-                       goto fail;
+               goto fail;
        }
 
-
        /* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU */
        return true;
 
@@ -581,7 +580,11 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
                bo->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
 
        bo->tbo.bdev = &adev->mman.bdev;
-       amdgpu_bo_placement_from_domain(bo, bp->domain);
+       if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
+                         AMDGPU_GEM_DOMAIN_GDS))
+               amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+       else
+               amdgpu_bo_placement_from_domain(bo, bp->domain);
        if (bp->type == ttm_bo_type_kernel)
                bo->tbo.priority = 1;
 
@@ -1506,7 +1509,8 @@ u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
 uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
                                            uint32_t domain)
 {
-       if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
+       if ((domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) &&
+           ((adev->asic_type == CHIP_CARRIZO) || (adev->asic_type == CHIP_STONEY))) {
                domain = AMDGPU_GEM_DOMAIN_VRAM;
                if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
                        domain = AMDGPU_GEM_DOMAIN_GTT;
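
The size-validation rework in this file encodes a simple rule: a request that may fall back to GTT only has to fit in GTT, a VRAM-only request has to fit in VRAM, and an uninitialized resource manager now fails the check instead of being dereferenced. Roughly, as a standalone predicate with illustrative domain flags and types:

#include <linux/types.h>

#define MY_DOMAIN_GTT   (1u << 0)       /* illustrative flags */
#define MY_DOMAIN_VRAM  (1u << 1)

struct my_mgr {
        u64 size;
};

static bool my_size_ok(const struct my_mgr *gtt, const struct my_mgr *vram,
                       u32 domain, u64 size)
{
        if (domain & MY_DOMAIN_GTT)     /* GTT fallback possible: GTT decides */
                return gtt && size < gtt->size;
        if (domain & MY_DOMAIN_VRAM)    /* VRAM-only request */
                return vram && size < vram->size;
        return true;                    /* other domains not size-checked */
}
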
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 147b79c10cbb6b58a74b69f402275fb7f4f68150..93207badf83f39ba8ae96779110a2f45b91dc5bc 100644
@@ -284,7 +284,7 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
                            u32 domain, struct amdgpu_bo **bo_ptr,
                            u64 *gpu_addr, void **cpu_addr);
 int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
-                              uint64_t offset, uint64_t size, uint32_t domain,
+                              uint64_t offset, uint64_t size,
                               struct amdgpu_bo **bo_ptr, void **cpu_addr);
 int amdgpu_bo_create_user(struct amdgpu_device *adev,
                          struct amdgpu_bo_param *bp,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index b4236572eae1b908706898cf75db834c276ef7d3..55e0284b2bdddcac74735570ac75af9e3a4d803e 100644
@@ -695,8 +695,19 @@ out_unlock:
        return r;
 }
 
+/* amdgpu_ttm_tt_discard_user_pages - Discard range and pfn array allocations
+ */
+void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
+                                     struct hmm_range *range)
+{
+       struct amdgpu_ttm_tt *gtt = (void *)ttm;
+
+       if (gtt && gtt->userptr && range)
+               amdgpu_hmm_range_get_pages_done(range);
+}
+
 /*
- * amdgpu_ttm_tt_userptr_range_done - stop HMM tracking CPU page table changes
+ * amdgpu_ttm_tt_get_user_pages_done - stop HMM tracking CPU page table changes
  * Check if the pages backing this ttm range have been invalidated
  *
  * Returns: true if pages are still valid
@@ -714,10 +725,6 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
 
        WARN_ONCE(!range->hmm_pfns, "No user pages to check\n");
 
-       /*
-        * FIXME: Must always hold notifier_lock for this, and must
-        * not ignore the return code.
-        */
        return !amdgpu_hmm_range_get_pages_done(range);
 }
 #endif
@@ -1569,7 +1576,6 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
        return amdgpu_bo_create_kernel_at(adev,
                                          adev->mman.fw_vram_usage_start_offset,
                                          adev->mman.fw_vram_usage_size,
-                                         AMDGPU_GEM_DOMAIN_VRAM,
                                          &adev->mman.fw_vram_usage_reserved_bo,
                                          &adev->mman.fw_vram_usage_va);
 }
@@ -1595,7 +1601,6 @@ static int amdgpu_ttm_drv_reserve_vram_init(struct amdgpu_device *adev)
        return amdgpu_bo_create_kernel_at(adev,
                                          adev->mman.drv_vram_usage_start_offset,
                                          adev->mman.drv_vram_usage_size,
-                                         AMDGPU_GEM_DOMAIN_VRAM,
                                          &adev->mman.drv_vram_usage_reserved_bo,
                                          &adev->mman.drv_vram_usage_va);
 }
@@ -1676,7 +1681,6 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
                ret = amdgpu_bo_create_kernel_at(adev,
                                         ctx->c2p_train_data_offset,
                                         ctx->train_data_size,
-                                        AMDGPU_GEM_DOMAIN_VRAM,
                                         &ctx->c2p_bo,
                                         NULL);
                if (ret) {
@@ -1690,7 +1694,6 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
        ret = amdgpu_bo_create_kernel_at(adev,
                                adev->gmc.real_vram_size - adev->mman.discovery_tmr_size,
                                adev->mman.discovery_tmr_size,
-                               AMDGPU_GEM_DOMAIN_VRAM,
                                &adev->mman.discovery_memory,
                                NULL);
        if (ret) {
@@ -1791,21 +1794,18 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
         * avoid display artifacts while transitioning between pre-OS
         * and driver.  */
        r = amdgpu_bo_create_kernel_at(adev, 0, adev->mman.stolen_vga_size,
-                                      AMDGPU_GEM_DOMAIN_VRAM,
                                       &adev->mman.stolen_vga_memory,
                                       NULL);
        if (r)
                return r;
        r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
                                       adev->mman.stolen_extended_size,
-                                      AMDGPU_GEM_DOMAIN_VRAM,
                                       &adev->mman.stolen_extended_memory,
                                       NULL);
        if (r)
                return r;
        r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_reserved_offset,
                                       adev->mman.stolen_reserved_size,
-                                      AMDGPU_GEM_DOMAIN_VRAM,
                                       &adev->mman.stolen_reserved_memory,
                                       NULL);
        if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index b4d8ba2789f3617aa2bd8bd4946b4891cd248e88..e2cd5894afc9d2b87726eaa24d9167843bfd9125 100644
@@ -159,6 +159,8 @@ uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type);
 #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
 int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
                                 struct hmm_range **range);
+void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
+                                     struct hmm_range *range);
 bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
                                       struct hmm_range *range);
 #else
@@ -168,6 +170,10 @@ static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
 {
        return -EPERM;
 }
+static inline void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
+                                                   struct hmm_range *range)
+{
+}
 static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
                                                     struct hmm_range *range)
 {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 15544f262ec15b7aee6c5e356e753d5eb780cbcb..2994b9db196ffdadf7907ef1e90d1e217b9a557e 100644
@@ -395,7 +395,6 @@ static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev)
                 */
                if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
                                               AMDGPU_GPU_PAGE_SIZE,
-                                              AMDGPU_GEM_DOMAIN_VRAM,
                                               &bo, NULL))
                        DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index c05cff979004c36d5e55076894f17a107688db52..dc379dc22c77b3e462fd70e4b6a1c5b516be7bc5 100644
@@ -484,25 +484,20 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
        struct amdgpu_device *adev = ring->adev;
        unsigned vmhub = ring->funcs->vmhub;
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
-       struct amdgpu_vmid *id;
-       bool gds_switch_needed;
-       bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
 
        if (job->vmid == 0)
                return false;
-       id = &id_mgr->ids[job->vmid];
-       gds_switch_needed = ring->funcs->emit_gds_switch && (
-               id->gds_base != job->gds_base ||
-               id->gds_size != job->gds_size ||
-               id->gws_base != job->gws_base ||
-               id->gws_size != job->gws_size ||
-               id->oa_base != job->oa_base ||
-               id->oa_size != job->oa_size);
-
-       if (amdgpu_vmid_had_gpu_reset(adev, id))
+
+       if (job->vm_needs_flush || ring->has_compute_vm_bug)
+               return true;
+
+       if (ring->funcs->emit_gds_switch && job->gds_switch_needed)
+               return true;
+
+       if (amdgpu_vmid_had_gpu_reset(adev, &id_mgr->ids[job->vmid]))
                return true;
 
-       return vm_flush_needed || gds_switch_needed;
+       return false;
 }
 
 /**
@@ -524,27 +519,20 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
        unsigned vmhub = ring->funcs->vmhub;
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
        struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
-       bool gds_switch_needed = ring->funcs->emit_gds_switch && (
-               id->gds_base != job->gds_base ||
-               id->gds_size != job->gds_size ||
-               id->gws_base != job->gws_base ||
-               id->gws_size != job->gws_size ||
-               id->oa_base != job->oa_base ||
-               id->oa_size != job->oa_size);
+       bool spm_update_needed = job->spm_update_needed;
+       bool gds_switch_needed = ring->funcs->emit_gds_switch &&
+               job->gds_switch_needed;
        bool vm_flush_needed = job->vm_needs_flush;
        struct dma_fence *fence = NULL;
        bool pasid_mapping_needed = false;
        unsigned patch_offset = 0;
-       bool update_spm_vmid_needed = (job->vm && (job->vm->reserved_vmid[vmhub] != NULL));
        int r;
 
-       if (update_spm_vmid_needed && adev->gfx.rlc.funcs->update_spm_vmid)
-               adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid);
-
        if (amdgpu_vmid_had_gpu_reset(adev, id)) {
                gds_switch_needed = true;
                vm_flush_needed = true;
                pasid_mapping_needed = true;
+               spm_update_needed = true;
        }
 
        mutex_lock(&id_mgr->lock);
@@ -577,6 +565,17 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
        if (pasid_mapping_needed)
                amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
 
+       if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
+               adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid);
+
+       if (!ring->is_mes_queue && ring->funcs->emit_gds_switch &&
+           gds_switch_needed) {
+               amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
+                                           job->gds_size, job->gws_base,
+                                           job->gws_size, job->oa_base,
+                                           job->oa_size);
+       }
+
        if (vm_flush_needed || pasid_mapping_needed) {
                r = amdgpu_fence_emit(ring, &fence, NULL, 0);
                if (r)
@@ -601,20 +600,6 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
        }
        dma_fence_put(fence);
 
-       if (!ring->is_mes_queue && ring->funcs->emit_gds_switch &&
-           gds_switch_needed) {
-               id->gds_base = job->gds_base;
-               id->gds_size = job->gds_size;
-               id->gws_base = job->gws_base;
-               id->gws_size = job->gws_size;
-               id->oa_base = job->oa_base;
-               id->oa_size = job->oa_size;
-               amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
-                                           job->gds_size, job->gws_base,
-                                           job->gws_size, job->oa_base,
-                                           job->oa_size);
-       }
-
        if (ring->funcs->patch_cond_exec)
                amdgpu_ring_patch_cond_exec(ring, patch_offset);
 
@@ -2383,7 +2368,6 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
        union drm_amdgpu_vm *args = data;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct amdgpu_fpriv *fpriv = filp->driver_priv;
-       long timeout = msecs_to_jiffies(2000);
        int r;
 
        switch (args->in.op) {
@@ -2395,21 +2379,6 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                        return r;
                break;
        case AMDGPU_VM_OP_UNRESERVE_VMID:
-               if (amdgpu_sriov_runtime(adev))
-                       timeout = 8 * timeout;
-
-               /* Wait vm idle to make sure the vmid set in SPM_VMID is
-                * not referenced anymore.
-                */
-               r = amdgpu_bo_reserve(fpriv->vm.root.bo, true);
-               if (r)
-                       return r;
-
-               r = amdgpu_vm_wait_idle(&fpriv->vm, timeout);
-               if (r < 0)
-                       return r;
-
-               amdgpu_bo_unreserve(fpriv->vm.root.bo);
                amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0);
                break;
        default:
index 6546e786bf008a7aad40f25c08223710562b71bb..094bb48073031e3582ed2e25f9be1907eb308719 100644
@@ -119,9 +119,6 @@ struct amdgpu_bo_vm;
 /* Reserve 2MB at top/bottom of address space for kernel use */
 #define AMDGPU_VA_RESERVED_SIZE                        (2ULL << 20)
 
-/* max vmids dedicated for process */
-#define AMDGPU_VM_MAX_RESERVED_VMID    1
-
 /* See vm_update_mode */
 #define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0)
 #define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1)
@@ -298,8 +295,7 @@ struct amdgpu_vm {
        struct dma_fence        *last_unlocked;
 
        unsigned int            pasid;
-       /* dedicated to vm */
-       struct amdgpu_vmid      *reserved_vmid[AMDGPU_MAX_VMHUBS];
+       bool                    reserved_vmid[AMDGPU_MAX_VMHUBS];
 
        /* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
        bool                                    use_cpu_for_update;
index 59cf64216fbb662b54ec0dd6eef34597c730d7af..535cd6569bccf518d79ec129819618cdc9d6ad43 100644
@@ -238,8 +238,10 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
        /* Wait for PD/PT moves to be completed */
        dma_resv_iter_begin(&cursor, bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL);
        dma_resv_for_each_fence_unlocked(&cursor, fence) {
+               dma_fence_get(fence);
                r = drm_sched_job_add_dependency(&p->job->base, fence);
                if (r) {
+                       dma_fence_put(fence);
                        dma_resv_iter_end(&cursor);
                        return r;
                }
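
The dma_fence_get()/dma_fence_put() pair added above follows the usual reference-ownership contract: drm_sched_job_add_dependency() consumes one fence reference on success but leaves ownership with the caller on failure, while the unlocked resv iterator keeps its own reference for the walk, so that reference is not ours to hand over. A minimal standalone sketch of the same contract, with hypothetical names rather than kernel API:

    #include <stdio.h>
    #include <stdlib.h>

    struct ref { int count; };

    static void ref_get(struct ref *r) { r->count++; }

    static void ref_put(struct ref *r)
    {
        if (--r->count == 0) {
            printf("released\n");
            free(r);
        }
    }

    /* Consumes one reference on success; on failure the caller keeps it. */
    static int add_dependency(struct ref *r, int fail)
    {
        if (fail)
            return -1;
        ref_put(r); /* stand-in for the scheduler's eventual put */
        return 0;
    }

    int main(void)
    {
        struct ref *fence = calloc(1, sizeof(*fence));
        fence->count = 1;       /* the iterator's reference */

        ref_get(fence);         /* extra reference handed to the consumer */
        if (add_dependency(fence, 1))
            ref_put(fence);     /* error path: drop the extra reference */

        ref_put(fence);         /* the iterator drops its reference at iter_end */
        return 0;
    }
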
index 50386eb2eec8dd3614cb80b400a05cb7bf70b4af..08d6cf79fb15df47aa4a654e4c62437d797ff1fd 100644
@@ -1185,6 +1185,8 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
                                struct amdgpu_bo_va_mapping *mapping,
                                uint64_t *flags)
 {
+       struct amdgpu_bo *bo = mapping->bo_va->base.bo;
+
        *flags &= ~AMDGPU_PTE_EXECUTABLE;
        *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
 
@@ -1196,7 +1198,7 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
                *flags &= ~AMDGPU_PTE_VALID;
        }
 
-       if (mapping->bo_va->base.bo)
+       if (bo && bo->tbo.resource)
                gmc_v9_0_get_coherence_flags(adev, mapping->bo_va->base.bo,
                                             mapping, flags);
 }
index 5459366f49ffe47e6798a82c70a2b56e53f58b0c..970b066b37bb917a0b99fdf54c8855bab86511e1 100644
@@ -1342,7 +1342,8 @@ static int mes_v11_0_late_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (!amdgpu_in_reset(adev) &&
+       /* the MES self test is only meant for the normal init path, not for s0ix resume or reset */
+       if (!amdgpu_in_reset(adev) && !adev->in_s0ix &&
            (adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3)))
                amdgpu_mes_self_test(adev);
 
index 7d5fdf450d0cc99f693a1e7f34a3fd5027667535..5562670b7b52193d01f61ad5fb5597f92ce32709 100644
@@ -666,6 +666,7 @@ static int soc21_common_early_init(void *handle)
                        AMD_CG_SUPPORT_VCN_MGCG |
                        AMD_CG_SUPPORT_JPEG_MGCG;
                adev->pg_flags = AMD_PG_SUPPORT_VCN |
+                       AMD_PG_SUPPORT_VCN_DPG |
                        AMD_PG_SUPPORT_GFX_PG |
                        AMD_PG_SUPPORT_JPEG;
                adev->external_rev_id = adev->rev_id + 0x1;
index a26257171ab7cb1cd7b8b63528c72b69f067fdec..51b1683ac5c1e157dd771a2c102a47e2fa5053e6 100644
@@ -689,13 +689,13 @@ void kfd_process_destroy_wq(void)
 }
 
 static void kfd_process_free_gpuvm(struct kgd_mem *mem,
-                       struct kfd_process_device *pdd, void *kptr)
+                       struct kfd_process_device *pdd, void **kptr)
 {
        struct kfd_dev *dev = pdd->dev;
 
-       if (kptr) {
+       if (kptr && *kptr) {
                amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
-               kptr = NULL;
+               *kptr = NULL;
        }
 
        amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->adev, mem, pdd->drm_priv);
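
The kptr parameter above changes from void * to void ** because C passes pointers by value: with a single pointer the callee only clears its own local copy, and the caller's kernel address keeps dangling, inviting a second unmap of an already-released mapping (which is why the new code also checks *kptr first). A small standalone illustration (hypothetical names):

    #include <stdio.h>

    static void free_mapping_bad(void *kptr)
    {
        kptr = NULL;        /* only the callee's local copy changes */
    }

    static void free_mapping_good(void **kptr)
    {
        if (kptr && *kptr)
            *kptr = NULL;   /* the caller's pointer is cleared too */
    }

    int main(void)
    {
        int dummy;
        void *kaddr = &dummy;

        free_mapping_bad(kaddr);
        printf("after bad:  %p\n", kaddr);  /* still points at dummy */

        free_mapping_good(&kaddr);
        printf("after good: %p\n", kaddr);  /* now NULL */
        return 0;
    }
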
@@ -795,7 +795,7 @@ static void kfd_process_device_destroy_ib_mem(struct kfd_process_device *pdd)
        if (!qpd->ib_kaddr || !qpd->ib_base)
                return;
 
-       kfd_process_free_gpuvm(qpd->ib_mem, pdd, qpd->ib_kaddr);
+       kfd_process_free_gpuvm(qpd->ib_mem, pdd, &qpd->ib_kaddr);
 }
 
 struct kfd_process *kfd_create_process(struct file *filep)
@@ -1277,7 +1277,7 @@ static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd)
        if (!dev->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base)
                return;
 
-       kfd_process_free_gpuvm(qpd->cwsr_mem, pdd, qpd->cwsr_kaddr);
+       kfd_process_free_gpuvm(qpd->cwsr_mem, pdd, &qpd->cwsr_kaddr);
 }
 
 void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
@@ -1576,9 +1576,9 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
        p = pdd->process;
        dev = pdd->dev;
 
-       ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(
-               dev->adev, drm_file, p->pasid,
-               &p->kgd_process_info, &p->ef);
+       ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, drm_file,
+                                                    &p->kgd_process_info,
+                                                    &p->ef);
        if (ret) {
                pr_err("Failed to create process VM object\n");
                return ret;
@@ -1593,13 +1593,19 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
        if (ret)
                goto err_init_cwsr;
 
+       ret = amdgpu_amdkfd_gpuvm_set_vm_pasid(dev->adev, drm_file, p->pasid);
+       if (ret)
+               goto err_set_pasid;
+
        pdd->drm_file = drm_file;
 
        return 0;
 
+err_set_pasid:
+       kfd_process_device_destroy_cwsr_dgpu(pdd);
 err_init_cwsr:
+       kfd_process_device_destroy_ib_mem(pdd);
 err_reserve_ib_mem:
-       kfd_process_device_free_bos(pdd);
        pdd->drm_priv = NULL;
 
        return ret;
index 77277d90b6e2ff328635a4d1c3e1624aba30e50d..50c783e19f5ab03fa23444a12e8eb57fed00fb72 100644
@@ -1503,6 +1503,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
                case IP_VERSION(3, 0, 1):
                case IP_VERSION(3, 1, 2):
                case IP_VERSION(3, 1, 3):
+               case IP_VERSION(3, 1, 4):
                case IP_VERSION(3, 1, 5):
                case IP_VERSION(3, 1, 6):
                        init_data.flags.gpu_vm_support = true;
index 0f746bb4e500f0d42e7983f89b1d662948f00cd8..d51f1ce0287480f69f35edb29647008dc651ab81 100644
@@ -55,7 +55,7 @@ void hubbub1_wm_read_state(struct hubbub *hubbub,
                s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A);
                s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A);
        }
-       s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A);
+       s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A);
 
        s = &wm->sets[1];
        s->wm_set = 1;
@@ -65,7 +65,7 @@ void hubbub1_wm_read_state(struct hubbub *hubbub,
                s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B);
                s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B);
        }
-       s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B);
+       s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B);
 
        s = &wm->sets[2];
        s->wm_set = 2;
@@ -75,7 +75,7 @@ void hubbub1_wm_read_state(struct hubbub *hubbub,
                s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C);
                s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C);
        }
-       s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C);
+       s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C);
 
        s = &wm->sets[3];
        s->wm_set = 3;
@@ -85,7 +85,7 @@ void hubbub1_wm_read_state(struct hubbub *hubbub,
                s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D);
                s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D);
        }
-       s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D);
+       s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D);
 }
 
 void hubbub1_allow_self_refresh_control(struct hubbub *hubbub, bool allow)
index c8ec11839b4d17354fe2800edf38184be89ac073..fe2023f18b7d0b1b9f487e14dea0541ef705caf2 100644
@@ -159,7 +159,7 @@ static void dcn10_log_hubbub_state(struct dc *dc,
                DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
                DTN_INFO_MICRO_SEC(s->sr_enter);
                DTN_INFO_MICRO_SEC(s->sr_exit);
-               DTN_INFO_MICRO_SEC(s->dram_clk_chanage);
+               DTN_INFO_MICRO_SEC(s->dram_clk_change);
                DTN_INFO("\n");
        }
 
index e8b6065fffad4f70f3b6522ce1a0966748372301..a0f8e31d2adc91a683340faf9d372011e06efc5c 100644
@@ -83,7 +83,7 @@ static unsigned int dcn10_get_hubbub_state(struct dc *dc, char *pBuf, unsigned i
        memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
        dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
 
-       chars_printed = snprintf_count(pBuf, remaining_buffer, "wm_set_index,data_urgent,pte_meta_urgent,sr_enter,sr_exit,dram_clk_chanage\n");
+       chars_printed = snprintf_count(pBuf, remaining_buffer, "wm_set_index,data_urgent,pte_meta_urgent,sr_enter,sr_exit,dram_clk_change\n");
        remaining_buffer -= chars_printed;
        pBuf += chars_printed;
 
@@ -98,7 +98,7 @@ static unsigned int dcn10_get_hubbub_state(struct dc *dc, char *pBuf, unsigned i
                        (s->pte_meta_urgent * frac) / ref_clk_mhz / frac, (s->pte_meta_urgent * frac) / ref_clk_mhz % frac,
                        (s->sr_enter * frac) / ref_clk_mhz / frac, (s->sr_enter * frac) / ref_clk_mhz % frac,
                        (s->sr_exit * frac) / ref_clk_mhz / frac, (s->sr_exit * frac) / ref_clk_mhz % frac,
-                       (s->dram_clk_chanage * frac) / ref_clk_mhz / frac, (s->dram_clk_chanage * frac) / ref_clk_mhz % frac);
+                       (s->dram_clk_change * frac) / ref_clk_mhz / frac, (s->dram_clk_change * frac) / ref_clk_mhz % frac);
                remaining_buffer -= chars_printed;
                pBuf += chars_printed;
        }
index aacb1fb5c73eb52a1b4f50acefdc8df65dd71b40..24bd932199366e328323af92b290eda437121c21 100644
@@ -500,7 +500,7 @@ void hubbub2_wm_read_state(struct hubbub *hubbub,
                s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A);
                s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A);
        }
-       s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A);
+       s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A);
 
        s = &wm->sets[1];
        s->wm_set = 1;
@@ -511,7 +511,7 @@ void hubbub2_wm_read_state(struct hubbub *hubbub,
                s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B);
                s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B);
        }
-       s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B);
+       s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B);
 
        s = &wm->sets[2];
        s->wm_set = 2;
@@ -522,7 +522,7 @@ void hubbub2_wm_read_state(struct hubbub *hubbub,
                s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C);
                s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C);
        }
-       s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C);
+       s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C);
 
        s = &wm->sets[3];
        s->wm_set = 3;
@@ -533,7 +533,7 @@ void hubbub2_wm_read_state(struct hubbub *hubbub,
                s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D);
                s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D);
        }
-       s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D);
+       s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D);
 }
 
 void hubbub2_get_dchub_ref_freq(struct hubbub *hubbub,
index c5e200d09038fba2cf7cfc1eb3ceba438ed33fa9..aeb0e0d9b70a475f6482df74e94e3391a82b04c0 100644
@@ -635,7 +635,7 @@ void hubbub21_wm_read_state(struct hubbub *hubbub,
                        DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, &s->sr_exit);
 
        REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A,
-                        DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, &s->dram_clk_chanage);
+                        DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, &s->dram_clk_change);
 
        s = &wm->sets[1];
        s->wm_set = 1;
@@ -649,7 +649,7 @@ void hubbub21_wm_read_state(struct hubbub *hubbub,
                        DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, &s->sr_exit);
 
        REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B,
-                       DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, &s->dram_clk_chanage);
+                       DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, &s->dram_clk_change);
 
        s = &wm->sets[2];
        s->wm_set = 2;
@@ -663,7 +663,7 @@ void hubbub21_wm_read_state(struct hubbub *hubbub,
                        DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, &s->sr_exit);
 
        REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C,
-                       DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, &s->dram_clk_chanage);
+                       DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, &s->dram_clk_change);
 
        s = &wm->sets[3];
        s->wm_set = 3;
@@ -677,7 +677,7 @@ void hubbub21_wm_read_state(struct hubbub *hubbub,
                        DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, &s->sr_exit);
 
        REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D,
-                       DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, &s->dram_clk_chanage);
+                       DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, &s->dram_clk_change);
 }
 
 static void hubbub21_apply_DEDCN21_147_wa(struct hubbub *hubbub)
index 5947c2cb0f301e9af4a5031c65f54e8fd2eebb66..9501403a48a958802a113634c3808d5a6e503c64 100644
@@ -865,7 +865,7 @@ static void hubbub32_wm_read_state(struct hubbub *hubbub,
                        DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, &s->sr_exit);
 
        REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A,
-                        DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, &s->dram_clk_chanage);
+                        DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, &s->dram_clk_change);
 
        REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A,
                         DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, &s->usr_retrain);
@@ -885,7 +885,7 @@ static void hubbub32_wm_read_state(struct hubbub *hubbub,
                        DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, &s->sr_exit);
 
        REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B,
-                       DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, &s->dram_clk_chanage);
+                       DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, &s->dram_clk_change);
 
        REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B,
                         DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, &s->usr_retrain);
@@ -905,7 +905,7 @@ static void hubbub32_wm_read_state(struct hubbub *hubbub,
                        DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, &s->sr_exit);
 
        REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C,
-                       DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, &s->dram_clk_chanage);
+                       DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, &s->dram_clk_change);
 
        REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C,
                         DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, &s->usr_retrain);
@@ -925,7 +925,7 @@ static void hubbub32_wm_read_state(struct hubbub *hubbub,
                        DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, &s->sr_exit);
 
        REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D,
-                       DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, &s->dram_clk_chanage);
+                       DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, &s->dram_clk_change);
 
        REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D,
                         DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, &s->usr_retrain);
index f2e1fcb668fb920e1215d7e96c9c1edab4108f16..5b0265c0df61c7ffd5087b2e7d1d2ae6d65adfef 100644
@@ -46,7 +46,7 @@ struct dcn_hubbub_wm_set {
        uint32_t pte_meta_urgent;
        uint32_t sr_enter;
        uint32_t sr_exit;
-       uint32_t dram_clk_chanage;
+       uint32_t dram_clk_change;
        uint32_t usr_retrain;
        uint32_t fclk_pstate_change;
 };
index b76f0f7e429981edb5b7da856165fe35b91d621b..d6b964cf73bd18fe6c6a7519b2da57e679dc4454 100644
@@ -522,9 +522,9 @@ typedef enum  {
   TEMP_HOTSPOT_M,
   TEMP_MEM,
   TEMP_VR_GFX,
+  TEMP_VR_SOC,
   TEMP_VR_MEM0,
   TEMP_VR_MEM1,
-  TEMP_VR_SOC,
   TEMP_VR_U,
   TEMP_LIQUID0,
   TEMP_LIQUID1,
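
Moving TEMP_VR_SOC ahead of TEMP_VR_MEM0/TEMP_VR_MEM1 renumbers every entry after it, since C enumerators are positional; presumably this realigns the driver's sensor IDs with the PMFW layout tracked by the driver_if header bump to 0x34. A tiny sketch of the renumbering effect (hypothetical names):

    #include <stdio.h>

    enum old_order { O_VR_GFX, O_VR_MEM0, O_VR_MEM1, O_VR_SOC };
    enum new_order { N_VR_GFX, N_VR_SOC, N_VR_MEM0, N_VR_MEM1 };

    int main(void)
    {
        /* The same logical sensor now carries a different numeric ID. */
        printf("VR_SOC:  old=%d new=%d\n", O_VR_SOC, N_VR_SOC);   /* 3 vs 1 */
        printf("VR_MEM0: old=%d new=%d\n", O_VR_MEM0, N_VR_MEM0); /* 1 vs 2 */
        return 0;
    }
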
index a4e3425b1027c2ab6e8c5a1f3c8957fe2b79d014..4180c71d930f1bd1a2ea8eeb29929e8dc2df4f26 100644
        __SMU_DUMMY_MAP(GetGfxOffEntryCount),            \
        __SMU_DUMMY_MAP(LogGfxOffResidency),                    \
        __SMU_DUMMY_MAP(SetNumBadMemoryPagesRetired),           \
-       __SMU_DUMMY_MAP(SetBadMemoryPagesRetiredFlagsPerChannel),
+       __SMU_DUMMY_MAP(SetBadMemoryPagesRetiredFlagsPerChannel), \
+       __SMU_DUMMY_MAP(AllowGpo),
 
 #undef __SMU_DUMMY_MAP
 #define __SMU_DUMMY_MAP(type)  SMU_MSG_##type
index 865d6358918d27a0a8152836cf2892a6b4e8f4de..e8c6febb8b64ed9149f8e5a59994d9473abfa79c 100644
@@ -28,6 +28,7 @@
 #define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
 #define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
 #define SMU13_DRIVER_IF_VERSION_ALDE 0x08
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0 0x34
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10 0x32
@@ -272,6 +273,9 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu);
 
 int smu_v13_0_run_btc(struct smu_context *smu);
 
+int smu_v13_0_gpo_control(struct smu_context *smu,
+                         bool enablement);
+
 int smu_v13_0_deep_sleep_control(struct smu_context *smu,
                                 bool enablement);
 
index f5e90e0a99dfe746843410f431a3d461b91feb1a..e54b760b875bfeb12280490863e9a7ab9d4b5945 100644
@@ -290,6 +290,8 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
                smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_ALDE;
                break;
        case IP_VERSION(13, 0, 0):
+               smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0;
+               break;
        case IP_VERSION(13, 0, 10):
                smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10;
                break;
@@ -2180,6 +2182,21 @@ int smu_v13_0_run_btc(struct smu_context *smu)
        return res;
 }
 
+int smu_v13_0_gpo_control(struct smu_context *smu,
+                         bool enablement)
+{
+       int res;
+
+       res = smu_cmn_send_smc_msg_with_param(smu,
+                                             SMU_MSG_AllowGpo,
+                                             enablement ? 1 : 0,
+                                             NULL);
+       if (res)
+               dev_err(smu->adev->dev, "SetGpoAllow %d failed!\n", enablement);
+
+       return res;
+}
+
 int smu_v13_0_deep_sleep_control(struct smu_context *smu,
                                 bool enablement)
 {
index 87d7c66e49ef28917bef5f7dd57f2b866d31bcd8..9643b21c636a9ec17d493fd579a724ca1e07eb8e 100644
@@ -144,6 +144,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
        MSG_MAP(SetNumBadMemoryPagesRetired,    PPSMC_MSG_SetNumBadMemoryPagesRetired,   0),
        MSG_MAP(SetBadMemoryPagesRetiredFlagsPerChannel,
                            PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel,   0),
+       MSG_MAP(AllowGpo,                       PPSMC_MSG_SetGpoAllow,           0),
 };
 
 static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {
@@ -210,6 +211,8 @@ static struct cmn2asic_mapping smu_v13_0_0_feature_mask_map[SMU_FEATURE_COUNT] =
        FEA_MAP(MEM_TEMP_READ),
        FEA_MAP(ATHUB_MMHUB_PG),
        FEA_MAP(SOC_PCC),
+       [SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
+       [SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
 };
 
 static struct cmn2asic_mapping smu_v13_0_0_table_map[SMU_TABLE_COUNT] = {
@@ -540,6 +543,23 @@ static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu)
                                                     dpm_table);
                if (ret)
                        return ret;
+
+               /*
+                * Update the reported maximum shader clock to a value that
+                * is guaranteed to be achievable on all cards. This matches
+                * the Windows setting. Since that value may be below the
+                * peak frequency the card can actually reach, it is normal
+                * for the observed real-time clock frequency (for example
+                * in the pp_dpm_sclk sysfs output) to exceed this labelled
+                * maximum.
+                */
+               if (skutable->DriverReportedClocks.GameClockAc &&
+                   (dpm_table->dpm_levels[dpm_table->count - 1].value >
+                   skutable->DriverReportedClocks.GameClockAc)) {
+                       dpm_table->dpm_levels[dpm_table->count - 1].value =
+                               skutable->DriverReportedClocks.GameClockAc;
+                       dpm_table->max = skutable->DriverReportedClocks.GameClockAc;
+               }
        } else {
                dpm_table->count = 1;
                dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
@@ -802,6 +822,57 @@ static int smu_v13_0_0_get_smu_metrics_data(struct smu_context *smu,
        return ret;
 }
 
+static int smu_v13_0_0_get_dpm_ultimate_freq(struct smu_context *smu,
+                                            enum smu_clk_type clk_type,
+                                            uint32_t *min,
+                                            uint32_t *max)
+{
+       struct smu_13_0_dpm_context *dpm_context =
+               smu->smu_dpm.dpm_context;
+       struct smu_13_0_dpm_table *dpm_table;
+
+       switch (clk_type) {
+       case SMU_MCLK:
+       case SMU_UCLK:
+               /* uclk dpm table */
+               dpm_table = &dpm_context->dpm_tables.uclk_table;
+               break;
+       case SMU_GFXCLK:
+       case SMU_SCLK:
+               /* gfxclk dpm table */
+               dpm_table = &dpm_context->dpm_tables.gfx_table;
+               break;
+       case SMU_SOCCLK:
+               /* socclk dpm table */
+               dpm_table = &dpm_context->dpm_tables.soc_table;
+               break;
+       case SMU_FCLK:
+               /* fclk dpm table */
+               dpm_table = &dpm_context->dpm_tables.fclk_table;
+               break;
+       case SMU_VCLK:
+       case SMU_VCLK1:
+               /* vclk dpm table */
+               dpm_table = &dpm_context->dpm_tables.vclk_table;
+               break;
+       case SMU_DCLK:
+       case SMU_DCLK1:
+               /* dclk dpm table */
+               dpm_table = &dpm_context->dpm_tables.dclk_table;
+               break;
+       default:
+               dev_err(smu->adev->dev, "Unsupported clock type!\n");
+               return -EINVAL;
+       }
+
+       if (min)
+               *min = dpm_table->min;
+       if (max)
+               *max = dpm_table->max;
+
+       return 0;
+}
+
 static int smu_v13_0_0_read_sensor(struct smu_context *smu,
                                   enum amd_pp_sensors sensor,
                                   void *data,
@@ -1304,9 +1375,17 @@ static int smu_v13_0_0_populate_umd_state_clk(struct smu_context *smu)
                                &dpm_context->dpm_tables.fclk_table;
        struct smu_umd_pstate_table *pstate_table =
                                &smu->pstate_table;
+       struct smu_table_context *table_context = &smu->smu_table;
+       PPTable_t *pptable = table_context->driver_pptable;
+       DriverReportedClocks_t driver_clocks =
+                       pptable->SkuTable.DriverReportedClocks;
 
        pstate_table->gfxclk_pstate.min = gfx_table->min;
-       pstate_table->gfxclk_pstate.peak = gfx_table->max;
+       if (driver_clocks.GameClockAc &&
+           (driver_clocks.GameClockAc < gfx_table->max))
+               pstate_table->gfxclk_pstate.peak = driver_clocks.GameClockAc;
+       else
+               pstate_table->gfxclk_pstate.peak = gfx_table->max;
 
        pstate_table->uclk_pstate.min = mem_table->min;
        pstate_table->uclk_pstate.peak = mem_table->max;
@@ -1323,12 +1402,12 @@ static int smu_v13_0_0_populate_umd_state_clk(struct smu_context *smu)
        pstate_table->fclk_pstate.min = fclk_table->min;
        pstate_table->fclk_pstate.peak = fclk_table->max;
 
-       /*
-        * For now, just use the mininum clock frequency.
-        * TODO: update them when the real pstate settings available
-        */
-       pstate_table->gfxclk_pstate.standard = gfx_table->min;
-       pstate_table->uclk_pstate.standard = mem_table->min;
+       if (driver_clocks.BaseClockAc &&
+           driver_clocks.BaseClockAc < gfx_table->max)
+               pstate_table->gfxclk_pstate.standard = driver_clocks.BaseClockAc;
+       else
+               pstate_table->gfxclk_pstate.standard = gfx_table->max;
+       pstate_table->uclk_pstate.standard = mem_table->max;
        pstate_table->socclk_pstate.standard = soc_table->min;
        pstate_table->vclk_pstate.standard = vclk_table->min;
        pstate_table->dclk_pstate.standard = dclk_table->min;
@@ -1362,12 +1441,23 @@ out:
 static int smu_v13_0_0_get_fan_speed_pwm(struct smu_context *smu,
                                         uint32_t *speed)
 {
+       int ret;
+
        if (!speed)
                return -EINVAL;
 
-       return smu_v13_0_0_get_smu_metrics_data(smu,
-                                               METRICS_CURR_FANPWM,
-                                               speed);
+       ret = smu_v13_0_0_get_smu_metrics_data(smu,
+                                              METRICS_CURR_FANPWM,
+                                              speed);
+       if (ret) {
+               dev_err(smu->adev->dev, "Failed to get fan speed (PWM)!");
+               return ret;
+       }
+
+       /* Convert the PMFW output, which is a percentage, to the 0-255 PWM scale */
+       *speed = MIN(*speed * 255 / 100, 255);
+
+       return 0;
 }
 
 static int smu_v13_0_0_get_fan_speed_rpm(struct smu_context *smu,
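
Worked example for the PWM conversion in smu_v13_0_0_get_fan_speed_pwm() above: a PMFW reading of 40 percent becomes 40 * 255 / 100 = 102 on the 0-255 PWM scale, and the MIN() clamp caps any reading of 100 percent or above at 255.
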
@@ -1899,7 +1989,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
        .get_enabled_mask = smu_cmn_get_enabled_mask,
        .dpm_set_vcn_enable = smu_v13_0_set_vcn_enable,
        .dpm_set_jpeg_enable = smu_v13_0_set_jpeg_enable,
-       .get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq,
+       .get_dpm_ultimate_freq = smu_v13_0_0_get_dpm_ultimate_freq,
        .get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
        .read_sensor = smu_v13_0_0_read_sensor,
        .feature_is_enabled = smu_cmn_feature_is_enabled,
@@ -1947,6 +2037,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
        .set_df_cstate = smu_v13_0_0_set_df_cstate,
        .send_hbm_bad_pages_num = smu_v13_0_0_smu_send_bad_mem_page_num,
        .send_hbm_bad_channel_flag = smu_v13_0_0_send_bad_mem_channel_flag,
+       .gpo_control = smu_v13_0_gpo_control,
 };
 
 void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
index c3c9ef523e59d0269799dcd8b10267500c01b51a..5c6c6ad011ca63c8d8ccd285b4cf68b1eafc03a8 100644
@@ -123,6 +123,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
        MSG_MAP(SetMGpuFanBoostLimitRpm,        PPSMC_MSG_SetMGpuFanBoostLimitRpm,     0),
        MSG_MAP(DFCstateControl,                PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
        MSG_MAP(ArmD3,                          PPSMC_MSG_ArmD3,                       0),
+       MSG_MAP(AllowGpo,                       PPSMC_MSG_SetGpoAllow,           0),
 };
 
 static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
@@ -189,6 +190,8 @@ static struct cmn2asic_mapping smu_v13_0_7_feature_mask_map[SMU_FEATURE_COUNT] =
        FEA_MAP(MEM_TEMP_READ),
        FEA_MAP(ATHUB_MMHUB_PG),
        FEA_MAP(SOC_PCC),
+       [SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
+       [SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
 };
 
 static struct cmn2asic_mapping smu_v13_0_7_table_map[SMU_TABLE_COUNT] = {
@@ -1360,12 +1363,23 @@ static int smu_v13_0_7_populate_umd_state_clk(struct smu_context *smu)
 static int smu_v13_0_7_get_fan_speed_pwm(struct smu_context *smu,
                                         uint32_t *speed)
 {
+       int ret;
+
        if (!speed)
                return -EINVAL;
 
-       return smu_v13_0_7_get_smu_metrics_data(smu,
-                                               METRICS_CURR_FANPWM,
-                                               speed);
+       ret = smu_v13_0_7_get_smu_metrics_data(smu,
+                                              METRICS_CURR_FANPWM,
+                                              speed);
+       if (ret) {
+               dev_err(smu->adev->dev, "Failed to get fan speed (PWM)!");
+               return ret;
+       }
+
+       /* Convert the PMFW output, which is a percentage, to the 0-255 PWM scale */
+       *speed = MIN(*speed * 255 / 100, 255);
+
+       return 0;
 }
 
 static int smu_v13_0_7_get_fan_speed_rpm(struct smu_context *smu,
@@ -1437,7 +1451,7 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
 
 static int smu_v13_0_7_get_power_profile_mode(struct smu_context *smu, char *buf)
 {
-       DpmActivityMonitorCoeffIntExternal_t activity_monitor_external[PP_SMC_POWER_PROFILE_COUNT];
+       DpmActivityMonitorCoeffIntExternal_t *activity_monitor_external;
        uint32_t i, j, size = 0;
        int16_t workload_type = 0;
        int result = 0;
@@ -1445,6 +1459,12 @@ static int smu_v13_0_7_get_power_profile_mode(struct smu_context *smu, char *buf
        if (!buf)
                return -EINVAL;
 
+       activity_monitor_external = kcalloc(PP_SMC_POWER_PROFILE_COUNT,
+                                           sizeof(*activity_monitor_external),
+                                           GFP_KERNEL);
+       if (!activity_monitor_external)
+               return -ENOMEM;
+
        size += sysfs_emit_at(buf, size, "                              ");
        for (i = 0; i <= PP_SMC_POWER_PROFILE_WINDOW3D; i++)
                size += sysfs_emit_at(buf, size, "%-14s%s", amdgpu_pp_profile_name[i],
@@ -1457,15 +1477,17 @@ static int smu_v13_0_7_get_power_profile_mode(struct smu_context *smu, char *buf
                workload_type = smu_cmn_to_asic_specific_index(smu,
                                                               CMN2ASIC_MAPPING_WORKLOAD,
                                                               i);
-               if (workload_type < 0)
-                       return -EINVAL;
+               if (workload_type < 0) {
+                       result = -EINVAL;
+                       goto out;
+               }
 
                result = smu_cmn_update_table(smu,
                                          SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type,
                                          (void *)(&activity_monitor_external[i]), false);
                if (result) {
                        dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
-                       return result;
+                       goto out;
                }
        }
 
@@ -1493,7 +1515,10 @@ do {                                                                                                     \
        PRINT_DPM_MONITOR(Fclk_BoosterFreq);
 #undef PRINT_DPM_MONITOR
 
-       return size;
+       result = size;
+out:
+       kfree(activity_monitor_external);
+       return result;
 }
 
 static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
@@ -1688,6 +1713,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
        .mode1_reset = smu_v13_0_mode1_reset,
        .set_mp1_state = smu_v13_0_7_set_mp1_state,
        .set_df_cstate = smu_v13_0_7_set_df_cstate,
+       .gpo_control = smu_v13_0_gpo_control,
 };
 
 void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu)
index 3593938dcd87cdaafc97e2ec9c966d3834bcc0b7..24ef36ec2d3d3ca69b7f48c8f6506ef4c12c9511 100644
@@ -673,8 +673,6 @@ static void intel_enable_dp(struct intel_atomic_state *state,
        intel_dp_pcon_dsc_configure(intel_dp, pipe_config);
        intel_dp_start_link_train(intel_dp, pipe_config);
        intel_dp_stop_link_train(intel_dp, pipe_config);
-
-       intel_audio_codec_enable(encoder, pipe_config, conn_state);
 }
 
 static void g4x_enable_dp(struct intel_atomic_state *state,
@@ -683,6 +681,7 @@ static void g4x_enable_dp(struct intel_atomic_state *state,
                          const struct drm_connector_state *conn_state)
 {
        intel_enable_dp(state, encoder, pipe_config, conn_state);
+       intel_audio_codec_enable(encoder, pipe_config, conn_state);
        intel_edp_backlight_on(pipe_config, conn_state);
 }
 
@@ -691,6 +690,7 @@ static void vlv_enable_dp(struct intel_atomic_state *state,
                          const struct intel_crtc_state *pipe_config,
                          const struct drm_connector_state *conn_state)
 {
+       intel_audio_codec_enable(encoder, pipe_config, conn_state);
        intel_edp_backlight_on(pipe_config, conn_state);
 }
 
index 121caeaa409b6c96c93a792396dfc8fbf9747ec9..c3580d96765c6c27c7d50dad316adcac822e541e 100644
@@ -157,10 +157,8 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
                             &pipe_config->infoframes.hdmi);
 }
 
-static void g4x_enable_hdmi(struct intel_atomic_state *state,
-                           struct intel_encoder *encoder,
-                           const struct intel_crtc_state *pipe_config,
-                           const struct drm_connector_state *conn_state)
+static void g4x_hdmi_enable_port(struct intel_encoder *encoder,
+                                const struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -175,6 +173,16 @@ static void g4x_enable_hdmi(struct intel_atomic_state *state,
 
        intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
        intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+}
+
+static void g4x_enable_hdmi(struct intel_atomic_state *state,
+                           struct intel_encoder *encoder,
+                           const struct intel_crtc_state *pipe_config,
+                           const struct drm_connector_state *conn_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+       g4x_hdmi_enable_port(encoder, pipe_config);
 
        drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio &&
                    !pipe_config->has_hdmi_sink);
@@ -294,6 +302,11 @@ static void vlv_enable_hdmi(struct intel_atomic_state *state,
                            const struct intel_crtc_state *pipe_config,
                            const struct drm_connector_state *conn_state)
 {
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+       drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio &&
+                   !pipe_config->has_hdmi_sink);
+       intel_audio_codec_enable(encoder, pipe_config, conn_state);
 }
 
 static void intel_disable_hdmi(struct intel_atomic_state *state,
@@ -415,7 +428,7 @@ static void vlv_hdmi_pre_enable(struct intel_atomic_state *state,
                              pipe_config->has_infoframe,
                              pipe_config, conn_state);
 
-       g4x_enable_hdmi(state, encoder, pipe_config, conn_state);
+       g4x_hdmi_enable_port(encoder, pipe_config);
 
        vlv_wait_port_ready(dev_priv, dig_port, 0x0);
 }
@@ -492,7 +505,7 @@ static void chv_hdmi_pre_enable(struct intel_atomic_state *state,
                              pipe_config->has_infoframe,
                              pipe_config, conn_state);
 
-       g4x_enable_hdmi(state, encoder, pipe_config, conn_state);
+       g4x_hdmi_enable_port(encoder, pipe_config);
 
        vlv_wait_port_ready(dev_priv, dig_port, 0x0);
 
index 67089711d9e25d265ccf0e6d6bf663e7a2a64bae..75070eb07d4bfa97587a173b29fb25582cd00389 100644
@@ -3679,61 +3679,6 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp,
        }
 }
 
-static void
-intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp,
-                                 const struct intel_crtc_state *crtc_state)
-{
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
-       enum pipe pipe = crtc->pipe;
-       u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;
-
-       trans_ddi_func_ctl_value = intel_de_read(dev_priv,
-                                                TRANS_DDI_FUNC_CTL(pipe));
-       trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
-       dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));
-
-       trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE |
-                                     TGL_TRANS_DDI_PORT_MASK);
-       trans_conf_value &= ~PIPECONF_ENABLE;
-       dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE;
-
-       intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
-       intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
-                      trans_ddi_func_ctl_value);
-       intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
-}
-
-static void
-intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp,
-                                const struct intel_crtc_state *crtc_state)
-{
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       enum port port = dig_port->base.port;
-       struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
-       enum pipe pipe = crtc->pipe;
-       u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;
-
-       trans_ddi_func_ctl_value = intel_de_read(dev_priv,
-                                                TRANS_DDI_FUNC_CTL(pipe));
-       trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
-       dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));
-
-       trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE |
-                                   TGL_TRANS_DDI_SELECT_PORT(port);
-       trans_conf_value |= PIPECONF_ENABLE;
-       dp_tp_ctl_value |= DP_TP_CTL_ENABLE;
-
-       intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
-       intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
-       intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
-                      trans_ddi_func_ctl_value);
-}
-
 static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
                                         const struct intel_crtc_state *crtc_state)
 {
@@ -3752,14 +3697,10 @@ static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
        intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX,
                                  link_status);
 
-       intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state);
-
        intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX);
 
        intel_dp_phy_pattern_update(intel_dp, crtc_state);
 
-       intel_dp_autotest_phy_ddi_enable(intel_dp, crtc_state);
-
        drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
                          intel_dp->train_set, crtc_state->lane_count);
 
index 733696057761c9fcb77dd51d7b5b0556466de0cf..1a0886b8aaa1d0501681bd444503ef011117b37a 100644
@@ -785,6 +785,9 @@ bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj)
        if (!HAS_FLAT_CCS(to_i915(obj->base.dev)))
                return false;
 
+       if (obj->flags & I915_BO_ALLOC_CCS_AUX)
+               return true;
+
        for (i = 0; i < obj->mm.n_placements; i++) {
                /* Compression is not allowed for the objects with smem placement */
                if (obj->mm.placements[i]->type == INTEL_MEMORY_SYSTEM)
index d0d6772e6f36a2e9002b035f91a772a4de773af9..ab4c2f90a56436c9dd8e8aa14a6b1c14867df0cf 100644
@@ -327,16 +327,18 @@ struct drm_i915_gem_object {
  * dealing with userspace objects the CPU fault handler is free to ignore this.
  */
 #define I915_BO_ALLOC_GPU_ONLY   BIT(6)
+#define I915_BO_ALLOC_CCS_AUX    BIT(7)
 #define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \
                             I915_BO_ALLOC_VOLATILE | \
                             I915_BO_ALLOC_CPU_CLEAR | \
                             I915_BO_ALLOC_USER | \
                             I915_BO_ALLOC_PM_VOLATILE | \
                             I915_BO_ALLOC_PM_EARLY | \
-                            I915_BO_ALLOC_GPU_ONLY)
-#define I915_BO_READONLY          BIT(7)
-#define I915_TILING_QUIRK_BIT     8 /* unknown swizzling; do not release! */
-#define I915_BO_PROTECTED         BIT(9)
+                            I915_BO_ALLOC_GPU_ONLY | \
+                            I915_BO_ALLOC_CCS_AUX)
+#define I915_BO_READONLY          BIT(8)
+#define I915_TILING_QUIRK_BIT     9 /* unknown swizzling; do not release! */
+#define I915_BO_PROTECTED         BIT(10)
        /**
         * @mem_flags - Mutable placement-related flags
         *
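
In bit terms, the renumbering above works out as: the new I915_BO_ALLOC_CCS_AUX takes BIT(7) = 0x80, so I915_BO_READONLY moves from 0x80 to BIT(8) = 0x100, the tiling-quirk bit from bit 8 to bit 9, and I915_BO_PROTECTED from BIT(9) = 0x200 to BIT(10) = 0x400, with the allocation-flags mask widened to include the new bit.
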
index 07e49f22f2de392ceed08e2630972b5472f22924..7e67742bc65e09d29fa3e1c30abfb15f795f49d9 100644
@@ -50,6 +50,7 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
                container_of(bo->bdev, typeof(*i915), bdev);
        struct drm_i915_gem_object *backup;
        struct ttm_operation_ctx ctx = {};
+       unsigned int flags;
        int err = 0;
 
        if (bo->resource->mem_type == I915_PL_SYSTEM || obj->ttm.backup)
@@ -65,7 +66,22 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
        if (obj->flags & I915_BO_ALLOC_PM_VOLATILE)
                return 0;
 
-       backup = i915_gem_object_create_shmem(i915, obj->base.size);
+       /*
+        * It seems that we might have some framebuffers still pinned at this
+        * stage, but for such objects we might also need to deal with the CCS
+        * aux state. Make sure we force the save/restore of the CCS state,
+        * otherwise we might observe display corruption when returning from
+        * suspend.
+        */
+       flags = 0;
+       if (i915_gem_object_needs_ccs_pages(obj)) {
+               WARN_ON_ONCE(!i915_gem_object_is_framebuffer(obj));
+               WARN_ON_ONCE(!pm_apply->allow_gpu);
+
+               flags = I915_BO_ALLOC_CCS_AUX;
+       }
+       backup = i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
+                                              obj->base.size, 0, flags);
        if (IS_ERR(backup))
                return PTR_ERR(backup);
 
index d9a8ff9e5e5785934f5e7b52c0a5a97fb8623056..ea86c1ab5dc569bb1973ca2f2c70dc7ff529f63f 100644
@@ -702,7 +702,7 @@ void intel_gt_mcr_get_ss_steering(struct intel_gt *gt, unsigned int dss,
 }
 
 /**
- * intel_gt_mcr_wait_for_reg_fw - wait until MCR register matches expected state
+ * intel_gt_mcr_wait_for_reg - wait until MCR register matches expected state
  * @gt: GT structure
  * @reg: the register to read
  * @mask: mask to apply to register value
index b405a04135ca2178692284b8f743d9d6cb0246bd..5fb74e71f27b50bbc6dee99dfdd1a7716784fb8a 100644
@@ -342,6 +342,16 @@ static int emit_no_arbitration(struct i915_request *rq)
        return 0;
 }
 
+static int max_pte_pkt_size(struct i915_request *rq, int pkt)
+{
+       struct intel_ring *ring = rq->ring;
+
+       pkt = min_t(int, pkt, (ring->space - rq->reserved_space) / sizeof(u32) + 5);
+       pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5);
+
+       return pkt;
+}
+
 static int emit_pte(struct i915_request *rq,
                    struct sgt_dma *it,
                    enum i915_cache_level cache_level,
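
Worked example for the tighter bound in max_pte_pkt_size() above (numbers hypothetical): with ring->space = 4096 bytes and rq->reserved_space = 160 bytes, the old limit permitted 4096 / 4 + 5 = 1029 dwords per packet, whereas the new one permits (4096 - 160) / 4 + 5 = 989, so a maximally packed PTE write can no longer spill into the ring space the request keeps reserved for its closing commands.
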
@@ -388,8 +398,7 @@ static int emit_pte(struct i915_request *rq,
                return PTR_ERR(cs);
 
        /* Pack as many PTE updates as possible into a single MI command */
-       pkt = min_t(int, dword_length, ring->space / sizeof(u32) + 5);
-       pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5);
+       pkt = max_pte_pkt_size(rq, dword_length);
 
        hdr = cs;
        *cs++ = MI_STORE_DATA_IMM | REG_BIT(21); /* as qword elements */
@@ -422,8 +431,7 @@ static int emit_pte(struct i915_request *rq,
                                }
                        }
 
-                       pkt = min_t(int, dword_rem, ring->space / sizeof(u32) + 5);
-                       pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5);
+                       pkt = max_pte_pkt_size(rq, dword_rem);
 
                        hdr = cs;
                        *cs++ = MI_STORE_DATA_IMM | REG_BIT(21);
@@ -829,14 +837,35 @@ intel_context_migrate_copy(struct intel_context *ce,
                        if (err)
                                goto out_rq;
 
-                       /*
-                        * While we can't always restore/manage the CCS state,
-                        * we still need to ensure we don't leak the CCS state
-                        * from the previous user, so make sure we overwrite it
-                        * with something.
-                        */
-                       err = emit_copy_ccs(rq, dst_offset, INDIRECT_ACCESS,
-                                           dst_offset, DIRECT_ACCESS, len);
+                       if (src_is_lmem) {
+                               /*
+                                * If the src is already in lmem, then we must
+                                * be doing an lmem -> lmem transfer, and so
+                                * should be safe to directly copy the CCS
+                                * state. In this case we have either
+                                * initialised the CCS aux state when first
+                                * clearing the pages (since it is already
+                                * allocated in lmem), or the user has
+                                * potentially populated it, in which case we
+                                * need to copy the CCS state as-is.
+                                */
+                               err = emit_copy_ccs(rq,
+                                                   dst_offset, INDIRECT_ACCESS,
+                                                   src_offset, INDIRECT_ACCESS,
+                                                   len);
+                       } else {
+                               /*
+                                * While we can't always restore/manage the CCS
+                                * state, we still need to ensure we don't leak
+                                * the CCS state from the previous user, so make
+                                * sure we overwrite it with something.
+                                */
+                               err = emit_copy_ccs(rq,
+                                                   dst_offset, INDIRECT_ACCESS,
+                                                   dst_offset, DIRECT_ACCESS,
+                                                   len);
+                       }
+
                        if (err)
                                goto out_rq;
 
index 00e09bb18b13bf537497964580ce561fae6aa150..125b6ca25a7563afcd8bd39efc409e46e6167322 100644
@@ -1383,6 +1383,9 @@ static u32 oa_context_image_offset(struct intel_context *ce, u32 reg)
        u32 offset, len = (ce->engine->context_size - PAGE_SIZE) / 4;
        u32 *state = ce->lrc_reg_state;
 
+       if (drm_WARN_ON(&ce->engine->i915->drm, !state))
+               return U32_MAX;
+
        for (offset = 0; offset < len; ) {
                if (IS_MI_LRI_CMD(state[offset])) {
                        /*
@@ -1447,7 +1450,8 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
        if (IS_ERR(ce))
                return PTR_ERR(ce);
 
-       if (engine_supports_mi_query(stream->engine)) {
+       if (engine_supports_mi_query(stream->engine) &&
+           HAS_LOGICAL_RING_CONTEXTS(stream->perf->i915)) {
                /*
                 * We are enabling perf query here. If we don't find the context
                 * offset here, just return an error.
index 8006a6c6146607a829feaf3cde21db09648cb95d..614013745fcafe9dbd03d1c74e73044aed0dd375 100644
@@ -824,9 +824,9 @@ void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
 }
 
 /**
- * intel_uncore_forcewake_put__locked - grab forcewake domain references
+ * intel_uncore_forcewake_put__locked - release forcewake domain references
  * @uncore: the intel_uncore structure
- * @fw_domains: forcewake domains to get reference on
+ * @fw_domains: forcewake domains to put references on
  *
  * See intel_uncore_forcewake_put(). This variant places the onus
  * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.