#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
+#include <linux/mmu_notifier.h>
#include <kgd_kfd_interface.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include "amdgpu_sync.h"
struct mutex lock;
struct amdgpu_bo *bo;
struct dma_buf *dmabuf;
+ struct hmm_range *range;
struct list_head attachments;
/* protected by amdkfd_process_info.lock */
struct ttm_validate_buffer validate_list;
uint32_t alloc_flags;
- atomic_t invalid;
+ uint32_t invalid;
struct amdkfd_process_info *process_info;
struct amdgpu_sync sync;
struct amdgpu_amdkfd_fence *eviction_fence;
/* MMU-notifier related fields */
- atomic_t evicted_bos;
+ struct mutex notifier_lock;
+ uint32_t evicted_bos;
struct delayed_work restore_userptr_work;
struct pid *pid;
bool block_mmu_notifications;
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
-int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm);
+int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
+ unsigned long cur_seq, struct kgd_mem *mem);
#else
static inline
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
{
	return false;
}
static inline
-int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
+int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
+ unsigned long cur_seq, struct kgd_mem *mem)
{
return 0;
}
(&((struct amdgpu_fpriv *) \
((struct drm_file *)(drm_priv))->driver_priv)->vm)
+int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
+ struct file *filp, u32 pasid);
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
- struct file *filp, u32 pasid,
+ struct file *filp,
void **process_info,
struct dma_fence **ef);
void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
* later stage when it is scheduled by another ioctl called by
* CRIU master process for the target pid for restore.
*/
- atomic_inc(&mem->invalid);
+ mutex_lock(&process_info->notifier_lock);
+ mem->invalid++;
+ mutex_unlock(&process_info->notifier_lock);
mutex_unlock(&process_info->lock);
return 0;
}
return -ENOMEM;
mutex_init(&info->lock);
+ mutex_init(&info->notifier_lock);
INIT_LIST_HEAD(&info->vm_list_head);
INIT_LIST_HEAD(&info->kfd_bo_list);
INIT_LIST_HEAD(&info->userptr_valid_list);
}
info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
- atomic_set(&info->evicted_bos, 0);
INIT_DELAYED_WORK(&info->restore_userptr_work,
amdgpu_amdkfd_restore_userptr_worker);
put_pid(info->pid);
create_evict_fence_fail:
mutex_destroy(&info->lock);
+ mutex_destroy(&info->notifier_lock);
kfree(info);
}
return ret;
amdgpu_bo_unreserve(bo);
}
-int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
- struct file *filp, u32 pasid,
- void **process_info,
- struct dma_fence **ef)
+int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
+ struct file *filp, u32 pasid)
{
struct amdgpu_fpriv *drv_priv;
struct amdgpu_vm *avm;
return ret;
avm = &drv_priv->vm;
- /* Already a compute VM? */
- if (avm->process_info)
- return -EINVAL;
-
/* Free the original amdgpu allocated pasid; it will be replaced
 * with a kfd allocated pasid.
 */
amdgpu_vm_set_pasid(adev, avm, 0);
}
- /* Convert VM into a compute VM */
- ret = amdgpu_vm_make_compute(adev, avm);
+ ret = amdgpu_vm_set_pasid(adev, avm, pasid);
if (ret)
return ret;
- ret = amdgpu_vm_set_pasid(adev, avm, pasid);
+ return 0;
+}
+
+int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
+ struct file *filp,
+ void **process_info,
+ struct dma_fence **ef)
+{
+ struct amdgpu_fpriv *drv_priv;
+ struct amdgpu_vm *avm;
+ int ret;
+
+ ret = amdgpu_file_to_fpriv(filp, &drv_priv);
+ if (ret)
+ return ret;
+ avm = &drv_priv->vm;
+
+ /* Already a compute VM? */
+ if (avm->process_info)
+ return -EINVAL;
+
+ /* Convert VM into a compute VM */
+ ret = amdgpu_vm_make_compute(adev, avm);
if (ret)
return ret;
+
/* Initialize KFD part of the VM and process info */
ret = init_kfd_vm(avm, process_info, ef);
if (ret)
cancel_delayed_work_sync(&process_info->restore_userptr_work);
put_pid(process_info->pid);
mutex_destroy(&process_info->lock);
+ mutex_destroy(&process_info->notifier_lock);
kfree(process_info);
}
}
mutex_lock(&pinfo->lock);
pr_debug("scheduling work\n");
- atomic_inc(&pinfo->evicted_bos);
+ mutex_lock(&pinfo->notifier_lock);
+ pinfo->evicted_bos++;
+ mutex_unlock(&pinfo->notifier_lock);
if (!READ_ONCE(pinfo->block_mmu_notifications)) {
ret = -EINVAL;
goto out_unlock;
list_del(&bo_list_entry->head);
mutex_unlock(&process_info->lock);
- /* No more MMU notifiers */
- amdgpu_hmm_unregister(mem->bo);
+ /* Cleanup user pages and MMU notifiers */
+ if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
+ amdgpu_hmm_unregister(mem->bo);
+ mutex_lock(&process_info->notifier_lock);
+ amdgpu_ttm_tt_discard_user_pages(mem->bo->tbo.ttm, mem->range);
+ mutex_unlock(&process_info->notifier_lock);
+ }
ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
if (unlikely(ret))
*/
mutex_lock(&mem->process_info->lock);
+ /* Take the notifier lock. If we find an invalid userptr BO, we can be
+ * sure that the MMU notifier is no longer running concurrently and
+ * that the queues are actually stopped.
+ */
+ if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
+ mutex_lock(&mem->process_info->notifier_lock);
+ is_invalid_userptr = !!mem->invalid;
+ mutex_unlock(&mem->process_info->notifier_lock);
+ }
+
mutex_lock(&mem->lock);
domain = mem->domain;
*
* Runs in MMU notifier, may be in RECLAIM_FS context. This means it
* cannot do any memory allocations, and cannot take any locks that
- * are held elsewhere while allocating memory. Therefore this is as
- * simple as possible, using atomic counters.
+ * are held elsewhere while allocating memory.
*
* It doesn't do anything to the BO itself. The real work happens in
* restore, where we get updated page addresses. This function only
* ensures that GPU access to the BO is stopped.
*/
-int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
- struct mm_struct *mm)
+int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
+ unsigned long cur_seq, struct kgd_mem *mem)
{
struct amdkfd_process_info *process_info = mem->process_info;
- int evicted_bos;
int r = 0;
- /* Do not process MMU notifications until stage-4 IOCTL is received */
+ /* Do not process MMU notifications during CRIU restore until
+ * KFD_CRIU_OP_RESUME IOCTL is received
+ */
if (READ_ONCE(process_info->block_mmu_notifications))
return 0;
- atomic_inc(&mem->invalid);
- evicted_bos = atomic_inc_return(&process_info->evicted_bos);
- if (evicted_bos == 1) {
+ mutex_lock(&process_info->notifier_lock);
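+ /* Record the new notifier sequence under the lock; the restore worker
+ * detects a racing invalidation later via mmu_interval_read_retry()
+ */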
+ mmu_interval_set_seq(mni, cur_seq);
+
+ mem->invalid++;
+ if (++process_info->evicted_bos == 1) {
/* First eviction, stop the queues */
- r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_USERPTR);
+ r = kgd2kfd_quiesce_mm(mni->mm,
+ KFD_QUEUE_EVICTION_TRIGGER_USERPTR);
if (r)
pr_err("Failed to quiesce KFD\n");
schedule_delayed_work(&process_info->restore_userptr_work,
msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
}
+ mutex_unlock(&process_info->notifier_lock);
return r;
}
struct kgd_mem *mem, *tmp_mem;
struct amdgpu_bo *bo;
struct ttm_operation_ctx ctx = { false, false };
- int invalid, ret;
+ uint32_t invalid;
+ int ret = 0;
- /* Move all invalidated BOs to the userptr_inval_list and
- * release their user pages by migration to the CPU domain
- */
+ mutex_lock(&process_info->notifier_lock);
+
+ /* Move all invalidated BOs to the userptr_inval_list */
list_for_each_entry_safe(mem, tmp_mem,
&process_info->userptr_valid_list,
- validate_list.head) {
- if (!atomic_read(&mem->invalid))
- continue; /* BO is still valid */
-
- bo = mem->bo;
-
- if (amdgpu_bo_reserve(bo, true))
- return -EAGAIN;
- amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
- ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- amdgpu_bo_unreserve(bo);
- if (ret) {
- pr_err("%s: Failed to invalidate userptr BO\n",
- __func__);
- return -EAGAIN;
- }
-
- list_move_tail(&mem->validate_list.head,
- &process_info->userptr_inval_list);
- }
-
- if (list_empty(&process_info->userptr_inval_list))
- return 0; /* All evicted userptr BOs were freed */
+ validate_list.head)
+ if (mem->invalid)
+ list_move_tail(&mem->validate_list.head,
+ &process_info->userptr_inval_list);
/* Go through userptr_inval_list and update any invalid user_pages */
list_for_each_entry(mem, &process_info->userptr_inval_list,
validate_list.head) {
- struct hmm_range *range;
-
- invalid = atomic_read(&mem->invalid);
+ invalid = mem->invalid;
if (!invalid)
/* BO hasn't been invalidated since the last
- * revalidation attempt. Keep its BO list.
+ * revalidation attempt. Keep its page list.
*/
continue;
bo = mem->bo;
+ amdgpu_ttm_tt_discard_user_pages(bo->tbo.ttm, mem->range);
+ mem->range = NULL;
+
+ /* BO reservations and getting user pages (hmm_range_fault)
+ * must happen outside the notifier lock
+ */
+ mutex_unlock(&process_info->notifier_lock);
+
+ /* Move the BO to system (CPU) domain if necessary to unmap
+ * and free the SG table
+ */
+ if (bo->tbo.resource->mem_type != TTM_PL_SYSTEM) {
+ if (amdgpu_bo_reserve(bo, true))
+ return -EAGAIN;
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ amdgpu_bo_unreserve(bo);
+ if (ret) {
+ pr_err("%s: Failed to invalidate userptr BO\n",
+ __func__);
+ return -EAGAIN;
+ }
+ }
+
/* Get updated user pages */
ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
- &range);
+ &mem->range);
if (ret) {
pr_debug("Failed %d to get user pages\n", ret);
*/
if (ret != -EFAULT)
return ret;
- } else {
- /*
- * FIXME: Cannot ignore the return code, must hold
- * notifier_lock
- */
- amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
+ ret = 0;
}
+ mutex_lock(&process_info->notifier_lock);
+
/* Mark the BO as valid unless it was invalidated
* again concurrently.
*/
- if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
- return -EAGAIN;
+ if (mem->invalid != invalid) {
+ ret = -EAGAIN;
+ goto unlock_out;
+ }
+ mem->invalid = 0;
}
- return 0;
+unlock_out:
+ mutex_unlock(&process_info->notifier_lock);
+
+ return ret;
}
/* Validate invalid userptr BOs
*
- * Validates BOs on the userptr_inval_list, and moves them back to the
- * userptr_valid_list. Also updates GPUVM page tables with new page
- * addresses and waits for the page table updates to complete.
+ * Validates BOs on the userptr_inval_list. Also updates GPUVM page tables
+ * with new page addresses and waits for the page table updates to complete.
*/
static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
{
}
}
- list_move_tail(&mem->validate_list.head,
- &process_info->userptr_valid_list);
-
/* Update mapping. If the BO was not validated
* (because we couldn't get user pages), this will
* clear the page table entries, which will result in
if (ret) {
pr_err("%s: update PTE failed\n", __func__);
/* make sure this gets validated again */
- atomic_inc(&mem->invalid);
+ mutex_lock(&process_info->notifier_lock);
+ mem->invalid++;
+ mutex_unlock(&process_info->notifier_lock);
goto unreserve_out;
}
}
return ret;
}
+/* Confirm that all user pages are valid while holding the notifier lock
+ *
+ * Moves valid BOs from the userptr_inval_list back to the userptr_valid_list.
+ */
+static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_info)
+{
+ struct kgd_mem *mem, *tmp_mem;
+ int ret = 0;
+
+ list_for_each_entry_safe(mem, tmp_mem,
+ &process_info->userptr_inval_list,
+ validate_list.head) {
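+ /* get_user_pages_done() returns false if the notifier sequence has
+ * changed, i.e. an invalidation raced with the page lookup
+ */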
+ bool valid = amdgpu_ttm_tt_get_user_pages_done(
+ mem->bo->tbo.ttm, mem->range);
+
+ mem->range = NULL;
+ if (!valid) {
+ WARN(!mem->invalid, "Invalid BO not marked invalid");
+ ret = -EAGAIN;
+ continue;
+ }
+ WARN(mem->invalid, "Valid BO is marked invalid");
+
+ list_move_tail(&mem->validate_list.head,
+ &process_info->userptr_valid_list);
+ }
+
+ return ret;
+}
+
/* Worker callback to restore evicted userptr BOs
*
* Tries to update and validate all userptr BOs. If successful and no
restore_userptr_work);
struct task_struct *usertask;
struct mm_struct *mm;
- int evicted_bos;
+ uint32_t evicted_bos;
- evicted_bos = atomic_read(&process_info->evicted_bos);
+ mutex_lock(&process_info->notifier_lock);
+ evicted_bos = process_info->evicted_bos;
+ mutex_unlock(&process_info->notifier_lock);
if (!evicted_bos)
return;
* and we can just restart the queues.
*/
if (!list_empty(&process_info->userptr_inval_list)) {
- if (atomic_read(&process_info->evicted_bos) != evicted_bos)
- goto unlock_out; /* Concurrent eviction, try again */
-
if (validate_invalid_user_pages(process_info))
goto unlock_out;
}
* be a first eviction that calls quiesce_mm. The eviction
* reference counting inside KFD will handle this case.
*/
- if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
- evicted_bos)
- goto unlock_out;
- evicted_bos = 0;
+ mutex_lock(&process_info->notifier_lock);
+ if (process_info->evicted_bos != evicted_bos)
+ goto unlock_notifier_out;
+
+ if (confirm_valid_user_pages_locked(process_info)) {
+ WARN(1, "User pages unexpectedly invalid");
+ goto unlock_notifier_out;
+ }
+
+ process_info->evicted_bos = evicted_bos = 0;
+
if (kgd2kfd_resume_mm(mm)) {
pr_err("%s: Failed to resume KFD\n", __func__);
/* No recovery from this failure. Probably the CP is
*/
}
+unlock_notifier_out:
+ mutex_unlock(&process_info->notifier_lock);
unlock_out:
mutex_unlock(&process_info->lock);
continue;
}
- /* skip suspend of gfx and psp for S0ix
+ /* skip suspend of gfx/mes and psp for S0ix
* gfx is in gfxoff state, so on resume it will exit gfxoff just
* like at runtime. PSP is also part of the always on hardware
* so no need to suspend it.
*/
if (adev->in_s0ix &&
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
continue;
/* XXX handle errors */
adev->in_suspend = true;
+ /* Evict the majority of BOs before requesting full GPU access */
+ r = amdgpu_device_evict_resources(adev);
+ if (r)
+ return r;
+
if (amdgpu_sriov_vf(adev)) {
amdgpu_virt_fini_data_exchange(adev);
r = amdgpu_virt_request_full_gpu(adev, false);
"See modparam exp_hw_support\n");
return -ENODEV;
}
+ /* differentiate between P10 and P11 asics with the same DID */
+ if (pdev->device == 0x67FF &&
+ (pdev->revision == 0xE3 ||
+ pdev->revision == 0xE7 ||
+ pdev->revision == 0xF3 ||
+ pdev->revision == 0xF7)) {
+ flags &= ~AMD_ASIC_MASK;
+ flags |= CHIP_POLARIS10;
+ }
/* Due to hardware bugs, S/G Display on raven requires a 1:1 IOMMU mapping,
* however, SME requires an indirect IOMMU mapping because the encryption
pci_set_drvdata(pdev, ddev);
- ret = amdgpu_driver_load_kms(adev, ent->driver_data);
+ ret = amdgpu_driver_load_kms(adev, flags);
if (ret)
goto err_pci;
retry_init:
- ret = drm_dev_register(ddev, ent->driver_data);
+ ret = drm_dev_register(ddev, flags);
if (ret == -EAGAIN && ++retry <= 3) {
DRM_INFO("retry init %d\n", retry);
/* Don't request EX mode too frequently; repeated requests would look like an attack */
sizeof(atom_ctx->vbios_version)) ||
strnstr(atom_ctx->vbios_version, "D163",
sizeof(atom_ctx->vbios_version))) {
- *fru_addr = FRU_EEPROM_MADDR_6;
+ if (fru_addr)
+ *fru_addr = FRU_EEPROM_MADDR_6;
return true;
} else {
return false;
sizeof(atom_ctx->vbios_version))) {
return false;
} else {
- *fru_addr = FRU_EEPROM_MADDR_6;
+ if (fru_addr)
+ *fru_addr = FRU_EEPROM_MADDR_6;
return true;
}
} else {
bp.resv = resv;
bp.preferred_domain = initial_domain;
bp.flags = flags;
- bp.domain = initial_domain | AMDGPU_GEM_DOMAIN_CPU;
+ bp.domain = initial_domain;
bp.bo_ptr_size = sizeof(struct amdgpu_bo);
r = amdgpu_bo_create_user(adev, &bp, &ubo);
}
initial_domain = (u32)(0xffffffff & args->in.domains);
+retry:
r = amdgpu_gem_object_create(adev, size, args->in.alignment,
- initial_domain, flags, ttm_bo_type_device,
- resv, &gobj);
+ initial_domain,
+ flags, ttm_bo_type_device, resv, &gobj);
if (r && r != -ERESTARTSYS) {
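+ /* On allocation failure, first retry without requiring CPU-visible
+ * VRAM, then fall back to GTT if a VRAM-only placement was requested
+ */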
+ if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
+ flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ goto retry;
+ }
+
+ if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
+ initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
+ goto retry;
+ }
DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
size, initial_domain, args->in.alignment, r);
}
unsigned long cur_seq)
{
struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier);
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
if (!mmu_notifier_range_blockable(range))
return false;
- mutex_lock(&adev->notifier_lock);
-
- mmu_interval_set_seq(mni, cur_seq);
-
- amdgpu_amdkfd_evict_userptr(bo->kfd_bo, bo->notifier.mm);
- mutex_unlock(&adev->notifier_lock);
+ amdgpu_amdkfd_evict_userptr(mni, cur_seq, bo->kfd_bo);
return true;
}
return r;
}
-int amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range)
+bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range)
{
- int r;
+ bool r;
r = mmu_interval_read_retry(hmm_range->notifier,
hmm_range->notifier_seq);
#include <linux/rwsem.h>
#include <linux/workqueue.h>
#include <linux/interval_tree.h>
+#include <linux/mmu_notifier.h>
int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
uint64_t start, uint64_t npages, bool readonly,
void *owner, struct page **pages,
struct hmm_range **phmm_range);
-int amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range);
+bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range);
#if defined(CONFIG_HMM_MIRROR)
int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr);
atomic_read(&adev->gpu_reset_counter);
}
+/* Check if we need to switch to another set of resources */
+static bool amdgpu_vmid_gds_switch_needed(struct amdgpu_vmid *id,
+ struct amdgpu_job *job)
+{
+ return id->gds_base != job->gds_base ||
+ id->gds_size != job->gds_size ||
+ id->gws_base != job->gws_base ||
+ id->gws_size != job->gws_size ||
+ id->oa_base != job->oa_base ||
+ id->oa_size != job->oa_size;
+}
+
+/* Check if the id is compatible with the job */
+static bool amdgpu_vmid_compatible(struct amdgpu_vmid *id,
+ struct amdgpu_job *job)
+{
+ return id->pd_gpu_addr == job->vm_pd_addr &&
+ !amdgpu_vmid_gds_switch_needed(id, job);
+}
+
/**
* amdgpu_vmid_grab_idle - grab idle VMID
*
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->funcs->vmhub;
+ struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
uint64_t fence_context = adev->fence_context + ring->idx;
bool needs_flush = vm->use_cpu_for_update;
uint64_t updates = amdgpu_vm_tlb_seq(vm);
int r;
- *id = vm->reserved_vmid[vmhub];
+ *id = id_mgr->reserved;
if ((*id)->owner != vm->immediate.fence_context ||
- (*id)->pd_gpu_addr != job->vm_pd_addr ||
+ !amdgpu_vmid_compatible(*id, job) ||
(*id)->flushed_updates < updates ||
!(*id)->last_flush ||
((*id)->last_flush->context != fence_context &&
if (r)
return r;
- (*id)->flushed_updates = updates;
job->vm_needs_flush = needs_flush;
+ job->spm_update_needed = true;
return 0;
}
if ((*id)->owner != vm->immediate.fence_context)
continue;
- if ((*id)->pd_gpu_addr != job->vm_pd_addr)
+ if (!amdgpu_vmid_compatible(*id, job))
continue;
if (!(*id)->last_flush ||
if (r)
return r;
- (*id)->flushed_updates = updates;
job->vm_needs_flush |= needs_flush;
return 0;
}
if (r)
goto error;
- id->flushed_updates = amdgpu_vm_tlb_seq(vm);
job->vm_needs_flush = true;
}
list_move_tail(&id->list, &id_mgr->ids_lru);
}
- id->pd_gpu_addr = job->vm_pd_addr;
- id->owner = vm->immediate.fence_context;
-
+ job->gds_switch_needed = amdgpu_vmid_gds_switch_needed(id, job);
if (job->vm_needs_flush) {
+ id->flushed_updates = amdgpu_vm_tlb_seq(vm);
dma_fence_put(id->last_flush);
id->last_flush = NULL;
}
job->vmid = id - id_mgr->ids;
job->pasid = vm->pasid;
+
+ id->gds_base = job->gds_base;
+ id->gds_size = job->gds_size;
+ id->gws_base = job->gws_base;
+ id->gws_size = job->gws_size;
+ id->oa_base = job->oa_base;
+ id->oa_size = job->oa_size;
+ id->pd_gpu_addr = job->vm_pd_addr;
+ id->owner = vm->immediate.fence_context;
+
trace_amdgpu_vm_grab_id(vm, ring, job);
error:
struct amdgpu_vm *vm,
unsigned vmhub)
{
- struct amdgpu_vmid_mgr *id_mgr;
- struct amdgpu_vmid *idle;
- int r = 0;
+ struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
- id_mgr = &adev->vm_manager.id_mgr[vmhub];
mutex_lock(&id_mgr->lock);
if (vm->reserved_vmid[vmhub])
goto unlock;
- if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
- AMDGPU_VM_MAX_RESERVED_VMID) {
- DRM_ERROR("Over limitation of reserved vmid\n");
- atomic_dec(&id_mgr->reserved_vmid_num);
- r = -EINVAL;
- goto unlock;
+
+ ++id_mgr->reserved_use_count;
+ if (!id_mgr->reserved) {
+ struct amdgpu_vmid *id;
+
+ id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid,
+ list);
+ /* Remove from normal round robin handling */
+ list_del_init(&id->list);
+ id_mgr->reserved = id;
}
- /* Select the first entry VMID */
- idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
- list_del_init(&idle->list);
- vm->reserved_vmid[vmhub] = idle;
- mutex_unlock(&id_mgr->lock);
+ vm->reserved_vmid[vmhub] = true;
- return 0;
unlock:
mutex_unlock(&id_mgr->lock);
- return r;
+ return 0;
}
void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
mutex_lock(&id_mgr->lock);
- if (vm->reserved_vmid[vmhub]) {
- list_add(&vm->reserved_vmid[vmhub]->list,
- &id_mgr->ids_lru);
- vm->reserved_vmid[vmhub] = NULL;
- atomic_dec(&id_mgr->reserved_vmid_num);
+ if (vm->reserved_vmid[vmhub] &&
+ !--id_mgr->reserved_use_count) {
+ /* give the reserved ID back to normal round robin */
+ list_add(&id_mgr->reserved->list, &id_mgr->ids_lru);
}
+ vm->reserved_vmid[vmhub] = false;
mutex_unlock(&id_mgr->lock);
}
mutex_init(&id_mgr->lock);
INIT_LIST_HEAD(&id_mgr->ids_lru);
- atomic_set(&id_mgr->reserved_vmid_num, 0);
+ id_mgr->reserved_use_count = 0;
/* manage only VMIDs not used by KFD */
id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;
unsigned num_ids;
struct list_head ids_lru;
struct amdgpu_vmid ids[AMDGPU_NUM_VMID];
- atomic_t reserved_vmid_num;
+ struct amdgpu_vmid *reserved;
+ unsigned int reserved_use_count;
};
int amdgpu_pasid_alloc(unsigned int bits);
uint32_t preamble_status;
uint32_t preemption_status;
bool vm_needs_flush;
+ bool gds_switch_needed;
+ bool spm_update_needed;
uint64_t vm_pd_addr;
unsigned vmid;
unsigned pasid;
* @adev: amdgpu device object
* @offset: offset of the BO
* @size: size of the BO
- * @domain: where to place it
* @bo_ptr: used to initialize BOs in structures
* @cpu_addr: optional CPU address mapping
*
- * Creates a kernel BO at a specific offset in the address space of the domain.
+ * Creates a kernel BO at a specific offset in VRAM.
*
* Returns:
* 0 on success, negative error code otherwise.
*/
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
- uint64_t offset, uint64_t size, uint32_t domain,
+ uint64_t offset, uint64_t size,
struct amdgpu_bo **bo_ptr, void **cpu_addr)
{
struct ttm_operation_ctx ctx = { false, false };
offset &= PAGE_MASK;
size = ALIGN(size, PAGE_SIZE);
- r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, domain, bo_ptr,
- NULL, cpu_addr);
+ r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM, bo_ptr, NULL,
+ cpu_addr);
if (r)
return r;
if (*bo == NULL)
return;
+ WARN_ON(amdgpu_ttm_adev((*bo)->tbo.bdev)->in_suspend);
+
if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
if (cpu_addr)
amdgpu_bo_kunmap(*bo);
/*
* If GTT is part of requested domains the check must succeed to
- * allow fall back to GTT
+ * allow fall back to GTT.
*/
if (domain & AMDGPU_GEM_DOMAIN_GTT) {
man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
- if (size < man->size)
+ if (man && size < man->size)
return true;
- else
- goto fail;
- }
-
- if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
+ else if (!man)
+ WARN_ON_ONCE("GTT domain requested but GTT mem manager uninitialized");
+ goto fail;
+ } else if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
- if (size < man->size)
+ if (man && size < man->size)
return true;
- else
- goto fail;
+ goto fail;
}
-
/* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU */
return true;
bo->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
bo->tbo.bdev = &adev->mman.bdev;
- amdgpu_bo_placement_from_domain(bo, bp->domain);
+ if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
+ AMDGPU_GEM_DOMAIN_GDS))
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+ else
+ amdgpu_bo_placement_from_domain(bo, bp->domain);
if (bp->type == ttm_bo_type_kernel)
bo->tbo.priority = 1;
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
uint32_t domain)
{
- if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
+ if ((domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) &&
+ ((adev->asic_type == CHIP_CARRIZO) || (adev->asic_type == CHIP_STONEY))) {
domain = AMDGPU_GEM_DOMAIN_VRAM;
if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
domain = AMDGPU_GEM_DOMAIN_GTT;
u32 domain, struct amdgpu_bo **bo_ptr,
u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
- uint64_t offset, uint64_t size, uint32_t domain,
+ uint64_t offset, uint64_t size,
struct amdgpu_bo **bo_ptr, void **cpu_addr);
int amdgpu_bo_create_user(struct amdgpu_device *adev,
struct amdgpu_bo_param *bp,
return r;
}
+/* amdgpu_ttm_tt_discard_user_pages - Discard range and pfn array allocations
+ */
+void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
+ struct hmm_range *range)
+{
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
+
+ if (gtt && gtt->userptr && range)
+ amdgpu_hmm_range_get_pages_done(range);
+}
+
/*
- * amdgpu_ttm_tt_userptr_range_done - stop HMM track the CPU page table change
+ * amdgpu_ttm_tt_get_user_pages_done - stop HMM from tracking CPU page table changes
* Check if the pages backing this ttm range have been invalidated
*
* Returns: true if pages are still valid
WARN_ONCE(!range->hmm_pfns, "No user pages to check\n");
- /*
- * FIXME: Must always hold notifier_lock for this, and must
- * not ignore the return code.
- */
return !amdgpu_hmm_range_get_pages_done(range);
}
#endif
return amdgpu_bo_create_kernel_at(adev,
adev->mman.fw_vram_usage_start_offset,
adev->mman.fw_vram_usage_size,
- AMDGPU_GEM_DOMAIN_VRAM,
&adev->mman.fw_vram_usage_reserved_bo,
&adev->mman.fw_vram_usage_va);
}
return amdgpu_bo_create_kernel_at(adev,
adev->mman.drv_vram_usage_start_offset,
adev->mman.drv_vram_usage_size,
- AMDGPU_GEM_DOMAIN_VRAM,
&adev->mman.drv_vram_usage_reserved_bo,
&adev->mman.drv_vram_usage_va);
}
ret = amdgpu_bo_create_kernel_at(adev,
ctx->c2p_train_data_offset,
ctx->train_data_size,
- AMDGPU_GEM_DOMAIN_VRAM,
&ctx->c2p_bo,
NULL);
if (ret) {
ret = amdgpu_bo_create_kernel_at(adev,
adev->gmc.real_vram_size - adev->mman.discovery_tmr_size,
adev->mman.discovery_tmr_size,
- AMDGPU_GEM_DOMAIN_VRAM,
&adev->mman.discovery_memory,
NULL);
if (ret) {
* avoid display artifacts while transitioning between pre-OS
* and driver. */
r = amdgpu_bo_create_kernel_at(adev, 0, adev->mman.stolen_vga_size,
- AMDGPU_GEM_DOMAIN_VRAM,
&adev->mman.stolen_vga_memory,
NULL);
if (r)
return r;
r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
adev->mman.stolen_extended_size,
- AMDGPU_GEM_DOMAIN_VRAM,
&adev->mman.stolen_extended_memory,
NULL);
if (r)
return r;
r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_reserved_offset,
adev->mman.stolen_reserved_size,
- AMDGPU_GEM_DOMAIN_VRAM,
&adev->mman.stolen_reserved_memory,
NULL);
if (r)
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
struct hmm_range **range);
+void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
+ struct hmm_range *range);
bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
struct hmm_range *range);
#else
{
return -EPERM;
}
+static inline void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
+ struct hmm_range *range)
+{
+}
static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
struct hmm_range *range)
{
*/
if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
AMDGPU_GPU_PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
&bo, NULL))
DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp);
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->funcs->vmhub;
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
- struct amdgpu_vmid *id;
- bool gds_switch_needed;
- bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
if (job->vmid == 0)
return false;
- id = &id_mgr->ids[job->vmid];
- gds_switch_needed = ring->funcs->emit_gds_switch && (
- id->gds_base != job->gds_base ||
- id->gds_size != job->gds_size ||
- id->gws_base != job->gws_base ||
- id->gws_size != job->gws_size ||
- id->oa_base != job->oa_base ||
- id->oa_size != job->oa_size);
-
- if (amdgpu_vmid_had_gpu_reset(adev, id))
+
+ if (job->vm_needs_flush || ring->has_compute_vm_bug)
+ return true;
+
+ if (ring->funcs->emit_gds_switch && job->gds_switch_needed)
+ return true;
+
+ if (amdgpu_vmid_had_gpu_reset(adev, &id_mgr->ids[job->vmid]))
return true;
- return vm_flush_needed || gds_switch_needed;
+ return false;
}
/**
unsigned vmhub = ring->funcs->vmhub;
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
- bool gds_switch_needed = ring->funcs->emit_gds_switch && (
- id->gds_base != job->gds_base ||
- id->gds_size != job->gds_size ||
- id->gws_base != job->gws_base ||
- id->gws_size != job->gws_size ||
- id->oa_base != job->oa_base ||
- id->oa_size != job->oa_size);
+ bool spm_update_needed = job->spm_update_needed;
+ bool gds_switch_needed = ring->funcs->emit_gds_switch &&
+ job->gds_switch_needed;
bool vm_flush_needed = job->vm_needs_flush;
struct dma_fence *fence = NULL;
bool pasid_mapping_needed = false;
unsigned patch_offset = 0;
- bool update_spm_vmid_needed = (job->vm && (job->vm->reserved_vmid[vmhub] != NULL));
int r;
- if (update_spm_vmid_needed && adev->gfx.rlc.funcs->update_spm_vmid)
- adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid);
-
if (amdgpu_vmid_had_gpu_reset(adev, id)) {
gds_switch_needed = true;
vm_flush_needed = true;
pasid_mapping_needed = true;
+ spm_update_needed = true;
}
mutex_lock(&id_mgr->lock);
if (pasid_mapping_needed)
amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
+ if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
+ adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid);
+
+ if (!ring->is_mes_queue && ring->funcs->emit_gds_switch &&
+ gds_switch_needed) {
+ amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
+ job->gds_size, job->gws_base,
+ job->gws_size, job->oa_base,
+ job->oa_size);
+ }
+
if (vm_flush_needed || pasid_mapping_needed) {
r = amdgpu_fence_emit(ring, &fence, NULL, 0);
if (r)
}
dma_fence_put(fence);
- if (!ring->is_mes_queue && ring->funcs->emit_gds_switch &&
- gds_switch_needed) {
- id->gds_base = job->gds_base;
- id->gds_size = job->gds_size;
- id->gws_base = job->gws_base;
- id->gws_size = job->gws_size;
- id->oa_base = job->oa_base;
- id->oa_size = job->oa_size;
- amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
- job->gds_size, job->gws_base,
- job->gws_size, job->oa_base,
- job->oa_size);
- }
-
if (ring->funcs->patch_cond_exec)
amdgpu_ring_patch_cond_exec(ring, patch_offset);
union drm_amdgpu_vm *args = data;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_fpriv *fpriv = filp->driver_priv;
- long timeout = msecs_to_jiffies(2000);
int r;
switch (args->in.op) {
return r;
break;
case AMDGPU_VM_OP_UNRESERVE_VMID:
- if (amdgpu_sriov_runtime(adev))
- timeout = 8 * timeout;
-
- /* Wait vm idle to make sure the vmid set in SPM_VMID is
- * not referenced anymore.
- */
- r = amdgpu_bo_reserve(fpriv->vm.root.bo, true);
- if (r)
- return r;
-
- r = amdgpu_vm_wait_idle(&fpriv->vm, timeout);
- if (r < 0)
- return r;
-
- amdgpu_bo_unreserve(fpriv->vm.root.bo);
amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0);
break;
default:
/* Reserve 2MB at top/bottom of address space for kernel use */
#define AMDGPU_VA_RESERVED_SIZE (2ULL << 20)
-/* max vmids dedicated for process */
-#define AMDGPU_VM_MAX_RESERVED_VMID 1
-
/* See vm_update_mode */
#define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0)
#define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1)
struct dma_fence *last_unlocked;
unsigned int pasid;
- /* dedicated to vm */
- struct amdgpu_vmid *reserved_vmid[AMDGPU_MAX_VMHUBS];
+ bool reserved_vmid[AMDGPU_MAX_VMHUBS];
/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
bool use_cpu_for_update;
/* Wait for PD/PT moves to be completed */
dma_resv_iter_begin(&cursor, bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL);
dma_resv_for_each_fence_unlocked(&cursor, fence) {
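+ /* Take an extra fence reference for drm_sched_job_add_dependency();
+ * it is dropped again below if adding the dependency fails
+ */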
+ dma_fence_get(fence);
r = drm_sched_job_add_dependency(&p->job->base, fence);
if (r) {
+ dma_fence_put(fence);
dma_resv_iter_end(&cursor);
return r;
}
struct amdgpu_bo_va_mapping *mapping,
uint64_t *flags)
{
+ struct amdgpu_bo *bo = mapping->bo_va->base.bo;
+
*flags &= ~AMDGPU_PTE_EXECUTABLE;
*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
*flags &= ~AMDGPU_PTE_VALID;
}
- if (mapping->bo_va->base.bo)
+ if (bo && bo->tbo.resource)
gmc_v9_0_get_coherence_flags(adev, mapping->bo_va->base.bo,
mapping, flags);
}
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!amdgpu_in_reset(adev) &&
+ /* it's only intended for use in the mes_self_test case, not for s0ix or reset */
+ if (!amdgpu_in_reset(adev) && !adev->in_s0ix &&
(adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3)))
amdgpu_mes_self_test(adev);
AMD_CG_SUPPORT_VCN_MGCG |
AMD_CG_SUPPORT_JPEG_MGCG;
adev->pg_flags = AMD_PG_SUPPORT_VCN |
+ AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_JPEG;
adev->external_rev_id = adev->rev_id + 0x1;
}
static void kfd_process_free_gpuvm(struct kgd_mem *mem,
- struct kfd_process_device *pdd, void *kptr)
+ struct kfd_process_device *pdd, void **kptr)
{
struct kfd_dev *dev = pdd->dev;
- if (kptr) {
+ if (kptr && *kptr) {
amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
- kptr = NULL;
+ *kptr = NULL;
}
amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->adev, mem, pdd->drm_priv);
if (!qpd->ib_kaddr || !qpd->ib_base)
return;
- kfd_process_free_gpuvm(qpd->ib_mem, pdd, qpd->ib_kaddr);
+ kfd_process_free_gpuvm(qpd->ib_mem, pdd, &qpd->ib_kaddr);
}
struct kfd_process *kfd_create_process(struct file *filep)
if (!dev->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base)
return;
- kfd_process_free_gpuvm(qpd->cwsr_mem, pdd, qpd->cwsr_kaddr);
+ kfd_process_free_gpuvm(qpd->cwsr_mem, pdd, &qpd->cwsr_kaddr);
}
void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
p = pdd->process;
dev = pdd->dev;
- ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(
- dev->adev, drm_file, p->pasid,
- &p->kgd_process_info, &p->ef);
+ ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, drm_file,
+ &p->kgd_process_info,
+ &p->ef);
if (ret) {
pr_err("Failed to create process VM object\n");
return ret;
if (ret)
goto err_init_cwsr;
+ ret = amdgpu_amdkfd_gpuvm_set_vm_pasid(dev->adev, drm_file, p->pasid);
+ if (ret)
+ goto err_set_pasid;
+
pdd->drm_file = drm_file;
return 0;
+err_set_pasid:
+ kfd_process_device_destroy_cwsr_dgpu(pdd);
err_init_cwsr:
+ kfd_process_device_destroy_ib_mem(pdd);
err_reserve_ib_mem:
- kfd_process_device_free_bos(pdd);
pdd->drm_priv = NULL;
return ret;
case IP_VERSION(3, 0, 1):
case IP_VERSION(3, 1, 2):
case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
case IP_VERSION(3, 1, 5):
case IP_VERSION(3, 1, 6):
init_data.flags.gpu_vm_support = true;
s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A);
s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A);
}
- s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A);
+ s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A);
s = &wm->sets[1];
s->wm_set = 1;
s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B);
s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B);
}
- s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B);
+ s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B);
s = &wm->sets[2];
s->wm_set = 2;
s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C);
s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C);
}
- s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C);
+ s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C);
s = &wm->sets[3];
s->wm_set = 3;
s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D);
s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D);
}
- s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D);
+ s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D);
}
void hubbub1_allow_self_refresh_control(struct hubbub *hubbub, bool allow)
DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
DTN_INFO_MICRO_SEC(s->sr_enter);
DTN_INFO_MICRO_SEC(s->sr_exit);
- DTN_INFO_MICRO_SEC(s->dram_clk_chanage);
+ DTN_INFO_MICRO_SEC(s->dram_clk_change);
DTN_INFO("\n");
}
memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
- chars_printed = snprintf_count(pBuf, remaining_buffer, "wm_set_index,data_urgent,pte_meta_urgent,sr_enter,sr_exit,dram_clk_chanage\n");
+ chars_printed = snprintf_count(pBuf, remaining_buffer, "wm_set_index,data_urgent,pte_meta_urgent,sr_enter,sr_exit,dram_clk_change\n");
remaining_buffer -= chars_printed;
pBuf += chars_printed;
(s->pte_meta_urgent * frac) / ref_clk_mhz / frac, (s->pte_meta_urgent * frac) / ref_clk_mhz % frac,
(s->sr_enter * frac) / ref_clk_mhz / frac, (s->sr_enter * frac) / ref_clk_mhz % frac,
(s->sr_exit * frac) / ref_clk_mhz / frac, (s->sr_exit * frac) / ref_clk_mhz % frac,
- (s->dram_clk_chanage * frac) / ref_clk_mhz / frac, (s->dram_clk_chanage * frac) / ref_clk_mhz % frac);
+ (s->dram_clk_change * frac) / ref_clk_mhz / frac, (s->dram_clk_change * frac) / ref_clk_mhz % frac);
remaining_buffer -= chars_printed;
pBuf += chars_printed;
}
s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A);
s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A);
}
- s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A);
+ s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A);
s = &wm->sets[1];
s->wm_set = 1;
s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B);
s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B);
}
- s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B);
+ s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B);
s = &wm->sets[2];
s->wm_set = 2;
s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C);
s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C);
}
- s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C);
+ s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C);
s = &wm->sets[3];
s->wm_set = 3;
s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D);
s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D);
}
- s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D);
+ s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D);
}
void hubbub2_get_dchub_ref_freq(struct hubbub *hubbub,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, &s->sr_exit);
REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A,
- DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, &s->dram_clk_chanage);
+ DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, &s->dram_clk_change);
s = &wm->sets[1];
s->wm_set = 1;
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, &s->sr_exit);
REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B,
- DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, &s->dram_clk_chanage);
+ DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, &s->dram_clk_change);
s = &wm->sets[2];
s->wm_set = 2;
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, &s->sr_exit);
REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C,
- DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, &s->dram_clk_chanage);
+ DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, &s->dram_clk_change);
s = &wm->sets[3];
s->wm_set = 3;
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, &s->sr_exit);
REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D,
- DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, &s->dram_clk_chanage);
+ DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, &s->dram_clk_change);
}
static void hubbub21_apply_DEDCN21_147_wa(struct hubbub *hubbub)
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, &s->sr_exit);
REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A,
- DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, &s->dram_clk_chanage);
+ DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, &s->dram_clk_change);
REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A,
DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, &s->usr_retrain);
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, &s->sr_exit);
REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B,
- DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, &s->dram_clk_chanage);
+ DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, &s->dram_clk_change);
REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B,
DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, &s->usr_retrain);
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, &s->sr_exit);
REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C,
- DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, &s->dram_clk_chanage);
+ DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, &s->dram_clk_change);
REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C,
DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, &s->usr_retrain);
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, &s->sr_exit);
REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D,
- DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, &s->dram_clk_chanage);
+ DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, &s->dram_clk_change);
REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D,
DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, &s->usr_retrain);
uint32_t pte_meta_urgent;
uint32_t sr_enter;
uint32_t sr_exit;
- uint32_t dram_clk_chanage;
+ uint32_t dram_clk_change;
uint32_t usr_retrain;
uint32_t fclk_pstate_change;
};
TEMP_HOTSPOT_M,
TEMP_MEM,
TEMP_VR_GFX,
+ TEMP_VR_SOC,
TEMP_VR_MEM0,
TEMP_VR_MEM1,
- TEMP_VR_SOC,
TEMP_VR_U,
TEMP_LIQUID0,
TEMP_LIQUID1,
__SMU_DUMMY_MAP(GetGfxOffEntryCount), \
__SMU_DUMMY_MAP(LogGfxOffResidency), \
__SMU_DUMMY_MAP(SetNumBadMemoryPagesRetired), \
- __SMU_DUMMY_MAP(SetBadMemoryPagesRetiredFlagsPerChannel),
+ __SMU_DUMMY_MAP(SetBadMemoryPagesRetiredFlagsPerChannel), \
+ __SMU_DUMMY_MAP(AllowGpo),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
#define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
#define SMU13_DRIVER_IF_VERSION_ALDE 0x08
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0 0x34
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10 0x32
int smu_v13_0_run_btc(struct smu_context *smu);
+int smu_v13_0_gpo_control(struct smu_context *smu,
+ bool enablement);
+
int smu_v13_0_deep_sleep_control(struct smu_context *smu,
bool enablement);
smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_ALDE;
break;
case IP_VERSION(13, 0, 0):
+ smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0;
+ break;
case IP_VERSION(13, 0, 10):
smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10;
break;
return res;
}
+int smu_v13_0_gpo_control(struct smu_context *smu,
+ bool enablement)
+{
+ int res;
+
+ res = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_AllowGpo,
+ enablement ? 1 : 0,
+ NULL);
+ if (res)
+ dev_err(smu->adev->dev, "SetGpoAllow %d failed!\n", enablement);
+
+ return res;
+}
+
int smu_v13_0_deep_sleep_control(struct smu_context *smu,
bool enablement)
{
MSG_MAP(SetNumBadMemoryPagesRetired, PPSMC_MSG_SetNumBadMemoryPagesRetired, 0),
MSG_MAP(SetBadMemoryPagesRetiredFlagsPerChannel,
PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel, 0),
+ MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0),
};
static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {
FEA_MAP(MEM_TEMP_READ),
FEA_MAP(ATHUB_MMHUB_PG),
FEA_MAP(SOC_PCC),
+ [SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
+ [SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
};
static struct cmn2asic_mapping smu_v13_0_0_table_map[SMU_TABLE_COUNT] = {
dpm_table);
if (ret)
return ret;
+
+ /*
+ * Update the reported maximum shader clock to the value
+ * which can be guaranteed to be achieved on all cards. This
+ * is aligned with the Windows setting. Considering that the
+ * value might not be the peak frequency the card can achieve,
+ * it is normal for some real-time clock frequencies to exceed
+ * this labelled maximum clock frequency (for example in the
+ * pp_dpm_sclk sysfs output).
+ */
+ if (skutable->DriverReportedClocks.GameClockAc &&
+ (dpm_table->dpm_levels[dpm_table->count - 1].value >
+ skutable->DriverReportedClocks.GameClockAc)) {
+ dpm_table->dpm_levels[dpm_table->count - 1].value =
+ skutable->DriverReportedClocks.GameClockAc;
+ dpm_table->max = skutable->DriverReportedClocks.GameClockAc;
+ }
} else {
dpm_table->count = 1;
dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
return ret;
}
+static int smu_v13_0_0_get_dpm_ultimate_freq(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *min,
+ uint32_t *max)
+{
+ struct smu_13_0_dpm_context *dpm_context =
+ smu->smu_dpm.dpm_context;
+ struct smu_13_0_dpm_table *dpm_table;
+
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ /* uclk dpm table */
+ dpm_table = &dpm_context->dpm_tables.uclk_table;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ /* gfxclk dpm table */
+ dpm_table = &dpm_context->dpm_tables.gfx_table;
+ break;
+ case SMU_SOCCLK:
+ /* socclk dpm table */
+ dpm_table = &dpm_context->dpm_tables.soc_table;
+ break;
+ case SMU_FCLK:
+ /* fclk dpm table */
+ dpm_table = &dpm_context->dpm_tables.fclk_table;
+ break;
+ case SMU_VCLK:
+ case SMU_VCLK1:
+ /* vclk dpm table */
+ dpm_table = &dpm_context->dpm_tables.vclk_table;
+ break;
+ case SMU_DCLK:
+ case SMU_DCLK1:
+ /* dclk dpm table */
+ dpm_table = &dpm_context->dpm_tables.dclk_table;
+ break;
+ default:
+ dev_err(smu->adev->dev, "Unsupported clock type!\n");
+ return -EINVAL;
+ }
+
+ if (min)
+ *min = dpm_table->min;
+ if (max)
+ *max = dpm_table->max;
+
+ return 0;
+}
+
static int smu_v13_0_0_read_sensor(struct smu_context *smu,
enum amd_pp_sensors sensor,
void *data,
&dpm_context->dpm_tables.fclk_table;
struct smu_umd_pstate_table *pstate_table =
&smu->pstate_table;
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *pptable = table_context->driver_pptable;
+ DriverReportedClocks_t driver_clocks =
+ pptable->SkuTable.DriverReportedClocks;
pstate_table->gfxclk_pstate.min = gfx_table->min;
- pstate_table->gfxclk_pstate.peak = gfx_table->max;
+ if (driver_clocks.GameClockAc &&
+ (driver_clocks.GameClockAc < gfx_table->max))
+ pstate_table->gfxclk_pstate.peak = driver_clocks.GameClockAc;
+ else
+ pstate_table->gfxclk_pstate.peak = gfx_table->max;
pstate_table->uclk_pstate.min = mem_table->min;
pstate_table->uclk_pstate.peak = mem_table->max;
pstate_table->fclk_pstate.min = fclk_table->min;
pstate_table->fclk_pstate.peak = fclk_table->max;
- /*
- * For now, just use the mininum clock frequency.
- * TODO: update them when the real pstate settings available
- */
- pstate_table->gfxclk_pstate.standard = gfx_table->min;
- pstate_table->uclk_pstate.standard = mem_table->min;
+ if (driver_clocks.BaseClockAc &&
+ driver_clocks.BaseClockAc < gfx_table->max)
+ pstate_table->gfxclk_pstate.standard = driver_clocks.BaseClockAc;
+ else
+ pstate_table->gfxclk_pstate.standard = gfx_table->max;
+ pstate_table->uclk_pstate.standard = mem_table->max;
pstate_table->socclk_pstate.standard = soc_table->min;
pstate_table->vclk_pstate.standard = vclk_table->min;
pstate_table->dclk_pstate.standard = dclk_table->min;
static int smu_v13_0_0_get_fan_speed_pwm(struct smu_context *smu,
uint32_t *speed)
{
+ int ret;
+
if (!speed)
return -EINVAL;
- return smu_v13_0_0_get_smu_metrics_data(smu,
- METRICS_CURR_FANPWM,
- speed);
+ ret = smu_v13_0_0_get_smu_metrics_data(smu,
+ METRICS_CURR_FANPWM,
+ speed);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to get fan speed(PWM)!");
+ return ret;
+ }
+
+ /* Convert the PMFW output which is in percent to pwm(255) based */
+ *speed = MIN(*speed * 255 / 100, 255);
+
+ return 0;
}
static int smu_v13_0_0_get_fan_speed_rpm(struct smu_context *smu,
.get_enabled_mask = smu_cmn_get_enabled_mask,
.dpm_set_vcn_enable = smu_v13_0_set_vcn_enable,
.dpm_set_jpeg_enable = smu_v13_0_set_jpeg_enable,
- .get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq,
+ .get_dpm_ultimate_freq = smu_v13_0_0_get_dpm_ultimate_freq,
.get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
.read_sensor = smu_v13_0_0_read_sensor,
.feature_is_enabled = smu_cmn_feature_is_enabled,
.set_df_cstate = smu_v13_0_0_set_df_cstate,
.send_hbm_bad_pages_num = smu_v13_0_0_smu_send_bad_mem_page_num,
.send_hbm_bad_channel_flag = smu_v13_0_0_send_bad_mem_channel_flag,
+ .gpo_control = smu_v13_0_gpo_control,
};
void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0),
MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0),
+ MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0),
};
static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
FEA_MAP(MEM_TEMP_READ),
FEA_MAP(ATHUB_MMHUB_PG),
FEA_MAP(SOC_PCC),
+ [SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
+ [SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
};
static struct cmn2asic_mapping smu_v13_0_7_table_map[SMU_TABLE_COUNT] = {
static int smu_v13_0_7_get_fan_speed_pwm(struct smu_context *smu,
uint32_t *speed)
{
+ int ret;
+
if (!speed)
return -EINVAL;
- return smu_v13_0_7_get_smu_metrics_data(smu,
- METRICS_CURR_FANPWM,
- speed);
+ ret = smu_v13_0_7_get_smu_metrics_data(smu,
+ METRICS_CURR_FANPWM,
+ speed);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to get fan speed(PWM)!");
+ return ret;
+ }
+
+ /* Convert the PMFW output which is in percent to pwm(255) based */
+ *speed = MIN(*speed * 255 / 100, 255);
+
+ return 0;
}
static int smu_v13_0_7_get_fan_speed_rpm(struct smu_context *smu,
static int smu_v13_0_7_get_power_profile_mode(struct smu_context *smu, char *buf)
{
- DpmActivityMonitorCoeffIntExternal_t activity_monitor_external[PP_SMC_POWER_PROFILE_COUNT];
+ DpmActivityMonitorCoeffIntExternal_t *activity_monitor_external;
uint32_t i, j, size = 0;
int16_t workload_type = 0;
int result = 0;
if (!buf)
return -EINVAL;
+ activity_monitor_external = kcalloc(PP_SMC_POWER_PROFILE_COUNT,
+ sizeof(*activity_monitor_external),
+ GFP_KERNEL);
+ if (!activity_monitor_external)
+ return -ENOMEM;
+
size += sysfs_emit_at(buf, size, " ");
for (i = 0; i <= PP_SMC_POWER_PROFILE_WINDOW3D; i++)
size += sysfs_emit_at(buf, size, "%-14s%s", amdgpu_pp_profile_name[i],
workload_type = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_WORKLOAD,
i);
- if (workload_type < 0)
- return -EINVAL;
+ if (workload_type < 0) {
+ result = -EINVAL;
+ goto out;
+ }
result = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type,
(void *)(&activity_monitor_external[i]), false);
if (result) {
dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
- return result;
+ goto out;
}
}
PRINT_DPM_MONITOR(Fclk_BoosterFreq);
#undef PRINT_DPM_MONITOR
- return size;
+ result = size;
+out:
+ kfree(activity_monitor_external);
+ return result;
}
static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
.mode1_reset = smu_v13_0_mode1_reset,
.set_mp1_state = smu_v13_0_7_set_mp1_state,
.set_df_cstate = smu_v13_0_7_set_df_cstate,
+ .gpo_control = smu_v13_0_gpo_control,
};
void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu)
intel_dp_pcon_dsc_configure(intel_dp, pipe_config);
intel_dp_start_link_train(intel_dp, pipe_config);
intel_dp_stop_link_train(intel_dp, pipe_config);
-
- intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
static void g4x_enable_dp(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
intel_enable_dp(state, encoder, pipe_config, conn_state);
+ intel_audio_codec_enable(encoder, pipe_config, conn_state);
intel_edp_backlight_on(pipe_config, conn_state);
}
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
+ intel_audio_codec_enable(encoder, pipe_config, conn_state);
intel_edp_backlight_on(pipe_config, conn_state);
}
&pipe_config->infoframes.hdmi);
}
-static void g4x_enable_hdmi(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
+static void g4x_hdmi_enable_port(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+}
+
+static void g4x_enable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ g4x_hdmi_enable_port(encoder, pipe_config);
drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio &&
!pipe_config->has_hdmi_sink);
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio &&
+ !pipe_config->has_hdmi_sink);
+ intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
static void intel_disable_hdmi(struct intel_atomic_state *state,
pipe_config->has_infoframe,
pipe_config, conn_state);
- g4x_enable_hdmi(state, encoder, pipe_config, conn_state);
+ g4x_hdmi_enable_port(encoder, pipe_config);
vlv_wait_port_ready(dev_priv, dig_port, 0x0);
}
pipe_config->has_infoframe,
pipe_config, conn_state);
- g4x_enable_hdmi(state, encoder, pipe_config, conn_state);
+ g4x_hdmi_enable_port(encoder, pipe_config);
vlv_wait_port_ready(dev_priv, dig_port, 0x0);
}
}
-static void
-intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
- enum pipe pipe = crtc->pipe;
- u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;
-
- trans_ddi_func_ctl_value = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(pipe));
- trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
- dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));
-
- trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE |
- TGL_TRANS_DDI_PORT_MASK);
- trans_conf_value &= ~PIPECONF_ENABLE;
- dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE;
-
- intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
- trans_ddi_func_ctl_value);
- intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
-}
-
-static void
-intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum port port = dig_port->base.port;
- struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
- enum pipe pipe = crtc->pipe;
- u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;
-
- trans_ddi_func_ctl_value = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(pipe));
- trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
- dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));
-
- trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE |
- TGL_TRANS_DDI_SELECT_PORT(port);
- trans_conf_value |= PIPECONF_ENABLE;
- dp_tp_ctl_value |= DP_TP_CTL_ENABLE;
-
- intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
- intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
- trans_ddi_func_ctl_value);
-}
-
static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX,
link_status);
- intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state);
-
intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX);
intel_dp_phy_pattern_update(intel_dp, crtc_state);
- intel_dp_autotest_phy_ddi_enable(intel_dp, crtc_state);
-
drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
intel_dp->train_set, crtc_state->lane_count);
if (!HAS_FLAT_CCS(to_i915(obj->base.dev)))
return false;
+ if (obj->flags & I915_BO_ALLOC_CCS_AUX)
+ return true;
+
for (i = 0; i < obj->mm.n_placements; i++) {
/* Compression is not allowed for the objects with smem placement */
if (obj->mm.placements[i]->type == INTEL_MEMORY_SYSTEM)
* dealing with userspace objects the CPU fault handler is free to ignore this.
*/
#define I915_BO_ALLOC_GPU_ONLY BIT(6)
+#define I915_BO_ALLOC_CCS_AUX BIT(7)
#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \
I915_BO_ALLOC_VOLATILE | \
I915_BO_ALLOC_CPU_CLEAR | \
I915_BO_ALLOC_USER | \
I915_BO_ALLOC_PM_VOLATILE | \
I915_BO_ALLOC_PM_EARLY | \
- I915_BO_ALLOC_GPU_ONLY)
-#define I915_BO_READONLY BIT(7)
-#define I915_TILING_QUIRK_BIT 8 /* unknown swizzling; do not release! */
-#define I915_BO_PROTECTED BIT(9)
+ I915_BO_ALLOC_GPU_ONLY | \
+ I915_BO_ALLOC_CCS_AUX)
+#define I915_BO_READONLY BIT(8)
+#define I915_TILING_QUIRK_BIT 9 /* unknown swizzling; do not release! */
+#define I915_BO_PROTECTED BIT(10)
/**
* @mem_flags - Mutable placement-related flags
*
container_of(bo->bdev, typeof(*i915), bdev);
struct drm_i915_gem_object *backup;
struct ttm_operation_ctx ctx = {};
+ unsigned int flags;
int err = 0;
if (bo->resource->mem_type == I915_PL_SYSTEM || obj->ttm.backup)
if (obj->flags & I915_BO_ALLOC_PM_VOLATILE)
return 0;
- backup = i915_gem_object_create_shmem(i915, obj->base.size);
+ /*
+ * It seems that we might have some framebuffers still pinned at this
+ * stage, but for such objects we might also need to deal with the CCS
+ * aux state. Make sure we force the save/restore of the CCS state,
+ * otherwise we might observe display corruption, when returning from
+ * suspend.
+ */
+ flags = 0;
+ if (i915_gem_object_needs_ccs_pages(obj)) {
+ WARN_ON_ONCE(!i915_gem_object_is_framebuffer(obj));
+ WARN_ON_ONCE(!pm_apply->allow_gpu);
+
+ flags = I915_BO_ALLOC_CCS_AUX;
+ }
+ backup = i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
+ obj->base.size, 0, flags);
if (IS_ERR(backup))
return PTR_ERR(backup);
}
/**
- * intel_gt_mcr_wait_for_reg_fw - wait until MCR register matches expected state
+ * intel_gt_mcr_wait_for_reg - wait until MCR register matches expected state
* @gt: GT structure
* @reg: the register to read
* @mask: mask to apply to register value
return 0;
}
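+/* Clamp a PTE update packet to the ring space still available to this
+ * request and to the contiguous space left before the ring wraps.
+ */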
+static int max_pte_pkt_size(struct i915_request *rq, int pkt)
+{
+ struct intel_ring *ring = rq->ring;
+
+ pkt = min_t(int, pkt, (ring->space - rq->reserved_space) / sizeof(u32) + 5);
+ pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5);
+
+ return pkt;
+}
+
static int emit_pte(struct i915_request *rq,
struct sgt_dma *it,
enum i915_cache_level cache_level,
return PTR_ERR(cs);
/* Pack as many PTE updates as possible into a single MI command */
- pkt = min_t(int, dword_length, ring->space / sizeof(u32) + 5);
- pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5);
+ pkt = max_pte_pkt_size(rq, dword_length);
hdr = cs;
*cs++ = MI_STORE_DATA_IMM | REG_BIT(21); /* as qword elements */
}
}
- pkt = min_t(int, dword_rem, ring->space / sizeof(u32) + 5);
- pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5);
+ pkt = max_pte_pkt_size(rq, dword_rem);
hdr = cs;
*cs++ = MI_STORE_DATA_IMM | REG_BIT(21);
if (err)
goto out_rq;
- /*
- * While we can't always restore/manage the CCS state,
- * we still need to ensure we don't leak the CCS state
- * from the previous user, so make sure we overwrite it
- * with something.
- */
- err = emit_copy_ccs(rq, dst_offset, INDIRECT_ACCESS,
- dst_offset, DIRECT_ACCESS, len);
+ if (src_is_lmem) {
+ /*
+ * If the src is already in lmem, then we must
+ * be doing an lmem -> lmem transfer, and so
+ * should be safe to directly copy the CCS
+ * state. In this case we have either
+ * initialised the CCS aux state when first
+ * clearing the pages (since it is already
+ * allocated in lmem), or the user has
+ * potentially populated it, in which case we
+ * need to copy the CCS state as-is.
+ */
+ err = emit_copy_ccs(rq,
+ dst_offset, INDIRECT_ACCESS,
+ src_offset, INDIRECT_ACCESS,
+ len);
+ } else {
+ /*
+ * While we can't always restore/manage the CCS
+ * state, we still need to ensure we don't leak
+ * the CCS state from the previous user, so make
+ * sure we overwrite it with something.
+ */
+ err = emit_copy_ccs(rq,
+ dst_offset, INDIRECT_ACCESS,
+ dst_offset, DIRECT_ACCESS,
+ len);
+ }
+
if (err)
goto out_rq;
u32 offset, len = (ce->engine->context_size - PAGE_SIZE) / 4;
u32 *state = ce->lrc_reg_state;
+ if (drm_WARN_ON(&ce->engine->i915->drm, !state))
+ return U32_MAX;
+
for (offset = 0; offset < len; ) {
if (IS_MI_LRI_CMD(state[offset])) {
/*
if (IS_ERR(ce))
return PTR_ERR(ce);
- if (engine_supports_mi_query(stream->engine)) {
+ if (engine_supports_mi_query(stream->engine) &&
+ HAS_LOGICAL_RING_CONTEXTS(stream->perf->i915)) {
/*
* We are enabling perf query here. If we don't find the context
* offset here, just return an error.
}
/**
- * intel_uncore_forcewake_put__locked - grab forcewake domain references
+ * intel_uncore_forcewake_put__locked - release forcewake domain references
* @uncore: the intel_uncore structure
- * @fw_domains: forcewake domains to get reference on
+ * @fw_domains: forcewake domains to put references on
*
* See intel_uncore_forcewake_put(). This variant places the onus
* on the caller to explicitly handle the dev_priv->uncore.lock spinlock.