	return 0;

err_free:
+	ttm_resource_fini(man, &node->base.base);
	kfree(node);

err_out:

The teardown path in amdgpu_gtt_mgr_del() gains the matching call before the node is freed:

	if (!(res->placement & TTM_PL_FLAG_TEMPORARY))
		atomic64_sub(res->num_pages, &mgr->used);

+	ttm_resource_fini(man, res);
	kfree(node);
}
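These two hunks pair the ttm_resource_init() performed when amdgpu_gtt_mgr_new() creates a node with a ttm_resource_fini() on both the error path and the regular free path. The sketch below shows the general shape every TTM resource-manager backend is expected to follow after this series; it is not part of the patch, and the names (my_mgr_new, my_mgr_del, struct my_node, my_mgr_func) are hypothetical:

#include <linux/slab.h>
#include <drm/ttm/ttm_resource.h>

struct my_node {
	struct ttm_resource base;	/* embedded base resource */
	/* backend-private bookkeeping (drm_mm node, offsets, ...) */
};

static int my_mgr_new(struct ttm_resource_manager *man,
		      struct ttm_buffer_object *tbo,
		      const struct ttm_place *place,
		      struct ttm_resource **res)
{
	struct my_node *node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!node)
		return -ENOMEM;

	/* initializes the embedded resource for this BO/placement */
	ttm_resource_init(tbo, place, &node->base);
	*res = &node->base;
	return 0;
}

static void my_mgr_del(struct ttm_resource_manager *man,
		       struct ttm_resource *res)
{
	struct my_node *node = container_of(res, struct my_node, base);

	/* every ttm_resource_init() must now be balanced by a
	 * ttm_resource_fini() before the memory is released */
	ttm_resource_fini(man, res);
	kfree(node);
}

static const struct ttm_resource_manager_func my_mgr_func = {
	.alloc = my_mgr_new,
	.free = my_mgr_del,
};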
/**
 * amdgpu_gtt_mgr_recover - re-init gart
 *
 * @mgr: amdgpu_gtt_mgr pointer
 *
 * Re-init the gart for each known BO in the GTT.
 */
-int amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr)
+void amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr)
{
	struct amdgpu_gtt_node *node;
	struct drm_mm_node *mm_node;
	struct amdgpu_device *adev;
-	int r = 0;

	adev = container_of(mgr, typeof(*adev), mman.gtt_mgr);
	spin_lock(&mgr->lock);
	drm_mm_for_each_node(mm_node, &mgr->mm) {
		node = container_of(mm_node, typeof(*node), base.mm_nodes[0]);
-		r = amdgpu_ttm_recover_gart(node->tbo);
-		if (r)
-			break;
+		amdgpu_ttm_recover_gart(node->tbo);
	}
	spin_unlock(&mgr->lock);
	amdgpu_gart_invalidate_tlb(adev);
-
-	return r;
}
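Two behavioral points fall out of the rewrite: the walk no longer aborts on the first rebind failure, and the TLB is still invalidated exactly once after the loop. Since the return value is gone, reset paths that used to propagate it now just call the function. A hedged sketch of such a call-site update (the surrounding code in amdgpu_device.c may differ):

-	r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
-	if (r)
-		return r;
+	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);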
In amdgpu_gtt_mgr_init(), the manager setup is updated for the new ttm_resource_manager_init() signature, which now takes the ttm_device:
	man->use_tt = true;
	man->func = &amdgpu_gtt_mgr_func;

-	ttm_resource_manager_init(man, gtt_size >> PAGE_SHIFT);
+	ttm_resource_manager_init(man, &adev->mman.bdev,
+				  gtt_size >> PAGE_SHIFT);

	start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
	size = (adev->gmc.gart_size >> PAGE_SHIFT) - start;
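After this change the helper's declaration in include/drm/ttm/ttm_resource.h looks roughly like the following (parameter names may differ):

void ttm_resource_manager_init(struct ttm_resource_manager *man,
			       struct ttm_device *bdev,
			       unsigned long size);

The size is still a page count, which is why the call above shifts gtt_size by PAGE_SHIFT. Keeping the ttm_device in the manager gives common TTM code a way back from a manager to its device without threading the pointer through every call chain.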