Merge remote-tracking branch 'airlied/drm-next' into drm-misc-next
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 6a3028c1358017f1617eabfe18f93bcf0bd8be08..b5fa003c134168ae858b7c9a12c0c5345637975c 100644
@@ -203,7 +203,9 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
        abo = container_of(bo, struct amdgpu_bo, tbo);
        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
-               if (adev->mman.buffer_funcs_ring->ready == false) {
+               if (adev->mman.buffer_funcs &&
+                   adev->mman.buffer_funcs_ring &&
+                   adev->mman.buffer_funcs_ring->ready == false) {
                        amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
                } else {
                        amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
@@ -763,7 +765,7 @@ int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
 {
        struct amdgpu_ttm_tt *gtt, *tmp;
        struct ttm_mem_reg bo_mem;
-       uint32_t flags;
+       uint64_t flags;
        int r;
 
        bo_mem.mem_type = TTM_PL_TT;
@@ -1038,11 +1040,17 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
 static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
                                            const struct ttm_place *place)
 {
-       if (bo->mem.mem_type == TTM_PL_VRAM &&
-           bo->mem.start == AMDGPU_BO_INVALID_OFFSET) {
-               unsigned long num_pages = bo->mem.num_pages;
-               struct drm_mm_node *node = bo->mem.mm_node;
+       unsigned long num_pages = bo->mem.num_pages;
+       struct drm_mm_node *node = bo->mem.mm_node;
+
+       if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
+               return ttm_bo_eviction_valuable(bo, place);
+
+       switch (bo->mem.mem_type) {
+       case TTM_PL_TT:
+               return true;
 
+       case TTM_PL_VRAM:
                /* Check each drm MM node individually */
                while (num_pages) {
                        if (place->fpfn < (node->start + node->size) &&
@@ -1052,8 +1060,10 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
                        num_pages -= node->size;
                        ++node;
                }
                        num_pages -= node->size;
                        ++node;
                }
+               break;
 
-               return false;
+       default:
+               break;
        }
 
        return ttm_bo_eviction_valuable(bo, place);
@@ -1188,7 +1198,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
                return;
        amdgpu_ttm_debugfs_fini(adev);
        if (adev->stollen_vga_memory) {
-               r = amdgpu_bo_reserve(adev->stollen_vga_memory, false);
+               r = amdgpu_bo_reserve(adev->stollen_vga_memory, true);
                if (r == 0) {
                        amdgpu_bo_unpin(adev->stollen_vga_memory);
                        amdgpu_bo_unreserve(adev->stollen_vga_memory);
@@ -1401,6 +1411,8 @@ error_free:
 
 #if defined(CONFIG_DEBUG_FS)
 
+extern void amdgpu_gtt_mgr_print(struct seq_file *m, struct ttm_mem_type_manager
+                                *man);
 static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = (struct drm_info_node *)m->private;
@@ -1414,11 +1426,17 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
        spin_lock(&glob->lru_lock);
        drm_mm_print(mm, &p);
        spin_unlock(&glob->lru_lock);
-       if (ttm_pl == TTM_PL_VRAM)
+       switch (ttm_pl) {
+       case TTM_PL_VRAM:
                seq_printf(m, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
                           adev->mman.bdev.man[ttm_pl].size,
                           (u64)atomic64_read(&adev->vram_usage) >> 20,
                           (u64)atomic64_read(&adev->vram_vis_usage) >> 20);
+               break;
+       case TTM_PL_TT:
+               amdgpu_gtt_mgr_print(m, &adev->mman.bdev.man[TTM_PL_TT]);
+               break;
+       }
        return 0;
 }
 