// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>

#include "i915_drv.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"

#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gem/i915_gem_ttm_move.h"
#include "gem/i915_gem_ttm_pm.h"
19 * i915_ttm_backup_free - Free any backup attached to this object
20 * @obj: The object whose backup is to be freed.
22 void i915_ttm_backup_free(struct drm_i915_gem_object *obj)
24 if (obj->ttm.backup) {
25 i915_gem_object_put(obj->ttm.backup);
26 obj->ttm.backup = NULL;
31 * struct i915_gem_ttm_pm_apply - Apply-to-region subclass for restore
32 * @base: The i915_gem_apply_to_region we derive from.
33 * @allow_gpu: Whether using the gpu blitter is allowed.
34 * @backup_pinned: On backup, backup also pinned objects.
36 struct i915_gem_ttm_pm_apply {
37 struct i915_gem_apply_to_region base;
39 bool backup_pinned : 1;
42 static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
43 struct drm_i915_gem_object *obj)
45 struct i915_gem_ttm_pm_apply *pm_apply =
46 container_of(apply, typeof(*pm_apply), base);
47 struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
48 struct ttm_buffer_object *backup_bo;
49 struct drm_i915_private *i915 =
50 container_of(bo->bdev, typeof(*i915), bdev);
51 struct drm_i915_gem_object *backup;
52 struct ttm_operation_ctx ctx = {};
56 if (!i915_ttm_cpu_maps_iomem(bo->resource) || obj->ttm.backup)
59 if (pm_apply->allow_gpu && i915_gem_object_evictable(obj))
60 return ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);
62 if (!pm_apply->backup_pinned ||
63 (pm_apply->allow_gpu && (obj->flags & I915_BO_ALLOC_PM_EARLY)))
66 if (obj->flags & I915_BO_ALLOC_PM_VOLATILE)
70 * It seems that we might have some framebuffers still pinned at this
71 * stage, but for such objects we might also need to deal with the CCS
72 * aux state. Make sure we force the save/restore of the CCS state,
73 * otherwise we might observe display corruption, when returning from
77 if (i915_gem_object_needs_ccs_pages(obj)) {
78 WARN_ON_ONCE(!i915_gem_object_is_framebuffer(obj));
79 WARN_ON_ONCE(!pm_apply->allow_gpu);
81 flags = I915_BO_ALLOC_CCS_AUX;
83 backup = i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
84 obj->base.size, 0, flags);
86 return PTR_ERR(backup);
88 err = i915_gem_object_lock(backup, apply->ww);
92 backup_bo = i915_gem_to_ttm(backup);
93 err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
97 err = i915_gem_obj_copy_ttm(backup, obj, pm_apply->allow_gpu, false);
100 "Unable to copy from device to system memory, err:%pe\n",
102 goto out_no_populate;
104 ttm_bo_wait_ctx(backup_bo, &ctx);
106 obj->ttm.backup = backup;
110 i915_gem_ww_unlock_single(backup);
112 i915_gem_object_put(backup);
/*
 * Per-object recover callback: drop any backup attached to @obj.
 * Always succeeds, so it reports 0 to keep the region walk going.
 */
static int i915_ttm_recover(struct i915_gem_apply_to_region *apply,
			    struct drm_i915_gem_object *obj)
{
	i915_ttm_backup_free(obj);
	return 0;
}
125 * i915_ttm_recover_region - Free the backup of all objects of a region
126 * @mr: The memory region
128 * Checks all objects of a region if there is backup attached and if so
129 * frees that backup. Typically this is called to recover after a partially
132 void i915_ttm_recover_region(struct intel_memory_region *mr)
134 static const struct i915_gem_apply_to_region_ops recover_ops = {
135 .process_obj = i915_ttm_recover,
137 struct i915_gem_apply_to_region apply = {.ops = &recover_ops};
140 ret = i915_gem_process_region(mr, &apply);
145 * i915_ttm_backup_region - Back up all objects of a region to smem.
146 * @mr: The memory region
147 * @flags: TTM backup flags
149 * Loops over all objects of a region and either evicts them if they are
150 * evictable or backs them up using a backup object if they are pinned.
152 * Return: Zero on success. Negative error code on error.
154 int i915_ttm_backup_region(struct intel_memory_region *mr, u32 flags)
156 static const struct i915_gem_apply_to_region_ops backup_ops = {
157 .process_obj = i915_ttm_backup,
159 struct i915_gem_ttm_pm_apply pm_apply = {
160 .base = {.ops = &backup_ops},
161 .allow_gpu = flags & I915_TTM_BACKUP_ALLOW_GPU,
162 .backup_pinned = flags & I915_TTM_BACKUP_PINNED,
165 return i915_gem_process_region(mr, &pm_apply.base);
168 static int i915_ttm_restore(struct i915_gem_apply_to_region *apply,
169 struct drm_i915_gem_object *obj)
171 struct i915_gem_ttm_pm_apply *pm_apply =
172 container_of(apply, typeof(*pm_apply), base);
173 struct drm_i915_gem_object *backup = obj->ttm.backup;
174 struct ttm_buffer_object *backup_bo = i915_gem_to_ttm(backup);
175 struct ttm_operation_ctx ctx = {};
181 if (!pm_apply->allow_gpu && !(obj->flags & I915_BO_ALLOC_PM_EARLY))
184 err = i915_gem_object_lock(backup, apply->ww);
188 /* Content may have been swapped. */
189 if (!backup_bo->resource)
190 err = ttm_bo_validate(backup_bo, i915_ttm_sys_placement(), &ctx);
192 err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
194 err = i915_gem_obj_copy_ttm(obj, backup, pm_apply->allow_gpu,
197 ttm_bo_wait_ctx(backup_bo, &ctx);
199 obj->ttm.backup = NULL;
203 i915_gem_ww_unlock_single(backup);
206 i915_gem_object_put(backup);
212 * i915_ttm_restore_region - Restore backed-up objects of a region from smem.
213 * @mr: The memory region
214 * @flags: TTM backup flags
216 * Loops over all objects of a region and if they are backed-up, restores
219 * Return: Zero on success. Negative error code on error.
221 int i915_ttm_restore_region(struct intel_memory_region *mr, u32 flags)
223 static const struct i915_gem_apply_to_region_ops restore_ops = {
224 .process_obj = i915_ttm_restore,
226 struct i915_gem_ttm_pm_apply pm_apply = {
227 .base = {.ops = &restore_ops},
228 .allow_gpu = flags & I915_TTM_BACKUP_ALLOW_GPU,
231 return i915_gem_process_region(mr, &pm_apply.base);