// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>

#include "i915_drv.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"

#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gem/i915_gem_ttm_move.h"
#include "gem/i915_gem_ttm_pm.h"

/**
 * i915_ttm_backup_free - Free any backup attached to this object
 * @obj: The object whose backup is to be freed.
 */
void i915_ttm_backup_free(struct drm_i915_gem_object *obj)
{
	if (obj->ttm.backup) {
		i915_gem_object_put(obj->ttm.backup);
		obj->ttm.backup = NULL;
	}
}

/**
 * struct i915_gem_ttm_pm_apply - Apply-to-region subclass for backup and restore
 * @base: The i915_gem_apply_to_region we derive from.
 * @allow_gpu: Whether using the gpu blitter is allowed.
 * @backup_pinned: On backup, also back up pinned objects.
 */
struct i915_gem_ttm_pm_apply {
	struct i915_gem_apply_to_region base;
	bool allow_gpu : 1;
	bool backup_pinned : 1;
};

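/*
 * Back up a single object. With the gpu blitter allowed, evictable objects
 * are simply validated into the TTM system placement. Pinned objects are
 * only considered when backup_pinned is set; those marked PM_VOLATILE, or
 * PM_EARLY when the blitter is available, are skipped. Everything else is
 * copied into a freshly created shmem backup object that is stashed in
 * obj->ttm.backup for the later restore.
 */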
static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
			   struct drm_i915_gem_object *obj)
{
	struct i915_gem_ttm_pm_apply *pm_apply =
		container_of(apply, typeof(*pm_apply), base);
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct ttm_buffer_object *backup_bo;
	struct drm_i915_private *i915 =
		container_of(bo->bdev, typeof(*i915), bdev);
	struct drm_i915_gem_object *backup;
	struct ttm_operation_ctx ctx = {};
	int err = 0;

	if (bo->resource->mem_type == I915_PL_SYSTEM || obj->ttm.backup)
		return 0;

	if (pm_apply->allow_gpu && i915_gem_object_evictable(obj))
		return ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);

	if (!pm_apply->backup_pinned ||
	    (pm_apply->allow_gpu && (obj->flags & I915_BO_ALLOC_PM_EARLY)))
		return 0;

	if (obj->flags & I915_BO_ALLOC_PM_VOLATILE)
		return 0;

	backup = i915_gem_object_create_shmem(i915, obj->base.size);
	if (IS_ERR(backup))
		return PTR_ERR(backup);

	err = i915_gem_object_lock(backup, apply->ww);
	if (err)
		goto out_no_lock;

	backup_bo = i915_gem_to_ttm(backup);
	err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
	if (err)
		goto out_no_populate;

	err = i915_gem_obj_copy_ttm(backup, obj, pm_apply->allow_gpu, false);
	if (err) {
		drm_err(&i915->drm,
			"Unable to copy from device to system memory, err:%pe\n",
			ERR_PTR(err));
		goto out_no_populate;
	}
	ttm_bo_wait_ctx(backup_bo, &ctx);

	obj->ttm.backup = backup;
	return 0;

out_no_populate:
	i915_gem_ww_unlock_single(backup);
out_no_lock:
	i915_gem_object_put(backup);

	return err;
}

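/* Per-object recover callback: just drop any attached backup. */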
static int i915_ttm_recover(struct i915_gem_apply_to_region *apply,
			    struct drm_i915_gem_object *obj)
{
	i915_ttm_backup_free(obj);
	return 0;
}

/**
 * i915_ttm_recover_region - Free the backup of all objects of a region
 * @mr: The memory region
 *
 * Checks all objects of a region for an attached backup and, if one is
 * present, frees it. Typically this is called to recover after a partially
 * performed backup.
 */
void i915_ttm_recover_region(struct intel_memory_region *mr)
{
	static const struct i915_gem_apply_to_region_ops recover_ops = {
		.process_obj = i915_ttm_recover,
	};
	struct i915_gem_apply_to_region apply = {.ops = &recover_ops};
	int ret;

	ret = i915_gem_process_region(mr, &apply);
	GEM_WARN_ON(ret);
}

/**
 * i915_ttm_backup_region - Back up all objects of a region to smem.
 * @mr: The memory region
 * @flags: Flags controlling the backup: %I915_TTM_BACKUP_ALLOW_GPU allows
 * the gpu blitter to be used, %I915_TTM_BACKUP_PINNED also backs up pinned
 * objects.
 *
 * Loops over all objects of a region and either evicts them if they are
 * evictable or backs them up using a backup object if they are pinned.
 *
 * Return: Zero on success. Negative error code on error.
 */
int i915_ttm_backup_region(struct intel_memory_region *mr, u32 flags)
{
	static const struct i915_gem_apply_to_region_ops backup_ops = {
		.process_obj = i915_ttm_backup,
	};
	struct i915_gem_ttm_pm_apply pm_apply = {
		.base = {.ops = &backup_ops},
		.allow_gpu = flags & I915_TTM_BACKUP_ALLOW_GPU,
		.backup_pinned = flags & I915_TTM_BACKUP_PINNED,
	};

	return i915_gem_process_region(mr, &pm_apply.base);
}
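
/*
 * Illustrative sketch (not the actual caller): a suspend path would
 * typically back up each local-memory region and fall back to
 * i915_ttm_recover_region() if a partial backup has to be abandoned.
 * The @i915 pointer and the flag choice below are assumptions for the
 * example; the loop uses the driver's for_each_memory_region() iterator.
 *
 *	struct intel_memory_region *mr;
 *	int id, ret = 0;
 *
 *	for_each_memory_region(mr, i915, id) {
 *		if (mr->type != INTEL_MEMORY_LOCAL)
 *			continue;
 *		ret = i915_ttm_backup_region(mr, I915_TTM_BACKUP_ALLOW_GPU |
 *						 I915_TTM_BACKUP_PINNED);
 *		if (ret) {
 *			// Undo the partial backup of this region.
 *			i915_ttm_recover_region(mr);
 *			break;
 *		}
 *	}
 */
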
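/*
 * Restore a single object from its shmem backup, if any. Without the gpu
 * blitter only objects marked I915_BO_ALLOC_PM_EARLY are restored; the
 * backup object is released once the copy has completed.
 */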
static int i915_ttm_restore(struct i915_gem_apply_to_region *apply,
			    struct drm_i915_gem_object *obj)
{
	struct i915_gem_ttm_pm_apply *pm_apply =
		container_of(apply, typeof(*pm_apply), base);
	struct drm_i915_gem_object *backup = obj->ttm.backup;
	struct ttm_buffer_object *backup_bo = i915_gem_to_ttm(backup);
	struct ttm_operation_ctx ctx = {};
	int err;

	if (!backup)
		return 0;

	if (!pm_apply->allow_gpu && !(obj->flags & I915_BO_ALLOC_PM_EARLY))
		return 0;

	err = i915_gem_object_lock(backup, apply->ww);
	if (err)
		return err;

	/* Content may have been swapped. */
	err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
	if (!err) {
		err = i915_gem_obj_copy_ttm(obj, backup, pm_apply->allow_gpu,
					    false);
		GEM_WARN_ON(err);
		ttm_bo_wait_ctx(backup_bo, &ctx);

		obj->ttm.backup = NULL;
		err = 0;
	}

	i915_gem_ww_unlock_single(backup);

	if (!err)
		i915_gem_object_put(backup);

	return err;
}

/**
 * i915_ttm_restore_region - Restore backed-up objects of a region from smem.
 * @mr: The memory region
 * @flags: Flags controlling the restore: %I915_TTM_BACKUP_ALLOW_GPU allows
 * the gpu blitter to be used for the copy back.
 *
 * Loops over all objects of a region and, if they are backed up, restores
 * them from smem.
 *
 * Return: Zero on success. Negative error code on error.
 */
int i915_ttm_restore_region(struct intel_memory_region *mr, u32 flags)
{
	static const struct i915_gem_apply_to_region_ops restore_ops = {
		.process_obj = i915_ttm_restore,
	};
	struct i915_gem_ttm_pm_apply pm_apply = {
		.base = {.ops = &restore_ops},
		.allow_gpu = flags & I915_TTM_BACKUP_ALLOW_GPU,
	};

	return i915_gem_process_region(mr, &pm_apply.base);
}
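
/*
 * Illustrative sketch (not the actual caller): a resume path would
 * typically restore the early-restore objects first, before the gpu is
 * usable, and then do a second pass allowing the blitter. The @i915
 * pointer and the two-pass flag split below are assumptions for the
 * example.
 *
 *	struct intel_memory_region *mr;
 *	int id, ret;
 *
 *	for_each_memory_region(mr, i915, id) {
 *		if (mr->type != INTEL_MEMORY_LOCAL)
 *			continue;
 *		// Only I915_BO_ALLOC_PM_EARLY objects, memcpy restore.
 *		ret = i915_ttm_restore_region(mr, 0);
 *		if (ret)
 *			break;
 *		// Everything else once the gpu blitter is available.
 *		ret = i915_ttm_restore_region(mr, I915_TTM_BACKUP_ALLOW_GPU);
 *		if (ret)
 *			break;
 *	}
 */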