11890e96ed65d9e3c90d294e992b36f7193f2148
[sfrench/cifs-2.6.git] / drivers / gpu / drm / i915 / gem / i915_gem_pm.c
1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2019 Intel Corporation
5  */
6
7 #include "gem/i915_gem_pm.h"
8 #include "gt/intel_gt_pm.h"
9
10 #include "i915_drv.h"
11 #include "i915_globals.h"
12
/*
 * Park the GEM bookkeeping once the GPU has gone idle: release the
 * per-engine batch pools, park the timelines and VMAs, and finally
 * drop the global activity reference.
 *
 * Caller must hold struct_mutex (asserted below).
 */
static void i915_gem_park(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&i915->drm.struct_mutex);

	/* Free the batch-buffer pools while no engine can be using them. */
	for_each_engine(engine, i915, id)
		i915_gem_batch_pool_fini(&engine->batch_pool);

	i915_timelines_park(i915);
	i915_vma_parked(i915);

	/*
	 * Last: tell the globals we are idle.
	 * NOTE(review): presumably allows global caches to be reaped while
	 * parked — confirm against i915_globals.c.
	 */
	i915_globals_park();
}
28
/*
 * Deferred idle handler, queued from the INTEL_GT_PARK notification.
 *
 * Under struct_mutex and the GT wakeref lock, park the GEM state —
 * but only if the GT is still idle and this work has not been requeued
 * in the meantime. If parking was aborted, rearm the periodic retire
 * worker so retirement continues while busy.
 */
static void idle_work_handler(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, typeof(*i915), gem.idle_work);
	bool restart = true;

	/* Stop periodic retirement before attempting to park. */
	cancel_delayed_work(&i915->gem.retire_work);
	mutex_lock(&i915->drm.struct_mutex);

	/*
	 * work_pending() catches the race where the GT was unparked and
	 * parked again after this work was dequeued but before we got here;
	 * in that case the fresh queue entry will redo the check.
	 */
	intel_wakeref_lock(&i915->gt.wakeref);
	if (!intel_wakeref_active(&i915->gt.wakeref) && !work_pending(work)) {
		i915_gem_park(i915);
		restart = false;
	}
	intel_wakeref_unlock(&i915->gt.wakeref);

	mutex_unlock(&i915->drm.struct_mutex);
	if (restart)
		queue_delayed_work(i915->wq,
				   &i915->gem.retire_work,
				   round_jiffies_up_relative(HZ));
}
51
52 static void retire_work_handler(struct work_struct *work)
53 {
54         struct drm_i915_private *i915 =
55                 container_of(work, typeof(*i915), gem.retire_work.work);
56
57         /* Come back later if the device is busy... */
58         if (mutex_trylock(&i915->drm.struct_mutex)) {
59                 i915_retire_requests(i915);
60                 mutex_unlock(&i915->drm.struct_mutex);
61         }
62
63         queue_delayed_work(i915->wq,
64                            &i915->gem.retire_work,
65                            round_jiffies_up_relative(HZ));
66 }
67
68 static int pm_notifier(struct notifier_block *nb,
69                        unsigned long action,
70                        void *data)
71 {
72         struct drm_i915_private *i915 =
73                 container_of(nb, typeof(*i915), gem.pm_notifier);
74
75         switch (action) {
76         case INTEL_GT_UNPARK:
77                 i915_globals_unpark();
78                 queue_delayed_work(i915->wq,
79                                    &i915->gem.retire_work,
80                                    round_jiffies_up_relative(HZ));
81                 break;
82
83         case INTEL_GT_PARK:
84                 queue_work(i915->wq, &i915->gem.idle_work);
85                 break;
86         }
87
88         return NOTIFY_OK;
89 }
90
/*
 * Idle the GPU by switching all engines away from user contexts,
 * waiting synchronously for completion. On timeout the device is
 * declared wedged so outstanding work is cancelled and the GPU is left
 * quiet. Retirement is repeated until no further requests complete.
 *
 * Caller must hold struct_mutex (I915_WAIT_LOCKED).
 *
 * Returns true if the GPU idled cleanly, false if it had to be wedged.
 */
static bool switch_to_kernel_context_sync(struct drm_i915_private *i915)
{
	bool result = true;

	do {
		if (i915_gem_wait_for_idle(i915,
					   I915_WAIT_LOCKED |
					   I915_WAIT_FOR_IDLE_BOOST,
					   I915_GEM_IDLE_TIMEOUT) == -ETIME) {
			/* XXX hide warning from gem_eio */
			if (i915_modparams.reset) {
				dev_err(i915->drm.dev,
					"Failed to idle engines, declaring wedged!\n");
				GEM_TRACE_DUMP();
			}

			/*
			 * Forcibly cancel outstanding work and leave
			 * the gpu quiet.
			 */
			i915_gem_set_wedged(i915);
			result = false;
		}
	} while (i915_retire_requests(i915) && result);

	GEM_BUG_ON(i915->gt.awake);
	return result;
}
119
/*
 * Reload a (kernel) context for powersaving by idling the GPU; thin
 * wrapper over switch_to_kernel_context_sync().
 *
 * Returns true on success, false if the device was wedged.
 */
bool i915_gem_load_power_context(struct drm_i915_private *i915)
{
	return switch_to_kernel_context_sync(i915);
}
124
/*
 * i915_gem_suspend - flush GEM activity ahead of system suspend/hibernate.
 *
 * Revokes userspace mmaps, flushes the workers, switches every engine
 * back to the kernel context so user context images are coherent in
 * memory, and finally suspends the microcontrollers (uC).
 */
void i915_gem_suspend(struct drm_i915_private *i915)
{
	GEM_TRACE("\n");

	/* Revoke userspace GTT mmaps and drop their auto wakeref. */
	intel_wakeref_auto(&i915->mm.userfault_wakeref, 0);
	flush_workqueue(i915->wq);

	mutex_lock(&i915->drm.struct_mutex);

	/*
	 * We have to flush all the executing contexts to main memory so
	 * that they can be saved in the hibernation image. To ensure the last
	 * context image is coherent, we have to switch away from it. That
	 * leaves the i915->kernel_context still active when
	 * we actually suspend, and its image in memory may not match the GPU
	 * state. Fortunately, the kernel_context is disposable and we do
	 * not rely on its state.
	 */
	switch_to_kernel_context_sync(i915);

	mutex_unlock(&i915->drm.struct_mutex);

	/*
	 * Assert that we successfully flushed all the work and
	 * reset the GPU back to its idle, low power state.
	 */
	GEM_BUG_ON(i915->gt.awake);
	flush_work(&i915->gem.idle_work);

	/* No more hangchecks until we resume. */
	cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);

	i915_gem_drain_freed_objects(i915);

	intel_uc_suspend(i915);
}
160
/*
 * i915_gem_suspend_late - final GEM teardown on the suspend path.
 *
 * Flushes every bound and unbound object out of the GTT write domain so
 * no stray GPU writes remain, then sanitizes the uC and GEM state.
 */
void i915_gem_suspend_late(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	/* NULL-terminated list of object lists to walk, in order. */
	struct list_head *phases[] = {
		&i915->mm.unbound_list,
		&i915->mm.bound_list,
		NULL
	}, **phase;

	/*
	 * Neither the BIOS, ourselves or any other kernel
	 * expects the system to be in execlists mode on startup,
	 * so we need to reset the GPU back to legacy mode. And the only
	 * known way to disable logical contexts is through a GPU reset.
	 *
	 * So in order to leave the system in a known default configuration,
	 * always reset the GPU upon unload and suspend. Afterwards we then
	 * clean up the GEM state tracking, flushing off the requests and
	 * leaving the system in a known idle state.
	 *
	 * Note that is of the upmost importance that the GPU is idle and
	 * all stray writes are flushed *before* we dismantle the backing
	 * storage for the pinned objects.
	 *
	 * However, since we are uncertain that resetting the GPU on older
	 * machines is a good idea, we don't - just in case it leaves the
	 * machine in an unusable condition.
	 */

	for (phase = phases; *phase; phase++) {
		list_for_each_entry(obj, *phase, mm.link) {
			/* Flush GPU writes: move each object out of the GTT domain. */
			i915_gem_object_lock(obj);
			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
			i915_gem_object_unlock(obj);
		}
	}

	intel_uc_sanitize(i915);
	i915_gem_sanitize(i915);
}
201
/*
 * i915_gem_resume - restore GEM state after system resume.
 *
 * Restores GTT mappings and fences, resumes the GT and uC, reloads a
 * context for powersaving, and wedges the device (with a loud error)
 * if any step of reinitialization fails.
 */
void i915_gem_resume(struct drm_i915_private *i915)
{
	GEM_TRACE("\n");

	WARN_ON(i915->gt.awake);

	mutex_lock(&i915->drm.struct_mutex);
	/* Hold forcewake across the whole of hardware reinit. */
	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);

	i915_gem_restore_gtt_mappings(i915);
	i915_gem_restore_fences(i915);

	/*
	 * As we didn't flush the kernel context before suspend, we cannot
	 * guarantee that the context image is complete. So let's just reset
	 * it and start again.
	 */
	intel_gt_resume(i915);

	if (i915_gem_init_hw(i915))
		goto err_wedged;

	intel_uc_resume(i915);

	/* Always reload a context for powersaving. */
	if (!i915_gem_load_power_context(i915))
		goto err_wedged;

out_unlock:
	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
	mutex_unlock(&i915->drm.struct_mutex);
	return;

err_wedged:
	/* Only report once; the device may already be wedged. */
	if (!i915_reset_failed(i915)) {
		dev_err(i915->drm.dev,
			"Failed to re-initialize GPU, declaring it wedged!\n");
		i915_gem_set_wedged(i915);
	}
	goto out_unlock;
}
243
244 void i915_gem_init__pm(struct drm_i915_private *i915)
245 {
246         INIT_WORK(&i915->gem.idle_work, idle_work_handler);
247         INIT_DELAYED_WORK(&i915->gem.retire_work, retire_work_handler);
248
249         i915->gem.pm_notifier.notifier_call = pm_notifier;
250         blocking_notifier_chain_register(&i915->gt.pm_notifications,
251                                          &i915->gem.pm_notifier);
252 }