/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/suspend.h>

#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_params.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_llc.h"
#include "intel_pm.h"
#include "intel_rc6.h"
#include "intel_rps.h"
#include "intel_wakeref.h"

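/*
 * Userspace can hold additional forcewake references on the GT (tracked in
 * gt->user_wakeref). Move those references out of the GT wakeref count
 * while suspending, and restore them on resume, so that the GT itself can
 * still be parked across suspend.
 */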
static void user_forcewake(struct intel_gt *gt, bool suspend)
{
        int count = atomic_read(&gt->user_wakeref);

        /* Inside suspend/resume so single threaded, no races to worry about. */
        if (likely(!count))
                return;

        intel_gt_pm_get(gt);
        if (suspend) {
                GEM_BUG_ON(count > atomic_read(&gt->wakeref.count));
                atomic_sub(count, &gt->wakeref.count);
        } else {
                atomic_add(count, &gt->wakeref.count);
        }
        intel_gt_pm_put(gt);
}

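/*
 * __gt_unpark() and __gt_park() are the intel_wakeref callbacks backing the
 * GT wakeref: taking the first reference wakes the GT up here, and dropping
 * the last reference parks it again in __gt_park().
 */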
static int __gt_unpark(struct intel_wakeref *wf)
{
        struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
        struct drm_i915_private *i915 = gt->i915;

        GT_TRACE(gt, "\n");

        i915_globals_unpark();

        /*
         * It seems that the DMC likes to transition between the DC states a lot
         * when there are no connected displays (no active power domains) during
         * command submission.
         *
         * This activity has a negative impact on the performance of the chip,
         * with huge latencies observed in the interrupt handler and elsewhere.
         *
         * Work around it by grabbing a GT IRQ power domain whilst there is any
         * GT activity, preventing any DC state transitions.
         */
        gt->awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
        GEM_BUG_ON(!gt->awake);

        intel_rc6_unpark(&gt->rc6);
        intel_rps_unpark(&gt->rps);
        i915_pmu_gt_unparked(i915);

        intel_gt_unpark_requests(gt);

        return 0;
}

static int __gt_park(struct intel_wakeref *wf)
{
        struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
        intel_wakeref_t wakeref = fetch_and_zero(&gt->awake);
        struct drm_i915_private *i915 = gt->i915;

        GT_TRACE(gt, "\n");

        intel_gt_park_requests(gt);

        i915_vma_parked(gt);
        i915_pmu_gt_parked(i915);
        intel_rps_park(&gt->rps);
        intel_rc6_park(&gt->rc6);

        /* Everything switched off, flush any residual interrupt just in case */
        intel_synchronize_irq(i915);

        /* Defer dropping the display power well for 100ms, it's slow! */
        GEM_BUG_ON(!wakeref);
        intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref);

        i915_globals_park();

        return 0;
}

static const struct intel_wakeref_ops wf_ops = {
        .get = __gt_unpark,
        .put = __gt_park,
};

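/* Tie the GT wakeref to runtime pm and to the park/unpark callbacks above. */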
void intel_gt_pm_init_early(struct intel_gt *gt)
{
        intel_wakeref_init(&gt->wakeref, gt->uncore->rpm, &wf_ops);
}

void intel_gt_pm_init(struct intel_gt *gt)
{
        /*
         * Enabling power-management should be "self-healing". If we cannot
         * enable a feature, simply leave it disabled with a notice to the
         * user.
         */
        intel_rc6_init(&gt->rc6);
        intel_rps_init(&gt->rps);
}

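/* A full GPU reset is only an option if it will not take out the display. */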
static bool reset_engines(struct intel_gt *gt)
{
        if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
                return false;

        return __intel_gt_reset(gt, ALL_ENGINES) == 0;
}

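/*
 * Bring the GT back to a known state around suspend/resume: clear any
 * wedged status if we can, let the uc and engine backends prepare for
 * reset, perform a full GPU reset where that will not clobber the display,
 * and reset the per-engine state when the hardware reset succeeded (or when
 * a reset is forced).
 */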
static void gt_sanitize(struct intel_gt *gt, bool force)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        intel_wakeref_t wakeref;

        GT_TRACE(gt, "force:%s", yesno(force));

        /* Use a raw wakeref to avoid calling intel_display_power_get early */
        wakeref = intel_runtime_pm_get(gt->uncore->rpm);
        intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);

        /*
         * As we have just resumed the machine and woken the device up from
         * deep PCI sleep (presumably D3_cold), assume the HW has been reset
         * back to defaults, recovering from whatever wedged state we left it
         * in and so worth trying to use the device once more.
         */
        if (intel_gt_is_wedged(gt))
                intel_gt_unset_wedged(gt);

        intel_uc_sanitize(&gt->uc);

        for_each_engine(engine, gt, id)
                if (engine->reset.prepare)
                        engine->reset.prepare(engine);

        intel_uc_reset_prepare(&gt->uc);

        if (reset_engines(gt) || force) {
                for_each_engine(engine, gt, id)
                        __intel_engine_reset(engine, false);
        }

        for_each_engine(engine, gt, id)
                if (engine->reset.finish)
                        engine->reset.finish(engine);

        intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
        intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}

void intel_gt_pm_fini(struct intel_gt *gt)
{
        intel_rc6_fini(&gt->rc6);
}

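/*
 * intel_gt_resume() brings the GT back up after suspend (or a reset):
 * sanitize the hardware, reprogram it via intel_gt_init_hw(), restart every
 * engine and re-enable power management. Any failure wedges the GT.
 */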
int intel_gt_resume(struct intel_gt *gt)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int err;

        err = intel_gt_has_init_error(gt);
        if (err)
                return err;

        GT_TRACE(gt, "\n");

        /*
         * After resume, we may need to poke into the pinned kernel
         * contexts to paper over any damage caused by the sudden suspend.
         * Only the kernel contexts should remain pinned over suspend,
         * allowing us to fix up the user contexts on their first pin.
         */
        intel_gt_pm_get(gt);

        intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
        intel_rc6_sanitize(&gt->rc6);
        gt_sanitize(gt, true);
        if (intel_gt_is_wedged(gt)) {
                err = -EIO;
                goto out_fw;
        }

        /* Only when the HW is re-initialised can we replay the requests */
        err = intel_gt_init_hw(gt);
        if (err) {
                dev_err(gt->i915->drm.dev,
                        "Failed to initialize GPU, declaring it wedged!\n");
                goto err_wedged;
        }

        intel_rps_enable(&gt->rps);
        intel_llc_enable(&gt->llc);

        for_each_engine(engine, gt, id) {
                intel_engine_pm_get(engine);

                engine->serial++; /* kernel context lost */
                err = engine->resume(engine);

                intel_engine_pm_put(engine);
                if (err) {
                        dev_err(gt->i915->drm.dev,
                                "Failed to restart %s (%d)\n",
                                engine->name, err);
                        goto err_wedged;
                }
        }

        intel_rc6_enable(&gt->rc6);

        intel_uc_resume(&gt->uc);

        user_forcewake(gt, false);

out_fw:
        intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
        intel_gt_pm_put(gt);
        return err;

err_wedged:
        intel_gt_set_wedged(gt);
        goto out_fw;
}

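/*
 * Flush outstanding work before suspending. If the GT does not idle within
 * I915_GEM_IDLE_TIMEOUT, wedge it and cancel the remaining requests so that
 * we can still power down.
 */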
static void wait_for_suspend(struct intel_gt *gt)
{
        if (!intel_gt_pm_is_awake(gt))
                return;

        if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
                /*
                 * Forcibly cancel outstanding work and leave
                 * the gpu quiet.
                 */
                intel_gt_set_wedged(gt);
                intel_gt_retire_requests(gt);
        }

        intel_gt_pm_wait_for_idle(gt);
}

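/*
 * First phase of system suspend: set aside any user-held forcewake
 * references, wait for the GT to idle and suspend the microcontrollers
 * while the device is still fully powered.
 */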
void intel_gt_suspend_prepare(struct intel_gt *gt)
{
        user_forcewake(gt, true);
        wait_for_suspend(gt);

        intel_uc_suspend(&gt->uc);
}

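/*
 * Without CONFIG_SUSPEND and CONFIG_PM_SLEEP there is no system suspend
 * target to query, so behave as if we were entering suspend-to-idle and
 * keep the device state intact.
 */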
static suspend_state_t pm_suspend_target(void)
{
#if IS_ENABLED(CONFIG_SUSPEND) && IS_ENABLED(CONFIG_PM_SLEEP)
        return pm_suspend_target_state;
#else
        return PM_SUSPEND_TO_IDLE;
#endif
}

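/*
 * Final step of system suspend for the GT: power management is switched
 * off and the hardware sanitized, unless we are only entering s2idle, in
 * which case the device stays powered and is left untouched.
 */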
void intel_gt_suspend_late(struct intel_gt *gt)
{
        intel_wakeref_t wakeref;

        /* We expect to be idle already, but also want to be independent */
        wait_for_suspend(gt);

        if (is_mock_gt(gt))
                return;

        GEM_BUG_ON(gt->awake);

        /*
         * On disabling the device, we want to turn off HW access to memory
         * that we no longer own.
         *
         * However, not all suspend-states disable the device. S0 (s2idle)
         * is effectively runtime-suspend, the device is left powered on
         * but needs to be put into a low power state. We need to keep
         * power management enabled, but we also retain system state and so
         * it remains safe to keep on using our allocated memory.
         */
        if (pm_suspend_target() == PM_SUSPEND_TO_IDLE)
                return;

        with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
                intel_rps_disable(&gt->rps);
                intel_rc6_disable(&gt->rc6);
                intel_llc_disable(&gt->llc);
        }

        gt_sanitize(gt, false);

        GT_TRACE(gt, "\n");
}

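/*
 * Runtime PM entry points for the GT: hand the microcontrollers over to
 * their own suspend/resume paths and restore swizzling on resume.
 */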
void intel_gt_runtime_suspend(struct intel_gt *gt)
{
        intel_uc_runtime_suspend(&gt->uc);

        GT_TRACE(gt, "\n");
}

int intel_gt_runtime_resume(struct intel_gt *gt)
{
        GT_TRACE(gt, "\n");
        intel_gt_init_swizzling(gt);

        return intel_uc_runtime_resume(&gt->uc);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_gt_pm.c"
#endif