/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_pm.h"
#include "gt/intel_gt_pm.h"

#include "i915_drv.h"
#include "i915_globals.h"
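
/*
 * GEM power management glue: parking and unparking the GEM bookkeeping as
 * the GT transitions between busy and idle, a periodic request-retire
 * worker, and the suspend/resume entry points used by the rest of the
 * driver.
 */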

static void i915_gem_park(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&i915->drm.struct_mutex);

	for_each_engine(engine, i915, id)
		i915_gem_batch_pool_fini(&engine->batch_pool);

	i915_timelines_park(i915);
	i915_vma_parked(i915);

	i915_globals_park();
}

static void idle_work_handler(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, typeof(*i915), gem.idle_work);
	bool restart = true;

	cancel_delayed_work(&i915->gem.retire_work);
	mutex_lock(&i915->drm.struct_mutex);

	intel_wakeref_lock(&i915->gt.wakeref);
	if (!intel_wakeref_active(&i915->gt.wakeref) && !work_pending(work)) {
		i915_gem_park(i915);
		restart = false;
	}
	intel_wakeref_unlock(&i915->gt.wakeref);

	mutex_unlock(&i915->drm.struct_mutex);
	if (restart)
		queue_delayed_work(i915->wq,
				   &i915->gem.retire_work,
				   round_jiffies_up_relative(HZ));
}
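
/*
 * Note the ordering in idle_work_handler(): the retire worker is cancelled
 * first, and we only park if the GT wakeref is still inactive and no new
 * park request has been queued behind us. If the device became busy again
 * in the meantime, the retire worker is simply restarted instead.
 */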

static void retire_work_handler(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, typeof(*i915), gem.retire_work.work);

	/* Come back later if the device is busy... */
	if (mutex_trylock(&i915->drm.struct_mutex)) {
		i915_retire_requests(i915);
		mutex_unlock(&i915->drm.struct_mutex);
	}

	queue_delayed_work(i915->wq,
			   &i915->gem.retire_work,
			   round_jiffies_up_relative(HZ));
}
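
/*
 * retire_work_handler() is a self-rearming delayed work item: it requeues
 * itself roughly once a second (rounded up to a jiffy boundary so wakeups
 * can be batched) until idle_work_handler() cancels it when parking. A
 * minimal sketch of the same pattern, with hypothetical names:
 *
 *	static void tick(struct work_struct *work)
 *	{
 *		struct my_dev *dev =
 *			container_of(work, typeof(*dev), tick_work.work);
 *
 *		do_housekeeping(dev);
 *		queue_delayed_work(dev->wq, &dev->tick_work,
 *				   round_jiffies_up_relative(HZ));
 *	}
 */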

static int pm_notifier(struct notifier_block *nb,
		       unsigned long action,
		       void *data)
{
	struct drm_i915_private *i915 =
		container_of(nb, typeof(*i915), gem.pm_notifier);

	switch (action) {
	case INTEL_GT_UNPARK:
		i915_globals_unpark();
		queue_delayed_work(i915->wq,
				   &i915->gem.retire_work,
				   round_jiffies_up_relative(HZ));
		break;

	case INTEL_GT_PARK:
		queue_work(i915->wq, &i915->gem.idle_work);
		break;
	}

	return NOTIFY_OK;
}
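
/*
 * pm_notifier() is registered on i915->gt.pm_notifications in
 * i915_gem_init__pm() below; the GT power management code is expected to
 * post INTEL_GT_UNPARK/INTEL_GT_PARK events on that chain as the device
 * leaves and enters its idle state.
 */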

static bool switch_to_kernel_context_sync(struct drm_i915_private *i915)
{
	bool result = true;

	do {
		if (i915_gem_wait_for_idle(i915,
					   I915_WAIT_LOCKED |
					   I915_WAIT_FOR_IDLE_BOOST,
					   I915_GEM_IDLE_TIMEOUT) == -ETIME) {
			/* XXX hide warning from gem_eio */
			if (i915_modparams.reset) {
				dev_err(i915->drm.dev,
					"Failed to idle engines, declaring wedged!\n");
				GEM_TRACE_DUMP();
			}

			/*
			 * Forcibly cancel outstanding work and leave
			 * the GPU quiet.
			 */
			i915_gem_set_wedged(i915);
			result = false;
		}
	} while (i915_retire_requests(i915) && result);

	GEM_BUG_ON(i915->gt.awake);
	return result;
}
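
/*
 * Note the retry loop above: retiring completed requests can itself ready
 * more work, so we keep waiting until a pass of i915_retire_requests()
 * retires nothing further, or until we have wedged the device.
 */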

bool i915_gem_load_power_context(struct drm_i915_private *i915)
{
	return switch_to_kernel_context_sync(i915);
}

void i915_gem_suspend(struct drm_i915_private *i915)
{
	GEM_TRACE("\n");

	intel_wakeref_auto(&i915->mm.userfault_wakeref, 0);
	flush_workqueue(i915->wq);

	mutex_lock(&i915->drm.struct_mutex);

	/*
	 * We have to flush all the executing contexts to main memory so
	 * that they can be saved in the hibernation image. To ensure the
	 * last context image is coherent, we have to switch away from it.
	 * That leaves the i915->kernel_context still active when we
	 * actually suspend, and its image in memory may not match the GPU
	 * state. Fortunately, the kernel_context is disposable and we do
	 * not rely on its state.
	 */
	switch_to_kernel_context_sync(i915);

	mutex_unlock(&i915->drm.struct_mutex);

	/*
	 * Assert that we successfully flushed all the work and
	 * reset the GPU back to its idle, low power state.
	 */
	GEM_BUG_ON(i915->gt.awake);
	flush_work(&i915->gem.idle_work);

	cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);

	i915_gem_drain_freed_objects(i915);

	intel_uc_suspend(i915);
}
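
/*
 * A rough sketch of the expected suspend ordering (the actual wiring lives
 * in the driver's system PM code): i915_gem_suspend() runs early, while we
 * can still switch contexts and idle the GPU, and i915_gem_suspend_late()
 * follows once no further GPU activity is expected:
 *
 *	i915_gem_suspend(i915);
 *	...
 *	i915_gem_suspend_late(i915);
 */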

void i915_gem_suspend_late(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	struct list_head *phases[] = {
		&i915->mm.unbound_list,
		&i915->mm.bound_list,
		NULL
	}, **phase;

	/*
	 * Neither the BIOS, ourselves nor any other kernel
	 * expects the system to be in execlists mode on startup,
	 * so we need to reset the GPU back to legacy mode. And the only
	 * known way to disable logical contexts is through a GPU reset.
	 *
	 * So in order to leave the system in a known default configuration,
	 * always reset the GPU upon unload and suspend. Afterwards we then
	 * clean up the GEM state tracking, flushing off the requests and
	 * leaving the system in a known idle state.
	 *
	 * Note that it is of the utmost importance that the GPU is idle and
	 * all stray writes are flushed *before* we dismantle the backing
	 * storage for the pinned objects.
	 *
	 * However, since we are uncertain that resetting the GPU on older
	 * machines is a good idea, we don't - just in case it leaves the
	 * machine in an unusable condition.
	 */

	for (phase = phases; *phase; phase++) {
		list_for_each_entry(obj, *phase, mm.link) {
			i915_gem_object_lock(obj);
			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
			i915_gem_object_unlock(obj);
		}
	}

	intel_uc_sanitize(i915);
	i915_gem_sanitize(i915);
}
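
/*
 * The loop above walks a NULL-terminated array of object lists so that the
 * unbound and bound lists are flushed identically. The sentinel idiom in
 * isolation (hypothetical lists):
 *
 *	struct list_head *phases[] = { &list_a, &list_b, NULL }, **phase;
 *
 *	for (phase = phases; *phase; phase++)
 *		operate_on(*phase);
 */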

void i915_gem_resume(struct drm_i915_private *i915)
{
	GEM_TRACE("\n");

	WARN_ON(i915->gt.awake);

	mutex_lock(&i915->drm.struct_mutex);
	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);

	i915_gem_restore_gtt_mappings(i915);
	i915_gem_restore_fences(i915);

	/*
	 * As we didn't flush the kernel context before suspend, we cannot
	 * guarantee that the context image is complete. So let's just reset
	 * it and start again.
	 */
	intel_gt_resume(i915);

	if (i915_gem_init_hw(i915))
		goto err_wedged;

	intel_uc_resume(i915);

	/* Always reload a context for powersaving. */
	if (!i915_gem_load_power_context(i915))
		goto err_wedged;

out_unlock:
	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
	mutex_unlock(&i915->drm.struct_mutex);
	return;

err_wedged:
	if (!i915_reset_failed(i915)) {
		dev_err(i915->drm.dev,
			"Failed to re-initialize GPU, declaring it wedged!\n");
		i915_gem_set_wedged(i915);
	}
	goto out_unlock;
}

void i915_gem_init__pm(struct drm_i915_private *i915)
{
	INIT_WORK(&i915->gem.idle_work, idle_work_handler);
	INIT_DELAYED_WORK(&i915->gem.retire_work, retire_work_handler);

	i915->gem.pm_notifier.notifier_call = pm_notifier;
	blocking_notifier_chain_register(&i915->gt.pm_notifications,
					 &i915->gem.pm_notifier);
}
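
/*
 * A minimal usage sketch (assuming the usual driver init ordering; the
 * real call site is in the GEM init code): after
 *
 *	i915_gem_init__pm(i915);
 *
 * the GT power management code drives parking through the notifier chain,
 * e.g.
 *
 *	blocking_notifier_call_chain(&i915->gt.pm_notifications,
 *				     INTEL_GT_UNPARK, i915);
 */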