/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */

#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>

#include <drm/drm_print.h>

#include "i915_drv.h"
#include "intel_drv.h"

/**
 * DOC: runtime pm
 *
 * The i915 driver supports dynamic enabling and disabling of entire hardware
 * blocks at runtime. This is especially important on the display side where
 * software is supposed to control many power gates manually on recent hardware,
 * since on the GT side a lot of the power management is done by the hardware.
 * But even there some manual control at the device level is required.
 *
 * Since i915 supports a diverse set of platforms with a unified codebase and
 * hardware engineers just love to shuffle functionality around between power
 * domains there's a sizeable amount of indirection required. This file provides
 * generic functions to the driver for grabbing and releasing references for
 * abstract power domains. It then maps those to the actual power wells
 * present for a given platform.
 */

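/*
 * A minimal usage sketch (illustration only, not driver code): every block
 * of hardware access is bracketed by a wakeref cookie obtained from and
 * returned to this file:
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_runtime_pm_get(rpm);
 *	... device is guaranteed to be powered up here ...
 *	intel_runtime_pm_put(rpm, wakeref);
 *
 * With CONFIG_DRM_I915_DEBUG_RUNTIME_PM the cookie records the acquiring
 * call stack, so unbalanced references can be reported with their origin.
 */
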
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

#include <linux/sort.h>

#define STACKDEPTH 8

static noinline depot_stack_handle_t __save_depot_stack(void)
{
	unsigned long entries[STACKDEPTH];
	unsigned int n;

	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
	return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
}

static void __print_depot_stack(depot_stack_handle_t stack,
				char *buf, int sz, int indent)
{
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(stack, &entries);
	stack_trace_snprint(buf, sz, entries, nr_entries, indent);
}

static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
	spin_lock_init(&rpm->debug.lock);
}

static noinline depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
	depot_stack_handle_t stack, *stacks;
	unsigned long flags;

	if (!rpm->available)
		return -1;

	stack = __save_depot_stack();
	if (!stack)
		return -1;

	spin_lock_irqsave(&rpm->debug.lock, flags);

	if (!rpm->debug.count)
		rpm->debug.last_acquire = stack;

	stacks = krealloc(rpm->debug.owners,
			  (rpm->debug.count + 1) * sizeof(*stacks),
			  GFP_NOWAIT | __GFP_NOWARN);
	if (stacks) {
		stacks[rpm->debug.count++] = stack;
		rpm->debug.owners = stacks;
	} else {
		stack = -1;
	}

	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	return stack;
}

static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
					     depot_stack_handle_t stack)
{
	unsigned long flags, n;
	bool found = false;

	if (unlikely(stack == -1))
		return;

	spin_lock_irqsave(&rpm->debug.lock, flags);
	for (n = rpm->debug.count; n--; ) {
		if (rpm->debug.owners[n] == stack) {
			memmove(rpm->debug.owners + n,
				rpm->debug.owners + n + 1,
				(--rpm->debug.count - n) * sizeof(stack));
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	if (WARN(!found,
		 "Unmatched wakeref (tracking %lu), count %u\n",
		 rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
		char *buf;

		buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
		if (!buf)
			return;

		__print_depot_stack(stack, buf, PAGE_SIZE, 2);
		DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf);

		stack = READ_ONCE(rpm->debug.last_release);
		if (stack) {
			__print_depot_stack(stack, buf, PAGE_SIZE, 2);
			DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf);
		}

		kfree(buf);
	}
}

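/*
 * For illustration (assumed caller, not in this file): the handle returned
 * by track_intel_runtime_pm_wakeref() acts as the caller's cookie, and
 * passing it back removes exactly one matching entry:
 *
 *	depot_stack_handle_t cookie;
 *
 *	cookie = track_intel_runtime_pm_wakeref(rpm);
 *	... hold the wakeref ...
 *	untrack_intel_runtime_pm_wakeref(rpm, cookie);
 *
 * A cookie that is never passed back stays in debug.owners and is dumped
 * as a leak when the final wakeref is dropped.
 */
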
static int cmphandle(const void *_a, const void *_b)
{
	const depot_stack_handle_t * const a = _a, * const b = _b;

	if (*a < *b)
		return -1;
	else if (*a > *b)
		return 1;
	else
		return 0;
}

static void
__print_intel_runtime_pm_wakeref(struct drm_printer *p,
				 const struct intel_runtime_pm_debug *dbg)
{
	unsigned long i;
	char *buf;

	buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
	if (!buf)
		return;

	if (dbg->last_acquire) {
		__print_depot_stack(dbg->last_acquire, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref last acquired:\n%s", buf);
	}

	if (dbg->last_release) {
		__print_depot_stack(dbg->last_release, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref last released:\n%s", buf);
	}

	drm_printf(p, "Wakeref count: %lu\n", dbg->count);

	sort(dbg->owners, dbg->count, sizeof(*dbg->owners), cmphandle, NULL);

	for (i = 0; i < dbg->count; i++) {
		depot_stack_handle_t stack = dbg->owners[i];
		unsigned long rep;

		rep = 1;
		while (i + 1 < dbg->count && dbg->owners[i + 1] == stack)
			rep++, i++;
		__print_depot_stack(stack, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf);
	}

	kfree(buf);
}

static noinline void
__untrack_all_wakerefs(struct intel_runtime_pm_debug *debug,
		       struct intel_runtime_pm_debug *saved)
{
	*saved = *debug;

	debug->owners = NULL;
	debug->count = 0;
	debug->last_release = __save_depot_stack();
}

static void
dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug)
{
	struct drm_printer p;

	if (!debug->count)
		return;

	p = drm_debug_printer("i915");
	__print_intel_runtime_pm_wakeref(&p, debug);

	kfree(debug->owners);
}

static noinline void
__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
{
	struct intel_runtime_pm_debug dbg = {};
	unsigned long flags;

	if (!atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
					 &rpm->debug.lock,
					 flags))
		return;

	__untrack_all_wakerefs(&rpm->debug, &dbg);
	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	dump_and_free_wakeref_tracking(&dbg);
}

static noinline void
untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
	struct intel_runtime_pm_debug dbg = {};
	unsigned long flags;

	spin_lock_irqsave(&rpm->debug.lock, flags);
	__untrack_all_wakerefs(&rpm->debug, &dbg);
	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	dump_and_free_wakeref_tracking(&dbg);
}

void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
				    struct drm_printer *p)
{
	struct intel_runtime_pm_debug dbg = {};

	do {
		unsigned long alloc = dbg.count;
		depot_stack_handle_t *s;

		spin_lock_irq(&rpm->debug.lock);
		dbg.count = rpm->debug.count;
		if (dbg.count <= alloc) {
			memcpy(dbg.owners,
			       rpm->debug.owners,
			       dbg.count * sizeof(*s));
		}
		dbg.last_acquire = rpm->debug.last_acquire;
		dbg.last_release = rpm->debug.last_release;
		spin_unlock_irq(&rpm->debug.lock);
		if (dbg.count <= alloc)
			break;

		s = krealloc(dbg.owners,
			     dbg.count * sizeof(*s),
			     GFP_NOWAIT | __GFP_NOWARN);
		if (!s)
			goto out;

		dbg.owners = s;
	} while (1);

	__print_intel_runtime_pm_wakeref(p, &dbg);

out:
	kfree(dbg.owners);
}

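/*
 * Note the pattern above: the snapshot buffer cannot be allocated while
 * debug.lock is held, so the count is sampled under the lock, the lock is
 * dropped to krealloc() the buffer, and the copy is retried until it fits.
 * This is a generic sample/allocate/retry idiom for dumping state that is
 * guarded by a spinlock.
 */
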
#else

static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
}

static depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
	return -1;
}

static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
					     intel_wakeref_t wref)
{
}

static void
__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
{
	atomic_dec(&rpm->wakeref_count);
}

static void
untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
}

#endif

static void
intel_runtime_pm_acquire(struct intel_runtime_pm *rpm, bool wakelock)
{
	if (wakelock) {
		atomic_add(1 + INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
		assert_rpm_wakelock_held(rpm);
	} else {
		atomic_inc(&rpm->wakeref_count);
		assert_rpm_raw_wakeref_held(rpm);
	}
}

static void
intel_runtime_pm_release(struct intel_runtime_pm *rpm, int wakelock)
{
	if (wakelock) {
		assert_rpm_wakelock_held(rpm);
		atomic_sub(INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
	} else {
		assert_rpm_raw_wakeref_held(rpm);
	}

	__intel_wakeref_dec_and_check_tracking(rpm);
}

static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm,
					      bool wakelock)
{
	int ret;

	ret = pm_runtime_get_sync(rpm->kdev);
	WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);

	intel_runtime_pm_acquire(rpm, wakelock);

	return track_intel_runtime_pm_wakeref(rpm);
}

/**
 * intel_runtime_pm_get_raw - grab a raw runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for
 * asynchronous PM management from display code) and ensures that it is powered
 * up. Raw references are not considered during wakelock assert checks.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put_raw() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put_raw(), evaluates
 * as True if the wakeref was acquired, or False otherwise.
 */
intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm)
{
	return __intel_runtime_pm_get(rpm, false);
}

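/*
 * A minimal sketch of a raw-reference user (hypothetical caller): raw
 * references keep the device awake without satisfying the wakelock asserts,
 * which is what asynchronous display PM code relies on:
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_runtime_pm_get_raw(rpm);
 *	... device-level PM is held, but no wakelock is asserted ...
 *	intel_runtime_pm_put_raw(rpm, wakeref);
 */
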
/**
 * intel_runtime_pm_get - grab a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on) and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
 */
intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm)
{
	return __intel_runtime_pm_get(rpm, true);
}

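/*
 * A minimal sketch of the symmetric requirement: the cookie may be stowed
 * away and released later, even from another context, as long as every get
 * is matched by exactly one put (obj is a hypothetical holder):
 *
 *	obj->wakeref = intel_runtime_pm_get(rpm);
 *	...
 *	intel_runtime_pm_put(rpm, obj->wakeref);
 */
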
/**
 * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference if the device is
 * already in use and ensures that it is powered up. It is illegal to access
 * the HW if intel_runtime_pm_get_if_in_use() reports failure.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
 * as True if the wakeref was acquired, or False otherwise.
 */
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
{
	if (IS_ENABLED(CONFIG_PM)) {
		/*
		 * In case runtime PM is disabled by the RPM core and we get
		 * an -EINVAL return value we are not supposed to call this
		 * function, since the power state is undefined. This applies
		 * atm to the late/early system suspend/resume handlers.
		 */
		if (pm_runtime_get_if_in_use(rpm->kdev) <= 0)
			return 0;
	}

	intel_runtime_pm_acquire(rpm, true);

	return track_intel_runtime_pm_wakeref(rpm);
}

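/*
 * A minimal sketch of the conditional pattern (hypothetical caller):
 * opportunistically touch the hardware only when it is already awake, and
 * skip the access entirely when the wakeref could not be obtained:
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_runtime_pm_get_if_in_use(rpm);
 *	if (!wakeref)
 *		return;
 *	... flush hardware state ...
 *	intel_runtime_pm_put(rpm, wakeref);
 */
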
/**
 * intel_runtime_pm_get_noresume - grab a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on).
 *
 * It will _not_ power up the device but instead only check that it's powered
 * on. Therefore it is only valid to call this function from contexts where
 * the device is known to be powered up and where trying to power it up would
 * result in hilarity and deadlocks. That pretty much means only the system
 * suspend/resume code where this is used to grab runtime pm references for
 * delayed setup down in work items.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
 */
intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm)
{
	assert_rpm_wakelock_held(rpm);
	pm_runtime_get_noresume(rpm->kdev);

	intel_runtime_pm_acquire(rpm, true);

	return track_intel_runtime_pm_wakeref(rpm);
}

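/*
 * A minimal sketch (worker/wq are hypothetical names): from a context that
 * already holds a wakeref, as asserted above, take an extra reference for
 * deferred work without risking a resume:
 *
 *	worker->wakeref = intel_runtime_pm_get_noresume(rpm);
 *	queue_work(wq, &worker->work);
 *	... the work item later calls intel_runtime_pm_put(rpm, wakeref) ...
 */
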
static void __intel_runtime_pm_put(struct intel_runtime_pm *rpm,
				   intel_wakeref_t wref,
				   bool wakelock)
{
	struct device *kdev = rpm->kdev;

	untrack_intel_runtime_pm_wakeref(rpm, wref);

	intel_runtime_pm_release(rpm, wakelock);

	pm_runtime_mark_last_busy(kdev);
	pm_runtime_put_autosuspend(kdev);
}

/**
 * intel_runtime_pm_put_raw - release a raw runtime pm reference
 * @rpm: the intel_runtime_pm structure
 * @wref: wakeref acquired for the reference that is being released
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get_raw() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void
intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
	__intel_runtime_pm_put(rpm, wref, false);
}

/**
 * intel_runtime_pm_put_unchecked - release an unchecked runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 *
 * This function exists only for historical reasons and should be avoided in
 * new code, as the correctness of its use cannot be checked. Always use
 * intel_runtime_pm_put() instead.
 */
void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm)
{
	__intel_runtime_pm_put(rpm, -1, true);
}

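/*
 * The difference to the checked variant, for illustration: pairing a
 * tracked get with the unchecked put drops the reference count but leaves
 * the tracking cookie behind, so a debug build will report it as leaked:
 *
 *	wakeref = intel_runtime_pm_get(rpm);
 *	...
 *	intel_runtime_pm_put_unchecked(rpm);	(cookie is never untracked)
 */
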
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_runtime_pm_put - release a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 * @wref: wakeref acquired for the reference that is being released
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
	__intel_runtime_pm_put(rpm, wref, true);
}
#endif

/**
 * intel_runtime_pm_enable - enable runtime pm
 * @rpm: the intel_runtime_pm structure
 *
 * This function enables runtime pm at the end of the driver load sequence.
 *
 * Note that this function does not currently enable runtime pm for the
 * subordinate display power domains. That is done by
 * intel_power_domains_enable().
 */
void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
{
	struct device *kdev = rpm->kdev;

	/*
	 * Disable the system suspend direct complete optimization, which can
	 * leave the device suspended skipping the driver's suspend handlers
	 * if the device was already runtime suspended. This is needed due to
	 * the difference in our runtime and system suspend sequence and
	 * because the HDA driver may require us to enable the audio power
	 * domain during system suspend.
	 */
	dev_pm_set_driver_flags(kdev, DPM_FLAG_NEVER_SKIP);

	pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
	pm_runtime_mark_last_busy(kdev);

	/*
	 * Take a permanent reference to disable the RPM functionality and drop
	 * it only when unloading the driver. Use the low level get/put helpers,
	 * so the driver's own RPM reference tracking asserts also work on
	 * platforms without RPM support.
	 */
	if (!rpm->available) {
		int ret;

		pm_runtime_dont_use_autosuspend(kdev);
		ret = pm_runtime_get_sync(kdev);
		WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
	} else {
		pm_runtime_use_autosuspend(kdev);
	}

	/*
	 * The core calls the driver load handler with an RPM reference held.
	 * We drop that here and will reacquire it during unloading in
	 * intel_power_domains_fini().
	 */
	pm_runtime_put_autosuspend(kdev);
}

void intel_runtime_pm_disable(struct intel_runtime_pm *rpm)
{
	struct device *kdev = rpm->kdev;

	/* Transfer rpm ownership back to core */
	WARN(pm_runtime_get_sync(kdev) < 0,
	     "Failed to pass rpm ownership back to core\n");

	pm_runtime_dont_use_autosuspend(kdev);

	if (!rpm->available)
		pm_runtime_put(kdev);
}

void intel_runtime_pm_cleanup(struct intel_runtime_pm *rpm)
{
	int count = atomic_read(&rpm->wakeref_count);

	WARN(count,
	     "i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
	     intel_rpm_raw_wakeref_count(count),
	     intel_rpm_wakelock_count(count));

	untrack_all_intel_runtime_pm_wakerefs(rpm);
}

void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm)
{
	struct drm_i915_private *i915 =
			container_of(rpm, struct drm_i915_private, runtime_pm);
	struct pci_dev *pdev = i915->drm.pdev;
	struct device *kdev = &pdev->dev;

	rpm->kdev = kdev;
	rpm->available = HAS_RUNTIME_PM(i915);

	init_intel_runtime_pm_wakeref(rpm);
}
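
/*
 * A minimal sketch of the lifecycle implemented above, in driver
 * load/unload order (call sites paraphrased):
 *
 *	intel_runtime_pm_init_early(rpm);	(probe, before HW access)
 *	...
 *	intel_runtime_pm_enable(rpm);		(end of driver load)
 *	...
 *	intel_runtime_pm_disable(rpm);		(start of unload)
 *	intel_runtime_pm_cleanup(rpm);		(flag any leaked wakerefs)
 */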