/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */
#include <linux/device.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/devfreq.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"
typedef int (*pm_callback_t)(struct device *);
/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;
static const char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}
/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}
/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}
/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	device_pm_check_callbacks(dev);
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			 dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev->power.in_dpm_list = true;
	mutex_unlock(&dpm_list_mtx);
}
/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	dev->power.in_dpm_list = false;
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
	device_pm_check_callbacks(dev);
}
/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}
static ktime_t initcall_debug_start(struct device *dev, void *cb)
{
	if (!pm_print_times_enabled)
		return 0;

	dev_info(dev, "calling %pF @ %i, parent: %s\n", cb,
		 task_pid_nr(current),
		 dev->parent ? dev_name(dev->parent) : "none");

	return ktime_get();
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  void *cb, int error)
{
	ktime_t rettime;
	s64 nsecs;

	if (!pm_print_times_enabled)
		return;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	dev_info(dev, "%pF returned %d after %Ld usecs\n", cb, error,
		 (unsigned long long)nsecs >> 10);
}
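/*
 * Usage note (illustrative, not part of the upstream file): the two helpers
 * above only report timings when pm_print_times_enabled is set. On kernels
 * built with PM sleep debugging support, userspace can toggle it via sysfs:
 *
 *	# echo 1 > /sys/power/pm_print_times
 *
 * after which every device callback invocation and its duration show up in
 * the kernel log during suspend and resume.
 */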
/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}
static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * If the supplier goes away right after we've checked the link to it,
	 * we'll wait for its completion to change the state, but that's fine,
	 * because the only things that will block as a result are the SRCU
	 * callbacks freeing the link objects for the links in the list we're
	 * walking.
	 */
	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->supplier, async);

	device_links_read_unlock(idx);
}

static void dpm_wait_for_superior(struct device *dev, bool async)
{
	dpm_wait(dev->parent, async);
	dpm_wait_for_suppliers(dev, async);
}

static void dpm_wait_for_consumers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * The status of a device link can only be changed from "dormant" by a
	 * probe, but that cannot happen during system suspend/resume. In
	 * theory it can change to "dormant" at that time, but then it is
	 * reasonable to wait for the target device anyway (eg. if it goes
	 * away, it's better to wait for it to go away completely and then
	 * continue instead of trying to continue in parallel with its
	 * unregistration).
	 */
	list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->consumer, async);

	device_links_read_unlock(idx);
}

static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
	dpm_wait_for_children(dev, async);
	dpm_wait_for_consumers(dev, async);
}
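/*
 * Illustrative sketch (not part of the upstream file): the supplier/consumer
 * ordering honored by the helpers above comes from device links. A consumer
 * driver would typically create one at probe time, with consumer/supplier
 * pointers assumed to be already known:
 *
 *	struct device_link *link;
 *
 *	link = device_link_add(consumer, supplier, DL_FLAG_PM_RUNTIME);
 *
 * after which the PM core waits for the supplier before resuming the
 * consumer, and for the consumer before suspending the supplier.
 */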
/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
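/*
 * Illustrative sketch: the callbacks returned by pm_op() and its "late",
 * "early" and "noirq" variants below all come from a struct dev_pm_ops. A
 * driver usually fills one in with the standard helper macros, e.g.
 * (hypothetical foo_suspend()/foo_resume()):
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *	};
 *
 * With that, pm_op() maps PM_EVENT_SUSPEND to foo_suspend() and
 * PM_EVENT_RESUME to foo_resume() during system-wide transitions.
 */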
/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
		       int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
	       dev_name(dev), pm_verb(state.event), info, error);
}
static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
			  const char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;

	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
		  info ?: "", info ? " " : "", pm_verb(state.event),
		  error ? "aborted" : "complete",
		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}
static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, const char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev, cb);

	pm_dev_dbg(dev, state, info);
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, cb, error);

	return error;
}
#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device		*dev;
	struct task_struct	*tsk;
	struct timer_list	timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @t: The timer that fired, embedded in a struct dpm_watchdog.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(struct timer_list *t)
{
	struct dpm_watchdog *wd = from_timer(wd, t, timer);

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif
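/*
 * Configuration note (illustrative): the watchdog above is only built when
 * CONFIG_DPM_WATCHDOG is enabled, with the timeout (in seconds) taken from
 * CONFIG_DPM_WATCHDOG_TIMEOUT, e.g. in a kernel .config:
 *
 *	CONFIG_DPM_WATCHDOG=y
 *	CONFIG_DPM_WATCHDOG_TIMEOUT=120
 */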
/*------------------------- Resume routines -------------------------*/
/**
 * dev_pm_skip_next_resume_phases - Skip next system resume phases for device.
 * @dev: Target device.
 *
 * Make the core skip the "early resume" and "resume" phases for @dev.
 *
 * This function can be called by middle-layer code during the "noirq" phase of
 * system resume if necessary, but not by device drivers.
 */
void dev_pm_skip_next_resume_phases(struct device *dev)
{
	dev->power.is_late_suspended = false;
	dev->power.is_suspended = false;
}
/**
 * suspend_event - Return a "suspend" message for given "resume" one.
 * @resume_msg: PM message representing a system-wide resume transition.
 */
static pm_message_t suspend_event(pm_message_t resume_msg)
{
	switch (resume_msg.event) {
	case PM_EVENT_RESUME:
		return PMSG_SUSPEND;
	case PM_EVENT_THAW:
	case PM_EVENT_RESTORE:
		return PMSG_FREEZE;
	case PM_EVENT_RECOVER:
		return PMSG_HIBERNATE;
	}
	return PMSG_ON;
}
/**
 * dev_pm_may_skip_resume - System-wide device resume optimization check.
 * @dev: Target device.
 *
 * Checks whether or not the device may be left in suspend after a system-wide
 * transition to the working state.
 */
bool dev_pm_may_skip_resume(struct device *dev)
{
	return !dev->power.must_resume && pm_transition.event != PM_EVENT_RESTORE;
}
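/*
 * Illustrative sketch (hypothetical foo_probe()): leaving a device in
 * suspend after system resume is opt-in. A driver requests it by setting
 * DPM_FLAG_LEAVE_SUSPENDED, which the check above then honors during the
 * "noirq" resume phase:
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		dev_pm_set_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED);
 *		return 0;
 *	}
 */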
static pm_callback_t dpm_subsys_resume_noirq_cb(struct device *dev,
						pm_message_t state,
						const char **info_p)
{
	pm_callback_t callback;
	const char *info;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	} else {
		return NULL;
	}

	if (info_p)
		*info_p = info;

	return callback;
}

static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
						 pm_message_t state,
						 const char **info_p);

static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
						pm_message_t state,
						const char **info_p);
/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback;
	const char *info;
	bool skip_resume;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	dpm_wait_for_superior(dev, async);

	skip_resume = dev_pm_may_skip_resume(dev);

	callback = dpm_subsys_resume_noirq_cb(dev, state, &info);
	if (callback)
		goto Run;

	if (skip_resume)
		goto Skip;

	if (dev_pm_smart_suspend_and_suspended(dev)) {
		pm_message_t suspend_msg = suspend_event(state);

		/*
		 * If "freeze" callbacks have been skipped during a transition
		 * related to hibernation, the subsequent "thaw" callbacks must
		 * be skipped too or bad things may happen. Otherwise, resume
		 * callbacks are going to be run for the device, so its runtime
		 * PM status must be changed to reflect the new state after the
		 * transition under way.
		 */
		if (!dpm_subsys_suspend_late_cb(dev, suspend_msg, NULL) &&
		    !dpm_subsys_suspend_noirq_cb(dev, suspend_msg, NULL)) {
			if (state.event == PM_EVENT_THAW) {
				skip_resume = true;
				goto Skip;
			} else {
				pm_runtime_set_active(dev);
			}
		}
	}

	if (dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);

Skip:
	dev->power.is_noirq_suspended = false;

	if (skip_resume) {
		/*
		 * The device is going to be left in suspend, but it might not
		 * have been in runtime suspend before the system suspended, so
		 * its runtime PM status needs to be updated to avoid confusing
		 * the runtime PM framework when runtime PM is enabled for the
		 * device again.
		 */
		pm_runtime_set_suspended(dev);
		dev_pm_skip_next_resume_phases(dev);
	}

Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}
static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
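/*
 * Usage note (illustrative): async suspend/resume is opt-in per device and
 * can be disabled globally. A driver or bus type typically opts a device in
 * at probe time with:
 *
 *	device_enable_async_suspend(dev);
 *
 * and userspace can turn all asynchronous handling off with:
 *
 *	# echo 0 > /sys/power/pm_async
 */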
static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_noirq(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}
void dpm_noirq_resume_devices(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront,
	 * in case the starting of async threads is
	 * delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_noirq, dev);
		}
	}

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "noirq");
	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}
void dpm_noirq_end(void)
{
	resume_device_irqs();
	device_wakeup_disarm_wake_irqs();
	cpuidle_resume();
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
 * allow device drivers' interrupt handlers to be called.
 */
void dpm_resume_noirq(pm_message_t state)
{
	dpm_noirq_resume_devices(state);
	dpm_noirq_end();
}
static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
						pm_message_t state,
						const char **info_p)
{
	pm_callback_t callback;
	const char *info;

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	} else {
		return NULL;
	}

	if (info_p)
		*info_p = info;

	return callback;
}
/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback;
	const char *info;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	dpm_wait_for_superior(dev, async);

	callback = dpm_subsys_resume_early_cb(dev, state, &info);

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_late_suspended = false;

Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
	return error;
}
static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_early(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}
/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront,
	 * in case the starting of async threads is
	 * delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_early, dev);
		}
	}

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}
		}
		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "early");
	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}
/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);
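/*
 * Ordering sketch (illustrative): a full transition back to the working
 * state runs the resume phases in this order:
 *
 *	dpm_resume_start()	->  dpm_resume_noirq() + dpm_resume_early()
 *	dpm_resume_end()	->  dpm_resume() + dpm_complete()
 *
 * which mirrors, in reverse, the prepare/suspend/late/noirq sequence used
 * on the way down.
 */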
/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in __device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	dpm_wait_for_superior(dev, async);
	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib. But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Driver;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}
static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}
/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, NULL);

	cpufreq_resume();
	devfreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}
/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	const char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	pm_runtime_put(dev);
}
/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		device_complete(dev, state);
		trace_device_pm_callback_end(dev, 0);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);

	/* Allow device probing and trigger re-probing of deferred devices */
	device_unblock_probing();
	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}
/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
/*------------------------- Suspend routines -------------------------*/
/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}
static void dpm_superior_set_must_resume(struct device *dev)
{
	struct device_link *link;
	int idx;

	if (dev->parent)
		dev->parent->power.must_resume = true;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		link->supplier->power.must_resume = true;

	device_links_read_unlock(idx);
}
static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
						 pm_message_t state,
						 const char **info_p)
{
	pm_callback_t callback;
	const char *info;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	} else {
		return NULL;
	}

	if (info_p)
		*info_p = info;

	return callback;
}
static bool device_must_resume(struct device *dev, pm_message_t state,
			       bool no_subsys_suspend_noirq)
{
	pm_message_t resume_msg = resume_event(state);

	/*
	 * If all of the device driver's "noirq", "late" and "early" callbacks
	 * are invoked directly by the core, the decision to allow the device to
	 * stay in suspend can be based on its current runtime PM status and its
	 * wakeup settings.
	 */
	if (no_subsys_suspend_noirq &&
	    !dpm_subsys_suspend_late_cb(dev, state, NULL) &&
	    !dpm_subsys_resume_early_cb(dev, resume_msg, NULL) &&
	    !dpm_subsys_resume_noirq_cb(dev, resume_msg, NULL))
		return !pm_runtime_status_suspended(dev) &&
			(resume_msg.event != PM_EVENT_RESUME ||
			 (device_can_wakeup(dev) && !device_may_wakeup(dev)));

	/*
	 * The only safe strategy here is to require that if the device may not
	 * be left in suspend, resume callbacks must be invoked for it.
	 */
	return !dev->power.may_skip_resume;
}
/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback;
	const char *info;
	bool no_subsys_cb = false;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	callback = dpm_subsys_suspend_noirq_cb(dev, state, &info);
	if (callback)
		goto Run;

	no_subsys_cb = !dpm_subsys_suspend_late_cb(dev, state, NULL);

	if (dev_pm_smart_suspend_and_suspended(dev) && no_subsys_cb)
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);
	if (error) {
		async_error = error;
		goto Complete;
	}

Skip:
	dev->power.is_noirq_suspended = true;

	if (dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED)) {
		dev->power.must_resume = dev->power.must_resume ||
				atomic_read(&dev->power.usage_count) > 1 ||
				device_must_resume(dev, state, no_subsys_cb);
	} else {
		dev->power.must_resume = true;
	}

	if (dev->power.must_resume)
		dpm_superior_set_must_resume(dev);

Complete:
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}
static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_noirq(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend_noirq(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend_noirq, dev);
		return 0;
	}
	return __device_suspend_noirq(dev, pm_transition, false);
}
void dpm_noirq_begin(void)
{
	cpuidle_pause();
	device_wakeup_arm_wake_irqs();
	suspend_device_irqs();
}
int dpm_noirq_suspend_devices(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
	}
	dpm_show_time(starttime, state, error, "noirq");
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}
/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers' interrupt handlers from being called and invoke
 * "noirq" suspend callbacks for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	int ret;

	dpm_noirq_begin();
	ret = dpm_noirq_suspend_devices(state);
	if (ret)
		dpm_resume_noirq(resume_event(state));

	return ret;
}
static void dpm_propagate_wakeup_to_parent(struct device *dev)
{
	struct device *parent = dev->parent;

	if (!parent)
		return;

	spin_lock_irq(&parent->power.lock);

	if (dev->power.wakeup_path && !parent->power.ignore_children)
		parent->power.wakeup_path = true;

	spin_unlock_irq(&parent->power.lock);
}
static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
						pm_message_t state,
						const char **info_p)
{
	pm_callback_t callback;
	const char *info;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	} else {
		return NULL;
	}

	if (info_p)
		*info_p = info;

	return callback;
}
/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback;
	const char *info;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	__pm_runtime_disable(dev, false);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	callback = dpm_subsys_suspend_late_cb(dev, state, &info);
	if (callback)
		goto Run;

	if (dev_pm_smart_suspend_and_suspended(dev) &&
	    !dpm_subsys_suspend_noirq_cb(dev, state, NULL))
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);
	if (error) {
		async_error = error;
		goto Complete;
	}
	dpm_propagate_wakeup_to_parent(dev);

Skip:
	dev->power.is_late_suspended = true;

Complete:
	TRACE_SUSPEND(error);
	complete_all(&dev->power.completion);
	return error;
}
static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_late(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}
	put_device(dev);
}

static int device_suspend_late(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend_late, dev);
		return 0;
	}

	return __device_suspend_late(dev, pm_transition, false);
}
/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev);

		mutex_lock(&dpm_list_mtx);
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);

		if (error) {
			pm_dev_err(dev, state, " late", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend_late++;
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
	}
	dpm_show_time(starttime, state, error, "late");
	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
	return error;
}
/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);
	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);
/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of caller.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  const char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev, cb);

	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev, state);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, cb, error);

	return error;
}
static void dpm_clear_superiors_direct_complete(struct device *dev)
{
	struct device_link *link;
	int idx;

	if (dev->parent) {
		spin_lock_irq(&dev->parent->power.lock);
		dev->parent->power.direct_complete = false;
		spin_unlock_irq(&dev->parent->power.lock);
	}

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
		spin_lock_irq(&link->supplier->power.lock);
		link->supplier->power.direct_complete = false;
		spin_unlock_irq(&link->supplier->power.lock);
	}

	device_links_read_unlock(idx);
}
/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error) {
		dev->power.direct_complete = false;
		goto Complete;
	}

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		dev->power.direct_complete = false;
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		if (pm_runtime_status_suspended(dev)) {
			pm_runtime_disable(dev);
			if (pm_runtime_status_suspended(dev))
				goto Complete;

			pm_runtime_enable(dev);
		}
		dev->power.direct_complete = false;
	}

	dev->power.may_skip_resume = false;
	dev->power.must_resume = false;

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Run;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

End:
	if (!error) {
		dev->power.is_suspended = true;
		if (device_may_wakeup(dev))
			dev->power.wakeup_path = true;

		dpm_propagate_wakeup_to_parent(dev);
		dpm_clear_superiors_direct_complete(dev);
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

Complete:
	if (error)
		async_error = error;

	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}
static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}
/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
	might_sleep();

	devfreq_suspend();
	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	}
	dpm_show_time(starttime, state, error, NULL);
	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
	return error;
}
/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device. No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	int ret = 0;

	if (dev->power.syscore)
		return 0;

	WARN_ON(!pm_runtime_enabled(dev) &&
		dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
					      DPM_FLAG_LEAVE_SUSPENDED));

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device. To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = false;

	if (dev->power.no_pm_callbacks)
		goto unlock;

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.prepare;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->prepare;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->prepare;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->prepare;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->prepare;

	if (callback)
		ret = callback(dev);

unlock:
	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(callback, ret);
		pm_runtime_put(dev);
		return ret;
	}
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants". This only
	 * applies to suspend transitions, however.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
		((pm_runtime_suspended(dev) && ret > 0) ||
		 dev->power.no_pm_callbacks) &&
		!dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
	spin_unlock_irq(&dev->power.lock);
	return 0;
}
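/*
 * Illustrative sketch (hypothetical foo_prepare()): a middle layer that is
 * happy to leave a runtime-suspended device as-is across the whole
 * transition returns a positive value from ->prepare(), which causes
 * power.direct_complete to be set above:
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		return pm_runtime_suspended(dev);
 *	}
 *
 * A driver that must always get its suspend/resume callbacks can veto the
 * optimization with dev_pm_set_driver_flags(dev, DPM_FLAG_NEVER_SKIP).
 */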
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
	might_sleep();

	/*
	 * Give the known devices a chance to complete their probes before we
	 * disable further probing. This sync point is important at least
	 * at boot time + hibernation restore.
	 */
	wait_for_device_probe();
	/*
	 * Probing devices during suspend or hibernation is unsafe and would
	 * make system behavior unpredictable, so prohibit device probing here
	 * and defer the probes instead. The normal behavior will be restored
	 * in dpm_complete().
	 */
	device_block_probing();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		error = device_prepare(dev, state);
		trace_device_pm_callback_end(dev, error);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
	return error;
}
/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
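/*
 * Usage sketch (hypothetical, for illustration only): a driver whose resume
 * depends on a device outside its parent/child and device-link relationships
 * can serialize against it explicitly:
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_data *data = dev_get_drvdata(dev);
 *
 *		device_pm_wait_for_dev(dev, data->companion);
 *		return foo_reinit(dev);
 *	}
 *
 * where foo_data, companion and foo_reinit() are assumptions made for the
 * example.
 */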
/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
{
	if (!ops)
		return true;

	return !ops->prepare &&
	       !ops->suspend &&
	       !ops->suspend_late &&
	       !ops->suspend_noirq &&
	       !ops->resume_noirq &&
	       !ops->resume_early &&
	       !ops->resume &&
	       !ops->complete;
}
void device_pm_check_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_pm_callbacks =
		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
		 !dev->bus->suspend && !dev->bus->resume)) &&
		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
		 !dev->driver->suspend && !dev->driver->resume));
	spin_unlock_irq(&dev->power.lock);
}
bool dev_pm_smart_suspend_and_suspended(struct device *dev)
{
	return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
		pm_runtime_status_suspended(dev);
}
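/*
 * Illustrative note: DPM_FLAG_SMART_SUSPEND is opt-in as well, typically set
 * at probe time:
 *
 *	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
 *
 * With the flag set, the helper above lets the core and middle layers (such
 * as the PCI bus type) skip the "late" and "noirq" suspend callbacks of
 * devices that are already runtime-suspended.
 */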