/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */
#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"
typedef int (*pm_callback_t)(struct device *);
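/*
 * Every subsystem-level callback dispatched below shares this signature.
 * As an illustrative sketch only (not part of this file), a driver callback
 * eventually invoked through a pm_callback_t could look like:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_priv *priv = dev_get_drvdata(dev);	// hypothetical driver data
 *
 *		disable_irq(priv->irq);		// quiesce the (assumed) device
 *		return 0;			// 0 on success, -errno aborts the transition
 *	}
 */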
/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mutex.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);
struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;
static const char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:	return "suspend";
	case PM_EVENT_RESUME:	return "resume";
	case PM_EVENT_FREEZE:	return "freeze";
	case PM_EVENT_QUIESCE:	return "quiesce";
	case PM_EVENT_HIBERNATE: return "hibernate";
	case PM_EVENT_THAW:	return "thaw";
	case PM_EVENT_RESTORE:	return "restore";
	case PM_EVENT_RECOVER:	return "recover";
	default:		return "(unknown PM event)";
	}
}
/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}
/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}
/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}
/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	device_pm_check_callbacks(dev);
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			 dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev->power.in_dpm_list = true;
	mutex_unlock(&dpm_list_mtx);
}
/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	dev->power.in_dpm_list = false;
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
	device_pm_check_callbacks(dev);
}
/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}
/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}
/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}
static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = 0;

	if (pm_print_times_enabled) {
		pr_info("calling  %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}
static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error, pm_message_t state,
				  const char *info)
{
	ktime_t rettime;
	s64 nsecs;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	if (pm_print_times_enabled) {
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)nsecs >> 10);
	}
}
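/*
 * Note: the per-callback timing prints above are gated by
 * pm_print_times_enabled, which userspace can toggle through the
 * /sys/power/pm_print_times attribute on kernels with PM sleep debugging
 * support, e.g. "echo 1 > /sys/power/pm_print_times" before a suspend cycle.
 */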
/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}
static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}
static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}
static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * If the supplier goes away right after we've checked the link to it,
	 * we'll wait for its completion to change the state, but that's fine,
	 * because the only things that will block as a result are the SRCU
	 * callbacks freeing the link objects for the links in the list we're
	 * walking.
	 */
	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->supplier, async);

	device_links_read_unlock(idx);
}
static void dpm_wait_for_superior(struct device *dev, bool async)
{
	dpm_wait(dev->parent, async);
	dpm_wait_for_suppliers(dev, async);
}
static void dpm_wait_for_consumers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * The status of a device link can only be changed from "dormant" by a
	 * probe, but that cannot happen during system suspend/resume.  In
	 * theory it can change to "dormant" at that time, but then it is
	 * reasonable to wait for the target device anyway (eg. if it goes
	 * away, it's better to wait for it to go away completely and then
	 * continue instead of trying to continue in parallel with its
	 * unregistration).
	 */
	list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->consumer, async);

	device_links_read_unlock(idx);
}
static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
	dpm_wait_for_children(dev, async);
	dpm_wait_for_consumers(dev, async);
}
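/*
 * Taken together, the helpers above encode the ordering contract enforced by
 * the phases below: a device may only resume once its parent and suppliers
 * have resumed, and may only suspend once all of its children and consumers
 * have suspended.  A minimal sketch of how such a dependency comes into
 * existence (illustrative, outside this file):
 *
 *	link = device_link_add(consumer_dev, supplier_dev, 0);
 *	if (!link)
 *		dev_err(consumer_dev, "failed to link to supplier\n");
 *
 * After this, dpm_wait_for_suppliers()/dpm_wait_for_consumers() keep the two
 * devices ordered during system sleep transitions.
 */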
/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
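/*
 * For illustration only: the fields returned above come from the driver's
 * (or subsystem's) struct dev_pm_ops.  A driver that wants the PM core to
 * find its callbacks through pm_op() would typically define something like:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *	};
 *
 * where SET_SYSTEM_SLEEP_PM_OPS() fills in .suspend/.resume and reuses the
 * same pair for the freeze/thaw/poweroff/restore hibernation events, so one
 * callback pair covers every case in the switch above.
 */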
/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}
static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
		       int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
	       dev_name(dev), pm_verb(state.event), info, error);
}
static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
			  const char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;

	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
		  info ?: "", info ? " " : "", pm_verb(state.event),
		  error ? "aborted" : "complete",
		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}
static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, const char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}
#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device		*dev;
	struct task_struct	*tsk;
	struct timer_list	timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @t: Timer embedded in the watchdog object.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(struct timer_list *t)
{
	struct dpm_watchdog *wd = from_timer(wd, t, timer);

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL);
	panic("%s %s: unrecoverable failure\n",
	      dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif
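/*
 * The watchdog above only exists on kernels built with CONFIG_DPM_WATCHDOG;
 * its timeout comes from CONFIG_DPM_WATCHDOG_TIMEOUT (in seconds).  With the
 * option disabled, the stub macros compile the whole mechanism away, so the
 * callers below can use DECLARE_DPM_WATCHDOG_ON_STACK(), dpm_watchdog_set()
 * and dpm_watchdog_clear() unconditionally.
 */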
/*------------------------- Resume routines -------------------------*/
/**
 * dev_pm_skip_next_resume_phases - Skip next system resume phases for device.
 * @dev: Target device.
 *
 * Make the core skip the "early resume" and "resume" phases for @dev.
 *
 * This function can be called by middle-layer code during the "noirq" phase of
 * system resume if necessary, but not by device drivers.
 */
void dev_pm_skip_next_resume_phases(struct device *dev)
{
	dev->power.is_late_suspended = false;
	dev->power.is_suspended = false;
}
/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	dpm_wait_for_superior(dev, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_noirq_suspended = false;

 Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}
static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
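/*
 * is_async() gates the async_schedule() paths below.  Both knobs it checks
 * are controlled elsewhere: a driver (or bus) opts a device in, and userspace
 * can veto all async PM, e.g. (illustrative):
 *
 *	device_enable_async_suspend(dev);	// in probe(): set power.async_suspend
 *
 *	echo 0 > /sys/power/pm_async		// shell: clear pm_async_enabled
 *
 * pm_trace_is_enabled() wins over both, since PM tracing needs strictly
 * sequential suspend/resume to be meaningful.
 */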
static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_noirq(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}
void dpm_noirq_resume_devices(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront,
	 * in case the starting of async threads is
	 * delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_noirq, dev);
		}
	}

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "noirq");
	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}
void dpm_noirq_end(void)
{
	resume_device_irqs();
	device_wakeup_disarm_wake_irqs();
	cpuidle_resume();
}
/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
 * allow device drivers' interrupt handlers to be called.
 */
void dpm_resume_noirq(pm_message_t state)
{
	dpm_noirq_resume_devices(state);
	dpm_noirq_end();
}
/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	dpm_wait_for_superior(dev, async);

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_late_suspended = false;

 Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
	return error;
}
static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_early(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}
/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront,
	 * in case the starting of async threads is
	 * delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_early, dev);
		}
	}

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}
		}
		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "early");
	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}
/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);
/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in __device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	dpm_wait_for_superior(dev, async);
	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Driver;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}
static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}
/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, NULL);

	cpufreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}
/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	const char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	pm_runtime_put(dev);
}
/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		device_complete(dev, state);
		trace_device_pm_callback_end(dev, 0);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);

	/* Allow device probing and trigger re-probing of deferred devices */
	device_unblock_probing();
	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}
/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
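/*
 * To keep the whole-system picture in one place: a full transition driven by
 * the PM core runs the phases in this order (error paths unwind partially):
 *
 *	dpm_prepare() -> dpm_suspend() -> dpm_suspend_late()
 *	  -> dpm_suspend_noirq()   ...sleep...   dpm_resume_noirq()
 *	  -> dpm_resume_early() -> dpm_resume() -> dpm_complete()
 *
 * dpm_suspend_start()/dpm_suspend_end() and dpm_resume_start()/dpm_resume_end()
 * are the convenience wrappers around these phases used by the sleep code.
 */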
/*------------------------- Suspend routines -------------------------*/
/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}
/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_noirq_suspended = true;
	else
		async_error = error;

Complete:
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}
static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_noirq(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}
static int device_suspend_noirq(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend_noirq, dev);
		return 0;
	}
	return __device_suspend_noirq(dev, pm_transition, false);
}
void dpm_noirq_begin(void)
{
	cpuidle_pause();
	device_wakeup_arm_wake_irqs();
	suspend_device_irqs();
}
int dpm_noirq_suspend_devices(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
	}
	dpm_show_time(starttime, state, error, "noirq");
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}
/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers' interrupt handlers from being called and invoke
 * "noirq" suspend callbacks for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	int ret;

	dpm_noirq_begin();
	ret = dpm_noirq_suspend_devices(state);
	if (ret)
		dpm_resume_noirq(resume_event(state));

	return ret;
}
/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	__pm_runtime_disable(dev, false);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_late_suspended = true;
	else
		async_error = error;

Complete:
	TRACE_SUSPEND(error);
	complete_all(&dev->power.completion);
	return error;
}
static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_late(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}
	put_device(dev);
}
static int device_suspend_late(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend_late, dev);
		return 0;
	}

	return __device_suspend_late(dev, pm_transition, false);
}
/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev);

		mutex_lock(&dpm_list_mtx);
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);

		if (error) {
			pm_dev_err(dev, state, " late", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend_late++;
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
	}
	dpm_show_time(starttime, state, error, "late");
	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
	return error;
}
/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);
	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);
/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of caller.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  const char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev, state);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}
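/*
 * legacy_suspend() only exists for the old bus->suspend(dev, state) style of
 * callback, which takes the pm_message_t directly instead of going through
 * struct dev_pm_ops.  New code should not add such callbacks; the dev_pm_ops
 * based path in __device_suspend() below is the supported interface.
 */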
static void dpm_clear_suppliers_direct_complete(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
		spin_lock_irq(&link->supplier->power.lock);
		link->supplier->power.direct_complete = false;
		spin_unlock_irq(&link->supplier->power.lock);
	}

	device_links_read_unlock(idx);
}
/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		if (pm_runtime_status_suspended(dev)) {
			pm_runtime_disable(dev);
			if (pm_runtime_status_suspended(dev))
				goto Complete;

			pm_runtime_enable(dev);
		}
		dev->power.direct_complete = false;
	}

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Run;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
					       "legacy bus ");
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		struct device *parent = dev->parent;

		dev->power.is_suspended = true;
		if (parent) {
			spin_lock_irq(&parent->power.lock);

			dev->parent->power.direct_complete = false;
			if (dev->power.wakeup_path
			    && !dev->parent->power.ignore_children)
				dev->parent->power.wakeup_path = true;

			spin_unlock_irq(&parent->power.lock);
		}
		dpm_clear_suppliers_direct_complete(dev);
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	if (error)
		async_error = error;

	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}
static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}
static int device_suspend(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}
/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
	might_sleep();

	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	}
	dpm_show_time(starttime, state, error, NULL);
	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
	return error;
}
/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	int ret = 0;

	if (dev->power.syscore)
		return 0;

	WARN_ON(dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
		!pm_runtime_enabled(dev));

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->power.no_pm_callbacks) {
		ret = 1;	/* Let device go direct_complete */
		goto unlock;
	}

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.prepare;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->prepare;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->prepare;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->prepare;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->prepare;

	if (callback)
		ret = callback(dev);

unlock:
	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(callback, ret);
		pm_runtime_put(dev);
		return ret;
	}
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants".  This only
	 * applies to suspend transitions, however.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
		pm_runtime_suspended(dev) && ret > 0 &&
		!dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
	spin_unlock_irq(&dev->power.lock);
	return 0;
}
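/*
 * An illustrative sketch (not from this file) of how a driver opts into the
 * direct_complete optimization computed above: returning a positive value
 * from ->prepare() while the device is runtime-suspended lets the core skip
 * all of its suspend/resume callbacks for the transition:
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		return pm_runtime_suspended(dev);	// 1: may stay suspended
 *	}
 *
 * A driver that must always see its callbacks can instead set
 * DPM_FLAG_NEVER_SKIP via dev_pm_set_driver_flags(), which the check above
 * honors.
 */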
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
	might_sleep();

	/*
	 * Give a chance for the known devices to complete their probes, before
	 * disabling probing of devices.  This sync point is important at least
	 * at boot time + hibernation restore.
	 */
	wait_for_device_probe();
	/*
	 * It is unsafe if probing of devices happens during suspend or
	 * hibernation; system behavior will be unpredictable in that case.
	 * So, let's prohibit device probing here and defer the probes
	 * instead.  The normal behavior will be restored in dpm_complete().
	 */
	device_block_probing();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		error = device_prepare(dev, state);
		trace_device_pm_callback_end(dev, error);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared for power transition: code %d\n",
			       dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
	return error;
}
/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
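/*
 * A minimal usage sketch (illustrative, not from this file): counting the
 * devices currently known to the PM core with dpm_for_each_dev():
 *
 *	static void count_dev(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *	}
 *
 *	int n = 0;
 *	dpm_for_each_dev(&n, count_dev);
 *
 * The callback runs with dpm_list_mtx held, so it must not try to acquire a
 * device lock (see the locking comment near the top of this file).
 */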
static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
{
	if (!ops)
		return true;

	return !ops->prepare &&
	       !ops->suspend &&
	       !ops->suspend_late &&
	       !ops->suspend_noirq &&
	       !ops->resume_noirq &&
	       !ops->resume_early &&
	       !ops->resume &&
	       !ops->complete;
}
void device_pm_check_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_pm_callbacks =
		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
		 !dev->bus->suspend && !dev->bus->resume)) &&
		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
		 !dev->driver->suspend && !dev->driver->resume));
	spin_unlock_irq(&dev->power.lock);
}
bool dev_pm_smart_suspend_and_suspended(struct device *dev)
{
	return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
		pm_runtime_status_suspended(dev);
}
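/*
 * For illustration: a driver participating in the optimization above would
 * typically set the flag once at probe time, e.g.
 *
 *	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
 *
 * after which middle layers can call dev_pm_smart_suspend_and_suspended() to
 * decide whether the device can be left runtime-suspended across a system
 * suspend instead of being resumed just to be suspended again.
 */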