1 /*
2  * drivers/base/power/main.c - Where the driver meets power management.
3  *
4  * Copyright (c) 2003 Patrick Mochel
5  * Copyright (c) 2003 Open Source Development Lab
6  *
7  * This file is released under the GPLv2
8  *
9  *
10  * The driver model core calls device_pm_add() when a device is registered.
11  * This will initialize the embedded device_pm_info object in the device
12  * and add it to the list of power-controlled devices. sysfs entries for
13  * controlling device power management will also be added.
14  *
15  * A separate list is used for keeping track of power info, because the power
16  * domain dependencies may differ from the ancestral dependencies that the
17  * subsystem list maintains.
18  */
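/*
 * Rough call flow, for orientation (a sketch of the surrounding driver core,
 * not something defined in this file):
 *
 *	device_register(dev)
 *	  device_add(dev)
 *	    dpm_sysfs_add(dev)		creates the power/ sysfs entries
 *	    device_pm_add(dev)		appends dev to dpm_list
 *
 * Drivers never call device_pm_add() directly.
 */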
19
20 #include <linux/device.h>
21 #include <linux/export.h>
22 #include <linux/mutex.h>
23 #include <linux/pm.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/pm-trace.h>
26 #include <linux/pm_wakeirq.h>
27 #include <linux/interrupt.h>
28 #include <linux/sched.h>
29 #include <linux/sched/debug.h>
30 #include <linux/async.h>
31 #include <linux/suspend.h>
32 #include <trace/events/power.h>
33 #include <linux/cpufreq.h>
34 #include <linux/cpuidle.h>
35 #include <linux/devfreq.h>
36 #include <linux/timer.h>
37
38 #include "../base.h"
39 #include "power.h"
40
41 typedef int (*pm_callback_t)(struct device *);
42
43 /*
44  * The entries in dpm_list are in depth-first order, simply
45  * because children are guaranteed to be discovered after parents, and
46  * are inserted at the back of the list on discovery.
47  *
48  * Since device_pm_add() may be called with a device lock held,
49  * we must never try to acquire a device lock while holding
50  * dpm_list_mtx.
51  */
52
53 LIST_HEAD(dpm_list);
54 static LIST_HEAD(dpm_prepared_list);
55 static LIST_HEAD(dpm_suspended_list);
56 static LIST_HEAD(dpm_late_early_list);
57 static LIST_HEAD(dpm_noirq_list);
58
59 struct suspend_stats suspend_stats;
60 static DEFINE_MUTEX(dpm_list_mtx);
61 static pm_message_t pm_transition;
62
63 static int async_error;
64
65 static const char *pm_verb(int event)
66 {
67         switch (event) {
68         case PM_EVENT_SUSPEND:
69                 return "suspend";
70         case PM_EVENT_RESUME:
71                 return "resume";
72         case PM_EVENT_FREEZE:
73                 return "freeze";
74         case PM_EVENT_QUIESCE:
75                 return "quiesce";
76         case PM_EVENT_HIBERNATE:
77                 return "hibernate";
78         case PM_EVENT_THAW:
79                 return "thaw";
80         case PM_EVENT_RESTORE:
81                 return "restore";
82         case PM_EVENT_RECOVER:
83                 return "recover";
84         default:
85                 return "(unknown PM event)";
86         }
87 }
88
89 /**
90  * device_pm_sleep_init - Initialize system suspend-related device fields.
91  * @dev: Device object being initialized.
92  */
93 void device_pm_sleep_init(struct device *dev)
94 {
95         dev->power.is_prepared = false;
96         dev->power.is_suspended = false;
97         dev->power.is_noirq_suspended = false;
98         dev->power.is_late_suspended = false;
99         init_completion(&dev->power.completion);
100         complete_all(&dev->power.completion);
101         dev->power.wakeup = NULL;
102         INIT_LIST_HEAD(&dev->power.entry);
103 }
104
105 /**
106  * device_pm_lock - Lock the list of active devices used by the PM core.
107  */
108 void device_pm_lock(void)
109 {
110         mutex_lock(&dpm_list_mtx);
111 }
112
113 /**
114  * device_pm_unlock - Unlock the list of active devices used by the PM core.
115  */
116 void device_pm_unlock(void)
117 {
118         mutex_unlock(&dpm_list_mtx);
119 }
120
121 /**
122  * device_pm_add - Add a device to the PM core's list of active devices.
123  * @dev: Device to add to the list.
124  */
125 void device_pm_add(struct device *dev)
126 {
127         pr_debug("PM: Adding info for %s:%s\n",
128                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
129         device_pm_check_callbacks(dev);
130         mutex_lock(&dpm_list_mtx);
131         if (dev->parent && dev->parent->power.is_prepared)
132                 dev_warn(dev, "parent %s should not be sleeping\n",
133                         dev_name(dev->parent));
134         list_add_tail(&dev->power.entry, &dpm_list);
135         dev->power.in_dpm_list = true;
136         mutex_unlock(&dpm_list_mtx);
137 }
138
139 /**
140  * device_pm_remove - Remove a device from the PM core's list of active devices.
141  * @dev: Device to be removed from the list.
142  */
143 void device_pm_remove(struct device *dev)
144 {
145         pr_debug("PM: Removing info for %s:%s\n",
146                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
147         complete_all(&dev->power.completion);
148         mutex_lock(&dpm_list_mtx);
149         list_del_init(&dev->power.entry);
150         dev->power.in_dpm_list = false;
151         mutex_unlock(&dpm_list_mtx);
152         device_wakeup_disable(dev);
153         pm_runtime_remove(dev);
154         device_pm_check_callbacks(dev);
155 }
156
157 /**
158  * device_pm_move_before - Move device in the PM core's list of active devices.
159  * @deva: Device to move in dpm_list.
160  * @devb: Device @deva should come before.
161  */
162 void device_pm_move_before(struct device *deva, struct device *devb)
163 {
164         pr_debug("PM: Moving %s:%s before %s:%s\n",
165                  deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
166                  devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
167         /* Delete deva from dpm_list and reinsert before devb. */
168         list_move_tail(&deva->power.entry, &devb->power.entry);
169 }
170
171 /**
172  * device_pm_move_after - Move device in the PM core's list of active devices.
173  * @deva: Device to move in dpm_list.
174  * @devb: Device @deva should come after.
175  */
176 void device_pm_move_after(struct device *deva, struct device *devb)
177 {
178         pr_debug("PM: Moving %s:%s after %s:%s\n",
179                  deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
180                  devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
181         /* Delete deva from dpm_list and reinsert after devb. */
182         list_move(&deva->power.entry, &devb->power.entry);
183 }
184
185 /**
186  * device_pm_move_last - Move device to end of the PM core's list of devices.
187  * @dev: Device to move in dpm_list.
188  */
189 void device_pm_move_last(struct device *dev)
190 {
191         pr_debug("PM: Moving %s:%s to end of list\n",
192                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
193         list_move_tail(&dev->power.entry, &dpm_list);
194 }
195
196 static ktime_t initcall_debug_start(struct device *dev, void *cb)
197 {
198         if (!pm_print_times_enabled)
199                 return 0;
200
201         dev_info(dev, "calling %pF @ %i, parent: %s\n", cb,
202                  task_pid_nr(current),
203                  dev->parent ? dev_name(dev->parent) : "none");
204         return ktime_get();
205 }
206
207 static void initcall_debug_report(struct device *dev, ktime_t calltime,
208                                   void *cb, int error)
209 {
210         ktime_t rettime;
211         s64 nsecs;
212
213         if (!pm_print_times_enabled)
214                 return;
215
216         rettime = ktime_get();
217         nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
218
219         dev_info(dev, "%pF returned %d after %Ld usecs\n", cb, error,
220                  (unsigned long long)nsecs >> 10);
221 }
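/*
 * Note: the two helpers above only emit their "calling ..."/"... returned"
 * lines when pm_print_times_enabled is set.  That knob lives in sysfs and is
 * also switched on by the initcall_debug boot parameter, e.g. (user space):
 *
 *	echo 1 > /sys/power/pm_print_times
 */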
222
223 /**
224  * dpm_wait - Wait for a PM operation to complete.
225  * @dev: Device to wait for.
226  * @async: If unset, wait only if the device's power.async_suspend flag is set.
227  */
228 static void dpm_wait(struct device *dev, bool async)
229 {
230         if (!dev)
231                 return;
232
233         if (async || (pm_async_enabled && dev->power.async_suspend))
234                 wait_for_completion(&dev->power.completion);
235 }
236
237 static int dpm_wait_fn(struct device *dev, void *async_ptr)
238 {
239         dpm_wait(dev, *((bool *)async_ptr));
240         return 0;
241 }
242
243 static void dpm_wait_for_children(struct device *dev, bool async)
244 {
245         device_for_each_child(dev, &async, dpm_wait_fn);
246 }
247
248 static void dpm_wait_for_suppliers(struct device *dev, bool async)
249 {
250         struct device_link *link;
251         int idx;
252
253         idx = device_links_read_lock();
254
255         /*
256          * If the supplier goes away right after we've checked the link to it,
257          * we'll wait for its completion to change the state, but that's fine,
258          * because the only things that will block as a result are the SRCU
259          * callbacks freeing the link objects for the links in the list we're
260          * walking.
261          */
262         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
263                 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
264                         dpm_wait(link->supplier, async);
265
266         device_links_read_unlock(idx);
267 }
268
269 static void dpm_wait_for_superior(struct device *dev, bool async)
270 {
271         dpm_wait(dev->parent, async);
272         dpm_wait_for_suppliers(dev, async);
273 }
274
275 static void dpm_wait_for_consumers(struct device *dev, bool async)
276 {
277         struct device_link *link;
278         int idx;
279
280         idx = device_links_read_lock();
281
282         /*
283          * The status of a device link can only be changed from "dormant" by a
284          * probe, but that cannot happen during system suspend/resume.  In
285          * theory it can change to "dormant" at that time, but then it is
286          * reasonable to wait for the target device anyway (e.g. if it goes
287          * away, it's better to wait for it to go away completely and then
288          * continue instead of trying to continue in parallel with its
289          * unregistration).
290          */
291         list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
292                 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
293                         dpm_wait(link->consumer, async);
294
295         device_links_read_unlock(idx);
296 }
297
298 static void dpm_wait_for_subordinate(struct device *dev, bool async)
299 {
300         dpm_wait_for_children(dev, async);
301         dpm_wait_for_consumers(dev, async);
302 }
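/*
 * The supplier/consumer lists walked by the helpers above come from device
 * links.  A consumer driver typically creates such a link during probe,
 * along these lines (sketch; the device pointers are placeholders):
 *
 *	struct device_link *link;
 *
 *	link = device_link_add(consumer_dev, supplier_dev, DL_FLAG_PM_RUNTIME);
 *
 * The PM core then resumes supplier_dev before consumer_dev and suspends it
 * after consumer_dev, which is exactly what the dpm_wait_for_*() helpers
 * above enforce.
 */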
303
304 /**
305  * pm_op - Return the PM operation appropriate for given PM event.
306  * @ops: PM operations to choose from.
307  * @state: PM transition of the system being carried out.
308  */
309 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
310 {
311         switch (state.event) {
312 #ifdef CONFIG_SUSPEND
313         case PM_EVENT_SUSPEND:
314                 return ops->suspend;
315         case PM_EVENT_RESUME:
316                 return ops->resume;
317 #endif /* CONFIG_SUSPEND */
318 #ifdef CONFIG_HIBERNATE_CALLBACKS
319         case PM_EVENT_FREEZE:
320         case PM_EVENT_QUIESCE:
321                 return ops->freeze;
322         case PM_EVENT_HIBERNATE:
323                 return ops->poweroff;
324         case PM_EVENT_THAW:
325         case PM_EVENT_RECOVER:
326                 return ops->thaw;
328         case PM_EVENT_RESTORE:
329                 return ops->restore;
330 #endif /* CONFIG_HIBERNATE_CALLBACKS */
331         }
332
333         return NULL;
334 }
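/*
 * Example of the selection done by pm_op() (hypothetical driver, not part of
 * this file): with the dev_pm_ops below, pm_op() returns foo_suspend for
 * PM_EVENT_SUSPEND and foo_resume for PM_EVENT_RESUME; the hibernation events
 * map to NULL, which dpm_run_callback() treats as "nothing to do".
 *
 *	static int foo_suspend(struct device *dev) { return 0; }
 *	static int foo_resume(struct device *dev) { return 0; }
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend = foo_suspend,
 *		.resume = foo_resume,
 *	};
 */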
335
336 /**
337  * pm_late_early_op - Return the PM operation appropriate for given PM event.
338  * @ops: PM operations to choose from.
339  * @state: PM transition of the system being carried out.
340  *
341  * Runtime PM is disabled for @dev while this function is being executed.
342  */
343 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
344                                       pm_message_t state)
345 {
346         switch (state.event) {
347 #ifdef CONFIG_SUSPEND
348         case PM_EVENT_SUSPEND:
349                 return ops->suspend_late;
350         case PM_EVENT_RESUME:
351                 return ops->resume_early;
352 #endif /* CONFIG_SUSPEND */
353 #ifdef CONFIG_HIBERNATE_CALLBACKS
354         case PM_EVENT_FREEZE:
355         case PM_EVENT_QUIESCE:
356                 return ops->freeze_late;
357         case PM_EVENT_HIBERNATE:
358                 return ops->poweroff_late;
359         case PM_EVENT_THAW:
360         case PM_EVENT_RECOVER:
361                 return ops->thaw_early;
362         case PM_EVENT_RESTORE:
363                 return ops->restore_early;
364 #endif /* CONFIG_HIBERNATE_CALLBACKS */
365         }
366
367         return NULL;
368 }
369
370 /**
371  * pm_noirq_op - Return the PM operation appropriate for given PM event.
372  * @ops: PM operations to choose from.
373  * @state: PM transition of the system being carried out.
374  *
375  * The driver of @dev will not receive interrupts while this function is being
376  * executed.
377  */
378 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
379 {
380         switch (state.event) {
381 #ifdef CONFIG_SUSPEND
382         case PM_EVENT_SUSPEND:
383                 return ops->suspend_noirq;
384         case PM_EVENT_RESUME:
385                 return ops->resume_noirq;
386 #endif /* CONFIG_SUSPEND */
387 #ifdef CONFIG_HIBERNATE_CALLBACKS
388         case PM_EVENT_FREEZE:
389         case PM_EVENT_QUIESCE:
390                 return ops->freeze_noirq;
391         case PM_EVENT_HIBERNATE:
392                 return ops->poweroff_noirq;
393         case PM_EVENT_THAW:
394         case PM_EVENT_RECOVER:
395                 return ops->thaw_noirq;
396         case PM_EVENT_RESTORE:
397                 return ops->restore_noirq;
398 #endif /* CONFIG_HIBERNATE_CALLBACKS */
399         }
400
401         return NULL;
402 }
403
404 static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
405 {
406         dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
407                 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
408                 ", may wakeup" : "");
409 }
410
411 static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
412                         int error)
413 {
414         printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
415                 dev_name(dev), pm_verb(state.event), info, error);
416 }
417
418 static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
419                           const char *info)
420 {
421         ktime_t calltime;
422         u64 usecs64;
423         int usecs;
424
425         calltime = ktime_get();
426         usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
427         do_div(usecs64, NSEC_PER_USEC);
428         usecs = usecs64;
429         if (usecs == 0)
430                 usecs = 1;
431
432         pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
433                   info ?: "", info ? " " : "", pm_verb(state.event),
434                   error ? "aborted" : "complete",
435                   usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
436 }
437
438 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
439                             pm_message_t state, const char *info)
440 {
441         ktime_t calltime;
442         int error;
443
444         if (!cb)
445                 return 0;
446
447         calltime = initcall_debug_start(dev, cb);
448
449         pm_dev_dbg(dev, state, info);
450         trace_device_pm_callback_start(dev, info, state.event);
451         error = cb(dev);
452         trace_device_pm_callback_end(dev, error);
453         suspend_report_result(cb, error);
454
455         initcall_debug_report(dev, calltime, cb, error);
456
457         return error;
458 }
459
460 #ifdef CONFIG_DPM_WATCHDOG
461 struct dpm_watchdog {
462         struct device           *dev;
463         struct task_struct      *tsk;
464         struct timer_list       timer;
465 };
466
467 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
468         struct dpm_watchdog wd
469
470 /**
471  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
472  * @data: Watchdog object address.
473  *
474  * Called when a driver has timed out suspending or resuming.
475  * There's not much we can do here to recover so panic() to
476  * capture a crash-dump in pstore.
477  */
478 static void dpm_watchdog_handler(struct timer_list *t)
479 {
480         struct dpm_watchdog *wd = from_timer(wd, t, timer);
481
482         dev_emerg(wd->dev, "**** DPM device timeout ****\n");
483         show_stack(wd->tsk, NULL);
484         panic("%s %s: unrecoverable failure\n",
485                 dev_driver_string(wd->dev), dev_name(wd->dev));
486 }
487
488 /**
489  * dpm_watchdog_set - Enable pm watchdog for given device.
490  * @wd: Watchdog. Must be allocated on the stack.
491  * @dev: Device to handle.
492  */
493 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
494 {
495         struct timer_list *timer = &wd->timer;
496
497         wd->dev = dev;
498         wd->tsk = current;
499
500         timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
501         /* use same timeout value for both suspend and resume */
502         timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
503         add_timer(timer);
504 }
505
506 /**
507  * dpm_watchdog_clear - Disable suspend/resume watchdog.
508  * @wd: Watchdog to disable.
509  */
510 static void dpm_watchdog_clear(struct dpm_watchdog *wd)
511 {
512         struct timer_list *timer = &wd->timer;
513
514         del_timer_sync(timer);
515         destroy_timer_on_stack(timer);
516 }
517 #else
518 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
519 #define dpm_watchdog_set(x, y)
520 #define dpm_watchdog_clear(x)
521 #endif
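/*
 * Usage pattern (the same one device_resume() and __device_suspend() follow
 * below): the watchdog brackets a potentially slow callback so that a hung
 * driver produces a panic and a crash dump instead of silently wedging the
 * whole transition.
 *
 *	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 *
 *	dpm_watchdog_set(&wd, dev);
 *	error = dpm_run_callback(callback, dev, state, info);
 *	dpm_watchdog_clear(&wd);
 */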
522
523 /*------------------------- Resume routines -------------------------*/
524
525 /**
526  * dev_pm_skip_next_resume_phases - Skip next system resume phases for device.
527  * @dev: Target device.
528  *
529  * Make the core skip the "early resume" and "resume" phases for @dev.
530  *
531  * This function can be called by middle-layer code during the "noirq" phase of
532  * system resume if necessary, but not by device drivers.
533  */
534 void dev_pm_skip_next_resume_phases(struct device *dev)
535 {
536         dev->power.is_late_suspended = false;
537         dev->power.is_suspended = false;
538 }
539
540 /**
541  * suspend_event - Return a "suspend" message for given "resume" one.
542  * @resume_msg: PM message representing a system-wide resume transition.
543  */
544 static pm_message_t suspend_event(pm_message_t resume_msg)
545 {
546         switch (resume_msg.event) {
547         case PM_EVENT_RESUME:
548                 return PMSG_SUSPEND;
549         case PM_EVENT_THAW:
550         case PM_EVENT_RESTORE:
551                 return PMSG_FREEZE;
552         case PM_EVENT_RECOVER:
553                 return PMSG_HIBERNATE;
554         }
555         return PMSG_ON;
556 }
557
558 /**
559  * dev_pm_may_skip_resume - System-wide device resume optimization check.
560  * @dev: Target device.
561  *
562  * Checks whether or not the device may be left in suspend after a system-wide
563  * transition to the working state.
564  */
565 bool dev_pm_may_skip_resume(struct device *dev)
566 {
567         return !dev->power.must_resume && pm_transition.event != PM_EVENT_RESTORE;
568 }
569
570 static pm_callback_t dpm_subsys_resume_noirq_cb(struct device *dev,
571                                                 pm_message_t state,
572                                                 const char **info_p)
573 {
574         pm_callback_t callback;
575         const char *info;
576
577         if (dev->pm_domain) {
578                 info = "noirq power domain ";
579                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
580         } else if (dev->type && dev->type->pm) {
581                 info = "noirq type ";
582                 callback = pm_noirq_op(dev->type->pm, state);
583         } else if (dev->class && dev->class->pm) {
584                 info = "noirq class ";
585                 callback = pm_noirq_op(dev->class->pm, state);
586         } else if (dev->bus && dev->bus->pm) {
587                 info = "noirq bus ";
588                 callback = pm_noirq_op(dev->bus->pm, state);
589         } else {
590                 return NULL;
591         }
592
593         if (info_p)
594                 *info_p = info;
595
596         return callback;
597 }
598
599 static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
600                                                  pm_message_t state,
601                                                  const char **info_p);
602
603 static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
604                                                 pm_message_t state,
605                                                 const char **info_p);
606
607 /**
608  * device_resume_noirq - Execute a "noirq resume" callback for given device.
609  * @dev: Device to handle.
610  * @state: PM transition of the system being carried out.
611  * @async: If true, the device is being resumed asynchronously.
612  *
613  * The driver of @dev will not receive interrupts while this function is being
614  * executed.
615  */
616 static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
617 {
618         pm_callback_t callback;
619         const char *info;
620         bool skip_resume;
621         int error = 0;
622
623         TRACE_DEVICE(dev);
624         TRACE_RESUME(0);
625
626         if (dev->power.syscore || dev->power.direct_complete)
627                 goto Out;
628
629         if (!dev->power.is_noirq_suspended)
630                 goto Out;
631
632         dpm_wait_for_superior(dev, async);
633
634         skip_resume = dev_pm_may_skip_resume(dev);
635
636         callback = dpm_subsys_resume_noirq_cb(dev, state, &info);
637         if (callback)
638                 goto Run;
639
640         if (skip_resume)
641                 goto Skip;
642
643         if (dev_pm_smart_suspend_and_suspended(dev)) {
644                 pm_message_t suspend_msg = suspend_event(state);
645
646                 /*
647                  * If "freeze" callbacks have been skipped during a transition
648                  * related to hibernation, the subsequent "thaw" callbacks must
649                  * be skipped too or bad things may happen.  Otherwise, resume
650                  * callbacks are going to be run for the device, so its runtime
651                  * PM status must be changed to reflect the new state after the
652                  * transition under way.
653                  */
654                 if (!dpm_subsys_suspend_late_cb(dev, suspend_msg, NULL) &&
655                     !dpm_subsys_suspend_noirq_cb(dev, suspend_msg, NULL)) {
656                         if (state.event == PM_EVENT_THAW) {
657                                 skip_resume = true;
658                                 goto Skip;
659                         } else {
660                                 pm_runtime_set_active(dev);
661                         }
662                 }
663         }
664
665         if (dev->driver && dev->driver->pm) {
666                 info = "noirq driver ";
667                 callback = pm_noirq_op(dev->driver->pm, state);
668         }
669
670 Run:
671         error = dpm_run_callback(callback, dev, state, info);
672
673 Skip:
674         dev->power.is_noirq_suspended = false;
675
676         if (skip_resume) {
677                 /*
678                  * The device is going to be left in suspend, but it might not
679                  * have been in runtime suspend before the system suspended, so
680                  * its runtime PM status needs to be updated to avoid confusing
681                  * the runtime PM framework when runtime PM is enabled for the
682                  * device again.
683                  */
684                 pm_runtime_set_suspended(dev);
685                 dev_pm_skip_next_resume_phases(dev);
686         }
687
688 Out:
689         complete_all(&dev->power.completion);
690         TRACE_RESUME(error);
691         return error;
692 }
693
694 static bool is_async(struct device *dev)
695 {
696         return dev->power.async_suspend && pm_async_enabled
697                 && !pm_trace_is_enabled();
698 }
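/*
 * A device only qualifies as "async" here if it opted in and the global
 * pm_async knob (/sys/power/pm_async) is enabled.  The opt-in usually happens
 * at probe time (sketch, hypothetical driver):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		device_enable_async_suspend(&pdev->dev);
 *		return 0;
 *	}
 */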
699
700 static void async_resume_noirq(void *data, async_cookie_t cookie)
701 {
702         struct device *dev = (struct device *)data;
703         int error;
704
705         error = device_resume_noirq(dev, pm_transition, true);
706         if (error)
707                 pm_dev_err(dev, pm_transition, " async", error);
708
709         put_device(dev);
710 }
711
712 void dpm_noirq_resume_devices(pm_message_t state)
713 {
714         struct device *dev;
715         ktime_t starttime = ktime_get();
716
717         trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
718         mutex_lock(&dpm_list_mtx);
719         pm_transition = state;
720
721         /*
722          * Advance the async threads upfront,
723          * in case the starting of async threads is
724          * delayed by non-async resuming devices.
725          */
726         list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
727                 reinit_completion(&dev->power.completion);
728                 if (is_async(dev)) {
729                         get_device(dev);
730                         async_schedule(async_resume_noirq, dev);
731                 }
732         }
733
734         while (!list_empty(&dpm_noirq_list)) {
735                 dev = to_device(dpm_noirq_list.next);
736                 get_device(dev);
737                 list_move_tail(&dev->power.entry, &dpm_late_early_list);
738                 mutex_unlock(&dpm_list_mtx);
739
740                 if (!is_async(dev)) {
741                         int error;
742
743                         error = device_resume_noirq(dev, state, false);
744                         if (error) {
745                                 suspend_stats.failed_resume_noirq++;
746                                 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
747                                 dpm_save_failed_dev(dev_name(dev));
748                                 pm_dev_err(dev, state, " noirq", error);
749                         }
750                 }
751
752                 mutex_lock(&dpm_list_mtx);
753                 put_device(dev);
754         }
755         mutex_unlock(&dpm_list_mtx);
756         async_synchronize_full();
757         dpm_show_time(starttime, state, 0, "noirq");
758         trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
759 }
760
761 void dpm_noirq_end(void)
762 {
763         resume_device_irqs();
764         device_wakeup_disarm_wake_irqs();
765         cpuidle_resume();
766 }
767
768 /**
769  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
770  * @state: PM transition of the system being carried out.
771  *
772  * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
773  * allow device drivers' interrupt handlers to be called.
774  */
775 void dpm_resume_noirq(pm_message_t state)
776 {
777         dpm_noirq_resume_devices(state);
778         dpm_noirq_end();
779 }
780
781 static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
782                                                 pm_message_t state,
783                                                 const char **info_p)
784 {
785         pm_callback_t callback;
786         const char *info;
787
788         if (dev->pm_domain) {
789                 info = "early power domain ";
790                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
791         } else if (dev->type && dev->type->pm) {
792                 info = "early type ";
793                 callback = pm_late_early_op(dev->type->pm, state);
794         } else if (dev->class && dev->class->pm) {
795                 info = "early class ";
796                 callback = pm_late_early_op(dev->class->pm, state);
797         } else if (dev->bus && dev->bus->pm) {
798                 info = "early bus ";
799                 callback = pm_late_early_op(dev->bus->pm, state);
800         } else {
801                 return NULL;
802         }
803
804         if (info_p)
805                 *info_p = info;
806
807         return callback;
808 }
809
810 /**
811  * device_resume_early - Execute an "early resume" callback for given device.
812  * @dev: Device to handle.
813  * @state: PM transition of the system being carried out.
814  * @async: If true, the device is being resumed asynchronously.
815  *
816  * Runtime PM is disabled for @dev while this function is being executed.
817  */
818 static int device_resume_early(struct device *dev, pm_message_t state, bool async)
819 {
820         pm_callback_t callback;
821         const char *info;
822         int error = 0;
823
824         TRACE_DEVICE(dev);
825         TRACE_RESUME(0);
826
827         if (dev->power.syscore || dev->power.direct_complete)
828                 goto Out;
829
830         if (!dev->power.is_late_suspended)
831                 goto Out;
832
833         dpm_wait_for_superior(dev, async);
834
835         callback = dpm_subsys_resume_early_cb(dev, state, &info);
836
837         if (!callback && dev->driver && dev->driver->pm) {
838                 info = "early driver ";
839                 callback = pm_late_early_op(dev->driver->pm, state);
840         }
841
842         error = dpm_run_callback(callback, dev, state, info);
843         dev->power.is_late_suspended = false;
844
845  Out:
846         TRACE_RESUME(error);
847
848         pm_runtime_enable(dev);
849         complete_all(&dev->power.completion);
850         return error;
851 }
852
853 static void async_resume_early(void *data, async_cookie_t cookie)
854 {
855         struct device *dev = (struct device *)data;
856         int error;
857
858         error = device_resume_early(dev, pm_transition, true);
859         if (error)
860                 pm_dev_err(dev, pm_transition, " async", error);
861
862         put_device(dev);
863 }
864
865 /**
866  * dpm_resume_early - Execute "early resume" callbacks for all devices.
867  * @state: PM transition of the system being carried out.
868  */
869 void dpm_resume_early(pm_message_t state)
870 {
871         struct device *dev;
872         ktime_t starttime = ktime_get();
873
874         trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
875         mutex_lock(&dpm_list_mtx);
876         pm_transition = state;
877
878         /*
879          * Advance the async threads upfront,
880          * in case the starting of async threads is
881          * delayed by non-async resuming devices.
882          */
883         list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
884                 reinit_completion(&dev->power.completion);
885                 if (is_async(dev)) {
886                         get_device(dev);
887                         async_schedule(async_resume_early, dev);
888                 }
889         }
890
891         while (!list_empty(&dpm_late_early_list)) {
892                 dev = to_device(dpm_late_early_list.next);
893                 get_device(dev);
894                 list_move_tail(&dev->power.entry, &dpm_suspended_list);
895                 mutex_unlock(&dpm_list_mtx);
896
897                 if (!is_async(dev)) {
898                         int error;
899
900                         error = device_resume_early(dev, state, false);
901                         if (error) {
902                                 suspend_stats.failed_resume_early++;
903                                 dpm_save_failed_step(SUSPEND_RESUME_EARLY);
904                                 dpm_save_failed_dev(dev_name(dev));
905                                 pm_dev_err(dev, state, " early", error);
906                         }
907                 }
908                 mutex_lock(&dpm_list_mtx);
909                 put_device(dev);
910         }
911         mutex_unlock(&dpm_list_mtx);
912         async_synchronize_full();
913         dpm_show_time(starttime, state, 0, "early");
914         trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
915 }
916
917 /**
918  * dpm_resume_start - Execute "noirq" and "early" device callbacks.
919  * @state: PM transition of the system being carried out.
920  */
921 void dpm_resume_start(pm_message_t state)
922 {
923         dpm_resume_noirq(state);
924         dpm_resume_early(state);
925 }
926 EXPORT_SYMBOL_GPL(dpm_resume_start);
927
928 /**
929  * device_resume - Execute "resume" callbacks for given device.
930  * @dev: Device to handle.
931  * @state: PM transition of the system being carried out.
932  * @async: If true, the device is being resumed asynchronously.
933  */
934 static int device_resume(struct device *dev, pm_message_t state, bool async)
935 {
936         pm_callback_t callback = NULL;
937         const char *info = NULL;
938         int error = 0;
939         DECLARE_DPM_WATCHDOG_ON_STACK(wd);
940
941         TRACE_DEVICE(dev);
942         TRACE_RESUME(0);
943
944         if (dev->power.syscore)
945                 goto Complete;
946
947         if (dev->power.direct_complete) {
948                 /* Match the pm_runtime_disable() in __device_suspend(). */
949                 pm_runtime_enable(dev);
950                 goto Complete;
951         }
952
953         dpm_wait_for_superior(dev, async);
954         dpm_watchdog_set(&wd, dev);
955         device_lock(dev);
956
957         /*
958          * This is a fib.  But we'll allow new children to be added below
959          * a resumed device, even if the device hasn't been completed yet.
960          */
961         dev->power.is_prepared = false;
962
963         if (!dev->power.is_suspended)
964                 goto Unlock;
965
966         if (dev->pm_domain) {
967                 info = "power domain ";
968                 callback = pm_op(&dev->pm_domain->ops, state);
969                 goto Driver;
970         }
971
972         if (dev->type && dev->type->pm) {
973                 info = "type ";
974                 callback = pm_op(dev->type->pm, state);
975                 goto Driver;
976         }
977
978         if (dev->class && dev->class->pm) {
979                 info = "class ";
980                 callback = pm_op(dev->class->pm, state);
981                 goto Driver;
982         }
983
984         if (dev->bus) {
985                 if (dev->bus->pm) {
986                         info = "bus ";
987                         callback = pm_op(dev->bus->pm, state);
988                 } else if (dev->bus->resume) {
989                         info = "legacy bus ";
990                         callback = dev->bus->resume;
991                         goto End;
992                 }
993         }
994
995  Driver:
996         if (!callback && dev->driver && dev->driver->pm) {
997                 info = "driver ";
998                 callback = pm_op(dev->driver->pm, state);
999         }
1000
1001  End:
1002         error = dpm_run_callback(callback, dev, state, info);
1003         dev->power.is_suspended = false;
1004
1005  Unlock:
1006         device_unlock(dev);
1007         dpm_watchdog_clear(&wd);
1008
1009  Complete:
1010         complete_all(&dev->power.completion);
1011
1012         TRACE_RESUME(error);
1013
1014         return error;
1015 }
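/*
 * power.direct_complete, checked near the top of device_resume(), is set in
 * the prepare phase when a device's ->prepare() callback returns a positive
 * value and the device is runtime-suspended: the full suspend/resume callback
 * sequence is then skipped and only runtime PM gets re-enabled here.  A driver
 * opts in roughly like this (sketch, hypothetical names):
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		return pm_runtime_suspended(dev);
 *	}
 */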
1016
1017 static void async_resume(void *data, async_cookie_t cookie)
1018 {
1019         struct device *dev = (struct device *)data;
1020         int error;
1021
1022         error = device_resume(dev, pm_transition, true);
1023         if (error)
1024                 pm_dev_err(dev, pm_transition, " async", error);
1025         put_device(dev);
1026 }
1027
1028 /**
1029  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
1030  * @state: PM transition of the system being carried out.
1031  *
1032  * Execute the appropriate "resume" callback for all devices whose status
1033  * indicates that they are suspended.
1034  */
1035 void dpm_resume(pm_message_t state)
1036 {
1037         struct device *dev;
1038         ktime_t starttime = ktime_get();
1039
1040         trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1041         might_sleep();
1042
1043         mutex_lock(&dpm_list_mtx);
1044         pm_transition = state;
1045         async_error = 0;
1046
1047         list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
1048                 reinit_completion(&dev->power.completion);
1049                 if (is_async(dev)) {
1050                         get_device(dev);
1051                         async_schedule(async_resume, dev);
1052                 }
1053         }
1054
1055         while (!list_empty(&dpm_suspended_list)) {
1056                 dev = to_device(dpm_suspended_list.next);
1057                 get_device(dev);
1058                 if (!is_async(dev)) {
1059                         int error;
1060
1061                         mutex_unlock(&dpm_list_mtx);
1062
1063                         error = device_resume(dev, state, false);
1064                         if (error) {
1065                                 suspend_stats.failed_resume++;
1066                                 dpm_save_failed_step(SUSPEND_RESUME);
1067                                 dpm_save_failed_dev(dev_name(dev));
1068                                 pm_dev_err(dev, state, "", error);
1069                         }
1070
1071                         mutex_lock(&dpm_list_mtx);
1072                 }
1073                 if (!list_empty(&dev->power.entry))
1074                         list_move_tail(&dev->power.entry, &dpm_prepared_list);
1075                 put_device(dev);
1076         }
1077         mutex_unlock(&dpm_list_mtx);
1078         async_synchronize_full();
1079         dpm_show_time(starttime, state, 0, NULL);
1080
1081         cpufreq_resume();
1082         devfreq_resume();
1083         trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1084 }
1085
1086 /**
1087  * device_complete - Complete a PM transition for given device.
1088  * @dev: Device to handle.
1089  * @state: PM transition of the system being carried out.
1090  */
1091 static void device_complete(struct device *dev, pm_message_t state)
1092 {
1093         void (*callback)(struct device *) = NULL;
1094         const char *info = NULL;
1095
1096         if (dev->power.syscore)
1097                 return;
1098
1099         device_lock(dev);
1100
1101         if (dev->pm_domain) {
1102                 info = "completing power domain ";
1103                 callback = dev->pm_domain->ops.complete;
1104         } else if (dev->type && dev->type->pm) {
1105                 info = "completing type ";
1106                 callback = dev->type->pm->complete;
1107         } else if (dev->class && dev->class->pm) {
1108                 info = "completing class ";
1109                 callback = dev->class->pm->complete;
1110         } else if (dev->bus && dev->bus->pm) {
1111                 info = "completing bus ";
1112                 callback = dev->bus->pm->complete;
1113         }
1114
1115         if (!callback && dev->driver && dev->driver->pm) {
1116                 info = "completing driver ";
1117                 callback = dev->driver->pm->complete;
1118         }
1119
1120         if (callback) {
1121                 pm_dev_dbg(dev, state, info);
1122                 callback(dev);
1123         }
1124
1125         device_unlock(dev);
1126
1127         pm_runtime_put(dev);
1128 }
1129
1130 /**
1131  * dpm_complete - Complete a PM transition for all non-sysdev devices.
1132  * @state: PM transition of the system being carried out.
1133  *
1134  * Execute the ->complete() callbacks for all devices whose PM status is not
1135  * DPM_ON (this allows new devices to be registered).
1136  */
1137 void dpm_complete(pm_message_t state)
1138 {
1139         struct list_head list;
1140
1141         trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1142         might_sleep();
1143
1144         INIT_LIST_HEAD(&list);
1145         mutex_lock(&dpm_list_mtx);
1146         while (!list_empty(&dpm_prepared_list)) {
1147                 struct device *dev = to_device(dpm_prepared_list.prev);
1148
1149                 get_device(dev);
1150                 dev->power.is_prepared = false;
1151                 list_move(&dev->power.entry, &list);
1152                 mutex_unlock(&dpm_list_mtx);
1153
1154                 trace_device_pm_callback_start(dev, "", state.event);
1155                 device_complete(dev, state);
1156                 trace_device_pm_callback_end(dev, 0);
1157
1158                 mutex_lock(&dpm_list_mtx);
1159                 put_device(dev);
1160         }
1161         list_splice(&list, &dpm_list);
1162         mutex_unlock(&dpm_list_mtx);
1163
1164         /* Allow device probing and trigger re-probing of deferred devices */
1165         device_unblock_probing();
1166         trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1167 }
1168
1169 /**
1170  * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1171  * @state: PM transition of the system being carried out.
1172  *
1173  * Execute "resume" callbacks for all devices and complete the PM transition of
1174  * the system.
1175  */
1176 void dpm_resume_end(pm_message_t state)
1177 {
1178         dpm_resume(state);
1179         dpm_complete(state);
1180 }
1181 EXPORT_SYMBOL_GPL(dpm_resume_end);
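/*
 * Big-picture ordering of the phases implemented in this file (see
 * Documentation/driver-api/pm/devices.rst):
 *
 *	dpm_prepare() -> dpm_suspend() -> dpm_suspend_late() -> dpm_suspend_noirq()
 *		... the system sleeps ...
 *	dpm_resume_noirq() -> dpm_resume_early() -> dpm_resume() -> dpm_complete()
 *
 * dpm_suspend_end(), dpm_resume_start() and dpm_resume_end() are convenience
 * wrappers that run two adjacent phases back to back.
 */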
1182
1183
1184 /*------------------------- Suspend routines -------------------------*/
1185
1186 /**
1187  * resume_event - Return a "resume" message for given "suspend" sleep state.
1188  * @sleep_state: PM message representing a sleep state.
1189  *
1190  * Return a PM message representing the resume event corresponding to given
1191  * sleep state.
1192  */
1193 static pm_message_t resume_event(pm_message_t sleep_state)
1194 {
1195         switch (sleep_state.event) {
1196         case PM_EVENT_SUSPEND:
1197                 return PMSG_RESUME;
1198         case PM_EVENT_FREEZE:
1199         case PM_EVENT_QUIESCE:
1200                 return PMSG_RECOVER;
1201         case PM_EVENT_HIBERNATE:
1202                 return PMSG_RESTORE;
1203         }
1204         return PMSG_ON;
1205 }
1206
1207 static void dpm_superior_set_must_resume(struct device *dev)
1208 {
1209         struct device_link *link;
1210         int idx;
1211
1212         if (dev->parent)
1213                 dev->parent->power.must_resume = true;
1214
1215         idx = device_links_read_lock();
1216
1217         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
1218                 link->supplier->power.must_resume = true;
1219
1220         device_links_read_unlock(idx);
1221 }
1222
1223 static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
1224                                                  pm_message_t state,
1225                                                  const char **info_p)
1226 {
1227         pm_callback_t callback;
1228         const char *info;
1229
1230         if (dev->pm_domain) {
1231                 info = "noirq power domain ";
1232                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
1233         } else if (dev->type && dev->type->pm) {
1234                 info = "noirq type ";
1235                 callback = pm_noirq_op(dev->type->pm, state);
1236         } else if (dev->class && dev->class->pm) {
1237                 info = "noirq class ";
1238                 callback = pm_noirq_op(dev->class->pm, state);
1239         } else if (dev->bus && dev->bus->pm) {
1240                 info = "noirq bus ";
1241                 callback = pm_noirq_op(dev->bus->pm, state);
1242         } else {
1243                 return NULL;
1244         }
1245
1246         if (info_p)
1247                 *info_p = info;
1248
1249         return callback;
1250 }
1251
1252 static bool device_must_resume(struct device *dev, pm_message_t state,
1253                                bool no_subsys_suspend_noirq)
1254 {
1255         pm_message_t resume_msg = resume_event(state);
1256
1257         /*
1258          * If all of the device driver's "noirq", "late" and "early" callbacks
1259          * are invoked directly by the core, the decision to allow the device to
1260          * stay in suspend can be based on its current runtime PM status and its
1261          * wakeup settings.
1262          */
1263         if (no_subsys_suspend_noirq &&
1264             !dpm_subsys_suspend_late_cb(dev, state, NULL) &&
1265             !dpm_subsys_resume_early_cb(dev, resume_msg, NULL) &&
1266             !dpm_subsys_resume_noirq_cb(dev, resume_msg, NULL))
1267                 return !pm_runtime_status_suspended(dev) &&
1268                         (resume_msg.event != PM_EVENT_RESUME ||
1269                          (device_can_wakeup(dev) && !device_may_wakeup(dev)));
1270
1271         /*
1272          * The only safe strategy here is to require that if the device may not
1273          * be left in suspend, resume callbacks must be invoked for it.
1274          */
1275         return !dev->power.may_skip_resume;
1276 }
1277
1278 /**
1279  * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1280  * @dev: Device to handle.
1281  * @state: PM transition of the system being carried out.
1282  * @async: If true, the device is being suspended asynchronously.
1283  *
1284  * The driver of @dev will not receive interrupts while this function is being
1285  * executed.
1286  */
1287 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1288 {
1289         pm_callback_t callback;
1290         const char *info;
1291         bool no_subsys_cb = false;
1292         int error = 0;
1293
1294         TRACE_DEVICE(dev);
1295         TRACE_SUSPEND(0);
1296
1297         dpm_wait_for_subordinate(dev, async);
1298
1299         if (async_error)
1300                 goto Complete;
1301
1302         if (pm_wakeup_pending()) {
1303                 async_error = -EBUSY;
1304                 goto Complete;
1305         }
1306
1307         if (dev->power.syscore || dev->power.direct_complete)
1308                 goto Complete;
1309
1310         callback = dpm_subsys_suspend_noirq_cb(dev, state, &info);
1311         if (callback)
1312                 goto Run;
1313
1314         no_subsys_cb = !dpm_subsys_suspend_late_cb(dev, state, NULL);
1315
1316         if (dev_pm_smart_suspend_and_suspended(dev) && no_subsys_cb)
1317                 goto Skip;
1318
1319         if (dev->driver && dev->driver->pm) {
1320                 info = "noirq driver ";
1321                 callback = pm_noirq_op(dev->driver->pm, state);
1322         }
1323
1324 Run:
1325         error = dpm_run_callback(callback, dev, state, info);
1326         if (error) {
1327                 async_error = error;
1328                 goto Complete;
1329         }
1330
1331 Skip:
1332         dev->power.is_noirq_suspended = true;
1333
1334         if (dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED)) {
1335                 dev->power.must_resume = dev->power.must_resume ||
1336                                 atomic_read(&dev->power.usage_count) > 1 ||
1337                                 device_must_resume(dev, state, no_subsys_cb);
1338         } else {
1339                 dev->power.must_resume = true;
1340         }
1341
1342         if (dev->power.must_resume)
1343                 dpm_superior_set_must_resume(dev);
1344
1345 Complete:
1346         complete_all(&dev->power.completion);
1347         TRACE_SUSPEND(error);
1348         return error;
1349 }
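/*
 * The DPM_FLAG_LEAVE_SUSPENDED handling above only applies to drivers that
 * opted in.  A driver that wants the core to consider leaving its device
 * suspended across a system resume would set the flags at probe time, e.g.
 * (sketch):
 *
 *	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
 *				     DPM_FLAG_LEAVE_SUSPENDED);
 */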
1350
1351 static void async_suspend_noirq(void *data, async_cookie_t cookie)
1352 {
1353         struct device *dev = (struct device *)data;
1354         int error;
1355
1356         error = __device_suspend_noirq(dev, pm_transition, true);
1357         if (error) {
1358                 dpm_save_failed_dev(dev_name(dev));
1359                 pm_dev_err(dev, pm_transition, " async", error);
1360         }
1361
1362         put_device(dev);
1363 }
1364
1365 static int device_suspend_noirq(struct device *dev)
1366 {
1367         reinit_completion(&dev->power.completion);
1368
1369         if (is_async(dev)) {
1370                 get_device(dev);
1371                 async_schedule(async_suspend_noirq, dev);
1372                 return 0;
1373         }
1374         return __device_suspend_noirq(dev, pm_transition, false);
1375 }
1376
1377 void dpm_noirq_begin(void)
1378 {
1379         cpuidle_pause();
1380         device_wakeup_arm_wake_irqs();
1381         suspend_device_irqs();
1382 }
1383
1384 int dpm_noirq_suspend_devices(pm_message_t state)
1385 {
1386         ktime_t starttime = ktime_get();
1387         int error = 0;
1388
1389         trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1390         mutex_lock(&dpm_list_mtx);
1391         pm_transition = state;
1392         async_error = 0;
1393
1394         while (!list_empty(&dpm_late_early_list)) {
1395                 struct device *dev = to_device(dpm_late_early_list.prev);
1396
1397                 get_device(dev);
1398                 mutex_unlock(&dpm_list_mtx);
1399
1400                 error = device_suspend_noirq(dev);
1401
1402                 mutex_lock(&dpm_list_mtx);
1403                 if (error) {
1404                         pm_dev_err(dev, state, " noirq", error);
1405                         dpm_save_failed_dev(dev_name(dev));
1406                         put_device(dev);
1407                         break;
1408                 }
1409                 if (!list_empty(&dev->power.entry))
1410                         list_move(&dev->power.entry, &dpm_noirq_list);
1411                 put_device(dev);
1412
1413                 if (async_error)
1414                         break;
1415         }
1416         mutex_unlock(&dpm_list_mtx);
1417         async_synchronize_full();
1418         if (!error)
1419                 error = async_error;
1420
1421         if (error) {
1422                 suspend_stats.failed_suspend_noirq++;
1423                 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1424         }
1425         dpm_show_time(starttime, state, error, "noirq");
1426         trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1427         return error;
1428 }
1429
1430 /**
1431  * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1432  * @state: PM transition of the system being carried out.
1433  *
1434  * Prevent device drivers' interrupt handlers from being called and invoke
1435  * "noirq" suspend callbacks for all non-sysdev devices.
1436  */
1437 int dpm_suspend_noirq(pm_message_t state)
1438 {
1439         int ret;
1440
1441         dpm_noirq_begin();
1442         ret = dpm_noirq_suspend_devices(state);
1443         if (ret)
1444                 dpm_resume_noirq(resume_event(state));
1445
1446         return ret;
1447 }
1448
1449 static void dpm_propagate_wakeup_to_parent(struct device *dev)
1450 {
1451         struct device *parent = dev->parent;
1452
1453         if (!parent)
1454                 return;
1455
1456         spin_lock_irq(&parent->power.lock);
1457
1458         if (dev->power.wakeup_path && !parent->power.ignore_children)
1459                 parent->power.wakeup_path = true;
1460
1461         spin_unlock_irq(&parent->power.lock);
1462 }
1463
1464 static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
1465                                                 pm_message_t state,
1466                                                 const char **info_p)
1467 {
1468         pm_callback_t callback;
1469         const char *info;
1470
1471         if (dev->pm_domain) {
1472                 info = "late power domain ";
1473                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
1474         } else if (dev->type && dev->type->pm) {
1475                 info = "late type ";
1476                 callback = pm_late_early_op(dev->type->pm, state);
1477         } else if (dev->class && dev->class->pm) {
1478                 info = "late class ";
1479                 callback = pm_late_early_op(dev->class->pm, state);
1480         } else if (dev->bus && dev->bus->pm) {
1481                 info = "late bus ";
1482                 callback = pm_late_early_op(dev->bus->pm, state);
1483         } else {
1484                 return NULL;
1485         }
1486
1487         if (info_p)
1488                 *info_p = info;
1489
1490         return callback;
1491 }
1492
1493 /**
1494  * __device_suspend_late - Execute a "late suspend" callback for given device.
1495  * @dev: Device to handle.
1496  * @state: PM transition of the system being carried out.
1497  * @async: If true, the device is being suspended asynchronously.
1498  *
1499  * Runtime PM is disabled for @dev while this function is being executed.
1500  */
1501 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1502 {
1503         pm_callback_t callback;
1504         const char *info;
1505         int error = 0;
1506
1507         TRACE_DEVICE(dev);
1508         TRACE_SUSPEND(0);
1509
1510         __pm_runtime_disable(dev, false);
1511
1512         dpm_wait_for_subordinate(dev, async);
1513
1514         if (async_error)
1515                 goto Complete;
1516
1517         if (pm_wakeup_pending()) {
1518                 async_error = -EBUSY;
1519                 goto Complete;
1520         }
1521
1522         if (dev->power.syscore || dev->power.direct_complete)
1523                 goto Complete;
1524
1525         callback = dpm_subsys_suspend_late_cb(dev, state, &info);
1526         if (callback)
1527                 goto Run;
1528
1529         if (dev_pm_smart_suspend_and_suspended(dev) &&
1530             !dpm_subsys_suspend_noirq_cb(dev, state, NULL))
1531                 goto Skip;
1532
1533         if (dev->driver && dev->driver->pm) {
1534                 info = "late driver ";
1535                 callback = pm_late_early_op(dev->driver->pm, state);
1536         }
1537
1538 Run:
1539         error = dpm_run_callback(callback, dev, state, info);
1540         if (error) {
1541                 async_error = error;
1542                 goto Complete;
1543         }
1544         dpm_propagate_wakeup_to_parent(dev);
1545
1546 Skip:
1547         dev->power.is_late_suspended = true;
1548
1549 Complete:
1550         TRACE_SUSPEND(error);
1551         complete_all(&dev->power.completion);
1552         return error;
1553 }
1554
1555 static void async_suspend_late(void *data, async_cookie_t cookie)
1556 {
1557         struct device *dev = (struct device *)data;
1558         int error;
1559
1560         error = __device_suspend_late(dev, pm_transition, true);
1561         if (error) {
1562                 dpm_save_failed_dev(dev_name(dev));
1563                 pm_dev_err(dev, pm_transition, " async", error);
1564         }
1565         put_device(dev);
1566 }
1567
1568 static int device_suspend_late(struct device *dev)
1569 {
1570         reinit_completion(&dev->power.completion);
1571
1572         if (is_async(dev)) {
1573                 get_device(dev);
1574                 async_schedule(async_suspend_late, dev);
1575                 return 0;
1576         }
1577
1578         return __device_suspend_late(dev, pm_transition, false);
1579 }
1580
1581 /**
1582  * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1583  * @state: PM transition of the system being carried out.
1584  */
1585 int dpm_suspend_late(pm_message_t state)
1586 {
1587         ktime_t starttime = ktime_get();
1588         int error = 0;
1589
1590         trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1591         mutex_lock(&dpm_list_mtx);
1592         pm_transition = state;
1593         async_error = 0;
1594
1595         while (!list_empty(&dpm_suspended_list)) {
1596                 struct device *dev = to_device(dpm_suspended_list.prev);
1597
1598                 get_device(dev);
1599                 mutex_unlock(&dpm_list_mtx);
1600
1601                 error = device_suspend_late(dev);
1602
1603                 mutex_lock(&dpm_list_mtx);
1604                 if (!list_empty(&dev->power.entry))
1605                         list_move(&dev->power.entry, &dpm_late_early_list);
1606
1607                 if (error) {
1608                         pm_dev_err(dev, state, " late", error);
1609                         dpm_save_failed_dev(dev_name(dev));
1610                         put_device(dev);
1611                         break;
1612                 }
1613                 put_device(dev);
1614
1615                 if (async_error)
1616                         break;
1617         }
1618         mutex_unlock(&dpm_list_mtx);
1619         async_synchronize_full();
1620         if (!error)
1621                 error = async_error;
1622         if (error) {
1623                 suspend_stats.failed_suspend_late++;
1624                 dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1625                 dpm_resume_early(resume_event(state));
1626         }
1627         dpm_show_time(starttime, state, error, "late");
1628         trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1629         return error;
1630 }
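
/*
 * Illustrative sketch (not part of this file): a driver whose suspend work
 * belongs in the "late" phase driven by dpm_suspend_late() can populate the
 * late/early callbacks in its dev_pm_ops.  The foo_* names below are
 * hypothetical.
 *
 *	#include <linux/pm.h>
 *
 *	static int foo_suspend_late(struct device *dev)
 *	{
 *		// Runs after all ->suspend() callbacks, with runtime PM
 *		// already disabled for this device.
 *		return 0;
 *	}
 *
 *	static int foo_resume_early(struct device *dev)
 *	{
 *		return 0;
 *	}
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_LATE_SYSTEM_SLEEP_PM_OPS(foo_suspend_late, foo_resume_early)
 *	};
 */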
1631
1632 /**
1633  * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1634  * @state: PM transition of the system being carried out.
1635  */
1636 int dpm_suspend_end(pm_message_t state)
1637 {
1638         int error = dpm_suspend_late(state);
1639         if (error)
1640                 return error;
1641
1642         error = dpm_suspend_noirq(state);
1643         if (error) {
1644                 dpm_resume_early(resume_event(state));
1645                 return error;
1646         }
1647
1648         return 0;
1649 }
1650 EXPORT_SYMBOL_GPL(dpm_suspend_end);
1651
1652 /**
1653  * legacy_suspend - Execute a legacy (bus or class) suspend callback for the device.
1654  * @dev: Device to suspend.
1655  * @state: PM transition of the system being carried out.
1656  * @cb: Suspend callback to execute.
1657  * @info: string description of caller.
1658  */
1659 static int legacy_suspend(struct device *dev, pm_message_t state,
1660                           int (*cb)(struct device *dev, pm_message_t state),
1661                           const char *info)
1662 {
1663         int error;
1664         ktime_t calltime;
1665
1666         calltime = initcall_debug_start(dev, cb);
1667
1668         trace_device_pm_callback_start(dev, info, state.event);
1669         error = cb(dev, state);
1670         trace_device_pm_callback_end(dev, error);
1671         suspend_report_result(cb, error);
1672
1673         initcall_debug_report(dev, calltime, cb, error);
1674
1675         return error;
1676 }
1677
1678 static void dpm_clear_superiors_direct_complete(struct device *dev)
1679 {
1680         struct device_link *link;
1681         int idx;
1682
1683         if (dev->parent) {
1684                 spin_lock_irq(&dev->parent->power.lock);
1685                 dev->parent->power.direct_complete = false;
1686                 spin_unlock_irq(&dev->parent->power.lock);
1687         }
1688
1689         idx = device_links_read_lock();
1690
1691         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
1692                 spin_lock_irq(&link->supplier->power.lock);
1693                 link->supplier->power.direct_complete = false;
1694                 spin_unlock_irq(&link->supplier->power.lock);
1695         }
1696
1697         device_links_read_unlock(idx);
1698 }
1699
1700 /**
1701  * __device_suspend - Execute "suspend" callbacks for given device.
1702  * @dev: Device to handle.
1703  * @state: PM transition of the system being carried out.
1704  * @async: If true, the device is being suspended asynchronously.
1705  */
1706 static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1707 {
1708         pm_callback_t callback = NULL;
1709         const char *info = NULL;
1710         int error = 0;
1711         DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1712
1713         TRACE_DEVICE(dev);
1714         TRACE_SUSPEND(0);
1715
1716         dpm_wait_for_subordinate(dev, async);
1717
1718         if (async_error) {
1719                 dev->power.direct_complete = false;
1720                 goto Complete;
1721         }
1722
1723         /*
1724          * If a device configured to wake up the system from sleep states
1725          * has been suspended at run time and there's a resume request pending
1726          * for it, this is equivalent to the device signaling wakeup, so the
1727          * system suspend operation should be aborted.
1728          */
1729         if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
1730                 pm_wakeup_event(dev, 0);
1731
1732         if (pm_wakeup_pending()) {
1733                 dev->power.direct_complete = false;
1734                 async_error = -EBUSY;
1735                 goto Complete;
1736         }
1737
1738         if (dev->power.syscore)
1739                 goto Complete;
1740
1741         if (dev->power.direct_complete) {
1742                 if (pm_runtime_status_suspended(dev)) {
1743                         pm_runtime_disable(dev);
1744                         if (pm_runtime_status_suspended(dev))
1745                                 goto Complete;
1746
1747                         pm_runtime_enable(dev);
1748                 }
1749                 dev->power.direct_complete = false;
1750         }
1751
1752         dev->power.may_skip_resume = false;
1753         dev->power.must_resume = false;
1754
1755         dpm_watchdog_set(&wd, dev);
1756         device_lock(dev);
1757
1758         if (dev->pm_domain) {
1759                 info = "power domain ";
1760                 callback = pm_op(&dev->pm_domain->ops, state);
1761                 goto Run;
1762         }
1763
1764         if (dev->type && dev->type->pm) {
1765                 info = "type ";
1766                 callback = pm_op(dev->type->pm, state);
1767                 goto Run;
1768         }
1769
1770         if (dev->class && dev->class->pm) {
1771                 info = "class ";
1772                 callback = pm_op(dev->class->pm, state);
1773                 goto Run;
1774         }
1775
1776         if (dev->bus) {
1777                 if (dev->bus->pm) {
1778                         info = "bus ";
1779                         callback = pm_op(dev->bus->pm, state);
1780                 } else if (dev->bus->suspend) {
1781                         pm_dev_dbg(dev, state, "legacy bus ");
1782                         error = legacy_suspend(dev, state, dev->bus->suspend,
1783                                                 "legacy bus ");
1784                         goto End;
1785                 }
1786         }
1787
1788  Run:
1789         if (!callback && dev->driver && dev->driver->pm) {
1790                 info = "driver ";
1791                 callback = pm_op(dev->driver->pm, state);
1792         }
1793
1794         error = dpm_run_callback(callback, dev, state, info);
1795
1796  End:
1797         if (!error) {
1798                 dev->power.is_suspended = true;
1799                 if (device_may_wakeup(dev))
1800                         dev->power.wakeup_path = true;
1801
1802                 dpm_propagate_wakeup_to_parent(dev);
1803                 dpm_clear_superiors_direct_complete(dev);
1804         }
1805
1806         device_unlock(dev);
1807         dpm_watchdog_clear(&wd);
1808
1809  Complete:
1810         if (error)
1811                 async_error = error;
1812
1813         complete_all(&dev->power.completion);
1814         TRACE_SUSPEND(error);
1815         return error;
1816 }
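
/*
 * The callback lookup above goes power domain -> type -> class -> bus and
 * falls back to the driver only when none of those layers provides one.
 * Illustrative sketch of the common driver-level case (the foo_* names are
 * hypothetical, not part of this file):
 *
 *	#include <linux/platform_device.h>
 *	#include <linux/pm.h>
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		// Quiesce the hardware; invoked from __device_suspend().
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		return 0;
 *	}
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *	};
 *
 *	static struct platform_driver foo_driver = {
 *		.driver = {
 *			.name = "foo",
 *			.pm = &foo_pm_ops,
 *		},
 *	};
 */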
1817
1818 static void async_suspend(void *data, async_cookie_t cookie)
1819 {
1820         struct device *dev = (struct device *)data;
1821         int error;
1822
1823         error = __device_suspend(dev, pm_transition, true);
1824         if (error) {
1825                 dpm_save_failed_dev(dev_name(dev));
1826                 pm_dev_err(dev, pm_transition, " async", error);
1827         }
1828
1829         put_device(dev);
1830 }
1831
1832 static int device_suspend(struct device *dev)
1833 {
1834         reinit_completion(&dev->power.completion);
1835
1836         if (is_async(dev)) {
1837                 get_device(dev);
1838                 async_schedule(async_suspend, dev);
1839                 return 0;
1840         }
1841
1842         return __device_suspend(dev, pm_transition, false);
1843 }
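
/*
 * device_suspend() takes the asynchronous path only for devices that have
 * opted in.  Illustrative sketch (hypothetical foo_probe, not part of this
 * file) of a driver enabling async suspend/resume for its device:
 *
 *	#include <linux/pm.h>
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		// Let the PM core run this device's suspend and resume
 *		// callbacks in parallel with those of other devices.
 *		device_enable_async_suspend(dev);
 *		return 0;
 *	}
 */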
1844
1845 /**
1846  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1847  * @state: PM transition of the system being carried out.
1848  */
1849 int dpm_suspend(pm_message_t state)
1850 {
1851         ktime_t starttime = ktime_get();
1852         int error = 0;
1853
1854         trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1855         might_sleep();
1856
1857         devfreq_suspend();
1858         cpufreq_suspend();
1859
1860         mutex_lock(&dpm_list_mtx);
1861         pm_transition = state;
1862         async_error = 0;
1863         while (!list_empty(&dpm_prepared_list)) {
1864                 struct device *dev = to_device(dpm_prepared_list.prev);
1865
1866                 get_device(dev);
1867                 mutex_unlock(&dpm_list_mtx);
1868
1869                 error = device_suspend(dev);
1870
1871                 mutex_lock(&dpm_list_mtx);
1872                 if (error) {
1873                         pm_dev_err(dev, state, "", error);
1874                         dpm_save_failed_dev(dev_name(dev));
1875                         put_device(dev);
1876                         break;
1877                 }
1878                 if (!list_empty(&dev->power.entry))
1879                         list_move(&dev->power.entry, &dpm_suspended_list);
1880                 put_device(dev);
1881                 if (async_error)
1882                         break;
1883         }
1884         mutex_unlock(&dpm_list_mtx);
1885         async_synchronize_full();
1886         if (!error)
1887                 error = async_error;
1888         if (error) {
1889                 suspend_stats.failed_suspend++;
1890                 dpm_save_failed_step(SUSPEND_SUSPEND);
1891         }
1892         dpm_show_time(starttime, state, error, NULL);
1893         trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1894         return error;
1895 }
1896
1897 /**
1898  * device_prepare - Prepare a device for system power transition.
1899  * @dev: Device to handle.
1900  * @state: PM transition of the system being carried out.
1901  *
1902  * Execute the ->prepare() callback(s) for the given device.  No new children of the
1903  * device may be registered after this function has returned.
1904  */
1905 static int device_prepare(struct device *dev, pm_message_t state)
1906 {
1907         int (*callback)(struct device *) = NULL;
1908         int ret = 0;
1909
1910         if (dev->power.syscore)
1911                 return 0;
1912
1913         WARN_ON(!pm_runtime_enabled(dev) &&
1914                 dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
1915                                               DPM_FLAG_LEAVE_SUSPENDED));
1916
1917         /*
1918          * If a device's parent goes into runtime suspend at the wrong time,
1919          * it won't be possible to resume the device.  To prevent this we
1920          * block runtime suspend here, during the prepare phase, and allow
1921          * it again during the complete phase.
1922          */
1923         pm_runtime_get_noresume(dev);
1924
1925         device_lock(dev);
1926
1927         dev->power.wakeup_path = false;
1928
1929         if (dev->power.no_pm_callbacks)
1930                 goto unlock;
1931
1932         if (dev->pm_domain)
1933                 callback = dev->pm_domain->ops.prepare;
1934         else if (dev->type && dev->type->pm)
1935                 callback = dev->type->pm->prepare;
1936         else if (dev->class && dev->class->pm)
1937                 callback = dev->class->pm->prepare;
1938         else if (dev->bus && dev->bus->pm)
1939                 callback = dev->bus->pm->prepare;
1940
1941         if (!callback && dev->driver && dev->driver->pm)
1942                 callback = dev->driver->pm->prepare;
1943
1944         if (callback)
1945                 ret = callback(dev);
1946
1947 unlock:
1948         device_unlock(dev);
1949
1950         if (ret < 0) {
1951                 suspend_report_result(callback, ret);
1952                 pm_runtime_put(dev);
1953                 return ret;
1954         }
1955         /*
1956          * A positive return value from ->prepare() means "this device appears
1957          * to be runtime-suspended and its state is fine, so if it really is
1958          * runtime-suspended, you can leave it in that state provided that you
1959          * will do the same thing with all of its descendants".  This only
1960          * applies to suspend transitions, however.
1961          */
1962         spin_lock_irq(&dev->power.lock);
1963         dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
1964                 ((pm_runtime_suspended(dev) && ret > 0) ||
1965                  dev->power.no_pm_callbacks) &&
1966                 !dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
1967         spin_unlock_irq(&dev->power.lock);
1968         return 0;
1969 }
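
/*
 * Illustrative sketch of the "positive return from ->prepare()" contract
 * described above (hypothetical foo_prepare, not part of this file): a
 * driver that is happy to leave a runtime-suspended device alone across the
 * transition can return 1 from its ->prepare() callback.
 *
 *	#include <linux/pm_runtime.h>
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		// A positive return value lets the PM core leave the device
 *		// runtime-suspended (direct_complete), unless
 *		// DPM_FLAG_NEVER_SKIP is set for it.
 *		return pm_runtime_suspended(dev);
 *	}
 */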
1970
1971 /**
1972  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1973  * @state: PM transition of the system being carried out.
1974  *
1975  * Execute the ->prepare() callback(s) for all devices.
1976  */
1977 int dpm_prepare(pm_message_t state)
1978 {
1979         int error = 0;
1980
1981         trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1982         might_sleep();
1983
1984         /*
1985          * Give the known devices a chance to complete their probes before
1986          * probing is disabled below. This sync point matters at least at
1987          * boot time and during hibernation restore.
1988          */
1989         wait_for_device_probe();
1990         /*
1991          * Probing devices while suspend or hibernation is in progress is
1992          * unsafe and would make system behavior unpredictable, so block
1993          * probing here and defer any probes instead. The normal behavior is
1994          * restored in dpm_complete().
1995          */
1996         device_block_probing();
1997
1998         mutex_lock(&dpm_list_mtx);
1999         while (!list_empty(&dpm_list)) {
2000                 struct device *dev = to_device(dpm_list.next);
2001
2002                 get_device(dev);
2003                 mutex_unlock(&dpm_list_mtx);
2004
2005                 trace_device_pm_callback_start(dev, "", state.event);
2006                 error = device_prepare(dev, state);
2007                 trace_device_pm_callback_end(dev, error);
2008
2009                 mutex_lock(&dpm_list_mtx);
2010                 if (error) {
2011                         if (error == -EAGAIN) {
2012                                 put_device(dev);
2013                                 error = 0;
2014                                 continue;
2015                         }
2016                         printk(KERN_INFO "PM: Device %s not prepared "
2017                                 "for power transition: code %d\n",
2018                                 dev_name(dev), error);
2019                         put_device(dev);
2020                         break;
2021                 }
2022                 dev->power.is_prepared = true;
2023                 if (!list_empty(&dev->power.entry))
2024                         list_move_tail(&dev->power.entry, &dpm_prepared_list);
2025                 put_device(dev);
2026         }
2027         mutex_unlock(&dpm_list_mtx);
2028         trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
2029         return error;
2030 }
2031
2032 /**
2033  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
2034  * @state: PM transition of the system being carried out.
2035  *
2036  * Prepare all non-sysdev devices for a system PM transition and execute "suspend"
2037  * callbacks for them.
2038  */
2039 int dpm_suspend_start(pm_message_t state)
2040 {
2041         int error;
2042
2043         error = dpm_prepare(state);
2044         if (error) {
2045                 suspend_stats.failed_prepare++;
2046                 dpm_save_failed_step(SUSPEND_PREPARE);
2047         } else
2048                 error = dpm_suspend(state);
2049         return error;
2050 }
2051 EXPORT_SYMBOL_GPL(dpm_suspend_start);
2052
2053 void __suspend_report_result(const char *function, void *fn, int ret)
2054 {
2055         if (ret)
2056                 printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
2057 }
2058 EXPORT_SYMBOL_GPL(__suspend_report_result);
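
/*
 * __suspend_report_result() is normally reached through the
 * suspend_report_result() macro.  Illustrative sketch (hypothetical foo_*
 * names, not part of this file):
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		int ret = foo_hw_quiesce(dev);
 *
 *		// On failure this logs "<caller>(): <callback> returns <ret>".
 *		suspend_report_result(foo_hw_quiesce, ret);
 *		return ret;
 *	}
 */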
2059
2060 /**
2061  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
2062  * @dev: Device to wait for.
2063  * @subordinate: Device that needs to wait for @dev.
2064  */
2065 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
2066 {
2067         dpm_wait(dev, subordinate->power.async_suspend);
2068         return async_error;
2069 }
2070 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
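
/*
 * Illustrative sketch of device_pm_wait_for_dev() (hypothetical foo_*
 * names, not part of this file): a device whose suspend must not start
 * before another device has finished its own PM transition can wait for
 * that device explicitly.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *		int error;
 *
 *		// Block until foo->companion has completed its suspend (or
 *		// resume) phase; returns a pending async error, if any.
 *		error = device_pm_wait_for_dev(dev, foo->companion);
 *		if (error)
 *			return error;
 *
 *		return 0;
 *	}
 */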
2071
2072 /**
2073  * dpm_for_each_dev - device iterator.
2074  * @data: data for the callback.
2075  * @fn: function to be called for each device.
2076  *
2077  * Iterate over devices in dpm_list, and call @fn for each device,
2078  * passing it @data.
2079  */
2080 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
2081 {
2082         struct device *dev;
2083
2084         if (!fn)
2085                 return;
2086
2087         device_pm_lock();
2088         list_for_each_entry(dev, &dpm_list, power.entry)
2089                 fn(dev, data);
2090         device_pm_unlock();
2091 }
2092 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
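
/*
 * Illustrative sketch of dpm_for_each_dev() (hypothetical names, not part
 * of this file): count the devices currently on dpm_list.
 *
 *	static void count_dev(struct device *dev, void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *	}
 *
 *	static unsigned int count_pm_devices(void)
 *	{
 *		unsigned int count = 0;
 *
 *		dpm_for_each_dev(&count, count_dev);
 *		return count;
 *	}
 */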
2093
2094 static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
2095 {
2096         if (!ops)
2097                 return true;
2098
2099         return !ops->prepare &&
2100                !ops->suspend &&
2101                !ops->suspend_late &&
2102                !ops->suspend_noirq &&
2103                !ops->resume_noirq &&
2104                !ops->resume_early &&
2105                !ops->resume &&
2106                !ops->complete;
2107 }
2108
2109 void device_pm_check_callbacks(struct device *dev)
2110 {
2111         spin_lock_irq(&dev->power.lock);
2112         dev->power.no_pm_callbacks =
2113                 (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
2114                  !dev->bus->suspend && !dev->bus->resume)) &&
2115                 (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2116                 (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2117                 (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2118                 (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2119                  !dev->driver->suspend && !dev->driver->resume));
2120         spin_unlock_irq(&dev->power.lock);
2121 }
2122
2123 bool dev_pm_smart_suspend_and_suspended(struct device *dev)
2124 {
2125         return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
2126                 pm_runtime_status_suspended(dev);
2127 }