/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched/mm.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <trace/events/rpm.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
{
	pm_callback_t cb;
	const struct dev_pm_ops *ops;

	if (dev->pm_domain)
		ops = &dev->pm_domain->ops;
	else if (dev->type && dev->type->pm)
		ops = dev->type->pm;
	else if (dev->class && dev->class->pm)
		ops = dev->class->pm;
	else if (dev->bus && dev->bus->pm)
		ops = dev->bus->pm;
	else
		ops = NULL;

	if (ops)
		cb = *(pm_callback_t *)((void *)ops + cb_offset);
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);

	return cb;
}

#define RPM_GET_CALLBACK(dev, callback) \
		__rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
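
/*
 * Illustrative note (not part of the original file): because every runtime PM
 * callback is a member of struct dev_pm_ops, a single offset-based lookup
 * covers all of them.  For example,
 *
 *	callback = RPM_GET_CALLBACK(dev, runtime_suspend);
 *
 * expands to
 *
 *	__rpm_get_callback(dev, offsetof(struct dev_pm_ops, runtime_suspend));
 *
 * which walks pm_domain -> type -> class -> bus and finally the driver's own
 * dev_pm_ops looking for a non-NULL ->runtime_suspend() pointer.
 */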

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
	unsigned long now = jiffies;
	unsigned long delta;

	delta = now - dev->power.accounting_timestamp;

	dev->power.accounting_timestamp = now;

	if (dev->power.disable_depth > 0)
		return;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_jiffies += delta;
	else
		dev->power.active_jiffies += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		hrtimer_cancel(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
u64 pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	u64 last_busy, expires = 0;
	u64 now = ktime_get_mono_fast_ns();

	if (!dev->power.use_autosuspend)
		goto out;

	autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		goto out;

	last_busy = READ_ONCE(dev->power.last_busy);

	expires = last_busy + (u64)autosuspend_delay * NSEC_PER_MSEC;
	if (expires <= now)
		expires = 0;	/* Already expired. */

 out:
	return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
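
/*
 * Illustrative sketch (not part of the original file): with a hypothetical
 * autosuspend_delay of 2000 ms, the expiration time works out to
 *
 *	expires = last_busy + 2000 * NSEC_PER_MSEC;	// last_busy + 2e9 ns
 *
 * so a device marked busy at time t will not be autosuspended before t + 2 s,
 * and callers see 0 once that moment has already passed.
 */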

static int dev_memalloc_noio(struct device *dev, void *data)
{
	return dev->power.memalloc_noio;
}

/**
 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 * @dev: Device to handle.
 * @enable: True for setting the flag and False for clearing the flag.
 *
 * Set the flag for all devices in the path from the device to the
 * root device in the device tree if @enable is true, otherwise clear
 * the flag for devices in the path whose siblings don't set the flag.
 *
 * The function should only be called by block device or network
 * device drivers to solve the deadlock problem during runtime
 * resume/suspend:
 *
 * If memory allocation with GFP_KERNEL is called inside the runtime
 * resume/suspend callback of any one of the device's ancestors (or the
 * block device itself), the deadlock may be triggered inside the
 * memory allocation since it might not complete until the block
 * device becomes active and the involved page I/O finishes.  This
 * situation was first pointed out by Alan Stern.  Network devices
 * are involved in iSCSI-type situations.
 *
 * The lock of dev_hotplug_mutex is held in the function for handling
 * hotplug race because pm_runtime_set_memalloc_noio() may be called
 * in async probe().
 *
 * The function should be called between device_add() and device_del()
 * on the affected device (block/network device).
 */
void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
{
	static DEFINE_MUTEX(dev_hotplug_mutex);

	mutex_lock(&dev_hotplug_mutex);
	for (;;) {
		bool enabled;

		/* hold power lock since bitfield is not SMP-safe. */
		spin_lock_irq(&dev->power.lock);
		enabled = dev->power.memalloc_noio;
		dev->power.memalloc_noio = enable;
		spin_unlock_irq(&dev->power.lock);

		/*
		 * No need to enable ancestors any more if the device
		 * has already been enabled.
		 */
		if (enabled && enable)
			break;

		dev = dev->parent;

		/*
		 * Clear the flag of the parent device only if all the
		 * children don't set the flag, because an ancestor's
		 * flag was set by any one of the descendants.
		 */
		if (!dev || (!enable &&
			     device_for_each_child(dev, NULL,
						   dev_memalloc_noio)))
			break;
	}
	mutex_unlock(&dev_hotplug_mutex);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
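
/*
 * Illustrative sketch (not part of the original file): a hypothetical block
 * device driver would typically flip the flag around device registration,
 * e.g.:
 *
 *	ret = device_add(dev);
 *	if (ret)
 *		return ret;
 *	pm_runtime_set_memalloc_noio(dev, true);
 *	...
 *	pm_runtime_set_memalloc_noio(dev, false);
 *	device_del(dev);
 */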

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	else if (atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!dev->power.ignore_children &&
			atomic_read(&dev->power.child_count))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
			&& dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
			&& dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (__dev_pm_qos_read_value(dev) == 0)
		retval = -EPERM;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}

static int rpm_get_suppliers(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
		int retval;

		if (!(link->flags & DL_FLAG_PM_RUNTIME) ||
		    READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
			continue;

		retval = pm_runtime_get_sync(link->supplier);
		/* Ignore suppliers with disabled runtime PM. */
		if (retval < 0 && retval != -EACCES) {
			pm_runtime_put_noidle(link->supplier);
			return retval;
		}
		refcount_inc(&link->rpm_active);
	}
	return 0;
}

static void rpm_put_suppliers(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
		if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
			continue;

		while (refcount_dec_not_one(&link->rpm_active))
			pm_runtime_put(link->supplier);
	}
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval, idx;
	bool use_links = dev->power.links_count > 0;

	if (dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);
	} else {
		spin_unlock_irq(&dev->power.lock);

		/*
		 * Resume suppliers if necessary.
		 *
		 * The device's runtime PM status cannot change until this
		 * routine returns, so it is safe to read the status outside of
		 * the lock.
		 */
		if (use_links && dev->power.runtime_status == RPM_RESUMING) {
			idx = device_links_read_lock();

			retval = rpm_get_suppliers(dev);
			if (retval)
				goto fail;

			device_links_read_unlock(idx);
		}
	}

	retval = cb(dev);

	if (dev->power.irq_safe) {
		spin_lock(&dev->power.lock);
	} else {
		/*
		 * If the device is suspending and the callback has returned
		 * success, drop the usage counters of the suppliers that have
		 * been reference counted on its resume.
		 *
		 * Do that if resume fails too.
		 */
		if (use_links
		    && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
		    || (dev->power.runtime_status == RPM_RESUMING && retval))) {
			idx = device_links_read_lock();

 fail:
			rpm_put_suppliers(dev);

			device_links_read_unlock(idx);
		}

		spin_lock_irq(&dev->power.lock);
	}

	return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.  If the ->runtime_idle callback
 * doesn't exist or if it returns 0, call rpm_suspend() with the RPM_AUTO flag.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	trace_rpm_idle_rcuidle(dev, rpmflags);
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	if (dev->power.no_callbacks)
		goto out;

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
		return 0;
	}

	dev->power.idle_notification = true;

	callback = RPM_GET_CALLBACK(dev, runtime_idle);

	if (callback)
		retval = __rpm_callback(callback, dev);

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
	int retval;

	if (!cb)
		return -ENOSYS;

	if (dev->power.memalloc_noio) {
		unsigned int noio_flag;

		/*
		 * Deadlock might be caused if memory allocation with
		 * GFP_KERNEL happens inside the runtime_suspend and
		 * runtime_resume callbacks of one block device's
		 * ancestor or the block device itself.  A network
		 * device might be regarded as part of an iSCSI block
		 * device, so network devices and their ancestors should
		 * be marked as memalloc_noio too.
		 */
		noio_flag = memalloc_noio_save();
		retval = __rpm_callback(cb, dev);
		memalloc_noio_restore(noio_flag);
	} else {
		retval = __rpm_callback(cb, dev);
	}

	dev->power.runtime_error = retval;
	return retval != -EACCES ? retval : -EIO;
}

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend.  If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags.  If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly.  When
 * ->runtime_suspend() succeeded, if a deferred resume was requested while
 * the callback was running then carry it out, otherwise send an idle
 * notification for the device's parent (if the suspend succeeded and both
 * ignore_children of parent->power and irq_safe of dev->power are unset).
 * If ->runtime_suspend() failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
 * flag is set and the next autosuspend-delay expiration time is in the
 * future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	trace_rpm_suspend_rcuidle(dev, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);

	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	else if (dev->power.runtime_status == RPM_RESUMING &&
	    !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		u64 expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires &&
			    dev->power.timer_expires <= expires)) {
				/*
				 * We add a slack of 25% to gather wakeups
				 * without sacrificing the granularity.
				 */
				u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
						    (NSEC_PER_MSEC >> 2);

				dev->power.timer_expires = expires;
				hrtimer_start_range_ns(&dev->power.suspend_timer,
						ns_to_ktime(expires),
						slack,
						HRTIMER_MODE_ABS);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	dev_pm_enable_wake_irq_check(dev, true);
	retval = rpm_callback(callback, dev);
	if (retval)
		goto fail;

 no_callback:
	__update_runtime_status(dev, RPM_SUSPENDED);
	pm_runtime_deactivate_timer(dev);

	if (dev->parent) {
		parent = dev->parent;
		atomic_add_unless(&parent->power.child_count, -1, 0);
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		dev->power.deferred_resume = false;
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}

 out:
	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);

	return retval;

 fail:
	dev_pm_disable_wake_irq_check(dev);
	__update_runtime_status(dev, RPM_ACTIVE);
	dev->power.deferred_resume = false;
	wake_up_all(&dev->power.wait_queue);

	if (retval == -EAGAIN || retval == -EBUSY) {
		dev->power.runtime_error = 0;

		/*
		 * If the callback routine failed an autosuspend, and
		 * if the last_busy time has been updated so that there
		 * is a new autosuspend expiration time, automatically
		 * reschedule another autosuspend.
		 */
		if ((rpmflags & RPM_AUTO) &&
		    pm_runtime_autosuspend_expiration(dev) != 0)
			goto repeat;
	} else {
		pm_runtime_cancel_pending(dev);
	}
	goto out;
}

/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	trace_rpm_resume_rcuidle(dev, rpmflags);

 repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth == 1 && dev->power.is_suspended
	    && dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			retval = 1;
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * Resume the parent if it has runtime PM enabled and has not
		 * been set to ignore its children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}
 skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	dev_pm_disable_wake_irq_check(dev);
	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
		dev_pm_enable_wake_irq_check(dev, false);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		pm_runtime_mark_last_busy(dev);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (retval >= 0)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);

	return retval;
}

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine
 * what is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @timer: hrtimer used by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
{
	struct device *dev = container_of(timer, struct device, power.suspend_timer);
	unsigned long flags;
	u64 expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/*
	 * If 'expires' is after the current time, we've been called
	 * too early.
	 */
	if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);

	return HRTIMER_NORESTART;
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	u64 expires;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
	dev->power.timer_expires = expires;
	dev->power.timer_autosuspends = 0;
	hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
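
/*
 * Illustrative sketch (not part of the original file): a hypothetical driver
 * that knows its hardware will stay idle for a while can request a delayed
 * suspend instead of an immediate one:
 *
 *	pm_runtime_mark_last_busy(dev);
 *	ret = pm_schedule_suspend(dev, 500);	// suspend request in ~500 ms
 *
 * A later resume request cancels the timer via pm_runtime_deactivate_timer().
 */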

/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);

/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
			dev->power.runtime_status != RPM_ACTIVE);

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
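
/*
 * Illustrative note (not part of the original file): the helpers in
 * include/linux/pm_runtime.h map onto these three entry points, e.g.
 *
 *	pm_runtime_get_sync(dev)	-> __pm_runtime_resume(dev, RPM_GET_PUT)
 *	pm_runtime_put(dev)		-> __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC)
 *	pm_runtime_put_autosuspend(dev)	-> __pm_runtime_suspend(dev,
 *						RPM_GET_PUT | RPM_ASYNC | RPM_AUTO)
 *
 * so a typical driver I/O path brackets hardware access as:
 *
 *	pm_runtime_get_sync(dev);
 *	... touch the hardware ...
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 */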

/**
 * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
 * @dev: Device to handle.
 *
 * Return -EINVAL if runtime PM is disabled for the device.
 *
 * If that's not the case and if the device's runtime PM status is RPM_ACTIVE
 * and the runtime PM usage counter is nonzero, increment the counter and
 * return 1.  Otherwise return 0 without changing the counter.
 */
int pm_runtime_get_if_in_use(struct device *dev)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = dev->power.disable_depth > 0 ? -EINVAL :
		dev->power.runtime_status == RPM_ACTIVE
			&& atomic_inc_not_zero(&dev->power.usage_count);
	spin_unlock_irqrestore(&dev->power.lock, flags);
	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
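
/*
 * Illustrative sketch (not part of the original file): useful on hot paths
 * that must not wait for a resume; the hardware is touched only if it is
 * already active and in use:
 *
 *	if (pm_runtime_get_if_in_use(dev) <= 0)
 *		return;		// suspended (0) or runtime PM disabled (-EINVAL)
 *	... fast hardware access ...
 *	pm_runtime_put(dev);
 */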

/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 *
 * If @dev has any suppliers (as reflected by device links to them), and @status
 * is RPM_ACTIVE, they will be activated upfront and if the activation of one
 * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
 * of the @status value) and the suppliers will be deactivated on exit.  The
 * error returned by the failing supplier activation will be returned in that
 * case.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irq(&dev->power.lock);

	/*
	 * Prevent PM-runtime from being enabled for the device or return an
	 * error if it is enabled already and working.
	 */
	if (dev->power.runtime_error || dev->power.disable_depth)
		dev->power.disable_depth++;
	else
		error = -EAGAIN;

	spin_unlock_irq(&dev->power.lock);

	if (error)
		return error;

	/*
	 * If the new status is RPM_ACTIVE, the suppliers can be activated
	 * upfront regardless of the current status, because next time
	 * rpm_put_suppliers() runs, the rpm_active refcounts of the links
	 * involved will be dropped down to one anyway.
	 */
	if (status == RPM_ACTIVE) {
		int idx = device_links_read_lock();

		error = rpm_get_suppliers(dev);
		if (error)
			status = RPM_SUSPENDED;

		device_links_read_unlock(idx);
	}

	spin_lock_irq(&dev->power.lock);

	if (dev->power.runtime_status == status || !parent)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		atomic_add_unless(&parent->power.child_count, -1, 0);
		notify_parent = !parent->power.ignore_children;
	} else {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has runtime PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE) {
			dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
				dev_name(dev),
				dev_name(parent));
			error = -EBUSY;
		} else if (dev->power.runtime_status == RPM_SUSPENDED) {
			atomic_inc(&parent->power.child_count);
		}

		spin_unlock(&parent->power.lock);

		if (error) {
			status = RPM_SUSPENDED;
			goto out;
		}
	}

 out_set:
	__update_runtime_status(dev, status);
	if (!error)
		dev->power.runtime_error = 0;

 out:
	spin_unlock_irq(&dev->power.lock);

	if (notify_parent)
		pm_request_idle(parent);

	if (status == RPM_SUSPENDED) {
		int idx = device_links_read_lock();

		rpm_put_suppliers(dev);

		device_links_read_unlock(idx);
	}

	pm_runtime_enable(dev);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
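
/*
 * Illustrative sketch (not part of the original file): a hypothetical probe
 * routine for hardware that powers up active would record that state before
 * enabling runtime PM:
 *
 *	pm_runtime_set_active(dev);	// __pm_runtime_set_status(dev, RPM_ACTIVE)
 *	pm_runtime_enable(dev);
 */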

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and, if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling runtime PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0)
		dev->power.disable_depth--;
	else
		dev_warn(dev, "Unbalanced %s!\n", __func__);

	WARN(!dev->power.disable_depth &&
	     dev->power.runtime_status == RPM_SUSPENDED &&
	     !dev->power.ignore_children &&
	     atomic_read(&dev->power.child_count) > 0,
	     "Enabling runtime PM for inactive device (%s) with active children\n",
	     dev_name(dev));

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is
 * called for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	if (atomic_dec_and_test(&dev->power.usage_count))
		rpm_idle(dev, RPM_AUTO | RPM_ASYNC);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);

/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);
	spin_lock_irq(&dev->power.lock);
	dev->power.irq_safe = 1;
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
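
/*
 * Illustrative note (not part of the original file): after a driver calls
 *
 *	pm_runtime_irq_safe(dev);
 *
 * it may legitimately call pm_runtime_get_sync(dev) from interrupt context,
 * at the cost of keeping dev's parent powered for as long as dev exists.
 */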

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		}
	}

	/* Runtime suspend should be allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
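
/*
 * Illustrative sketch (not part of the original file): typical autosuspend
 * setup in a hypothetical probe routine:
 *
 *	pm_runtime_set_autosuspend_delay(dev, 2000);	// 2 s of inactivity
 *	pm_runtime_use_autosuspend(dev);
 *	pm_runtime_enable(dev);
 *
 * after which pm_runtime_put_autosuspend() only suspends the device once the
 * delay measured from the last pm_runtime_mark_last_busy() has expired.
 */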

/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.accounting_timestamp = jiffies;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	dev->power.suspend_timer.function = pm_suspend_timer_fn;

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
 * @dev: Device object to re-initialize.
 */
void pm_runtime_reinit(struct device *dev)
{
	if (!pm_runtime_enabled(dev)) {
		if (dev->power.runtime_status == RPM_ACTIVE)
			pm_runtime_set_suspended(dev);
		if (dev->power.irq_safe) {
			spin_lock_irq(&dev->power.lock);
			dev->power.irq_safe = 0;
			spin_unlock_irq(&dev->power.lock);
			if (dev->parent)
				pm_runtime_put(dev->parent);
		}
	}
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);
	pm_runtime_reinit(dev);
}

/**
 * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
 * @dev: Device whose driver is going to be removed.
 *
 * Check links from this device to any consumers and if any of them have active
 * runtime PM references to the device, drop the usage counter of the device
 * (as many times as needed).
 *
 * Links with the DL_FLAG_STATELESS flag set are ignored.
 *
 * Since the device is guaranteed to be runtime-active at the point this is
 * called, nothing else needs to be done here.
 *
 * Moreover, this is called after device_links_busy() has returned 'false', so
 * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
 * therefore rpm_active can't be manipulated concurrently.
 */
void pm_runtime_clean_up_links(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.consumers, s_node) {
		if (link->flags & DL_FLAG_STATELESS)
			continue;

		while (refcount_dec_not_one(&link->rpm_active))
			pm_runtime_put_noidle(dev);
	}

	device_links_read_unlock(idx);
}

/**
 * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_get_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		if (link->flags & DL_FLAG_PM_RUNTIME) {
			refcount_inc(&link->rpm_active);
			pm_runtime_get_sync(link->supplier);
		}

	device_links_read_unlock(idx);
}

/**
 * pm_runtime_put_suppliers - Drop references to supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_put_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		if (link->flags & DL_FLAG_PM_RUNTIME &&
		    refcount_dec_not_one(&link->rpm_active))
			pm_runtime_put(link->supplier);

	device_links_read_unlock(idx);
}

void pm_runtime_new_link(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.links_count++;
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_runtime_active_link - Set up new device link as active for PM-runtime.
 * @link: Device link to be set up as active.
 * @supplier: Supplier end of the link.
 *
 * Add 2 to the rpm_active refcount of @link and increment the PM-runtime
 * usage counter of @supplier once more in case the link is being added while
 * the consumer driver is probing and pm_runtime_put_suppliers() will be called
 * for it subsequently.
 *
 * Note that this doesn't prevent rpm_put_suppliers() from decreasing the link's
 * rpm_active refcount down to one, so runtime suspend of the consumer end of
 * @link is not affected.
 */
void pm_runtime_active_link(struct device_link *link, struct device *supplier)
{
	refcount_add(2, &link->rpm_active);
	pm_runtime_get_noresume(supplier);
}

void pm_runtime_drop_link(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	WARN_ON(dev->power.links_count == 0);
	dev->power.links_count--;
	spin_unlock_irq(&dev->power.lock);
}

static bool pm_runtime_need_not_resume(struct device *dev)
{
	return atomic_read(&dev->power.usage_count) <= 1 &&
		(atomic_read(&dev->power.child_count) == 0 ||
		 dev->power.ignore_children);
}

/**
 * pm_runtime_force_suspend - Force a device into suspend state if needed.
 * @dev: Device to suspend.
 *
 * Disable runtime PM so we safely can check the device's runtime PM status and
 * if it is active, invoke its ->runtime_suspend callback to suspend it and
 * change its runtime PM status field to RPM_SUSPENDED.  Also, if the device's
 * usage and children counters don't indicate that the device was in use before
 * the system-wide transition under way, decrement its parent's children counter
 * (if there is a parent).  Keep runtime PM disabled to preserve the state
 * unless we encounter errors.
 *
 * Typically this function may be invoked from a system suspend callback to make
 * sure the device is put into low power state and it should only be used during
 * system-wide PM transitions to sleep states.  It assumes that the analogous
 * pm_runtime_force_resume() will be used to resume the device.
 */
int pm_runtime_force_suspend(struct device *dev)
{
	int (*callback)(struct device *);
	int ret;

	pm_runtime_disable(dev);
	if (pm_runtime_status_suspended(dev))
		return 0;

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	ret = callback ? callback(dev) : 0;
	if (ret)
		goto err;

	/*
	 * If the device can stay in suspend after the system-wide transition
	 * to the working state that will follow, drop the children counter of
	 * its parent, but set its status to RPM_SUSPENDED anyway in case this
	 * function will be called again for it in the meantime.
	 */
	if (pm_runtime_need_not_resume(dev))
		pm_runtime_set_suspended(dev);
	else
		__update_runtime_status(dev, RPM_SUSPENDED);

	return 0;

 err:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);

/**
 * pm_runtime_force_resume - Force a device into resume state if needed.
 * @dev: Device to resume.
 *
 * Prior to invoking this function we expect the user to have brought the
 * device into low power state by a call to pm_runtime_force_suspend().  Here
 * we reverse those actions and bring the device into full power, if it is
 * expected to be used on system resume.  Otherwise, we defer the resume to be
 * managed via runtime PM.
 *
 * Typically this function may be invoked from a system resume callback.
 */
int pm_runtime_force_resume(struct device *dev)
{
	int (*callback)(struct device *);
	int ret = 0;

	if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
		goto out;

	/*
	 * The value of the parent's children counter is correct already, so
	 * just update the status of the device.
	 */
	__update_runtime_status(dev, RPM_ACTIVE);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	ret = callback ? callback(dev) : 0;
	if (ret) {
		pm_runtime_set_suspended(dev);
		goto out;
	}

	pm_runtime_mark_last_busy(dev);
 out:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
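
/*
 * Illustrative sketch (not part of the original file): drivers whose runtime
 * PM callbacks are sufficient for system-wide sleep can reuse them directly
 * in a hypothetical dev_pm_ops:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *					pm_runtime_force_resume)
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
 *	};
 */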