/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */
7 #include "intel_runtime_pm.h"
8 #include "intel_wakeref.h"
10 static void rpm_get(struct intel_runtime_pm *rpm, struct intel_wakeref *wf)
12 wf->wakeref = intel_runtime_pm_get(rpm);
15 static void rpm_put(struct intel_runtime_pm *rpm, struct intel_wakeref *wf)
17 intel_wakeref_t wakeref = fetch_and_zero(&wf->wakeref);
19 intel_runtime_pm_put(rpm, wakeref);
20 INTEL_WAKEREF_BUG_ON(!wakeref);
23 int __intel_wakeref_get_first(struct intel_runtime_pm *rpm,
24 struct intel_wakeref *wf,
25 int (*fn)(struct intel_wakeref *wf))
28 * Treat get/put as different subclasses, as we may need to run
29 * the put callback from under the shrinker and do not want to
30 * cross-contanimate that callback with any extra work performed
31 * upon acquiring the wakeref.
33 mutex_lock_nested(&wf->mutex, SINGLE_DEPTH_NESTING);
34 if (!atomic_read(&wf->count)) {
42 mutex_unlock(&wf->mutex);
46 smp_mb__before_atomic(); /* release wf->count */
48 atomic_inc(&wf->count);
49 mutex_unlock(&wf->mutex);
51 INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
55 int __intel_wakeref_put_last(struct intel_runtime_pm *rpm,
56 struct intel_wakeref *wf,
57 int (*fn)(struct intel_wakeref *wf))
65 atomic_inc(&wf->count);
66 mutex_unlock(&wf->mutex);
71 void __intel_wakeref_init(struct intel_wakeref *wf, struct lock_class_key *key)
73 __mutex_init(&wf->mutex, "wakeref", key);
74 atomic_set(&wf->count, 0);
78 static void wakeref_auto_timeout(struct timer_list *t)
80 struct intel_wakeref_auto *wf = from_timer(wf, t, timer);
81 intel_wakeref_t wakeref;
84 if (!refcount_dec_and_lock_irqsave(&wf->count, &wf->lock, &flags))
87 wakeref = fetch_and_zero(&wf->wakeref);
88 spin_unlock_irqrestore(&wf->lock, flags);
90 intel_runtime_pm_put(wf->rpm, wakeref);
93 void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
94 struct intel_runtime_pm *rpm)
96 spin_lock_init(&wf->lock);
97 timer_setup(&wf->timer, wakeref_auto_timeout, 0);
98 refcount_set(&wf->count, 0);
103 void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout)
108 if (del_timer_sync(&wf->timer))
109 wakeref_auto_timeout(&wf->timer);
113 /* Our mission is that we only extend an already active wakeref */
114 assert_rpm_wakelock_held(wf->rpm);
116 if (!refcount_inc_not_zero(&wf->count)) {
117 spin_lock_irqsave(&wf->lock, flags);
118 if (!refcount_inc_not_zero(&wf->count)) {
119 INTEL_WAKEREF_BUG_ON(wf->wakeref);
120 wf->wakeref = intel_runtime_pm_get_if_in_use(wf->rpm);
121 refcount_set(&wf->count, 1);
123 spin_unlock_irqrestore(&wf->lock, flags);
127 * If we extend a pending timer, we will only get a single timer
128 * callback and so need to cancel the local inc by running the
129 * elided callback to keep the wf->count balanced.
131 if (mod_timer(&wf->timer, jiffies + timeout))
132 wakeref_auto_timeout(&wf->timer);
135 void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf)
137 intel_wakeref_auto(wf, 0);
138 INTEL_WAKEREF_BUG_ON(wf->wakeref);