/* SPDX-License-Identifier: GPL-2.0-only */

#define MUTEX		mutex
#define MUTEX_WAITER	mutex_waiter
static inline struct mutex_waiter *
__ww_waiter_first(struct mutex *lock)
{
	struct mutex_waiter *w;

	w = list_first_entry(&lock->wait_list, struct mutex_waiter, list);
	if (list_entry_is_head(w, &lock->wait_list, list))
		return NULL;

	return w;
}
static inline struct mutex_waiter *
__ww_waiter_next(struct mutex *lock, struct mutex_waiter *w)
{
	w = list_next_entry(w, list);
	if (list_entry_is_head(w, &lock->wait_list, list))
		return NULL;

	return w;
}
static inline struct mutex_waiter *
__ww_waiter_prev(struct mutex *lock, struct mutex_waiter *w)
{
	w = list_prev_entry(w, list);
	if (list_entry_is_head(w, &lock->wait_list, list))
		return NULL;

	return w;
}
static inline struct mutex_waiter *
__ww_waiter_last(struct mutex *lock)
{
	struct mutex_waiter *w;

	w = list_last_entry(&lock->wait_list, struct mutex_waiter, list);
	if (list_entry_is_head(w, &lock->wait_list, list))
		return NULL;

	return w;
}
static inline void
__ww_waiter_add(struct mutex *lock, struct mutex_waiter *waiter, struct mutex_waiter *pos)
{
	struct list_head *p = &lock->wait_list;

	if (pos)
		p = &pos->list;

	__mutex_add_waiter(lock, waiter, p);
}
static inline struct task_struct *
__ww_mutex_owner(struct mutex *lock)
{
	return __mutex_owner(lock);
}
static inline bool
__ww_mutex_has_waiters(struct mutex *lock)
{
	return atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS;
}
/*
 * Wait-Die:
 *   The newer transactions are killed when:
 *     It (the new transaction) makes a request for a lock being held
 *     by an older transaction.
 *
 * Wound-Wait:
 *   The newer transactions are wounded when:
 *     An older transaction makes a request for a lock being held by
 *     the newer transaction.
 */
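/*
 * Either way, the younger transaction backs off with -EDEADLK and must
 * retry from scratch. For illustration only (hypothetical caller code,
 * not compiled here), acquiring two objects with the public ww_mutex API
 * might look like:
 *
 *	static DEFINE_WW_CLASS(example_ww_class);
 *
 *	struct ww_acquire_ctx ctx;
 *	int err;
 *
 *	ww_acquire_init(&ctx, &example_ww_class);
 * retry:
 *	err = ww_mutex_lock(&a->lock, &ctx);
 *	if (!err) {
 *		err = ww_mutex_lock(&b->lock, &ctx);
 *		if (err)
 *			ww_mutex_unlock(&a->lock);
 *	}
 *	if (err == -EDEADLK) {
 *		swap(a, b);
 *		goto retry;
 *	}
 *	ww_acquire_done(&ctx);
 *
 * The first lock acquired under a context never returns -EDEADLK (see
 * __ww_mutex_check_kill(), ctx->acquired is still 0), and swapping @a and
 * @b makes the previously contended lock the first one taken on retry.
 * ww_mutex_lock_slow() could be used instead of the bare retry to sleep
 * until the contended lock is released.
 */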
/*
 * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
 * it.
 */
static __always_inline void
ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done()?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to acquire a different
		 * ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
	ww->ctx = ww_ctx;
}
/*
 * Determine if context @a is 'after' context @b. IOW, @a is a younger
 * transaction than @b and depending on algorithm either needs to wait for
 * @b or die.
 */
static inline bool
__ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
{
	return (signed long)(a->stamp - b->stamp) > 0;
}
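/*
 * The open-coded subtraction keeps the comparison correct across stamp
 * wraparound. A worked example with hypothetical values: for
 * b->stamp == ULONG_MAX - 1 and a->stamp == 2, i.e. @a started after the
 * stamp counter wrapped, a->stamp - b->stamp == 4, which is positive when
 * interpreted as signed, so @a is still correctly seen as the younger
 * transaction.
 */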
/*
 * Wait-Die; wake a younger waiter context (when locks held) such that it can
 * die.
 *
 * Among waiters with context, only the first one can have other locks acquired
 * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
 * __ww_mutex_check_kill() wake any but the earliest context.
 */
static bool
__ww_mutex_die(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
	       struct ww_acquire_ctx *ww_ctx)
{
	if (!ww_ctx->is_wait_die)
		return false;

	if (waiter->ww_ctx->acquired > 0 &&
	    __ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) {
		debug_mutex_wake_waiter(lock, waiter);
		wake_up_process(waiter->task);
	}

	return true;
}
/*
 * Wound-Wait; wound a younger @hold_ctx if it holds the lock.
 *
 * Wound the lock holder if there are waiters with older transactions than
 * the lock holder's. Even though multiple waiters may wound the lock holder,
 * it's sufficient that only one does.
 */
static bool __ww_mutex_wound(struct MUTEX *lock,
			     struct ww_acquire_ctx *ww_ctx,
			     struct ww_acquire_ctx *hold_ctx)
{
	struct task_struct *owner = __ww_mutex_owner(lock);

	lockdep_assert_held(&lock->wait_lock);

	/*
	 * Possible through __ww_mutex_add_waiter() when we race with
	 * ww_mutex_set_context_fastpath(). In that case we'll get here again
	 * through __ww_mutex_check_waiters().
	 */
	if (!hold_ctx)
		return false;

	/*
	 * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
	 * it cannot go away because we'll have FLAG_WAITERS set and hold
	 * wait_lock.
	 */
	if (!owner)
		return false;

	if (ww_ctx->acquired > 0 && __ww_ctx_stamp_after(hold_ctx, ww_ctx)) {
		hold_ctx->wounded = 1;

		/*
		 * wake_up_process() paired with set_current_state()
		 * inserts sufficient barriers to make sure @owner either sees
		 * it's wounded in __ww_mutex_check_kill() or has a
		 * wakeup pending to re-read the wounded state.
		 */
		if (owner != current)
			wake_up_process(owner);

		return true;
	}

	return false;
}
/*
 * We just acquired @lock under @ww_ctx, if there are later contexts waiting
 * behind us on the wait-list, check if they need to die, or wound us.
 *
 * See __ww_mutex_add_waiter() for the list-order construction; basically the
 * list is ordered by stamp, smallest (oldest) first.
 *
 * This relies on never mixing wait-die/wound-wait on the same wait-list;
 * which is currently ensured by that being a ww_class property.
 *
 * The current task must not be on the wait list.
 */
static void
__ww_mutex_check_waiters(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
{
	struct MUTEX_WAITER *cur;

	lockdep_assert_held(&lock->wait_lock);

	for (cur = __ww_waiter_first(lock); cur;
	     cur = __ww_waiter_next(lock, cur)) {

		if (!cur->ww_ctx)
			continue;

		if (__ww_mutex_die(lock, cur, ww_ctx) ||
		    __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
			break;
	}
}
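/*
 * A sketch of both back-off flavours through the helpers above, with
 * hypothetical stamps (smaller is older): context A (stamp 1) and context
 * B (stamp 2), each holding at least one other lock (ctx->acquired > 0).
 *
 * Wait-Die:   A acquires @lock while B waits behind it; __ww_mutex_die()
 *             wakes B, which sees the older holder in
 *             __ww_mutex_check_kill() and dies with -EDEADLK, so that A
 *             never has to wait for it.
 *
 * Wound-Wait: B holds @lock and A blocks on it; __ww_mutex_wound() sets
 *             B->wounded and wakes B, which backs off with -EDEADLK at
 *             its next ww_mutex_lock() call and lets A proceed.
 */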
/*
 * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
 * and wake up any waiters so they can recheck.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	ww_mutex_lock_acquired(lock, ctx);

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the WAITERS check is done, otherwise contended waiters might be
	 * missed. Any contended waiter will either see ww_ctx == NULL
	 * and keep spinning, or it will acquire wait_lock, add itself
	 * to the waiter list and sleep.
	 */
	smp_mb(); /* See comments above and below. */

	/*
	 * [W] ww->ctx = ctx	    [W] MUTEX_FLAG_WAITERS
	 *     MB		        MB
	 * [R] MUTEX_FLAG_WAITERS   [R] ww->ctx
	 *
	 * The memory barrier above pairs with the memory barrier in
	 * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
	 * and/or !empty list.
	 */
	if (likely(!__ww_mutex_has_waiters(&lock->base)))
		return;

	/*
	 * Uh oh, we raced in fastpath, check if any of the waiters need to
	 * die or wound us.
	 */
	raw_spin_lock(&lock->base.wait_lock);
	__ww_mutex_check_waiters(&lock->base, ctx);
	raw_spin_unlock(&lock->base.wait_lock);
}
static __always_inline int
__ww_mutex_kill(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
{
	if (ww_ctx->acquired > 0) {
#ifdef CONFIG_DEBUG_MUTEXES
		struct ww_mutex *ww;

		ww = container_of(lock, struct ww_mutex, base);
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
		ww_ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}
/*
 * Check the wound condition for the current lock acquire.
 *
 * Wound-Wait: If we're wounded, kill ourselves.
 *
 * Wait-Die: If we're trying to acquire a lock already held by an older
 *           context, kill ourselves.
 *
 * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
 * look at waiters before us in the wait-list.
 */
static inline int
__ww_mutex_check_kill(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
		      struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
	struct MUTEX_WAITER *cur;

	if (ctx->acquired == 0)
		return 0;

	if (!ctx->is_wait_die) {
		if (ctx->wounded)
			return __ww_mutex_kill(lock, ctx);

		return 0;
	}

	if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
		return __ww_mutex_kill(lock, ctx);

	/*
	 * If there is a waiter in front of us that has a context, then its
	 * stamp is earlier than ours and we must kill ourselves.
	 */
	for (cur = __ww_waiter_prev(lock, waiter); cur;
	     cur = __ww_waiter_prev(lock, cur)) {

		if (!cur->ww_ctx)
			continue;

		return __ww_mutex_kill(lock, ctx);
	}

	return 0;
}
/*
 * Add @waiter to the wait-list, keep the wait-list ordered by stamp, smallest
 * first, so that older contexts are preferred to acquire the lock over
 * younger contexts.
 *
 * Waiters without context are interspersed in FIFO order.
 *
 * Furthermore, for Wait-Die kill ourselves immediately when possible (there
 * are older contexts already waiting) to avoid unnecessary waiting and for
 * Wound-Wait ensure we wound the owning context when it is younger.
 */
static inline int
__ww_mutex_add_waiter(struct MUTEX_WAITER *waiter,
		      struct MUTEX *lock,
		      struct ww_acquire_ctx *ww_ctx)
{
	struct MUTEX_WAITER *cur, *pos = NULL;
	bool is_wait_die;

	if (!ww_ctx) {
		__ww_waiter_add(lock, waiter, NULL);
		return 0;
	}

	is_wait_die = ww_ctx->is_wait_die;

	/*
	 * Add the waiter before the first waiter with a higher stamp.
	 * Waiters without a context are skipped to avoid starving
	 * them. Wait-Die waiters may die here. Wound-Wait waiters
	 * never die here, but they are sorted in stamp order and
	 * may wound the lock holder.
	 */
	for (cur = __ww_waiter_last(lock); cur;
	     cur = __ww_waiter_prev(lock, cur)) {

		if (!cur->ww_ctx)
			continue;

		if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
			/*
			 * Wait-Die: if we find an older context waiting, there
			 * is no point in queueing behind it, as we'd have to
			 * die the moment it would acquire the lock.
			 */
			if (is_wait_die) {
				int ret = __ww_mutex_kill(lock, ww_ctx);

				if (ret)
					return ret;
			}

			break;
		}

		pos = cur;

		/* Wait-Die: ensure younger waiters die. */
		__ww_mutex_die(lock, cur, ww_ctx);
	}

	__ww_waiter_add(lock, waiter, pos);

	/*
	 * Wound-Wait: if we're blocking on a mutex owned by a younger context,
	 * wound it so that we might proceed.
	 */
	if (!is_wait_die) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);

		/*
		 * See ww_mutex_set_context_fastpath(). Orders setting
		 * MUTEX_FLAG_WAITERS vs the ww->ctx load,
		 * such that either we or the fastpath will wound @ww->ctx.
		 */
		smp_mb();
		__ww_mutex_wound(lock, ww_ctx, ww->ctx);
	}

	return 0;
}
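/*
 * An illustrative insertion with hypothetical stamps: given the wait-list
 *
 *	head -> [ctx, stamp 1] -> [no ctx] -> [ctx, stamp 3] -> [ctx, stamp 7]
 *
 * a new waiter with stamp 5 walks backwards from the tail. At stamp 7 it
 * records the insertion point (and, for Wait-Die, wakes that younger waiter
 * so it can die if it holds other locks); at stamp 3 it finds the first
 * older context and stops, ending up between stamp 3 and stamp 7. The
 * context-less waiter keeps its FIFO position.
 */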
static inline void __ww_mutex_unlock(struct ww_mutex *lock)
{
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}
}