kernel/locking/qrwlock.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Queued read/write locks
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <asm/qrwlock.h>

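/*
 * lock->cnts packs the writer state into its low bits and the reader
 * count into the remaining high bits: _QR_BIAS is one reader's worth
 * of count, while _QW_LOCKED and _QW_WAITING mark a writer that holds
 * or waits for the lock.  The exact bit layout and the lock/trylock
 * fastpaths live in include/asm-generic/qrwlock.h.
 */
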
/**
 * queued_read_lock_slowpath - acquire read lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
void queued_read_lock_slowpath(struct qrwlock *lock)
{
	/*
	 * Readers come here when they cannot get the lock without waiting
	 */
	if (unlikely(in_interrupt())) {
		/*
		 * Readers in interrupt context will get the lock immediately
		 * if the writer is just waiting (not holding the lock yet),
		 * so spin with ACQUIRE semantics until the lock is available
		 * without waiting in the queue.
		 */
		atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
		return;
	}
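
	/*
	 * A writer holds or is waiting for the lock: back out the reader
	 * count that the queued_read_lock() fastpath speculatively added
	 * before joining the wait queue.
	 */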
	atomic_sub(_QR_BIAS, &lock->cnts);

	/*
	 * Put the reader into the wait queue
	 */
	arch_spin_lock(&lock->wait_lock);
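
	/*
	 * We now hold the wait_lock, i.e. we are at the head of the wait
	 * queue: rejoin the reader count and wait for any writer that
	 * still holds the lock to release it.
	 */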
	atomic_add(_QR_BIAS, &lock->cnts);

	/*
	 * The ACQUIRE semantics of the following spinning code ensure
	 * that accesses can't leak upwards out of our subsequent critical
	 * section in the case that the lock is currently held for write.
	 */
	atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));

	/*
	 * Signal the next one in queue to become queue head
	 */
	arch_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL(queued_read_lock_slowpath);
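
/*
 * For context, a simplified sketch (not part of this file) of the reader
 * fastpath in include/asm-generic/qrwlock.h that falls back to the
 * slowpath above:
 *
 *	static inline void queued_read_lock(struct qrwlock *lock)
 *	{
 *		u32 cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
 *
 *		if (likely(!(cnts & _QW_WMASK)))
 *			return;		// no writer: read lock acquired
 *
 *		// A writer holds or waits for the lock; the slowpath will
 *		// drop and later re-take the reader count.
 *		queued_read_lock_slowpath(lock);
 *	}
 */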

/**
 * queued_write_lock_slowpath - acquire write lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
void queued_write_lock_slowpath(struct qrwlock *lock)
{
	/* Put the writer into the wait queue */
	arch_spin_lock(&lock->wait_lock);

	/* Try to acquire the lock directly if no reader is present */
	if (!atomic_read(&lock->cnts) &&
	    (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0))
		goto unlock;

	/* Set the waiting flag to notify readers that a writer is pending */
	atomic_add(_QW_WAITING, &lock->cnts);

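	/*
	 * New readers in task context now see the waiting bit, divert to
	 * the read slowpath and queue up behind us on wait_lock, so the
	 * reader count can only drain; readers in interrupt context may
	 * still slip past while only _QW_WAITING is set, which is why we
	 * wait for the count to reach exactly _QW_WAITING below.
	 */
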
	/* When no more readers or writers, set the locked flag */
	do {
		atomic_cond_read_acquire(&lock->cnts, VAL == _QW_WAITING);
	} while (atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING,
					_QW_LOCKED) != _QW_WAITING);
unlock:
	arch_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL(queued_write_lock_slowpath);
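
/*
 * For context, a simplified sketch (not part of this file) of the writer
 * fastpath in include/asm-generic/qrwlock.h that falls back to the
 * slowpath above:
 *
 *	static inline void queued_write_lock(struct qrwlock *lock)
 *	{
 *		// Optimize for the unlocked, uncontended case.
 *		if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0)
 *			return;
 *
 *		queued_write_lock_slowpath(lock);
 *	}
 */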