1 // SPDX-License-Identifier: GPL-2.0
2 /* kernel/rwsem.c: R/W semaphores, public implementation
3  *
4  * Written by David Howells (dhowells@redhat.com).
5  * Derived from asm-i386/semaphore.h
6  *
7  * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
8  * and Michel Lespinasse <walken@google.com>
9  *
10  * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
11  * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
12  *
13  * Rwsem count bit fields re-definition and rwsem rearchitecture by
14  * Waiman Long <longman@redhat.com> and
15  * Peter Zijlstra <peterz@infradead.org>.
16  */
17
18 #include <linux/types.h>
19 #include <linux/kernel.h>
20 #include <linux/sched.h>
21 #include <linux/sched/rt.h>
22 #include <linux/sched/task.h>
23 #include <linux/sched/debug.h>
24 #include <linux/sched/wake_q.h>
25 #include <linux/sched/signal.h>
26 #include <linux/sched/clock.h>
27 #include <linux/export.h>
28 #include <linux/rwsem.h>
29 #include <linux/atomic.h>
30
31 #include "rwsem.h"
32 #include "lock_events.h"
33
34 /*
35  * The least significant 3 bits of the owner value have the following
36  * meanings when set.
37  *  - Bit 0: RWSEM_READER_OWNED - The rwsem is owned by readers
38  *  - Bit 1: RWSEM_RD_NONSPINNABLE - Readers cannot spin on this lock.
39  *  - Bit 2: RWSEM_WR_NONSPINNABLE - Writers cannot spin on this lock.
40  *
41  * When the rwsem is either owned by an anonymous writer, or it is
42  * reader-owned, but a spinning writer has timed out, both nonspinnable
43  * bits will be set to disable optimistic spinning by readers and writers.
44  * In the latter case, the last unlocking reader should then check the
45  * writer nonspinnable bit and clear only that bit, giving writers, but
46  * not readers, preference to acquire the lock via optimistic spinning.
47  * A similar action is also done in the reader slowpath.
48  *
49  * When a writer acquires a rwsem, it puts its task_struct pointer
50  * into the owner field. It is cleared after an unlock.
51  *
52  * When a reader acquires a rwsem, it also puts its task_struct
53  * pointer into the owner field with the RWSEM_READER_OWNED bit set.
54  * On unlock, the owner field will largely be left untouched. So
55  * for a free or reader-owned rwsem, the owner value may contain
56  * information about the last reader that acquired the rwsem.
57  *
58  * That information may be helpful in debugging cases where the system
59  * seems to hang on a reader-owned rwsem, especially if only one reader
60  * is involved. Ideally we would like to track all the readers that own
61  * a rwsem, but the overhead is simply too big.
62  *
63  * Reader optimistic spinning is helpful when the reader critical section
64  * is short and there aren't that many readers around. It makes readers
65  * relatively more preferred than writers. When a writer times out spinning
66  * on a reader-owned lock and sets the nonspinnable bits, there are two main
67  * reasons for that.
68  *
69  *  1) The reader critical section is long, perhaps the task sleeps after
70  *     acquiring the read lock.
71  *  2) There are just too many readers contending the lock causing it to
72  *     take a while to service all of them.
73  *
74  * In the former case, a long reader critical section will impede the progress
75  * of writers, which is usually more important for system performance. In
76  * the latter case, reader optimistic spinning tends to make the reader
77  * groups that contain readers that acquire the lock together smaller,
78  * leading to more of them. That may hurt performance in some cases. In
79  * other words, the setting of nonspinnable bits indicates that reader
80  * optimistic spinning may not be helpful for those workloads that cause
81  * it.
82  *
83  * Therefore, any writer that has observed the setting of the writer
84  * nonspinnable bit for a given rwsem after failing to acquire the lock
85  * via optimistic spinning will set the reader nonspinnable bit once it
86  * acquires the write lock. Similarly, readers that observe the setting
87  * of the reader nonspinnable bit at slowpath entry will set the reader
88  * nonspinnable bit when they acquire the read lock via the wakeup path.
89  *
90  * Once the reader nonspinnable bit is on, it will only be reset when
91  * a writer is able to acquire the rwsem in the fast path or somehow a
92  * reader or writer in the slowpath doesn't observe the nonspinnable bit.
93  *
94  * This is to discourage reader optimistic spinning on that particular
95  * rwsem and make writers more preferred. This adaptive disabling of reader
96  * optimistic spinning will alleviate the negative side effect of this
97  * feature.
98  */
99 #define RWSEM_READER_OWNED      (1UL << 0)
100 #define RWSEM_RD_NONSPINNABLE   (1UL << 1)
101 #define RWSEM_WR_NONSPINNABLE   (1UL << 2)
102 #define RWSEM_NONSPINNABLE      (RWSEM_RD_NONSPINNABLE | RWSEM_WR_NONSPINNABLE)
103 #define RWSEM_OWNER_FLAGS_MASK  (RWSEM_READER_OWNED | RWSEM_NONSPINNABLE)
104
105 #ifdef CONFIG_DEBUG_RWSEMS
106 # define DEBUG_RWSEMS_WARN_ON(c, sem)   do {                    \
107         if (!debug_locks_silent &&                              \
108             WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\
109                 #c, atomic_long_read(&(sem)->count),            \
110                 atomic_long_read(&(sem)->owner), (long)current, \
111                 list_empty(&(sem)->wait_list) ? "" : "not "))   \
112                         debug_locks_off();                      \
113         } while (0)
114 #else
115 # define DEBUG_RWSEMS_WARN_ON(c, sem)
116 #endif
117
118 /*
119  * On 64-bit architectures, the bit definitions of the count are:
120  *
121  * Bit  0    - writer locked bit
122  * Bit  1    - waiters present bit
123  * Bit  2    - lock handoff bit
124  * Bits 3-7  - reserved
125  * Bits 8-62 - 55-bit reader count
126  * Bit  63   - read fail bit
127  *
128  * On 32-bit architectures, the bit definitions of the count are:
129  *
130  * Bit  0    - writer locked bit
131  * Bit  1    - waiters present bit
132  * Bit  2    - lock handoff bit
133  * Bits 3-7  - reserved
134  * Bits 8-30 - 23-bit reader count
135  * Bit  31   - read fail bit
136  *
137  * It is not likely that the most significant bit (read fail bit) will ever
138  * be set. This guard bit is still checked anyway in the down_read() fastpath
139  * just in case we need to use up more of the reader bits for other purposes
140  * in the future.
141  *
142  * atomic_long_fetch_add() is used to obtain the reader lock, whereas
143  * atomic_long_cmpxchg() is used to obtain the writer lock.
144  *
145  * There are three places where the lock handoff bit may be set or cleared.
146  * 1) rwsem_mark_wake() for readers.
147  * 2) rwsem_try_write_lock() for writers.
148  * 3) Error path of rwsem_down_write_slowpath().
149  *
150  * For all the above cases, wait_lock will be held. A writer must also
151  * be the first one in the wait_list to be eligible for setting the handoff
152  * bit. So concurrent setting/clearing of handoff bit is not possible.
153  */
154 #define RWSEM_WRITER_LOCKED     (1UL << 0)
155 #define RWSEM_FLAG_WAITERS      (1UL << 1)
156 #define RWSEM_FLAG_HANDOFF      (1UL << 2)
157 #define RWSEM_FLAG_READFAIL     (1UL << (BITS_PER_LONG - 1))
158
159 #define RWSEM_READER_SHIFT      8
160 #define RWSEM_READER_BIAS       (1UL << RWSEM_READER_SHIFT)
161 #define RWSEM_READER_MASK       (~(RWSEM_READER_BIAS - 1))
162 #define RWSEM_WRITER_MASK       RWSEM_WRITER_LOCKED
163 #define RWSEM_LOCK_MASK         (RWSEM_WRITER_MASK|RWSEM_READER_MASK)
164 #define RWSEM_READ_FAILED_MASK  (RWSEM_WRITER_MASK|RWSEM_FLAG_WAITERS|\
165                                  RWSEM_FLAG_HANDOFF|RWSEM_FLAG_READFAIL)
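
/*
 * Illustrative helper (not part of the original file): how a raw count
 * value decomposes into the number of readers, using the masks defined
 * above. Each reader adds RWSEM_READER_BIAS, so the reader total is the
 * masked count shifted down by RWSEM_READER_SHIFT. This sketch assumes
 * the read fail guard bit is clear, which is expected in practice.
 */
static inline long rwsem_count_nr_readers_example(long count)
{
	/* mask off the flag bits, then shift out the reader bias */
	return (count & RWSEM_READER_MASK) >> RWSEM_READER_SHIFT;
}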
166
167 /*
168  * All writes to owner are protected by WRITE_ONCE() to make sure that
169  * store tearing can't happen as optimistic spinners may read and use
170  * the owner value concurrently without the lock. Reads from owner, however,
171  * may not need READ_ONCE() as long as the pointer value is only used
172  * for comparison and isn't being dereferenced.
173  */
174 static inline void rwsem_set_owner(struct rw_semaphore *sem)
175 {
176         atomic_long_set(&sem->owner, (long)current);
177 }
178
179 static inline void rwsem_clear_owner(struct rw_semaphore *sem)
180 {
181         atomic_long_set(&sem->owner, 0);
182 }
183
184 /*
185  * Test the flags in the owner field.
186  */
187 static inline bool rwsem_test_oflags(struct rw_semaphore *sem, long flags)
188 {
189         return atomic_long_read(&sem->owner) & flags;
190 }
191
192 /*
193  * The task_struct pointer of the last owning reader will be left in
194  * the owner field.
195  *
196  * Note that the owner value just indicates the task has owned the rwsem
197  * previously; it may not be the real owner or one of the real owners
198  * anymore when that field is examined, so take it with a grain of salt.
199  *
200  * The reader non-spinnable bit is preserved.
201  */
202 static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
203                                             struct task_struct *owner)
204 {
205         unsigned long val = (unsigned long)owner | RWSEM_READER_OWNED |
206                 (atomic_long_read(&sem->owner) & RWSEM_RD_NONSPINNABLE);
207
208         atomic_long_set(&sem->owner, val);
209 }
210
211 static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
212 {
213         __rwsem_set_reader_owned(sem, current);
214 }
215
216 /*
217  * Return true if the rwsem is owned by a reader.
218  */
219 static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
220 {
221 #ifdef CONFIG_DEBUG_RWSEMS
222         /*
223          * Check the count to see if it is write-locked.
224          */
225         long count = atomic_long_read(&sem->count);
226
227         if (count & RWSEM_WRITER_MASK)
228                 return false;
229 #endif
230         return rwsem_test_oflags(sem, RWSEM_READER_OWNED);
231 }
232
233 #ifdef CONFIG_DEBUG_RWSEMS
234 /*
235  * With CONFIG_DEBUG_RWSEMS configured, this makes sure that if there
236  * is a task pointer in the owner field of a reader-owned rwsem, it is the
237  * real owner or one of the real owners. The only exception is when the
238  * unlock is done by up_read_non_owner().
239  */
240 static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
241 {
242         unsigned long val = atomic_long_read(&sem->owner);
243
244         while ((val & ~RWSEM_OWNER_FLAGS_MASK) == (unsigned long)current) {
245                 if (atomic_long_try_cmpxchg(&sem->owner, &val,
246                                             val & RWSEM_OWNER_FLAGS_MASK))
247                         return;
248         }
249 }
250 #else
251 static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
252 {
253 }
254 #endif
255
256 /*
257  * Set the RWSEM_NONSPINNABLE bits if the RWSEM_READER_OWNED flag
258  * remains set. Otherwise, the operation will be aborted.
259  */
260 static inline void rwsem_set_nonspinnable(struct rw_semaphore *sem)
261 {
262         unsigned long owner = atomic_long_read(&sem->owner);
263
264         do {
265                 if (!(owner & RWSEM_READER_OWNED))
266                         break;
267                 if (owner & RWSEM_NONSPINNABLE)
268                         break;
269         } while (!atomic_long_try_cmpxchg(&sem->owner, &owner,
270                                           owner | RWSEM_NONSPINNABLE));
271 }
272
273 static inline bool rwsem_read_trylock(struct rw_semaphore *sem)
274 {
275         long cnt = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);
276         if (WARN_ON_ONCE(cnt < 0))
277                 rwsem_set_nonspinnable(sem);
278         return !(cnt & RWSEM_READ_FAILED_MASK);
279 }
280
281 /*
282  * Return just the real task structure pointer of the owner
283  */
284 static inline struct task_struct *rwsem_owner(struct rw_semaphore *sem)
285 {
286         return (struct task_struct *)
287                 (atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK);
288 }
289
290 /*
291  * Return the real task structure pointer of the owner and the embedded
292  * flags in the owner. pflags must be non-NULL.
293  */
294 static inline struct task_struct *
295 rwsem_owner_flags(struct rw_semaphore *sem, unsigned long *pflags)
296 {
297         unsigned long owner = atomic_long_read(&sem->owner);
298
299         *pflags = owner & RWSEM_OWNER_FLAGS_MASK;
300         return (struct task_struct *)(owner & ~RWSEM_OWNER_FLAGS_MASK);
301 }
302
303 /*
304  * Guide to the rw_semaphore's count field.
305  *
306  * When the RWSEM_WRITER_LOCKED bit in count is set, the lock is owned
307  * by a writer.
308  *
309  * The lock is owned by readers when
310  * (1) the RWSEM_WRITER_LOCKED isn't set in count,
311  * (2) some of the reader bits are set in count, and
312  * (3) the owner field has the RWSEM_READER_OWNED bit set.
313  *
314  * Having some reader bits set is not enough to guarantee a reader-owned
315  * lock as the readers may be in the process of backing out from the count
316  * and a writer has just released the lock. So another writer may steal
317  * the lock immediately after that.
318  */
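
/*
 * A few example count values (illustrative only, derived from the bit
 * definitions above):
 *
 *   0x0000 - unlocked, no waiters
 *   0x0001 - write-locked (RWSEM_WRITER_LOCKED)
 *   0x0003 - write-locked, waiters queued (RWSEM_FLAG_WAITERS)
 *   0x0100 - one reader holds the lock (one RWSEM_READER_BIAS)
 *   0x0302 - three readers hold the lock, waiters queued
 */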
319
320 /*
321  * Initialize an rwsem:
322  */
323 void __init_rwsem(struct rw_semaphore *sem, const char *name,
324                   struct lock_class_key *key)
325 {
326 #ifdef CONFIG_DEBUG_LOCK_ALLOC
327         /*
328          * Make sure we are not reinitializing a held semaphore:
329          */
330         debug_check_no_locks_freed((void *)sem, sizeof(*sem));
331         lockdep_init_map(&sem->dep_map, name, key, 0);
332 #endif
333         atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
334         raw_spin_lock_init(&sem->wait_lock);
335         INIT_LIST_HEAD(&sem->wait_list);
336         atomic_long_set(&sem->owner, 0L);
337 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
338         osq_lock_init(&sem->osq);
339 #endif
340 }
341 EXPORT_SYMBOL(__init_rwsem);
342
343 enum rwsem_waiter_type {
344         RWSEM_WAITING_FOR_WRITE,
345         RWSEM_WAITING_FOR_READ
346 };
347
348 struct rwsem_waiter {
349         struct list_head list;
350         struct task_struct *task;
351         enum rwsem_waiter_type type;
352         unsigned long timeout;
353         unsigned long last_rowner;
354 };
355 #define rwsem_first_waiter(sem) \
356         list_first_entry(&sem->wait_list, struct rwsem_waiter, list)
357
358 enum rwsem_wake_type {
359         RWSEM_WAKE_ANY,         /* Wake whatever's at head of wait list */
360         RWSEM_WAKE_READERS,     /* Wake readers only */
361         RWSEM_WAKE_READ_OWNED   /* Waker thread holds the read lock */
362 };
363
364 enum writer_wait_state {
365         WRITER_NOT_FIRST,       /* Writer is not first in wait list */
366         WRITER_FIRST,           /* Writer is first in wait list     */
367         WRITER_HANDOFF          /* Writer is first & handoff needed */
368 };
369
370 /*
371  * The typical HZ value is either 250 or 1000. So set the minimum waiting
372  * time to at least 4ms or 1 jiffy (if it is higher than 4ms) in the wait
373  * queue before initiating the handoff protocol.
374  */
375 #define RWSEM_WAIT_TIMEOUT      DIV_ROUND_UP(HZ, 250)
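
/*
 * Worked examples (illustrative): with HZ=250, DIV_ROUND_UP(250, 250)
 * yields 1 jiffy = 4ms; with HZ=1000, it yields 4 jiffies = 4ms; with
 * HZ=100, it yields 1 jiffy = 10ms, i.e. one jiffy when that is longer
 * than 4ms.
 */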
376
377 /*
378  * Magic number to batch-wakeup waiting readers, even when writers are
379  * also present in the queue. This both limits the amount of work the
380  * waking thread must do and also prevents any potential counter overflow,
381  * however unlikely.
382  */
383 #define MAX_READERS_WAKEUP      0x100
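
/*
 * Illustrative arithmetic: a full wakeup batch adds at most
 * MAX_READERS_WAKEUP * RWSEM_READER_BIAS = 0x100 * 0x100 = 0x10000 to
 * the count, which stays well within even the 23-bit reader field
 * available on 32-bit architectures.
 */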
384
385 /*
386  * handle the lock release when processes blocked on it can now run
387  * - if we come here from up_xxxx(), then the RWSEM_FLAG_WAITERS bit must
388  *   have been set.
389  * - there must be someone on the queue
390  * - the wait_lock must be held by the caller
391  * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
392  *   to actually wakeup the blocked task(s) and drop the reference count,
393  *   preferably when the wait_lock is released
394  * - woken waiter blocks are removed from the list after having their task field zeroed
395  * - writers are only marked woken if downgrading is false
396  */
397 static void rwsem_mark_wake(struct rw_semaphore *sem,
398                             enum rwsem_wake_type wake_type,
399                             struct wake_q_head *wake_q)
400 {
401         struct rwsem_waiter *waiter, *tmp;
402         long oldcount, woken = 0, adjustment = 0;
403         struct list_head wlist;
404
405         lockdep_assert_held(&sem->wait_lock);
406
407         /*
408          * Take a peek at the queue head waiter such that we can determine
409          * the wakeup(s) to perform.
410          */
411         waiter = rwsem_first_waiter(sem);
412
413         if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
414                 if (wake_type == RWSEM_WAKE_ANY) {
415                         /*
416                          * Mark writer at the front of the queue for wakeup.
417                          * Until the task is actually awoken later by
418                          * the caller, other writers are able to steal it.
419                          * Readers, on the other hand, will block as they
420                          * will notice the queued writer.
421                          */
422                         wake_q_add(wake_q, waiter->task);
423                         lockevent_inc(rwsem_wake_writer);
424                 }
425
426                 return;
427         }
428
429         /*
430          * No reader wakeup if there are too many of them already.
431          */
432         if (unlikely(atomic_long_read(&sem->count) < 0))
433                 return;
434
435         /*
436          * Writers might steal the lock before we grant it to the next reader.
437          * We prefer to do the first reader grant before counting readers
438          * so we can bail out early if a writer stole the lock.
439          */
440         if (wake_type != RWSEM_WAKE_READ_OWNED) {
441                 struct task_struct *owner;
442
443                 adjustment = RWSEM_READER_BIAS;
444                 oldcount = atomic_long_fetch_add(adjustment, &sem->count);
445                 if (unlikely(oldcount & RWSEM_WRITER_MASK)) {
446                         /*
447                          * When we've been waiting "too" long (for writers
448                          * to give up the lock), request a HANDOFF to
449                          * force the issue.
450                          */
451                         if (!(oldcount & RWSEM_FLAG_HANDOFF) &&
452                             time_after(jiffies, waiter->timeout)) {
453                                 adjustment -= RWSEM_FLAG_HANDOFF;
454                                 lockevent_inc(rwsem_rlock_handoff);
455                         }
456
457                         atomic_long_add(-adjustment, &sem->count);
458                         return;
459                 }
460                 /*
461                  * Set it to reader-owned to give spinners an early
462                  * indication that readers now have the lock.
463                  * The reader nonspinnable bit seen at slowpath entry of
464                  * the reader is copied over.
465                  */
466                 owner = waiter->task;
467                 if (waiter->last_rowner & RWSEM_RD_NONSPINNABLE) {
468                         owner = (void *)((unsigned long)owner | RWSEM_RD_NONSPINNABLE);
469                         lockevent_inc(rwsem_opt_norspin);
470                 }
471                 __rwsem_set_reader_owned(sem, owner);
472         }
473
474         /*
475          * Grant up to MAX_READERS_WAKEUP read locks to all the readers in the
476          * queue. We know that woken will be at least 1 as we accounted
477          * for above. Note we increment the 'active part' of the count by the
478          * number of readers before waking any processes up.
479          *
480          * This is an adaptation of the phase-fair R/W locks where at the
481          * reader phase (first waiter is a reader), all readers are eligible
482          * to acquire the lock at the same time irrespective of their order
483          * in the queue. The writers acquire the lock according to their
484          * order in the queue.
485          *
486          * We have to do wakeup in 2 passes to prevent the possibility that
487          * the reader count may be decremented before it is incremented. It
488          * is because the to-be-woken waiter may not have slept yet. So it
489          * may see waiter->task cleared, finish its critical section and
490          * do an unlock before the reader count increment.
491          *
492          * 1) Collect the read-waiters in a separate list, count them and
493          *    fully increment the reader count in rwsem.
494          * 2) For each waiter in the new list, clear waiter->task and
495          *    put them into wake_q to be woken up later.
496          */
497         INIT_LIST_HEAD(&wlist);
498         list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
499                 if (waiter->type == RWSEM_WAITING_FOR_WRITE)
500                         continue;
501
502                 woken++;
503                 list_move_tail(&waiter->list, &wlist);
504
505                 /*
506                  * Limit # of readers that can be woken up per wakeup call.
507                  */
508                 if (woken >= MAX_READERS_WAKEUP)
509                         break;
510         }
511
512         adjustment = woken * RWSEM_READER_BIAS - adjustment;
513         lockevent_cond_inc(rwsem_wake_reader, woken);
514         if (list_empty(&sem->wait_list)) {
515                 /* hit end of list above */
516                 adjustment -= RWSEM_FLAG_WAITERS;
517         }
518
519         /*
520          * When we've woken a reader, we no longer need to force writers
521          * to give up the lock and we can clear HANDOFF.
522          */
523         if (woken && (atomic_long_read(&sem->count) & RWSEM_FLAG_HANDOFF))
524                 adjustment -= RWSEM_FLAG_HANDOFF;
525
526         if (adjustment)
527                 atomic_long_add(adjustment, &sem->count);
528
529         /* 2nd pass */
530         list_for_each_entry_safe(waiter, tmp, &wlist, list) {
531                 struct task_struct *tsk;
532
533                 tsk = waiter->task;
534                 get_task_struct(tsk);
535
536                 /*
537                  * Ensure calling get_task_struct() before setting the reader
538                  * waiter to nil such that rwsem_down_read_slowpath() cannot
539                  * race with do_exit() by always holding a reference count
540                  * to the task to wakeup.
541                  */
542                 smp_store_release(&waiter->task, NULL);
543                 /*
544                  * Ensure issuing the wakeup (either by us or someone else)
545                  * after setting the reader waiter to nil.
546                  */
547                 wake_q_add_safe(wake_q, tsk);
548         }
549 }
550
551 /*
552  * This function must be called with the sem->wait_lock held to prevent
553  * race conditions between checking the rwsem wait list and setting the
554  * sem->count accordingly.
555  *
556  * If wstate is WRITER_HANDOFF, it will make sure that either the handoff
557  * bit is set or the lock is acquired with handoff bit cleared.
558  */
559 static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
560                                         enum writer_wait_state wstate)
561 {
562         long count, new;
563
564         lockdep_assert_held(&sem->wait_lock);
565
566         count = atomic_long_read(&sem->count);
567         do {
568                 bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);
569
570                 if (has_handoff && wstate == WRITER_NOT_FIRST)
571                         return false;
572
573                 new = count;
574
575                 if (count & RWSEM_LOCK_MASK) {
576                         if (has_handoff || (wstate != WRITER_HANDOFF))
577                                 return false;
578
579                         new |= RWSEM_FLAG_HANDOFF;
580                 } else {
581                         new |= RWSEM_WRITER_LOCKED;
582                         new &= ~RWSEM_FLAG_HANDOFF;
583
584                         if (list_is_singular(&sem->wait_list))
585                                 new &= ~RWSEM_FLAG_WAITERS;
586                 }
587         } while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new));
588
589         /*
590          * We have either acquired the lock with handoff bit cleared or
591          * set the handoff bit.
592          */
593         if (new & RWSEM_FLAG_HANDOFF)
594                 return false;
595
596         rwsem_set_owner(sem);
597         return true;
598 }
599
600 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
601 /*
602  * Try to acquire read lock before the reader is put on wait queue.
603  * Lock acquisition isn't allowed if the rwsem is locked or a writer handoff
604  * is ongoing.
605  */
606 static inline bool rwsem_try_read_lock_unqueued(struct rw_semaphore *sem)
607 {
608         long count = atomic_long_read(&sem->count);
609
610         if (count & (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))
611                 return false;
612
613         count = atomic_long_fetch_add_acquire(RWSEM_READER_BIAS, &sem->count);
614         if (!(count & (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))) {
615                 rwsem_set_reader_owned(sem);
616                 lockevent_inc(rwsem_opt_rlock);
617                 return true;
618         }
619
620         /* Back out the change */
621         atomic_long_add(-RWSEM_READER_BIAS, &sem->count);
622         return false;
623 }
624
625 /*
626  * Try to acquire write lock before the writer has been put on wait queue.
627  */
628 static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
629 {
630         long count = atomic_long_read(&sem->count);
631
632         while (!(count & (RWSEM_LOCK_MASK|RWSEM_FLAG_HANDOFF))) {
633                 if (atomic_long_try_cmpxchg_acquire(&sem->count, &count,
634                                         count | RWSEM_WRITER_LOCKED)) {
635                         rwsem_set_owner(sem);
636                         lockevent_inc(rwsem_opt_wlock);
637                         return true;
638                 }
639         }
640         return false;
641 }
642
643 static inline bool owner_on_cpu(struct task_struct *owner)
644 {
645         /*
646          * Due to lock holder preemption, we skip spinning if the
647          * task is not on a CPU or its CPU is preempted.
648          */
649         return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
650 }
651
652 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem,
653                                            unsigned long nonspinnable)
654 {
655         struct task_struct *owner;
656         unsigned long flags;
657         bool ret = true;
658
659         BUILD_BUG_ON(!(RWSEM_OWNER_UNKNOWN & RWSEM_NONSPINNABLE));
660
661         if (need_resched()) {
662                 lockevent_inc(rwsem_opt_fail);
663                 return false;
664         }
665
666         preempt_disable();
667         rcu_read_lock();
668         owner = rwsem_owner_flags(sem, &flags);
669         /*
670          * Don't check the read-owner as the entry may be stale.
671          */
672         if ((flags & nonspinnable) ||
673             (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
674                 ret = false;
675         rcu_read_unlock();
676         preempt_enable();
677
678         lockevent_cond_inc(rwsem_opt_fail, !ret);
679         return ret;
680 }
681
682 /*
683  * The rwsem_spin_on_owner() function returns the following 4 values
684  * depending on the lock owner state.
685  *   OWNER_NULL  : owner is currently NULL
686  *   OWNER_WRITER: when owner changes and is a writer
687  *   OWNER_READER: when owner changes and the new owner may be a reader.
688  *   OWNER_NONSPINNABLE:
689  *                 when optimistic spinning has to stop because either the
690  *                 owner stops running, is unknown, or its timeslice has
691  *                 been used up.
692  */
693 enum owner_state {
694         OWNER_NULL              = 1 << 0,
695         OWNER_WRITER            = 1 << 1,
696         OWNER_READER            = 1 << 2,
697         OWNER_NONSPINNABLE      = 1 << 3,
698 };
699 #define OWNER_SPINNABLE         (OWNER_NULL | OWNER_WRITER | OWNER_READER)
700
701 static inline enum owner_state
702 rwsem_owner_state(struct task_struct *owner, unsigned long flags, unsigned long nonspinnable)
703 {
704         if (flags & nonspinnable)
705                 return OWNER_NONSPINNABLE;
706
707         if (flags & RWSEM_READER_OWNED)
708                 return OWNER_READER;
709
710         return owner ? OWNER_WRITER : OWNER_NULL;
711 }
712
713 static noinline enum owner_state
714 rwsem_spin_on_owner(struct rw_semaphore *sem, unsigned long nonspinnable)
715 {
716         struct task_struct *new, *owner;
717         unsigned long flags, new_flags;
718         enum owner_state state;
719
720         owner = rwsem_owner_flags(sem, &flags);
721         state = rwsem_owner_state(owner, flags, nonspinnable);
722         if (state != OWNER_WRITER)
723                 return state;
724
725         rcu_read_lock();
726         for (;;) {
727                 if (atomic_long_read(&sem->count) & RWSEM_FLAG_HANDOFF) {
728                         state = OWNER_NONSPINNABLE;
729                         break;
730                 }
731
732                 new = rwsem_owner_flags(sem, &new_flags);
733                 if ((new != owner) || (new_flags != flags)) {
734                         state = rwsem_owner_state(new, new_flags, nonspinnable);
735                         break;
736                 }
737
738                 /*
739                  * Ensure we emit the owner->on_cpu dereference _after_
740                  * checking sem->owner still matches owner. If that fails,
741                  * owner might point to free()d memory; if it still matches,
742                  * the rcu_read_lock() ensures the memory stays valid.
743                  */
744                 barrier();
745
746                 if (need_resched() || !owner_on_cpu(owner)) {
747                         state = OWNER_NONSPINNABLE;
748                         break;
749                 }
750
751                 cpu_relax();
752         }
753         rcu_read_unlock();
754
755         return state;
756 }
757
758 /*
759  * Calculate reader-owned rwsem spinning threshold for writer
760  *
761  * The more readers own the rwsem, the longer it will take for them to
762  * wind down and free the rwsem. So the empirical formula used to
763  * determine the actual spinning time limit here is:
764  *
765  *   Spinning threshold = (10 + nr_readers/2)us
766  *
767  * The limit is capped to a maximum of 25us (30 readers). This is just
768  * a heuristic and is subject to change in the future.
769  */
770 static inline u64 rwsem_rspin_threshold(struct rw_semaphore *sem)
771 {
772         long count = atomic_long_read(&sem->count);
773         int readers = count >> RWSEM_READER_SHIFT;
774         u64 delta;
775
776         if (readers > 30)
777                 readers = 30;
778         delta = (20 + readers) * NSEC_PER_USEC / 2;
779
780         return sched_clock() + delta;
781 }
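
/*
 * Worked example (illustrative): with 10 readers, delta is
 * (20 + 10) * NSEC_PER_USEC / 2 = 15000ns = 15us, i.e. 10 + 10/2 us as
 * stated in the formula above. At the 30-reader cap, delta is 25us.
 */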
782
783 static bool rwsem_optimistic_spin(struct rw_semaphore *sem, bool wlock)
784 {
785         bool taken = false;
786         int prev_owner_state = OWNER_NULL;
787         int loop = 0;
788         u64 rspin_threshold = 0;
789         unsigned long nonspinnable = wlock ? RWSEM_WR_NONSPINNABLE
790                                            : RWSEM_RD_NONSPINNABLE;
791
792         preempt_disable();
793
794         /* sem->wait_lock should not be held when doing optimistic spinning */
795         if (!osq_lock(&sem->osq))
796                 goto done;
797
798         /*
799          * Optimistically spin on the owner field and attempt to acquire the
800          * lock whenever the owner changes. Spinning will be stopped when:
801          *  1) the owning writer isn't running; or
802          *  2) readers own the lock and spinning time has exceeded limit.
803          */
804         for (;;) {
805                 enum owner_state owner_state;
806
807                 owner_state = rwsem_spin_on_owner(sem, nonspinnable);
808                 if (!(owner_state & OWNER_SPINNABLE))
809                         break;
810
811                 /*
812                  * Try to acquire the lock
813                  */
814                 taken = wlock ? rwsem_try_write_lock_unqueued(sem)
815                               : rwsem_try_read_lock_unqueued(sem);
816
817                 if (taken)
818                         break;
819
820                 /*
821                  * Time-based reader-owned rwsem optimistic spinning
822                  */
823                 if (wlock && (owner_state == OWNER_READER)) {
824                         /*
825                          * Re-initialize rspin_threshold every time the
826                          * owner state changes from non-reader to reader.
827                          * This allows a writer to steal the lock in between
828                          * 2 reader phases and have the threshold reset at
829                          * the beginning of the 2nd reader phase.
830                          */
831                         if (prev_owner_state != OWNER_READER) {
832                                 if (rwsem_test_oflags(sem, nonspinnable))
833                                         break;
834                                 rspin_threshold = rwsem_rspin_threshold(sem);
835                                 loop = 0;
836                         }
837
838                         /*
839                          * Check time threshold once every 16 iterations to
840                          * avoid calling sched_clock() too frequently so
841                          * as to reduce the average latency between the times
842                          * when the lock becomes free and when the spinner
843                          * is ready to do a trylock.
844                          */
845                         else if (!(++loop & 0xf) && (sched_clock() > rspin_threshold)) {
846                                 rwsem_set_nonspinnable(sem);
847                                 lockevent_inc(rwsem_opt_nospin);
848                                 break;
849                         }
850                 }
851
852                 /*
853                  * An RT task cannot do optimistic spinning if it cannot
854                  * be sure the lock holder is running or live-lock may
855                  * happen if the current task and the lock holder happen
856                  * to run on the same CPU. However, aborting optimistic
857                  * spinning while a NULL owner is detected may miss some
858                  * opportunity where spinning can continue without causing
859                  * a problem.
860                  *
861                  * There are 2 possible cases where an RT task may be able
862                  * to continue spinning.
863                  *
864                  * 1) The lock owner is in the process of releasing the
865                  *    lock, sem->owner is cleared but the lock has not
866                  *    been released yet.
867                  * 2) The lock was free and owner cleared, but another
868                  *    task just comes in and acquires the lock before
869                  *    we try to get it. The new owner may be a spinnable
870                  *    writer.
871                  *
872                  * To take advantage of the two scenarios listed above, the RT
873                  * task is made to retry one more time to see if it can
874                  * acquire the lock or continue spinning on the new owning
875                  * writer. Of course, if the time lag is long enough or the
876                  * new owner is not a writer or spinnable, the RT task will
877                  * quit spinning.
878                  *
879                  * If the owner is a writer, the need_resched() check is
880                  * done inside rwsem_spin_on_owner(). If the owner is not
881                  * a writer, need_resched() check needs to be done here.
882                  */
883                 if (owner_state != OWNER_WRITER) {
884                         if (need_resched())
885                                 break;
886                         if (rt_task(current) &&
887                            (prev_owner_state != OWNER_WRITER))
888                                 break;
889                 }
890                 prev_owner_state = owner_state;
891
892                 /*
893                  * The cpu_relax() call is a compiler barrier which forces
894                  * everything in this loop to be re-loaded. We don't need
895                  * memory barriers as we'll eventually observe the right
896                  * values at the cost of a few extra spins.
897                  */
898                 cpu_relax();
899         }
900         osq_unlock(&sem->osq);
901 done:
902         preempt_enable();
903         lockevent_cond_inc(rwsem_opt_fail, !taken);
904         return taken;
905 }
906
907 /*
908  * Clear the owner's RWSEM_WR_NONSPINNABLE bit if it is set. This should
909  * only be called when the reader count reaches 0.
910  *
911  * This gives writers a better chance to acquire the rwsem first before
912  * readers when the rwsem was being held by readers for a relatively long
913  * period of time. A race can happen where an optimistic spinner may have
914  * just stolen the rwsem and set the owner, but just clearing the
915  * RWSEM_WR_NONSPINNABLE bit will do no harm anyway.
916  */
917 static inline void clear_wr_nonspinnable(struct rw_semaphore *sem)
918 {
919         if (rwsem_test_oflags(sem, RWSEM_WR_NONSPINNABLE))
920                 atomic_long_andnot(RWSEM_WR_NONSPINNABLE, &sem->owner);
921 }
922
923 /*
924  * This function is called when the reader fails to acquire the lock via
925  * optimistic spinning. In this case we will still attempt to do a trylock
926  * if comparing the current rwsem state with the state at slowpath entry
927  * indicates that the reader is still in a valid reader phase.
928  * This happens when the following conditions are true:
929  *
930  * 1) The lock is currently reader owned, and
931  * 2) The lock was previously not reader-owned or the last read owner has changed.
932  *
933  * In the former case, we have transitioned from a writer phase to a
934  * reader-phase while spinning. In the latter case, it means the reader
935  * phase hasn't ended when we entered the optimistic spinning loop. In
936  * both cases, the reader is eligible to acquire the lock. This is the
937  * secondary path where a read lock is acquired optimistically.
938  *
939  * The reader non-spinnable bit wasn't set at the time of entry, or this
940  * path would not be taken at all.
941  */
942 static inline bool rwsem_reader_phase_trylock(struct rw_semaphore *sem,
943                                               unsigned long last_rowner)
944 {
945         unsigned long owner = atomic_long_read(&sem->owner);
946
947         if (!(owner & RWSEM_READER_OWNED))
948                 return false;
949
950         if (((owner ^ last_rowner) & ~RWSEM_OWNER_FLAGS_MASK) &&
951             rwsem_try_read_lock_unqueued(sem)) {
952                 lockevent_inc(rwsem_opt_rlock2);
953                 lockevent_add(rwsem_opt_fail, -1);
954                 return true;
955         }
956         return false;
957 }
958 #else
959 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem,
960                                            unsigned long nonspinnable)
961 {
962         return false;
963 }
964
965 static inline bool rwsem_optimistic_spin(struct rw_semaphore *sem, bool wlock)
966 {
967         return false;
968 }
969
970 static inline void clear_wr_nonspinnable(struct rw_semaphore *sem) { }
971
972 static inline bool rwsem_reader_phase_trylock(struct rw_semaphore *sem,
973                                               unsigned long last_rowner)
974 {
975         return false;
976 }
977 #endif
978
979 /*
980  * Wait for the read lock to be granted
981  */
982 static struct rw_semaphore __sched *
983 rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
984 {
985         long count, adjustment = -RWSEM_READER_BIAS;
986         struct rwsem_waiter waiter;
987         DEFINE_WAKE_Q(wake_q);
988         bool wake = false;
989
990         /*
991          * Save the current read-owner of rwsem, if available, and the
992          * reader nonspinnable bit.
993          */
994         waiter.last_rowner = atomic_long_read(&sem->owner);
995         if (!(waiter.last_rowner & RWSEM_READER_OWNED))
996                 waiter.last_rowner &= RWSEM_RD_NONSPINNABLE;
997
998         if (!rwsem_can_spin_on_owner(sem, RWSEM_RD_NONSPINNABLE))
999                 goto queue;
1000
1001         /*
1002          * Undo read bias from down_read() and do optimistic spinning.
1003          */
1004         atomic_long_add(-RWSEM_READER_BIAS, &sem->count);
1005         adjustment = 0;
1006         if (rwsem_optimistic_spin(sem, false)) {
1007                 /* rwsem_optimistic_spin() implies ACQUIRE on success */
1008                 /*
1009                  * Wake up other readers in the wait list if the front
1010                  * waiter is a reader.
1011                  */
1012                 if ((atomic_long_read(&sem->count) & RWSEM_FLAG_WAITERS)) {
1013                         raw_spin_lock_irq(&sem->wait_lock);
1014                         if (!list_empty(&sem->wait_list))
1015                                 rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED,
1016                                                 &wake_q);
1017                         raw_spin_unlock_irq(&sem->wait_lock);
1018                         wake_up_q(&wake_q);
1019                 }
1020                 return sem;
1021         } else if (rwsem_reader_phase_trylock(sem, waiter.last_rowner)) {
1022                 /* rwsem_reader_phase_trylock() implies ACQUIRE on success */
1023                 return sem;
1024         }
1025
1026 queue:
1027         waiter.task = current;
1028         waiter.type = RWSEM_WAITING_FOR_READ;
1029         waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
1030
1031         raw_spin_lock_irq(&sem->wait_lock);
1032         if (list_empty(&sem->wait_list)) {
1033                 /*
1034                  * In case the wait queue is empty, the lock isn't owned by
1035                  * a writer and the handoff bit isn't set, this reader can
1036                  * exit the slowpath and return immediately as its
1037                  * RWSEM_READER_BIAS has already been set in the count.
1038                  */
1039                 if (adjustment && !(atomic_long_read(&sem->count) &
1040                      (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))) {
1041                         /* Provide lock ACQUIRE */
1042                         smp_acquire__after_ctrl_dep();
1043                         raw_spin_unlock_irq(&sem->wait_lock);
1044                         rwsem_set_reader_owned(sem);
1045                         lockevent_inc(rwsem_rlock_fast);
1046                         return sem;
1047                 }
1048                 adjustment += RWSEM_FLAG_WAITERS;
1049         }
1050         list_add_tail(&waiter.list, &sem->wait_list);
1051
1052         /* we're now waiting on the lock, but no longer actively locking */
1053         if (adjustment)
1054                 count = atomic_long_add_return(adjustment, &sem->count);
1055         else
1056                 count = atomic_long_read(&sem->count);
1057
1058         /*
1059          * If there are no active locks, wake the front queued process(es).
1060          *
1061          * If there are no writers and we are first in the queue,
1062          * wake our own waiter to join the existing active readers !
1063          */
1064         if (!(count & RWSEM_LOCK_MASK)) {
1065                 clear_wr_nonspinnable(sem);
1066                 wake = true;
1067         }
1068         if (wake || (!(count & RWSEM_WRITER_MASK) &&
1069                     (adjustment & RWSEM_FLAG_WAITERS)))
1070                 rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
1071
1072         raw_spin_unlock_irq(&sem->wait_lock);
1073         wake_up_q(&wake_q);
1074
1075         /* wait to be given the lock */
1076         for (;;) {
1077                 set_current_state(state);
1078                 if (!smp_load_acquire(&waiter.task)) {
1079                         /* Matches rwsem_mark_wake()'s smp_store_release(). */
1080                         break;
1081                 }
1082                 if (signal_pending_state(state, current)) {
1083                         raw_spin_lock_irq(&sem->wait_lock);
1084                         if (waiter.task)
1085                                 goto out_nolock;
1086                         raw_spin_unlock_irq(&sem->wait_lock);
1087                         /* Ordered by sem->wait_lock against rwsem_mark_wake(). */
1088                         break;
1089                 }
1090                 schedule();
1091                 lockevent_inc(rwsem_sleep_reader);
1092         }
1093
1094         __set_current_state(TASK_RUNNING);
1095         lockevent_inc(rwsem_rlock);
1096         return sem;
1097
1098 out_nolock:
1099         list_del(&waiter.list);
1100         if (list_empty(&sem->wait_list)) {
1101                 atomic_long_andnot(RWSEM_FLAG_WAITERS|RWSEM_FLAG_HANDOFF,
1102                                    &sem->count);
1103         }
1104         raw_spin_unlock_irq(&sem->wait_lock);
1105         __set_current_state(TASK_RUNNING);
1106         lockevent_inc(rwsem_rlock_fail);
1107         return ERR_PTR(-EINTR);
1108 }
1109
1110 /*
1111  * This function is called by a write lock owner. So the owner value
1112  * won't get changed by others.
1113  */
1114 static inline void rwsem_disable_reader_optspin(struct rw_semaphore *sem,
1115                                                 bool disable)
1116 {
1117         if (unlikely(disable)) {
1118                 atomic_long_or(RWSEM_RD_NONSPINNABLE, &sem->owner);
1119                 lockevent_inc(rwsem_opt_norspin);
1120         }
1121 }
1122
1123 /*
1124  * Wait until we successfully acquire the write lock
1125  */
1126 static struct rw_semaphore *
1127 rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
1128 {
1129         long count;
1130         bool disable_rspin;
1131         enum writer_wait_state wstate;
1132         struct rwsem_waiter waiter;
1133         struct rw_semaphore *ret = sem;
1134         DEFINE_WAKE_Q(wake_q);
1135
1136         /* do optimistic spinning and steal lock if possible */
1137         if (rwsem_can_spin_on_owner(sem, RWSEM_WR_NONSPINNABLE) &&
1138             rwsem_optimistic_spin(sem, true)) {
1139                 /* rwsem_optimistic_spin() implies ACQUIRE on success */
1140                 return sem;
1141         }
1142
1143         /*
1144          * Disable reader optimistic spinning for this rwsem after
1145          * acquiring the write lock when the setting of the nonspinnable
1146          * bits are observed.
1147          * bits is observed.
1148         disable_rspin = atomic_long_read(&sem->owner) & RWSEM_NONSPINNABLE;
1149
1150         /*
1151          * Optimistic spinning failed, proceed to the slowpath
1152          * and block until we can acquire the sem.
1153          */
1154         waiter.task = current;
1155         waiter.type = RWSEM_WAITING_FOR_WRITE;
1156         waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
1157
1158         raw_spin_lock_irq(&sem->wait_lock);
1159
1160         /* account for this before adding a new element to the list */
1161         wstate = list_empty(&sem->wait_list) ? WRITER_FIRST : WRITER_NOT_FIRST;
1162
1163         list_add_tail(&waiter.list, &sem->wait_list);
1164
1165         /* we're now waiting on the lock */
1166         if (wstate == WRITER_NOT_FIRST) {
1167                 count = atomic_long_read(&sem->count);
1168
1169                 /*
1170                  * If there were already threads queued before us and:
1171                  *  1) there are no active locks, wake the front
1172                  *     queued process(es) as the handoff bit might be set.
1173                  *  2) there are no active writers and some readers, the lock
1174                  *     must be read owned; so we try to wake any read lock
1175                  *     waiters that were queued ahead of us.
1176                  */
1177                 if (count & RWSEM_WRITER_MASK)
1178                         goto wait;
1179
1180                 rwsem_mark_wake(sem, (count & RWSEM_READER_MASK)
1181                                         ? RWSEM_WAKE_READERS
1182                                         : RWSEM_WAKE_ANY, &wake_q);
1183
1184                 if (!wake_q_empty(&wake_q)) {
1185                         /*
1186                          * We want to minimize wait_lock hold time especially
1187                          * when a large number of readers are to be woken up.
1188                          */
1189                         raw_spin_unlock_irq(&sem->wait_lock);
1190                         wake_up_q(&wake_q);
1191                         wake_q_init(&wake_q);   /* Used again, reinit */
1192                         raw_spin_lock_irq(&sem->wait_lock);
1193                 }
1194         } else {
1195                 atomic_long_or(RWSEM_FLAG_WAITERS, &sem->count);
1196         }
1197
1198 wait:
1199         /* wait until we successfully acquire the lock */
1200         set_current_state(state);
1201         for (;;) {
1202                 if (rwsem_try_write_lock(sem, wstate)) {
1203                         /* rwsem_try_write_lock() implies ACQUIRE on success */
1204                         break;
1205                 }
1206
1207                 raw_spin_unlock_irq(&sem->wait_lock);
1208
1209                 /* Block until there are no active lockers. */
1210                 for (;;) {
1211                         if (signal_pending_state(state, current))
1212                                 goto out_nolock;
1213
1214                         schedule();
1215                         lockevent_inc(rwsem_sleep_writer);
1216                         set_current_state(state);
1217                         /*
1218                          * If HANDOFF bit is set, unconditionally do
1219                          * a trylock.
1220                          */
1221                         if (wstate == WRITER_HANDOFF)
1222                                 break;
1223
1224                         if ((wstate == WRITER_NOT_FIRST) &&
1225                             (rwsem_first_waiter(sem) == &waiter))
1226                                 wstate = WRITER_FIRST;
1227
1228                         count = atomic_long_read(&sem->count);
1229                         if (!(count & RWSEM_LOCK_MASK))
1230                                 break;
1231
1232                         /*
1233                          * The setting of the handoff bit is deferred
1234                          * until rwsem_try_write_lock() is called.
1235                          */
1236                         if ((wstate == WRITER_FIRST) && (rt_task(current) ||
1237                             time_after(jiffies, waiter.timeout))) {
1238                                 wstate = WRITER_HANDOFF;
1239                                 lockevent_inc(rwsem_wlock_handoff);
1240                                 break;
1241                         }
1242                 }
1243
1244                 raw_spin_lock_irq(&sem->wait_lock);
1245         }
1246         __set_current_state(TASK_RUNNING);
1247         list_del(&waiter.list);
1248         rwsem_disable_reader_optspin(sem, disable_rspin);
1249         raw_spin_unlock_irq(&sem->wait_lock);
1250         lockevent_inc(rwsem_wlock);
1251
1252         return ret;
1253
1254 out_nolock:
1255         __set_current_state(TASK_RUNNING);
1256         raw_spin_lock_irq(&sem->wait_lock);
1257         list_del(&waiter.list);
1258
1259         if (unlikely(wstate == WRITER_HANDOFF))
1260                 atomic_long_add(-RWSEM_FLAG_HANDOFF,  &sem->count);
1261
1262         if (list_empty(&sem->wait_list))
1263                 atomic_long_andnot(RWSEM_FLAG_WAITERS, &sem->count);
1264         else
1265                 rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
1266         raw_spin_unlock_irq(&sem->wait_lock);
1267         wake_up_q(&wake_q);
1268         lockevent_inc(rwsem_wlock_fail);
1269
1270         return ERR_PTR(-EINTR);
1271 }
1272
1273 /*
1274  * handle waking up a waiter on the semaphore
1275  * - up_read/up_write has decremented the active part of count if we come here
1276  */
1277 static struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem, long count)
1278 {
1279         unsigned long flags;
1280         DEFINE_WAKE_Q(wake_q);
1281
1282         raw_spin_lock_irqsave(&sem->wait_lock, flags);
1283
1284         if (!list_empty(&sem->wait_list))
1285                 rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
1286
1287         raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
1288         wake_up_q(&wake_q);
1289
1290         return sem;
1291 }
1292
1293 /*
1294  * downgrade a write lock into a read lock
1295  * - caller incremented waiting part of count and discovered it still negative
1296  * - just wake up any readers at the front of the queue
1297  */
1298 static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
1299 {
1300         unsigned long flags;
1301         DEFINE_WAKE_Q(wake_q);
1302
1303         raw_spin_lock_irqsave(&sem->wait_lock, flags);
1304
1305         if (!list_empty(&sem->wait_list))
1306                 rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);
1307
1308         raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
1309         wake_up_q(&wake_q);
1310
1311         return sem;
1312 }
1313
1314 /*
1315  * lock for reading
1316  */
1317 inline void __down_read(struct rw_semaphore *sem)
1318 {
1319         if (!rwsem_read_trylock(sem)) {
1320                 rwsem_down_read_slowpath(sem, TASK_UNINTERRUPTIBLE);
1321                 DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1322         } else {
1323                 rwsem_set_reader_owned(sem);
1324         }
1325 }
1326
1327 static inline int __down_read_killable(struct rw_semaphore *sem)
1328 {
1329         if (!rwsem_read_trylock(sem)) {
1330                 if (IS_ERR(rwsem_down_read_slowpath(sem, TASK_KILLABLE)))
1331                         return -EINTR;
1332                 DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1333         } else {
1334                 rwsem_set_reader_owned(sem);
1335         }
1336         return 0;
1337 }
1338
1339 static inline int __down_read_trylock(struct rw_semaphore *sem)
1340 {
1341         /*
1342          * Optimize for the case when the rwsem is not locked at all.
1343          */
1344         long tmp = RWSEM_UNLOCKED_VALUE;
1345
1346         do {
1347                 if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
1348                                         tmp + RWSEM_READER_BIAS)) {
1349                         rwsem_set_reader_owned(sem);
1350                         return 1;
1351                 }
1352         } while (!(tmp & RWSEM_READ_FAILED_MASK));
1353         return 0;
1354 }
1355
1356 /*
1357  * lock for writing
1358  */
1359 static inline void __down_write(struct rw_semaphore *sem)
1360 {
1361         long tmp = RWSEM_UNLOCKED_VALUE;
1362
1363         if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
1364                                                       RWSEM_WRITER_LOCKED)))
1365                 rwsem_down_write_slowpath(sem, TASK_UNINTERRUPTIBLE);
1366         else
1367                 rwsem_set_owner(sem);
1368 }
1369
1370 static inline int __down_write_killable(struct rw_semaphore *sem)
1371 {
1372         long tmp = RWSEM_UNLOCKED_VALUE;
1373
1374         if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
1375                                                       RWSEM_WRITER_LOCKED))) {
1376                 if (IS_ERR(rwsem_down_write_slowpath(sem, TASK_KILLABLE)))
1377                         return -EINTR;
1378         } else {
1379                 rwsem_set_owner(sem);
1380         }
1381         return 0;
1382 }
1383
1384 static inline int __down_write_trylock(struct rw_semaphore *sem)
1385 {
1386         long tmp = RWSEM_UNLOCKED_VALUE;
1387
1388         if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
1389                                             RWSEM_WRITER_LOCKED)) {
1390                 rwsem_set_owner(sem);
1391                 return true;
1392         }
1393         return false;
1394 }
1395
1396 /*
1397  * unlock after reading
1398  */
1399 inline void __up_read(struct rw_semaphore *sem)
1400 {
1401         long tmp;
1402
1403         DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1404         rwsem_clear_reader_owned(sem);
1405         tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
1406         DEBUG_RWSEMS_WARN_ON(tmp < 0, sem);
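	/*
	 * Wake up waiters only if this was the last lock holder: no reader
	 * count or writer bit remains in @tmp and RWSEM_FLAG_WAITERS is set.
	 * Before waking, the writer-nonspinnable bit is cleared so writers
	 * may resume optimistic spinning.
	 */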
1407         if (unlikely((tmp & (RWSEM_LOCK_MASK|RWSEM_FLAG_WAITERS)) ==
1408                       RWSEM_FLAG_WAITERS)) {
1409                 clear_wr_nonspinnable(sem);
1410                 rwsem_wake(sem, tmp);
1411         }
1412 }
1413
1414 /*
1415  * unlock after writing
1416  */
1417 static inline void __up_write(struct rw_semaphore *sem)
1418 {
1419         long tmp;
1420
1421         /*
1422          * sem->owner may differ from current if the ownership is transferred
1423          * to an anonymous writer by setting the RWSEM_NONSPINNABLE bits.
1424          */
1425         DEBUG_RWSEMS_WARN_ON((rwsem_owner(sem) != current) &&
1426                             !rwsem_test_oflags(sem, RWSEM_NONSPINNABLE), sem);
1427         rwsem_clear_owner(sem);
1428         tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
1429         if (unlikely(tmp & RWSEM_FLAG_WAITERS))
1430                 rwsem_wake(sem, tmp);
1431 }
1432
1433 /*
1434  * downgrade write lock to read lock
1435  */
1436 static inline void __downgrade_write(struct rw_semaphore *sem)
1437 {
1438         long tmp;
1439
1440         /*
1441          * When downgrading from exclusive to shared ownership,
1442          * anything inside the write-locked region cannot leak
1443          * into the read side. In contrast, anything in the
1444          * read-locked region is ok to be re-ordered into the
1445          * write side. As such, rely on RELEASE semantics.
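	 *
	 * Illustrative sketch (x and y are hypothetical shared variables):
	 * a WRITE_ONCE(x, 1) done while write-locked is ordered before the
	 * RELEASE below and cannot leak into the read-locked region, while
	 * a READ_ONCE(y) issued after the downgrade may legitimately be
	 * reordered up into the write-locked region.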
1446          */
1447         DEBUG_RWSEMS_WARN_ON(rwsem_owner(sem) != current, sem);
1448         tmp = atomic_long_fetch_add_release(
1449                 -RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count);
1450         rwsem_set_reader_owned(sem);
1451         if (tmp & RWSEM_FLAG_WAITERS)
1452                 rwsem_downgrade_wake(sem);
1453 }
1454
1455 /*
1456  * lock for reading
1457  */
1458 void __sched down_read(struct rw_semaphore *sem)
1459 {
1460         might_sleep();
1461         rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
1462
1463         LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
1464 }
1465 EXPORT_SYMBOL(down_read);
1466
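/*
 * Example: a minimal reader-side critical section, as a usage sketch.
 * "frob_lock", "frob_config" and "frob_cfg" are hypothetical names, not
 * defined in this file:
 *
 *	static DECLARE_RWSEM(frob_lock);
 *	static struct frob_config frob_cfg;
 *
 *	int frob_get_mode(void)
 *	{
 *		int mode;
 *
 *		down_read(&frob_lock);		may sleep, shared with readers
 *		mode = frob_cfg.mode;
 *		up_read(&frob_lock);
 *		return mode;
 *	}
 */
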
1467 int __sched down_read_killable(struct rw_semaphore *sem)
1468 {
1469         might_sleep();
1470         rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
1471
1472         if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
1473                 rwsem_release(&sem->dep_map, 1, _RET_IP_);
1474                 return -EINTR;
1475         }
1476
1477         return 0;
1478 }
1479 EXPORT_SYMBOL(down_read_killable);
1480
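/*
 * Example: taking the read lock killably so a fatally-signalled task can
 * back out (sketch; "frob_lock" as in the example above):
 *
 *	ret = down_read_killable(&frob_lock);
 *	if (ret)
 *		return ret;		-EINTR if a fatal signal arrived
 *	...
 *	up_read(&frob_lock);
 */
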
1481 /*
1482  * trylock for reading -- returns 1 if successful, 0 if contention
1483  */
1484 int down_read_trylock(struct rw_semaphore *sem)
1485 {
1486         int ret = __down_read_trylock(sem);
1487
1488         if (ret == 1)
1489                 rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
1490         return ret;
1491 }
1492 EXPORT_SYMBOL(down_read_trylock);
1493
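/*
 * Example: opportunistic read locking where blocking is undesirable
 * (sketch; frob_get_mode_slow() is a hypothetical fallback path):
 *
 *	if (!down_read_trylock(&frob_lock))
 *		return frob_get_mode_slow();
 *	mode = frob_cfg.mode;
 *	up_read(&frob_lock);
 */
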
1494 /*
1495  * lock for writing
1496  */
1497 void __sched down_write(struct rw_semaphore *sem)
1498 {
1499         might_sleep();
1500         rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
1501         LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
1502 }
1503 EXPORT_SYMBOL(down_write);
1504
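/*
 * Example: a writer-side update (sketch; names as in the earlier example):
 *
 *	void frob_set_mode(int mode)
 *	{
 *		down_write(&frob_lock);		exclusive, may sleep
 *		frob_cfg.mode = mode;
 *		up_write(&frob_lock);
 *	}
 */
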
1505 /*
1506  * lock for writing, interruptible by fatal signals
1507  */
1508 int __sched down_write_killable(struct rw_semaphore *sem)
1509 {
1510         might_sleep();
1511         rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
1512
1513         if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
1514                                   __down_write_killable)) {
1515                 rwsem_release(&sem->dep_map, 1, _RET_IP_);
1516                 return -EINTR;
1517         }
1518
1519         return 0;
1520 }
1521 EXPORT_SYMBOL(down_write_killable);
1522
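/*
 * Example: a killable write lock in a path that can return an error
 * (sketch; "frob_lock" and "frob_cfg" as above):
 *
 *	if (down_write_killable(&frob_lock))
 *		return -EINTR;
 *	frob_cfg.mode = mode;
 *	up_write(&frob_lock);
 */
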
1523 /*
1524  * trylock for writing -- returns 1 if successful, 0 if contention
1525  */
1526 int down_write_trylock(struct rw_semaphore *sem)
1527 {
1528         int ret = __down_write_trylock(sem);
1529
1530         if (ret == 1)
1531                 rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);
1532
1533         return ret;
1534 }
1535 EXPORT_SYMBOL(down_write_trylock);
1536
1537 /*
1538  * release a read lock
1539  */
1540 void up_read(struct rw_semaphore *sem)
1541 {
1542         rwsem_release(&sem->dep_map, 1, _RET_IP_);
1543         __up_read(sem);
1544 }
1545 EXPORT_SYMBOL(up_read);
1546
1547 /*
1548  * release a write lock
1549  */
1550 void up_write(struct rw_semaphore *sem)
1551 {
1552         rwsem_release(&sem->dep_map, 1, _RET_IP_);
1553         __up_write(sem);
1554 }
1555 EXPORT_SYMBOL(up_write);
1556
1557 /*
1558  * downgrade write lock to read lock
1559  */
1560 void downgrade_write(struct rw_semaphore *sem)
1561 {
1562         lock_downgrade(&sem->dep_map, _RET_IP_);
1563         __downgrade_write(sem);
1564 }
1565 EXPORT_SYMBOL(downgrade_write);
1566
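/*
 * Example: publish an update, then keep reading it without excluding other
 * readers (sketch; use_mode() is a hypothetical consumer, other names as
 * above):
 *
 *	down_write(&frob_lock);
 *	frob_cfg.mode = new_mode;
 *	downgrade_write(&frob_lock);	other readers may now enter
 *	use_mode(frob_cfg.mode);	still protected against writers
 *	up_read(&frob_lock);		release the read lock we now hold
 */
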
1567 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1568
1569 void down_read_nested(struct rw_semaphore *sem, int subclass)
1570 {
1571         might_sleep();
1572         rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
1573         LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
1574 }
1575 EXPORT_SYMBOL(down_read_nested);
1576
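/*
 * Example: read-locking two rwsems of the same lock class in a fixed
 * parent-then-child order, annotated for lockdep (sketch; "parent" and
 * "child" are hypothetical objects embedding an rwsem):
 *
 *	down_read(&parent->sem);
 *	down_read_nested(&child->sem, SINGLE_DEPTH_NESTING);
 *	...
 *	up_read(&child->sem);
 *	up_read(&parent->sem);
 */
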
1577 void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
1578 {
1579         might_sleep();
1580         rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
1581         LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
1582 }
1583 EXPORT_SYMBOL(_down_write_nest_lock);
1584
1585 void down_read_non_owner(struct rw_semaphore *sem)
1586 {
1587         might_sleep();
1588         __down_read(sem);
1589         __rwsem_set_reader_owned(sem, NULL);
1590 }
1591 EXPORT_SYMBOL(down_read_non_owner);
1592
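/*
 * Example: a read lock acquired here but released later from a different
 * context via up_read_non_owner() (sketch; "obj" and queue_async_work()
 * are hypothetical):
 *
 *	down_read_non_owner(&obj->sem);
 *	queue_async_work(obj);		the completion path later does:
 *					up_read_non_owner(&obj->sem);
 */
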
1593 void down_write_nested(struct rw_semaphore *sem, int subclass)
1594 {
1595         might_sleep();
1596         rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
1597         LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
1598 }
1599 EXPORT_SYMBOL(down_write_nested);
1600
1601 int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass)
1602 {
1603         might_sleep();
1604         rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
1605
1606         if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
1607                                   __down_write_killable)) {
1608                 rwsem_release(&sem->dep_map, 1, _RET_IP_);
1609                 return -EINTR;
1610         }
1611
1612         return 0;
1613 }
1614 EXPORT_SYMBOL(down_write_killable_nested);
1615
1616 void up_read_non_owner(struct rw_semaphore *sem)
1617 {
1618         DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1619         __up_read(sem);
1620 }
1621 EXPORT_SYMBOL(up_read_non_owner);
1622
1623 #endif