sched/wait: Improve the bit-wait API parameter names in the API function prototypes
[sfrench/cifs-2.6.git] / include / linux / wait.h
1 #ifndef _LINUX_WAIT_H
2 #define _LINUX_WAIT_H
3 /*
4  * Linux wait queue related types and methods
5  */
6 #include <linux/list.h>
7 #include <linux/stddef.h>
8 #include <linux/spinlock.h>
9
10 #include <asm/current.h>
11 #include <uapi/linux/wait.h>
12
13 typedef struct wait_queue_entry wait_queue_entry_t;
14
/*
 * Wake callback attached to each queued entry; @mode/@flags/@key mirror the
 * __wake_up() arguments.  Returns non-zero when a task was actually woken.
 */
15 typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
16 int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
17
18 /* wait_queue_entry::flags */
19 #define WQ_FLAG_EXCLUSIVE       0x01
20 #define WQ_FLAG_WOKEN           0x02
21
22 /*
23  * A single wait-queue entry structure:
24  */
25 struct wait_queue_entry {
26         unsigned int            flags;
        /* typically the waiting task_struct; interpretation is up to ::func */
27         void                    *private;
28         wait_queue_func_t       func;
29         struct list_head        task_list;
30 };
31
/*
 * Key describing the bit being waited on: ::flags points at the word
 * containing bit ::bit_nr (see __WAIT_BIT_KEY_INITIALIZER()), or at an
 * atomic_t when ::bit_nr == WAIT_ATOMIC_T_BIT_NR.  ::timeout is only used
 * by the timeout variants of the bit-wait API.
 */
32 struct wait_bit_key {
33         void                    *flags;
34         int                     bit_nr;
35 #define WAIT_ATOMIC_T_BIT_NR    -1
36         unsigned long           timeout;
37 };
38
/* A wait-queue entry specialised for waiting on a bit (or atomic_t). */
39 struct wait_bit_queue_entry {
40         struct wait_bit_key     key;
41         struct wait_queue_entry wq_entry;
42 };
43
/* Head of a wait queue; ::lock protects ::task_list. */
44 struct wait_queue_head {
45         spinlock_t              lock;
46         struct list_head        task_list;
47 };
48 typedef struct wait_queue_head wait_queue_head_t;
49
50 struct task_struct;
51
52 /*
53  * Macros for declaration and initialisation of the datatypes
54  */
55
56 #define __WAITQUEUE_INITIALIZER(name, tsk) {                            \
57         .private        = tsk,                                          \
58         .func           = default_wake_function,                        \
59         .task_list      = { NULL, NULL } }
60
61 #define DECLARE_WAITQUEUE(name, tsk)                                    \
62         struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)
63
64 #define __WAIT_QUEUE_HEAD_INITIALIZER(name) {                           \
65         .lock           = __SPIN_LOCK_UNLOCKED(name.lock),              \
66         .task_list      = { &(name).task_list, &(name).task_list } }
67
68 #define DECLARE_WAIT_QUEUE_HEAD(name) \
69         struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
70
71 #define __WAIT_BIT_KEY_INITIALIZER(word, bit)                           \
72         { .flags = word, .bit_nr = bit, }
73
74 #define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)                              \
75         { .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }
76
77 extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);
78
/*
 * Runtime init: a macro so each call site gets its own static
 * lock_class_key and the stringified head name, for lock debugging.
 */
79 #define init_waitqueue_head(wq_head)                    \
80         do {                                            \
81                 static struct lock_class_key __key;     \
82                                                         \
83                 __init_waitqueue_head((wq_head), #wq_head, &__key);     \
84         } while (0)
85
/*
 * Under lockdep, on-stack heads go through init_waitqueue_head() so the
 * lock class key stays static rather than living on the stack.
 */
86 #ifdef CONFIG_LOCKDEP
87 # define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
88         ({ init_waitqueue_head(&name); name; })
89 # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
90         struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
91 #else
92 # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
93 #endif
94
95 static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
96 {
97         wq_entry->flags         = 0;
98         wq_entry->private       = p;
99         wq_entry->func          = default_wake_function;
100 }
101
102 static inline void
103 init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
104 {
105         wq_entry->flags         = 0;
106         wq_entry->private       = NULL;
107         wq_entry->func          = func;
108 }
109
110 /**
111  * waitqueue_active -- locklessly test for waiters on the queue
112  * @wq_head: the waitqueue to test for waiters
113  *
114  * returns true if the wait list is not empty
115  *
116  * NOTE: this function is lockless and requires care, incorrect usage _will_
117  * lead to sporadic and non-obvious failure.
118  *
119  * Use either while holding wait_queue_head::lock or when used for wakeups
120  * with an extra smp_mb() like:
121  *
122  *      CPU0 - waker                    CPU1 - waiter
123  *
124  *                                      for (;;) {
125  *      @cond = true;                     prepare_to_wait(&wq, &wait, state);
126  *      smp_mb();                         // smp_mb() from set_current_state()
127  *      if (waitqueue_active(wq))         if (@cond)
128  *        wake_up(wq);                      break;
129  *                                        schedule();
130  *                                      }
131  *                                      finish_wait(&wq, &wait);
132  *
133  * Because without the explicit smp_mb() it's possible for the
134  * waitqueue_active() load to get hoisted over the @cond store such that we'll
135  * observe an empty wait list while the waiter might not observe @cond.
136  *
137  * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
138  * which (when the lock is uncontended) are of roughly equal cost.
139  */
140 static inline int waitqueue_active(struct wait_queue_head *wq_head)
141 {
142         return !list_empty(&wq_head->task_list);
143 }
144
145 /**
146  * wq_has_sleeper - check if there are any waiting processes
147  * @wq_head: wait queue head
148  *
149  * Returns true if @wq_head has waiting processes
150  *
151  * Please refer to the comment for waitqueue_active.
152  */
153 static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
154 {
155         /*
156          * We need to be sure we are in sync with the
157          * add_wait_queue modifications to the wait queue.
158          *
159          * This memory barrier should be paired with one on the
160          * waiting side, e.g. the smp_mb() implied by set_current_state()
161          * (see the waitqueue_active() comment above).
162          */
163         smp_mb();
164         return waitqueue_active(wq_head);
165 }
165
/* Lock-taking add/remove; defined out of line. */
166 extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
167 extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
168 extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
169
/*
 * Low-level list manipulation.  NOTE(review): the __-prefixed helpers below
 * take no lock themselves; callers are presumably expected to hold
 * wait_queue_head::lock -- confirm against the out-of-line variants above.
 */
170 static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
171 {
172         list_add(&wq_entry->task_list, &wq_head->task_list);
173 }
174
175 /*
176  * Used for wake-one threads:
177  */
178 static inline void
179 __add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
180 {
181         wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
182         __add_wait_queue(wq_head, wq_entry);
183 }
184
/* Same as __add_wait_queue() but appends at the tail of the list. */
185 static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
186 {
187         list_add_tail(&wq_entry->task_list, &wq_head->task_list);
188 }
189
/* Tail insertion plus the exclusive (wake-one) flag. */
190 static inline void
191 __add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
192 {
193         wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
194         __add_wait_queue_entry_tail(wq_head, wq_entry);
195 }
196
/* Unlink @wq_entry from the queue; the entry itself is not re-initialized. */
197 static inline void
198 __remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
199 {
200         list_del(&wq_entry->task_list);
201 }
202
203 typedef int wait_bit_action_f(struct wait_bit_key *key, int mode);
204 void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
205 void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
206 void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
207 void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
208 void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
209 void __wake_up_bit(struct wait_queue_head *wq_head, void *word, int bit);
210 int __wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode);
211 int __wait_on_bit_lock(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode);
212 void wake_up_bit(void *word, int bit);
213 void wake_up_atomic_t(atomic_t *p);
214 int out_of_line_wait_on_bit(void *word, int, wait_bit_action_f *action, unsigned int mode);
215 int out_of_line_wait_on_bit_timeout(void *word, int, wait_bit_action_f *action, unsigned int mode, unsigned long timeout);
216 int out_of_line_wait_on_bit_lock(void *word, int, wait_bit_action_f *action, unsigned int mode);
217 int out_of_line_wait_on_atomic_t(atomic_t *p, int (*)(atomic_t *), unsigned int mode);
218 struct wait_queue_head *bit_waitqueue(void *word, int bit);
219
/*
 * Wakeup wrappers around __wake_up() and friends.  The integer argument is
 * the number of exclusive waiters to wake; 0 means no limit (hence the
 * _all variants pass 0, the plain variants pass 1).
 */
220 #define wake_up(x)                      __wake_up(x, TASK_NORMAL, 1, NULL)
221 #define wake_up_nr(x, nr)               __wake_up(x, TASK_NORMAL, nr, NULL)
222 #define wake_up_all(x)                  __wake_up(x, TASK_NORMAL, 0, NULL)
223 #define wake_up_locked(x)               __wake_up_locked((x), TASK_NORMAL, 1)
224 #define wake_up_all_locked(x)           __wake_up_locked((x), TASK_NORMAL, 0)
225
/* Same, but only wake tasks sleeping in TASK_INTERRUPTIBLE. */
226 #define wake_up_interruptible(x)        __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
227 #define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
228 #define wake_up_interruptible_all(x)    __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
229 #define wake_up_interruptible_sync(x)   __wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
230
231 /*
232  * Wakeup macros to be used to report events to the targets.
233  * The event mask @m is passed through as the wake key so that callers
234  * (e.g. poll waiters) can filter on it.
235  */
236 #define wake_up_poll(x, m)                                              \
237         __wake_up(x, TASK_NORMAL, 1, (void *) (m))
238 #define wake_up_locked_poll(x, m)                                       \
239         __wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
240 #define wake_up_interruptible_poll(x, m)                                \
241         __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
242 #define wake_up_interruptible_sync_poll(x, m)                           \
243         __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
242
/*
 * ___wait_cond_timeout - wrap @condition for the *_timeout wait loops
 *
 * Evaluates @condition exactly once.  Relies on a variable named __ret in
 * the enclosing scope that holds the remaining timeout: if the condition
 * turned true just as the timeout reached zero, __ret is forced to 1 so
 * callers can still tell "condition met" (>= 1) from "timed out" (0).
 * The whole expression is true when waiting should stop: the condition
 * holds or the timeout has expired.
 */
#define ___wait_cond_timeout(condition)					\
({									\
	bool __cond = (condition);					\
	if (__cond && !__ret)						\
		__ret = 1;						\
	__cond || !__ret;						\
})

/*
 * ___wait_is_interruptible - can a sleep in @state be broken by a signal?
 *
 * Conservatively true when @state is not a compile-time constant.
 * (The stray trailing line-continuation backslash that followed this
 * macro has been removed: it silently appended the next source line --
 * luckily a blank one -- to the definition.)
 */
#define ___wait_is_interruptible(state)					\
	(!__builtin_constant_p(state) ||				\
		state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)

extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);
256
257 /*
258  * The below macro ___wait_event() has an explicit shadow of the __ret
259  * variable when used from the wait_event_*() macros.
260  *
261  * This is so that both can use the ___wait_cond_timeout() construct
262  * to wrap the condition.
263  *
264  * The type inconsistency of the wait_event_*() __ret variable is also
265  * on purpose; we use long where we can return timeout values and int
266  * otherwise.
267  */
268
/*
 * ___wait_event - the workhorse behind every wait_event*() macro
 * @wq: wait queue head to wait on
 * @condition: expression re-checked after every wakeup
 * @state: task state to sleep in (TASK_UNINTERRUPTIBLE, TASK_INTERRUPTIBLE, ...)
 * @exclusive: non-zero queues the entry with WQ_FLAG_EXCLUSIVE (wake-one)
 * @ret: initial result value (the *_timeout variants pass the timeout here)
 * @cmd: the statement(s) that actually sleep, e.g. schedule()
 *
 * On a pending signal in an interruptible @state, the loop is left via
 * __out without calling finish_wait() -- prepare_to_wait_event() has
 * already dequeued the entry in that case (NOTE(review): confirm against
 * kernel/sched/wait.c).
 */
269 #define ___wait_event(wq, condition, state, exclusive, ret, cmd)        \
270 ({                                                                      \
271         __label__ __out;                                                \
272         struct wait_queue_entry __wq_entry;                             \
273         long __ret = ret;       /* explicit shadow */                   \
274                                                                         \
275         init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);\
276         for (;;) {                                                      \
277                 long __int = prepare_to_wait_event(&wq, &__wq_entry, state);\
278                                                                         \
279                 if (condition)                                          \
280                         break;                                          \
281                                                                         \
282                 if (___wait_is_interruptible(state) && __int) {         \
283                         __ret = __int;                                  \
284                         goto __out;                                     \
285                 }                                                       \
286                                                                         \
287                 cmd;                                                    \
288         }                                                               \
289         finish_wait(&wq, &__wq_entry);                                  \
290 __out:  __ret;                                                          \
291 })
292
/* Uninterruptible, no-timeout sleep; the ___wait_event() result is discarded. */
293 #define __wait_event(wq, condition)                                     \
294         (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,  \
295                             schedule())
296
297 /**
298  * wait_event - sleep until a condition gets true
299  * @wq: the waitqueue to wait on
300  * @condition: a C expression for the event to wait for
301  *
302  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
303  * @condition evaluates to true. The @condition is checked each time
304  * the waitqueue @wq is woken up.
305  *
306  * wake_up() has to be called after changing any variable that could
307  * change the result of the wait condition.
308  */
309 #define wait_event(wq, condition)                                       \
310 do {                                                                    \
311         might_sleep();                                                  \
312         if (condition)                                                  \
313                 break;                                                  \
314         __wait_event(wq, condition);                                    \
315 } while (0)
316
/* As __wait_event() but sleeps via io_schedule() (accounted as I/O wait). */
317 #define __io_wait_event(wq, condition)                                  \
318         (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,  \
319                             io_schedule())
320
321 /*
322  * io_wait_event() -- like wait_event() but with io_schedule()
323  */
324 #define io_wait_event(wq, condition)                                    \
325 do {                                                                    \
326         might_sleep();                                                  \
327         if (condition)                                                  \
328                 break;                                                  \
329         __io_wait_event(wq, condition);                                 \
330 } while (0)
331
/* Interruptible sleep that also calls try_to_freeze() after each schedule(). */
332 #define __wait_event_freezable(wq, condition)                           \
333         ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,          \
334                             schedule(); try_to_freeze())
335
336 /**
337  * wait_event_freezable - sleep (or freeze) until a condition gets true
338  * @wq: the waitqueue to wait on
339  * @condition: a C expression for the event to wait for
340  *
341  * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
342  * to system load) until the @condition evaluates to true. The
343  * @condition is checked each time the waitqueue @wq is woken up.
344  *
345  * wake_up() has to be called after changing any variable that could
346  * change the result of the wait condition.
347  *
348  * Returns 0 when @condition became true, or -ERESTARTSYS if a signal
349  * was received first.
350  */
348 #define wait_event_freezable(wq, condition)                             \
349 ({                                                                      \
350         int __ret = 0;                                                  \
351         might_sleep();                                                  \
352         if (!(condition))                                               \
353                 __ret = __wait_event_freezable(wq, condition);          \
354         __ret;                                                          \
355 })
356
/* __ret (seeded with the timeout) carries the remaining jiffies across sleeps. */
357 #define __wait_event_timeout(wq, condition, timeout)                    \
358         ___wait_event(wq, ___wait_cond_timeout(condition),              \
359                       TASK_UNINTERRUPTIBLE, 0, timeout,                 \
360                       __ret = schedule_timeout(__ret))
361
362 /**
363  * wait_event_timeout - sleep until a condition gets true or a timeout elapses
364  * @wq: the waitqueue to wait on
365  * @condition: a C expression for the event to wait for
366  * @timeout: timeout, in jiffies
367  *
368  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
369  * @condition evaluates to true. The @condition is checked each time
370  * the waitqueue @wq is woken up.
371  *
372  * wake_up() has to be called after changing any variable that could
373  * change the result of the wait condition.
374  *
375  * Returns:
376  * 0 if the @condition evaluated to %false after the @timeout elapsed,
377  * 1 if the @condition evaluated to %true after the @timeout elapsed,
378  * or the remaining jiffies (at least 1) if the @condition evaluated
379  * to %true before the @timeout elapsed.
380  */
381 #define wait_event_timeout(wq, condition, timeout)                      \
382 ({                                                                      \
383         long __ret = timeout;                                           \
384         might_sleep();                                                  \
385         if (!___wait_cond_timeout(condition))                           \
386                 __ret = __wait_event_timeout(wq, condition, timeout);   \
387         __ret;                                                          \
388 })
389
/* Timeout variant that also freezes; see wait_event_freezable(). */
390 #define __wait_event_freezable_timeout(wq, condition, timeout)          \
391         ___wait_event(wq, ___wait_cond_timeout(condition),              \
392                       TASK_INTERRUPTIBLE, 0, timeout,                   \
393                       __ret = schedule_timeout(__ret); try_to_freeze())
394
395 /*
396  * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
397  * increasing load and is freezable.
398  */
399 #define wait_event_freezable_timeout(wq, condition, timeout)            \
400 ({                                                                      \
401         long __ret = timeout;                                           \
402         might_sleep();                                                  \
403         if (!___wait_cond_timeout(condition))                           \
404                 __ret = __wait_event_freezable_timeout(wq, condition, timeout); \
405         __ret;                                                          \
406 })
407
/*
 * @cmd1 runs before each sleep, @cmd2 after it (e.g. to drop/retake a lock
 * around schedule()).  NOTE(review): unlike wait_event(), the _cmd variants
 * below do not call might_sleep() -- possibly deliberate since @cmd1/@cmd2
 * may manipulate locks; confirm before adding it.
 */
408 #define __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2)           \
409         (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 1, 0,  \
410                             cmd1; schedule(); cmd2)
411 /*
412  * Just like wait_event_cmd(), except it sets exclusive flag
413  */
414 #define wait_event_exclusive_cmd(wq, condition, cmd1, cmd2)             \
415 do {                                                                    \
416         if (condition)                                                  \
417                 break;                                                  \
418         __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2);          \
419 } while (0)
420
421 #define __wait_event_cmd(wq, condition, cmd1, cmd2)                     \
422         (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,  \
423                             cmd1; schedule(); cmd2)
424
425 /**
426  * wait_event_cmd - sleep until a condition gets true
427  * @wq: the waitqueue to wait on
428  * @condition: a C expression for the event to wait for
429  * @cmd1: the command will be executed before sleep
430  * @cmd2: the command will be executed after sleep
431  *
432  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
433  * @condition evaluates to true. The @condition is checked each time
434  * the waitqueue @wq is woken up.
435  *
436  * wake_up() has to be called after changing any variable that could
437  * change the result of the wait condition.
438  */
439 #define wait_event_cmd(wq, condition, cmd1, cmd2)                       \
440 do {                                                                    \
441         if (condition)                                                  \
442                 break;                                                  \
443         __wait_event_cmd(wq, condition, cmd1, cmd2);                    \
444 } while (0)
445
/* Interruptible sleep; ___wait_event() yields 0 or -ERESTARTSYS here. */
446 #define __wait_event_interruptible(wq, condition)                       \
447         ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,          \
448                       schedule())
449
450 /**
451  * wait_event_interruptible - sleep until a condition gets true
452  * @wq: the waitqueue to wait on
453  * @condition: a C expression for the event to wait for
454  *
455  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
456  * @condition evaluates to true or a signal is received.
457  * The @condition is checked each time the waitqueue @wq is woken up.
458  *
459  * wake_up() has to be called after changing any variable that could
460  * change the result of the wait condition.
461  *
462  * The function will return -ERESTARTSYS if it was interrupted by a
463  * signal and 0 if @condition evaluated to true.
464  */
465 #define wait_event_interruptible(wq, condition)                         \
466 ({                                                                      \
467         int __ret = 0;                                                  \
468         might_sleep();                                                  \
469         if (!(condition))                                               \
470                 __ret = __wait_event_interruptible(wq, condition);      \
471         __ret;                                                          \
472 })
473
/* Interruptible + timeout; __ret carries the remaining jiffies. */
474 #define __wait_event_interruptible_timeout(wq, condition, timeout)      \
475         ___wait_event(wq, ___wait_cond_timeout(condition),              \
476                       TASK_INTERRUPTIBLE, 0, timeout,                   \
477                       __ret = schedule_timeout(__ret))
478
479 /**
480  * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
481  * @wq: the waitqueue to wait on
482  * @condition: a C expression for the event to wait for
483  * @timeout: timeout, in jiffies
484  *
485  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
486  * @condition evaluates to true or a signal is received.
487  * The @condition is checked each time the waitqueue @wq is woken up.
488  *
489  * wake_up() has to be called after changing any variable that could
490  * change the result of the wait condition.
491  *
492  * Returns:
493  * 0 if the @condition evaluated to %false after the @timeout elapsed,
494  * 1 if the @condition evaluated to %true after the @timeout elapsed,
495  * the remaining jiffies (at least 1) if the @condition evaluated
496  * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
497  * interrupted by a signal.
498  */
499 #define wait_event_interruptible_timeout(wq, condition, timeout)        \
500 ({                                                                      \
501         long __ret = timeout;                                           \
502         might_sleep();                                                  \
503         if (!___wait_cond_timeout(condition))                           \
504                 __ret = __wait_event_interruptible_timeout(wq,          \
505                                                 condition, timeout);    \
506         __ret;                                                          \
507 })
508
/*
 * Sleep until @condition is true, a signal arrives (when @state is
 * interruptible), or the on-stack hrtimer fires.  A NULL __t.task is how
 * timer expiry is detected (presumably cleared by the sleeper's timer
 * callback -- see hrtimer_init_sleeper(); confirm).
 */
509 #define __wait_event_hrtimeout(wq, condition, timeout, state)           \
510 ({                                                                      \
511         int __ret = 0;                                                  \
512         struct hrtimer_sleeper __t;                                     \
513                                                                         \
514         hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC,              \
515                               HRTIMER_MODE_REL);                        \
516         hrtimer_init_sleeper(&__t, current);                            \
517         if ((timeout) != KTIME_MAX)                             \
518                 hrtimer_start_range_ns(&__t.timer, timeout,             \
519                                        current->timer_slack_ns,         \
520                                        HRTIMER_MODE_REL);               \
521                                                                         \
522         __ret = ___wait_event(wq, condition, state, 0, 0,               \
523                 if (!__t.task) {                                        \
524                         __ret = -ETIME;                                 \
525                         break;                                          \
526                 }                                                       \
527                 schedule());                                            \
528                                                                         \
529         hrtimer_cancel(&__t.timer);                                     \
530         destroy_hrtimer_on_stack(&__t.timer);                           \
531         __ret;                                                          \
532 })
533
534 /**
535  * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
536  * @wq: the waitqueue to wait on
537  * @condition: a C expression for the event to wait for
538  * @timeout: timeout, as a ktime_t
539  *
540  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
541  * @condition evaluates to true or the timeout elapses; signals cannot
542  * interrupt it.  The @condition is checked each time the waitqueue @wq
543  * is woken up.
544  *
545  * wake_up() has to be called after changing any variable that could
546  * change the result of the wait condition.
547  *
548  * The function returns 0 if @condition became true, or -ETIME if the timeout
549  * elapsed.
550  */
550 #define wait_event_hrtimeout(wq, condition, timeout)                    \
551 ({                                                                      \
552         int __ret = 0;                                                  \
553         might_sleep();                                                  \
554         if (!(condition))                                               \
555                 __ret = __wait_event_hrtimeout(wq, condition, timeout,  \
556                                                TASK_UNINTERRUPTIBLE);   \
557         __ret;                                                          \
558 })
559
560 /**
561  * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
562  * @wq: the waitqueue to wait on
563  * @condition: a C expression for the event to wait for
564  * @timeout: timeout, as a ktime_t
565  *
566  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
567  * @condition evaluates to true or a signal is received.
568  * The @condition is checked each time the waitqueue @wq is woken up.
569  *
570  * wake_up() has to be called after changing any variable that could
571  * change the result of the wait condition.
572  *
573  * The function returns 0 if @condition became true, -ERESTARTSYS if it was
574  * interrupted by a signal, or -ETIME if the timeout elapsed.
575  */
576 #define wait_event_interruptible_hrtimeout(wq, condition, timeout)      \
577 ({                                                                      \
578         long __ret = 0;                                                 \
579         might_sleep();                                                  \
580         if (!(condition))                                               \
581                 __ret = __wait_event_hrtimeout(wq, condition, timeout,  \
582                                                TASK_INTERRUPTIBLE);     \
583         __ret;                                                          \
584 })
585
/* Exclusive (wake-one) interruptible wait; returns 0 or -ERESTARTSYS. */
586 #define __wait_event_interruptible_exclusive(wq, condition)             \
587         ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,          \
588                       schedule())
589
590 #define wait_event_interruptible_exclusive(wq, condition)               \
591 ({                                                                      \
592         int __ret = 0;                                                  \
593         might_sleep();                                                  \
594         if (!(condition))                                               \
595                 __ret = __wait_event_interruptible_exclusive(wq, condition);\
596         __ret;                                                          \
597 })
598
/* As above, but only fatal signals can interrupt (TASK_KILLABLE). */
599 #define __wait_event_killable_exclusive(wq, condition)                  \
600         ___wait_event(wq, condition, TASK_KILLABLE, 1, 0,               \
601                       schedule())
602
603 #define wait_event_killable_exclusive(wq, condition)                    \
604 ({                                                                      \
605         int __ret = 0;                                                  \
606         might_sleep();                                                  \
607         if (!(condition))                                               \
608                 __ret = __wait_event_killable_exclusive(wq, condition); \
609         __ret;                                                          \
610 })
611
612
/* Exclusive interruptible wait that also freezes; see wait_event_freezable(). */
613 #define __wait_event_freezable_exclusive(wq, condition)                 \
614         ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,          \
615                         schedule(); try_to_freeze())
616
617 #define wait_event_freezable_exclusive(wq, condition)                   \
618 ({                                                                      \
619         int __ret = 0;                                                  \
620         might_sleep();                                                  \
621         if (!(condition))                                               \
622                 __ret = __wait_event_freezable_exclusive(wq, condition);\
623         __ret;                                                          \
624 })
625
626 extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
627 extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
628
/*
 * Common implementation for the wait_event_interruptible*_locked*()
 * macros: with wq.lock held, repeatedly invoke @fn (do_wait_intr() or
 * do_wait_intr_irq()) until @condition is true or @fn returns an error,
 * then dequeue the entry and restore TASK_RUNNING.  A non-zero
 * @exclusive marks the entry WQ_FLAG_EXCLUSIVE.
 */
#define __wait_event_interruptible_locked(wq, condition, exclusive, fn) \
({                                                                      \
        int __ret;                                                      \
        DEFINE_WAIT(__wait);                                            \
        if (exclusive)                                                  \
                __wait.flags |= WQ_FLAG_EXCLUSIVE;                      \
        do {                                                            \
                __ret = fn(&(wq), &__wait);                             \
                if (__ret)                                              \
                        break;                                          \
        } while (!(condition));                                         \
        __remove_wait_queue(&(wq), &__wait);                            \
        __set_current_state(TASK_RUNNING);                              \
        __ret;                                                          \
})
644
645
/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while the lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)                  \
        ((condition)                                                    \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))
672
/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while the lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)              \
        ((condition)                                                    \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))
699
/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while the lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set, so that if this process is awakened, further waiters queued
 * after it on the list are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)        \
        ((condition)                                                    \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))
730
/**
 * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while the lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set, so that if this process is awakened, further waiters queued
 * after it on the list are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)    \
        ((condition)                                                    \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))
761
762
/* Killable, non-exclusive sleep. */
#define __wait_event_killable(wq, condition)                            \
        ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
765
/**
 * wait_event_killable - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a signal is received.
 * (TASK_KILLABLE sleeps are woken only by fatal signals.)
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition)                              \
({                                                                      \
        int __ret = 0;                                                  \
        might_sleep();                                                  \
        if (!(condition))                                               \
                __ret = __wait_event_killable(wq, condition);           \
        __ret;                                                          \
})
789
790
/*
 * Uninterruptible wait with @lock dropped (spin_unlock_irq) around
 * @cmd and schedule(), and reacquired before re-testing @condition.
 */
#define __wait_event_lock_irq(wq, condition, lock, cmd)                 \
        (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,  \
                            spin_unlock_irq(&lock);                     \
                            cmd;                                        \
                            schedule();                                 \
                            spin_lock_irq(&lock))
797
/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *                           condition is checked under the lock. This
 *                           is expected to be called with the lock
 *                           taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before @cmd
 *        and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *       sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq, condition, lock, cmd)               \
do {                                                                    \
        if (condition)                                                  \
                break;                                                  \
        __wait_event_lock_irq(wq, condition, lock, cmd);                \
} while (0)
827
/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *                       condition is checked under the lock. This
 *                       is expected to be called with the lock
 *                       taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq, condition, lock)                        \
do {                                                                    \
        if (condition)                                                  \
                break;                                                  \
        __wait_event_lock_irq(wq, condition, lock, );                   \
} while (0)
854
855
/*
 * Interruptible counterpart of __wait_event_lock_irq(): same lock
 * handling, but sleeps in TASK_INTERRUPTIBLE and returns the
 * ___wait_event() result (0 or -ERESTARTSYS).
 */
#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd)   \
        ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,          \
                      spin_unlock_irq(&lock);                           \
                      cmd;                                              \
                      schedule();                                       \
                      spin_lock_irq(&lock))
862
/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *              The condition is checked under the lock. This is expected to
 *              be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before @cmd and
 *        schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *       sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \
({                                                                      \
        int __ret = 0;                                                  \
        if (!(condition))                                               \
                __ret = __wait_event_interruptible_lock_irq(wq,         \
                                                condition, lock, cmd);  \
        __ret;                                                          \
})
896
/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *              The condition is checked under the lock. This is expected
 *              to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq, condition, lock)          \
({                                                                      \
        int __ret = 0;                                                  \
        if (!(condition))                                               \
                __ret = __wait_event_interruptible_lock_irq(wq,         \
                                                condition, lock,);      \
        __ret;                                                          \
})
927
/*
 * Interruptible, timed wait with @lock dropped around the sleep.
 * Evaluates to the remaining jiffies (>= 1 if @condition became true),
 * 0 on timeout, or -ERESTARTSYS on signal, via ___wait_event()'s
 * statement-expression value.
 *
 * Note: no trailing semicolon here — the macro must remain usable as
 * an expression (it is assigned to __ret by the caller below); a stray
 * ';' would break parenthesized/argument uses and emit empty statements.
 */
#define __wait_event_interruptible_lock_irq_timeout(wq, condition,      \
                                                    lock, timeout)      \
        ___wait_event(wq, ___wait_cond_timeout(condition),              \
                      TASK_INTERRUPTIBLE, 0, timeout,                   \
                      spin_unlock_irq(&lock);                           \
                      __ret = schedule_timeout(__ret);                  \
                      spin_lock_irq(&lock))
935
/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *              true or a timeout elapses. The condition is checked under
 *              the lock. This is expected to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, and the remaining jiffies otherwise
 * if the condition evaluated to true before the timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,  \
                                                  timeout)              \
({                                                                      \
        long __ret = timeout;                                           \
        if (!___wait_cond_timeout(condition))                           \
                __ret = __wait_event_interruptible_lock_irq_timeout(    \
                                        wq, condition, lock, timeout);  \
        __ret;                                                          \
})
969
/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
void prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
/* WQ_FLAG_WOKEN-based waiting: pair wait_woken() with woken_wake_function() */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
/* wake callbacks suitable for wait_queue_entry::func */
int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
981
/* Declare an on-stack wait queue entry for current with a custom wake function. */
#define DEFINE_WAIT_FUNC(name, function)                                \
        struct wait_queue_entry name = {                                \
                .private        = current,                              \
                .func           = function,                             \
                .task_list      = LIST_HEAD_INIT((name).task_list),     \
        }
988
989 #define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
990
/*
 * Declare an on-stack wait_bit_queue_entry for current, keyed on
 * (@word, @bit) and woken through wake_bit_function().
 */
#define DEFINE_WAIT_BIT(name, word, bit)                                \
        struct wait_bit_queue_entry name = {                            \
                .key = __WAIT_BIT_KEY_INITIALIZER(word, bit),           \
                .wq_entry = {                                           \
                        .private        = current,                      \
                        .func           = wake_bit_function,            \
                        .task_list      =                               \
                                LIST_HEAD_INIT((name).wq_entry.task_list), \
                },                                                      \
        }
1001
/* Runtime (re)initialization of a wait entry; equivalent of DEFINE_WAIT(). */
#define init_wait(wait)                                                 \
        do {                                                            \
                (wait)->private = current;                              \
                (wait)->func = autoremove_wake_function;                \
                INIT_LIST_HEAD(&(wait)->task_list);                     \
                (wait)->flags = 0;                                      \
        } while (0)
1009
1010
/*
 * Bit-wait action functions, usable as the @action argument of the
 * wait_on_bit*_action() APIs below.  The second parameter is the task
 * state the caller sleeps in (the @mode that wait_on_bit() et al.
 * forward to out_of_line_wait_on_bit()); the bit number itself travels
 * in wait_bit_key::bit_nr, so naming this parameter 'bit' was misleading.
 */
extern int bit_wait(struct wait_bit_key *key, int mode);
extern int bit_wait_io(struct wait_bit_key *key, int mode);
extern int bit_wait_timeout(struct wait_bit_key *key, int mode);
extern int bit_wait_io_timeout(struct wait_bit_key *key, int mode);
1015
1016 /**
1017  * wait_on_bit - wait for a bit to be cleared
1018  * @word: the word being waited on, a kernel virtual address
1019  * @bit: the bit of the word being waited on
1020  * @mode: the task state to sleep in
1021  *
1022  * There is a standard hashed waitqueue table for generic use. This
1023  * is the part of the hashtable's accessor API that waits on a bit.
1024  * For instance, if one were to have waiters on a bitflag, one would
1025  * call wait_on_bit() in threads waiting for the bit to clear.
1026  * One uses wait_on_bit() where one is waiting for the bit to clear,
1027  * but has no intention of setting it.
1028  * Returned value will be zero if the bit was cleared, or non-zero
1029  * if the process received a signal and the mode permitted wakeup
1030  * on that signal.
1031  */
1032 static inline int
1033 wait_on_bit(unsigned long *word, int bit, unsigned mode)
1034 {
1035         might_sleep();
1036         if (!test_bit(bit, word))
1037                 return 0;
1038         return out_of_line_wait_on_bit(word, bit,
1039                                        bit_wait,
1040                                        mode);
1041 }
1042
1043 /**
1044  * wait_on_bit_io - wait for a bit to be cleared
1045  * @word: the word being waited on, a kernel virtual address
1046  * @bit: the bit of the word being waited on
1047  * @mode: the task state to sleep in
1048  *
1049  * Use the standard hashed waitqueue table to wait for a bit
1050  * to be cleared.  This is similar to wait_on_bit(), but calls
1051  * io_schedule() instead of schedule() for the actual waiting.
1052  *
1053  * Returned value will be zero if the bit was cleared, or non-zero
1054  * if the process received a signal and the mode permitted wakeup
1055  * on that signal.
1056  */
1057 static inline int
1058 wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
1059 {
1060         might_sleep();
1061         if (!test_bit(bit, word))
1062                 return 0;
1063         return out_of_line_wait_on_bit(word, bit,
1064                                        bit_wait_io,
1065                                        mode);
1066 }
1067
1068 /**
1069  * wait_on_bit_timeout - wait for a bit to be cleared or a timeout elapses
1070  * @word: the word being waited on, a kernel virtual address
1071  * @bit: the bit of the word being waited on
1072  * @mode: the task state to sleep in
1073  * @timeout: timeout, in jiffies
1074  *
1075  * Use the standard hashed waitqueue table to wait for a bit
1076  * to be cleared. This is similar to wait_on_bit(), except also takes a
1077  * timeout parameter.
1078  *
1079  * Returned value will be zero if the bit was cleared before the
1080  * @timeout elapsed, or non-zero if the @timeout elapsed or process
1081  * received a signal and the mode permitted wakeup on that signal.
1082  */
1083 static inline int
1084 wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
1085                     unsigned long timeout)
1086 {
1087         might_sleep();
1088         if (!test_bit(bit, word))
1089                 return 0;
1090         return out_of_line_wait_on_bit_timeout(word, bit,
1091                                                bit_wait_timeout,
1092                                                mode, timeout);
1093 }
1094
1095 /**
1096  * wait_on_bit_action - wait for a bit to be cleared
1097  * @word: the word being waited on, a kernel virtual address
1098  * @bit: the bit of the word being waited on
1099  * @action: the function used to sleep, which may take special actions
1100  * @mode: the task state to sleep in
1101  *
1102  * Use the standard hashed waitqueue table to wait for a bit
1103  * to be cleared, and allow the waiting action to be specified.
1104  * This is like wait_on_bit() but allows fine control of how the waiting
1105  * is done.
1106  *
1107  * Returned value will be zero if the bit was cleared, or non-zero
1108  * if the process received a signal and the mode permitted wakeup
1109  * on that signal.
1110  */
1111 static inline int
1112 wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
1113                    unsigned mode)
1114 {
1115         might_sleep();
1116         if (!test_bit(bit, word))
1117                 return 0;
1118         return out_of_line_wait_on_bit(word, bit, action, mode);
1119 }
1120
1121 /**
1122  * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
1123  * @word: the word being waited on, a kernel virtual address
1124  * @bit: the bit of the word being waited on
1125  * @mode: the task state to sleep in
1126  *
1127  * There is a standard hashed waitqueue table for generic use. This
1128  * is the part of the hashtable's accessor API that waits on a bit
1129  * when one intends to set it, for instance, trying to lock bitflags.
1130  * For instance, if one were to have waiters trying to set bitflag
1131  * and waiting for it to clear before setting it, one would call
1132  * wait_on_bit() in threads waiting to be able to set the bit.
1133  * One uses wait_on_bit_lock() where one is waiting for the bit to
1134  * clear with the intention of setting it, and when done, clearing it.
1135  *
1136  * Returns zero if the bit was (eventually) found to be clear and was
1137  * set.  Returns non-zero if a signal was delivered to the process and
1138  * the @mode allows that signal to wake the process.
1139  */
1140 static inline int
1141 wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
1142 {
1143         might_sleep();
1144         if (!test_and_set_bit(bit, word))
1145                 return 0;
1146         return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
1147 }
1148
1149 /**
1150  * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
1151  * @word: the word being waited on, a kernel virtual address
1152  * @bit: the bit of the word being waited on
1153  * @mode: the task state to sleep in
1154  *
1155  * Use the standard hashed waitqueue table to wait for a bit
1156  * to be cleared and then to atomically set it.  This is similar
1157  * to wait_on_bit(), but calls io_schedule() instead of schedule()
1158  * for the actual waiting.
1159  *
1160  * Returns zero if the bit was (eventually) found to be clear and was
1161  * set.  Returns non-zero if a signal was delivered to the process and
1162  * the @mode allows that signal to wake the process.
1163  */
1164 static inline int
1165 wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode)
1166 {
1167         might_sleep();
1168         if (!test_and_set_bit(bit, word))
1169                 return 0;
1170         return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
1171 }
1172
1173 /**
1174  * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
1175  * @word: the word being waited on, a kernel virtual address
1176  * @bit: the bit of the word being waited on
1177  * @action: the function used to sleep, which may take special actions
1178  * @mode: the task state to sleep in
1179  *
1180  * Use the standard hashed waitqueue table to wait for a bit
1181  * to be cleared and then to set it, and allow the waiting action
1182  * to be specified.
1183  * This is like wait_on_bit() but allows fine control of how the waiting
1184  * is done.
1185  *
1186  * Returns zero if the bit was (eventually) found to be clear and was
1187  * set.  Returns non-zero if a signal was delivered to the process and
1188  * the @mode allows that signal to wake the process.
1189  */
1190 static inline int
1191 wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
1192                         unsigned mode)
1193 {
1194         might_sleep();
1195         if (!test_and_set_bit(bit, word))
1196                 return 0;
1197         return out_of_line_wait_on_bit_lock(word, bit, action, mode);
1198 }
1199
1200 /**
1201  * wait_on_atomic_t - Wait for an atomic_t to become 0
1202  * @val: The atomic value being waited on, a kernel virtual address
1203  * @action: the function used to sleep, which may take special actions
1204  * @mode: the task state to sleep in
1205  *
1206  * Wait for an atomic_t to become 0.  We abuse the bit-wait waitqueue table for
1207  * the purpose of getting a waitqueue, but we set the key to a bit number
1208  * outside of the target 'word'.
1209  */
1210 static inline
1211 int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
1212 {
1213         might_sleep();
1214         if (atomic_read(val) == 0)
1215                 return 0;
1216         return out_of_line_wait_on_atomic_t(val, action, mode);
1217 }
1218
1219 #endif /* _LINUX_WAIT_H */