/*
 * Generic waiting primitives.
 *
 * (C) 2004 William Irwin, Oracle
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/wait.h>
#include <linux/hash.h>

void init_waitqueue_head(wait_queue_head_t *q)
{
        spin_lock_init(&q->lock);
        INIT_LIST_HEAD(&q->task_list);
}
EXPORT_SYMBOL(init_waitqueue_head);

void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        wait->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        __add_wait_queue(q, wait);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        wait->flags |= WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        __add_wait_queue_tail(q, wait);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        __remove_wait_queue(q, wait);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see the waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops it from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
        unsigned long flags;

        wait->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        if (list_empty(&wait->task_list))
                __add_wait_queue(q, wait);
        set_current_state(state);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);

void
prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
        unsigned long flags;

        wait->flags |= WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        if (list_empty(&wait->task_list))
                __add_wait_queue_tail(q, wait);
        set_current_state(state);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

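/*
 * Typical caller pattern (a sketch, not code from this file; "q" is
 * the caller's waitqueue head and "condition" stands for whatever
 * wakeup predicate the caller checks):
 *
 *      DEFINE_WAIT(wait);
 *
 *      for (;;) {
 *              prepare_to_wait(&q, &wait, TASK_UNINTERRUPTIBLE);
 *              if (condition)
 *                      break;
 *              schedule();
 *      }
 *      finish_wait(&q, &wait);
 *
 * An exclusive waiter uses prepare_to_wait_exclusive() instead, which
 * queues at the tail and sets WQ_FLAG_EXCLUSIVE so the wakeup side can
 * stop after waking one such waiter.
 */
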
/**
 * finish_wait - clean up after waiting in a queue
 * @q: waitqueue waited on
 * @wait: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        __set_current_state(TASK_RUNNING);
        /*
         * We can check for list emptiness outside the lock
         * IFF:
         *  - we use the "careful" check that verifies both
         *    the next and prev pointers, so that there cannot
         *    be any half-pending updates in progress on other
         *    CPUs that we haven't seen yet (and that might
         *    still change the stack area)
         * and
         *  - all other users take the lock (i.e. we can only
         *    have _one_ other CPU that looks at or modifies
         *    the list).
         */
        if (!list_empty_careful(&wait->task_list)) {
                spin_lock_irqsave(&q->lock, flags);
                list_del_init(&wait->task_list);
                spin_unlock_irqrestore(&q->lock, flags);
        }
}
EXPORT_SYMBOL(finish_wait);

/**
 * abort_exclusive_wait - abort exclusive waiting in a queue
 * @q: waitqueue waited on
 * @wait: wait descriptor
 * @mode: runstate of the waiter to be woken
 * @key: key to identify a wait bit queue or %NULL
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 *
 * Wakes up the next waiter if the caller is concurrently
 * woken up through the queue.
 *
 * This prevents waiter starvation where an exclusive waiter
 * aborts and is woken up concurrently and no one wakes up
 * the next waiter.
 */
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
                        unsigned int mode, void *key)
{
        unsigned long flags;

        __set_current_state(TASK_RUNNING);
        spin_lock_irqsave(&q->lock, flags);
        if (!list_empty(&wait->task_list))
                list_del_init(&wait->task_list);
        else if (waitqueue_active(q))
                __wake_up_locked_key(q, mode, key);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(abort_exclusive_wait);

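/*
 * Wake callback installed by default by DEFINE_WAIT() in <linux/wait.h>:
 * on a successful wakeup it unlinks the wait descriptor, so a waiter
 * that loops re-adds itself through prepare_to_wait().
 */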
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
        int ret = default_wake_function(wait, mode, sync, key);

        if (ret)
                list_del_init(&wait->task_list);
        return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);

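/*
 * Wake callback installed by DEFINE_WAIT_BIT(): only wake the waiter
 * if the key matches this waiter's (flags word, bit number) pair and
 * the bit has actually been cleared in the meantime.
 */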
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
{
        struct wait_bit_key *key = arg;
        struct wait_bit_queue *wait_bit
                = container_of(wait, struct wait_bit_queue, wait);

        if (wait_bit->key.flags != key->flags ||
                        wait_bit->key.bit_nr != key->bit_nr ||
                        test_bit(key->bit_nr, key->flags))
                return 0;
        else
                return autoremove_wake_function(wait, mode, sync, key);
}
EXPORT_SYMBOL(wake_bit_function);

/*
 * To allow interruptible waiting and asynchronous (i.e. nonblocking)
 * waiting, the action functions passed to __wait_on_bit() and
 * __wait_on_bit_lock() may return nonzero codes.  A nonzero return
 * code halts waiting and is propagated to the caller.
 */
int __sched
__wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
                        int (*action)(void *), unsigned mode)
{
        int ret = 0;

        do {
                prepare_to_wait(wq, &q->wait, mode);
                if (test_bit(q->key.bit_nr, q->key.flags))
                        ret = (*action)(q->key.flags);
        } while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
        finish_wait(wq, &q->wait);
        return ret;
}
EXPORT_SYMBOL(__wait_on_bit);

int __sched out_of_line_wait_on_bit(void *word, int bit,
                                        int (*action)(void *), unsigned mode)
{
        wait_queue_head_t *wq = bit_waitqueue(word, bit);
        DEFINE_WAIT_BIT(wait, word, bit);

        return __wait_on_bit(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit);

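/*
 * Callers normally reach out_of_line_wait_on_bit() through the
 * wait_on_bit() wrapper in <linux/wait.h>, which tests the bit before
 * sleeping.  A sketch ("word", MY_BIT and my_action are placeholders,
 * not names from this file; an action typically just schedules, and an
 * interruptible one returns nonzero, e.g. -EINTR, to abort the wait):
 *
 *      static int my_action(void *word)
 *      {
 *              schedule();
 *              return 0;
 *      }
 *
 *      err = wait_on_bit(&word, MY_BIT, my_action, TASK_UNINTERRUPTIBLE);
 */
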
int __sched
__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
                        int (*action)(void *), unsigned mode)
{
        do {
                int ret;

                prepare_to_wait_exclusive(wq, &q->wait, mode);
                if (!test_bit(q->key.bit_nr, q->key.flags))
                        continue;
                ret = action(q->key.flags);
                if (!ret)
                        continue;
                abort_exclusive_wait(wq, &q->wait, mode, &q->key);
                return ret;
        } while (test_and_set_bit(q->key.bit_nr, q->key.flags));
        finish_wait(wq, &q->wait);
        return 0;
}
EXPORT_SYMBOL(__wait_on_bit_lock);

int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
                                        int (*action)(void *), unsigned mode)
{
        wait_queue_head_t *wq = bit_waitqueue(word, bit);
        DEFINE_WAIT_BIT(wait, word, bit);

        return __wait_on_bit_lock(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);

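/*
 * out_of_line_wait_on_bit_lock() is normally reached through the
 * wait_on_bit_lock() wrapper in <linux/wait.h>.  Sketch of the usual
 * acquire/release pairing ("word", MY_BIT and my_action are
 * placeholders as above):
 *
 *      if (wait_on_bit_lock(&word, MY_BIT, my_action, TASK_UNINTERRUPTIBLE))
 *              return -EINTR;  (or whatever nonzero code my_action returned)
 *      ... MY_BIT is now owned ...
 *      clear_bit(MY_BIT, &word);
 *      smp_mb__after_clear_bit();
 *      wake_up_bit(&word, MY_BIT);
 */
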
void __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
{
        struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
        if (waitqueue_active(wq))
                __wake_up(wq, TASK_NORMAL, 1, &key);
}
EXPORT_SYMBOL(__wake_up_bit);

/**
 * wake_up_bit - wake up a waiter on a bit
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that wakes up waiters
 * on a bit. For instance, if one were to have waiters on a bitflag,
 * one would call wake_up_bit() after clearing the bit.
 *
 * Because this function uses waitqueue_active() internally, a memory
 * barrier must be issued prior to calling it. Typically, this will be
 * smp_mb__after_clear_bit(), but in some cases where bitflags are
 * manipulated non-atomically under a lock, one may need to use a less
 * regular barrier, such as fs/inode.c's smp_mb(), because spin_unlock()
 * does not guarantee a memory barrier.
 */
void wake_up_bit(void *word, int bit)
{
        __wake_up_bit(bit_waitqueue(word, bit), word, bit);
}
EXPORT_SYMBOL(wake_up_bit);

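/*
 * Map a (word address, bit number) pair to one head in the per-zone
 * hashed wait table.  Shifting the word-aligned address left by
 * log2(BITS_PER_LONG) frees the low bits to hold the bit number, so
 * different bits of the same word hash independently.
 */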
wait_queue_head_t *bit_waitqueue(void *word, int bit)
{
        const int shift = BITS_PER_LONG == 32 ? 5 : 6;
        const struct zone *zone = page_zone(virt_to_page(word));
        unsigned long val = (unsigned long)word << shift | bit;

        return &zone->wait_table[hash_long(val, zone->wait_table_bits)];
}
EXPORT_SYMBOL(bit_waitqueue);