/*
 *  linux/arch/m32r/semaphore.c
 *    orig : i386 2.6.4
 *
 *  M32R semaphore implementation.
 *
 *      Copyright (c) 2002 - 2004 Hitoshi Yamamoto
 */

/*
 * i386 semaphore implementation.
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Portions Copyright 1999 Red Hat, Inc.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
 */
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/init.h>
#include <asm/semaphore.h>

/*
 * Semaphores are implemented using a two-way counter:
 * the "count" variable is decremented for each process
 * that tries to acquire the semaphore, while the "sleepers"
 * variable counts how many of those processes are sleeping
 * (or about to sleep) in the contention path.
 *
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test if they need to do any extra work (up
 * needs to do something only if count was negative before
 * the increment operation); a rough sketch of that fast
 * path follows this comment.
 *
 * "sleepers", and the ordering of the contention handling,
 * are protected by the spinlock in the semaphore's waitqueue head.
 *
 * Note that these functions are only called when there is
 * contention on the lock, and as such all this is the
 * "non-critical" part of the whole semaphore business. The
 * critical part is the inline stuff in <asm/semaphore.h>
 * where we want to avoid any extra jumps and calls.
 */
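
/*
 * Illustrative sketch (an assumption, not code from this file): the
 * real down()/up() fast path lives in <asm/semaphore.h> as m32r inline
 * assembly; the hypothetical sketch_down()/sketch_up() below only show
 * the behaviour it implements, using generic atomic_dec_return()/
 * atomic_inc_return() for clarity.
 */
static inline void sketch_down(struct semaphore *sem)
{
        /* Fast path: drop the count; a negative result means contention. */
        if (atomic_dec_return(&sem->count) < 0)
                __down(sem);            /* slow path, defined below */
}

static inline void sketch_up(struct semaphore *sem)
{
        /*
         * Fast path: raise the count; a non-positive result means the
         * count was negative, i.e. somebody is (or will be) sleeping.
         */
        if (atomic_inc_return(&sem->count) <= 0)
                __up(sem);              /* slow path, defined below */
}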

/*
 * Logic:
 *  - only on a boundary condition do we need to care: when we go
 *    from a negative count to a non-negative one, we wake people up.
 *  - when we go from a non-negative count to a negative one, we must
 *    (a) synchronize with the "sleepers" count and (b) make sure
 *    that we're on the wakeup list before we synchronize, so that
 *    we cannot lose wakeup events.
 *  (A worked example follows this comment.)
 */
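
/*
 * Worked example (illustrative only, assuming a mutex-style semaphore
 * initialized with count = 1):
 *
 *   A: down()  count 1 -> 0    fast path, A holds the semaphore
 *   B: down()  count 0 -> -1   B enters __down(), sem->sleepers = 1
 *      loop:   atomic_add_negative(sleepers - 1 = 0, &count) leaves
 *              count at -1 (negative), so B sets sleepers back to 1
 *              and sleeps
 *   A: up()    count -1 -> 0   count was negative, so __up() wakes B
 *   B: retry:  atomic_add_negative(0, &count) leaves count at 0
 *              (non-negative), so B zeroes sleepers and now holds
 *              the semaphore
 */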

asmlinkage void __up(struct semaphore *sem)
{
        wake_up(&sem->wait);
}

asmlinkage void __sched __down(struct semaphore * sem)
{
        struct task_struct *tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);
        unsigned long flags;

        tsk->state = TASK_UNINTERRUPTIBLE;
        spin_lock_irqsave(&sem->wait.lock, flags);
        add_wait_queue_exclusive_locked(&sem->wait, &wait);

        sem->sleepers++;
        for (;;) {
                int sleepers = sem->sleepers;

                /*
                 * Add "everybody else" into it. They aren't
                 * playing, because we own the spinlock in
                 * the wait_queue_head.
                 */
                if (!atomic_add_negative(sleepers - 1, &sem->count)) {
                        sem->sleepers = 0;
                        break;
                }
                sem->sleepers = 1;      /* us - see -1 above */
                spin_unlock_irqrestore(&sem->wait.lock, flags);

                schedule();

                spin_lock_irqsave(&sem->wait.lock, flags);
                tsk->state = TASK_UNINTERRUPTIBLE;
        }
        remove_wait_queue_locked(&sem->wait, &wait);
        wake_up_locked(&sem->wait);
        spin_unlock_irqrestore(&sem->wait.lock, flags);
        tsk->state = TASK_RUNNING;
}

asmlinkage int __sched __down_interruptible(struct semaphore * sem)
{
        int retval = 0;
        struct task_struct *tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);
        unsigned long flags;

        tsk->state = TASK_INTERRUPTIBLE;
        spin_lock_irqsave(&sem->wait.lock, flags);
        add_wait_queue_exclusive_locked(&sem->wait, &wait);

        sem->sleepers++;
        for (;;) {
                int sleepers = sem->sleepers;

                /*
                 * With signals pending, this turns into
                 * the trylock failure case - we won't be
                 * sleeping, and we can't get the lock as
                 * it has contention. Just correct the count
                 * and exit.
                 */
                if (signal_pending(current)) {
                        retval = -EINTR;
                        sem->sleepers = 0;
                        atomic_add(sleepers, &sem->count);
                        break;
                }

                /*
                 * Add "everybody else" into it. They aren't
                 * playing, because we own the spinlock in
                 * wait_queue_head. The "-1" is because we're
                 * still hoping to get the semaphore.
                 */
                if (!atomic_add_negative(sleepers - 1, &sem->count)) {
                        sem->sleepers = 0;
                        break;
                }
                sem->sleepers = 1;      /* us - see -1 above */
                spin_unlock_irqrestore(&sem->wait.lock, flags);

                schedule();

                spin_lock_irqsave(&sem->wait.lock, flags);
                tsk->state = TASK_INTERRUPTIBLE;
        }
        remove_wait_queue_locked(&sem->wait, &wait);
        wake_up_locked(&sem->wait);
        spin_unlock_irqrestore(&sem->wait.lock, flags);

        tsk->state = TASK_RUNNING;
        return retval;
}

/*
 * Trylock failed - make sure we correct for
 * having decremented the count.
 *
 * We could have done the trylock with a
 * single "cmpxchg" without failure cases,
 * but then it wouldn't work on a 386.
 */
asmlinkage int __down_trylock(struct semaphore * sem)
{
        int sleepers;
        unsigned long flags;

        spin_lock_irqsave(&sem->wait.lock, flags);
        sleepers = sem->sleepers + 1;
        sem->sleepers = 0;

        /*
         * Add "everybody else" and us into it. They aren't
         * playing, because we own the spinlock in the
         * wait_queue_head.
         */
        if (!atomic_add_negative(sleepers, &sem->count)) {
                wake_up_locked(&sem->wait);
        }

        spin_unlock_irqrestore(&sem->wait.lock, flags);
        return 1;
}
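
/*
 * Illustrative sketch (an assumption, not code from this file): the
 * down_trylock() fast path in <asm/semaphore.h> is m32r inline
 * assembly; the hypothetical sketch_down_trylock() below shows how the
 * helper above is used, with 0 meaning "acquired" and 1 meaning
 * "failed".
 */
static inline int sketch_down_trylock(struct semaphore *sem)
{
        /*
         * Fast path: if the decrement goes negative, the semaphore was
         * already held; __down_trylock() above folds our decrement
         * (plus any recorded sleepers) back into the count and reports
         * failure by returning 1.
         */
        if (atomic_dec_return(&sem->count) < 0)
                return __down_trylock(sem);
        return 0;
}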