nptl/pthread_mutex_trylock.c
/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include "pthreadP.h"
#include <lowlevellock.h>

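/* Configurations that support lock elision define these macros
   elsewhere; the defaults below fall back to a plain trylock and
   never attempt elision.  */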
#ifndef lll_trylock_elision
#define lll_trylock_elision(a,t) lll_trylock(a)
#endif

#ifndef DO_ELISION
#define DO_ELISION(m) 0
#endif

/* We don't force elision in trylock, because this can lead to inconsistent
   lock state if the lock was actually busy.  */

int
__pthread_mutex_trylock (pthread_mutex_t *mutex)
{
  int oldval;
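  /* The caller's kernel thread ID; robust and PI mutexes store it
     directly in the futex word to identify the owner.  */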
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP|PTHREAD_MUTEX_ELISION_NP:
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;
          return 0;
        }

      if (lll_trylock (mutex->__data.__lock) == 0)
        {
          /* Record the ownership.  */
          mutex->__data.__owner = id;
          mutex->__data.__count = 1;
          ++mutex->__data.__nusers;
          return 0;
        }
      break;

    case PTHREAD_MUTEX_TIMED_ELISION_NP:
    elision:
      if (lll_trylock_elision (mutex->__data.__lock,
                               mutex->__data.__elision) != 0)
        break;
      /* Don't record the ownership.  */
      return 0;

    case PTHREAD_MUTEX_TIMED_NP:
      if (DO_ELISION (mutex))
        goto elision;
      /* FALL THROUGH */
    case PTHREAD_MUTEX_ADAPTIVE_NP:
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      if (lll_trylock (mutex->__data.__lock) != 0)
        break;

      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      return 0;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
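      /* Publish the mutex in list_op_pending first: if this thread dies
         in the middle of the operation, the kernel's robust-futex
         cleanup still finds the mutex and can mark it owner-dead.  */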
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS);

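              /* The CAS returns the value found in memory; if it is
                 not OLDVAL the exchange lost a race, so retry with the
                 freshly read value.  */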
              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

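          /* lll_robust_trylock attempts to change the lock word from 0
             to our TID; on failure it returns the nonzero value found,
             which we re-examine for FUTEX_OWNER_DIED.  */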
          oldval = lll_robust_trylock (mutex->__data.__lock, id);
          if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
            {
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              return EBUSY;
            }

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              if (oldval == id)
                lll_unlock (mutex->__data.__lock,
                            PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

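      /* The lock word now holds our TID.  Linking the mutex into the
         thread's robust list and clearing list_op_pending commits the
         acquisition as far as kernel cleanup is concerned.  */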
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
      mutex->__data.__count = 1;

      return 0;

    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        oldval
          = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                 id, 0);

        if (oldval != 0)
          {
            if ((oldval & FUTEX_OWNER_DIED) == 0)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            assert (robust);

            /* The mutex owner died.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_TRYLOCK_PI,
                                                          private), 0, 0);

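            /* EWOULDBLOCK from FUTEX_TRYLOCK_PI means the kernel could
               not acquire the futex for us, so report the mutex as
               busy.  */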
            if (INTERNAL_SYSCALL_ERROR_P (e, __err)
                && INTERNAL_SYSCALL_ERRNO (e, __err) == EWOULDBLOCK)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            oldval = mutex->__data.__lock;
          }

        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }

        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

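        /* Priority-protect protocol: read the current ceiling from the
           lock word, raise this thread's priority to the ceiling with
           __pthread_tpp_change_priority, then try to take the lock by
           installing ceilval | 1.  If the ceiling changed in the
           meantime, retry with the new value.  */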
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        if (oldval != ceilval)
          {
            __pthread_tpp_change_priority (oldprio, -1);
            break;
          }

        assert (mutex->__data.__owner == 0);
        /* Record the ownership.  */
        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  return EBUSY;
}

#ifndef __pthread_mutex_trylock
#ifndef pthread_mutex_trylock
strong_alias (__pthread_mutex_trylock, pthread_mutex_trylock)
#endif
#endif
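
/* Illustrative caller (a minimal sketch using only the public pthread
   API; this program is not part of glibc):

     #include <pthread.h>
     #include <stdio.h>

     static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

     int
     main (void)
     {
       if (pthread_mutex_trylock (&m) == 0)
         {
           puts ("lock acquired");
           pthread_mutex_unlock (&m);
         }
       else
         puts ("lock busy, trylock returned EBUSY");
       return 0;
     }
*/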