/* Copyright (C) 2002-2013 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <not-cancel.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <stap-probe.h>

#ifndef LLL_MUTEX_LOCK
# define LLL_MUTEX_LOCK(mutex) \
  lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK(mutex) \
  lll_trylock ((mutex)->__data.__lock)
# define LLL_ROBUST_MUTEX_LOCK(mutex, id) \
  lll_robust_lock ((mutex)->__data.__lock, id, \
                   PTHREAD_ROBUST_MUTEX_PSHARED (mutex))
#endif
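
/* The macros above are only defaults: a file that includes this one
   (the condition-variable locking code does this) can supply its own
   definitions and can additionally define NO_INCR to adjust the
   __nusers bookkeeping below.  */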

static int __pthread_mutex_lock_full (pthread_mutex_t *mutex)
     __attribute_noinline__;


int
__pthread_mutex_lock (mutex)
     pthread_mutex_t *mutex;
{
  assert (sizeof (mutex->__size) >= sizeof (mutex->__data));

  unsigned int type = PTHREAD_MUTEX_TYPE (mutex);

  LIBC_PROBE (mutex_entry, 1, mutex);

  if (__builtin_expect (type & ~PTHREAD_MUTEX_KIND_MASK_NP, 0))
    return __pthread_mutex_lock_full (mutex);

  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  if (__builtin_expect (type, PTHREAD_MUTEX_TIMED_NP)
      == PTHREAD_MUTEX_TIMED_NP)
    {
    simple:
      /* Normal mutex.  */
      LLL_MUTEX_LOCK (mutex);
      assert (mutex->__data.__owner == 0);
    }
  else if (__builtin_expect (type == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */

      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          return 0;
        }

      /* We have to get the mutex.  */
      LLL_MUTEX_LOCK (mutex);

      assert (mutex->__data.__owner == 0);
      mutex->__data.__count = 1;
    }
  else if (__builtin_expect (type == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    {
      if (! __is_smp)
        goto simple;

      if (LLL_MUTEX_TRYLOCK (mutex) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  LLL_MUTEX_LOCK (mutex);
                  break;
                }

#ifdef BUSY_WAIT_NOP
              BUSY_WAIT_NOP;
#endif
            }
          while (LLL_MUTEX_TRYLOCK (mutex) != 0);

          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
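      /* __data.__spins is kept as an exponentially weighted average of
         past spin counts: each update above moves it one eighth of the
         way toward the count just observed, so the next max_cnt
         (__spins * 2 + 10) adapts to recent contention.  */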
      assert (mutex->__data.__owner == 0);
    }
  else
    {
      assert (type == PTHREAD_MUTEX_ERRORCHECK_NP);
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
        return EDEADLK;
      goto simple;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}

static int
__pthread_mutex_lock_full (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
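      /* The store above publishes the mutex in list_op_pending: should
         this thread die before the mutex is enqueued on its robust
         list, the kernel can still find the lock and pass it on with
         the owner-died flag set.  */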
      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id;
#ifdef NO_INCR
              newval |= FUTEX_WAITERS;
#else
              newval |= (oldval & FUTEX_WAITERS);
#endif

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  If we are not supposed
                 to increment __nusers we actually have to decrement
                 it here.  */
#ifdef NO_INCR
              --mutex->__data.__nusers;
#endif

              return EOWNERDEAD;
            }
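          /* Returning EOWNERDEAD hands the still inconsistent mutex to
             the caller, who is expected to repair the protected state
             and call pthread_mutex_consistent before unlocking;
             unlocking without doing so makes the mutex permanently
             unrecoverable.  */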
          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }
          oldval = LLL_ROBUST_MUTEX_LOCK (mutex, id);

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              lll_unlock (mutex->__data.__lock,
                          PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);
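      /* The loop above restarts whenever the lock was handed to us
         with FUTEX_OWNER_DIED set again, i.e. yet another owner died
         while we were waiting; the recovery path at the top of the
         loop then claims the mutex.  */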
      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));
        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }
        int newval = id;
#ifdef NO_INCR
        newval |= FUTEX_WAITERS;
#endif
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      newval, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1, 0);
            if (INTERNAL_SYSCALL_ERROR_P (e, __err)
                && (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))
              {
                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                        || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                            && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                /* ESRCH can happen only for non-robust PI mutexes where
                   the owner of the lock died.  */
                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust);

                /* Delay the thread indefinitely.  */
                while (1)
                  pause_not_cancel ();
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }
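        /* With FUTEX_LOCK_PI the kernel hands the lock to exactly one
           waiter, so unlike the non-PI robust path above there is no
           retry loop here: at most one owner-died hand-off has to be
           handled.  */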
        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  If we are not supposed to
               increment __nusers we actually have to decrement it here.  */
#ifdef NO_INCR
            --mutex->__data.__nusers;
#endif

            return EOWNERDEAD;
          }
        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }
        mutex->__data.__count = 1;
        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }
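        /* In the lock word of a PP mutex the bits below the
           priority-ceiling field encode the lock state: ceilval means
           unlocked, ceilval | 1 locked with no waiters, and
           ceilval | 2 locked with potential waiters that need a futex
           wake-up.  */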
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
                                                     ceilval | 2,
#else
                                                     ceilval | 1,
#endif
                                                     ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  lll_futex_wait (&mutex->__data.__lock, ceilval | 2,
                                  PTHREAD_MUTEX_PSHARED (mutex));
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);
        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;
    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }
  /* Record the ownership.  */
  mutex->__data.__owner = id;
  ++mutex->__data.__nusers;

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}
#ifndef __pthread_mutex_lock
strong_alias (__pthread_mutex_lock, pthread_mutex_lock)
hidden_def (__pthread_mutex_lock)
#endif
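
/* Used when a condition-variable wait has been woken by being requeued
   onto a PI mutex: the kernel has already acquired the mutex for the
   thread, so only the user-level bookkeeping still has to be brought
   up to date.  */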
#ifdef NO_INCR
void
__pthread_mutex_cond_lock_adjust (mutex)
     pthread_mutex_t *mutex;
{
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0);
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0);
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PSHARED_BIT) == 0);

  /* Record the ownership.  */
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  mutex->__data.__owner = id;

  if (mutex->__data.__kind == PTHREAD_MUTEX_PI_RECURSIVE_NP)
    ++mutex->__data.__count;
}
#endif