/* Copyright (C) 2002-2013 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <signal.h>
#include <stddef.h>
#include <stdlib.h>
#include <sys/param.h>
#include <sys/resource.h>
#include <shlib-compat.h>
#include <lowlevellock.h>
#include <kernel-features.h>

/* Size and alignment of static TLS block.  */
size_t __static_tls_size;
size_t __static_tls_align_m1;

#ifndef __ASSUME_SET_ROBUST_LIST
/* Negative if the set_robust_list system call is not available and
   therefore cannot be used.  */
int __set_robust_list_avail;
# define set_robust_list_not_avail() \
  __set_robust_list_avail = -1
#else
# define set_robust_list_not_avail() do { } while (0)
#endif
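
/* The set_robust_list system call hands the kernel a pointer to this
   thread's list of held robust mutexes.  If the thread dies, the kernel
   walks that list and marks each contained futex as owner-dead, so the
   next waiter is woken with EOWNERDEAD instead of blocking forever.  */
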
#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
/* Nonzero if the kernel supports FUTEX_CLOCK_REALTIME.  */
int __have_futex_clock_realtime;
# define __set_futex_clock_realtime() \
  __have_futex_clock_realtime = 1
#else
# define __set_futex_clock_realtime() do { } while (0)
#endif
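
/* FUTEX_CLOCK_REALTIME makes FUTEX_WAIT_BITSET interpret its timeout as an
   absolute CLOCK_REALTIME value, which the timed locking functions rely on.
   Whether the running kernel understands the flag is probed at runtime in
   __pthread_initialize_minimal_internal below.  */
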
/* Version of the library, used in libthread_db to detect mismatches.  */
static const char nptl_version[] __attribute_used__ = VERSION;

extern void __libc_setup_tls (size_t tcbsize, size_t tcbalign);

void __nptl_set_robust (struct pthread *);

static void nptl_freeres (void);

#ifdef SHARED
static const struct pthread_functions pthread_functions =
  {
    .ptr_pthread_attr_destroy = __pthread_attr_destroy,
# if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
    .ptr___pthread_attr_init_2_0 = __pthread_attr_init_2_0,
# endif
    .ptr___pthread_attr_init_2_1 = __pthread_attr_init_2_1,
    .ptr_pthread_attr_getdetachstate = __pthread_attr_getdetachstate,
    .ptr_pthread_attr_setdetachstate = __pthread_attr_setdetachstate,
    .ptr_pthread_attr_getinheritsched = __pthread_attr_getinheritsched,
    .ptr_pthread_attr_setinheritsched = __pthread_attr_setinheritsched,
    .ptr_pthread_attr_getschedparam = __pthread_attr_getschedparam,
    .ptr_pthread_attr_setschedparam = __pthread_attr_setschedparam,
    .ptr_pthread_attr_getschedpolicy = __pthread_attr_getschedpolicy,
    .ptr_pthread_attr_setschedpolicy = __pthread_attr_setschedpolicy,
    .ptr_pthread_attr_getscope = __pthread_attr_getscope,
    .ptr_pthread_attr_setscope = __pthread_attr_setscope,
    .ptr_pthread_condattr_destroy = __pthread_condattr_destroy,
    .ptr_pthread_condattr_init = __pthread_condattr_init,
    .ptr___pthread_cond_broadcast = __pthread_cond_broadcast,
    .ptr___pthread_cond_destroy = __pthread_cond_destroy,
    .ptr___pthread_cond_init = __pthread_cond_init,
    .ptr___pthread_cond_signal = __pthread_cond_signal,
    .ptr___pthread_cond_wait = __pthread_cond_wait,
    .ptr___pthread_cond_timedwait = __pthread_cond_timedwait,
# if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_3_2)
    .ptr___pthread_cond_broadcast_2_0 = __pthread_cond_broadcast_2_0,
    .ptr___pthread_cond_destroy_2_0 = __pthread_cond_destroy_2_0,
    .ptr___pthread_cond_init_2_0 = __pthread_cond_init_2_0,
    .ptr___pthread_cond_signal_2_0 = __pthread_cond_signal_2_0,
    .ptr___pthread_cond_wait_2_0 = __pthread_cond_wait_2_0,
    .ptr___pthread_cond_timedwait_2_0 = __pthread_cond_timedwait_2_0,
# endif
    .ptr_pthread_equal = __pthread_equal,
    .ptr___pthread_exit = __pthread_exit,
    .ptr_pthread_getschedparam = __pthread_getschedparam,
    .ptr_pthread_setschedparam = __pthread_setschedparam,
    .ptr_pthread_mutex_destroy = __pthread_mutex_destroy,
    .ptr_pthread_mutex_init = __pthread_mutex_init,
    .ptr_pthread_mutex_lock = __pthread_mutex_lock,
    .ptr_pthread_mutex_unlock = __pthread_mutex_unlock,
    .ptr_pthread_self = __pthread_self,
    .ptr_pthread_setcancelstate = __pthread_setcancelstate,
    .ptr_pthread_setcanceltype = __pthread_setcanceltype,
    .ptr___pthread_cleanup_upto = __pthread_cleanup_upto,
    .ptr___pthread_once = __pthread_once,
    .ptr___pthread_rwlock_rdlock = __pthread_rwlock_rdlock,
    .ptr___pthread_rwlock_wrlock = __pthread_rwlock_wrlock,
    .ptr___pthread_rwlock_unlock = __pthread_rwlock_unlock,
    .ptr___pthread_key_create = __pthread_key_create,
    .ptr___pthread_getspecific = __pthread_getspecific,
    .ptr___pthread_setspecific = __pthread_setspecific,
    .ptr__pthread_cleanup_push_defer = __pthread_cleanup_push_defer,
    .ptr__pthread_cleanup_pop_restore = __pthread_cleanup_pop_restore,
    .ptr_nthreads = &__nptl_nthreads,
    .ptr___pthread_unwind = &__pthread_unwind,
    .ptr__nptl_deallocate_tsd = __nptl_deallocate_tsd,
    .ptr__nptl_setxid = __nptl_setxid,
    /* For now only the stack cache needs to be freed.  */
    .ptr_freeres = nptl_freeres,
    .ptr_set_robust = __nptl_set_robust
  };
# define ptr_pthread_functions &pthread_functions
#else
# define ptr_pthread_functions NULL
#endif
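
/* This table is handed to libc via __libc_pthread_init below.  libc's
   pthread forwarder stubs keep only function pointers for the entry points
   libc itself needs; once libpthread is loaded those pointers are
   redirected here, so previously single-threaded libc code paths start
   calling the real NPTL implementations.  */
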
/* This function is called indirectly from the freeres code in libc.  */
static void
__libc_freeres_fn_section
nptl_freeres (void)
{
  __free_stacks (0);
}

void
__nptl_set_robust (struct pthread *self)
{
  INTERNAL_SYSCALL_DECL (err);
  INTERNAL_SYSCALL (set_robust_list, err, 2, &self->robust_head,
                    sizeof (struct robust_list_head));
}
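
/* The registration above is what backs the POSIX robust-mutex API.  An
   illustrative, application-side sketch (not part of this file):

     pthread_mutex_t m;
     pthread_mutexattr_t a;
     pthread_mutexattr_init (&a);
     pthread_mutexattr_setrobust (&a, PTHREAD_MUTEX_ROBUST);
     pthread_mutex_init (&m, &a);

     if (pthread_mutex_lock (&m) == EOWNERDEAD)
       {
         // The previous owner died while holding the lock: repair the
         // protected state, then mark the mutex consistent again.
         pthread_mutex_consistent (&m);
       }

   When an owner dies, the kernel walks the list registered via
   set_robust_list and flags the futex, so the next locker is told
   EOWNERDEAD instead of blocking forever.  */
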
/* For asynchronous cancellation we use a signal.  This is the handler.  */
static void
sigcancel_handler (int sig, siginfo_t *si, void *ctx)
{
  /* Determine the process ID.  It might be negative if the thread is
     in the middle of a fork() call.  */
  pid_t pid = THREAD_GETMEM (THREAD_SELF, pid);
  if (__builtin_expect (pid < 0, 0))
    pid = -pid;

  /* Safety check.  It would be possible to call this function for
     other signals, or for the signal to be sent from another process.
     That is not correct and might even be a security problem.  Try to
     catch as many incorrect invocations as possible.  */
  if (sig != SIGCANCEL
      || si->si_pid != pid
      || si->si_code != SI_TKILL)
    return;

  struct pthread *self = THREAD_SELF;

  int oldval = THREAD_GETMEM (self, cancelhandling);
  while (1)
    {
      /* We are canceled now.  When canceled by another thread this flag
         is already set, but if the signal is sent directly (internally or
         from another process) it has to be done here.  */
      int newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;

      if (oldval == newval || (oldval & EXITING_BITMASK) != 0)
        /* Already canceled or exiting.  */
        break;

      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
                                              oldval);
      if (curval == oldval)
        {
          /* Set the return value.  */
          THREAD_SETMEM (self, result, PTHREAD_CANCELED);

          /* Make sure asynchronous cancellation is still enabled.  */
          if ((newval & CANCELTYPE_BITMASK) != 0)
            /* Run the registered destructors and terminate the thread.  */
            __do_cancel ();

          break;
        }

      oldval = curval;
    }
}
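
/* How the pieces fit together: pthread_cancel() always sets the CANCELED
   bit in the target's cancelhandling word, but it only sends SIGCANCEL
   (via tgkill) when the target currently has cancellation enabled and set
   to the asynchronous type; the handler above then unwinds the thread
   immediately through __do_cancel().  Threads using the default deferred
   type instead act on the flag at the next cancellation point.  */
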
struct xid_command *__xidcmd attribute_hidden;

/* We use the SIGSETXID signal in the setuid, setgid, etc. implementations to
   tell each thread to call the respective setxid syscall on itself.  This is
   the handler.  */
static void
sighandler_setxid (int sig, siginfo_t *si, void *ctx)
{
  /* Determine the process ID.  It might be negative if the thread is
     in the middle of a fork() call.  */
  pid_t pid = THREAD_GETMEM (THREAD_SELF, pid);
  if (__builtin_expect (pid < 0, 0))
    pid = -pid;

  /* Safety check.  It would be possible to call this function for
     other signals, or for the signal to be sent from another process.
     That is not correct and might even be a security problem.  Try to
     catch as many incorrect invocations as possible.  */
  if (sig != SIGSETXID
      || si->si_pid != pid
      || si->si_code != SI_TKILL)
    return;

  INTERNAL_SYSCALL_DECL (err);
  INTERNAL_SYSCALL_NCS (__xidcmd->syscall_no, err, 3, __xidcmd->id[0],
                        __xidcmd->id[1], __xidcmd->id[2]);

  /* Reset the SETXID flag.  */
  struct pthread *self = THREAD_SELF;
  int flags, newval;
  do
    {
      flags = THREAD_GETMEM (self, cancelhandling);
      newval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
                                          flags & ~SETXID_BITMASK, flags);
    }
  while (flags != newval);

  /* And release the futex.  */
  self->setxid_futex = 1;
  lll_futex_wake (&self->setxid_futex, 1, LLL_PRIVATE);

  if (atomic_decrement_val (&__xidcmd->cntr) == 0)
    lll_futex_wake (&__xidcmd->cntr, 1, LLL_PRIVATE);
}
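
/* Background for the handler above: the kernel keeps credentials per task,
   so a process-wide setuid()/setgid() has to be applied to every thread by
   hand.  The libc wrappers fill in __xidcmd (the syscall number plus up to
   three ids), send SIGSETXID to each thread, and wait on __xidcmd->cntr;
   every thread performs the syscall on itself here, and the last one to
   finish wakes the waiter.  */
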
/* When using __thread for this, we do it in libc so as not
   to give libpthread its own TLS segment just for this.  */
extern void **__libc_dl_error_tsd (void) __attribute__ ((const));

/* This can be set by the debugger before initialization is complete.  */
static bool __nptl_initial_report_events __attribute_used__;

static void
set_default_stacksize (size_t stacksize)
{
  if (stacksize < PTHREAD_STACK_MIN)
    stacksize = PTHREAD_STACK_MIN;

  /* Make sure it meets the minimum size that allocate_stack
     (allocatestack.c) will demand, which depends on the page size.  */
  const uintptr_t pagesz = GLRO(dl_pagesize);
  const size_t minstack = pagesz + __static_tls_size + MINIMAL_REST_STACK;

  if (stacksize < minstack)
    stacksize = minstack;

  /* Round the resulting size up to a multiple of the page size.  */
  stacksize = (stacksize + pagesz - 1) & -pagesz;

  __default_stacksize = stacksize;
}
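
/* Note on the rounding above: (x + pagesz - 1) & -pagesz rounds x up to the
   next multiple of pagesz, assuming pagesz is a power of two; e.g. with
   4 KiB pages a request of 0x100001 bytes becomes 0x101000.  */
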
void
__pthread_initialize_minimal_internal (int argc, char **argv, char **envp)
{
#ifndef SHARED
  /* Unlike in the dynamically linked case the dynamic linker has not
     taken care of initializing the TLS data structures.  */
  __libc_setup_tls (TLS_TCB_SIZE, TLS_TCB_ALIGN);

  /* We must prevent gcc from being clever and moving any of the
     following code ahead of the __libc_setup_tls call.  This function
     will initialize the thread register which is subsequently
     used.  */
  __asm __volatile ("");
#endif

  /* Minimal initialization of the thread descriptor.  */
  struct pthread *pd = THREAD_SELF;
  INTERNAL_SYSCALL_DECL (err);
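  /* set_tid_address both returns the caller's kernel TID and registers
     &pd->tid as the clear_child_tid address: when this thread exits, the
     kernel zeroes pd->tid and wakes a futex waiter there, which is what
     pthread_join() ultimately waits for.  */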
  pd->pid = pd->tid = INTERNAL_SYSCALL (set_tid_address, err, 1, &pd->tid);
  THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
  THREAD_SETMEM (pd, user_stack, true);
  if (LLL_LOCK_INITIALIZER != 0)
    THREAD_SETMEM (pd, lock, LLL_LOCK_INITIALIZER);
#if HP_TIMING_AVAIL
  THREAD_SETMEM (pd, cpuclock_offset, GL(dl_cpuclock_offset));
#endif

  /* Initialize the robust mutex data.  */
#ifdef __PTHREAD_MUTEX_HAVE_PREV
  pd->robust_prev = &pd->robust_head;
#endif
  pd->robust_head.list = &pd->robust_head;
#ifdef __NR_set_robust_list
  pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
                                  - offsetof (pthread_mutex_t,
                                              __data.__list.__next));
  int res = INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
                              sizeof (struct robust_list_head));
  if (INTERNAL_SYSCALL_ERROR_P (res, err))
#endif
    set_robust_list_not_avail ();

#ifndef __ASSUME_PRIVATE_FUTEX
  /* Private futexes are always used (at least internally) so that
     doing the test once this early is beneficial.  */
  {
    int word = 0;
    word = INTERNAL_SYSCALL (futex, err, 3, &word,
                             FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1);
    if (!INTERNAL_SYSCALL_ERROR_P (word, err))
      THREAD_SETMEM (pd, header.private_futex, FUTEX_PRIVATE_FLAG);
  }
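
  /* The probe above issues FUTEX_WAKE with FUTEX_PRIVATE_FLAG on a dummy
     word.  If the kernel accepts the flag the call succeeds (waking
     nobody), private futexes are recorded in the TCB, and process-local
     synchronization can skip the more expensive shared-futex path.  */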

  /* Private futexes were introduced before the FUTEX_CLOCK_REALTIME flag,
     so if we know the former are not supported we do not have to run this
     test at all.  It also means we know the kernel will return ENOSYS for
     unknown futex operations.  */
  if (THREAD_GETMEM (pd, header.private_futex) != 0)
#endif
#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
    {
      int word = 0;
      /* NB: the syscall actually takes six parameters.  The last is the
         bit mask.  But since we will not actually wait at all the value
         is irrelevant.  Given that passing six parameters is difficult
         on some architectures we just pass whatever random value the
         calling convention calls for to the kernel.  It causes no harm.  */
      word = INTERNAL_SYSCALL (futex, err, 5, &word,
                               FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME
                               | FUTEX_PRIVATE_FLAG, 1, NULL, 0);
      /* The value at &word is 0 while we claim to expect 1, so the call
         always fails immediately: EWOULDBLOCK if the flag combination is
         understood, ENOSYS if it is not.  */
      assert (INTERNAL_SYSCALL_ERROR_P (word, err));
      if (INTERNAL_SYSCALL_ERRNO (word, err) != ENOSYS)
        __set_futex_clock_realtime ();
    }
#endif

  /* Set initial thread's stack block from 0 up to __libc_stack_end.
     It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
     purposes this is good enough.  */
  THREAD_SETMEM (pd, stackblock_size, (size_t) __libc_stack_end);

  /* Initialize the list of all running threads with the main thread.  */
  INIT_LIST_HEAD (&__stack_user);
  list_add (&pd->list, &__stack_user);

  /* Before initializing __stack_user, the debugger could not find us and
     had to set __nptl_initial_report_events.  Propagate its setting.  */
  THREAD_SETMEM (pd, report_events, __nptl_initial_report_events);

  /* Install the cancellation signal handler.  If for some reason we
     cannot install the handler we do not abort.  Maybe we should, but
     it is only asynchronous cancellation which is affected.  */
  struct sigaction sa;
  sa.sa_sigaction = sigcancel_handler;
  sa.sa_flags = SA_SIGINFO;
  __sigemptyset (&sa.sa_mask);

  (void) __libc_sigaction (SIGCANCEL, &sa, NULL);

  /* Install the handler used to change the threads' uid/gid.  */
  sa.sa_sigaction = sighandler_setxid;
  sa.sa_flags = SA_SIGINFO | SA_RESTART;

  (void) __libc_sigaction (SIGSETXID, &sa, NULL);

  /* The parent process might have left these signals blocked.  Just in
     case, unblock them.  We reuse the signal mask in the sigaction
     structure.  It is already cleared.  */
  __sigaddset (&sa.sa_mask, SIGCANCEL);
  __sigaddset (&sa.sa_mask, SIGSETXID);
  (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &sa.sa_mask,
                           NULL, _NSIG / 8);

  /* Get the size of the static TLS block and its alignment
     requirements.  */
  size_t static_tls_align;
  _dl_get_tls_static_info (&__static_tls_size, &static_tls_align);

  /* Make sure the size takes all the alignments into account.  */
  if (STACK_ALIGN > static_tls_align)
    static_tls_align = STACK_ALIGN;
  __static_tls_align_m1 = static_tls_align - 1;

  __static_tls_size = roundup (__static_tls_size, static_tls_align);
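
  /* Illustrative numbers for the adjustment above: if ld.so reports a
     static TLS size of 0x5d0 bytes with alignment 16 and STACK_ALIGN is 64,
     static_tls_align becomes 64, __static_tls_align_m1 becomes 0x3f, and
     roundup() lifts __static_tls_size to 0x600.  */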

  /* Initialize the environment.  libc.so gets initialized after us due to a
     circular dependency and hence __environ is not available otherwise.  */
  __environ = envp;

  __libc_init_secure ();

  size_t stacksize = 0;
  char *envval = __libc_secure_getenv ("GLIBC_PTHREAD_DEFAULT_STACKSIZE");

  if (__glibc_unlikely (envval != NULL && envval[0] != '\0'))
    {
      char *env_conv = envval;
      size_t ret = strtoul (envval, &env_conv, 0);

      if (*env_conv == '\0' && env_conv != envval)
        stacksize = ret;
    }

  if (stacksize == 0)
    {
      /* Determine the default allowed stack size.  This is the size used
         in case the user does not specify one.  */
      struct rlimit limit;
      if (getrlimit (RLIMIT_STACK, &limit) != 0
          || limit.rlim_cur == RLIM_INFINITY)
        /* The system limit is not usable.  Use an architecture-specific
           default.  */
        stacksize = ARCH_STACK_DEFAULT_SIZE;
      else
        stacksize = limit.rlim_cur;
    }

  set_default_stacksize (stacksize);
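
  /* Illustrative use of the environment override read above (the value is
     an example only):

       GLIBC_PTHREAD_DEFAULT_STACKSIZE=0x200000 ./app

     gives threads created without an explicit pthread_attr_setstacksize()
     a 2 MiB default stack, still subject to the minimums and page rounding
     applied by set_default_stacksize.  strtoul() is called with base 0, so
     decimal, octal and hexadecimal spellings are accepted.  Because the
     variable is read with __libc_secure_getenv, it is ignored for
     set-user-ID and set-group-ID programs.  */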

  /* Transfer the old value from the dynamic linker's internal location.  */
  *__libc_dl_error_tsd () = *(*GL(dl_error_catch_tsd)) ();
  GL(dl_error_catch_tsd) = &__libc_dl_error_tsd;

  /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock,
     keep the lock count from the ld.so implementation.  */
  GL(dl_rtld_lock_recursive) = (void *) __pthread_mutex_lock;
  GL(dl_rtld_unlock_recursive) = (void *) __pthread_mutex_unlock;
  unsigned int rtld_lock_count = GL(dl_load_lock).mutex.__data.__count;
  GL(dl_load_lock).mutex.__data.__count = 0;
  while (rtld_lock_count-- > 0)
    __pthread_mutex_lock (&GL(dl_load_lock).mutex);
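
  /* Up to this point ld.so has been using its own low-level lock for
     dl_load_lock.  Zeroing the recursion count and re-acquiring the mutex
     that many times through __pthread_mutex_lock re-expresses any recursion
     this thread already holds in pthread_mutex terms, so later unlocks made
     through the new hooks stay balanced.  */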

  GL(dl_make_stack_executable_hook) = &__make_stacks_executable;

  GL(dl_init_static_tls) = &__pthread_init_static_tls;

  GL(dl_wait_lookup_done) = &__wait_lookup_done;

  /* Register the fork generation counter with the libc.  */
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
  __libc_multiple_threads_ptr =
#endif
    __libc_pthread_init (&__fork_generation, __reclaim_stacks,
                         ptr_pthread_functions);
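
  /* __libc_pthread_init lives in libc: it records the fork generation
     counter and the __reclaim_stacks callback for fork(), copies the
     pthread_functions table passed above into libc, and returns the
     address of libc's multiple-threads flag so both libraries can keep it
     in sync.  */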

  /* Determine whether the machine is SMP or not.  */
  __is_smp = is_smp_system ();
}

strong_alias (__pthread_initialize_minimal_internal,
              __pthread_initialize_minimal)

size_t
__pthread_get_minstack (const pthread_attr_t *attr)
{
  struct pthread_attr *iattr = (struct pthread_attr *) attr;

  return (GLRO(dl_pagesize) + __static_tls_size + PTHREAD_STACK_MIN
          + iattr->guardsize);
}