/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <dl-sysdep.h>
#include <tls.h>


/* Most architectures have exactly one stack pointer.  Some have more.  */
#define STACK_VARIABLES void *stackaddr

/* How to pass the values to the 'create_thread' function.  */
#define STACK_VARIABLES_ARGS stackaddr

/* How to declare the function which gets these parameters.  */
#define STACK_VARIABLES_PARMS void *stackaddr


/* Default alignment of stack.  */
#ifndef STACK_ALIGN
# define STACK_ALIGN __alignof__ (long double)
#endif

/* Default value for minimal stack size after allocating thread
   descriptor and guard.  */
#ifndef MINIMAL_REST_STACK
# define MINIMAL_REST_STACK 4096
#endif

/* Let the architecture add some flags to the mmap() call used to
   allocate stacks.  */
#ifndef ARCH_MAP_FLAGS
# define ARCH_MAP_FLAGS 0
#endif


/* Cache handling for not-yet-freed stacks.  */

/* Maximum size in bytes of the stack cache.  */
static size_t stack_cache_maxsize = 40 * 1024 * 1024; /* 40 MiB by default.  */
static size_t stack_cache_actsize;

/* Mutex protecting the cache size variables and the lists below.  */
static lll_lock_t stack_cache_lock = LLL_LOCK_INITIALIZER;

/* List of queued stack frames.  */
static LIST_HEAD (stack_cache);

/* List of the stacks in use.  */
static LIST_HEAD (stack_used);

/* List of the threads with user provided stacks in use.  No need to
   initialize this, since it's done in __pthread_initialize_minimal.  */
list_t __stack_user __attribute__ ((nocommon));
hidden_def (__stack_user)

/* Number of threads created.  */
static unsigned int nptl_ncreated;


/* Check whether the stack is still used or not.  */
#define FREE_P(descr) ((descr)->tid == 0)
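
/* The TID is cleared by the kernel: each thread is created with
   CLONE_CHILD_CLEARTID pointing at its 'tid' field, so a zero TID
   means the kernel is done with the stack and the block can be
   reused.  */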


/* We create a doubly linked list of all cache entries.  Doubly linked
   because this allows removing entries from the end cheaply.  */


/* Get a stack frame from the cache.  We have to match by size since
   some blocks might be too small or far too large.  */
static struct pthread *
get_cached_stack (size_t *sizep, void **memp)
{
  size_t size = *sizep;
  struct pthread *result = NULL;
  list_t *entry;

  lll_lock (stack_cache_lock);

  /* Search the cache for a matching entry.  We search for the
     smallest stack which has at least the required size.  Note that
     in normal situations the size of all allocated stacks is the
     same.  At the very least there are only a few different sizes.
     Therefore this loop will exit early most of the time with an
     exact match.  */
  list_for_each (entry, &stack_cache)
    {
      struct pthread *curr;

      curr = list_entry (entry, struct pthread, header.data.list);
      if (FREE_P (curr) && curr->stackblock_size >= size)
        {
          if (curr->stackblock_size == size)
            {
              result = curr;
              break;
            }

          if (result == NULL
              || result->stackblock_size > curr->stackblock_size)
            result = curr;
        }
    }

  if (__builtin_expect (result == NULL, 0)
      /* Make sure the size difference is not too excessive.  In that
         case we do not use the block.  */
      || __builtin_expect (result->stackblock_size > 4 * size, 0))
    {
      /* Release the lock.  */
      lll_unlock (stack_cache_lock);

      return NULL;
    }

  /* Dequeue the entry.  */
  list_del (&result->header.data.list);

  /* And add to the list of stacks in use.  */
  list_add (&result->header.data.list, &stack_used);

  /* And decrease the cache size.  */
  stack_cache_actsize -= result->stackblock_size;

  /* Release the lock early.  */
  lll_unlock (stack_cache_lock);

  /* Report size and location of the stack to the caller.  */
  *sizep = result->stackblock_size;
  *memp = result->stackblock;

  /* Cancellation handling is back to the default.  */
  result->cancelhandling = 0;
  result->cleanup = NULL;

  /* No pending event.  */
  result->nextevent = NULL;

  /* Clear the DTV.  */
  dtv_t *dtv = GET_DTV (result);
  memset (dtv, '\0', (dtv[-1].counter + 1) * sizeof (dtv_t));

  /* Re-initialize the TLS.  */
  return _dl_allocate_tls_init (result);
}
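
/* The contract of get_cached_stack, as a sketch: SIZE is an in/out
   parameter.  On entry it holds the minimum acceptable stack size, on
   success the (possibly larger) size of the cached block:

     size_t size = 2 * 1024 * 1024;
     void *mem;
     struct pthread *pd = get_cached_stack (&size, &mem);
     if (pd == NULL)
       .. fall back to mmap() ..

   allocate_stack below uses it exactly this way.  */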


/* Add a stack frame which is not used anymore to the stack cache.
   Must be called with the cache lock held.  */
static inline void
queue_stack (struct pthread *stack)
{
  /* We unconditionally add the stack to the list.  The memory may
     still be in use but it will not be reused until the kernel marks
     the stack as not used anymore.  */
  list_add (&stack->header.data.list, &stack_cache);

  stack_cache_actsize += stack->stackblock_size;
  if (__builtin_expect (stack_cache_actsize > stack_cache_maxsize, 0))
    {
      /* We reduce the size of the cache.  Remove the last entries
         until the size is below the limit.  */
      list_t *entry;
      list_t *prev;

      /* Search from the end of the list.  */
      list_for_each_prev_safe (entry, prev, &stack_cache)
        {
          struct pthread *curr;

          curr = list_entry (entry, struct pthread, header.data.list);
          if (FREE_P (curr))
            {
              /* Unlink the block.  */
              list_del (entry);

              /* Account for the freed memory.  */
              stack_cache_actsize -= curr->stackblock_size;

              /* Free the memory associated with the ELF TLS.  */
              _dl_deallocate_tls (curr, false);

              /* Remove this block.  This should never fail.  If it
                 does something is really wrong.  */
              if (munmap (curr->stackblock, curr->stackblock_size) != 0)
                abort ();

              /* Maybe we have freed enough.  */
              if (stack_cache_actsize <= stack_cache_maxsize)
                break;
            }
        }
    }
}
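
/* A sizing example with the defaults above: with stack_cache_maxsize
   at 40 MiB, caching 2 MiB stacks means at most about 20 free stacks
   are kept before the loop above starts unmapping blocks from the
   tail of the list.  */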


/* Allocate a stack (from the cache or via mmap()) together with the
   thread descriptor, returning 0 or an errno error code.  */
static int
allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
                ALLOCATE_STACK_PARMS)
{
  struct pthread *pd;
  size_t size;
  size_t pagesize_m1 = __getpagesize () - 1;

  assert (attr != NULL);
  assert (powerof2 (pagesize_m1 + 1));
  assert (TCB_ALIGNMENT >= STACK_ALIGN);

  /* Get the stack size from the attribute if it is set.  Otherwise we
     use the default we determined at start time.  */
  size = attr->stacksize ?: __default_stacksize;

  /* Get memory for the stack.  */
  if (__builtin_expect (attr->flags & ATTR_FLAG_STACKADDR, 0))
    {
      uintptr_t adj;

      /* If the user also specified the size of the stack make sure it
         is large enough.  */
      if (attr->stacksize != 0
          && attr->stacksize < (__static_tls_size + MINIMAL_REST_STACK))
        return EINVAL;

      /* Adjust stack size for alignment of the TLS block.  */
      adj = ((uintptr_t) attr->stackaddr) & __static_tls_align_m1;
      assert (size > adj);

      /* The user provided some memory.  Let's hope it matches the
         size...  We do not allocate guard pages if the user provided
         the stack.  It is the user's responsibility to do this if it
         is wanted.  */
      pd = (struct pthread *) (((uintptr_t) attr->stackaddr - adj)
                               & ~(__alignof (struct pthread) - 1)) - 1;
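
      /* For example (hypothetical numbers): if attr->stackaddr is
         0x40801234 and __static_tls_align_m1 is 0x3f, adj is 0x34 and
         the descriptor is placed just below 0x40801200, rounded down
         to the alignment of struct pthread.  attr->stackaddr is the
         top of the block; the stack grows down from there.  */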

      /* The user-provided stack memory needs to be cleared.  */
      memset (pd, '\0', sizeof (struct pthread));

      /* The first TSD block is included in the TCB.  */
      pd->specific[0] = pd->specific_1stblock;

#if LLL_LOCK_INITIALIZER != 0
      /* Initialize the lock.  */
      pd->lock = LLL_LOCK_INITIALIZER;
#endif

      /* Remember the stack-related values.  */
      pd->stackblock = (char *) attr->stackaddr - size;
      pd->stackblock_size = size - adj;

      /* This is a user-provided stack.  It will not be queued in the
         stack cache nor will the memory (except the TLS memory) be
         freed.  */
      pd->user_stack = true;

      /* This is at least the second thread.  */
      pd->header.data.multiple_threads = 1;

#ifdef NEED_DL_SYSINFO
      /* Copy the sysinfo value from the parent.  */
      pd->header.data.sysinfo
        = THREAD_GETMEM (THREAD_SELF, header.data.sysinfo);
#endif

      /* Allocate the DTV for this thread.  */
      if (_dl_allocate_tls (pd) == NULL)
        /* Something went wrong.  */
        return errno;

      /* Prepare to modify global data.  */
      lll_lock (stack_cache_lock);

      /* And add to the list of stacks in use.  */
      list_add (&pd->header.data.list, &__stack_user);

      lll_unlock (stack_cache_lock);
    }
  else
    {
      /* Allocate some anonymous memory.  If possible use the
         cache.  */
      size_t guardsize;
      size_t reqsize;
      void *mem;

#if COLORING_INCREMENT != 0
      /* Add one more page for stack coloring.  Don't do it for stacks
         with 16 times pagesize or larger.  This might just cause
         unnecessary misalignment.  */
      if (size <= 16 * pagesize_m1)
        size += pagesize_m1 + 1;
#endif

      /* Adjust the stack size for alignment.  */
      size &= ~__static_tls_align_m1;
      assert (size != 0);

      /* Make sure the size of the stack is enough for the guard and
         eventually the thread descriptor.  */
      guardsize = (attr->guardsize + pagesize_m1) & ~pagesize_m1;
      if (__builtin_expect (size < (guardsize + __static_tls_size
                                    + MINIMAL_REST_STACK + pagesize_m1 + 1),
                            0))
        /* The stack is too small (or the guard too large).  */
        return EINVAL;
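
      /* Illustrative numbers only: with 4096-byte pages, a one-page
         guard and a __static_tls_size of 1024 bytes, any size below
         4096 + 1024 + MINIMAL_REST_STACK + 4096 = 13312 bytes is
         rejected here.  */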

      /* Try to get a stack from the cache.  */
      reqsize = size;
      pd = get_cached_stack (&size, &mem);
      if (pd == NULL)
        {
          mem = mmap (NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC,
                      MAP_PRIVATE | MAP_ANONYMOUS | ARCH_MAP_FLAGS, -1, 0);

          if (__builtin_expect (mem == MAP_FAILED, 0))
            return errno;

          /* 'size' is guaranteed to be greater than zero.  So we can
             never get a NULL pointer back from mmap().  */
          assert (mem != NULL);

#if COLORING_INCREMENT != 0
          /* Atomically increment NCREATED.  */
          unsigned int ncreated = atomic_exchange_and_add (&nptl_ncreated, 1);

          /* We choose the offset for coloring by incrementing it for
             every new thread by a fixed amount.  The offset is used
             modulo the page size.  Even if coloring would be better
             relative to higher alignment values it makes no sense to
             do it since the mmap() interface does not allow us to
             specify any alignment for the returned memory block.  */
          size_t coloring = (ncreated * COLORING_INCREMENT) & pagesize_m1;

          /* Make sure the coloring offset does not disturb the alignment
             of the TCB and static TLS block.  */
          if (__builtin_expect ((coloring & __static_tls_align_m1) != 0, 0))
            coloring = (((coloring + __static_tls_align_m1)
                         & ~(__static_tls_align_m1))
                        & pagesize_m1);
#else
          /* Unless specified we do not make any adjustments.  */
# define coloring 0
#endif
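
          /* Worked example (hypothetical values): with 4096-byte pages,
             COLORING_INCREMENT == 128 and ncreated == 3, the offset is
             (3 * 128) & 0xfff == 384.  If __static_tls_align_m1 were
             0xff, the offset would be rounded up to 512 to keep the
             TCB and static TLS block aligned.  */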

          /* Place the thread descriptor at the end of the stack.  */
          pd = (struct pthread *) ((char *) mem + size - coloring) - 1;

          /* Remember the stack-related values.  */
          pd->stackblock = mem;
          pd->stackblock_size = size;

          /* We allocated the first block of the thread-specific data
             array.  This address will not change for the lifetime of
             this object.  */
          pd->specific[0] = pd->specific_1stblock;

#if LLL_LOCK_INITIALIZER != 0
          /* Initialize the lock.  */
          pd->lock = LLL_LOCK_INITIALIZER;
#endif

          /* This is at least the second thread.  */
          pd->header.data.multiple_threads = 1;

#ifdef NEED_DL_SYSINFO
          /* Copy the sysinfo value from the parent.  */
          pd->header.data.sysinfo
            = THREAD_GETMEM (THREAD_SELF, header.data.sysinfo);
#endif

          /* Allocate the DTV for this thread.  */
          if (_dl_allocate_tls (pd) == NULL)
            {
              /* Something went wrong.  */
              int err = errno;

              /* Free the stack memory we just allocated.  */
              (void) munmap (mem, size);

              return err;
            }

          /* Prepare to modify global data.  */
          lll_lock (stack_cache_lock);

          /* And add to the list of stacks in use.  */
          list_add (&pd->header.data.list, &stack_used);

          lll_unlock (stack_cache_lock);

          /* Note that all of the stack and the thread descriptor are
             zeroed.  This means we do not have to initialize fields
             with initial value zero.  This is specifically true for
             the 'tid' field which is always set back to zero once the
             stack is not used anymore and for the 'guardsize' field
             which will be read next.  */
        }

      /* Create or resize the guard area if necessary.  */
      if (__builtin_expect (guardsize > pd->guardsize, 0))
        {
          if (mprotect (mem, guardsize, PROT_NONE) != 0)
            {
              int err;
            mprot_error:
              err = errno;

              lll_lock (stack_cache_lock);

              /* Remove the thread from the list.  */
              list_del (&pd->header.data.list);

              lll_unlock (stack_cache_lock);

              /* Get rid of the TLS block we allocated.  */
              _dl_deallocate_tls (pd, false);

              /* Free the stack memory regardless of whether the size
                 of the cache is over the limit or not.  If this piece
                 of memory caused problems we better do not use it
                 anymore.  And we ignore possible errors: there is
                 nothing we could do.  */
              (void) munmap (mem, size);

              return err;
            }

          pd->guardsize = guardsize;
        }
      else if (__builtin_expect (pd->guardsize - guardsize > size - reqsize,
                                 0))
        {
          /* The old guard area is too large.  */
          if (mprotect ((char *) mem + guardsize, pd->guardsize - guardsize,
                        PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
            goto mprot_error;

          pd->guardsize = guardsize;
        }
    }

  /* We place the thread descriptor at the end of the stack.  */
  *pdp = pd;

#if TLS_TCB_AT_TP
  /* The stack begins before the TCB and the static TLS block.  */
  *stack = ((char *) (pd + 1) - __static_tls_size);
#else
# error "Implement me"
#endif

  return 0;
}
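
/* The resulting layout with TLS_TCB_AT_TP, sketched from high to low
   addresses (a summary of the code above, not authoritative for other
   TLS variants):

     mem + size  ->  top of the mmap()ed block (minus any coloring)
                     struct pthread / TCB (pd points here)
                     static TLS block
     *stack      ->  initial stack pointer, grows downward
                     ...
     mem         ->  guard page(s), if any  */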

/* This is how the function is called.  We do it this way to allow
   other variants of the function to have more parameters.  */
#define ALLOCATE_STACK(attr, pd) allocate_stack (attr, pd, &stackaddr)
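
/* A sketch of the call site (simplified; the real pthread_create also
   validates attributes and cleans up on failure):

     STACK_VARIABLES;
     struct pthread *pd;
     int err = ALLOCATE_STACK (iattr, &pd);
     if (err != 0)
       return err;
     .. later: create_thread (pd, .., STACK_VARIABLES_ARGS) ..  */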


void
internal_function
__deallocate_stack (struct pthread *pd)
{
  lll_lock (stack_cache_lock);

  /* Remove the thread from the list of threads with user-defined
     stacks.  */
  list_del (&pd->header.data.list);

  /* Not much to do.  Just free the mmap()ed memory.  Note that we do
     not reset the 'used' flag in the 'tid' field.  This is done by
     the kernel.  If no thread has been created yet this field is
     still zero.  */
  if (__builtin_expect (! pd->user_stack, 1))
    (void) queue_stack (pd);
  else
    /* Free the memory associated with the ELF TLS.  */
    _dl_deallocate_tls (pd, false);

  lll_unlock (stack_cache_lock);
}


/* In case of a fork() call the memory allocation in the child will be
   the same but only one thread is running.  All stacks except that of
   the one running thread are not used anymore.  We have to recycle
   them.  */
void
__reclaim_stacks (void)
{
  struct pthread *self = (struct pthread *) THREAD_SELF;

  /* No locking necessary.  The caller is the only stack in use.  */

  /* Mark all stacks except the still running one as free.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *curp;

      curp = list_entry (runp, struct pthread, header.data.list);
      if (curp != self)
        {
          /* This marks the stack as free.  */
          curp->tid = 0;

          /* Account for the size of the stack.  */
          stack_cache_actsize += curp->stackblock_size;
        }
    }

  /* Add the stacks of all the threads to the cache.  */
  list_splice (&stack_used, &stack_cache);

  /* Remove the entry for the current thread from the cache list and
     add it to the list of running threads.  Which of the two lists it
     ends up on is decided by the user_stack flag.  */
  list_del (&self->header.data.list);

  /* Re-initialize the lists for all the threads.  */
  INIT_LIST_HEAD (&stack_used);
  INIT_LIST_HEAD (&__stack_user);

  if (__builtin_expect (THREAD_GETMEM (self, user_stack), 0))
    list_add (&self->header.data.list, &__stack_user);
  else
    list_add (&self->header.data.list, &stack_used);

  /* There is one thread running.  */
  __nptl_nthreads = 1;

  /* Initialize the lock.  */
  stack_cache_lock = LLL_LOCK_INITIALIZER;
}
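
/* Note the final assignment: stack_cache_lock is re-initialized rather
   than unlocked because some other thread in the parent may have been
   holding it at the moment of the fork(), which would leave it locked
   in the child forever.  */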