2 Samba Unix SMB/CIFS implementation.
4 Samba trivial allocation library - new interface
6 NOTE: Please read talloc_guide.txt for full documentation
8 Copyright (C) Andrew Tridgell 2004
9 Copyright (C) Stefan Metzmacher 2006
11 ** NOTE! The following LGPL license applies to the talloc
12 ** library. This does NOT imply that all of Samba is released
15 This library is free software; you can redistribute it and/or
16 modify it under the terms of the GNU Lesser General Public
17 License as published by the Free Software Foundation; either
18 version 3 of the License, or (at your option) any later version.
20 This library is distributed in the hope that it will be useful,
21 but WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 Lesser General Public License for more details.
25 You should have received a copy of the GNU Lesser General Public
26 License along with this library; if not, see <http://www.gnu.org/licenses/>.
30 inspired by http://swapped.cc/halloc/
36 #ifdef HAVE_SYS_AUXV_H
40 #ifdef TALLOC_BUILD_VERSION_MAJOR
41 #if (TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR)
42 #error "TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR"
46 #ifdef TALLOC_BUILD_VERSION_MINOR
47 #if (TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR)
48 #error "TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR"
52 /* Special macros that are no-ops except when run under Valgrind on
53 * x86. They've moved a little bit from valgrind 1.0.4 to 1.9.4 */
54 #ifdef HAVE_VALGRIND_MEMCHECK_H
55 /* memcheck.h includes valgrind.h */
56 #include <valgrind/memcheck.h>
57 #elif defined(HAVE_VALGRIND_H)
61 /* use this to force every realloc to change the pointer, to stress test
62 code that might not cope */
63 #define ALWAYS_REALLOC 0
66 #define MAX_TALLOC_SIZE 0x10000000
68 #define TALLOC_FLAG_FREE 0x01
69 #define TALLOC_FLAG_LOOP 0x02
70 #define TALLOC_FLAG_POOL 0x04 /* This is a talloc pool */
71 #define TALLOC_FLAG_POOLMEM 0x08 /* This is allocated in a pool */
74 * Bits above this are random, used to make it harder to fake talloc
75 * headers during an attack. Try not to change this without good reason.
77 #define TALLOC_FLAG_MASK 0x0F
79 #define TALLOC_MAGIC_REFERENCE ((const char *)1)
81 #define TALLOC_MAGIC_BASE 0xe814ec70
82 static unsigned int talloc_magic = (
84 (TALLOC_VERSION_MAJOR << 12) +
85 (TALLOC_VERSION_MINOR << 4));
87 /* by default we abort when given a bad pointer (such as when talloc_free() is called
88 on a pointer that came from malloc() */
90 #define TALLOC_ABORT(reason) abort()
93 #ifndef discard_const_p
94 #if defined(__intptr_t_defined) || defined(HAVE_INTPTR_T)
95 # define discard_const_p(type, ptr) ((type *)((intptr_t)(ptr)))
97 # define discard_const_p(type, ptr) ((type *)(ptr))
101 /* these macros gain us a few percent of speed on gcc */
103 /* the strange !! is to ensure that __builtin_expect() takes either 0 or 1
104 as its first argument */
106 #define likely(x) __builtin_expect(!!(x), 1)
109 #define unlikely(x) __builtin_expect(!!(x), 0)
113 #define likely(x) (x)
116 #define unlikely(x) (x)
120 /* this null_context is only used if talloc_enable_leak_report() or
121 talloc_enable_leak_report_full() is called, otherwise it remains
124 static void *null_context;
125 static void *autofree_context;
127 /* used to enable fill of memory on free, which can be useful for
128 * catching use after free errors when valgrind is too slow
136 #define TALLOC_FILL_ENV "TALLOC_FREE_FILL"
139 * do not wipe the header, to allow the
140 * double-free logic to still work
142 #define TC_INVALIDATE_FULL_FILL_CHUNK(_tc) do { \
143 if (unlikely(talloc_fill.enabled)) { \
144 size_t _flen = (_tc)->size; \
145 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
146 memset(_fptr, talloc_fill.fill_value, _flen); \
150 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
151 /* Mark the whole chunk as not accessible */
152 #define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { \
153 size_t _flen = TC_HDR_SIZE + (_tc)->size; \
154 char *_fptr = (char *)(_tc); \
155 VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
158 #define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { } while (0)
161 #define TC_INVALIDATE_FULL_CHUNK(_tc) do { \
162 TC_INVALIDATE_FULL_FILL_CHUNK(_tc); \
163 TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc); \
166 #define TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \
167 if (unlikely(talloc_fill.enabled)) { \
168 size_t _flen = (_tc)->size - (_new_size); \
169 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
170 _fptr += (_new_size); \
171 memset(_fptr, talloc_fill.fill_value, _flen); \
175 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
176 /* Mark the unused bytes not accessable */
177 #define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \
178 size_t _flen = (_tc)->size - (_new_size); \
179 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
180 _fptr += (_new_size); \
181 VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
184 #define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
187 #define TC_INVALIDATE_SHRINK_CHUNK(_tc, _new_size) do { \
188 TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size); \
189 TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \
192 #define TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \
193 if (unlikely(talloc_fill.enabled)) { \
194 size_t _flen = (_tc)->size - (_new_size); \
195 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
196 _fptr += (_new_size); \
197 memset(_fptr, talloc_fill.fill_value, _flen); \
201 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
202 /* Mark the unused bytes as undefined */
203 #define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \
204 size_t _flen = (_tc)->size - (_new_size); \
205 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
206 _fptr += (_new_size); \
207 VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \
210 #define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
213 #define TC_UNDEFINE_SHRINK_CHUNK(_tc, _new_size) do { \
214 TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size); \
215 TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \
218 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
219 /* Mark the new bytes as undefined */
220 #define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { \
221 size_t _old_used = TC_HDR_SIZE + (_tc)->size; \
222 size_t _new_used = TC_HDR_SIZE + (_new_size); \
223 size_t _flen = _new_used - _old_used; \
224 char *_fptr = _old_used + (char *)(_tc); \
225 VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \
228 #define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
231 #define TC_UNDEFINE_GROW_CHUNK(_tc, _new_size) do { \
232 TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size); \
235 struct talloc_reference_handle {
236 struct talloc_reference_handle *next, *prev;
238 const char *location;
241 struct talloc_memlimit {
242 struct talloc_chunk *parent;
243 struct talloc_memlimit *upper;
248 static inline bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size);
249 static inline void talloc_memlimit_grow(struct talloc_memlimit *limit,
251 static inline void talloc_memlimit_shrink(struct talloc_memlimit *limit,
253 static inline void tc_memlimit_update_on_free(struct talloc_chunk *tc);
255 static inline void _tc_set_name_const(struct talloc_chunk *tc,
258 typedef int (*talloc_destructor_t)(void *);
260 struct talloc_pool_hdr;
262 struct talloc_chunk {
264 struct talloc_chunk *next, *prev;
265 struct talloc_chunk *parent, *child;
266 struct talloc_reference_handle *refs;
267 talloc_destructor_t destructor;
273 * if 'limit' is set it means all *new* children of the context will
274 * be limited to a total aggregate size of max_size for memory
276 * cur_size is used to keep track of the current use
278 struct talloc_memlimit *limit;
281 * For members of a pool (i.e. TALLOC_FLAG_POOLMEM is set), "pool"
282 * is a pointer to the struct talloc_chunk of the pool that it was
283 * allocated from. This way children can quickly find the pool to chew
286 struct talloc_pool_hdr *pool;
289 /* 16 byte alignment seems to keep everyone happy */
290 #define TC_ALIGN16(s) (((s)+15)&~15)
291 #define TC_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_chunk))
292 #define TC_PTR_FROM_CHUNK(tc) ((void *)(TC_HDR_SIZE + (char*)tc))
294 _PUBLIC_ int talloc_version_major(void)
296 return TALLOC_VERSION_MAJOR;
299 _PUBLIC_ int talloc_version_minor(void)
301 return TALLOC_VERSION_MINOR;
304 _PUBLIC_ int talloc_test_get_magic(void)
309 static void (*talloc_log_fn)(const char *message);
311 _PUBLIC_ void talloc_set_log_fn(void (*log_fn)(const char *message))
313 talloc_log_fn = log_fn;
316 #ifdef HAVE_CONSTRUCTOR_ATTRIBUTE
317 void talloc_lib_init(void) __attribute__((constructor));
318 void talloc_lib_init(void)
320 uint32_t random_value;
321 #if defined(HAVE_GETAUXVAL) && defined(AT_RANDOM)
324 * Use the kernel-provided random values used for
325 * ASLR. This won't change per-exec, which is ideal for us
327 p = (uint8_t *) getauxval(AT_RANDOM);
330 * We get 16 bytes from getauxval. By calling rand(),
331 * a totally insecure PRNG, but one that will
332 * deterministically have a different value when called
333 * twice, we ensure that if two talloc-like libraries
334 * are somehow loaded in the same address space, that
335 * because we choose different bytes, we will keep the
336 * protection against collision of multiple talloc
339 * This protection is important because the effects of
340 * passing a talloc pointer from one to the other may
341 * be very hard to determine.
343 int offset = rand() % (16 - sizeof(random_value));
344 memcpy(&random_value, p + offset, sizeof(random_value));
349 * Otherwise, hope the location we are loaded in
350 * memory is randomised by someone else
352 random_value = ((uintptr_t)talloc_lib_init & 0xFFFFFFFF);
354 talloc_magic = random_value & ~TALLOC_FLAG_MASK;
357 #warning "No __attribute__((constructor)) support found on this platform, additional talloc security measures not available"
360 static void talloc_log(const char *fmt, ...) PRINTF_ATTRIBUTE(1,2);
361 static void talloc_log(const char *fmt, ...)
366 if (!talloc_log_fn) {
371 message = talloc_vasprintf(NULL, fmt, ap);
374 talloc_log_fn(message);
375 talloc_free(message);
/* Default log sink: write the message verbatim to stderr. */
static void talloc_log_stderr(const char *message)
{
	fprintf(stderr, "%s", message);
}
383 _PUBLIC_ void talloc_set_log_stderr(void)
385 talloc_set_log_fn(talloc_log_stderr);
388 static void (*talloc_abort_fn)(const char *reason);
390 _PUBLIC_ void talloc_set_abort_fn(void (*abort_fn)(const char *reason))
392 talloc_abort_fn = abort_fn;
395 static void talloc_abort(const char *reason)
397 talloc_log("%s\n", reason);
399 if (!talloc_abort_fn) {
400 TALLOC_ABORT(reason);
403 talloc_abort_fn(reason);
/*
 * Abort because a chunk's magic carries a different talloc version.
 * The 'magic' argument is currently unused; kept for ABI/debugging.
 */
static void talloc_abort_magic(unsigned magic)
{
	talloc_abort("Bad talloc magic value - wrong talloc version used/mixed");
}
/* Abort because a chunk marked TALLOC_FLAG_FREE was accessed. */
static void talloc_abort_access_after_free(void)
{
	talloc_abort("Bad talloc magic value - access after free");
}
/* Abort because the header magic matches nothing talloc ever wrote
 * (e.g. talloc_free() on a malloc()ed pointer). */
static void talloc_abort_unknown_value(void)
{
	talloc_abort("Bad talloc magic value - unknown value");
}
421 /* panic if we get a bad magic value */
422 static inline struct talloc_chunk *talloc_chunk_from_ptr(const void *ptr)
424 const char *pp = (const char *)ptr;
425 struct talloc_chunk *tc = discard_const_p(struct talloc_chunk, pp - TC_HDR_SIZE);
426 if (unlikely((tc->flags & (TALLOC_FLAG_FREE | ~TALLOC_FLAG_MASK)) != talloc_magic)) {
427 if ((tc->flags & (~0xF)) == talloc_magic) {
428 talloc_abort_magic(tc->flags & (~TALLOC_FLAG_MASK));
432 if (tc->flags & TALLOC_FLAG_FREE) {
433 talloc_log("talloc: access after free error - first free may be at %s\n", tc->name);
434 talloc_abort_access_after_free();
437 talloc_abort_unknown_value();
444 /* hook into the front of the list */
445 #define _TLIST_ADD(list, p) \
449 (p)->next = (p)->prev = NULL; \
451 (list)->prev = (p); \
452 (p)->next = (list); \
458 /* remove an element from a list - element doesn't have to be in list. */
459 #define _TLIST_REMOVE(list, p) \
461 if ((p) == (list)) { \
462 (list) = (p)->next; \
463 if (list) (list)->prev = NULL; \
465 if ((p)->prev) (p)->prev->next = (p)->next; \
466 if ((p)->next) (p)->next->prev = (p)->prev; \
468 if ((p) && ((p) != (list))) (p)->next = (p)->prev = NULL; \
473 return the parent chunk of a pointer
475 static inline struct talloc_chunk *talloc_parent_chunk(const void *ptr)
477 struct talloc_chunk *tc;
479 if (unlikely(ptr == NULL)) {
483 tc = talloc_chunk_from_ptr(ptr);
484 while (tc->prev) tc=tc->prev;
489 _PUBLIC_ void *talloc_parent(const void *ptr)
491 struct talloc_chunk *tc = talloc_parent_chunk(ptr);
492 return tc? TC_PTR_FROM_CHUNK(tc) : NULL;
498 _PUBLIC_ const char *talloc_parent_name(const void *ptr)
500 struct talloc_chunk *tc = talloc_parent_chunk(ptr);
501 return tc? tc->name : NULL;
505 A pool carries an in-pool object count in the first 16 bytes.
506 This is done to support talloc_steal() to a parent outside of the
507 pool. The count includes the pool itself, so a talloc_free() on a pool will
508 only destroy the pool if the count has dropped to zero. A talloc_free() of a
509 pool member will reduce the count, and eventually also call free(3) on the
512 The object count is not put into "struct talloc_chunk" because it is only
513 relevant for talloc pools and the alignment to 16 bytes would increase the
514 memory footprint of each talloc chunk by those 16 bytes.
517 struct talloc_pool_hdr {
519 unsigned int object_count;
523 #define TP_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_pool_hdr))
525 static inline struct talloc_pool_hdr *talloc_pool_from_chunk(struct talloc_chunk *c)
527 return (struct talloc_pool_hdr *)((char *)c - TP_HDR_SIZE);
530 static inline struct talloc_chunk *talloc_chunk_from_pool(struct talloc_pool_hdr *h)
532 return (struct talloc_chunk *)((char *)h + TP_HDR_SIZE);
535 static inline void *tc_pool_end(struct talloc_pool_hdr *pool_hdr)
537 struct talloc_chunk *tc = talloc_chunk_from_pool(pool_hdr);
538 return (char *)tc + TC_HDR_SIZE + pool_hdr->poolsize;
541 static inline size_t tc_pool_space_left(struct talloc_pool_hdr *pool_hdr)
543 return (char *)tc_pool_end(pool_hdr) - (char *)pool_hdr->end;
546 /* If tc is inside a pool, this gives the next neighbour. */
547 static inline void *tc_next_chunk(struct talloc_chunk *tc)
549 return (char *)tc + TC_ALIGN16(TC_HDR_SIZE + tc->size);
/* First member chunk of a pool: the one right after the pool chunk itself. */
static inline void *tc_pool_first_chunk(struct talloc_pool_hdr *pool_hdr)
{
	struct talloc_chunk *tc = talloc_chunk_from_pool(pool_hdr);
	return tc_next_chunk(tc);
}
558 /* Mark the whole remaining pool as not accessable */
559 static inline void tc_invalidate_pool(struct talloc_pool_hdr *pool_hdr)
561 size_t flen = tc_pool_space_left(pool_hdr);
563 if (unlikely(talloc_fill.enabled)) {
564 memset(pool_hdr->end, talloc_fill.fill_value, flen);
567 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
568 VALGRIND_MAKE_MEM_NOACCESS(pool_hdr->end, flen);
576 static inline struct talloc_chunk *tc_alloc_pool(struct talloc_chunk *parent,
577 size_t size, size_t prefix_len)
579 struct talloc_pool_hdr *pool_hdr = NULL;
581 struct talloc_chunk *result;
584 if (parent == NULL) {
588 if (parent->flags & TALLOC_FLAG_POOL) {
589 pool_hdr = talloc_pool_from_chunk(parent);
591 else if (parent->flags & TALLOC_FLAG_POOLMEM) {
592 pool_hdr = parent->pool;
595 if (pool_hdr == NULL) {
599 space_left = tc_pool_space_left(pool_hdr);
602 * Align size to 16 bytes
604 chunk_size = TC_ALIGN16(size + prefix_len);
606 if (space_left < chunk_size) {
610 result = (struct talloc_chunk *)((char *)pool_hdr->end + prefix_len);
612 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
613 VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr->end, chunk_size);
616 pool_hdr->end = (void *)((char *)pool_hdr->end + chunk_size);
618 result->flags = talloc_magic | TALLOC_FLAG_POOLMEM;
619 result->pool = pool_hdr;
621 pool_hdr->object_count++;
627 Allocate a bit of memory as a child of an existing pointer
629 static inline void *__talloc_with_prefix(const void *context,
632 struct talloc_chunk **tc_ret)
634 struct talloc_chunk *tc = NULL;
635 struct talloc_memlimit *limit = NULL;
636 size_t total_len = TC_HDR_SIZE + size + prefix_len;
638 if (unlikely(context == NULL)) {
639 context = null_context;
642 if (unlikely(size >= MAX_TALLOC_SIZE)) {
646 if (unlikely(total_len < TC_HDR_SIZE)) {
650 if (context != NULL) {
651 struct talloc_chunk *ptc = talloc_chunk_from_ptr(context);
653 if (ptc->limit != NULL) {
657 tc = tc_alloc_pool(ptc, TC_HDR_SIZE+size, prefix_len);
664 * Only do the memlimit check/update on actual allocation.
666 if (!talloc_memlimit_check(limit, total_len)) {
671 ptr = malloc(total_len);
672 if (unlikely(ptr == NULL)) {
675 tc = (struct talloc_chunk *)(ptr + prefix_len);
676 tc->flags = talloc_magic;
679 talloc_memlimit_grow(limit, total_len);
684 tc->destructor = NULL;
689 if (likely(context)) {
690 struct talloc_chunk *parent = talloc_chunk_from_ptr(context);
693 parent->child->parent = NULL;
694 tc->next = parent->child;
703 tc->next = tc->prev = tc->parent = NULL;
707 return TC_PTR_FROM_CHUNK(tc);
/*
 * Allocate a child of 'context'. Thin wrapper over
 * __talloc_with_prefix() with no prefix bytes.
 * NOTE(review): the 'size_t size' parameter line was elided in this
 * listing; restored to match the upstream signature.
 */
static inline void *__talloc(const void *context,
			     size_t size,
			     struct talloc_chunk **tc)
{
	return __talloc_with_prefix(context, size, 0, tc);
}
718 * Create a talloc pool
721 static inline void *_talloc_pool(const void *context, size_t size)
723 struct talloc_chunk *tc;
724 struct talloc_pool_hdr *pool_hdr;
727 result = __talloc_with_prefix(context, size, TP_HDR_SIZE, &tc);
729 if (unlikely(result == NULL)) {
733 pool_hdr = talloc_pool_from_chunk(tc);
735 tc->flags |= TALLOC_FLAG_POOL;
738 pool_hdr->object_count = 1;
739 pool_hdr->end = result;
740 pool_hdr->poolsize = size;
742 tc_invalidate_pool(pool_hdr);
747 _PUBLIC_ void *talloc_pool(const void *context, size_t size)
749 return _talloc_pool(context, size);
753 * Create a talloc pool correctly sized for a basic size plus
754 * a number of subobjects whose total size is given. Essentially
755 * a custom allocator for talloc to reduce fragmentation.
758 _PUBLIC_ void *_talloc_pooled_object(const void *ctx,
760 const char *type_name,
761 unsigned num_subobjects,
762 size_t total_subobjects_size)
764 size_t poolsize, subobjects_slack, tmp;
765 struct talloc_chunk *tc;
766 struct talloc_pool_hdr *pool_hdr;
769 poolsize = type_size + total_subobjects_size;
771 if ((poolsize < type_size) || (poolsize < total_subobjects_size)) {
775 if (num_subobjects == UINT_MAX) {
778 num_subobjects += 1; /* the object body itself */
781 * Alignment can increase the pool size by at most 15 bytes per object
782 * plus alignment for the object itself
784 subobjects_slack = (TC_HDR_SIZE + TP_HDR_SIZE + 15) * num_subobjects;
785 if (subobjects_slack < num_subobjects) {
789 tmp = poolsize + subobjects_slack;
790 if ((tmp < poolsize) || (tmp < subobjects_slack)) {
795 ret = _talloc_pool(ctx, poolsize);
800 tc = talloc_chunk_from_ptr(ret);
801 tc->size = type_size;
803 pool_hdr = talloc_pool_from_chunk(tc);
805 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
806 VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr->end, type_size);
809 pool_hdr->end = ((char *)pool_hdr->end + TC_ALIGN16(type_size));
811 _tc_set_name_const(tc, type_name);
819 setup a destructor to be called on free of a pointer
820 the destructor should return 0 on success, or -1 on failure.
821 if the destructor fails then the free is failed, and the memory can
822 be continued to be used
824 _PUBLIC_ void _talloc_set_destructor(const void *ptr, int (*destructor)(void *))
826 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
827 tc->destructor = destructor;
831 increase the reference count on a piece of memory.
833 _PUBLIC_ int talloc_increase_ref_count(const void *ptr)
835 if (unlikely(!talloc_reference(null_context, ptr))) {
842 helper for talloc_reference()
844 this is referenced by a function pointer and should not be inline
846 static int talloc_reference_destructor(struct talloc_reference_handle *handle)
848 struct talloc_chunk *ptr_tc = talloc_chunk_from_ptr(handle->ptr);
849 _TLIST_REMOVE(ptr_tc->refs, handle);
854 more efficient way to add a name to a pointer - the name must point to a
857 static inline void _tc_set_name_const(struct talloc_chunk *tc,
864 internal talloc_named_const()
/*
 * Internal talloc_named_const(): allocate 'size' bytes under 'context'
 * and attach 'name' (which must point to static/long-lived storage —
 * the pointer is stored, not copied). Returns NULL on failure.
 */
static inline void *_talloc_named_const(const void *context, size_t size, const char *name)
{
	void *ptr;
	struct talloc_chunk *tc;

	ptr = __talloc(context, size, &tc);
	if (unlikely(ptr == NULL)) {
		return NULL;
	}

	_tc_set_name_const(tc, name);

	return ptr;
}
882 make a secondary reference to a pointer, hanging off the given context.
883 the pointer remains valid until both the original caller and this given
886 the major use for this is when two different structures need to reference the
887 same underlying data, and you want to be able to free the two instances separately,
890 _PUBLIC_ void *_talloc_reference_loc(const void *context, const void *ptr, const char *location)
892 struct talloc_chunk *tc;
893 struct talloc_reference_handle *handle;
894 if (unlikely(ptr == NULL)) return NULL;
896 tc = talloc_chunk_from_ptr(ptr);
897 handle = (struct talloc_reference_handle *)_talloc_named_const(context,
898 sizeof(struct talloc_reference_handle),
899 TALLOC_MAGIC_REFERENCE);
900 if (unlikely(handle == NULL)) return NULL;
902 /* note that we hang the destructor off the handle, not the
903 main context as that allows the caller to still setup their
904 own destructor on the context if they want to */
905 talloc_set_destructor(handle, talloc_reference_destructor);
906 handle->ptr = discard_const_p(void, ptr);
907 handle->location = location;
908 _TLIST_ADD(tc->refs, handle);
912 static void *_talloc_steal_internal(const void *new_ctx, const void *ptr);
914 static inline void _tc_free_poolmem(struct talloc_chunk *tc,
915 const char *location)
917 struct talloc_pool_hdr *pool;
918 struct talloc_chunk *pool_tc;
922 pool_tc = talloc_chunk_from_pool(pool);
923 next_tc = tc_next_chunk(tc);
925 tc->flags |= TALLOC_FLAG_FREE;
927 /* we mark the freed memory with where we called the free
928 * from. This means on a double free error we can report where
929 * the first free came from
933 TC_INVALIDATE_FULL_CHUNK(tc);
935 if (unlikely(pool->object_count == 0)) {
936 talloc_abort("Pool object count zero!");
940 pool->object_count--;
942 if (unlikely(pool->object_count == 1
943 && !(pool_tc->flags & TALLOC_FLAG_FREE))) {
945 * if there is just one object left in the pool
946 * and pool->flags does not have TALLOC_FLAG_FREE,
947 * it means this is the pool itself and
948 * the rest is available for new objects
951 pool->end = tc_pool_first_chunk(pool);
952 tc_invalidate_pool(pool);
956 if (unlikely(pool->object_count == 0)) {
958 * we mark the freed memory with where we called the free
959 * from. This means on a double free error we can report where
960 * the first free came from
962 pool_tc->name = location;
964 if (pool_tc->flags & TALLOC_FLAG_POOLMEM) {
965 _tc_free_poolmem(pool_tc, location);
968 * The tc_memlimit_update_on_free()
969 * call takes into account the
970 * prefix TP_HDR_SIZE allocated before
971 * the pool talloc_chunk.
973 tc_memlimit_update_on_free(pool_tc);
974 TC_INVALIDATE_FULL_CHUNK(pool_tc);
980 if (pool->end == next_tc) {
982 * if pool->pool still points to end of
983 * 'tc' (which is stored in the 'next_tc' variable),
984 * we can reclaim the memory of 'tc'.
991 * Do nothing. The memory is just "wasted", waiting for the pool
992 * itself to be freed.
996 static inline void _tc_free_children_internal(struct talloc_chunk *tc,
998 const char *location);
1001 internal talloc_free call
1003 static inline int _talloc_free_internal(void *ptr, const char *location)
1005 struct talloc_chunk *tc;
1008 if (unlikely(ptr == NULL)) {
1012 /* possibly initialised the talloc fill value */
1013 if (unlikely(!talloc_fill.initialised)) {
1014 const char *fill = getenv(TALLOC_FILL_ENV);
1016 talloc_fill.enabled = true;
1017 talloc_fill.fill_value = strtoul(fill, NULL, 0);
1019 talloc_fill.initialised = true;
1022 tc = talloc_chunk_from_ptr(ptr);
1024 if (unlikely(tc->refs)) {
1026 /* check if this is a reference from a child or
1027 * grandchild back to it's parent or grandparent
1029 * in that case we need to remove the reference and
1030 * call another instance of talloc_free() on the current
1033 is_child = talloc_is_parent(tc->refs, ptr);
1034 _talloc_free_internal(tc->refs, location);
1036 return _talloc_free_internal(ptr, location);
1041 if (unlikely(tc->flags & TALLOC_FLAG_LOOP)) {
1042 /* we have a free loop - stop looping */
1046 if (unlikely(tc->destructor)) {
1047 talloc_destructor_t d = tc->destructor;
1048 if (d == (talloc_destructor_t)-1) {
1051 tc->destructor = (talloc_destructor_t)-1;
1054 * Only replace the destructor pointer if
1055 * calling the destructor didn't modify it.
1057 if (tc->destructor == (talloc_destructor_t)-1) {
1062 tc->destructor = NULL;
1066 _TLIST_REMOVE(tc->parent->child, tc);
1067 if (tc->parent->child) {
1068 tc->parent->child->parent = tc->parent;
1071 if (tc->prev) tc->prev->next = tc->next;
1072 if (tc->next) tc->next->prev = tc->prev;
1073 tc->prev = tc->next = NULL;
1076 tc->flags |= TALLOC_FLAG_LOOP;
1078 _tc_free_children_internal(tc, ptr, location);
1080 tc->flags |= TALLOC_FLAG_FREE;
1082 /* we mark the freed memory with where we called the free
1083 * from. This means on a double free error we can report where
1084 * the first free came from
1086 tc->name = location;
1088 if (tc->flags & TALLOC_FLAG_POOL) {
1089 struct talloc_pool_hdr *pool;
1091 pool = talloc_pool_from_chunk(tc);
1093 if (unlikely(pool->object_count == 0)) {
1094 talloc_abort("Pool object count zero!");
1098 pool->object_count--;
1100 if (likely(pool->object_count != 0)) {
1105 * With object_count==0, a pool becomes a normal piece of
1106 * memory to free. If it's allocated inside a pool, it needs
1107 * to be freed as poolmem, else it needs to be just freed.
1114 if (tc->flags & TALLOC_FLAG_POOLMEM) {
1115 _tc_free_poolmem(tc, location);
1119 tc_memlimit_update_on_free(tc);
1121 TC_INVALIDATE_FULL_CHUNK(tc);
1126 static inline size_t _talloc_total_limit_size(const void *ptr,
1127 struct talloc_memlimit *old_limit,
1128 struct talloc_memlimit *new_limit);
1131 move a lump of memory from one talloc context to another return the
1132 ptr on success, or NULL if it could not be transferred.
1133 passing NULL as ptr will always return NULL with no side effects.
1135 static void *_talloc_steal_internal(const void *new_ctx, const void *ptr)
1137 struct talloc_chunk *tc, *new_tc;
1138 size_t ctx_size = 0;
1140 if (unlikely(!ptr)) {
1144 if (unlikely(new_ctx == NULL)) {
1145 new_ctx = null_context;
1148 tc = talloc_chunk_from_ptr(ptr);
1150 if (tc->limit != NULL) {
1152 ctx_size = _talloc_total_limit_size(ptr, NULL, NULL);
1154 /* Decrement the memory limit from the source .. */
1155 talloc_memlimit_shrink(tc->limit->upper, ctx_size);
1157 if (tc->limit->parent == tc) {
1158 tc->limit->upper = NULL;
1164 if (unlikely(new_ctx == NULL)) {
1166 _TLIST_REMOVE(tc->parent->child, tc);
1167 if (tc->parent->child) {
1168 tc->parent->child->parent = tc->parent;
1171 if (tc->prev) tc->prev->next = tc->next;
1172 if (tc->next) tc->next->prev = tc->prev;
1175 tc->parent = tc->next = tc->prev = NULL;
1176 return discard_const_p(void, ptr);
1179 new_tc = talloc_chunk_from_ptr(new_ctx);
1181 if (unlikely(tc == new_tc || tc->parent == new_tc)) {
1182 return discard_const_p(void, ptr);
1186 _TLIST_REMOVE(tc->parent->child, tc);
1187 if (tc->parent->child) {
1188 tc->parent->child->parent = tc->parent;
1191 if (tc->prev) tc->prev->next = tc->next;
1192 if (tc->next) tc->next->prev = tc->prev;
1193 tc->prev = tc->next = NULL;
1196 tc->parent = new_tc;
1197 if (new_tc->child) new_tc->child->parent = NULL;
1198 _TLIST_ADD(new_tc->child, tc);
1200 if (tc->limit || new_tc->limit) {
1201 ctx_size = _talloc_total_limit_size(ptr, tc->limit,
1203 /* .. and increment it in the destination. */
1204 if (new_tc->limit) {
1205 talloc_memlimit_grow(new_tc->limit, ctx_size);
1209 return discard_const_p(void, ptr);
1213 move a lump of memory from one talloc context to another return the
1214 ptr on success, or NULL if it could not be transferred.
1215 passing NULL as ptr will always return NULL with no side effects.
1217 _PUBLIC_ void *_talloc_steal_loc(const void *new_ctx, const void *ptr, const char *location)
1219 struct talloc_chunk *tc;
1221 if (unlikely(ptr == NULL)) {
1225 tc = talloc_chunk_from_ptr(ptr);
1227 if (unlikely(tc->refs != NULL) && talloc_parent(ptr) != new_ctx) {
1228 struct talloc_reference_handle *h;
1230 talloc_log("WARNING: talloc_steal with references at %s\n",
1233 for (h=tc->refs; h; h=h->next) {
1234 talloc_log("\treference at %s\n",
1240 /* this test is probably too expensive to have on in the
1241 normal build, but it useful for debugging */
1242 if (talloc_is_parent(new_ctx, ptr)) {
1243 talloc_log("WARNING: stealing into talloc child at %s\n", location);
1247 return _talloc_steal_internal(new_ctx, ptr);
1251 this is like a talloc_steal(), but you must supply the old
1252 parent. This resolves the ambiguity in a talloc_steal() which is
1253 called on a context that has more than one parent (via references)
1255 The old parent can be either a reference or a parent
1257 _PUBLIC_ void *talloc_reparent(const void *old_parent, const void *new_parent, const void *ptr)
1259 struct talloc_chunk *tc;
1260 struct talloc_reference_handle *h;
1262 if (unlikely(ptr == NULL)) {
1266 if (old_parent == talloc_parent(ptr)) {
1267 return _talloc_steal_internal(new_parent, ptr);
1270 tc = talloc_chunk_from_ptr(ptr);
1271 for (h=tc->refs;h;h=h->next) {
1272 if (talloc_parent(h) == old_parent) {
1273 if (_talloc_steal_internal(new_parent, h) != h) {
1276 return discard_const_p(void, ptr);
1280 /* it wasn't a parent */
1285 remove a secondary reference to a pointer. This undo's what
1286 talloc_reference() has done. The context and pointer arguments
1287 must match those given to a talloc_reference()
1289 static inline int talloc_unreference(const void *context, const void *ptr)
1291 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1292 struct talloc_reference_handle *h;
1294 if (unlikely(context == NULL)) {
1295 context = null_context;
1298 for (h=tc->refs;h;h=h->next) {
1299 struct talloc_chunk *p = talloc_parent_chunk(h);
1301 if (context == NULL) break;
1302 } else if (TC_PTR_FROM_CHUNK(p) == context) {
1310 return _talloc_free_internal(h, __location__);
1314 remove a specific parent context from a pointer. This is a more
1315 controlled variant of talloc_free()
1317 _PUBLIC_ int talloc_unlink(const void *context, void *ptr)
1319 struct talloc_chunk *tc_p, *new_p, *tc_c;
1326 if (context == NULL) {
1327 context = null_context;
1330 if (talloc_unreference(context, ptr) == 0) {
1334 if (context != NULL) {
1335 tc_c = talloc_chunk_from_ptr(context);
1339 if (tc_c != talloc_parent_chunk(ptr)) {
1343 tc_p = talloc_chunk_from_ptr(ptr);
1345 if (tc_p->refs == NULL) {
1346 return _talloc_free_internal(ptr, __location__);
1349 new_p = talloc_parent_chunk(tc_p->refs);
1351 new_parent = TC_PTR_FROM_CHUNK(new_p);
1356 if (talloc_unreference(new_parent, ptr) != 0) {
1360 _talloc_steal_internal(new_parent, ptr);
/*
 * Format-and-attach a name to an existing talloc pointer (va_list form).
 * The name is itself talloc'ed as a child of 'ptr'.
 * NOTE(review): extract elides lines; code kept byte-identical.
 */
1366 add a name to an existing pointer - va_list version
1368 static inline const char *talloc_set_name_v(const void *ptr, const char *fmt, va_list ap) PRINTF_ATTRIBUTE(2,0);
1370 static inline const char *talloc_set_name_v(const void *ptr, const char *fmt, va_list ap)
1372 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1373 tc->name = talloc_vasprintf(ptr, fmt, ap);
1374 if (likely(tc->name)) {
/* mark the name chunk itself as ".name" so reports can identify it */
1375 _tc_set_name_const(talloc_chunk_from_ptr(tc->name),
/*
 * add a name to an existing pointer (printf-style); returns the new
 * name, or NULL on allocation failure.
 */
1382 add a name to an existing pointer
1384 _PUBLIC_ const char *talloc_set_name(const void *ptr, const char *fmt, ...)
1389 name = talloc_set_name_v(ptr, fmt, ap);
/*
 * create a named talloc pointer; like talloc() but lets the caller
 * attach a printf-formatted name for debugging/reports.
 */
1396 create a named talloc pointer. Any talloc pointer can be named, and
1397 talloc_named() operates just like talloc() except that it allows you
1398 to name the pointer.
1400 _PUBLIC_ void *talloc_named(const void *context, size_t size, const char *fmt, ...)
1405 struct talloc_chunk *tc;
1407 ptr = __talloc(context, size, &tc);
1408 if (unlikely(ptr == NULL)) return NULL;
1411 name = talloc_set_name_v(ptr, fmt, ap);
/* if naming fails we must undo the allocation to avoid a leak */
1414 if (unlikely(name == NULL)) {
1415 _talloc_free_internal(ptr, __location__);
/*
 * Return the name of a talloc pointer; ".reference" for reference
 * handles.  NOTE(review): extract elides lines (including the
 * "UNNAMED" fallback mentioned in the comment); code kept byte-identical.
 */
1423 return the name of a talloc ptr, or "UNNAMED"
1425 static inline const char *__talloc_get_name(const void *ptr)
1427 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1428 if (unlikely(tc->name == TALLOC_MAGIC_REFERENCE)) {
1429 return ".reference";
1431 if (likely(tc->name)) {
/* public wrapper around __talloc_get_name() */
1437 _PUBLIC_ const char *talloc_get_name(const void *ptr)
1439 return __talloc_get_name(ptr);
/*
 * Check a pointer's name; pointer-equality is tried first as a fast
 * path (names are usually string literals), then strcmp.
 */
1443 check if a pointer has the given name. If it does, return the pointer,
1444 otherwise return NULL
1446 _PUBLIC_ void *talloc_check_name(const void *ptr, const char *name)
1449 if (unlikely(ptr == NULL)) return NULL;
1450 pname = __talloc_get_name(ptr);
1451 if (likely(pname == name || strcmp(pname, name) == 0)) {
1452 return discard_const_p(void, ptr);
/*
 * Abort with a descriptive type-mismatch message; falls back to a
 * static string if the message itself cannot be allocated.
 */
1457 static void talloc_abort_type_mismatch(const char *location,
1459 const char *expected)
1463 reason = talloc_asprintf(NULL,
1464 "%s: Type mismatch: name[%s] expected[%s]",
1469 reason = "Type mismatch";
1472 talloc_abort(reason);
/* like talloc_check_name() but aborts the process on mismatch or NULL */
1475 _PUBLIC_ void *_talloc_get_type_abort(const void *ptr, const char *name, const char *location)
1479 if (unlikely(ptr == NULL)) {
1480 talloc_abort_type_mismatch(location, NULL, name);
1484 pname = __talloc_get_name(ptr);
1485 if (likely(pname == name || strcmp(pname, name) == 0)) {
1486 return discard_const_p(void, ptr);
1489 talloc_abort_type_mismatch(location, pname, name);
/*
 * Samba2/3 compatibility: allocate a zero-length named top-level
 * context.  Returns NULL on allocation failure.
 * NOTE(review): extract elides va_start/va_end lines; code kept byte-identical.
 */
1494 this is for compatibility with older versions of talloc
1496 _PUBLIC_ void *talloc_init(const char *fmt, ...)
1501 struct talloc_chunk *tc;
1503 ptr = __talloc(NULL, 0, &tc);
1504 if (unlikely(ptr == NULL)) return NULL;
1507 name = talloc_set_name_v(ptr, fmt, ap);
/* undo the allocation if the name could not be set */
1510 if (unlikely(name == NULL)) {
1511 _talloc_free_internal(ptr, __location__);
/*
 * Free every child of 'tc'.  Children that refuse to die (destructor
 * failed / still referenced) are reparented rather than leaked.
 * NOTE(review): extract elides lines (loop header, braces); code below
 * is kept byte-identical.
 */
1518 static inline void _tc_free_children_internal(struct talloc_chunk *tc,
1520 const char *location)
1523 /* we need to work out who will own an abandoned child
1524 if it cannot be freed. In priority order, the first
1525 choice is owner of any remaining reference to this
1526 pointer, the second choice is our parent, and the
1527 final choice is the null context. */
1528 void *child = TC_PTR_FROM_CHUNK(tc->child);
1529 const void *new_parent = null_context;
1530 if (unlikely(tc->child->refs)) {
1531 struct talloc_chunk *p = talloc_parent_chunk(tc->child->refs);
1532 if (p) new_parent = TC_PTR_FROM_CHUNK(p);
1534 if (unlikely(_talloc_free_internal(child, location) == -1)) {
1535 if (talloc_parent_chunk(child) != tc) {
1537 * Destructor already reparented this child.
1538 * No further reparenting needed.
1542 if (new_parent == null_context) {
1543 struct talloc_chunk *p = talloc_parent_chunk(ptr);
1544 if (p) new_parent = TC_PTR_FROM_CHUNK(p);
1546 _talloc_steal_internal(new_parent, child);
/*
 * Free all children of 'ptr' without freeing 'ptr' itself.  The
 * context's own name chunk (which is stored as a child) is temporarily
 * unlinked so it survives.
 */
1552 this is a replacement for the Samba3 talloc_destroy_pool functionality. It
1553 should probably not be used in new code. It's in here to keep the talloc
1554 code consistent across Samba 3 and 4.
1556 _PUBLIC_ void talloc_free_children(void *ptr)
1558 struct talloc_chunk *tc_name = NULL;
1559 struct talloc_chunk *tc;
1561 if (unlikely(ptr == NULL)) {
1565 tc = talloc_chunk_from_ptr(ptr);
1567 /* we do not want to free the context name if it is a child .. */
1568 if (likely(tc->child)) {
1569 for (tc_name = tc->child; tc_name; tc_name = tc_name->next) {
1570 if (tc->name == TC_PTR_FROM_CHUNK(tc_name)) break;
/* detach the name chunk from the child list before the mass free */
1573 _TLIST_REMOVE(tc->child, tc_name);
1575 tc->child->parent = tc;
1580 _tc_free_children_internal(tc, ptr, __location__);
1582 /* .. so we put it back after all other children have been freed */
1585 tc->child->parent = NULL;
1587 tc_name->parent = tc;
1588 _TLIST_ADD(tc->child, tc_name);
1593 Allocate a bit of memory as a child of an existing pointer
1595 _PUBLIC_ void *_talloc(const void *context, size_t size)
1597 struct talloc_chunk *tc;
1598 return __talloc(context, size, &tc);
1602 externally callable talloc_set_name_const()
1604 _PUBLIC_ void talloc_set_name_const(const void *ptr, const char *name)
1606 _tc_set_name_const(talloc_chunk_from_ptr(ptr), name);
1610 create a named talloc pointer. Any talloc pointer can be named, and
1611 talloc_named() operates just like talloc() except that it allows you
1612 to name the pointer.
1614 _PUBLIC_ void *talloc_named_const(const void *context, size_t size, const char *name)
1616 return _talloc_named_const(context, size, name);
/*
 * Free a talloc pointer and all of its children.  Returns 0 if really
 * freed, -1 if blocked by references or a failing destructor.
 * NOTE(review): extract elides lines; code kept byte-identical.
 */
1620 free a talloc pointer. This also frees all child pointers of this
1623 return 0 if the memory is actually freed, otherwise -1. The memory
1624 will not be freed if the ref_count is > 1 or the destructor (if
1625 any) returns non-zero
1627 _PUBLIC_ int _talloc_free(void *ptr, const char *location)
1629 struct talloc_chunk *tc;
1631 if (unlikely(ptr == NULL)) {
1635 tc = talloc_chunk_from_ptr(ptr);
1637 if (unlikely(tc->refs != NULL)) {
1638 struct talloc_reference_handle *h;
/* exactly one reference and parented on the null context: unambiguous,
 * so treat the free as an unlink from the null context */
1640 if (talloc_parent(ptr) == null_context && tc->refs->next == NULL) {
1641 /* in this case we do know which parent should
1642 get this pointer, as there is really only
1644 return talloc_unlink(null_context, ptr);
/* ambiguous free with live references: log each holder and refuse */
1647 talloc_log("ERROR: talloc_free with references at %s\n",
1650 for (h=tc->refs; h; h=h->next) {
1651 talloc_log("\treference at %s\n",
1657 return _talloc_free_internal(ptr, location);
/*
 * talloc's realloc().  Handles: size 0 (== free), ptr NULL (== malloc),
 * chunks living inside a talloc pool (with several in-place-grow
 * optimizations), and memlimit accounting.  Refuses to realloc
 * referenced pointers and pools themselves.
 * NOTE(review): this extract elides many original lines (braces,
 * returns, #else branches); code below is kept byte-identical and
 * comments describe only visible behavior.
 */
1663 A talloc version of realloc. The context argument is only used if
1666 _PUBLIC_ void *_talloc_realloc(const void *context, void *ptr, size_t size, const char *name)
1668 struct talloc_chunk *tc;
1670 bool malloced = false;
1671 struct talloc_pool_hdr *pool_hdr = NULL;
1672 size_t old_size = 0;
1673 size_t new_size = 0;
1675 /* size zero is equivalent to free() */
1676 if (unlikely(size == 0)) {
1677 talloc_unlink(context, ptr);
1681 if (unlikely(size >= MAX_TALLOC_SIZE)) {
1685 /* realloc(NULL) is equivalent to malloc() */
1687 return _talloc_named_const(context, size, name);
1690 tc = talloc_chunk_from_ptr(ptr);
1692 /* don't allow realloc on referenced pointers */
1693 if (unlikely(tc->refs)) {
1697 /* don't let anybody try to realloc a talloc_pool */
1698 if (unlikely(tc->flags & TALLOC_FLAG_POOL)) {
/* growing under a memlimit: check the delta fits before doing anything */
1702 if (tc->limit && (size > tc->size)) {
1703 if (!talloc_memlimit_check(tc->limit, (size - tc->size))) {
1709 /* handle realloc inside a talloc_pool */
1710 if (unlikely(tc->flags & TALLOC_FLAG_POOLMEM)) {
1711 pool_hdr = tc->pool;
1714 #if (ALWAYS_REALLOC == 0)
1715 /* don't shrink if we have less than 1k to gain */
1716 if (size < tc->size && tc->limit == NULL) {
1718 void *next_tc = tc_next_chunk(tc);
1719 TC_INVALIDATE_SHRINK_CHUNK(tc, size);
/* shrinking the last chunk in a pool also moves the pool's end marker */
1721 if (next_tc == pool_hdr->end) {
1722 /* note: tc->size has changed, so this works */
1723 pool_hdr->end = tc_next_chunk(tc);
1726 } else if ((tc->size - size) < 1024) {
1728 * if we call TC_INVALIDATE_SHRINK_CHUNK() here
1729 * we would need to call TC_UNDEFINE_GROW_CHUNK()
1730 * after each realloc call, which slows down
1731 * testing a lot :-(.
1733 * That is why we only mark memory as undefined here.
1735 TC_UNDEFINE_SHRINK_CHUNK(tc, size);
1737 /* do not shrink if we have less than 1k to gain */
1741 } else if (tc->size == size) {
1743 * do not change the pointer if it is exactly
1750 /* by resetting magic we catch users of the old memory */
1751 tc->flags |= TALLOC_FLAG_FREE;
/* pool chunk path: try to get a new chunk from the pool, else malloc */
1755 new_ptr = tc_alloc_pool(tc, size + TC_HDR_SIZE, 0);
1756 pool_hdr->object_count--;
1758 if (new_ptr == NULL) {
1759 new_ptr = malloc(TC_HDR_SIZE+size);
1765 memcpy(new_ptr, tc, MIN(tc->size,size) + TC_HDR_SIZE);
1766 TC_INVALIDATE_FULL_CHUNK(tc);
1769 /* We're doing malloc then free here, so record the difference. */
1770 old_size = tc->size;
1772 new_ptr = malloc(size + TC_HDR_SIZE);
1774 memcpy(new_ptr, tc, MIN(tc->size, size) + TC_HDR_SIZE);
/* in-pool grow attempts follow */
1780 struct talloc_chunk *pool_tc;
1781 void *next_tc = tc_next_chunk(tc);
1782 size_t old_chunk_size = TC_ALIGN16(TC_HDR_SIZE + tc->size);
1783 size_t new_chunk_size = TC_ALIGN16(TC_HDR_SIZE + size);
1784 size_t space_needed;
1786 unsigned int chunk_count = pool_hdr->object_count;
1788 pool_tc = talloc_chunk_from_pool(pool_hdr);
1789 if (!(pool_tc->flags & TALLOC_FLAG_FREE)) {
1793 if (chunk_count == 1) {
1795 * optimize for the case where 'tc' is the only
1796 * chunk in the pool.
1798 char *start = tc_pool_first_chunk(pool_hdr);
1799 space_needed = new_chunk_size;
1800 space_left = (char *)tc_pool_end(pool_hdr) - start;
1802 if (space_left >= space_needed) {
1803 size_t old_used = TC_HDR_SIZE + tc->size;
1804 size_t new_used = TC_HDR_SIZE + size;
1807 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
1811 * start -> tc may have
1812 * been freed and thus been marked as
1813 * VALGRIND_MEM_NOACCESS. Set it to
1814 * VALGRIND_MEM_UNDEFINED so we can
1815 * copy into it without valgrind errors.
1816 * We can't just mark
1817 * new_ptr -> new_ptr + old_used
1818 * as this may overlap on top of tc,
1819 * (which is why we use memmove, not
1820 * memcpy below) hence the MIN.
1822 size_t undef_len = MIN((((char *)tc) - ((char *)new_ptr)),old_used);
1823 VALGRIND_MAKE_MEM_UNDEFINED(new_ptr, undef_len);
1827 memmove(new_ptr, tc, old_used);
1829 tc = (struct talloc_chunk *)new_ptr;
1830 TC_UNDEFINE_GROW_CHUNK(tc, size);
1833 * first we do not align the pool pointer
1834 * because we want to invalidate the padding
1837 pool_hdr->end = new_used + (char *)new_ptr;
1838 tc_invalidate_pool(pool_hdr);
1840 /* now the aligned pointer */
1841 pool_hdr->end = new_chunk_size + (char *)new_ptr;
/* aligned size unchanged: nothing to move, just un-mark the chunk */
1848 if (new_chunk_size == old_chunk_size) {
1849 TC_UNDEFINE_GROW_CHUNK(tc, size);
1850 tc->flags &= ~TALLOC_FLAG_FREE;
1855 if (next_tc == pool_hdr->end) {
1857 * optimize for the case where 'tc' is the last
1858 * chunk in the pool.
1860 space_needed = new_chunk_size - old_chunk_size;
1861 space_left = tc_pool_space_left(pool_hdr);
1863 if (space_left >= space_needed) {
1864 TC_UNDEFINE_GROW_CHUNK(tc, size);
1865 tc->flags &= ~TALLOC_FLAG_FREE;
1867 pool_hdr->end = tc_next_chunk(tc);
/* fallback: new chunk from the pool, else plain malloc, then copy */
1872 new_ptr = tc_alloc_pool(tc, size + TC_HDR_SIZE, 0);
1874 if (new_ptr == NULL) {
1875 new_ptr = malloc(TC_HDR_SIZE+size);
1881 memcpy(new_ptr, tc, MIN(tc->size,size) + TC_HDR_SIZE);
1883 _tc_free_poolmem(tc, __location__ "_talloc_realloc");
1887 /* We're doing realloc here, so record the difference. */
1888 old_size = tc->size;
1890 new_ptr = realloc(tc, size + TC_HDR_SIZE);
/* realloc failed: restore the FREE flag we set above and bail out */
1894 if (unlikely(!new_ptr)) {
1895 tc->flags &= ~TALLOC_FLAG_FREE;
1899 tc = (struct talloc_chunk *)new_ptr;
1900 tc->flags &= ~TALLOC_FLAG_FREE;
1902 tc->flags &= ~TALLOC_FLAG_POOLMEM;
/* the chunk may have moved: re-point all siblings/parent/children at it */
1905 tc->parent->child = tc;
1908 tc->child->parent = tc;
1912 tc->prev->next = tc;
1915 tc->next->prev = tc;
/* adjust memlimit accounting by the actual size delta */
1918 if (new_size > old_size) {
1919 talloc_memlimit_grow(tc->limit, new_size - old_size);
1920 } else if (new_size < old_size) {
1921 talloc_memlimit_shrink(tc->limit, old_size - new_size);
1925 _tc_set_name_const(tc, name);
1927 return TC_PTR_FROM_CHUNK(tc);
/*
 * talloc_move(): steal *pptr to new_ctx and NULL the old location.
 * NOTE(review): extract elides lines; code kept byte-identical.
 */
1931 a wrapper around talloc_steal() for situations where you are moving a pointer
1932 between two structures, and want the old pointer to be set to NULL
1934 _PUBLIC_ void *_talloc_move(const void *new_ctx, const void *_pptr)
1936 const void **pptr = discard_const_p(const void *,_pptr);
1937 void *ret = talloc_steal(new_ctx, discard_const_p(void, *pptr));
/* what _talloc_total_mem_internal should count */
1942 enum talloc_mem_count_type {
/*
 * Recursive worker for total-size / block-count / memlimit-size
 * queries.  Also (ab)used to rewrite tc->limit pointers when
 * old_limit/new_limit are given.  Uses TALLOC_FLAG_LOOP to guard
 * against reference cycles.
 */
1948 static inline size_t _talloc_total_mem_internal(const void *ptr,
1949 enum talloc_mem_count_type type,
1950 struct talloc_memlimit *old_limit,
1951 struct talloc_memlimit *new_limit)
1954 struct talloc_chunk *c, *tc;
1963 tc = talloc_chunk_from_ptr(ptr);
1965 if (old_limit || new_limit) {
1966 if (tc->limit && tc->limit->upper == old_limit) {
1967 tc->limit->upper = new_limit;
1971 /* optimize in the memlimits case */
1972 if (type == TOTAL_MEM_LIMIT &&
1973 tc->limit != NULL &&
1974 tc->limit != old_limit &&
1975 tc->limit->parent == tc) {
1976 return tc->limit->cur_size;
/* cycle guard: if we're already inside this chunk, stop */
1979 if (tc->flags & TALLOC_FLAG_LOOP) {
1983 tc->flags |= TALLOC_FLAG_LOOP;
1985 if (old_limit || new_limit) {
1986 if (old_limit == tc->limit) {
1987 tc->limit = new_limit;
1992 case TOTAL_MEM_SIZE:
/* reference handles are bookkeeping, not user memory */
1993 if (likely(tc->name != TALLOC_MAGIC_REFERENCE)) {
1997 case TOTAL_MEM_BLOCKS:
2000 case TOTAL_MEM_LIMIT:
2001 if (likely(tc->name != TALLOC_MAGIC_REFERENCE)) {
2003 * Don't count memory allocated from a pool
2004 * when calculating limits. Only count the
2007 if (!(tc->flags & TALLOC_FLAG_POOLMEM)) {
2008 if (tc->flags & TALLOC_FLAG_POOL) {
2010 * If this is a pool, the allocated
2011 * size is in the pool header, and
2012 * remember to add in the prefix
2015 struct talloc_pool_hdr *pool_hdr
2016 = talloc_pool_from_chunk(tc);
2017 total = pool_hdr->poolsize +
2021 total = tc->size + TC_HDR_SIZE;
/* recurse over all children */
2027 for (c = tc->child; c; c = c->next) {
2028 total += _talloc_total_mem_internal(TC_PTR_FROM_CHUNK(c), type,
2029 old_limit, new_limit);
2032 tc->flags &= ~TALLOC_FLAG_LOOP;
2038 return the total size of a talloc pool (subtree)
2040 _PUBLIC_ size_t talloc_total_size(const void *ptr)
2042 return _talloc_total_mem_internal(ptr, TOTAL_MEM_SIZE, NULL, NULL);
2046 return the total number of blocks in a talloc pool (subtree)
2048 _PUBLIC_ size_t talloc_total_blocks(const void *ptr)
2050 return _talloc_total_mem_internal(ptr, TOTAL_MEM_BLOCKS, NULL, NULL);
/*
 * Count the external references held on 'ptr' by walking its refs list.
 * NOTE(review): extract elides the counter variable and return; code
 * kept byte-identical.
 */
2054 return the number of external references to a pointer
2056 _PUBLIC_ size_t talloc_reference_count(const void *ptr)
2058 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
2059 struct talloc_reference_handle *h;
2062 for (h=tc->refs;h;h=h->next) {
/*
 * Walk the talloc tree under 'ptr' invoking 'callback' for every chunk
 * (is_ref=1 for reference handles).  Depth-limited when max_depth >= 0;
 * TALLOC_FLAG_LOOP prevents infinite recursion through references.
 * NOTE(review): extract elides lines; code kept byte-identical.
 */
2069 report on memory usage by all children of a pointer, giving a full tree view
2071 _PUBLIC_ void talloc_report_depth_cb(const void *ptr, int depth, int max_depth,
2072 void (*callback)(const void *ptr,
2073 int depth, int max_depth,
2075 void *private_data),
2078 struct talloc_chunk *c, *tc;
2083 if (ptr == NULL) return;
2085 tc = talloc_chunk_from_ptr(ptr);
/* already being visited higher up the stack: cut the cycle */
2087 if (tc->flags & TALLOC_FLAG_LOOP) {
2091 callback(ptr, depth, max_depth, 0, private_data);
2093 if (max_depth >= 0 && depth >= max_depth) {
2097 tc->flags |= TALLOC_FLAG_LOOP;
2098 for (c=tc->child;c;c=c->next) {
2099 if (c->name == TALLOC_MAGIC_REFERENCE) {
2100 struct talloc_reference_handle *h = (struct talloc_reference_handle *)TC_PTR_FROM_CHUNK(c);
2101 callback(h->ptr, depth + 1, max_depth, 1, private_data);
2103 talloc_report_depth_cb(TC_PTR_FROM_CHUNK(c), depth + 1, max_depth, callback, private_data);
2106 tc->flags &= ~TALLOC_FLAG_LOOP;
/*
 * Callback for talloc_report_depth_file(): pretty-print one chunk
 * (or reference) to the FILE passed via private_data.
 */
2109 static void talloc_report_depth_FILE_helper(const void *ptr, int depth, int max_depth, int is_ref, void *_f)
2111 const char *name = __talloc_get_name(ptr);
2112 struct talloc_chunk *tc;
2113 FILE *f = (FILE *)_f;
2116 fprintf(f, "%*sreference to: %s\n", depth*4, "", name);
2120 tc = talloc_chunk_from_ptr(ptr);
/* memlimit roots get an extra annotation */
2121 if (tc->limit && tc->limit->parent == tc) {
2122 fprintf(f, "%*s%-30s is a memlimit context"
2123 " (max_size = %lu bytes, cur_size = %lu bytes)\n",
2126 (unsigned long)tc->limit->max_size,
2127 (unsigned long)tc->limit->cur_size);
/* depth 0: the report header line */
2131 fprintf(f,"%stalloc report on '%s' (total %6lu bytes in %3lu blocks)\n",
2132 (max_depth < 0 ? "full " :""), name,
2133 (unsigned long)talloc_total_size(ptr),
2134 (unsigned long)talloc_total_blocks(ptr));
2138 fprintf(f, "%*s%-30s contains %6lu bytes in %3lu blocks (ref %d) %p\n",
2141 (unsigned long)talloc_total_size(ptr),
2142 (unsigned long)talloc_total_blocks(ptr),
2143 (int)talloc_reference_count(ptr), ptr);
/* dump printable bytes verbatim, others as ~hex */
2146 fprintf(f, "content: ");
2147 if (talloc_total_size(ptr)) {
2148 int tot = talloc_total_size(ptr);
2151 for (i = 0; i < tot; i++) {
2152 if ((((char *)ptr)[i] > 31) && (((char *)ptr)[i] < 126)) {
2153 fprintf(f, "%c", ((char *)ptr)[i]);
2155 fprintf(f, "~%02x", ((char *)ptr)[i]);
/* convenience wrapper: tree report to a FILE */
2164 report on memory usage by all children of a pointer, giving a full tree view
2166 _PUBLIC_ void talloc_report_depth_file(const void *ptr, int depth, int max_depth, FILE *f)
2169 talloc_report_depth_cb(ptr, depth, max_depth, talloc_report_depth_FILE_helper, f);
2175 report on memory usage by all children of a pointer, giving a full tree view
2177 _PUBLIC_ void talloc_report_full(const void *ptr, FILE *f)
2179 talloc_report_depth_file(ptr, 0, -1, f);
2183 report on memory usage by all children of a pointer
2185 _PUBLIC_ void talloc_report(const void *ptr, FILE *f)
2187 talloc_report_depth_file(ptr, 0, 1, f);
2191 report on any memory hanging off the null context
2193 static void talloc_report_null(void)
2195 if (talloc_total_size(null_context) != 0) {
2196 talloc_report(null_context, stderr);
2201 report on any memory hanging off the null context
2203 static void talloc_report_null_full(void)
2205 if (talloc_total_size(null_context) != 0) {
2206 talloc_report_full(null_context, stderr);
2211 enable tracking of the NULL context
2213 _PUBLIC_ void talloc_enable_null_tracking(void)
2215 if (null_context == NULL) {
2216 null_context = _talloc_named_const(NULL, 0, "null_context");
2217 if (autofree_context != NULL) {
2218 talloc_reparent(NULL, null_context, autofree_context);
2224 enable tracking of the NULL context, not moving the autofree context
2225 into the NULL context. This is needed for the talloc testsuite
2227 _PUBLIC_ void talloc_enable_null_tracking_no_autofree(void)
2229 if (null_context == NULL) {
2230 null_context = _talloc_named_const(NULL, 0, "null_context");
/*
 * Tear down the null context.  Children (and following siblings) are
 * detached first so freeing the context does not free tracked memory.
 * NOTE(review): extract elides lines (the child/next list reset); code
 * kept byte-identical.
 */
2235 disable tracking of the NULL context
2237 _PUBLIC_ void talloc_disable_null_tracking(void)
2239 if (null_context != NULL) {
2240 /* we have to move any children onto the real NULL
2242 struct talloc_chunk *tc, *tc2;
2243 tc = talloc_chunk_from_ptr(null_context);
/* detach direct children from the null context */
2244 for (tc2 = tc->child; tc2; tc2=tc2->next) {
2245 if (tc2->parent == tc) tc2->parent = NULL;
2246 if (tc2->prev == tc) tc2->prev = NULL;
/* and anything chained after it */
2248 for (tc2 = tc->next; tc2; tc2=tc2->next) {
2249 if (tc2->parent == tc) tc2->parent = NULL;
2250 if (tc2->prev == tc) tc2->prev = NULL;
2255 talloc_free(null_context);
2256 null_context = NULL;
2260 enable leak reporting on exit
2262 _PUBLIC_ void talloc_enable_leak_report(void)
2264 talloc_enable_null_tracking();
2265 atexit(talloc_report_null);
2269 enable full leak reporting on exit
2271 _PUBLIC_ void talloc_enable_leak_report_full(void)
2273 talloc_enable_null_tracking();
2274 atexit(talloc_report_null_full);
2278 talloc and zero memory.
2280 _PUBLIC_ void *_talloc_zero(const void *ctx, size_t size, const char *name)
2282 void *p = _talloc_named_const(ctx, size, name);
2285 memset(p, '\0', size);
2292 memdup with a talloc.
2294 _PUBLIC_ void *_talloc_memdup(const void *t, const void *p, size_t size, const char *name)
2296 void *newp = _talloc_named_const(t, size, name);
2299 memcpy(newp, p, size);
/*
  Copy 'len' bytes of 'p' into a new NUL-terminated talloc'ed string
  under 't'.  The string itself is used as the chunk's name.
  Returns NULL on allocation failure.
*/
static inline char *__talloc_strlendup(const void *t, const char *p, size_t len)
{
	struct talloc_chunk *tc;
	char *dup;

	dup = (char *)__talloc(t, len + 1, &tc);
	if (dup == NULL) {
		return NULL;
	}

	memcpy(dup, p, len);
	dup[len] = '\0';

	/* name the chunk after its own contents */
	_tc_set_name_const(tc, dup);
	return dup;
}
2321 strdup with a talloc
2323 _PUBLIC_ char *talloc_strdup(const void *t, const char *p)
2325 if (unlikely(!p)) return NULL;
2326 return __talloc_strlendup(t, p, strlen(p));
2330 strndup with a talloc
2332 _PUBLIC_ char *talloc_strndup(const void *t, const char *p, size_t n)
2334 if (unlikely(!p)) return NULL;
2335 return __talloc_strlendup(t, p, strnlen(p, n));
2338 static inline char *__talloc_strlendup_append(char *s, size_t slen,
2339 const char *a, size_t alen)
2343 ret = talloc_realloc(NULL, s, char, slen + alen + 1);
2344 if (unlikely(!ret)) return NULL;
2346 /* append the string and the trailing \0 */
2347 memcpy(&ret[slen], a, alen);
2350 _tc_set_name_const(talloc_chunk_from_ptr(ret), ret);
2355 * Appends at the end of the string.
2357 _PUBLIC_ char *talloc_strdup_append(char *s, const char *a)
2360 return talloc_strdup(NULL, a);
2367 return __talloc_strlendup_append(s, strlen(s), a, strlen(a));
2371 * Appends at the end of the talloc'ed buffer,
2372 * not the end of the string.
2374 _PUBLIC_ char *talloc_strdup_append_buffer(char *s, const char *a)
2379 return talloc_strdup(NULL, a);
2386 slen = talloc_get_size(s);
2387 if (likely(slen > 0)) {
2391 return __talloc_strlendup_append(s, slen, a, strlen(a));
2395 * Appends at the end of the string.
2397 _PUBLIC_ char *talloc_strndup_append(char *s, const char *a, size_t n)
2400 return talloc_strndup(NULL, a, n);
2407 return __talloc_strlendup_append(s, strlen(s), a, strnlen(a, n));
2411 * Appends at the end of the talloc'ed buffer,
2412 * not the end of the string.
2414 _PUBLIC_ char *talloc_strndup_append_buffer(char *s, const char *a, size_t n)
2419 return talloc_strndup(NULL, a, n);
2426 slen = talloc_get_size(s);
2427 if (likely(slen > 0)) {
2431 return __talloc_strlendup_append(s, slen, a, strnlen(a, n));
2434 #ifndef HAVE_VA_COPY
2435 #ifdef HAVE___VA_COPY
2436 #define va_copy(dest, src) __va_copy(dest, src)
2438 #define va_copy(dest, src) (dest) = (src)
/*
 * vasprintf() with a talloc: format into a new chunk under 't'.
 * Formats twice — once into a small stack buffer to learn the length,
 * then into the exact-size allocation (needed on old Solaris).
 * NOTE(review): extract elides the va_copy/va_end lines and the stack
 * buffer declaration; code kept byte-identical.
 */
2442 _PUBLIC_ char *talloc_vasprintf(const void *t, const char *fmt, va_list ap)
2447 struct talloc_chunk *tc;
2450 /* this call looks strange, but it makes it work on older solaris boxes */
2452 len = vsnprintf(buf, sizeof(buf), fmt, ap2);
2454 if (unlikely(len < 0)) {
2458 ret = (char *)__talloc(t, len+1, &tc);
2459 if (unlikely(!ret)) return NULL;
/* short result already fully captured in the stack buffer */
2461 if (len < sizeof(buf)) {
2462 memcpy(ret, buf, len+1);
2465 vsnprintf(ret, len+1, fmt, ap2);
/* name the chunk after its own contents */
2469 _tc_set_name_const(talloc_chunk_from_ptr(ret), ret);
2475 Perform string formatting, and return a pointer to newly allocated
2476 memory holding the result, inside a memory pool.
2478 _PUBLIC_ char *talloc_asprintf(const void *t, const char *fmt, ...)
2484 ret = talloc_vasprintf(t, fmt, ap);
/*
 * Worker for the vasprintf_append family: realloc 's' (current length
 * 'slen') and format 'fmt' onto its end.  Formats once into a 1-byte
 * probe to learn the length, then into the grown buffer.
 * NOTE(review): extract elides lines (va_copy/va_end, probe char decl);
 * code kept byte-identical.
 */
2489 static inline char *__talloc_vaslenprintf_append(char *s, size_t slen,
2490 const char *fmt, va_list ap)
2491 PRINTF_ATTRIBUTE(3,0);
2493 static inline char *__talloc_vaslenprintf_append(char *s, size_t slen,
2494 const char *fmt, va_list ap)
2501 alen = vsnprintf(&c, 1, fmt, ap2);
2505 /* Either the vsnprintf failed or the format resulted in
2506 * no characters being formatted. In the former case, we
2507 * ought to return NULL, in the latter we ought to return
2508 * the original string. Most current callers of this
2509 * function expect it to never return NULL.
2514 s = talloc_realloc(NULL, s, char, slen + alen + 1);
2515 if (!s) return NULL;
2518 vsnprintf(s + slen, alen + 1, fmt, ap2);
/* re-name the chunk after its (possibly relocated) contents */
2521 _tc_set_name_const(talloc_chunk_from_ptr(s), s);
2526 * Realloc @p s to append the formatted result of @p fmt and @p ap,
2527 * and return @p s, which may have moved. Good for gradually
2528 * accumulating output into a string buffer. Appends at the end
2531 _PUBLIC_ char *talloc_vasprintf_append(char *s, const char *fmt, va_list ap)
2534 return talloc_vasprintf(NULL, fmt, ap);
2537 return __talloc_vaslenprintf_append(s, strlen(s), fmt, ap);
2541 * Realloc @p s to append the formatted result of @p fmt and @p ap,
2542 * and return @p s, which may have moved. Always appends at the
2543 * end of the talloc'ed buffer, not the end of the string.
2545 _PUBLIC_ char *talloc_vasprintf_append_buffer(char *s, const char *fmt, va_list ap)
2550 return talloc_vasprintf(NULL, fmt, ap);
2553 slen = talloc_get_size(s);
2554 if (likely(slen > 0)) {
2558 return __talloc_vaslenprintf_append(s, slen, fmt, ap);
2562 Realloc @p s to append the formatted result of @p fmt and return @p
2563 s, which may have moved. Good for gradually accumulating output
2564 into a string buffer.
2566 _PUBLIC_ char *talloc_asprintf_append(char *s, const char *fmt, ...)
2571 s = talloc_vasprintf_append(s, fmt, ap);
2577 Realloc @p s to append the formatted result of @p fmt and return @p
2578 s, which may have moved. Good for gradually accumulating output
2581 _PUBLIC_ char *talloc_asprintf_append_buffer(char *s, const char *fmt, ...)
2586 s = talloc_vasprintf_append_buffer(s, fmt, ap);
2592 alloc an array, checking for integer overflow in the array size
2594 _PUBLIC_ void *_talloc_array(const void *ctx, size_t el_size, unsigned count, const char *name)
2596 if (count >= MAX_TALLOC_SIZE/el_size) {
2599 return _talloc_named_const(ctx, el_size * count, name);
2603 alloc an zero array, checking for integer overflow in the array size
2605 _PUBLIC_ void *_talloc_zero_array(const void *ctx, size_t el_size, unsigned count, const char *name)
2607 if (count >= MAX_TALLOC_SIZE/el_size) {
2610 return _talloc_zero(ctx, el_size * count, name);
2614 realloc an array, checking for integer overflow in the array size
2616 _PUBLIC_ void *_talloc_realloc_array(const void *ctx, void *ptr, size_t el_size, unsigned count, const char *name)
2618 if (count >= MAX_TALLOC_SIZE/el_size) {
2621 return _talloc_realloc(ctx, ptr, el_size * count, name);
2625 a function version of talloc_realloc(), so it can be passed as a function pointer
2626 to libraries that want a realloc function (a realloc function encapsulates
2627 all the basic capabilities of an allocation library, which is why this is useful)
2629 _PUBLIC_ void *talloc_realloc_fn(const void *context, void *ptr, size_t size)
2631 return _talloc_realloc(context, ptr, size, NULL);
2635 static int talloc_autofree_destructor(void *ptr)
2637 autofree_context = NULL;
2641 static void talloc_autofree(void)
2643 talloc_free(autofree_context);
2647 return a context which will be auto-freed on exit
2648 this is useful for reducing the noise in leak reports
2650 _PUBLIC_ void *talloc_autofree_context(void)
2652 if (autofree_context == NULL) {
2653 autofree_context = _talloc_named_const(NULL, 0, "autofree_context");
2654 talloc_set_destructor(autofree_context, talloc_autofree_destructor);
2655 atexit(talloc_autofree);
2657 return autofree_context;
2660 _PUBLIC_ size_t talloc_get_size(const void *context)
2662 struct talloc_chunk *tc;
2664 if (context == NULL) {
2665 context = null_context;
2667 if (context == NULL) {
2671 tc = talloc_chunk_from_ptr(context);
/*
 * Walk up the parent chain looking for a context named 'name'; return
 * it, or NULL when no ancestor matches.
 * NOTE(review): extract elides lines (the step to the parent and the
 * final return); code kept byte-identical.
 */
2677 find a parent of this context that has the given name, if any
2679 _PUBLIC_ void *talloc_find_parent_byname(const void *context, const char *name)
2681 struct talloc_chunk *tc;
2683 if (context == NULL) {
2687 tc = talloc_chunk_from_ptr(context);
2689 if (tc->name && strcmp(tc->name, name) == 0) {
2690 return TC_PTR_FROM_CHUNK(tc);
/* walk to the head of the sibling list; the head's parent link is set */
2692 while (tc && tc->prev) tc = tc->prev;
/*
 * Print the chain of parents of 'context' to 'file', one per line.
 */
2701 show the parentage of a context
2703 _PUBLIC_ void talloc_show_parents(const void *context, FILE *file)
2705 struct talloc_chunk *tc;
2707 if (context == NULL) {
2708 fprintf(file, "talloc no parents for NULL\n");
2712 tc = talloc_chunk_from_ptr(context);
2713 fprintf(file, "talloc parents of '%s'\n", __talloc_get_name(context));
2715 fprintf(file, "\t'%s'\n", __talloc_get_name(TC_PTR_FROM_CHUNK(tc)));
2716 while (tc && tc->prev) tc = tc->prev;
/*
 * Depth-limited check whether 'ptr' is an ancestor of 'context'
 * (or 'context' itself).  Returns 1 when it is.
 */
2725 return 1 if ptr is a parent of context
2727 static int _talloc_is_parent(const void *context, const void *ptr, int depth)
2729 struct talloc_chunk *tc;
2731 if (context == NULL) {
2735 tc = talloc_chunk_from_ptr(context);
2740 if (TC_PTR_FROM_CHUNK(tc) == ptr) return 1;
2741 while (tc && tc->prev) tc = tc->prev;
/* public wrapper with the default recursion limit */
2751 return 1 if ptr is a parent of context
2753 _PUBLIC_ int talloc_is_parent(const void *context, const void *ptr)
2755 return _talloc_is_parent(context, ptr, TALLOC_MAX_DEPTH);
2759 return the total size of memory used by this context and all children
2761 static inline size_t _talloc_total_limit_size(const void *ptr,
2762 struct talloc_memlimit *old_limit,
2763 struct talloc_memlimit *new_limit)
2765 return _talloc_total_mem_internal(ptr, TOTAL_MEM_LIMIT,
2766 old_limit, new_limit);
2769 static inline bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size)
2771 struct talloc_memlimit *l;
2773 for (l = limit; l != NULL; l = l->upper) {
2774 if (l->max_size != 0 &&
2775 ((l->max_size <= l->cur_size) ||
2776 (l->max_size - l->cur_size < size))) {
/*
 * Adjust memlimit counters when a chunk is freed.  Pool members are
 * skipped (the pool itself carries the accounting); pools additionally
 * subtract their header prefix.
 * NOTE(review): extract elides lines (early-exit when tc->limit is
 * NULL, and the limit-root cleanup tail); code kept byte-identical.
 */
2785 Update memory limits when freeing a talloc_chunk.
2787 static void tc_memlimit_update_on_free(struct talloc_chunk *tc)
2789 size_t limit_shrink_size;
2796 * Pool entries don't count. Only the pools
2797 * themselves are counted as part of the memory
2798 * limits. Note that this also takes care of
2799 * nested pools which have both flags
2800 * TALLOC_FLAG_POOLMEM|TALLOC_FLAG_POOL set.
2802 if (tc->flags & TALLOC_FLAG_POOLMEM) {
2807 * If we are part of a memory limited context hierarchy
2808 * we need to subtract the memory used from the counters
2811 limit_shrink_size = tc->size+TC_HDR_SIZE;
2814 * If we're deallocating a pool, take into
2815 * account the prefix size added for the pool.
2818 if (tc->flags & TALLOC_FLAG_POOL) {
2819 limit_shrink_size += TP_HDR_SIZE;
2822 talloc_memlimit_shrink(tc->limit, limit_shrink_size);
/* chunk was itself a memlimit root: its limit struct goes away too */
2824 if (tc->limit->parent == tc) {
2832 Increase memory limit accounting after a malloc/realloc.
2834 static void talloc_memlimit_grow(struct talloc_memlimit *limit,
2837 struct talloc_memlimit *l;
2839 for (l = limit; l != NULL; l = l->upper) {
2840 size_t new_cur_size = l->cur_size + size;
2841 if (new_cur_size < l->cur_size) {
2842 talloc_abort("logic error in talloc_memlimit_grow\n");
2845 l->cur_size = new_cur_size;
2850 Decrease memory limit accounting after a free/realloc.
2852 static void talloc_memlimit_shrink(struct talloc_memlimit *limit,
2855 struct talloc_memlimit *l;
2857 for (l = limit; l != NULL; l = l->upper) {
2858 if (l->cur_size < size) {
2859 talloc_abort("logic error in talloc_memlimit_shrink\n");
2862 l->cur_size = l->cur_size - size;
2866 _PUBLIC_ int talloc_set_memlimit(const void *ctx, size_t max_size)
2868 struct talloc_chunk *tc = talloc_chunk_from_ptr(ctx);
2869 struct talloc_memlimit *orig_limit;
2870 struct talloc_memlimit *limit = NULL;
2872 if (tc->limit && tc->limit->parent == tc) {
2873 tc->limit->max_size = max_size;
2876 orig_limit = tc->limit;
2878 limit = malloc(sizeof(struct talloc_memlimit));
2879 if (limit == NULL) {
2883 limit->max_size = max_size;
2884 limit->cur_size = _talloc_total_limit_size(ctx, tc->limit, limit);
2887 limit->upper = orig_limit;
2889 limit->upper = NULL;