/*
   Samba Unix SMB/CIFS implementation.

   Samba trivial allocation library - new interface

   NOTE: Please read talloc_guide.txt for full documentation

   Copyright (C) Andrew Tridgell 2004
   Copyright (C) Stefan Metzmacher 2006

   ** NOTE! The following LGPL license applies to the talloc
   ** library. This does NOT imply that all of Samba is released
   ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.

   inspired by http://swapped.cc/halloc/
*/
36 #ifdef HAVE_SYS_AUXV_H
40 #ifdef TALLOC_BUILD_VERSION_MAJOR
41 #if (TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR)
42 #error "TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR"
46 #ifdef TALLOC_BUILD_VERSION_MINOR
47 #if (TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR)
48 #error "TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR"
52 /* Special macros that are no-ops except when run under Valgrind on
53 * x86. They've moved a little bit from valgrind 1.0.4 to 1.9.4 */
54 #ifdef HAVE_VALGRIND_MEMCHECK_H
55 /* memcheck.h includes valgrind.h */
56 #include <valgrind/memcheck.h>
57 #elif defined(HAVE_VALGRIND_H)
61 /* use this to force every realloc to change the pointer, to stress test
62 code that might not cope */
63 #define ALWAYS_REALLOC 0
66 #define MAX_TALLOC_SIZE 0x10000000
68 #define TALLOC_FLAG_FREE 0x01
69 #define TALLOC_FLAG_LOOP 0x02
70 #define TALLOC_FLAG_POOL 0x04 /* This is a talloc pool */
71 #define TALLOC_FLAG_POOLMEM 0x08 /* This is allocated in a pool */
74 * Bits above this are random, used to make it harder to fake talloc
75 * headers during an attack. Try not to change this without good reason.
77 #define TALLOC_FLAG_MASK 0x0F
79 #define TALLOC_MAGIC_REFERENCE ((const char *)1)
81 #define TALLOC_MAGIC_BASE 0xe814ec70
82 static unsigned int talloc_magic = (
85 (TALLOC_VERSION_MAJOR << 12) +
86 (TALLOC_VERSION_MINOR << 4)));
/* by default we abort when given a bad pointer (such as when talloc_free() is called
   on a pointer that came from malloc() */
#ifndef TALLOC_ABORT
#define TALLOC_ABORT(reason) abort()
#endif
/* strip a const qualifier without a compiler warning; routed through
 * intptr_t when available to keep strict compilers quiet */
#ifndef discard_const_p
#if defined(__intptr_t_defined) || defined(HAVE_INTPTR_T)
# define discard_const_p(type, ptr) ((type *)((intptr_t)(ptr)))
#else
# define discard_const_p(type, ptr) ((type *)(ptr))
#endif
#endif
/* these macros gain us a few percent of speed on gcc */
#if (__GNUC__ >= 3)
/* the strange !! is to ensure that __builtin_expect() takes either 0 or 1
   as its first argument */
#ifndef likely
#define likely(x)   __builtin_expect(!!(x), 1)
#endif
#ifndef unlikely
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
#else
#ifndef likely
#define likely(x) (x)
#endif
#ifndef unlikely
#define unlikely(x) (x)
#endif
#endif
/* this null_context is only used if talloc_enable_leak_report() or
   talloc_enable_leak_report_full() is called, otherwise it remains
   NULL
*/
static void *null_context;
/* parent for talloc_autofree_context() allocations, freed at exit */
static void *autofree_context;
/* used to enable fill of memory on free, which can be useful for
 * catching use after free errors when valgrind is too slow
 */
static struct {
	bool initialised;	/* true once the env var has been consulted */
	bool enabled;		/* fill freed memory when set */
	uint8_t fill_value;	/* byte written over freed memory */
} talloc_fill;

#define TALLOC_FILL_ENV "TALLOC_FREE_FILL"

/*
 * do not wipe the header, to allow the
 * double-free logic to still work
 */
#define TC_INVALIDATE_FULL_FILL_CHUNK(_tc) do { \
	if (unlikely(talloc_fill.enabled)) { \
		size_t _flen = (_tc)->size; \
		char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
		memset(_fptr, talloc_fill.fill_value, _flen); \
	} \
} while (0)
#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
/* Mark the whole chunk as not accessable */
#define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { \
	size_t _flen = TC_HDR_SIZE + (_tc)->size; \
	char *_fptr = (char *)(_tc); \
	VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
} while(0)
#else
#define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { } while (0)
#endif

/* fully invalidate a chunk on free: optional fill plus valgrind poisoning */
#define TC_INVALIDATE_FULL_CHUNK(_tc) do { \
	TC_INVALIDATE_FULL_FILL_CHUNK(_tc); \
	TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc); \
} while (0)
/* fill the tail of a chunk that is being shrunk, if fill is enabled */
#define TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \
	if (unlikely(talloc_fill.enabled)) { \
		size_t _flen = (_tc)->size - (_new_size); \
		char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
		_fptr += (_new_size); \
		memset(_fptr, talloc_fill.fill_value, _flen); \
	} \
} while (0)

#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
/* Mark the unused bytes not accessable */
#define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \
	size_t _flen = (_tc)->size - (_new_size); \
	char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
	_fptr += (_new_size); \
	VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
} while (0)
#else
#define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
#endif

/* invalidate the bytes dropped when a chunk shrinks */
#define TC_INVALIDATE_SHRINK_CHUNK(_tc, _new_size) do { \
	TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size); \
	TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \
} while (0)
/* fill the tail of a chunk being shrunk in-place (realloc path) */
#define TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \
	if (unlikely(talloc_fill.enabled)) { \
		size_t _flen = (_tc)->size - (_new_size); \
		char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
		_fptr += (_new_size); \
		memset(_fptr, talloc_fill.fill_value, _flen); \
	} \
} while (0)

#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
/* Mark the unused bytes as undefined */
#define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \
	size_t _flen = (_tc)->size - (_new_size); \
	char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
	_fptr += (_new_size); \
	VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \
} while (0)
#else
#define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
#endif

/* mark the bytes dropped by an in-place shrink as undefined */
#define TC_UNDEFINE_SHRINK_CHUNK(_tc, _new_size) do { \
	TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size); \
	TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \
} while (0)
#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
/* Mark the new bytes as undefined */
#define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { \
	size_t _old_used = TC_HDR_SIZE + (_tc)->size; \
	size_t _new_used = TC_HDR_SIZE + (_new_size); \
	size_t _flen = _new_used - _old_used; \
	char *_fptr = _old_used + (char *)(_tc); \
	VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \
} while (0)
#else
#define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
#endif

/* mark bytes gained by an in-place grow as undefined for valgrind */
#define TC_UNDEFINE_GROW_CHUNK(_tc, _new_size) do { \
	TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size); \
} while (0)
/* one handle per talloc_reference(); lives on the referenced chunk's
 * "refs" list and is itself a talloc child of the referencing context */
struct talloc_reference_handle {
	struct talloc_reference_handle *next, *prev;
	void *ptr;		/* the referenced pointer */
	const char *location;	/* source location of the reference */
};
242 struct talloc_memlimit {
243 struct talloc_chunk *parent;
244 struct talloc_memlimit *upper;
249 static inline bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size);
250 static inline void talloc_memlimit_grow(struct talloc_memlimit *limit,
252 static inline void talloc_memlimit_shrink(struct talloc_memlimit *limit,
254 static inline void tc_memlimit_update_on_free(struct talloc_chunk *tc);
256 static inline void _tc_set_name_const(struct talloc_chunk *tc,
258 static struct talloc_chunk *_vasprintf_tc(const void *t,
262 typedef int (*talloc_destructor_t)(void *);
264 struct talloc_pool_hdr;
266 struct talloc_chunk {
268 * flags includes the talloc magic, which is randomised to
269 * make overwrite attacks harder
274 * If you have a logical tree like:
280 * <child 1> <child 2> <child 3>
282 * The actual talloc tree is:
286 * <child 1> - <child 2> - <child 3>
288 * The children are linked with next/prev pointers, and
289 * child 1 is linked to the parent with parent/child
293 struct talloc_chunk *next, *prev;
294 struct talloc_chunk *parent, *child;
295 struct talloc_reference_handle *refs;
296 talloc_destructor_t destructor;
302 * if 'limit' is set it means all *new* children of the context will
303 * be limited to a total aggregate size ox max_size for memory
305 * cur_size is used to keep track of the current use
307 struct talloc_memlimit *limit;
310 * For members of a pool (i.e. TALLOC_FLAG_POOLMEM is set), "pool"
311 * is a pointer to the struct talloc_chunk of the pool that it was
312 * allocated from. This way children can quickly find the pool to chew
315 struct talloc_pool_hdr *pool;
/* 16 byte alignment seems to keep everyone happy */
#define TC_ALIGN16(s) (((s)+15)&~15)
/* size of the chunk header, rounded up to 16-byte alignment */
#define TC_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_chunk))
/* user pointer for a given chunk header */
#define TC_PTR_FROM_CHUNK(tc) ((void *)(TC_HDR_SIZE + (char*)tc))
323 _PUBLIC_ int talloc_version_major(void)
325 return TALLOC_VERSION_MAJOR;
328 _PUBLIC_ int talloc_version_minor(void)
330 return TALLOC_VERSION_MINOR;
333 _PUBLIC_ int talloc_test_get_magic(void)
338 static void (*talloc_log_fn)(const char *message);
340 _PUBLIC_ void talloc_set_log_fn(void (*log_fn)(const char *message))
342 talloc_log_fn = log_fn;
#ifdef HAVE_CONSTRUCTOR_ATTRIBUTE
/* randomise talloc_magic at load time so forged chunk headers are harder
 * to construct; runs automatically via the constructor attribute */
void talloc_lib_init(void) __attribute__((constructor));
void talloc_lib_init(void)
{
	uint32_t random_value;
#if defined(HAVE_GETAUXVAL) && defined(AT_RANDOM)
	uint8_t *p;
	/*
	 * Use the kernel-provided random values used for
	 * ASLR. This won't change per-exec, which is ideal for us
	 */
	p = (uint8_t *) getauxval(AT_RANDOM);
	if (p) {
		/*
		 * We get 16 bytes from getauxval. By calling rand(),
		 * a totally insecure PRNG, but one that will
		 * deterministically have a different value when called
		 * twice, we ensure that if two talloc-like libraries
		 * are somehow loaded in the same address space, that
		 * because we choose different bytes, we will keep the
		 * protection against collision of multiple talloc
		 * magic values.
		 *
		 * This protection is important because the effects of
		 * passing a talloc pointer from one to the other may
		 * be very hard to determine.
		 */
		int offset = rand() % (16 - sizeof(random_value));
		memcpy(&random_value, p + offset, sizeof(random_value));
	} else
#endif
	{
		/*
		 * Otherwise, hope the location we are loaded in
		 * memory is randomised by someone else
		 */
		random_value = ((uintptr_t)talloc_lib_init & 0xFFFFFFFF);
	}
	/* keep the low flag bits clear; they carry the chunk state flags */
	talloc_magic = random_value & ~TALLOC_FLAG_MASK;
}
#else
#warning "No __attribute__((constructor)) support found on this platform, additional talloc security measures not available"
#endif
389 static void talloc_log(const char *fmt, ...) PRINTF_ATTRIBUTE(1,2);
390 static void talloc_log(const char *fmt, ...)
395 if (!talloc_log_fn) {
400 message = talloc_vasprintf(NULL, fmt, ap);
403 talloc_log_fn(message);
404 talloc_free(message);
/* stock log callback that writes the message to stderr */
static void talloc_log_stderr(const char *message)
{
	fprintf(stderr, "%s", message);
}
412 _PUBLIC_ void talloc_set_log_stderr(void)
414 talloc_set_log_fn(talloc_log_stderr);
417 static void (*talloc_abort_fn)(const char *reason);
419 _PUBLIC_ void talloc_set_abort_fn(void (*abort_fn)(const char *reason))
421 talloc_abort_fn = abort_fn;
424 static void talloc_abort(const char *reason)
426 talloc_log("%s\n", reason);
428 if (!talloc_abort_fn) {
429 TALLOC_ABORT(reason);
432 talloc_abort_fn(reason);
/* abort: the chunk carried a magic from a different talloc version */
static void talloc_abort_magic(unsigned magic)
{
	talloc_abort("Bad talloc magic value - wrong talloc version used/mixed");
}
/* abort: a freed chunk was accessed (use-after-free) */
static void talloc_abort_access_after_free(void)
{
	talloc_abort("Bad talloc magic value - access after free");
}
/* abort: the pointer does not look like a talloc chunk at all */
static void talloc_abort_unknown_value(void)
{
	talloc_abort("Bad talloc magic value - unknown value");
}
450 /* panic if we get a bad magic value */
451 static inline struct talloc_chunk *talloc_chunk_from_ptr(const void *ptr)
453 const char *pp = (const char *)ptr;
454 struct talloc_chunk *tc = discard_const_p(struct talloc_chunk, pp - TC_HDR_SIZE);
455 if (unlikely((tc->flags & (TALLOC_FLAG_FREE | ~TALLOC_FLAG_MASK)) != talloc_magic)) {
456 if ((tc->flags & (~0xF)) == talloc_magic) {
457 talloc_abort_magic(tc->flags & (~TALLOC_FLAG_MASK));
461 if (tc->flags & TALLOC_FLAG_FREE) {
462 talloc_log("talloc: access after free error - first free may be at %s\n", tc->name);
463 talloc_abort_access_after_free();
466 talloc_abort_unknown_value();
/* hook into the front of the list */
#define _TLIST_ADD(list, p) \
do { \
	if (!(list)) { \
		(list) = (p); \
		(p)->next = (p)->prev = NULL; \
	} else { \
		(list)->prev = (p); \
		(p)->next = (list); \
		(p)->prev = NULL; \
		(list) = (p); \
	} \
} while (0)

/* remove an element from a list - element doesn't have to be in list. */
#define _TLIST_REMOVE(list, p) \
do { \
	if ((p) == (list)) { \
		(list) = (p)->next; \
		if (list) (list)->prev = NULL; \
	} else { \
		if ((p)->prev) (p)->prev->next = (p)->next; \
		if ((p)->next) (p)->next->prev = (p)->prev; \
	} \
	if ((p) && ((p) != (list))) (p)->next = (p)->prev = NULL; \
} while (0)
502 return the parent chunk of a pointer
504 static inline struct talloc_chunk *talloc_parent_chunk(const void *ptr)
506 struct talloc_chunk *tc;
508 if (unlikely(ptr == NULL)) {
512 tc = talloc_chunk_from_ptr(ptr);
513 while (tc->prev) tc=tc->prev;
518 _PUBLIC_ void *talloc_parent(const void *ptr)
520 struct talloc_chunk *tc = talloc_parent_chunk(ptr);
521 return tc? TC_PTR_FROM_CHUNK(tc) : NULL;
527 _PUBLIC_ const char *talloc_parent_name(const void *ptr)
529 struct talloc_chunk *tc = talloc_parent_chunk(ptr);
530 return tc? tc->name : NULL;
534 A pool carries an in-pool object count count in the first 16 bytes.
535 bytes. This is done to support talloc_steal() to a parent outside of the
536 pool. The count includes the pool itself, so a talloc_free() on a pool will
537 only destroy the pool if the count has dropped to zero. A talloc_free() of a
538 pool member will reduce the count, and eventually also call free(3) on the
541 The object count is not put into "struct talloc_chunk" because it is only
542 relevant for talloc pools and the alignment to 16 bytes would increase the
543 memory footprint of each talloc chunk by those 16 bytes.
546 struct talloc_pool_hdr {
548 unsigned int object_count;
552 #define TP_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_pool_hdr))
554 static inline struct talloc_pool_hdr *talloc_pool_from_chunk(struct talloc_chunk *c)
556 return (struct talloc_pool_hdr *)((char *)c - TP_HDR_SIZE);
559 static inline struct talloc_chunk *talloc_chunk_from_pool(struct talloc_pool_hdr *h)
561 return (struct talloc_chunk *)((char *)h + TP_HDR_SIZE);
564 static inline void *tc_pool_end(struct talloc_pool_hdr *pool_hdr)
566 struct talloc_chunk *tc = talloc_chunk_from_pool(pool_hdr);
567 return (char *)tc + TC_HDR_SIZE + pool_hdr->poolsize;
570 static inline size_t tc_pool_space_left(struct talloc_pool_hdr *pool_hdr)
572 return (char *)tc_pool_end(pool_hdr) - (char *)pool_hdr->end;
575 /* If tc is inside a pool, this gives the next neighbour. */
576 static inline void *tc_next_chunk(struct talloc_chunk *tc)
578 return (char *)tc + TC_ALIGN16(TC_HDR_SIZE + tc->size);
/* first member slot inside a pool (right after the pool's own chunk) */
static inline void *tc_pool_first_chunk(struct talloc_pool_hdr *pool_hdr)
{
	struct talloc_chunk *tc = talloc_chunk_from_pool(pool_hdr);
	return tc_next_chunk(tc);
}
587 /* Mark the whole remaining pool as not accessable */
588 static inline void tc_invalidate_pool(struct talloc_pool_hdr *pool_hdr)
590 size_t flen = tc_pool_space_left(pool_hdr);
592 if (unlikely(talloc_fill.enabled)) {
593 memset(pool_hdr->end, talloc_fill.fill_value, flen);
596 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
597 VALGRIND_MAKE_MEM_NOACCESS(pool_hdr->end, flen);
605 static inline struct talloc_chunk *tc_alloc_pool(struct talloc_chunk *parent,
606 size_t size, size_t prefix_len)
608 struct talloc_pool_hdr *pool_hdr = NULL;
610 struct talloc_chunk *result;
613 if (parent == NULL) {
617 if (parent->flags & TALLOC_FLAG_POOL) {
618 pool_hdr = talloc_pool_from_chunk(parent);
620 else if (parent->flags & TALLOC_FLAG_POOLMEM) {
621 pool_hdr = parent->pool;
624 if (pool_hdr == NULL) {
628 space_left = tc_pool_space_left(pool_hdr);
631 * Align size to 16 bytes
633 chunk_size = TC_ALIGN16(size + prefix_len);
635 if (space_left < chunk_size) {
639 result = (struct talloc_chunk *)((char *)pool_hdr->end + prefix_len);
641 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
642 VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr->end, chunk_size);
645 pool_hdr->end = (void *)((char *)pool_hdr->end + chunk_size);
647 result->flags = talloc_magic | TALLOC_FLAG_POOLMEM;
648 result->pool = pool_hdr;
650 pool_hdr->object_count++;
656 Allocate a bit of memory as a child of an existing pointer
658 static inline void *__talloc_with_prefix(const void *context,
661 struct talloc_chunk **tc_ret)
663 struct talloc_chunk *tc = NULL;
664 struct talloc_memlimit *limit = NULL;
665 size_t total_len = TC_HDR_SIZE + size + prefix_len;
666 struct talloc_chunk *parent = NULL;
668 if (unlikely(context == NULL)) {
669 context = null_context;
672 if (unlikely(size >= MAX_TALLOC_SIZE)) {
676 if (unlikely(total_len < TC_HDR_SIZE)) {
680 if (likely(context != NULL)) {
681 parent = talloc_chunk_from_ptr(context);
683 if (parent->limit != NULL) {
684 limit = parent->limit;
687 tc = tc_alloc_pool(parent, TC_HDR_SIZE+size, prefix_len);
694 * Only do the memlimit check/update on actual allocation.
696 if (!talloc_memlimit_check(limit, total_len)) {
701 ptr = malloc(total_len);
702 if (unlikely(ptr == NULL)) {
705 tc = (struct talloc_chunk *)(ptr + prefix_len);
706 tc->flags = talloc_magic;
709 talloc_memlimit_grow(limit, total_len);
714 tc->destructor = NULL;
719 if (likely(context != NULL)) {
721 parent->child->parent = NULL;
722 tc->next = parent->child;
731 tc->next = tc->prev = tc->parent = NULL;
735 return TC_PTR_FROM_CHUNK(tc);
738 static inline void *__talloc(const void *context,
740 struct talloc_chunk **tc)
742 return __talloc_with_prefix(context, size, 0, tc);
746 * Create a talloc pool
749 static inline void *_talloc_pool(const void *context, size_t size)
751 struct talloc_chunk *tc;
752 struct talloc_pool_hdr *pool_hdr;
755 result = __talloc_with_prefix(context, size, TP_HDR_SIZE, &tc);
757 if (unlikely(result == NULL)) {
761 pool_hdr = talloc_pool_from_chunk(tc);
763 tc->flags |= TALLOC_FLAG_POOL;
766 pool_hdr->object_count = 1;
767 pool_hdr->end = result;
768 pool_hdr->poolsize = size;
770 tc_invalidate_pool(pool_hdr);
775 _PUBLIC_ void *talloc_pool(const void *context, size_t size)
777 return _talloc_pool(context, size);
781 * Create a talloc pool correctly sized for a basic size plus
782 * a number of subobjects whose total size is given. Essentially
783 * a custom allocator for talloc to reduce fragmentation.
786 _PUBLIC_ void *_talloc_pooled_object(const void *ctx,
788 const char *type_name,
789 unsigned num_subobjects,
790 size_t total_subobjects_size)
792 size_t poolsize, subobjects_slack, tmp;
793 struct talloc_chunk *tc;
794 struct talloc_pool_hdr *pool_hdr;
797 poolsize = type_size + total_subobjects_size;
799 if ((poolsize < type_size) || (poolsize < total_subobjects_size)) {
803 if (num_subobjects == UINT_MAX) {
806 num_subobjects += 1; /* the object body itself */
809 * Alignment can increase the pool size by at most 15 bytes per object
810 * plus alignment for the object itself
812 subobjects_slack = (TC_HDR_SIZE + TP_HDR_SIZE + 15) * num_subobjects;
813 if (subobjects_slack < num_subobjects) {
817 tmp = poolsize + subobjects_slack;
818 if ((tmp < poolsize) || (tmp < subobjects_slack)) {
823 ret = _talloc_pool(ctx, poolsize);
828 tc = talloc_chunk_from_ptr(ret);
829 tc->size = type_size;
831 pool_hdr = talloc_pool_from_chunk(tc);
833 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
834 VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr->end, type_size);
837 pool_hdr->end = ((char *)pool_hdr->end + TC_ALIGN16(type_size));
839 _tc_set_name_const(tc, type_name);
847 setup a destructor to be called on free of a pointer
848 the destructor should return 0 on success, or -1 on failure.
849 if the destructor fails then the free is failed, and the memory can
850 be continued to be used
852 _PUBLIC_ void _talloc_set_destructor(const void *ptr, int (*destructor)(void *))
854 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
855 tc->destructor = destructor;
859 increase the reference count on a piece of memory.
861 _PUBLIC_ int talloc_increase_ref_count(const void *ptr)
863 if (unlikely(!talloc_reference(null_context, ptr))) {
870 helper for talloc_reference()
872 this is referenced by a function pointer and should not be inline
874 static int talloc_reference_destructor(struct talloc_reference_handle *handle)
876 struct talloc_chunk *ptr_tc = talloc_chunk_from_ptr(handle->ptr);
877 _TLIST_REMOVE(ptr_tc->refs, handle);
882 more efficient way to add a name to a pointer - the name must point to a
885 static inline void _tc_set_name_const(struct talloc_chunk *tc,
892 internal talloc_named_const()
894 static inline void *_talloc_named_const(const void *context, size_t size, const char *name)
897 struct talloc_chunk *tc;
899 ptr = __talloc(context, size, &tc);
900 if (unlikely(ptr == NULL)) {
904 _tc_set_name_const(tc, name);
910 make a secondary reference to a pointer, hanging off the given context.
911 the pointer remains valid until both the original caller and this given
914 the major use for this is when two different structures need to reference the
915 same underlying data, and you want to be able to free the two instances separately,
918 _PUBLIC_ void *_talloc_reference_loc(const void *context, const void *ptr, const char *location)
920 struct talloc_chunk *tc;
921 struct talloc_reference_handle *handle;
922 if (unlikely(ptr == NULL)) return NULL;
924 tc = talloc_chunk_from_ptr(ptr);
925 handle = (struct talloc_reference_handle *)_talloc_named_const(context,
926 sizeof(struct talloc_reference_handle),
927 TALLOC_MAGIC_REFERENCE);
928 if (unlikely(handle == NULL)) return NULL;
930 /* note that we hang the destructor off the handle, not the
931 main context as that allows the caller to still setup their
932 own destructor on the context if they want to */
933 talloc_set_destructor(handle, talloc_reference_destructor);
934 handle->ptr = discard_const_p(void, ptr);
935 handle->location = location;
936 _TLIST_ADD(tc->refs, handle);
940 static void *_talloc_steal_internal(const void *new_ctx, const void *ptr);
942 static inline void _tc_free_poolmem(struct talloc_chunk *tc,
943 const char *location)
945 struct talloc_pool_hdr *pool;
946 struct talloc_chunk *pool_tc;
950 pool_tc = talloc_chunk_from_pool(pool);
951 next_tc = tc_next_chunk(tc);
953 tc->flags |= TALLOC_FLAG_FREE;
955 /* we mark the freed memory with where we called the free
956 * from. This means on a double free error we can report where
957 * the first free came from
961 TC_INVALIDATE_FULL_CHUNK(tc);
963 if (unlikely(pool->object_count == 0)) {
964 talloc_abort("Pool object count zero!");
968 pool->object_count--;
970 if (unlikely(pool->object_count == 1
971 && !(pool_tc->flags & TALLOC_FLAG_FREE))) {
973 * if there is just one object left in the pool
974 * and pool->flags does not have TALLOC_FLAG_FREE,
975 * it means this is the pool itself and
976 * the rest is available for new objects
979 pool->end = tc_pool_first_chunk(pool);
980 tc_invalidate_pool(pool);
984 if (unlikely(pool->object_count == 0)) {
986 * we mark the freed memory with where we called the free
987 * from. This means on a double free error we can report where
988 * the first free came from
990 pool_tc->name = location;
992 if (pool_tc->flags & TALLOC_FLAG_POOLMEM) {
993 _tc_free_poolmem(pool_tc, location);
996 * The tc_memlimit_update_on_free()
997 * call takes into account the
998 * prefix TP_HDR_SIZE allocated before
999 * the pool talloc_chunk.
1001 tc_memlimit_update_on_free(pool_tc);
1002 TC_INVALIDATE_FULL_CHUNK(pool_tc);
1008 if (pool->end == next_tc) {
1010 * if pool->pool still points to end of
1011 * 'tc' (which is stored in the 'next_tc' variable),
1012 * we can reclaim the memory of 'tc'.
1019 * Do nothing. The memory is just "wasted", waiting for the pool
1020 * itself to be freed.
1024 static inline void _tc_free_children_internal(struct talloc_chunk *tc,
1026 const char *location);
1028 static inline int _talloc_free_internal(void *ptr, const char *location);
1031 internal free call that takes a struct talloc_chunk *.
1033 static inline int _tc_free_internal(struct talloc_chunk *tc,
1034 const char *location)
1037 void *ptr = TC_PTR_FROM_CHUNK(tc);
1039 if (unlikely(tc->refs)) {
1041 /* check if this is a reference from a child or
1042 * grandchild back to it's parent or grandparent
1044 * in that case we need to remove the reference and
1045 * call another instance of talloc_free() on the current
1048 is_child = talloc_is_parent(tc->refs, ptr);
1049 _talloc_free_internal(tc->refs, location);
1051 return _talloc_free_internal(ptr, location);
1056 if (unlikely(tc->flags & TALLOC_FLAG_LOOP)) {
1057 /* we have a free loop - stop looping */
1061 if (unlikely(tc->destructor)) {
1062 talloc_destructor_t d = tc->destructor;
1065 * Protect the destructor against some overwrite
1066 * attacks, by explicitly checking it has the right
1069 if (talloc_chunk_from_ptr(ptr) != tc) {
1071 * This can't actually happen, the
1072 * call itself will panic.
1074 TALLOC_ABORT("talloc_chunk_from_ptr failed!");
1077 if (d == (talloc_destructor_t)-1) {
1080 tc->destructor = (talloc_destructor_t)-1;
1083 * Only replace the destructor pointer if
1084 * calling the destructor didn't modify it.
1086 if (tc->destructor == (talloc_destructor_t)-1) {
1091 tc->destructor = NULL;
1095 _TLIST_REMOVE(tc->parent->child, tc);
1096 if (tc->parent->child) {
1097 tc->parent->child->parent = tc->parent;
1100 if (tc->prev) tc->prev->next = tc->next;
1101 if (tc->next) tc->next->prev = tc->prev;
1102 tc->prev = tc->next = NULL;
1105 tc->flags |= TALLOC_FLAG_LOOP;
1107 _tc_free_children_internal(tc, ptr, location);
1109 tc->flags |= TALLOC_FLAG_FREE;
1111 /* we mark the freed memory with where we called the free
1112 * from. This means on a double free error we can report where
1113 * the first free came from
1115 tc->name = location;
1117 if (tc->flags & TALLOC_FLAG_POOL) {
1118 struct talloc_pool_hdr *pool;
1120 pool = talloc_pool_from_chunk(tc);
1122 if (unlikely(pool->object_count == 0)) {
1123 talloc_abort("Pool object count zero!");
1127 pool->object_count--;
1129 if (likely(pool->object_count != 0)) {
1134 * With object_count==0, a pool becomes a normal piece of
1135 * memory to free. If it's allocated inside a pool, it needs
1136 * to be freed as poolmem, else it needs to be just freed.
1143 if (tc->flags & TALLOC_FLAG_POOLMEM) {
1144 _tc_free_poolmem(tc, location);
1148 tc_memlimit_update_on_free(tc);
1150 TC_INVALIDATE_FULL_CHUNK(tc);
1156 internal talloc_free call
1158 static inline int _talloc_free_internal(void *ptr, const char *location)
1160 struct talloc_chunk *tc;
1162 if (unlikely(ptr == NULL)) {
1166 /* possibly initialised the talloc fill value */
1167 if (unlikely(!talloc_fill.initialised)) {
1168 const char *fill = getenv(TALLOC_FILL_ENV);
1170 talloc_fill.enabled = true;
1171 talloc_fill.fill_value = strtoul(fill, NULL, 0);
1173 talloc_fill.initialised = true;
1176 tc = talloc_chunk_from_ptr(ptr);
1177 return _tc_free_internal(tc, location);
1180 static inline size_t _talloc_total_limit_size(const void *ptr,
1181 struct talloc_memlimit *old_limit,
1182 struct talloc_memlimit *new_limit);
1185 move a lump of memory from one talloc context to another return the
1186 ptr on success, or NULL if it could not be transferred.
1187 passing NULL as ptr will always return NULL with no side effects.
1189 static void *_talloc_steal_internal(const void *new_ctx, const void *ptr)
1191 struct talloc_chunk *tc, *new_tc;
1192 size_t ctx_size = 0;
1194 if (unlikely(!ptr)) {
1198 if (unlikely(new_ctx == NULL)) {
1199 new_ctx = null_context;
1202 tc = talloc_chunk_from_ptr(ptr);
1204 if (tc->limit != NULL) {
1206 ctx_size = _talloc_total_limit_size(ptr, NULL, NULL);
1208 /* Decrement the memory limit from the source .. */
1209 talloc_memlimit_shrink(tc->limit->upper, ctx_size);
1211 if (tc->limit->parent == tc) {
1212 tc->limit->upper = NULL;
1218 if (unlikely(new_ctx == NULL)) {
1220 _TLIST_REMOVE(tc->parent->child, tc);
1221 if (tc->parent->child) {
1222 tc->parent->child->parent = tc->parent;
1225 if (tc->prev) tc->prev->next = tc->next;
1226 if (tc->next) tc->next->prev = tc->prev;
1229 tc->parent = tc->next = tc->prev = NULL;
1230 return discard_const_p(void, ptr);
1233 new_tc = talloc_chunk_from_ptr(new_ctx);
1235 if (unlikely(tc == new_tc || tc->parent == new_tc)) {
1236 return discard_const_p(void, ptr);
1240 _TLIST_REMOVE(tc->parent->child, tc);
1241 if (tc->parent->child) {
1242 tc->parent->child->parent = tc->parent;
1245 if (tc->prev) tc->prev->next = tc->next;
1246 if (tc->next) tc->next->prev = tc->prev;
1247 tc->prev = tc->next = NULL;
1250 tc->parent = new_tc;
1251 if (new_tc->child) new_tc->child->parent = NULL;
1252 _TLIST_ADD(new_tc->child, tc);
1254 if (tc->limit || new_tc->limit) {
1255 ctx_size = _talloc_total_limit_size(ptr, tc->limit,
1257 /* .. and increment it in the destination. */
1258 if (new_tc->limit) {
1259 talloc_memlimit_grow(new_tc->limit, ctx_size);
1263 return discard_const_p(void, ptr);
1267 move a lump of memory from one talloc context to another return the
1268 ptr on success, or NULL if it could not be transferred.
1269 passing NULL as ptr will always return NULL with no side effects.
1271 _PUBLIC_ void *_talloc_steal_loc(const void *new_ctx, const void *ptr, const char *location)
1273 struct talloc_chunk *tc;
1275 if (unlikely(ptr == NULL)) {
1279 tc = talloc_chunk_from_ptr(ptr);
1281 if (unlikely(tc->refs != NULL) && talloc_parent(ptr) != new_ctx) {
1282 struct talloc_reference_handle *h;
1284 talloc_log("WARNING: talloc_steal with references at %s\n",
1287 for (h=tc->refs; h; h=h->next) {
1288 talloc_log("\treference at %s\n",
1294 /* this test is probably too expensive to have on in the
1295 normal build, but it useful for debugging */
1296 if (talloc_is_parent(new_ctx, ptr)) {
1297 talloc_log("WARNING: stealing into talloc child at %s\n", location);
1301 return _talloc_steal_internal(new_ctx, ptr);
1305 this is like a talloc_steal(), but you must supply the old
1306 parent. This resolves the ambiguity in a talloc_steal() which is
1307 called on a context that has more than one parent (via references)
1309 The old parent can be either a reference or a parent
1311 _PUBLIC_ void *talloc_reparent(const void *old_parent, const void *new_parent, const void *ptr)
1313 struct talloc_chunk *tc;
1314 struct talloc_reference_handle *h;
1316 if (unlikely(ptr == NULL)) {
1320 if (old_parent == talloc_parent(ptr)) {
1321 return _talloc_steal_internal(new_parent, ptr);
1324 tc = talloc_chunk_from_ptr(ptr);
1325 for (h=tc->refs;h;h=h->next) {
1326 if (talloc_parent(h) == old_parent) {
1327 if (_talloc_steal_internal(new_parent, h) != h) {
1330 return discard_const_p(void, ptr);
1334 /* it wasn't a parent */
1339 remove a secondary reference to a pointer. This undo's what
1340 talloc_reference() has done. The context and pointer arguments
1341 must match those given to a talloc_reference()
1343 static inline int talloc_unreference(const void *context, const void *ptr)
1345 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1346 struct talloc_reference_handle *h;
1348 if (unlikely(context == NULL)) {
1349 context = null_context;
1352 for (h=tc->refs;h;h=h->next) {
1353 struct talloc_chunk *p = talloc_parent_chunk(h);
1355 if (context == NULL) break;
1356 } else if (TC_PTR_FROM_CHUNK(p) == context) {
1364 return _talloc_free_internal(h, __location__);
1368 remove a specific parent context from a pointer. This is a more
1369 controlled variant of talloc_free()
1371 _PUBLIC_ int talloc_unlink(const void *context, void *ptr)
1373 struct talloc_chunk *tc_p, *new_p, *tc_c;
1380 if (context == NULL) {
1381 context = null_context;
/* if 'context' only held a secondary reference, dropping it is enough */
1384 if (talloc_unreference(context, ptr) == 0) {
/* verify that 'context' really is the parent of 'ptr' */
1388 if (context != NULL) {
1389 tc_c = talloc_chunk_from_ptr(context);
1393 if (tc_c != talloc_parent_chunk(ptr)) {
1397 tc_p = talloc_chunk_from_ptr(ptr);
/* no references left: this is a real free */
1399 if (tc_p->refs == NULL) {
1400 return _talloc_free_internal(ptr, __location__);
/* promote the first remaining reference holder to be the new parent */
1403 new_p = talloc_parent_chunk(tc_p->refs);
1405 new_parent = TC_PTR_FROM_CHUNK(new_p);
1410 if (talloc_unreference(new_parent, ptr) != 0) {
1414 _talloc_steal_internal(new_parent, ptr);
1420 add a name to an existing pointer - va_list version
1422 static inline const char *tc_set_name_v(struct talloc_chunk *tc,
1424 va_list ap) PRINTF_ATTRIBUTE(2,0);
1426 static inline const char *tc_set_name_v(struct talloc_chunk *tc,
/* the name is itself a talloc child of tc, formatted from fmt/ap */
1430 struct talloc_chunk *name_tc = _vasprintf_tc(TC_PTR_FROM_CHUNK(tc),
1433 if (likely(name_tc)) {
1434 tc->name = TC_PTR_FROM_CHUNK(name_tc);
/* name the name chunk itself so reports show it as ".name" */
1435 _tc_set_name_const(name_tc, ".name");
1443 add a name to an existing pointer
1445 _PUBLIC_ const char *talloc_set_name(const void *ptr, const char *fmt, ...)
1447 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
/* printf-style varargs wrapper around tc_set_name_v() */
1451 name = tc_set_name_v(tc, fmt, ap);
1458 create a named talloc pointer. Any talloc pointer can be named, and
1459 talloc_named() operates just like talloc() except that it allows you
1460 to name the pointer.
1462 _PUBLIC_ void *talloc_named(const void *context, size_t size, const char *fmt, ...)
1467 struct talloc_chunk *tc;
1469 ptr = __talloc(context, size, &tc);
1470 if (unlikely(ptr == NULL)) return NULL;
1473 name = tc_set_name_v(tc, fmt, ap);
/* naming failed: undo the allocation rather than return unnamed memory */
1476 if (unlikely(name == NULL)) {
1477 _talloc_free_internal(ptr, __location__);
1485 return the name of a talloc ptr, or "UNNAMED"
1487 static inline const char *__talloc_get_name(const void *ptr)
1489 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
/* reference handles carry a magic name pointer, not a real string */
1490 if (unlikely(tc->name == TALLOC_MAGIC_REFERENCE)) {
1491 return ".reference";
1493 if (likely(tc->name)) {
/* public wrapper for the inline helper above */
1499 _PUBLIC_ const char *talloc_get_name(const void *ptr)
1501 return __talloc_get_name(ptr);
1505 check if a pointer has the given name. If it does, return the pointer,
1506 otherwise return NULL
1508 _PUBLIC_ void *talloc_check_name(const void *ptr, const char *name)
1511 if (unlikely(ptr == NULL)) return NULL;
1512 pname = __talloc_get_name(ptr);
/* pointer-equality first: names set from string literals often share storage */
1513 if (likely(pname == name || strcmp(pname, name) == 0)) {
1514 return discard_const_p(void, ptr);
/* abort the process with a diagnostic naming the mismatched talloc type */
1519 static void talloc_abort_type_mismatch(const char *location,
1521 const char *expected)
1525 reason = talloc_asprintf(NULL,
1526 "%s: Type mismatch: name[%s] expected[%s]",
/* fall back to a static message if the asprintf allocation failed */
1531 reason = "Type mismatch";
1534 talloc_abort(reason);
/* typed-cast helper: return ptr if its talloc name matches 'name',
 * otherwise abort via talloc_abort_type_mismatch() */
1537 _PUBLIC_ void *_talloc_get_type_abort(const void *ptr, const char *name, const char *location)
1541 if (unlikely(ptr == NULL)) {
1542 talloc_abort_type_mismatch(location, NULL, name);
1546 pname = __talloc_get_name(ptr);
1547 if (likely(pname == name || strcmp(pname, name) == 0)) {
1548 return discard_const_p(void, ptr);
1551 talloc_abort_type_mismatch(location, pname, name);
1556 this is for compatibility with older versions of talloc
1558 _PUBLIC_ void *talloc_init(const char *fmt, ...)
1563 struct talloc_chunk *tc;
/* zero-sized named top-level context (no parent) */
1565 ptr = __talloc(NULL, 0, &tc);
1566 if (unlikely(ptr == NULL)) return NULL;
1569 name = tc_set_name_v(tc, fmt, ap);
1572 if (unlikely(name == NULL)) {
1573 _talloc_free_internal(ptr, __location__);
/* free every child of tc; children whose destructor blocks the free
 * are reparented rather than leaked */
1580 static inline void _tc_free_children_internal(struct talloc_chunk *tc,
1582 const char *location)
1585 /* we need to work out who will own an abandoned child
1586 if it cannot be freed. In priority order, the first
1587 choice is owner of any remaining reference to this
1588 pointer, the second choice is our parent, and the
1589 final choice is the null context. */
1590 void *child = TC_PTR_FROM_CHUNK(tc->child);
1591 const void *new_parent = null_context;
1592 if (unlikely(tc->child->refs)) {
1593 struct talloc_chunk *p = talloc_parent_chunk(tc->child->refs);
1594 if (p) new_parent = TC_PTR_FROM_CHUNK(p);
1596 if (unlikely(_tc_free_internal(tc->child, location) == -1)) {
1597 if (talloc_parent_chunk(child) != tc) {
1599 * Destructor already reparented this child.
1600 * No further reparenting needed.
1604 if (new_parent == null_context) {
1605 struct talloc_chunk *p = talloc_parent_chunk(ptr);
1606 if (p) new_parent = TC_PTR_FROM_CHUNK(p);
1608 _talloc_steal_internal(new_parent, child);
1614 this is a replacement for the Samba3 talloc_destroy_pool functionality. It
1615 should probably not be used in new code. It's in here to keep the talloc
1616 code consistent across Samba 3 and 4.
1618 _PUBLIC_ void talloc_free_children(void *ptr)
1620 struct talloc_chunk *tc_name = NULL;
1621 struct talloc_chunk *tc;
1623 if (unlikely(ptr == NULL)) {
1627 tc = talloc_chunk_from_ptr(ptr);
1629 /* we do not want to free the context name if it is a child .. */
1630 if (likely(tc->child)) {
/* locate the chunk holding tc->name among the children */
1631 for (tc_name = tc->child; tc_name; tc_name = tc_name->next) {
1632 if (tc->name == TC_PTR_FROM_CHUNK(tc_name)) break;
/* temporarily unlink the name chunk so it survives the purge */
1635 _TLIST_REMOVE(tc->child, tc_name);
1637 tc->child->parent = tc;
1642 _tc_free_children_internal(tc, ptr, __location__);
1644 /* .. so we put it back after all other children have been freed */
1647 tc->child->parent = NULL;
1649 tc_name->parent = tc;
1650 _TLIST_ADD(tc->child, tc_name);
1655 Allocate a bit of memory as a child of an existing pointer
1657 _PUBLIC_ void *_talloc(const void *context, size_t size)
1659 struct talloc_chunk *tc;
1660 return __talloc(context, size, &tc);
1664 externally callable talloc_set_name_const()
1666 _PUBLIC_ void talloc_set_name_const(const void *ptr, const char *name)
/* the name must outlive the chunk; only safe for string constants */
1668 _tc_set_name_const(talloc_chunk_from_ptr(ptr), name);
1672 create a named talloc pointer. Any talloc pointer can be named, and
1673 talloc_named() operates just like talloc() except that it allows you
1674 to name the pointer.
1676 _PUBLIC_ void *talloc_named_const(const void *context, size_t size, const char *name)
1678 return _talloc_named_const(context, size, name);
1682 free a talloc pointer. This also frees all child pointers of this
1685 return 0 if the memory is actually freed, otherwise -1. The memory
1686 will not be freed if the ref_count is > 1 or the destructor (if
1687 any) returns non-zero
1689 _PUBLIC_ int _talloc_free(void *ptr, const char *location)
1691 struct talloc_chunk *tc;
1693 if (unlikely(ptr == NULL)) {
1697 tc = talloc_chunk_from_ptr(ptr);
/* freeing a referenced pointer is ambiguous: which owner goes away? */
1699 if (unlikely(tc->refs != NULL)) {
1700 struct talloc_reference_handle *h;
/* special case: sole owner is a single reference from the null context */
1702 if (talloc_parent(ptr) == null_context && tc->refs->next == NULL) {
1703 /* in this case we do know which parent should
1704 get this pointer, as there is really only
1706 return talloc_unlink(null_context, ptr);
/* otherwise log every outstanding reference and refuse politely */
1709 talloc_log("ERROR: talloc_free with references at %s\n",
1712 for (h=tc->refs; h; h=h->next) {
1713 talloc_log("\treference at %s\n",
1719 return _talloc_free_internal(ptr, location);
1725 A talloc version of realloc. The context argument is only used if
1728 _PUBLIC_ void *_talloc_realloc(const void *context, void *ptr, size_t size, const char *name)
1730 struct talloc_chunk *tc;
1732 bool malloced = false;
1733 struct talloc_pool_hdr *pool_hdr = NULL;
1734 size_t old_size = 0;
1735 size_t new_size = 0;
1737 /* size zero is equivalent to free() */
1738 if (unlikely(size == 0)) {
1739 talloc_unlink(context, ptr);
1743 if (unlikely(size >= MAX_TALLOC_SIZE)) {
1747 /* realloc(NULL) is equivalent to malloc() */
1749 return _talloc_named_const(context, size, name);
1752 tc = talloc_chunk_from_ptr(ptr);
1754 /* don't allow realloc on referenced pointers */
1755 if (unlikely(tc->refs)) {
1759 /* don't let anybody try to realloc a talloc_pool */
1760 if (unlikely(tc->flags & TALLOC_FLAG_POOL)) {
/* growing under a memlimit: check the delta fits before doing any work */
1764 if (tc->limit && (size > tc->size)) {
1765 if (!talloc_memlimit_check(tc->limit, (size - tc->size))) {
1771 /* handle realloc inside a talloc_pool */
1772 if (unlikely(tc->flags & TALLOC_FLAG_POOLMEM)) {
1773 pool_hdr = tc->pool;
1776 #if (ALWAYS_REALLOC == 0)
1777 /* don't shrink if we have less than 1k to gain */
1778 if (size < tc->size && tc->limit == NULL) {
1780 void *next_tc = tc_next_chunk(tc);
1781 TC_INVALIDATE_SHRINK_CHUNK(tc, size);
/* if tc was the last pool chunk, hand the tail back to the pool */
1783 if (next_tc == pool_hdr->end) {
1784 /* note: tc->size has changed, so this works */
1785 pool_hdr->end = tc_next_chunk(tc);
1788 } else if ((tc->size - size) < 1024) {
1790 * if we call TC_INVALIDATE_SHRINK_CHUNK() here
1791 * we would need to call TC_UNDEFINE_GROW_CHUNK()
1792 * after each realloc call, which slows down
1793 * testing a lot :-(.
1795 * That is why we only mark memory as undefined here.
1797 TC_UNDEFINE_SHRINK_CHUNK(tc, size);
1799 /* do not shrink if we have less than 1k to gain */
1803 } else if (tc->size == size) {
1805 * do not change the pointer if it is exactly
1812 /* by resetting magic we catch users of the old memory */
1813 tc->flags |= TALLOC_FLAG_FREE;
/* ALWAYS_REALLOC stress path: always move pool memory elsewhere */
1817 new_ptr = tc_alloc_pool(tc, size + TC_HDR_SIZE, 0);
1818 pool_hdr->object_count--;
1820 if (new_ptr == NULL) {
1821 new_ptr = malloc(TC_HDR_SIZE+size);
1827 memcpy(new_ptr, tc, MIN(tc->size,size) + TC_HDR_SIZE);
1828 TC_INVALIDATE_FULL_CHUNK(tc);
1831 /* We're doing malloc then free here, so record the difference. */
1832 old_size = tc->size;
1834 new_ptr = malloc(size + TC_HDR_SIZE);
1836 memcpy(new_ptr, tc, MIN(tc->size, size) + TC_HDR_SIZE);
/* normal path: chunk lives inside a pool; try to grow it in place */
1842 struct talloc_chunk *pool_tc;
1843 void *next_tc = tc_next_chunk(tc);
1844 size_t old_chunk_size = TC_ALIGN16(TC_HDR_SIZE + tc->size);
1845 size_t new_chunk_size = TC_ALIGN16(TC_HDR_SIZE + size);
1846 size_t space_needed;
1848 unsigned int chunk_count = pool_hdr->object_count;
1850 pool_tc = talloc_chunk_from_pool(pool_hdr);
1851 if (!(pool_tc->flags & TALLOC_FLAG_FREE)) {
1855 if (chunk_count == 1) {
1857 * optimize for the case where 'tc' is the only
1858 * chunk in the pool.
1860 char *start = tc_pool_first_chunk(pool_hdr);
1861 space_needed = new_chunk_size;
1862 space_left = (char *)tc_pool_end(pool_hdr) - start;
1864 if (space_left >= space_needed) {
1865 size_t old_used = TC_HDR_SIZE + tc->size;
1866 size_t new_used = TC_HDR_SIZE + size;
1869 #if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
1873 * start -> tc may have
1874 * been freed and thus been marked as
1875 * VALGRIND_MEM_NOACCESS. Set it to
1876 * VALGRIND_MEM_UNDEFINED so we can
1877 * copy into it without valgrind errors.
1878 * We can't just mark
1879 * new_ptr -> new_ptr + old_used
1880 * as this may overlap on top of tc,
1881 * (which is why we use memmove, not
1882 * memcpy below) hence the MIN.
1884 size_t undef_len = MIN((((char *)tc) - ((char *)new_ptr)),old_used);
1885 VALGRIND_MAKE_MEM_UNDEFINED(new_ptr, undef_len);
/* slide the chunk to the start of the pool; regions may overlap */
1889 memmove(new_ptr, tc, old_used);
1891 tc = (struct talloc_chunk *)new_ptr;
1892 TC_UNDEFINE_GROW_CHUNK(tc, size);
1895 * first we do not align the pool pointer
1896 * because we want to invalidate the padding
1899 pool_hdr->end = new_used + (char *)new_ptr;
1900 tc_invalidate_pool(pool_hdr);
1902 /* now the aligned pointer */
1903 pool_hdr->end = new_chunk_size + (char *)new_ptr;
/* same aligned footprint: grow in place, nothing to move */
1910 if (new_chunk_size == old_chunk_size) {
1911 TC_UNDEFINE_GROW_CHUNK(tc, size);
1912 tc->flags &= ~TALLOC_FLAG_FREE;
1917 if (next_tc == pool_hdr->end) {
1919 * optimize for the case where 'tc' is the last
1920 * chunk in the pool.
1922 space_needed = new_chunk_size - old_chunk_size;
1923 space_left = tc_pool_space_left(pool_hdr);
1925 if (space_left >= space_needed) {
1926 TC_UNDEFINE_GROW_CHUNK(tc, size);
1927 tc->flags &= ~TALLOC_FLAG_FREE;
1929 pool_hdr->end = tc_next_chunk(tc);
/* in-place failed: allocate fresh space, copy, release pool slot */
1934 new_ptr = tc_alloc_pool(tc, size + TC_HDR_SIZE, 0);
1936 if (new_ptr == NULL) {
1937 new_ptr = malloc(TC_HDR_SIZE+size);
1943 memcpy(new_ptr, tc, MIN(tc->size,size) + TC_HDR_SIZE);
1945 _tc_free_poolmem(tc, __location__ "_talloc_realloc");
1949 /* We're doing realloc here, so record the difference. */
1950 old_size = tc->size;
/* plain heap chunk: defer to the system realloc */
1952 new_ptr = realloc(tc, size + TC_HDR_SIZE);
1956 if (unlikely(!new_ptr)) {
/* failure: restore the magic we poisoned at line 1813 */
1957 tc->flags &= ~TALLOC_FLAG_FREE;
1961 tc = (struct talloc_chunk *)new_ptr;
1962 tc->flags &= ~TALLOC_FLAG_FREE;
1964 tc->flags &= ~TALLOC_FLAG_POOLMEM;
/* the chunk may have moved: repair all sibling/parent/child links */
1967 tc->parent->child = tc;
1970 tc->child->parent = tc;
1974 tc->prev->next = tc;
1977 tc->next->prev = tc;
/* account the size delta against any memlimit hierarchy */
1980 if (new_size > old_size) {
1981 talloc_memlimit_grow(tc->limit, new_size - old_size);
1982 } else if (new_size < old_size) {
1983 talloc_memlimit_shrink(tc->limit, old_size - new_size);
1987 _tc_set_name_const(tc, name);
1989 return TC_PTR_FROM_CHUNK(tc);
1993 a wrapper around talloc_steal() for situations where you are moving a pointer
1994 between two structures, and want the old pointer to be set to NULL
1996 _PUBLIC_ void *_talloc_move(const void *new_ctx, const void *_pptr)
1998 const void **pptr = discard_const_p(const void *,_pptr);
1999 void *ret = talloc_steal(new_ctx, discard_const_p(void, *pptr));
/* what _talloc_total_mem_internal() should accumulate */
2004 enum talloc_mem_count_type {
/* recursive accounting over a talloc subtree; can also rewire memlimit
 * 'upper' links when old_limit/new_limit are supplied */
2010 static inline size_t _talloc_total_mem_internal(const void *ptr,
2011 enum talloc_mem_count_type type,
2012 struct talloc_memlimit *old_limit,
2013 struct talloc_memlimit *new_limit)
2016 struct talloc_chunk *c, *tc;
2025 tc = talloc_chunk_from_ptr(ptr);
2027 if (old_limit || new_limit) {
2028 if (tc->limit && tc->limit->upper == old_limit) {
2029 tc->limit->upper = new_limit;
2033 /* optimize in the memlimits case */
2034 if (type == TOTAL_MEM_LIMIT &&
2035 tc->limit != NULL &&
2036 tc->limit != old_limit &&
2037 tc->limit->parent == tc) {
2038 return tc->limit->cur_size;
/* LOOP flag guards against cycles introduced via references */
2041 if (tc->flags & TALLOC_FLAG_LOOP) {
2045 tc->flags |= TALLOC_FLAG_LOOP;
2047 if (old_limit || new_limit) {
2048 if (old_limit == tc->limit) {
2049 tc->limit = new_limit;
2054 case TOTAL_MEM_SIZE:
2055 if (likely(tc->name != TALLOC_MAGIC_REFERENCE)) {
2059 case TOTAL_MEM_BLOCKS:
2062 case TOTAL_MEM_LIMIT:
2063 if (likely(tc->name != TALLOC_MAGIC_REFERENCE)) {
2065 * Don't count memory allocated from a pool
2066 * when calculating limits. Only count the
2069 if (!(tc->flags & TALLOC_FLAG_POOLMEM)) {
2070 if (tc->flags & TALLOC_FLAG_POOL) {
2072 * If this is a pool, the allocated
2073 * size is in the pool header, and
2074 * remember to add in the prefix
2077 struct talloc_pool_hdr *pool_hdr
2078 = talloc_pool_from_chunk(tc);
2079 total = pool_hdr->poolsize +
2083 total = tc->size + TC_HDR_SIZE;
/* recurse into every child and accumulate */
2089 for (c = tc->child; c; c = c->next) {
2090 total += _talloc_total_mem_internal(TC_PTR_FROM_CHUNK(c), type,
2091 old_limit, new_limit);
2094 tc->flags &= ~TALLOC_FLAG_LOOP;
2100 return the total size of a talloc pool (subtree)
2102 _PUBLIC_ size_t talloc_total_size(const void *ptr)
2104 return _talloc_total_mem_internal(ptr, TOTAL_MEM_SIZE, NULL, NULL);
2108 return the total number of blocks in a talloc pool (subtree)
2110 _PUBLIC_ size_t talloc_total_blocks(const void *ptr)
2112 return _talloc_total_mem_internal(ptr, TOTAL_MEM_BLOCKS, NULL, NULL);
2116 return the number of external references to a pointer
2118 _PUBLIC_ size_t talloc_reference_count(const void *ptr)
2120 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
2121 struct talloc_reference_handle *h;
/* count entries in the refs list */
2124 for (h=tc->refs;h;h=h->next) {
2131 report on memory usage by all children of a pointer, giving a full tree view
2133 _PUBLIC_ void talloc_report_depth_cb(const void *ptr, int depth, int max_depth,
2134 void (*callback)(const void *ptr,
2135 int depth, int max_depth,
2137 void *private_data),
2140 struct talloc_chunk *c, *tc;
2145 if (ptr == NULL) return;
2147 tc = talloc_chunk_from_ptr(ptr);
/* already visiting this chunk higher up the stack: avoid infinite recursion */
2149 if (tc->flags & TALLOC_FLAG_LOOP) {
2153 callback(ptr, depth, max_depth, 0, private_data);
2155 if (max_depth >= 0 && depth >= max_depth) {
2159 tc->flags |= TALLOC_FLAG_LOOP;
2160 for (c=tc->child;c;c=c->next) {
/* reference handles are reported as is_ref=1, not recursed into */
2161 if (c->name == TALLOC_MAGIC_REFERENCE) {
2162 struct talloc_reference_handle *h = (struct talloc_reference_handle *)TC_PTR_FROM_CHUNK(c);
2163 callback(h->ptr, depth + 1, max_depth, 1, private_data);
2165 talloc_report_depth_cb(TC_PTR_FROM_CHUNK(c), depth + 1, max_depth, callback, private_data);
2168 tc->flags &= ~TALLOC_FLAG_LOOP;
/* callback for talloc_report_depth_cb() that pretty-prints one chunk to a FILE */
2171 static void talloc_report_depth_FILE_helper(const void *ptr, int depth, int max_depth, int is_ref, void *_f)
2173 const char *name = __talloc_get_name(ptr)
2174 struct talloc_chunk *tc;
2175 FILE *f = (FILE *)_f;
2178 fprintf(f, "%*sreference to: %s\n", depth*4, "", name);
2182 tc = talloc_chunk_from_ptr(ptr);
2183 if (tc->limit && tc->limit->parent == tc) {
2184 fprintf(f, "%*s%-30s is a memlimit context"
2185 " (max_size = %lu bytes, cur_size = %lu bytes)\n",
2188 (unsigned long)tc->limit->max_size,
2189 (unsigned long)tc->limit->cur_size);
/* depth 0: header line for the whole report */
2193 fprintf(f,"%stalloc report on '%s' (total %6lu bytes in %3lu blocks)\n",
2194 (max_depth < 0 ? "full " :""), name,
2195 (unsigned long)talloc_total_size(ptr),
2196 (unsigned long)talloc_total_blocks(ptr));
2200 fprintf(f, "%*s%-30s contains %6lu bytes in %3lu blocks (ref %d) %p\n",
2203 (unsigned long)talloc_total_size(ptr),
2204 (unsigned long)talloc_total_blocks(ptr),
2205 (int)talloc_reference_count(ptr), ptr);
2208 fprintf(f, "content: ");
2209 if (talloc_total_size(ptr)) {
2210 int tot = talloc_total_size(ptr);
2213 for (i = 0; i < tot; i++) {
/* printable ASCII bytes verbatim, anything else hex-escaped */
2214 if ((((char *)ptr)[i] > 31) && (((char *)ptr)[i] < 126)) {
2215 fprintf(f, "%c", ((char *)ptr)[i]);
/* NOTE(review): a signed char passed to %02x sign-extends for bytes
 * >= 0x80 and prints more than two hex digits — verify intent */
2217 fprintf(f, "~%02x", ((char *)ptr)[i]);
2226 report on memory usage by all children of a pointer, giving a full tree view
2228 _PUBLIC_ void talloc_report_depth_file(const void *ptr, int depth, int max_depth, FILE *f)
2231 talloc_report_depth_cb(ptr, depth, max_depth, talloc_report_depth_FILE_helper, f);
2237 report on memory usage by all children of a pointer, giving a full tree view
2239 _PUBLIC_ void talloc_report_full(const void *ptr, FILE *f)
/* -1 means unlimited depth */
2241 talloc_report_depth_file(ptr, 0, -1, f);
2245 report on memory usage by all children of a pointer
2247 _PUBLIC_ void talloc_report(const void *ptr, FILE *f)
/* depth 1: immediate children only */
2249 talloc_report_depth_file(ptr, 0, 1, f);
2253 report on any memory hanging off the null context
2255 static void talloc_report_null(void)
2257 if (talloc_total_size(null_context) != 0) {
2258 talloc_report(null_context, stderr);
2263 report on any memory hanging off the null context
2265 static void talloc_report_null_full(void)
2267 if (talloc_total_size(null_context) != 0) {
2268 talloc_report_full(null_context, stderr);
2273 enable tracking of the NULL context
2275 _PUBLIC_ void talloc_enable_null_tracking(void)
2277 if (null_context == NULL) {
2278 null_context = _talloc_named_const(NULL, 0, "null_context");
/* an existing autofree context moves under the new null context */
2279 if (autofree_context != NULL) {
2280 talloc_reparent(NULL, null_context, autofree_context);
2286 enable tracking of the NULL context, not moving the autofree context
2287 into the NULL context. This is needed for the talloc testsuite
2289 _PUBLIC_ void talloc_enable_null_tracking_no_autofree(void)
2291 if (null_context == NULL) {
2292 null_context = _talloc_named_const(NULL, 0, "null_context");
2297 disable tracking of the NULL context
2299 _PUBLIC_ void talloc_disable_null_tracking(void)
2301 if (null_context != NULL) {
2302 /* we have to move any children onto the real NULL
2304 struct talloc_chunk *tc, *tc2;
2305 tc = talloc_chunk_from_ptr(null_context);
/* detach every child so freeing null_context below does not free them */
2306 for (tc2 = tc->child; tc2; tc2=tc2->next) {
2307 if (tc2->parent == tc) tc2->parent = NULL;
2308 if (tc2->prev == tc) tc2->prev = NULL;
2310 for (tc2 = tc->next; tc2; tc2=tc2->next) {
2311 if (tc2->parent == tc) tc2->parent = NULL;
2312 if (tc2->prev == tc) tc2->prev = NULL;
2317 talloc_free(null_context);
2318 null_context = NULL;
2322 enable leak reporting on exit
2324 _PUBLIC_ void talloc_enable_leak_report(void)
2326 talloc_enable_null_tracking();
2327 atexit(talloc_report_null);
2331 enable full leak reporting on exit
2333 _PUBLIC_ void talloc_enable_leak_report_full(void)
2335 talloc_enable_null_tracking();
2336 atexit(talloc_report_null_full);
2340 talloc and zero memory.
2342 _PUBLIC_ void *_talloc_zero(const void *ctx, size_t size, const char *name)
2344 void *p = _talloc_named_const(ctx, size, name);
2347 memset(p, '\0', size);
2354 memdup with a talloc.
2356 _PUBLIC_ void *_talloc_memdup(const void *t, const void *p, size_t size, const char *name)
2358 void *newp = _talloc_named_const(t, size, name);
2361 memcpy(newp, p, size);
/* allocate len+1 bytes, copy len bytes of p, NUL-terminate, and name
 * the chunk after its own contents */
2367 static inline char *__talloc_strlendup(const void *t, const char *p, size_t len)
2370 struct talloc_chunk *tc;
2372 ret = (char *)__talloc(t, len + 1, &tc);
2373 if (unlikely(!ret)) return NULL;
2375 memcpy(ret, p, len);
2378 _tc_set_name_const(tc, ret);
2383 strdup with a talloc
2385 _PUBLIC_ char *talloc_strdup(const void *t, const char *p)
2387 if (unlikely(!p)) return NULL;
2388 return __talloc_strlendup(t, p, strlen(p));
2392 strndup with a talloc
2394 _PUBLIC_ char *talloc_strndup(const void *t, const char *p, size_t n)
2396 if (unlikely(!p)) return NULL;
2397 return __talloc_strlendup(t, p, strnlen(p, n));
/* grow s to slen+alen+1 bytes and append alen bytes of a plus a NUL */
2400 static inline char *__talloc_strlendup_append(char *s, size_t slen,
2401 const char *a, size_t alen)
2405 ret = talloc_realloc(NULL, s, char, slen + alen + 1);
2406 if (unlikely(!ret)) return NULL;
2408 /* append the string and the trailing \0 */
2409 memcpy(&ret[slen], a, alen);
2412 _tc_set_name_const(talloc_chunk_from_ptr(ret), ret);
2417 * Appends at the end of the string.
2419 _PUBLIC_ char *talloc_strdup_append(char *s, const char *a)
/* s == NULL degenerates to a plain strdup */
2422 return talloc_strdup(NULL, a);
2429 return __talloc_strlendup_append(s, strlen(s), a, strlen(a));
2433 * Appends at the end of the talloc'ed buffer,
2434 * not the end of the string.
2436 _PUBLIC_ char *talloc_strdup_append_buffer(char *s, const char *a)
2441 return talloc_strdup(NULL, a);
/* buffer variant: append after the allocated size, not the first NUL */
2448 slen = talloc_get_size(s);
2449 if (likely(slen > 0)) {
2453 return __talloc_strlendup_append(s, slen, a, strlen(a));
2457 * Appends at the end of the string.
2459 _PUBLIC_ char *talloc_strndup_append(char *s, const char *a, size_t n)
2462 return talloc_strndup(NULL, a, n);
2469 return __talloc_strlendup_append(s, strlen(s), a, strnlen(a, n));
2473 * Appends at the end of the talloc'ed buffer,
2474 * not the end of the string.
2476 _PUBLIC_ char *talloc_strndup_append_buffer(char *s, const char *a, size_t n)
2481 return talloc_strndup(NULL, a, n);
2488 slen = talloc_get_size(s);
2489 if (likely(slen > 0)) {
2493 return __talloc_strlendup_append(s, slen, a, strnlen(a, n));
2496 #ifndef HAVE_VA_COPY
2497 #ifdef HAVE___VA_COPY
2498 #define va_copy(dest, src) __va_copy(dest, src)
2500 #define va_copy(dest, src) (dest) = (src)
/* vasprintf into a fresh talloc chunk; returns the chunk header so
 * callers can also set its name */
2504 static struct talloc_chunk *_vasprintf_tc(const void *t,
2506 va_list ap) PRINTF_ATTRIBUTE(2,0);
2508 static struct talloc_chunk *_vasprintf_tc(const void *t,
2515 struct talloc_chunk *tc;
2518 /* this call looks strange, but it makes it work on older solaris boxes */
/* first pass sizes the result (vsnprintf on a copied va_list) */
2520 len = vsnprintf(buf, sizeof(buf), fmt, ap2);
2522 if (unlikely(len < 0)) {
2526 ret = (char *)__talloc(t, len+1, &tc);
2527 if (unlikely(!ret)) return NULL;
/* short result already fits in the stack buffer: just copy it */
2529 if (len < sizeof(buf)) {
2530 memcpy(ret, buf, len+1);
2533 vsnprintf(ret, len+1, fmt, ap2);
2537 _tc_set_name_const(tc, ret);
2541 _PUBLIC_ char *talloc_vasprintf(const void *t, const char *fmt, va_list ap)
2543 struct talloc_chunk *tc = _vasprintf_tc(t, fmt, ap);
2547 return TC_PTR_FROM_CHUNK(tc);
2552 Perform string formatting, and return a pointer to newly allocated
2553 memory holding the result, inside a memory pool.
2555 _PUBLIC_ char *talloc_asprintf(const void *t, const char *fmt, ...)
2561 ret = talloc_vasprintf(t, fmt, ap);
/* realloc s and append the formatted output of fmt/ap at offset slen */
2566 static inline char *__talloc_vaslenprintf_append(char *s, size_t slen,
2567 const char *fmt, va_list ap)
2568 PRINTF_ATTRIBUTE(3,0);
2570 static inline char *__talloc_vaslenprintf_append(char *s, size_t slen,
2571 const char *fmt, va_list ap)
/* size the formatted output with a 1-byte dummy buffer */
2578 alen = vsnprintf(&c, 1, fmt, ap2);
2582 /* Either the vsnprintf failed or the format resulted in
2583 * no characters being formatted. In the former case, we
2584 * ought to return NULL, in the latter we ought to return
2585 * the original string. Most current callers of this
2586 * function expect it to never return NULL.
2591 s = talloc_realloc(NULL, s, char, slen + alen + 1);
2592 if (!s) return NULL;
2595 vsnprintf(s + slen, alen + 1, fmt, ap2);
2598 _tc_set_name_const(talloc_chunk_from_ptr(s), s);
2603 * Realloc @p s to append the formatted result of @p fmt and @p ap,
2604 * and return @p s, which may have moved. Good for gradually
2605 * accumulating output into a string buffer. Appends at the end
2608 _PUBLIC_ char *talloc_vasprintf_append(char *s, const char *fmt, va_list ap)
2611 return talloc_vasprintf(NULL, fmt, ap);
2614 return __talloc_vaslenprintf_append(s, strlen(s), fmt, ap);
2618 * Realloc @p s to append the formatted result of @p fmt and @p ap,
2619 * and return @p s, which may have moved. Always appends at the
2620 * end of the talloc'ed buffer, not the end of the string.
2622 _PUBLIC_ char *talloc_vasprintf_append_buffer(char *s, const char *fmt, va_list ap)
2627 return talloc_vasprintf(NULL, fmt, ap);
/* buffer variant: offset is the allocated size, not the first NUL */
2630 slen = talloc_get_size(s);
2631 if (likely(slen > 0)) {
2635 return __talloc_vaslenprintf_append(s, slen, fmt, ap);
2639 Realloc @p s to append the formatted result of @p fmt and return @p
2640 s, which may have moved. Good for gradually accumulating output
2641 into a string buffer.
2643 _PUBLIC_ char *talloc_asprintf_append(char *s, const char *fmt, ...)
2648 s = talloc_vasprintf_append(s, fmt, ap);
2654 Realloc @p s to append the formatted result of @p fmt and return @p
2655 s, which may have moved. Good for gradually accumulating output
2658 _PUBLIC_ char *talloc_asprintf_append_buffer(char *s, const char *fmt, ...)
2663 s = talloc_vasprintf_append_buffer(s, fmt, ap);
2669 alloc an array, checking for integer overflow in the array size
2671 _PUBLIC_ void *_talloc_array(const void *ctx, size_t el_size, unsigned count, const char *name)
2673 if (count >= MAX_TALLOC_SIZE/el_size) {
2676 return _talloc_named_const(ctx, el_size * count, name);
2680 alloc an zero array, checking for integer overflow in the array size
2682 _PUBLIC_ void *_talloc_zero_array(const void *ctx, size_t el_size, unsigned count, const char *name)
2684 if (count >= MAX_TALLOC_SIZE/el_size) {
2687 return _talloc_zero(ctx, el_size * count, name);
2691 realloc an array, checking for integer overflow in the array size
2693 _PUBLIC_ void *_talloc_realloc_array(const void *ctx, void *ptr, size_t el_size, unsigned count, const char *name)
2695 if (count >= MAX_TALLOC_SIZE/el_size) {
2698 return _talloc_realloc(ctx, ptr, el_size * count, name);
2702 a function version of talloc_realloc(), so it can be passed as a function pointer
2703 to libraries that want a realloc function (a realloc function encapsulates
2704 all the basic capabilities of an allocation library, which is why this is useful)
2706 _PUBLIC_ void *talloc_realloc_fn(const void *context, void *ptr, size_t size)
2708 return _talloc_realloc(context, ptr, size, NULL);
/* destructor: clear the global so a later talloc_autofree_context()
 * call creates a fresh context instead of returning freed memory */
2712 static int talloc_autofree_destructor(void *ptr)
2714 autofree_context = NULL;
/* atexit() hook that frees the autofree context */
2718 static void talloc_autofree(void)
2720 talloc_free(autofree_context);
2724 return a context which will be auto-freed on exit
2725 this is useful for reducing the noise in leak reports
2727 _PUBLIC_ void *talloc_autofree_context(void)
2729 if (autofree_context == NULL) {
2730 autofree_context = _talloc_named_const(NULL, 0, "autofree_context");
2731 talloc_set_destructor(autofree_context, talloc_autofree_destructor);
2732 atexit(talloc_autofree);
2734 return autofree_context;
/* return the requested size of a talloc chunk (header excluded) */
2737 _PUBLIC_ size_t talloc_get_size(const void *context)
2739 struct talloc_chunk *tc;
2741 if (context == NULL) {
2745 tc = talloc_chunk_from_ptr(context);
2751 find a parent of this context that has the given name, if any
2753 _PUBLIC_ void *talloc_find_parent_byname(const void *context, const char *name)
2755 struct talloc_chunk *tc;
2757 if (context == NULL) {
2761 tc = talloc_chunk_from_ptr(context);
2763 if (tc->name && strcmp(tc->name, name) == 0) {
2764 return TC_PTR_FROM_CHUNK(tc);
/* walk to the list head, where the parent pointer is stored */
2766 while (tc && tc->prev) tc = tc->prev;
2775 show the parentage of a context
2777 _PUBLIC_ void talloc_show_parents(const void *context, FILE *file)
2779 struct talloc_chunk *tc;
2781 if (context == NULL) {
2782 fprintf(file, "talloc no parents for NULL\n");
2786 tc = talloc_chunk_from_ptr(context);
2787 fprintf(file, "talloc parents of '%s'\n", __talloc_get_name(context));
2789 fprintf(file, "\t'%s'\n", __talloc_get_name(TC_PTR_FROM_CHUNK(tc)));
/* walk to the list head, where the parent pointer is stored */
2790 while (tc && tc->prev) tc = tc->prev;
2799 return 1 if ptr is a parent of context
/* depth bounds the upward walk to guard against corrupted loops */
2801 static int _talloc_is_parent(const void *context, const void *ptr, int depth)
2803 struct talloc_chunk *tc;
2805 if (context == NULL) {
2809 tc = talloc_chunk_from_ptr(context);
2814 if (TC_PTR_FROM_CHUNK(tc) == ptr) return 1;
2815 while (tc && tc->prev) tc = tc->prev;
2825 return 1 if ptr is a parent of context
2827 _PUBLIC_ int talloc_is_parent(const void *context, const void *ptr)
2829 return _talloc_is_parent(context, ptr, TALLOC_MAX_DEPTH);
2833 return the total size of memory used by this context and all children
2835 static inline size_t _talloc_total_limit_size(const void *ptr,
2836 struct talloc_memlimit *old_limit,
2837 struct talloc_memlimit *new_limit)
2839 return _talloc_total_mem_internal(ptr, TOTAL_MEM_LIMIT,
2840 old_limit, new_limit);
/* true if 'size' more bytes fit under every limit in the chain */
2843 static inline bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size)
2845 struct talloc_memlimit *l;
2847 for (l = limit; l != NULL; l = l->upper) {
/* max_size == 0 means "unlimited" at this level */
2848 if (l->max_size != 0 &&
2849 ((l->max_size <= l->cur_size) ||
2850 (l->max_size - l->cur_size < size))) {
2859 Update memory limits when freeing a talloc_chunk.
2861 static void tc_memlimit_update_on_free(struct talloc_chunk *tc)
2863 size_t limit_shrink_size;
2870 * Pool entries don't count. Only the pools
2871 * themselves are counted as part of the memory
2872 * limits. Note that this also takes care of
2873 * nested pools which have both flags
2874 * TALLOC_FLAG_POOLMEM|TALLOC_FLAG_POOL set.
2876 if (tc->flags & TALLOC_FLAG_POOLMEM) {
2881 * If we are part of a memory limited context hierarchy
2882 * we need to subtract the memory used from the counters
2885 limit_shrink_size = tc->size+TC_HDR_SIZE;
2888 * If we're deallocating a pool, take into
2889 * account the prefix size added for the pool.
2892 if (tc->flags & TALLOC_FLAG_POOL) {
2893 limit_shrink_size += TP_HDR_SIZE;
2896 talloc_memlimit_shrink(tc->limit, limit_shrink_size);
/* this chunk owned the limit itself: detach it */
2898 if (tc->limit->parent == tc) {
2906 Increase memory limit accounting after a malloc/realloc.
2908 static void talloc_memlimit_grow(struct talloc_memlimit *limit,
2911 struct talloc_memlimit *l;
2913 for (l = limit; l != NULL; l = l->upper) {
2914 size_t new_cur_size = l->cur_size + size;
/* wrap-around means the accounting is corrupt: abort loudly */
2915 if (new_cur_size < l->cur_size) {
2916 talloc_abort("logic error in talloc_memlimit_grow\n");
2919 l->cur_size = new_cur_size;
2924 Decrease memory limit accounting after a free/realloc.
2926 static void talloc_memlimit_shrink(struct talloc_memlimit *limit,
2929 struct talloc_memlimit *l;
2931 for (l = limit; l != NULL; l = l->upper) {
/* going below zero means the accounting is corrupt: abort loudly */
2932 if (l->cur_size < size) {
2933 talloc_abort("logic error in talloc_memlimit_shrink\n");
2936 l->cur_size = l->cur_size - size;
2940 _PUBLIC_ int talloc_set_memlimit(const void *ctx, size_t max_size)
2942 struct talloc_chunk *tc = talloc_chunk_from_ptr(ctx);
2943 struct talloc_memlimit *orig_limit;
2944 struct talloc_memlimit *limit = NULL;
2946 if (tc->limit && tc->limit->parent == tc) {
2947 tc->limit->max_size = max_size;
2950 orig_limit = tc->limit;
2952 limit = malloc(sizeof(struct talloc_memlimit));
2953 if (limit == NULL) {
2957 limit->max_size = max_size;
2958 limit->cur_size = _talloc_total_limit_size(ctx, tc->limit, limit);
2961 limit->upper = orig_limit;
2963 limit->upper = NULL;