2 * Wireshark memory management and garbage collection functions
7 * Wireshark - Network traffic analyzer
8 * By Gerald Combs <gerald@wireshark.org>
9 * Copyright 1998 Gerald Combs
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version 2
14 * of the License, or (at your option) any later version.
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
36 #ifdef HAVE_SYS_TIME_H
50 #include <windows.h> /* VirtualAlloc, VirtualProtect */
51 #include <process.h> /* getpid */
54 /* Print out statistics about our memory allocations? */
55 /*#define SHOW_EMEM_STATS*/
57 /* Do we want to use guardpages? if available */
58 #define WANT_GUARD_PAGES 1
60 #ifdef WANT_GUARD_PAGES
61 /* Add guard pages at each end of our allocated memory */
62 #if defined(HAVE_SYSCONF) && defined(HAVE_MMAP) && defined(HAVE_MPROTECT) && defined(HAVE_STDINT_H)
64 #ifdef HAVE_SYS_TYPES_H
65 #include <sys/types.h>
68 #if defined(MAP_ANONYMOUS)
69 #define ANON_PAGE_MODE (MAP_ANONYMOUS|MAP_PRIVATE)
70 #elif defined(MAP_ANON)
71 #define ANON_PAGE_MODE (MAP_ANON|MAP_PRIVATE)
73 #define ANON_PAGE_MODE (MAP_PRIVATE) /* have to map /dev/zero */
78 static int dev_zero_fd;
79 #define ANON_FD dev_zero_fd
83 #define USE_GUARD_PAGES 1
87 /* When required, allocate more memory from the OS in this size chunks */
88 #define EMEM_PACKET_CHUNK_SIZE (10 * 1024 * 1024)
90 /* The canary between allocations is at least 8 bytes and up to 16 bytes to
91 * allow future allocations to be 4- or 8-byte aligned.
92 * All but the last byte of the canary are randomly generated; the last byte is
93 * NULL to separate the canary and the pointer to the next canary.
95 * For example, if the allocation is a multiple of 8 bytes, the canary and
96 * pointer would look like:
97 * |0|1|2|3|4|5|6|7||0|1|2|3|4|5|6|7|
98 * |c|c|c|c|c|c|c|0||p|p|p|p|p|p|p|p| (64-bit), or:
99 * |c|c|c|c|c|c|c|0||p|p|p|p| (32-bit)
101 * If the allocation was, for example, 12 bytes, the canary would look like:
102 * |0|1|2|3|4|5|6|7||0|1|2|3|4|5|6|7|
103 * [...]|a|a|a|a|c|c|c|c||c|c|c|c|c|c|c|0| (followed by the pointer)
105 #define EMEM_CANARY_SIZE 8
106 #define EMEM_CANARY_DATA_SIZE (EMEM_CANARY_SIZE * 2 - 1)
/* NOTE(review): this file is a sampled extract; original line numbers are
 * embedded in each line and some lines are missing between visible ones. */
/* One slab of pool memory obtained from the OS.  Chunks are linked into the
 * allocator's free/used lists via 'next'.  The *_init fields remember the
 * usable size and starting offset as computed at creation time (past any
 * leading guard page) so a chunk can be reset wholesale on free_all, while
 * amount_free/free_offset track the current bump-allocation position.
 * (The buf and canary_last members are not visible in this sampled view.) */
108 typedef struct _emem_chunk_t {
109 struct _emem_chunk_t *next;
111 unsigned int amount_free_init;
112 unsigned int amount_free;
113 unsigned int free_offset_init;
114 unsigned int free_offset;
/* Per-pool allocator state.  Two instances exist: ep_packet_mem
 * (packet lifetime) and se_packet_mem (capture-session lifetime).
 * memory_alloc is set at init time to either the chunked allocator or the
 * one-g_malloc-per-object fallback, depending on the debug flags below. */
118 typedef struct _emem_header_t {
119 emem_chunk_t *free_list;
120 emem_chunk_t *used_list;
122 emem_tree_t *trees; /* only used by se_mem allocator */
124 guint8 canary[EMEM_CANARY_DATA_SIZE];
125 void *(*memory_alloc)(size_t size, struct _emem_header_t *);
128 * Tools like Valgrind and ElectricFence don't work well with memchunks.
129 * Export the following environment variables to make {ep|se}_alloc() allocate each
130 * object individually.
132 * WIRESHARK_DEBUG_EP_NO_CHUNKS
133 * WIRESHARK_DEBUG_SE_NO_CHUNKS
135 gboolean debug_use_chunks;
137 /* Do we want to use canaries?
138 * Export the following environment variables to disable/enable canaries
140 * WIRESHARK_DEBUG_EP_NO_CANARY
141 * For SE memory use of canary is default off as the memory overhead
143 * WIRESHARK_DEBUG_SE_USE_CANARY
145 gboolean debug_use_canary;
147 /* Do we want to verify no one is using a pointer to an ep_ or se_
148 * allocated thing where they shouldn't be?
150 * Export WIRESHARK_EP_VERIFY_POINTERS or WIRESHARK_SE_VERIFY_POINTERS
153 gboolean debug_verify_pointers;
/* The two allocator pools: ep_ = per-packet lifetime, se_ = per-capture. */
157 static emem_header_t ep_packet_mem;
158 static emem_header_t se_packet_mem;
161 * Memory scrubbing is expensive but can be useful to ensure we don't:
162 * - use memory before initializing it
163 * - use memory after freeing it
164 * Export WIRESHARK_DEBUG_SCRUB_MEMORY to turn it on.
166 static gboolean debug_use_memory_scrubber = FALSE;
/* Platform page-size / version info used to place guard pages. */
169 static SYSTEM_INFO sysinfo;
170 static OSVERSIONINFO versinfo;
172 #elif defined(USE_GUARD_PAGES)
173 static intptr_t pagesize;
174 #endif /* _WIN32 / USE_GUARD_PAGES */
/* Forward declarations for the two allocation back-ends. */
176 static void *emem_alloc_chunk(size_t size, emem_header_t *mem);
177 static void *emem_alloc_glib(size_t size, emem_header_t *mem);
180 * Set a canary value to be placed between memchunks.
/* Fills 'canary' with EMEM_CANARY_DATA_SIZE random bytes in the range
 * 1..0xFF (never zero, since a zero byte terminates the canary on the
 * allocation side).  The GRand state is created lazily on first call and
 * deliberately kept for the life of the process. */
183 emem_canary_init(guint8 *canary)
186 static GRand *rand_state = NULL;
188 if (rand_state == NULL) {
189 rand_state = g_rand_new();
191 for (i = 0; i < EMEM_CANARY_DATA_SIZE; i ++) {
192 canary[i] = (guint8) g_rand_int_range(rand_state, 1, 0x100);
/* Walk one canary in the chunk's canary chain.  'mem_canary' is the pool's
 * expected canary pattern; 'canary' points at the canary bytes written after
 * an allocation.  Verifies the first EMEM_CANARY_SIZE-1 bytes, then scans for
 * the terminating NUL, reads the pointer to the previous canary stored right
 * after it, and (if 'len' is non-NULL) reports the total canary+pointer size.
 * NOTE(review): the sampled view omits the return statements; callers treat
 * (void *)-1 as "corrupted" -- presumably returned on a mismatch. */
198 emem_canary_next(guint8 *mem_canary, guint8 *canary, int *len)
203 for (i = 0; i < EMEM_CANARY_SIZE-1; i++)
204 if (mem_canary[i] != canary[i])
207 for (; i < EMEM_CANARY_DATA_SIZE; i++) {
208 if (canary[i] == '\0') {
209 memcpy(&ptr, &canary[i+1], sizeof(void *));
212 *len = i + 1 + sizeof(void *);
216 if (mem_canary[i] != canary[i])
224 * Given an allocation size, return the amount of room needed for the canary
225 * (with a minimum of 8 bytes) while using the canary to pad to an 8-byte
/* If the allocation already ends on an 8-byte boundary the pad is exactly
 * EMEM_CANARY_SIZE; otherwise the 1..7 alignment pad is topped up by a full
 * EMEM_CANARY_SIZE, yielding 9..15 bytes -- matching the 8-to-15-byte canary
 * layout documented at the top of the file. */
229 emem_canary_pad (size_t allocation)
233 pad = EMEM_CANARY_SIZE - (allocation % EMEM_CANARY_SIZE);
234 if (pad < EMEM_CANARY_SIZE)
235 pad += EMEM_CANARY_SIZE;
240 /* used for debugging canaries, will block */
241 #ifdef DEBUG_INTENSE_CANARY_CHECKS
242 gboolean intense_canary_checking = FALSE;
244 /* used to intensively check ep canaries: walks every canary chain in the
 * free list and aborts with the previous and current call sites if any
 * canary has been stomped.  'there' records where the previous check ran
 * so corruption can be bracketed between two calls. */
247 ep_check_canary_integrity(const char* fmt, ...)
250 static gchar there[128] = {
251 'L','a','u','n','c','h',0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
252 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
253 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
254 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 };
256 emem_chunk_t* npc = NULL;
258 if (! intense_canary_checking ) return;
261 g_vsnprintf(here, sizeof(here), fmt, ap);
264 for (npc = ep_packet_mem.free_list; npc != NULL; npc = npc->next) {
265 void *canary_next = npc->canary_last;
267 while (canary_next != NULL) {
268 canary_next = emem_canary_next(ep_packet_mem.canary, canary_next, NULL);
269 /* XXX, check if canary_next is inside allocated memory? */
271 if (canary_next == (void *) -1)
272 g_error("Per-packet memory corrupted\nbetween: %s\nand: %s", there, here);
/* remember this call site for the next invocation's error message */
276 g_strlcpy(there, here, sizeof(there));
/* Common per-pool initialization: seed the pool's canary (if canaries are
 * enabled for it) and pick the allocation back-end -- chunked slabs, or
 * individual g_malloc()s when chunks are disabled for debugging. */
281 emem_init_chunk(emem_header_t *mem)
283 if (mem->debug_use_canary)
284 emem_canary_init(mem->canary);
286 if (mem->debug_use_chunks)
287 mem->memory_alloc = emem_alloc_chunk;
289 mem->memory_alloc = emem_alloc_glib;
293 /* Initialize the packet-lifetime memory allocation pool.
294 * This function should be called only once when Wireshark or TShark starts
/* For the EP pool, chunks and canaries default ON and are switched off via
 * the WIRESHARK_DEBUG_EP_NO_* environment variables; canaries require
 * chunks.  Pointer verification defaults OFF and is switched on by
 * WIRESHARK_EP_VERIFY_POINTERS. */
300 ep_packet_mem.free_list=NULL;
301 ep_packet_mem.used_list=NULL;
302 ep_packet_mem.trees=NULL; /* not used by this allocator */
304 ep_packet_mem.debug_use_chunks = (getenv("WIRESHARK_DEBUG_EP_NO_CHUNKS") == NULL);
305 ep_packet_mem.debug_use_canary = ep_packet_mem.debug_use_chunks && (getenv("WIRESHARK_DEBUG_EP_NO_CANARY") == NULL);
306 ep_packet_mem.debug_verify_pointers = (getenv("WIRESHARK_EP_VERIFY_POINTERS") != NULL);
308 #ifdef DEBUG_INTENSE_CANARY_CHECKS
309 intense_canary_checking = (getenv("WIRESHARK_DEBUG_EP_INTENSE_CANARY") != NULL);
312 emem_init_chunk(&ep_packet_mem);
315 /* Initialize the capture-lifetime memory allocation pool.
316 * This function should be called only once when Wireshark or TShark starts
/* Unlike the EP pool, SE canaries default OFF (memory overhead) and are
 * opted IN via WIRESHARK_DEBUG_SE_USE_CANARY -- note the inverted sense of
 * the env-var test versus ep_init_chunk(). */
322 se_packet_mem.free_list = NULL;
323 se_packet_mem.used_list = NULL;
324 se_packet_mem.trees = NULL;
326 se_packet_mem.debug_use_chunks = (getenv("WIRESHARK_DEBUG_SE_NO_CHUNKS") == NULL);
327 se_packet_mem.debug_use_canary = se_packet_mem.debug_use_chunks && (getenv("WIRESHARK_DEBUG_SE_USE_CANARY") != NULL);
328 se_packet_mem.debug_verify_pointers = (getenv("WIRESHARK_SE_VERIFY_POINTERS") != NULL);
330 emem_init_chunk(&se_packet_mem);
333 /* Initialize all the allocators here.
334 * This function should be called only once when Wireshark or TShark starts
343 if (getenv("WIRESHARK_DEBUG_SCRUB_MEMORY"))
344 debug_use_memory_scrubber = TRUE;
347 /* Set up our guard page info for Win32 */
348 GetSystemInfo(&sysinfo);
349 pagesize = sysinfo.dwPageSize;
351 /* calling GetVersionEx using the OSVERSIONINFO structure.
352 * OSVERSIONINFOEX requires Win NT4 with SP6 or newer NT Versions.
353 * OSVERSIONINFOEX will fail on Win9x and older NT Versions.
355 * http://msdn.microsoft.com/library/en-us/sysinfo/base/getversionex.asp
356 * http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfo_str.asp
357 * http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfoex_str.asp
359 versinfo.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
360 GetVersionEx(&versinfo);
362 #elif defined(USE_GUARD_PAGES)
363 pagesize = sysconf(_SC_PAGESIZE);
/* with no MAP_ANON/MAP_ANONYMOUS we must map /dev/zero for anonymous pages */
365 dev_zero_fd = ws_open("/dev/zero", O_RDWR);
366 g_assert(dev_zero_fd != -1);
368 #endif /* _WIN32 / USE_GUARD_PAGES */
371 #ifdef SHOW_EMEM_STATS
372 #define NUM_ALLOC_DIST 10
/* Histogram of SE allocation sizes (powers of two from 32) and count of
 * chunk creations, maintained by emem_alloc_chunk/emem_create_chunk. */
373 static guint allocations[NUM_ALLOC_DIST] = { 0 };
374 static guint total_no_chunks = 0;
/* Body of print_alloc_stats() (its header line is not visible in this
 * sampled view).  Dumps EP- and SE-pool usage to stderr: chunk counts,
 * buffer/header space, canary overhead, waste percentages, and the SE
 * allocation-size histogram.  Only meaningful with SHOW_EMEM_STATS. */
379 guint num_chunks = 0;
380 guint num_allocs = 0;
381 guint total_used = 0;
382 guint total_allocation = 0;
383 guint total_free = 0;
384 guint used_for_canaries = 0;
388 guint total_space_allocated_from_os, total_space_wasted;
389 gboolean ep_stat=TRUE;
391 fprintf(stderr, "\n-------- EP allocator statistics --------\n");
392 fprintf(stderr, "%s chunks, %s canaries, %s memory scrubber\n",
393 ep_packet_mem.debug_use_chunks ? "Using" : "Not using",
394 ep_packet_mem.debug_use_canary ? "using" : "not using",
395 debug_use_memory_scrubber ? "using" : "not using");
397 if (! (ep_packet_mem.free_list || !ep_packet_mem.used_list)) {
398 fprintf(stderr, "No memory allocated\n");
401 if (ep_packet_mem.debug_use_chunks && ep_stat) {
402 /* Nothing interesting without chunks */
403 /* Only look at the used_list since those chunks are fully
404 * used. Looking at the free list would skew our view of what
407 for (chunk = ep_packet_mem.used_list; chunk; chunk = chunk->next) {
409 total_used += (chunk->amount_free_init - chunk->amount_free);
410 total_allocation += chunk->amount_free_init;
411 total_free += chunk->amount_free;
413 if (num_chunks > 0) {
414 fprintf (stderr, "\n");
415 fprintf (stderr, "\n---- Buffer space ----\n");
416 fprintf (stderr, "\tChunk allocation size: %10u\n", EMEM_PACKET_CHUNK_SIZE);
417 fprintf (stderr, "\t* Number of chunks: %10u\n", num_chunks);
418 fprintf (stderr, "\t-------------------------------------------\n");
419 fprintf (stderr, "\t= %u (%u including guard pages) total space used for buffers\n",
420 total_allocation, EMEM_PACKET_CHUNK_SIZE * num_chunks);
421 fprintf (stderr, "\t-------------------------------------------\n");
422 total_space_allocated_from_os = total_allocation
423 + sizeof(emem_chunk_t) * num_chunks;
424 fprintf (stderr, "Total allocated from OS: %u\n\n",
425 total_space_allocated_from_os);
427 fprintf (stderr, "No fully used chunks, nothing to do\n");
/* reset the accumulators and repeat the accounting for the SE pool */
433 total_allocation = 0;
435 used_for_canaries = 0;
439 fprintf(stderr, "\n-------- SE allocator statistics --------\n");
440 fprintf(stderr, "Total number of chunk allocations %u\n",
442 fprintf(stderr, "%s chunks, %s canaries\n",
443 se_packet_mem.debug_use_chunks ? "Using" : "Not using",
444 se_packet_mem.debug_use_canary ? "using" : "not using");
446 if (! (se_packet_mem.free_list || !se_packet_mem.used_list)) {
447 fprintf(stderr, "No memory allocated\n");
451 if (!se_packet_mem.debug_use_chunks )
452 return; /* Nothing interesting without chunks?? */
454 /* Only look at the used_list since those chunks are fully used.
455 * Looking at the free list would skew our view of what we have wasted.
457 for (chunk = se_packet_mem.used_list; chunk; chunk = chunk->next) {
459 total_used += (chunk->amount_free_init - chunk->amount_free);
460 total_allocation += chunk->amount_free_init;
461 total_free += chunk->amount_free;
/* walk the canary chain to measure per-allocation canary overhead */
463 if (se_packet_mem.debug_use_canary){
464 void *ptr = chunk->canary_last;
467 while (ptr != NULL) {
468 ptr = emem_canary_next(se_packet_mem.canary, ptr, &len);
470 if (ptr == (void *) -1)
471 g_error("Memory corrupted");
472 used_for_canaries += len;
477 if (num_chunks == 0) {
479 fprintf (stderr, "No fully used chunks, nothing to do\n");
483 fprintf (stderr, "\n");
484 fprintf (stderr, "---------- Allocations from the OS ----------\n");
485 fprintf (stderr, "---- Headers ----\n");
486 fprintf (stderr, "\t( Chunk header size: %10lu\n",
487 sizeof(emem_chunk_t));
488 fprintf (stderr, "\t* Number of chunks: %10u\n", num_chunks);
489 fprintf (stderr, "\t-------------------------------------------\n");
491 total_headers = sizeof(emem_chunk_t) * num_chunks;
492 fprintf (stderr, "\t= %u bytes used for headers\n", total_headers);
493 fprintf (stderr, "\n---- Buffer space ----\n");
494 fprintf (stderr, "\tChunk allocation size: %10u\n",
495 EMEM_PACKET_CHUNK_SIZE);
496 fprintf (stderr, "\t* Number of chunks: %10u\n", num_chunks);
497 fprintf (stderr, "\t-------------------------------------------\n");
498 fprintf (stderr, "\t= %u (%u including guard pages) bytes used for buffers\n",
499 total_allocation, EMEM_PACKET_CHUNK_SIZE * num_chunks);
500 fprintf (stderr, "\t-------------------------------------------\n");
501 total_space_allocated_from_os = (EMEM_PACKET_CHUNK_SIZE * num_chunks)
503 fprintf (stderr, "Total bytes allocated from the OS: %u\n\n",
504 total_space_allocated_from_os);
506 for (i = 0; i < NUM_ALLOC_DIST; i++)
507 num_allocs += allocations[i];
509 fprintf (stderr, "---------- Allocations from the SE pool ----------\n");
510 fprintf (stderr, " Number of SE allocations: %10u\n",
512 fprintf (stderr, " Bytes used (incl. canaries): %10u\n",
514 fprintf (stderr, " Bytes used for canaries: %10u\n",
516 fprintf (stderr, "Bytes unused (wasted, excl. guard pages): %10u\n",
517 total_allocation - total_used);
518 fprintf (stderr, "Bytes unused (wasted, incl. guard pages): %10u\n\n",
519 total_space_allocated_from_os - total_used);
521 fprintf (stderr, "---------- Statistics ----------\n");
522 fprintf (stderr, "Average SE allocation size (incl. canaries): %6.2f\n",
523 (float)total_used/(float)num_allocs);
524 fprintf (stderr, "Average SE allocation size (excl. canaries): %6.2f\n",
525 (float)(total_used - used_for_canaries)/(float)num_allocs);
526 fprintf (stderr, " Average wasted bytes per allocation: %6.2f\n",
527 (total_allocation - total_used)/(float)num_allocs);
528 total_space_wasted = (total_allocation - total_used)
529 + (sizeof(emem_chunk_t));
530 fprintf (stderr, " Space used for headers + unused allocation: %8u\n",
532 fprintf (stderr, "--> %% overhead/waste: %4.2f\n",
533 100 * (float)total_space_wasted/(float)total_space_allocated_from_os);
535 fprintf (stderr, "\nAllocation distribution (sizes include canaries):\n");
536 for (i = 0; i < (NUM_ALLOC_DIST-1); i++)
537 fprintf (stderr, "size < %5d: %8u\n", 32<<i, allocations[i]);
538 fprintf (stderr, "size > %5d: %8u\n", 32<<i, allocations[i]);
/* Return TRUE if 'ptr' lies inside the allocated (handed-out) region of any
 * chunk in 'chunk_list' -- i.e. between the chunk's initial free offset and
 * its current free offset. */
543 emem_verify_pointer_list(const emem_chunk_t *chunk_list, const void *ptr)
545 const gchar *cptr = ptr;
546 const emem_chunk_t *chunk;
548 for (chunk = chunk_list; chunk; chunk = chunk->next) {
549 if (cptr >= (chunk->buf + chunk->free_offset_init) && cptr < (chunk->buf + chunk->free_offset))
/* TRUE if 'ptr' was handed out by this pool (checks both chunk lists). */
556 emem_verify_pointer(const emem_header_t *hdr, const void *ptr)
558 return emem_verify_pointer_list(hdr->free_list, ptr) || emem_verify_pointer_list(hdr->used_list, ptr);
/* Public EP-pool pointer check; only active when verification was enabled
 * via WIRESHARK_EP_VERIFY_POINTERS at init time. */
562 ep_verify_pointer(const void *ptr)
564 if (ep_packet_mem.debug_verify_pointers)
565 return emem_verify_pointer(&ep_packet_mem, ptr);
/* Public SE-pool pointer check; only active when verification was enabled
 * via WIRESHARK_SE_VERIFY_POINTERS at init time. */
571 se_verify_pointer(const void *ptr)
573 if (se_packet_mem.debug_verify_pointers)
574 return emem_verify_pointer(&se_packet_mem, ptr);
/* Overwrite 'size' bytes at 'buf' with a recognizable pattern: 0xBADDCAFE
 * when the memory is being allocated, 0xDEADBEEF when it is being freed.
 * No-op unless WIRESHARK_DEBUG_SCRUB_MEMORY enabled the scrubber.  Writes
 * whole guints first, then the 0..3 trailing bytes big-endian-wise. */
580 emem_scrub_memory(char *buf, size_t size, gboolean alloc)
582 guint scrubbed_value;
585 if (!debug_use_memory_scrubber)
588 if (alloc) /* this memory is being allocated */
589 scrubbed_value = 0xBADDCAFE;
590 else /* this memory is being freed */
591 scrubbed_value = 0xDEADBEEF;
593 /* We shouldn't need to check the alignment of the starting address
594 * since this is malloc'd memory (or 'pagesize' bytes into malloc'd
598 /* XXX - if the above is *NOT* true, we should use memcpy here,
599 * in order to avoid problems on alignment-sensitive platforms, e.g.
600 * http://stackoverflow.com/questions/108866/is-there-memset-that-accepts-integers-larger-than-char
603 for (offset = 0; offset + sizeof(guint) <= size; offset += sizeof(guint))
604 *(guint*)(void*)(buf+offset) = scrubbed_value;
606 /* Initialize the last bytes, if any */
608 *(guint8*)(buf+offset) = scrubbed_value >> 24;
611 *(guint8*)(buf+offset) = (scrubbed_value >> 16) & 0xFF;
614 *(guint8*)(buf+offset) = (scrubbed_value >> 8) & 0xFF;
622 static emem_chunk_t *
/* Allocate one EMEM_PACKET_CHUNK_SIZE slab from the OS and, when guard
 * pages are available, mark the first and last page-aligned pages within it
 * PAGE_NOACCESS/PROT_NONE so over/underruns fault immediately.  The usable
 * region (amount_free_init/free_offset_init) is the span between the two
 * guard pages.  Throws OutOfMemoryError if the OS allocation fails. */
623 emem_create_chunk(void) {
626 char *buf_end, *prot1, *prot2;
628 #elif defined(USE_GUARD_PAGES)
630 char *buf_end, *prot1, *prot2;
631 #endif /* _WIN32 / USE_GUARD_PAGES */
634 npc = g_new(emem_chunk_t, 1);
636 npc->canary_last = NULL;
640 * MSDN documents VirtualAlloc/VirtualProtect at
641 * http://msdn.microsoft.com/library/en-us/memory/base/creating_guard_pages.asp
644 /* XXX - is MEM_COMMIT|MEM_RESERVE correct? */
645 npc->buf = VirtualAlloc(NULL, EMEM_PACKET_CHUNK_SIZE,
646 MEM_COMMIT|MEM_RESERVE, PAGE_READWRITE);
648 if (npc->buf == NULL) {
650 THROW(OutOfMemoryError);
653 #elif defined(USE_GUARD_PAGES)
654 npc->buf = mmap(NULL, EMEM_PACKET_CHUNK_SIZE,
655 PROT_READ|PROT_WRITE, ANON_PAGE_MODE, ANON_FD, 0);
657 if (npc->buf == MAP_FAILED) {
659 THROW(OutOfMemoryError);
662 #else /* Is there a draft in here? */
663 npc->buf = g_malloc(EMEM_PACKET_CHUNK_SIZE);
664 /* g_malloc() can't fail */
667 #ifdef SHOW_EMEM_STATS
672 buf_end = npc->buf + EMEM_PACKET_CHUNK_SIZE;
674 /* Align our guard pages on page-sized boundaries */
675 prot1 = (char *) ((((int) npc->buf + pagesize - 1) / pagesize) * pagesize);
676 prot2 = (char *) ((((int) buf_end - (1 * pagesize)) / pagesize) * pagesize);
/* VirtualProtect is allowed to fail on Win9x (VER_PLATFORM_WIN32_WINDOWS) */
678 ret = VirtualProtect(prot1, pagesize, PAGE_NOACCESS, &oldprot);
679 g_assert(ret != 0 || versinfo.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS);
680 ret = VirtualProtect(prot2, pagesize, PAGE_NOACCESS, &oldprot);
681 g_assert(ret != 0 || versinfo.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS);
683 npc->amount_free_init = (unsigned int) (prot2 - prot1 - pagesize);
684 npc->free_offset_init = (unsigned int) (prot1 - npc->buf) + pagesize;
685 #elif defined(USE_GUARD_PAGES)
686 buf_end = npc->buf + EMEM_PACKET_CHUNK_SIZE;
688 /* Align our guard pages on page-sized boundaries */
689 prot1 = (char *) ((((intptr_t) npc->buf + pagesize - 1) / pagesize) * pagesize);
690 prot2 = (char *) ((((intptr_t) buf_end - (1 * pagesize)) / pagesize) * pagesize);
692 ret = mprotect(prot1, pagesize, PROT_NONE);
694 ret = mprotect(prot2, pagesize, PROT_NONE);
697 npc->amount_free_init = prot2 - prot1 - pagesize;
698 npc->free_offset_init = (prot1 - npc->buf) + pagesize;
/* no guard pages: the whole chunk is usable */
700 npc->amount_free_init = EMEM_PACKET_CHUNK_SIZE;
701 npc->free_offset_init = 0;
702 #endif /* USE_GUARD_PAGES */
704 npc->amount_free = npc->amount_free_init;
705 npc->free_offset = npc->free_offset_init;
/* Chunked bump allocator.  Rounds 'size' up to cover either the canary
 * (canary pad + pointer to the previous canary) or plain G_MEM_ALIGN
 * alignment, carves the result off the head chunk of the free list, and --
 * when canaries are on -- writes the canary after the user bytes and links
 * it into the chunk's canary chain (canary_last).  A chunk too full for the
 * request is moved to the used list and a fresh chunk is created. */
710 emem_alloc_chunk(size_t size, emem_header_t *mem)
715 gboolean use_canary = mem->debug_use_canary;
717 emem_chunk_t *free_list;
719 /* Allocate room for at least 8 bytes of canary plus some padding
720 * so the canary ends on an 8-byte boundary.
721 * Then add the room needed for the pointer to the next canary.
724 pad = emem_canary_pad(asize);
725 asize += sizeof(void *);
727 pad = (G_MEM_ALIGN - (asize & (G_MEM_ALIGN-1))) & (G_MEM_ALIGN-1);
731 #ifdef SHOW_EMEM_STATS
732 /* Do this check here so we can include the canary size */
733 if (mem == &se_packet_mem) {
738 else if (asize < 128)
740 else if (asize < 256)
742 else if (asize < 512)
744 else if (asize < 1024)
746 else if (asize < 2048)
748 else if (asize < 4096)
750 else if (asize < 8192)
752 else if (asize < 16384)
755 allocations[(NUM_ALLOC_DIST-1)]++;
759 /* make sure we dont try to allocate too much (arbitrary limit) */
760 DISSECTOR_ASSERT(size<(EMEM_PACKET_CHUNK_SIZE>>2));
763 mem->free_list = emem_create_chunk();
765 /* oops, we need to allocate more memory to serve this request
766 * than we have free. move this node to the used list and try again
768 if(asize > mem->free_list->amount_free) {
771 mem->free_list=mem->free_list->next;
772 npc->next=mem->used_list;
776 mem->free_list = emem_create_chunk();
779 free_list = mem->free_list;
781 buf = free_list->buf + free_list->free_offset;
783 free_list->amount_free -= (unsigned int) asize;
784 free_list->free_offset += (unsigned int) asize;
/* lay down the canary right after the user's bytes, then store the pointer
 * to the previous canary so free_all can walk the chain backwards */
787 char *cptr = (char *)buf + size;
789 memcpy(cptr, mem->canary, pad-1);
791 memcpy(cptr + pad, &free_list->canary_last, sizeof(void *));
793 free_list->canary_last = cptr;
/* Debug back-end: every allocation gets its own g_malloc'd buffer wrapped
 * in a private chunk header, so tools like Valgrind can track each object
 * individually.  No canaries, no padding. */
800 emem_alloc_glib(size_t size, emem_header_t *mem)
804 npc=g_new(emem_chunk_t, 1);
805 npc->next=mem->used_list;
806 npc->buf=g_malloc(size);
807 npc->canary_last = NULL;
809 /* There's no padding/alignment involved (from our point of view) when
810 * we fetch the memory directly from the system pool, so WYSIWYG */
811 npc->free_offset = npc->free_offset_init = 0;
812 npc->amount_free = npc->amount_free_init = (unsigned int) size;
817 /* allocate 'size' amount of memory. */
/* Dispatch to the pool's configured back-end, then scrub the fresh bytes
 * with the "allocated" pattern when the scrubber is enabled. */
819 emem_alloc(size_t size, emem_header_t *mem)
821 void *buf = mem->memory_alloc(size, mem);
823 /* XXX - this is a waste of time if the allocator function is going to
824 * memset this straight back to 0.
826 emem_scrub_memory(buf, size, TRUE);
831 /* allocate 'size' amount of memory with an allocation lifetime until the
/* next packet is dissected (EP pool). */
835 ep_alloc(size_t size)
837 return emem_alloc(size, &ep_packet_mem);
840 /* allocate 'size' amount of memory with an allocation lifetime until the
/* capture file is closed (SE pool). */
844 se_alloc(size_t size)
846 return emem_alloc(size, &se_packet_mem);
/* ep_alloc() plus zero-fill. */
850 ep_alloc0(size_t size)
852 return memset(ep_alloc(size),'\0',size);
/* se_alloc() plus zero-fill. */
856 se_alloc0(size_t size)
858 return memset(se_alloc(size),'\0',size);
/* strdup into pool memory via the supplied allocator; a NULL src yields the
 * literal string "<NULL>" so callers never have to NULL-check. */
863 emem_strdup(const gchar *src, void *allocator(size_t))
868 /* If str is NULL, just return the string "<NULL>" so that the callers don't
869 * have to bother checking it.
874 len = (guint) strlen(src);
875 dst = memcpy(allocator(len+1), src, len+1);
/* strdup with packet lifetime. */
881 ep_strdup(const gchar *src)
883 return emem_strdup(src, ep_alloc);
/* strdup with capture lifetime. */
887 se_strdup(const gchar *src)
889 return emem_strdup(src, se_alloc);
/* Copy at most 'len' chars of 'src' (stopping early at NUL) into a fresh
 * len+1 buffer from 'allocator'; result is always NUL-terminated. */
893 emem_strndup(const gchar *src, size_t len, void *allocator(size_t))
895 gchar *dst = allocator(len+1);
898 for (i = 0; (i < len) && src[i]; i++)
/* strndup with packet lifetime. */
907 ep_strndup(const gchar *src, size_t len)
909 return emem_strndup(src, len, ep_alloc);
/* strndup with capture lifetime. */
913 se_strndup(const gchar *src, size_t len)
915 return emem_strndup(src, len, se_alloc);
/* Duplicate 'len' raw bytes with packet lifetime. */
921 ep_memdup(const void* src, size_t len)
923 return memcpy(ep_alloc(len), src, len);
/* Duplicate 'len' raw bytes with capture lifetime. */
927 se_memdup(const void* src, size_t len)
929 return memcpy(se_alloc(len), src, len);
/* vsprintf into pool memory: sizes the buffer with
 * g_printf_string_upper_bound() then formats with g_vsnprintf().
 * NOTE(review): the sampled view omits the va_copy that 'ap2' implies. */
933 emem_strdup_vprintf(const gchar *fmt, va_list ap, void *allocator(size_t))
941 len = g_printf_string_upper_bound(fmt, ap);
943 dst = allocator(len+1);
944 g_vsnprintf (dst, (gulong) len, fmt, ap2);
/* vprintf-style formatting with packet lifetime. */
951 ep_strdup_vprintf(const gchar *fmt, va_list ap)
953 return emem_strdup_vprintf(fmt, ap, ep_alloc);
/* vprintf-style formatting with capture lifetime. */
957 se_strdup_vprintf(const gchar* fmt, va_list ap)
959 return emem_strdup_vprintf(fmt, ap, se_alloc);
/* printf-style formatting with packet lifetime (varargs wrapper). */
963 ep_strdup_printf(const gchar *fmt, ...)
969 dst = ep_strdup_vprintf(fmt, ap);
/* printf-style formatting with capture lifetime (varargs wrapper). */
975 se_strdup_printf(const gchar *fmt, ...)
981 dst = se_strdup_vprintf(fmt, ap);
/* g_strsplit work-alike returning an ep-allocated, NULL-terminated vector.
 * First pass counts separators (overwriting separator bytes in the
 * ep_strdup'd copy), second pass walks the copy with a small state machine
 * (AT_START / IN_PAD / IN_TOKEN) collecting token start pointers.
 * max_tokens < 1 means unlimited (INT_MAX). */
987 ep_strsplit(const gchar* string, const gchar* sep, int max_tokens)
996 enum { AT_START, IN_PAD, IN_TOKEN } state;
1004 s = splitted = ep_strdup(string);
1005 str_len = (guint) strlen(splitted);
1006 sep_len = (guint) strlen(sep);
1008 if (max_tokens < 1) max_tokens = INT_MAX;
1013 while (tokens <= (guint)max_tokens && ( s = strstr(s,sep) )) {
1016 for(i=0; i < sep_len; i++ )
1023 vec = ep_alloc_array(gchar*,tokens+1);
1026 for (i=0; i< str_len; i++) {
1029 switch(splitted[i]) {
1034 vec[curr_tok] = &(splitted[i]);
1040 switch(splitted[i]) {
1047 switch(splitted[i]) {
1049 vec[curr_tok] = &(splitted[i]);
1058 vec[curr_tok] = NULL;
/* g_strconcat work-alike in ep memory: first varargs pass sums the lengths
 * (NULL-terminated argument list), second pass g_stpcpy's each string into
 * a single ep_alloc'd buffer. */
1064 ep_strconcat(const gchar *string1, ...)
1075 l = 1 + strlen(string1);
1076 va_start(args, string1);
1077 s = va_arg(args, gchar*);
1080 s = va_arg(args, gchar*);
1084 concat = ep_alloc(l);
1087 ptr = g_stpcpy(ptr, string1);
1088 va_start(args, string1);
1089 s = va_arg(args, gchar*);
1091 ptr = g_stpcpy(ptr, s);
1092 s = va_arg(args, gchar*);
1101 /* release all allocated memory back to the pool. */
/* In chunked mode: move every used chunk back onto the free list, verify
 * each chunk's canary chain (g_error on corruption), optionally scrub the
 * handed-out region with the "freed" pattern, and reset the bump offsets to
 * their *_init values so the chunks are reusable.  In per-object (glib)
 * mode: scrub and free every private chunk outright.  Finally, reset the
 * tree list so se trees are recreated lazily. */
1103 emem_free_all(emem_header_t *mem)
1105 gboolean use_chunks = mem->debug_use_chunks;
1108 emem_tree_t *tree_list;
1110 /* move all used chunks over to the free list */
1111 while(mem->used_list){
1113 mem->used_list=mem->used_list->next;
1114 npc->next=mem->free_list;
1118 /* clear them all out */
1119 npc = mem->free_list;
1120 while (npc != NULL) {
1122 while (npc->canary_last != NULL) {
1123 npc->canary_last = emem_canary_next(mem->canary, npc->canary_last, NULL);
1124 /* XXX, check if canary_last is inside allocated memory? */
1126 if (npc->canary_last == (void *) -1)
1127 g_error("Memory corrupted");
1130 emem_scrub_memory((npc->buf + npc->free_offset_init),
1131 (npc->free_offset - npc->free_offset_init),
1134 npc->amount_free = npc->amount_free_init;
1135 npc->free_offset = npc->free_offset_init;
1138 emem_chunk_t *next = npc->next;
1140 emem_scrub_memory(npc->buf, npc->amount_free_init, FALSE);
1149 /* We've freed all this memory already */
1150 mem->free_list = NULL;
1153 /* release/reset all allocated trees */
1154 for(tree_list=mem->trees;tree_list;tree_list=tree_list->next){
1155 tree_list->tree=NULL;
1159 /* release all allocated memory back to the pool. */
1163 emem_free_all(&ep_packet_mem);
1166 /* release all allocated memory back to the pool. */
/* Also dumps allocator statistics first when SHOW_EMEM_STATS is built in. */
1170 #ifdef SHOW_EMEM_STATS
1171 print_alloc_stats();
1174 emem_free_all(&se_packet_mem);
/* Create an ep-lifetime stack: the handle points at the current top frame,
 * starting with one zeroed sentinel frame. */
1178 ep_stack_new(void) {
1179 ep_stack_t s = ep_new(struct _ep_stack_frame_t*);
1180 *s = ep_new0(struct _ep_stack_frame_t);
1184 /* for ep_stack_t we'll keep the popped frames so we reuse them instead
1185 of allocating new ones.
/* Push 'data': reuse the frame above the current head if one was popped
 * earlier, otherwise allocate and link a fresh frame. */
1189 ep_stack_push(ep_stack_t stack, void* data)
1191 struct _ep_stack_frame_t* frame;
1192 struct _ep_stack_frame_t* head = (*stack);
1195 frame = head->above;
1197 frame = ep_new(struct _ep_stack_frame_t);
1198 head->above = frame;
1199 frame->below = head;
1200 frame->above = NULL;
1203 frame->payload = data;
/* Pop the top frame and return its payload; the frame itself is kept
 * linked above the new head for reuse by a later push. */
1210 ep_stack_pop(ep_stack_t stack)
1213 if ((*stack)->below) {
1214 (*stack) = (*stack)->below;
1215 return (*stack)->above->payload;
/* Create a capture-lifetime binary tree of the given type (e.g. red-black)
 * and register it on se_packet_mem.trees so se_free_all() can reset it.
 * The list node itself is g_malloc'd (permanent); node memory comes from
 * se_alloc.  'name' is kept by reference, so pass a string literal. */
1222 se_tree_create(int type, const char *name)
1224 emem_tree_t *tree_list;
1226 tree_list=g_malloc(sizeof(emem_tree_t));
1227 tree_list->next=se_packet_mem.trees;
1228 tree_list->type=type;
1229 tree_list->tree=NULL;
1230 tree_list->name=name;
1231 tree_list->malloc=se_alloc;
1232 se_packet_mem.trees=tree_list;
/* Exact-match lookup of a 32-bit key: standard binary-search descent,
 * returning the node's data on a hit (NULL return on miss is implied by the
 * lines sampled out of this view). */
1238 emem_tree_lookup32(emem_tree_t *se_tree, guint32 key)
1240 emem_tree_node_t *node;
1245 if(key==node->key32){
1248 if(key<node->key32){
1252 if(key>node->key32){
/* Lookup of the largest key that is <= 'key' ("less-or-equal" match).
 * First descends like an exact lookup; on a miss, walks back up from the
 * last visited node: a left child smaller than the key, or the first
 * ancestor on the search path whose key is smaller, is the answer. */
1261 emem_tree_lookup32_le(emem_tree_t *se_tree, guint32 key)
1263 emem_tree_node_t *node;
1273 if(key==node->key32){
1276 if(key<node->key32){
1284 if(key>node->key32){
1299 /* If we are still at the root of the tree this means that this node
1300 * is either smaller than the search key and then we return this
1301 * node or else there is no smaller key available and then
1305 if(key>node->key32){
1312 if(node->parent->left==node){
1315 if(key>node->key32){
1316 /* if this is a left child and its key is smaller than
1317 * the search key, then this is the node we want.
1321 /* if this is a left child and its key is bigger than
1322 * the search key, we have to check if any
1323 * of our ancestors are smaller than the search key.
1326 if(key>node->key32){
1336 if(node->key32<key){
1337 /* if this is the right child and its key is smaller
1338 * than the search key then this is the one we want.
1342 /* if this is the right child and its key is larger
1343 * than the search key then our parent is the one we
1346 return node->parent->data;
/* Red-black helper: a node's parent (NULL for the root). */
1353 static inline emem_tree_node_t *
1354 emem_tree_parent(emem_tree_node_t *node)
1356 return node->parent;
/* Red-black helper: a node's grandparent (the sampled-out lines presumably
 * return NULL when there is no parent). */
1359 static inline emem_tree_node_t *
1360 emem_tree_grandparent(emem_tree_node_t *node)
1362 emem_tree_node_t *parent;
1364 parent=emem_tree_parent(node);
1366 return parent->parent;
/* Red-black helper: the parent's sibling -- the grandparent's other child. */
1371 static inline emem_tree_node_t *
1372 emem_tree_uncle(emem_tree_node_t *node)
1374 emem_tree_node_t *parent, *grandparent;
1376 parent=emem_tree_parent(node);
1380 grandparent=emem_tree_parent(parent);
1384 if(parent==grandparent->left){
1385 return grandparent->right;
1387 return grandparent->left;
/* Forward declarations: the insert-rebalance cases call each other. */
1390 static inline void rb_insert_case1(emem_tree_t *se_tree, emem_tree_node_t *node);
1391 static inline void rb_insert_case2(emem_tree_t *se_tree, emem_tree_node_t *node);
/* Standard left rotation about 'node': node's right child takes node's place
 * (updating the parent's child link or the tree root) and node becomes its
 * left child; the child's former left subtree becomes node's right subtree. */
1394 rotate_left(emem_tree_t *se_tree, emem_tree_node_t *node)
1397 if(node->parent->left==node){
1398 node->parent->left=node->right;
1400 node->parent->right=node->right;
1403 se_tree->tree=node->right;
1405 node->right->parent=node->parent;
1406 node->parent=node->right;
1407 node->right=node->right->left;
1409 node->right->parent=node;
1411 node->parent->left=node;
/* Mirror image of rotate_left: node's left child takes node's place and
 * node becomes its right child. */
1415 rotate_right(emem_tree_t *se_tree, emem_tree_node_t *node)
1418 if(node->parent->left==node){
1419 node->parent->left=node->left;
1421 node->parent->right=node->left;
1424 se_tree->tree=node->left;
1426 node->left->parent=node->parent;
1427 node->parent=node->left;
1428 node->left=node->left->right;
1430 node->left->parent=node;
1432 node->parent->right=node;
/* RB insert, case 5: parent red, uncle black, node on the "outside" of its
 * grandparent.  Recolor parent black / grandparent red, then rotate the
 * grandparent away from the node's side to restore the red-black invariants. */
1436 rb_insert_case5(emem_tree_t *se_tree, emem_tree_node_t *node)
1438 emem_tree_node_t *grandparent;
1439 emem_tree_node_t *parent;
1441 parent=emem_tree_parent(node);
1442 grandparent=emem_tree_parent(parent);
1443 parent->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1444 grandparent->u.rb_color=EMEM_TREE_RB_COLOR_RED;
1445 if( (node==parent->left) && (parent==grandparent->left) ){
1446 rotate_right(se_tree, grandparent);
1448 rotate_left(se_tree, grandparent);
/* RB insert, case 4: node on the "inside" of its grandparent -- rotate the
 * parent so the configuration becomes the outside case, then fall through
 * to case 5. */
1453 rb_insert_case4(emem_tree_t *se_tree, emem_tree_node_t *node)
1455 emem_tree_node_t *grandparent;
1456 emem_tree_node_t *parent;
1458 parent=emem_tree_parent(node);
1459 grandparent=emem_tree_parent(parent);
1463 if( (node==parent->right) && (parent==grandparent->left) ){
1464 rotate_left(se_tree, parent);
1466 } else if( (node==parent->left) && (parent==grandparent->right) ){
1467 rotate_right(se_tree, parent);
1470 rb_insert_case5(se_tree, node);
/* RB insert, case 3: parent and uncle both red -- repaint them black, paint
 * the grandparent red, and recurse the fixup from the grandparent.  A black
 * (or absent) uncle falls through to case 4. */
1474 rb_insert_case3(emem_tree_t *se_tree, emem_tree_node_t *node)
1476 emem_tree_node_t *grandparent;
1477 emem_tree_node_t *parent;
1478 emem_tree_node_t *uncle;
1480 uncle=emem_tree_uncle(node);
1481 if(uncle && (uncle->u.rb_color==EMEM_TREE_RB_COLOR_RED)){
1482 parent=emem_tree_parent(node);
1483 parent->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1484 uncle->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1485 grandparent=emem_tree_grandparent(node);
1486 grandparent->u.rb_color=EMEM_TREE_RB_COLOR_RED;
1487 rb_insert_case1(se_tree, grandparent);
1489 rb_insert_case4(se_tree, node);
/* RB insert, case 2: a black parent leaves all invariants intact -- done.
 * A red parent proceeds to case 3. */
1494 rb_insert_case2(emem_tree_t *se_tree, emem_tree_node_t *node)
1496 emem_tree_node_t *parent;
1498 parent=emem_tree_parent(node);
1499 /* parent is always non-NULL here */
1500 if(parent->u.rb_color==EMEM_TREE_RB_COLOR_BLACK){
1503 rb_insert_case3(se_tree, node);
/* RB insert, case 1 (entry point of the fixup): a node with no parent is
 * the root and is simply painted black; otherwise continue with case 2. */
1507 rb_insert_case1(emem_tree_t *se_tree, emem_tree_node_t *node)
1509 emem_tree_node_t *parent;
1511 parent=emem_tree_parent(node);
1513 node->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1516 rb_insert_case2(se_tree, node);
1519 /* insert a new node in the tree. if this node matches an already existing node
1520 * then just replace the data for that node */
/* First insertion creates a black root; otherwise a standard BST descent
 * finds either an existing node (data pointer replaced in place, no
 * rebalance) or the attachment point for a new leaf.  New leaves are
 * allocated from the tree's pool allocator, painted red, and rebalanced
 * via rb_insert_case1 when the tree is red-black. */
1522 emem_tree_insert32(emem_tree_t *se_tree, guint32 key, void *data)
1524 emem_tree_node_t *node;
1528 /* is this the first node ?*/
1530 node=se_tree->malloc(sizeof(emem_tree_node_t));
1531 switch(se_tree->type){
1532 case EMEM_TREE_TYPE_RED_BLACK:
1533 node->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1541 node->u.is_subtree = EMEM_TREE_NODE_IS_DATA;
1546 /* it was not the new root so walk the tree until we find where to
1547 * insert this new leaf.
1550 /* this node already exists, so just replace the data pointer*/
1551 if(key==node->key32){
1555 if(key<node->key32) {
1557 /* new node to the left */
1558 emem_tree_node_t *new_node;
1559 new_node=se_tree->malloc(sizeof(emem_tree_node_t));
1560 node->left=new_node;
1561 new_node->parent=node;
1562 new_node->left=NULL;
1563 new_node->right=NULL;
1564 new_node->key32=key;
1565 new_node->data=data;
1566 new_node->u.is_subtree=EMEM_TREE_NODE_IS_DATA;
1573 if(key>node->key32) {
1575 /* new node to the right */
1576 emem_tree_node_t *new_node;
1577 new_node=se_tree->malloc(sizeof(emem_tree_node_t));
1578 node->right=new_node;
1579 new_node->parent=node;
1580 new_node->left=NULL;
1581 new_node->right=NULL;
1582 new_node->key32=key;
1583 new_node->data=data;
1584 new_node->u.is_subtree=EMEM_TREE_NODE_IS_DATA;
1593 /* node will now point to the newly created node */
1594 switch(se_tree->type){
1595 case EMEM_TREE_TYPE_RED_BLACK:
1596 node->u.rb_color=EMEM_TREE_RB_COLOR_RED;
1597 rb_insert_case1(se_tree, node);
/* Look up `key`; if absent, insert a node whose data is produced by
 * func(ud) (lazy construction — func is only invoked on a miss).
 * is_subtree marks whether the stored data is itself a subtree.
 * Mirrors emem_tree_insert32 except that existing data is returned
 * rather than replaced.
 * NOTE(review): fragment — braces, the walk-loop header and the
 * return statements are missing from this excerpt. */
1603 lookup_or_insert32(emem_tree_t *se_tree, guint32 key, void*(*func)(void*),void* ud, int is_subtree)
1605 emem_tree_node_t *node;
1609 /* is this the first node ?*/
1611 node=se_tree->malloc(sizeof(emem_tree_node_t));
1612 switch(se_tree->type){
1613 case EMEM_TREE_TYPE_RED_BLACK:
/* root is always black */
1614 node->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
/* data is built on demand by the supplied constructor */
1621 node->data= func(ud);
1622 node->u.is_subtree = is_subtree;
1627 /* it was not the new root so walk the tree until we find where to
1628 * insert this new leaf.
1631 /* this node already exists, so just return the data pointer*/
1632 if(key==node->key32){
1635 if(key<node->key32) {
1637 /* new node to the left */
1638 emem_tree_node_t *new_node;
1639 new_node=se_tree->malloc(sizeof(emem_tree_node_t));
1640 node->left=new_node;
1641 new_node->parent=node;
1642 new_node->left=NULL;
1643 new_node->right=NULL;
1644 new_node->key32=key;
1645 new_node->data= func(ud);
1646 new_node->u.is_subtree = is_subtree;
1653 if(key>node->key32) {
1655 /* new node to the right */
1656 emem_tree_node_t *new_node;
1657 new_node=se_tree->malloc(sizeof(emem_tree_node_t));
1658 node->right=new_node;
1659 new_node->parent=node;
1660 new_node->left=NULL;
1661 new_node->right=NULL;
1662 new_node->key32=key;
1663 new_node->data= func(ud);
1664 new_node->u.is_subtree = is_subtree;
1673 /* node will now point to the newly created node */
1674 switch(se_tree->type){
1675 case EMEM_TREE_TYPE_RED_BLACK:
/* new leaves start red; rebalance from the inserted node */
1676 node->u.rb_color=EMEM_TREE_RB_COLOR_RED;
1677 rb_insert_case1(se_tree, node);
1684 /* When the se data is released, this entire tree will dissapear as if it
1685 * never existed including all metadata associated with the tree.
/* Creates a capture-scoped tree: both the tree header and (via the
 * stored allocator) all of its nodes come from se_alloc, so the
 * whole structure vanishes when se memory is freed. */
1688 se_tree_create_non_persistent(int type, const char *name)
1690 emem_tree_t *tree_list;
1692 tree_list=se_alloc(sizeof(emem_tree_t));
1693 tree_list->next=NULL;
1694 tree_list->type=type;
1695 tree_list->tree=NULL;
/* name is stored by reference — caller must pass a string literal or
 * other storage that outlives the tree */
1696 tree_list->name=name;
1697 tree_list->malloc=se_alloc;
1702 /* This tree is PErmanent and will never be released
/* Permanent tree: header and nodes are g_malloc'd, so the structure
 * survives for the lifetime of the process (it is never freed). */
1705 pe_tree_create(int type, const char *name)
1707 emem_tree_t *tree_list;
1709 tree_list=g_new(emem_tree_t, 1);
1710 tree_list->next=NULL;
1711 tree_list->type=type;
1712 tree_list->tree=NULL;
1713 tree_list->name=name;
/* cast adapts g_malloc's gsize parameter to the tree's size_t
 * allocator signature */
1714 tree_list->malloc=(void *(*)(size_t)) g_malloc;
1719 /* create another (sub)tree using the same memory allocation scope
1720 * as the parent tree.
/* The subtree inherits both the parent's node type and its allocator,
 * so it lives and dies with the parent's memory scope. */
1722 static emem_tree_t *
1723 emem_tree_create_subtree(emem_tree_t *parent_tree, const char *name)
1725 emem_tree_t *tree_list;
1727 tree_list=parent_tree->malloc(sizeof(emem_tree_t));
1728 tree_list->next=NULL;
1729 tree_list->type=parent_tree->type;
1730 tree_list->tree=NULL;
1731 tree_list->name=name;
1732 tree_list->malloc=parent_tree->malloc;
/* Constructor callback for lookup_or_insert32: lazily creates a
 * subtree sharing the parent tree's allocation scope. */
1738 create_sub_tree(void* d)
1740 emem_tree_t *se_tree = d;
1741 return emem_tree_create_subtree(se_tree, "subtree");
1744 /* insert a new node in the tree. if this node matches an already existing node
1745 * then just replace the data for that node */
/* Compound-key insert: each key[] element holds `length` guint32
 * words; the array is terminated by an entry with length==0. Each
 * word selects (or lazily creates) one level of nested subtree.
 * NOTE(review): fragment — the key-advance lines between L789 and
 * L790 are missing from this excerpt. */
1748 emem_tree_insert32_array(emem_tree_t *se_tree, emem_tree_key_t *key, void *data)
1750 emem_tree_t *next_tree;
/* sanity check: a key part must have 1..100 words */
1752 if((key[0].length<1)||(key[0].length>100)){
1753 DISSECTOR_ASSERT_NOT_REACHED();
/* fast path: exactly one word left in the whole key — plain insert */
1755 if((key[0].length==1)&&(key[1].length==0)){
1756 emem_tree_insert32(se_tree, *key[0].key, data);
/* descend into (or create) the subtree for the first key word */
1760 next_tree=lookup_or_insert32(se_tree, *key[0].key, create_sub_tree, se_tree, EMEM_TREE_NODE_IS_SUBTREE);
1762 if(key[0].length==1){
/* tail-recurse with the advanced key */
1768 emem_tree_insert32_array(next_tree, key, data);
/* Compound-key lookup, mirroring emem_tree_insert32_array: walks one
 * subtree level per key word; returns NULL on any miss.
 * NOTE(review): fragment — the NULL-check after L798 and the
 * key-advance lines are missing from this excerpt. */
1772 emem_tree_lookup32_array(emem_tree_t *se_tree, emem_tree_key_t *key)
1774 emem_tree_t *next_tree;
1776 if(!se_tree || !key) return NULL; /* prevent searching on NULL pointer */
1778 if((key[0].length<1)||(key[0].length>100)){
1779 DISSECTOR_ASSERT_NOT_REACHED();
/* last word of the whole key: direct lookup */
1781 if((key[0].length==1)&&(key[1].length==0)){
1782 return emem_tree_lookup32(se_tree, *key[0].key);
1784 next_tree=emem_tree_lookup32(se_tree, *key[0].key);
1788 if(key[0].length==1){
1794 return emem_tree_lookup32_array(next_tree, key);
/* Compound-key "less than or equal" lookup: exact-matches all but the
 * last key word, then does a <= lookup on the final word. If an
 * intermediate word has no exact match, falls back to a <= lookup at
 * that level.
 * NOTE(review): fragment — the next_tree NULL test before L810 and
 * the key-advance lines are missing from this excerpt. */
1798 emem_tree_lookup32_array_le(emem_tree_t *se_tree, emem_tree_key_t *key)
1800 emem_tree_t *next_tree;
1802 if(!se_tree || !key) return NULL; /* prevent searching on NULL pointer */
1804 if((key[0].length<1)||(key[0].length>100)){
1805 DISSECTOR_ASSERT_NOT_REACHED();
1807 if((key[0].length==1)&&(key[1].length==0)){ /* last key in key array */
1808 return emem_tree_lookup32_le(se_tree, *key[0].key);
1810 next_tree=emem_tree_lookup32(se_tree, *key[0].key);
1811 /* key[0].key not found so find le and return */
1813 return emem_tree_lookup32_le(se_tree, *key[0].key);
1815 /* key[0].key found so inc key pointer and try again */
1816 if(key[0].length==1){
1822 return emem_tree_lookup32_array_le(next_tree, key);
1825 /* Strings are stored as an array of uint32 containing the string characters
1826 with 4 characters in each uint32.
1827 The first byte of the string is stored as the most significant byte.
1828 If the string is not a multiple of 4 characters in length the last
1829 uint32 containing the string bytes are padded with 0 bytes.
1830 After the uint32's containing the string, there is one final terminator
1831 uint32 with the value 0x00000001
/* NOTE(review): `aligned` is g_malloc'd; the g_free after the insert
 * is not visible in this excerpt — confirm it is freed in the full
 * file, otherwise this leaks per insertion. */
1834 emem_tree_insert_string(emem_tree_t* se_tree, const gchar* k, void* v, guint32 flags)
1836 emem_tree_key_t key[2];
1837 guint32 *aligned=NULL;
1838 guint32 len = (guint32) strlen(k);
/* number of guint32 words: ceil(len/4) payload + 1 terminator */
1839 guint32 divx = (len+3)/4+1;
1843 aligned = g_malloc(divx * sizeof (guint32));
1845 /* pack the bytes one one by one into guint32s */
1847 for (i = 0;i < len;i++) {
1850 ch = (unsigned char)k[i];
/* EMEM_TREE_STRING_NOCASE: fold case before packing (folding code
 * is on lines missing from this excerpt) */
1851 if (flags & EMEM_TREE_STRING_NOCASE) {
1863 /* add required padding to the last uint32 */
1869 aligned[i/4-1] = tmp;
1872 /* add the terminator */
1873 aligned[divx-1] = 0x00000001;
1875 key[0].length = divx;
1876 key[0].key = aligned;
/* key[1] is the length==0 array terminator (set on a missing line) */
1881 emem_tree_insert32_array(se_tree, key, v);
/* String lookup: packs `k` into guint32 words exactly as
 * emem_tree_insert_string does, then performs an array lookup.
 * NOTE(review): the `(guint)` cast on L841 is inconsistent with the
 * `(guint32)` cast used in the insert path — harmless but should be
 * unified. As with insert, the g_free of `aligned` is not visible in
 * this excerpt; confirm in the full file. */
1886 emem_tree_lookup_string(emem_tree_t* se_tree, const gchar* k, guint32 flags)
1888 emem_tree_key_t key[2];
1889 guint32 *aligned=NULL;
1890 guint32 len = (guint) strlen(k);
1891 guint32 divx = (len+3)/4+1;
1896 aligned = g_malloc(divx * sizeof (guint32));
1898 /* pack the bytes one one by one into guint32s */
1900 for (i = 0;i < len;i++) {
1903 ch = (unsigned char)k[i];
1904 if (flags & EMEM_TREE_STRING_NOCASE) {
1916 /* add required padding to the last uint32 */
1922 aligned[i/4-1] = tmp;
1925 /* add the terminator */
1926 aligned[divx-1] = 0x00000001;
1928 key[0].length = divx;
1929 key[0].key = aligned;
1934 ret = emem_tree_lookup32_array(se_tree, key);
/* In-order traversal helper: visit the left subtree, then this node
 * (recursing when the node holds a subtree, otherwise invoking the
 * callback on its data), then the right subtree. Any TRUE return
 * stops the traversal and propagates upward.
 * NOTE(review): fragment — braces, NULL-child guards and return
 * statements between the visible lines are missing here. */
1940 emem_tree_foreach_nodes(emem_tree_node_t* node, tree_foreach_func callback, void *user_data)
1942 gboolean stop_traverse = FALSE;
1948 stop_traverse = emem_tree_foreach_nodes(node->left, callback, user_data);
1949 if (stop_traverse) {
1954 if (node->u.is_subtree == EMEM_TREE_NODE_IS_SUBTREE) {
1955 stop_traverse = emem_tree_foreach(node->data, callback, user_data);
1957 stop_traverse = callback(node->data, user_data);
1960 if (stop_traverse) {
1965 stop_traverse = emem_tree_foreach_nodes(node->right, callback, user_data);
1966 if (stop_traverse) {
/* Public traversal entry point: walks the whole tree in order,
 * invoking callback(data, user_data) on each data node. An empty
 * tree short-circuits (the return value for that case is on a line
 * missing from this excerpt — presumably FALSE; confirm). */
1975 emem_tree_foreach(emem_tree_t* emem_tree, tree_foreach_func callback, void *user_data)
1980 if(!emem_tree->tree)
1983 return emem_tree_foreach_nodes(emem_tree->tree, callback, user_data);
1988 emem_tree_print_nodes(emem_tree_node_t* node, int level)
1995 for(i=0;i<level;i++){
1999 printf("NODE:%p parent:%p left:0x%p right:%px key:%d data:%p\n",
2000 (void *)node,(void *)(node->parent),(void *)(node->left),(void *)(node->right),
2001 (node->key32),node->data);
2003 emem_tree_print_nodes(node->left, level+1);
2005 emem_tree_print_nodes(node->right, level+1);
/* Debug helper: print a one-line tree summary (type, name, root
 * pointer) followed by an indented dump of every node. */
2008 emem_print_tree(emem_tree_t* emem_tree)
2013 printf("EMEM tree type:%d name:%s tree:%p\n",emem_tree->type,emem_tree->name,(void *)(emem_tree->tree));
2015 emem_tree_print_nodes(emem_tree->tree, 0);
2023 * Presumably we're using these routines for building strings for the tree.
2024 * Use ITEM_LABEL_LENGTH as the basis for our default lengths.
2027 #define DEFAULT_STRBUF_LEN (ITEM_LABEL_LENGTH / 10)
2028 #define MAX_STRBUF_LEN 65536
/* Compute the next strbuf allocation size: clamp max_alloc_len into
 * (0, MAX_STRBUF_LEN], default cur_alloc_len to DEFAULT_STRBUF_LEN
 * when unset, grow until it covers wanted_alloc_len, and cap the
 * result at max_alloc_len.
 * NOTE(review): the loop body (the growth step — presumably a
 * doubling) is on a line missing from this excerpt; confirm against
 * the full file. */
2031 next_size(gsize cur_alloc_len, gsize wanted_alloc_len, gsize max_alloc_len)
2033 if (max_alloc_len < 1 || max_alloc_len > MAX_STRBUF_LEN) {
2034 max_alloc_len = MAX_STRBUF_LEN;
2037 if (cur_alloc_len < 1) {
2038 cur_alloc_len = DEFAULT_STRBUF_LEN;
2041 while (cur_alloc_len < wanted_alloc_len) {
2045 return cur_alloc_len < max_alloc_len ? cur_alloc_len : max_alloc_len;
/* Grow the strbuf's backing storage to at least wanted_alloc_len
 * bytes. No-op when the buffer is already big enough or has hit its
 * cap. The contents are copied into a fresh ep_alloc'd buffer; the
 * old buffer is simply abandoned to the ep scope (no free needed). */
2049 ep_strbuf_grow(emem_strbuf_t *strbuf, gsize wanted_alloc_len)
2051 gsize new_alloc_len;
2054 if (!strbuf || (wanted_alloc_len <= strbuf->alloc_len) || (strbuf->alloc_len >= strbuf->max_alloc_len)) {
2058 new_alloc_len = next_size(strbuf->alloc_len, wanted_alloc_len, strbuf->max_alloc_len);
2059 new_str = ep_alloc(new_alloc_len);
/* g_strlcpy always NUL-terminates, even if new_alloc_len truncates */
2060 g_strlcpy(new_str, strbuf->str, new_alloc_len);
2062 strbuf->alloc_len = new_alloc_len;
2063 strbuf->str = new_str;
/* Allocate an empty packet-scoped strbuf with an initial capacity of
 * alloc_len bytes and a hard growth cap of max_alloc_len (0 means
 * "use MAX_STRBUF_LEN"). alloc_len is clamped to the cap. */
2067 ep_strbuf_sized_new(gsize alloc_len, gsize max_alloc_len)
2069 emem_strbuf_t *strbuf;
2071 strbuf = ep_alloc(sizeof(emem_strbuf_t));
2073 if ((max_alloc_len == 0) || (max_alloc_len > MAX_STRBUF_LEN))
2074 max_alloc_len = MAX_STRBUF_LEN;
2077 else if (alloc_len > max_alloc_len)
2078 alloc_len = max_alloc_len;
2080 strbuf->str = ep_alloc(alloc_len);
/* start as the empty string */
2081 strbuf->str[0] = '\0';
2084 strbuf->alloc_len = alloc_len;
2085 strbuf->max_alloc_len = max_alloc_len;
/* Create a packet-scoped strbuf pre-sized for (and initialized to)
 * `init`; a NULL init yields an empty strbuf.
 * NOTE(review): the `if (init)` guard around the copy on L911 is on
 * a line missing from this excerpt — confirm in the full file. */
2091 ep_strbuf_new(const gchar *init)
2093 emem_strbuf_t *strbuf;
2095 strbuf = ep_strbuf_sized_new(next_size(0, init?strlen(init)+1:0, 0), 0); /* +1 for NULL terminator */
2098 full_len = g_strlcpy(strbuf->str, init, strbuf->alloc_len);
/* g_strlcpy returns the untruncated source length; clamp to what fit */
2099 strbuf->len = MIN(full_len, strbuf->alloc_len-1);
/* Create a strbuf capped at ITEM_LABEL_LENGTH, optimistically sized
 * at DEFAULT_STRBUF_LEN, initialized to `init` (NULL allowed). If
 * the optimistic copy truncates, a correctly sized strbuf is created
 * and the copy retried. */
2106 ep_strbuf_new_label(const gchar *init)
2108 emem_strbuf_t *strbuf;
2111 /* Be optimistic: Allocate default size strbuf string and only */
2112 /* request an increase if needed. */
2113 /* XXX: Is it reasonable to assume that much of the usage of */
2114 /* ep_strbuf_new_label will have init==NULL or */
2115 /* strlen(init) < DEFAULT_STRBUF_LEN) ??? */
2116 strbuf = ep_strbuf_sized_new(DEFAULT_STRBUF_LEN, ITEM_LABEL_LENGTH);
2121 /* full_len does not count the trailing '\0'. */
2122 full_len = g_strlcpy(strbuf->str, init, strbuf->alloc_len);
2123 if (full_len < strbuf->alloc_len) {
2124 strbuf->len += full_len;
/* truncated: rebuild at exactly the needed size and copy again */
2126 strbuf = ep_strbuf_sized_new(full_len+1, ITEM_LABEL_LENGTH);
2127 full_len = g_strlcpy(strbuf->str, init, strbuf->alloc_len);
2128 strbuf->len = MIN(full_len, strbuf->alloc_len-1);
/* Append `str` to the strbuf. First tries the copy into the existing
 * spare capacity; on truncation it rolls the string back, grows the
 * buffer, and retries. NULL/empty inputs are no-ops. */
2135 ep_strbuf_append(emem_strbuf_t *strbuf, const gchar *str)
2137 gsize add_len, full_len;
2139 if (!strbuf || !str || str[0] == '\0') {
2143 /* Be optimistic; try the g_strlcpy first & see if enough room. */
2144 /* Note: full_len doesn't count the trailing '\0'; add_len does allow for same */
2145 add_len = strbuf->alloc_len - strbuf->len;
2146 full_len = g_strlcpy(&strbuf->str[strbuf->len], str, add_len);
2147 if (full_len < add_len) {
2148 strbuf->len += full_len;
2150 strbuf->str[strbuf->len] = '\0'; /* end string at original length again */
2151 ep_strbuf_grow(strbuf, strbuf->len + full_len + 1);
2152 add_len = strbuf->alloc_len - strbuf->len;
2153 full_len = g_strlcpy(&strbuf->str[strbuf->len], str, add_len);
/* after growing, the copy may still truncate at max_alloc_len */
2154 strbuf->len += MIN(add_len-1, full_len);
/* Append a vprintf-formatted string, using the same optimistic
 * try-then-grow-then-retry strategy as ep_strbuf_append.
 * NOTE(review): the retry on L953 consumes `ap2`, which must be a
 * va_copy of `ap` made on lines missing from this excerpt (an
 * already-consumed va_list cannot be reused) — confirm in the full
 * file, along with the matching va_end. */
2161 ep_strbuf_append_vprintf(emem_strbuf_t *strbuf, const gchar *format, va_list ap)
2164 gsize add_len, full_len;
2168 /* Be optimistic; try the g_vsnprintf first & see if enough room. */
2169 /* Note: full_len doesn't count the trailing '\0'; add_len does allow for same. */
2170 add_len = strbuf->alloc_len - strbuf->len;
2171 full_len = g_vsnprintf(&strbuf->str[strbuf->len], (gulong) add_len, format, ap);
2172 if (full_len < add_len) {
2173 strbuf->len += full_len;
2175 strbuf->str[strbuf->len] = '\0'; /* end string at original length again */
2176 ep_strbuf_grow(strbuf, strbuf->len + full_len + 1);
2177 add_len = strbuf->alloc_len - strbuf->len;
2178 full_len = g_vsnprintf(&strbuf->str[strbuf->len], (gulong) add_len, format, ap2);
2179 strbuf->len += MIN(add_len-1, full_len);
/* Varargs convenience wrapper around ep_strbuf_append_vprintf.
 * NOTE(review): the matching va_end(ap) is on a line missing from
 * this excerpt — confirm in the full file. */
2186 ep_strbuf_append_printf(emem_strbuf_t *strbuf, const gchar *format, ...)
2190 va_start(ap, format);
2191 ep_strbuf_append_vprintf(strbuf, format, ap);
/* printf-style REPLACE of the strbuf contents (vs. append).
 * NOTE(review): the reset of strbuf->len/str[0] before the append is
 * on lines missing from this excerpt — confirm in the full file. */
2196 ep_strbuf_printf(emem_strbuf_t *strbuf, const gchar *format, ...)
2205 va_start(ap, format);
2206 ep_strbuf_append_vprintf(strbuf, format, ap);
/* Append a single character, growing the buffer when needed; the
 * character is silently dropped if the buffer is already at its cap
 * (the second check on L965 can fail after a capped grow).
 * NOTE(review): the strbuf->len increment between L966 and L967 is
 * on a line missing from this excerpt. */
2211 ep_strbuf_append_c(emem_strbuf_t *strbuf, const gchar c)
2217 /* +1 for the new character & +1 for the trailing '\0'. */
2218 if (strbuf->alloc_len < strbuf->len + 1 + 1) {
2219 ep_strbuf_grow(strbuf, strbuf->len + 1 + 1);
2221 if (strbuf->alloc_len >= strbuf->len + 1 + 1) {
2222 strbuf->str[strbuf->len] = c;
2224 strbuf->str[strbuf->len] = '\0';
/* Truncate the strbuf to `len` characters; no-op when len is not
 * strictly smaller than the current length.
 * NOTE(review): the update of strbuf->len after the NUL write is on
 * a line missing from this excerpt — confirm in the full file. */
2231 ep_strbuf_truncate(emem_strbuf_t *strbuf, gsize len)
2233 if (!strbuf || len >= strbuf->len) {
2237 strbuf->str[len] = '\0';
2249 * indent-tabs-mode: t
2252 * ex: set shiftwidth=8 tabstop=8 noexpandtab
2253 * :indentSize=8:tabSize=8:noTabs=false: