2 * Wireshark memory management and garbage collection functions
7 * Wireshark - Network traffic analyzer
8 * By Gerald Combs <gerald@wireshark.org>
9 * Copyright 1998 Gerald Combs
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version 2
14 * of the License, or (at your option) any later version.
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
34 #ifdef HAVE_SYS_TIME_H
48 #include <windows.h> /* VirtualAlloc, VirtualProtect */
49 #include <process.h> /* getpid */
52 /* Print out statistics about our memory allocations? */
53 /*#define SHOW_EMEM_STATS*/
55 /* Do we want to use guardpages? if available */
56 #define WANT_GUARD_PAGES 1
58 #ifdef WANT_GUARD_PAGES
59 /* Add guard pages at each end of our allocated memory */
60 #if defined(HAVE_SYSCONF) && defined(HAVE_MMAP) && defined(HAVE_MPROTECT) && defined(HAVE_STDINT_H)
62 #ifdef HAVE_SYS_TYPES_H
63 #include <sys/types.h>
66 #if defined(MAP_ANONYMOUS)
67 #define ANON_PAGE_MODE (MAP_ANONYMOUS|MAP_PRIVATE)
68 #elif defined(MAP_ANON)
69 #define ANON_PAGE_MODE (MAP_ANON|MAP_PRIVATE)
71 #define ANON_PAGE_MODE (MAP_PRIVATE) /* have to map /dev/zero */
76 static int dev_zero_fd;
77 #define ANON_FD dev_zero_fd
81 #define USE_GUARD_PAGES 1
85 /* When required, allocate more memory from the OS in this size chunks */
86 #define EMEM_PACKET_CHUNK_SIZE (10 * 1024 * 1024)
88 /* The canary between allocations is at least 8 bytes and up to 16 bytes to
89 * allow future allocations to be 4- or 8-byte aligned.
90 * All but the last byte of the canary are randomly generated; the last byte is
91 * NULL to separate the canary and the pointer to the next canary.
93 * For example, if the allocation is a multiple of 8 bytes, the canary and
94 * pointer would look like:
95 * |0|1|2|3|4|5|6|7||0|1|2|3|4|5|6|7|
96 * |c|c|c|c|c|c|c|0||p|p|p|p|p|p|p|p| (64-bit), or:
97 * |c|c|c|c|c|c|c|0||p|p|p|p| (32-bit)
99 * If the allocation was, for example, 12 bytes, the canary would look like:
100 * |0|1|2|3|4|5|6|7||0|1|2|3|4|5|6|7|
101 * [...]|a|a|a|a|c|c|c|c||c|c|c|c|c|c|c|0| (followed by the pointer)
103 #define EMEM_CANARY_SIZE 8
104 #define EMEM_CANARY_DATA_SIZE (EMEM_CANARY_SIZE * 2 - 1)
/* One chunk of backing store for the pool allocators.  Chunks are kept on
 * singly-linked free/used lists.  NOTE(review): this excerpt is gapped —
 * fields referenced elsewhere (e.g. `buf`, `canary_last`) live on lines not
 * visible here. */
106 typedef struct _emem_chunk_t {
107 	struct _emem_chunk_t *next;
/* amount_free_init / free_offset_init record the chunk's state right after
 * creation so emem_free_all() can reset the chunk instead of releasing it. */
109 	unsigned int	amount_free_init;
110 	unsigned int	amount_free;
111 	unsigned int	free_offset_init;
112 	unsigned int	free_offset;
/* Per-pool allocator state.  There are two instances: ep_packet_mem
 * (packet lifetime) and se_packet_mem (capture-session lifetime). */
116 typedef struct _emem_header_t {
117 	emem_chunk_t *free_list;
118 	emem_chunk_t *used_list;
120 	emem_tree_t *trees;		/* only used by se_mem allocator */
/* Random canary pattern placed between allocations (see emem_canary_init). */
122 	guint8 canary[EMEM_CANARY_DATA_SIZE];
/* Allocation strategy: chunked (emem_alloc_chunk) or one-g_malloc-per-object
 * (emem_alloc_glib), chosen at init time from environment variables. */
123 	void *(*memory_alloc)(size_t size, struct _emem_header_t *);
126 	 * Tools like Valgrind and ElectricFence don't work well with memchunks.
127 	 * Export the following environment variables to make {ep|se}_alloc() allocate each
128 	 * object individually.
130 	 * WIRESHARK_DEBUG_EP_NO_CHUNKS
131 	 * WIRESHARK_DEBUG_SE_NO_CHUNKS
133 	gboolean debug_use_chunks;
135 	/* Do we want to use canaries?
136 	 * Export the following environment variables to disable/enable canaries
138 	 * WIRESHARK_DEBUG_EP_NO_CANARY
139 	 * For SE memory use of canary is default off as the memory overhead
141 	 * WIRESHARK_DEBUG_SE_USE_CANARY
143 	gboolean debug_use_canary;
145 	/* Do we want to verify no one is using a pointer to an ep_ or se_
146 	 * allocated thing where they shouldn't be?
148 	 * Export WIRESHARK_EP_VERIFY_POINTERS or WIRESHARK_SE_VERIFY_POINTERS
151 	gboolean debug_verify_pointers;
/* The two allocator pools: ep_ = per-packet lifetime, se_ = per-capture
 * (session) lifetime. */
155 static emem_header_t ep_packet_mem;
156 static emem_header_t se_packet_mem;
159  * Memory scrubbing is expensive but can be useful to ensure we don't:
160  *  - use memory before initializing it
161  *  - use memory after freeing it
162  * Export WIRESHARK_DEBUG_SCRUB_MEMORY to turn it on.
164 static gboolean debug_use_memory_scrubber = FALSE;
/* Platform page-size/version info needed for guard-page placement.
 * On Win32 we keep SYSTEM_INFO/OSVERSIONINFO; on POSIX just the page size. */
167 static SYSTEM_INFO sysinfo;
168 static OSVERSIONINFO versinfo;
170 #elif defined(USE_GUARD_PAGES)
171 static intptr_t pagesize;
172 #endif /* _WIN32 / USE_GUARD_PAGES */
/* Forward declarations of the two allocation strategies (chunked vs. glib). */
174 static void *emem_alloc_chunk(size_t size, emem_header_t *mem);
175 static void *emem_alloc_glib(size_t size, emem_header_t *mem);
/* Fill `canary` with EMEM_CANARY_DATA_SIZE random non-zero bytes.  The
 * GRand state is created lazily on first call and reused thereafter. */
178  * Set a canary value to be placed between memchunks.
181 emem_canary_init(guint8 *canary)
184 	static GRand   *rand_state = NULL;
186 	if (rand_state == NULL) {
187 		rand_state = g_rand_new();
/* Range [1, 0x100) deliberately excludes 0: a NUL byte terminates the
 * canary (see the canary layout comment near EMEM_CANARY_SIZE). */
189 	for (i = 0; i < EMEM_CANARY_DATA_SIZE; i ++) {
190 		canary[i] = (guint8) g_rand_int_range(rand_state, 1, 0x100);
/* Validate the canary at `canary` against the pool's reference pattern
 * `mem_canary` and extract the pointer to the next canary stored after the
 * terminating NUL.  On a mismatch the visible code path appears to signal
 * corruption to the caller (callers compare the result against (void *)-1
 * — TODO confirm, the return statements fall on lines missing from this
 * excerpt).  If `len` is non-NULL it receives canary-plus-pointer size. */
196 emem_canary_next(guint8 *mem_canary, guint8 *canary, int *len)
/* First EMEM_CANARY_SIZE-1 bytes must match the reference pattern exactly. */
201 	for (i = 0; i < EMEM_CANARY_SIZE-1; i++)
202 		if (mem_canary[i] != canary[i])
/* Remaining bytes match until the NUL terminator, immediately after which
 * the pointer to the next canary is stored (possibly unaligned, hence the
 * memcpy rather than a direct pointer load). */
205 	for (; i < EMEM_CANARY_DATA_SIZE; i++) {
206 		if (canary[i] == '\0') {
207 			memcpy(&ptr, &canary[i+1], sizeof(void *));
210 				*len = i + 1 + sizeof(void *);
214 		if (mem_canary[i] != canary[i])
/* Compute the canary padding for an allocation: enough bytes to round the
 * allocation up to an 8-byte boundary, but never fewer than
 * EMEM_CANARY_SIZE bytes of canary. */
222  * Given an allocation size, return the amount of room needed for the canary
223  * (with a minimum of 8 bytes) while using the canary to pad to an 8-byte
227 emem_canary_pad (size_t allocation)
231 	pad = EMEM_CANARY_SIZE - (allocation % EMEM_CANARY_SIZE);
/* If the natural pad is short of a full canary, add one more canary-sized
 * unit so at least 8 canary bytes always separate allocations. */
232 	if (pad < EMEM_CANARY_SIZE)
233 		pad += EMEM_CANARY_SIZE;
238 /* used for debugging canaries, will block */
239 #ifdef DEBUG_INTENSE_CANARY_CHECKS
240 gboolean intense_canary_checking = FALSE;
242 /* used to intensivelly check ep canaries
/* Walk every canary in the ep pool's free list and g_error() (abort) on the
 * first corruption, reporting the previous and current call sites via the
 * printf-style `fmt`.  No-op unless intense_canary_checking is set. */
245 ep_check_canary_integrity(const char* fmt, ...)
/* `there` remembers where the previous successful check happened, so a
 * corruption report can bracket the window in which it occurred.  It is
 * pre-seeded with "Launch" for the very first call. */
248 	static gchar there[128] = {
249 		'L','a','u','n','c','h',0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
250 		0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
251 		0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
252 		0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 };
254 	emem_chunk_t* npc = NULL;
256 	if (! intense_canary_checking ) return;
259 	g_vsnprintf(here, sizeof(here), fmt, ap);
/* Follow each chunk's canary chain; emem_canary_next() yields (void *)-1
 * on a corrupted canary. */
262 	for (npc = ep_packet_mem.free_list; npc != NULL; npc = npc->next) {
263 		void *canary_next = npc->canary_last;
265 		while (canary_next != NULL) {
266 			canary_next = emem_canary_next(ep_packet_mem.canary, canary_next, NULL);
267 			/* XXX, check if canary_next is inside allocated memory? */
269 			if (canary_next == (void *) -1)
270 				g_error("Per-packet memory corrupted\nbetween: %s\nand: %s", there, here);
/* All good: remember this call site for the next report. */
274 	g_strlcpy(there, here, sizeof(there));
/* Finish initializing a pool header: generate its canary pattern (if
 * canaries are enabled) and select the allocation strategy based on the
 * debug_use_chunks flag set by the caller. */
279 emem_init_chunk(emem_header_t *mem)
281 	if (mem->debug_use_canary)
282 		emem_canary_init(mem->canary);
284 	if (mem->debug_use_chunks)
285 		mem->memory_alloc = emem_alloc_chunk;
287 		mem->memory_alloc = emem_alloc_glib;
291 /* Initialize the packet-lifetime memory allocation pool.
292  * This function should be called only once when Wireshark or TShark starts
/* Debug behavior is driven by environment variables; note the EP pool uses
 * canaries by default (disabled only via WIRESHARK_DEBUG_EP_NO_CANARY, and
 * only meaningful when chunks are in use). */
298 	ep_packet_mem.free_list=NULL;
299 	ep_packet_mem.used_list=NULL;
300 	ep_packet_mem.trees=NULL;	/* not used by this allocator */
302 	ep_packet_mem.debug_use_chunks = (getenv("WIRESHARK_DEBUG_EP_NO_CHUNKS") == NULL);
303 	ep_packet_mem.debug_use_canary = ep_packet_mem.debug_use_chunks && (getenv("WIRESHARK_DEBUG_EP_NO_CANARY") == NULL);
304 	ep_packet_mem.debug_verify_pointers = (getenv("WIRESHARK_EP_VERIFY_POINTERS") != NULL);
306 #ifdef DEBUG_INTENSE_CANARY_CHECKS
307 	intense_canary_checking = (getenv("WIRESHARK_DEBUG_EP_INTENSE_CANARY") != NULL);
310 	emem_init_chunk(&ep_packet_mem);
313 /* Initialize the capture-lifetime memory allocation pool.
314  * This function should be called only once when Wireshark or TShark starts
/* Unlike the EP pool, SE canaries are OFF by default (opt-in via
 * WIRESHARK_DEBUG_SE_USE_CANARY) — note the `!= NULL` vs. the EP pool's
 * `== NULL` test. */
320 	se_packet_mem.free_list = NULL;
321 	se_packet_mem.used_list = NULL;
322 	se_packet_mem.trees = NULL;
324 	se_packet_mem.debug_use_chunks = (getenv("WIRESHARK_DEBUG_SE_NO_CHUNKS") == NULL);
325 	se_packet_mem.debug_use_canary = se_packet_mem.debug_use_chunks && (getenv("WIRESHARK_DEBUG_SE_USE_CANARY") != NULL);
326 	se_packet_mem.debug_verify_pointers = (getenv("WIRESHARK_SE_VERIFY_POINTERS") != NULL);
328 	emem_init_chunk(&se_packet_mem);
331 /* Initialize all the allocators here.
332  * This function should be called only once when Wireshark or TShark starts
341 	if (getenv("WIRESHARK_DEBUG_SCRUB_MEMORY"))
342 		debug_use_memory_scrubber = TRUE;
345 	/* Set up our guard page info for Win32 */
346 	GetSystemInfo(&sysinfo);
347 	pagesize = sysinfo.dwPageSize;
349 	/* calling GetVersionEx using the OSVERSIONINFO structure.
350 	 * OSVERSIONINFOEX requires Win NT4 with SP6 or newer NT Versions.
351 	 * OSVERSIONINFOEX will fail on Win9x and older NT Versions.
353 	 * http://msdn.microsoft.com/library/en-us/sysinfo/base/getversionex.asp
354 	 * http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfo_str.asp
355 	 * http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfoex_str.asp
357 	versinfo.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
358 	GetVersionEx(&versinfo);
360 #elif defined(USE_GUARD_PAGES)
361 	pagesize = sysconf(_SC_PAGESIZE);
/* Without MAP_ANONYMOUS/MAP_ANON we must mmap /dev/zero to get anonymous
 * pages; the fd is opened once here and kept for the process lifetime. */
363 	dev_zero_fd = ws_open("/dev/zero", O_RDWR);
364 	g_assert(dev_zero_fd != -1);
366 #endif /* _WIN32 / USE_GUARD_PAGES */
369 #ifdef SHOW_EMEM_STATS
370 #define NUM_ALLOC_DIST 10
/* Histogram of allocation sizes (powers of two starting at 32) and a
 * running count of chunk allocations, maintained by emem_alloc_chunk()
 * when SHOW_EMEM_STATS is defined. */
371 static guint allocations[NUM_ALLOC_DIST] = { 0 };
372 static guint total_no_chunks = 0;
/* Dump EP and SE allocator statistics to stderr: chunk counts, bytes used,
 * canary overhead, waste, and the allocation-size distribution.  Debug-only
 * (compiled under SHOW_EMEM_STATS); only fully-used chunks (used_list) are
 * examined to avoid skewing the waste figures. */
377 	guint num_chunks = 0;
378 	guint num_allocs = 0;
379 	guint total_used = 0;
380 	guint total_allocation = 0;
381 	guint used_for_canaries = 0;
385 	guint total_space_allocated_from_os, total_space_wasted;
386 	gboolean ep_stat=TRUE;
388 	fprintf(stderr, "\n-------- EP allocator statistics --------\n");
389 	fprintf(stderr, "%s chunks, %s canaries, %s memory scrubber\n",
390 			ep_packet_mem.debug_use_chunks ? "Using" : "Not using",
391 			ep_packet_mem.debug_use_canary ? "using" : "not using",
392 			debug_use_memory_scrubber ? "using" : "not using");
394 	if (! (ep_packet_mem.free_list || !ep_packet_mem.used_list)) {
395 		fprintf(stderr, "No memory allocated\n");
398 	if (ep_packet_mem.debug_use_chunks && ep_stat) {
399 		/* Nothing interesting without chunks */
400 		/* Only look at the used_list since those chunks are fully
401 		 * used. Looking at the free list would skew our view of what
/* --- EP pool: tally fully-used chunks --- */
404 		for (chunk = ep_packet_mem.used_list; chunk; chunk = chunk->next) {
406 			total_used += (chunk->amount_free_init - chunk->amount_free);
407 			total_allocation += chunk->amount_free_init;
409 		if (num_chunks > 0) {
410 			fprintf (stderr, "\n");
411 			fprintf (stderr, "\n---- Buffer space ----\n");
412 			fprintf (stderr, "\tChunk allocation size: %10u\n", EMEM_PACKET_CHUNK_SIZE);
413 			fprintf (stderr, "\t*    Number of chunks: %10u\n", num_chunks);
414 			fprintf (stderr, "\t-------------------------------------------\n");
415 			fprintf (stderr, "\t= %u (%u including guard pages) total space used for buffers\n",
416 				total_allocation, EMEM_PACKET_CHUNK_SIZE * num_chunks);
417 			fprintf (stderr, "\t-------------------------------------------\n");
418 			total_space_allocated_from_os = total_allocation
419 				+ sizeof(emem_chunk_t) * num_chunks;
420 			fprintf (stderr, "Total allocated from OS: %u\n\n",
421 				total_space_allocated_from_os);
423 			fprintf (stderr, "No fully used chunks, nothing to do\n");
/* --- SE pool: reset counters and repeat the analysis --- */
429 	total_allocation = 0;
430 	used_for_canaries = 0;
434 	fprintf(stderr, "\n-------- SE allocator statistics --------\n");
435 	fprintf(stderr, "Total number of chunk allocations %u\n",
437 	fprintf(stderr, "%s chunks, %s canaries\n",
438 			se_packet_mem.debug_use_chunks ? "Using" : "Not using",
439 			se_packet_mem.debug_use_canary ? "using" : "not using");
441 	if (! (se_packet_mem.free_list || !se_packet_mem.used_list)) {
442 		fprintf(stderr, "No memory allocated\n");
446 	if (!se_packet_mem.debug_use_chunks )
447 		return; /* Nothing interesting without chunks?? */
449 	/* Only look at the used_list since those chunks are fully used.
450 	 * Looking at the free list would skew our view of what we have wasted.
452 	for (chunk = se_packet_mem.used_list; chunk; chunk = chunk->next) {
454 		total_used += (chunk->amount_free_init - chunk->amount_free);
455 		total_allocation += chunk->amount_free_init;
/* Walk the chunk's canary chain to measure canary overhead; a corrupted
 * canary ((void *)-1 from emem_canary_next) is fatal. */
457 		if (se_packet_mem.debug_use_canary){
458 			void *ptr = chunk->canary_last;
461 			while (ptr != NULL) {
462 				ptr = emem_canary_next(se_packet_mem.canary, ptr, &len);
464 				if (ptr == (void *) -1)
465 					g_error("Memory corrupted");
466 				used_for_canaries += len;
471 	if (num_chunks == 0) {
473 		fprintf (stderr, "No fully used chunks, nothing to do\n");
477 	fprintf (stderr, "\n");
478 	fprintf (stderr, "---------- Allocations from the OS ----------\n");
479 	fprintf (stderr, "---- Headers ----\n");
480 	fprintf (stderr, "\t(    Chunk header size: %10lu\n",
481 		 sizeof(emem_chunk_t));
482 	fprintf (stderr, "\t*    Number of chunks: %10u\n", num_chunks);
483 	fprintf (stderr, "\t-------------------------------------------\n");
485 	total_headers = sizeof(emem_chunk_t) * num_chunks;
486 	fprintf (stderr, "\t= %u bytes used for headers\n", total_headers);
487 	fprintf (stderr, "\n---- Buffer space ----\n");
488 	fprintf (stderr, "\tChunk allocation size: %10u\n",
489 		 EMEM_PACKET_CHUNK_SIZE);
490 	fprintf (stderr, "\t*    Number of chunks: %10u\n", num_chunks);
491 	fprintf (stderr, "\t-------------------------------------------\n");
492 	fprintf (stderr, "\t= %u (%u including guard pages) bytes used for buffers\n",
493 		total_allocation, EMEM_PACKET_CHUNK_SIZE * num_chunks);
494 	fprintf (stderr, "\t-------------------------------------------\n");
495 	total_space_allocated_from_os = (EMEM_PACKET_CHUNK_SIZE * num_chunks)
497 	fprintf (stderr, "Total bytes allocated from the OS: %u\n\n",
498 		total_space_allocated_from_os);
500 	for (i = 0; i < NUM_ALLOC_DIST; i++)
501 		num_allocs += allocations[i];
503 	fprintf (stderr, "---------- Allocations from the SE pool ----------\n");
504 	fprintf (stderr, "   Number of SE allocations: %10u\n",
506 	fprintf (stderr, " Bytes used (incl. canaries): %10u\n",
508 	fprintf (stderr, "       Bytes used for canaries: %10u\n",
510 	fprintf (stderr, "Bytes unused (wasted, excl. guard pages): %10u\n",
511 		total_allocation - total_used);
512 	fprintf (stderr, "Bytes unused (wasted, incl. guard pages): %10u\n\n",
513 		total_space_allocated_from_os - total_used);
515 	fprintf (stderr, "---------- Statistics ----------\n");
516 	fprintf (stderr, "Average SE allocation size (incl. canaries): %6.2f\n",
517 		(float)total_used/(float)num_allocs);
518 	fprintf (stderr, "Average SE allocation size (excl. canaries): %6.2f\n",
519 		(float)(total_used - used_for_canaries)/(float)num_allocs);
520 	fprintf (stderr, "        Average wasted bytes per allocation: %6.2f\n",
521 		(total_allocation - total_used)/(float)num_allocs);
522 	total_space_wasted = (total_allocation - total_used)
523 		+ (sizeof(emem_chunk_t));
524 	fprintf (stderr, " Space used for headers + unused allocation: %8u\n",
526 	fprintf (stderr, "--> %% overhead/waste: %4.2f\n",
527 		100 * (float)total_space_wasted/(float)total_space_allocated_from_os);
529 	fprintf (stderr, "\nAllocation distribution (sizes include canaries):\n");
530 	for (i = 0; i < (NUM_ALLOC_DIST-1); i++)
531 		fprintf (stderr, "size < %5d: %8u\n", 32<<i, allocations[i]);
532 	fprintf (stderr, "size > %5d: %8u\n", 32<<i, allocations[i]);
/* Return whether `ptr` points into the *allocated* (handed-out) region of
 * any chunk in `chunk_list` — i.e. between the chunk's initial free offset
 * and its current one. */
537 emem_verify_pointer_list(const emem_chunk_t *chunk_list, const void *ptr)
539 	const gchar *cptr = ptr;
540 	const emem_chunk_t *chunk;
542 	for (chunk = chunk_list; chunk; chunk = chunk->next) {
543 		if (cptr >= (chunk->buf + chunk->free_offset_init) && cptr < (chunk->buf + chunk->free_offset))
/* A pointer belongs to a pool if it is inside either the free or the used
 * chunk list. */
550 emem_verify_pointer(const emem_header_t *hdr, const void *ptr)
552 	return emem_verify_pointer_list(hdr->free_list, ptr) || emem_verify_pointer_list(hdr->used_list, ptr);
/* Public wrappers: only perform the (expensive) verification when the
 * corresponding WIRESHARK_*_VERIFY_POINTERS environment flag was set. */
556 ep_verify_pointer(const void *ptr)
558 	if (ep_packet_mem.debug_verify_pointers)
559 		return emem_verify_pointer(&ep_packet_mem, ptr);
565 se_verify_pointer(const void *ptr)
567 	if (se_packet_mem.debug_verify_pointers)
568 		return emem_verify_pointer(&se_packet_mem, ptr);
/* Overwrite `size` bytes at `buf` with a recognizable poison pattern:
 * 0xBADDCAFE when the memory is being handed out (`alloc` TRUE) and
 * 0xDEADBEEF when it is being returned to the pool.  No-op unless
 * WIRESHARK_DEBUG_SCRUB_MEMORY enabled the scrubber. */
574 emem_scrub_memory(char *buf, size_t size, gboolean alloc)
576 	guint scrubbed_value;
579 	if (!debug_use_memory_scrubber)
582 	if (alloc) /* this memory is being allocated */
583 		scrubbed_value = 0xBADDCAFE;
584 	else /* this memory is being freed */
585 		scrubbed_value = 0xDEADBEEF;
587 	/* We shouldn't need to check the alignment of the starting address
588 	 * since this is malloc'd memory (or 'pagesize' bytes into malloc'd
592 	/* XXX - if the above is *NOT* true, we should use memcpy here,
593 	 * in order to avoid problems on alignment-sensitive platforms, e.g.
594 	 * http://stackoverflow.com/questions/108866/is-there-memset-that-accepts-integers-larger-than-char
/* Word-sized stores for the bulk... */
597 	for (offset = 0; offset + sizeof(guint) <= size; offset += sizeof(guint))
598 		*(guint*)(void*)(buf+offset) = scrubbed_value;
600 	/* Initialize the last bytes, if any */
/* ...then the 1-3 byte tail, written big-endian-wise from the pattern's
 * high byte down. */
602 		*(guint8*)(buf+offset) = scrubbed_value >> 24;
605 			*(guint8*)(buf+offset) = (scrubbed_value >> 16) & 0xFF;
608 				*(guint8*)(buf+offset) = (scrubbed_value >> 8) & 0xFF;
/* Allocate a new chunk header plus `size` bytes of backing store, using
 * VirtualAlloc (Win32), mmap (POSIX with guard pages), or g_malloc as a
 * fallback.  On OOM the pool throws OutOfMemoryError (or aborts if
 * WIRESHARK_ABORT_ON_OUT_OF_MEMORY is set).  No guard pages are armed
 * here — see emem_create_chunk_gp() for that. */
616 static emem_chunk_t *
617 emem_create_chunk(size_t size)
621 	npc = g_new(emem_chunk_t, 1);
623 	npc->canary_last = NULL;
627 	 * MSDN documents VirtualAlloc/VirtualProtect at
628 	 * http://msdn.microsoft.com/library/en-us/memory/base/creating_guard_pages.asp
631 	/* XXX - is MEM_COMMIT|MEM_RESERVE correct? */
632 	npc->buf = VirtualAlloc(NULL, size,
633 		MEM_COMMIT|MEM_RESERVE, PAGE_READWRITE);
635 	if (npc->buf == NULL) {
637 		if (getenv("WIRESHARK_ABORT_ON_OUT_OF_MEMORY"))
640 			THROW(OutOfMemoryError);
643 #elif defined(USE_GUARD_PAGES)
644 	npc->buf = mmap(NULL, size,
645 		PROT_READ|PROT_WRITE, ANON_PAGE_MODE, ANON_FD, 0);
647 	if (npc->buf == MAP_FAILED) {
649 		if (getenv("WIRESHARK_ABORT_ON_OUT_OF_MEMORY"))
652 			THROW(OutOfMemoryError);
655 #else /* Is there a draft in here? */
656 	npc->buf = g_malloc(size);
657 	/* g_malloc() can't fail */
660 #ifdef SHOW_EMEM_STATS
/* Entire buffer is initially free, starting at offset 0. */
664 	npc->amount_free = npc->amount_free_init = (unsigned int) size;
665 	npc->free_offset = npc->free_offset_init = 0;
/* Release a chunk's backing store with the API that matches how
 * emem_create_chunk() obtained it (VirtualFree / munmap / presumably
 * g_free on the fallback path — the latter falls on lines missing from
 * this excerpt). */
670 emem_destroy_chunk(emem_chunk_t *npc)
673 	VirtualFree(npc->buf, 0, MEM_RELEASE);
674 #elif defined(USE_GUARD_PAGES)
/* NOTE(review): munmap's length is the post-guard-page amount_free_init;
 * for chunks created via emem_create_chunk_gp() this differs from the
 * original mmap size — verify against the full source. */
675 	munmap(npc->buf, npc->amount_free_init);
679 #ifdef SHOW_EMEM_STATS
/* Create a chunk and arm a no-access guard page at each end of its buffer,
 * so off-by-one reads/writes past pool allocations fault immediately.  The
 * usable region (amount_free_init / free_offset_init) is shrunk to the
 * span between the two guard pages. */
685 static emem_chunk_t *
686 emem_create_chunk_gp(size_t size)
690 	char *buf_end, *prot1, *prot2;
692 #elif defined(USE_GUARD_PAGES)
694 	char *buf_end, *prot1, *prot2;
695 #endif /* _WIN32 / USE_GUARD_PAGES */
698 	npc = emem_create_chunk(size);
701 	buf_end = npc->buf + size;
703 	/* Align our guard pages on page-sized boundaries */
/* prot1: first page boundary at/after buf; prot2: last whole page before
 * buf_end. */
704 	prot1 = (char *) ((((intptr_t) npc->buf + pagesize - 1) / pagesize) * pagesize);
705 	prot2 = (char *) ((((intptr_t) buf_end - (1 * pagesize)) / pagesize) * pagesize);
707 	ret = VirtualProtect(prot1, pagesize, PAGE_NOACCESS, &oldprot);
/* VirtualProtect with PAGE_NOACCESS is allowed to fail on Win9x
 * (VER_PLATFORM_WIN32_WINDOWS); anywhere else it must succeed. */
708 	g_assert(ret != 0 || versinfo.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS);
709 	ret = VirtualProtect(prot2, pagesize, PAGE_NOACCESS, &oldprot);
710 	g_assert(ret != 0 || versinfo.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS);
712 	npc->amount_free_init = (unsigned int) (prot2 - prot1 - pagesize);
713 	npc->free_offset_init = (unsigned int) (prot1 - npc->buf) + pagesize;
714 #elif defined(USE_GUARD_PAGES)
715 	buf_end = npc->buf + size;
717 	/* Align our guard pages on page-sized boundaries */
718 	prot1 = (char *) ((((intptr_t) npc->buf + pagesize - 1) / pagesize) * pagesize);
719 	prot2 = (char *) ((((intptr_t) buf_end - (1 * pagesize)) / pagesize) * pagesize);
721 	ret = mprotect(prot1, pagesize, PROT_NONE);
723 	ret = mprotect(prot2, pagesize, PROT_NONE);
726 	npc->amount_free_init = prot2 - prot1 - pagesize;
727 	npc->free_offset_init = (prot1 - npc->buf) + pagesize;
729 	npc->amount_free_init = size;
730 	npc->free_offset_init = 0;
731 #endif /* USE_GUARD_PAGES */
733 	npc->amount_free = npc->amount_free_init;
734 	npc->free_offset = npc->free_offset_init;
/* Chunked allocator: carve `size` bytes (rounded up for alignment and,
 * when enabled, a trailing canary) out of the pool's current free chunk,
 * creating a new EMEM_PACKET_CHUNK_SIZE chunk when needed.  Returns the
 * user pointer; never returns NULL (chunk creation throws on OOM). */
739 emem_alloc_chunk(size_t size, emem_header_t *mem)
744 	gboolean use_canary = mem->debug_use_canary;
746 	emem_chunk_t *free_list;
748 	/* Allocate room for at least 8 bytes of canary plus some padding
749 	 * so the canary ends on an 8-byte boundary.
750 	 * But first add the room needed for the pointer to the next canary
751 	 * (so the entire allocation will end on an 8-byte boundary).
754 		asize += sizeof(void *);
755 		pad = emem_canary_pad(asize);
/* No canary: just round the request up to WS_MEM_ALIGN. */
757 		pad = (WS_MEM_ALIGN - (asize & (WS_MEM_ALIGN-1))) & (WS_MEM_ALIGN-1);
761 #ifdef SHOW_EMEM_STATS
762 	/* Do this check here so we can include the canary size */
763 	if (mem == &se_packet_mem) {
768 		else if (asize < 128)
770 		else if (asize < 256)
772 		else if (asize < 512)
774 		else if (asize < 1024)
776 		else if (asize < 2048)
778 		else if (asize < 4096)
780 		else if (asize < 8192)
782 		else if (asize < 16384)
785 			allocations[(NUM_ALLOC_DIST-1)]++;
789 	/* make sure we dont try to allocate too much (arbitrary limit) */
790 	DISSECTOR_ASSERT(size<(EMEM_PACKET_CHUNK_SIZE>>2));
793 		mem->free_list = emem_create_chunk_gp(EMEM_PACKET_CHUNK_SIZE);
795 	/* oops, we need to allocate more memory to serve this request
796 	 * than we have free. move this node to the used list and try again
798 	if(asize > mem->free_list->amount_free) {
801 		mem->free_list=mem->free_list->next;
802 		npc->next=mem->used_list;
806 			mem->free_list = emem_create_chunk_gp(EMEM_PACKET_CHUNK_SIZE);
/* Bump-pointer allocation from the head chunk of the free list. */
809 	free_list = mem->free_list;
811 	buf = free_list->buf + free_list->free_offset;
813 	free_list->amount_free -= (unsigned int) asize;
814 	free_list->free_offset += (unsigned int) asize;
/* Lay down pad-1 canary bytes after the user data, then (after the
 * terminating NUL implied by the canary pattern) store a pointer to the
 * previous canary, chaining all canaries in the chunk for later checks. */
817 		char *cptr = (char *)buf + size;
819 		memcpy(cptr, mem->canary, pad-1);
821 		memcpy(cptr + pad, &free_list->canary_last, sizeof(void *));
823 		free_list->canary_last = cptr;
/* Non-chunked allocator (debug mode for Valgrind/ElectricFence): every
 * request gets its own g_malloc'd buffer wrapped in a one-off chunk header
 * pushed onto the used list, so each object is individually tracked. */
830 emem_alloc_glib(size_t size, emem_header_t *mem)
834 	npc=g_new(emem_chunk_t, 1);
835 	npc->next=mem->used_list;
836 	npc->buf=g_malloc(size);
837 	npc->canary_last = NULL;
839 	/* There's no padding/alignment involved (from our point of view) when
840 	 * we fetch the memory directly from the system pool, so WYSIWYG */
841 	npc->free_offset = npc->free_offset_init = 0;
842 	npc->amount_free = npc->amount_free_init = (unsigned int) size;
847 /* allocate 'size' amount of memory. */
/* Dispatch to the pool's configured strategy (chunked or glib), then
 * optionally poison the fresh memory with the alloc pattern. */
849 emem_alloc(size_t size, emem_header_t *mem)
851 	void *buf = mem->memory_alloc(size, mem);
853 	/* XXX - this is a waste of time if the allocator function is going to
854 	 * memset this straight back to 0.
856 	emem_scrub_memory(buf, size, TRUE);
861 /* allocate 'size' amount of memory with an allocation lifetime until the
/* Public entry point: packet-lifetime allocation. */
865 ep_alloc(size_t size)
867 	return emem_alloc(size, &ep_packet_mem);
870 /* allocate 'size' amount of memory with an allocation lifetime until the
/* Public entry point: capture-session-lifetime allocation. */
874 se_alloc(size_t size)
876 	return emem_alloc(size, &se_packet_mem);
/* Slab allocator: hand out one fixed-size item from `mem_chunk`.  Freed
 * items are recycled from an intrusive free list (the first sizeof(void*)
 * bytes of a freed item hold the next-freed pointer); otherwise items are
 * carved from the current chunk, growing by item_size*count (page-rounded)
 * chunks as needed. */
880 sl_alloc(struct ws_memory_slab *mem_chunk)
885 	/* XXX, debug_use_slices -> fallback to g_slice_alloc0 */
/* Fast path: pop the most recently freed item. */
887 	if ((mem_chunk->freed != NULL)) {
888 		ptr = mem_chunk->freed;
889 		memcpy(&mem_chunk->freed, ptr, sizeof(void *));
893 	if (!(chunk = mem_chunk->chunk_list) || chunk->amount_free < (guint) mem_chunk->item_size) {
894 		size_t alloc_size = mem_chunk->item_size * mem_chunk->count;
896 		/* align to page-size */
897 #if defined (_WIN32) || defined(USE_GUARD_PAGES)
898 		alloc_size = (alloc_size + (pagesize - 1)) & ~(pagesize - 1);
901 		chunk = emem_create_chunk(alloc_size);	/* NOTE: using version without guard pages! */
902 		chunk->next = mem_chunk->chunk_list;
903 		mem_chunk->chunk_list = chunk;
/* Bump-pointer carve from the head chunk. */
906 	ptr = chunk->buf + chunk->free_offset;
907 	chunk->free_offset += mem_chunk->item_size;
908 	chunk->amount_free -= mem_chunk->item_size;
/* Return an item to the slab: link it onto the intrusive freed list by
 * writing the old head into the item's first bytes. */
914 sl_free(struct ws_memory_slab *mem_chunk, gpointer ptr)
916 	/* XXX, debug_use_slices -> fallback to g_slice_free1 */
918 	/* XXX, abort if ptr not found in emem_verify_pointer_list()? */
919 	if (ptr != NULL /* && emem_verify_pointer_list(mem_chunk->chunk_list, ptr) */) {
920 		memcpy(ptr, &(mem_chunk->freed), sizeof(void *));
921 		mem_chunk->freed = ptr;
/* Zero-initialized variants of the three allocators. */
926 ep_alloc0(size_t size)
928 	return memset(ep_alloc(size),'\0',size);
932 se_alloc0(size_t size)
934 	return memset(se_alloc(size),'\0',size);
938 sl_alloc0(struct ws_memory_slab *mem_chunk)
940 	return memset(sl_alloc(mem_chunk), '\0', mem_chunk->item_size);
/* Duplicate `src` into memory obtained from `allocator` (ep_alloc or
 * se_alloc).  A NULL src maps to the literal "<NULL>" so callers need not
 * check (the substitution itself falls on lines missing from this
 * excerpt — see the comment below). */
944 emem_strdup(const gchar *src, void *allocator(size_t))
949 	/* If str is NULL, just return the string "<NULL>" so that the callers don't
950 	 * have to bother checking it.
955 	len = (guint) strlen(src);
956 	dst = memcpy(allocator(len+1), src, len+1);
962 ep_strdup(const gchar *src)
964 	return emem_strdup(src, ep_alloc);
968 se_strdup(const gchar *src)
970 	return emem_strdup(src, se_alloc);
/* Length-bounded duplicate: copies at most `len` chars, stopping early at
 * a NUL in src; result is always NUL-terminated (termination falls on a
 * line missing from this excerpt — TODO confirm against full source). */
974 emem_strndup(const gchar *src, size_t len, void *allocator(size_t))
976 	gchar *dst = allocator(len+1);
979 	for (i = 0; (i < len) && src[i]; i++)
988 ep_strndup(const gchar *src, size_t len)
990 	return emem_strndup(src, len, ep_alloc);
994 se_strndup(const gchar *src, size_t len)
996 	return emem_strndup(src, len, se_alloc);
/* Binary-safe duplicates into the ep/se pools. */
1002 ep_memdup(const void* src, size_t len)
1004 	return memcpy(ep_alloc(len), src, len);
1008 se_memdup(const void* src, size_t len)
1010 	return memcpy(se_alloc(len), src, len);
/* vprintf into pool-allocated storage: size the buffer with
 * g_printf_string_upper_bound() on one va_list, format with a second copy
 * (`ap2`, copied on a line missing from this excerpt). */
1014 emem_strdup_vprintf(const gchar *fmt, va_list ap, void *allocator(size_t))
1022 	len = g_printf_string_upper_bound(fmt, ap);
1024 	dst = allocator(len+1);
1025 	g_vsnprintf (dst, (gulong) len, fmt, ap2);
1032 ep_strdup_vprintf(const gchar *fmt, va_list ap)
1034 	return emem_strdup_vprintf(fmt, ap, ep_alloc);
1038 se_strdup_vprintf(const gchar* fmt, va_list ap)
1040 	return emem_strdup_vprintf(fmt, ap, se_alloc);
/* Varargs wrappers over the vprintf versions. */
1044 ep_strdup_printf(const gchar *fmt, ...)
1050 	dst = ep_strdup_vprintf(fmt, ap);
1056 se_strdup_printf(const gchar *fmt, ...)
1062 	dst = se_strdup_vprintf(fmt, ap);
/* Split `string` on separator `sep` into a NULL-terminated vector of
 * pointers, everything allocated from the ep pool.  Works on an ep_strdup
 * copy: separators are overwritten in place (presumably with NULs — the
 * overwrite falls on lines missing from this excerpt) and a small state
 * machine then collects token start pointers.  max_tokens < 1 means
 * unlimited (INT_MAX). */
1068 ep_strsplit(const gchar* string, const gchar* sep, int max_tokens)
1077 	enum { AT_START, IN_PAD, IN_TOKEN } state;
1085 	s = splitted = ep_strdup(string);
1086 	str_len = (guint) strlen(splitted);
1087 	sep_len = (guint) strlen(sep);
1089 	if (max_tokens < 1) max_tokens = INT_MAX;
/* First pass: count tokens by locating separator occurrences. */
1094 	while (tokens <= (guint)max_tokens && ( s = strstr(s,sep) )) {
1097 		for(i=0; i < sep_len; i++ )
1104 	vec = ep_alloc_array(gchar*,tokens+1);
/* Second pass: walk the mutated copy, recording each token's start. */
1107 	for (i=0; i< str_len; i++) {
1110 			switch(splitted[i]) {
1115 					vec[curr_tok] = &(splitted[i]);
1121 			switch(splitted[i]) {
1128 			switch(splitted[i]) {
1130 					vec[curr_tok] = &(splitted[i]);
1139 	vec[curr_tok] = NULL;
/* Concatenate a NULL-terminated varargs list of strings into one ep pool
 * allocation.  Two passes over the arguments: first to total the lengths,
 * then to copy with g_stpcpy. */
1145 ep_strconcat(const gchar *string1, ...)
1156 	l = 1 + strlen(string1);
1157 	va_start(args, string1);
1158 	s = va_arg(args, gchar*);
1161 		s = va_arg(args, gchar*);
1165 	concat = ep_alloc(l);
/* Second pass: g_stpcpy returns the end of the copied string, so `ptr`
 * always points at the next write position. */
1168 	ptr = g_stpcpy(ptr, string1);
1169 	va_start(args, string1);
1170 	s = va_arg(args, gchar*);
1172 		ptr = g_stpcpy(ptr, s);
1173 		s = va_arg(args, gchar*);
1182 /* release all allocated memory back to the pool. */
/* Reset a pool: move every used chunk back onto the free list, verify each
 * chunk's canary chain (aborting on corruption), optionally scrub the
 * recycled memory, and reset chunk bookkeeping to its initial state.  In
 * non-chunked (glib) mode the per-object chunks are instead destroyed
 * outright.  Finally all trees registered with the pool are emptied. */
1184 emem_free_all(emem_header_t *mem)
1186 	gboolean use_chunks = mem->debug_use_chunks;
1189 	emem_tree_t *tree_list;
1191 	/* move all used chunks over to the free list */
1192 	while(mem->used_list){
1194 		mem->used_list=mem->used_list->next;
1195 		npc->next=mem->free_list;
1199 	/* clear them all out */
1200 	npc = mem->free_list;
1201 	while (npc != NULL) {
/* Walk and consume the canary chain; emem_canary_next yields (void *)-1
 * on a corrupted canary, which is fatal. */
1203 			while (npc->canary_last != NULL) {
1204 				npc->canary_last = emem_canary_next(mem->canary, npc->canary_last, NULL);
1205 				/* XXX, check if canary_last is inside allocated memory? */
1207 				if (npc->canary_last == (void *) -1)
1208 					g_error("Memory corrupted");
/* Poison only the region that was actually handed out this round. */
1211 			emem_scrub_memory((npc->buf + npc->free_offset_init),
1212 					  (npc->free_offset - npc->free_offset_init),
1215 			npc->amount_free = npc->amount_free_init;
1216 			npc->free_offset = npc->free_offset_init;
/* Glib (non-chunked) mode: each allocation owns its chunk; free them. */
1219 			emem_chunk_t *next = npc->next;
1221 			emem_scrub_memory(npc->buf, npc->amount_free_init, FALSE);
1230 		/* We've freed all this memory already */
1231 		mem->free_list = NULL;
1234 	/* release/reset all allocated trees */
1235 	for(tree_list=mem->trees;tree_list;tree_list=tree_list->next){
1236 		tree_list->tree=NULL;
1240 /* release all allocated memory back to the pool. */
/* Per-packet reset, called between packets. */
1244 	emem_free_all(&ep_packet_mem);
1247 /* release all allocated memory back to the pool. */
/* Per-capture reset; also dumps allocator stats in SHOW_EMEM_STATS builds. */
1251 #ifdef SHOW_EMEM_STATS
1252 	print_alloc_stats();
1255 	emem_free_all(&se_packet_mem);
/* Tear down a slab: detach its chunk list and freed list, then destroy
 * every chunk (returning the backing store to the OS). */
1259 sl_free_all(struct ws_memory_slab *mem_chunk)
1261 	emem_chunk_t *chunk_list = mem_chunk->chunk_list;
1263 	mem_chunk->chunk_list = NULL;
1264 	mem_chunk->freed = NULL;
1265 	while (chunk_list) {
1266 		emem_chunk_t *chunk = chunk_list;
1268 		chunk_list = chunk_list->next;
1269 		emem_destroy_chunk(chunk);
/* Create an ep-pool stack: a pointer to the current frame, initialized
 * with one zeroed sentinel frame. */
1274 ep_stack_new(void) {
1275 	ep_stack_t s = ep_new(struct _ep_stack_frame_t*);
1276 	*s = ep_new0(struct _ep_stack_frame_t);
1280 /* for ep_stack_t we'll keep the popped frames so we reuse them instead
1281    of allocating new ones.
/* Push: reuse the frame above the current head if one was popped earlier;
 * otherwise allocate and link a fresh frame. */
1285 ep_stack_push(ep_stack_t stack, void* data)
1287 	struct _ep_stack_frame_t* frame;
1288 	struct _ep_stack_frame_t* head = (*stack);
1291 		frame = head->above;
1293 		frame = ep_new(struct _ep_stack_frame_t);
1294 		head->above = frame;
1295 		frame->below = head;
1296 		frame->above = NULL;
1299 	frame->payload = data;
/* Pop: step down one frame (keeping the old frame linked for reuse) and
 * return the payload of the frame we just left. */
1306 ep_stack_pop(ep_stack_t stack)
1309 	if ((*stack)->below) {
1310 		(*stack) = (*stack)->below;
1311 		return (*stack)->above->payload;
/* Create a session-lifetime red-black tree and register it on
 * se_packet_mem.trees so se_free_all() can empty it.  The header itself is
 * g_malloc'd (it must outlive pool resets); node storage comes from
 * se_alloc via the `malloc` member. */
1318 se_tree_create(int type, const char *name)
1320 	emem_tree_t *tree_list;
1322 	tree_list=g_malloc(sizeof(emem_tree_t));
1323 	tree_list->next=se_packet_mem.trees;
1324 	tree_list->type=type;
1325 	tree_list->tree=NULL;
1326 	tree_list->name=name;
1327 	tree_list->malloc=se_alloc;
1328 	se_packet_mem.trees=tree_list;
/* Exact-match lookup of a 32-bit key: standard binary-search descent of
 * the red-black tree (the left/right descent steps fall on lines missing
 * from this excerpt). */
1334 emem_tree_lookup32(emem_tree_t *se_tree, guint32 key)
1336 	emem_tree_node_t *node;
1341 		if(key==node->key32){
1344 		if(key<node->key32){
1348 		if(key>node->key32){
/* Lookup of the largest key that is <= `key` ("less-or-equal" match).
 * First descend as for an exact match; if the search falls off the tree,
 * walk back up to find the nearest ancestor whose key is smaller than the
 * search key.  Several branches fall on lines missing from this excerpt. */
1357 emem_tree_lookup32_le(emem_tree_t *se_tree, guint32 key)
1359 	emem_tree_node_t *node;
/* Exact-match descent. */
1369 		if(key==node->key32){
1372 		if(key<node->key32){
1380 		if(key>node->key32){
1395 	/* If we are still at the root of the tree this means that this node
1396 	 * is either smaller than the search key and then we return this
1397 	 * node or else there is no smaller key available and then
1401 		if(key>node->key32){
/* Landed on a left child: either it is itself the answer, or we must
 * climb to an ancestor with a smaller key. */
1408 	if(node->parent->left==node){
1411 		if(key>node->key32){
1412 			/* if this is a left child and its key is smaller than
1413 			 * the search key, then this is the node we want.
1417 			/* if this is a left child and its key is bigger than
1418 			 * the search key, we have to check if any
1419 			 * of our ancestors are smaller than the search key.
1422 				if(key>node->key32){
/* Landed on a right child. */
1432 		if(node->key32<key){
1433 			/* if this is the right child and its key is smaller
1434 			 * than the search key then this is the one we want.
1438 			/* if this is the right child and its key is larger
1439 			 * than the search key then our parent is the one we
1442 			return node->parent->data;
/* Red-black tree relationship helpers used by the insert rebalancing
 * cases below. */
1449 static inline emem_tree_node_t *
1450 emem_tree_parent(emem_tree_node_t *node)
1452 	return node->parent;
1455 static inline emem_tree_node_t *
1456 emem_tree_grandparent(emem_tree_node_t *node)
1458 	emem_tree_node_t *parent;
1460 	parent=emem_tree_parent(node);
1462 		return parent->parent;
/* Uncle = the grandparent's other child; NULL-parent/grandparent guards
 * fall on lines missing from this excerpt. */
1467 static inline emem_tree_node_t *
1468 emem_tree_uncle(emem_tree_node_t *node)
1470 	emem_tree_node_t *parent, *grandparent;
1472 	parent=emem_tree_parent(node);
1476 	grandparent=emem_tree_parent(parent);
1480 	if(parent==grandparent->left){
1481 		return grandparent->right;
1483 	return grandparent->left;
/* Forward declarations: the rb_insert cases recurse into each other. */
1486 static inline void rb_insert_case1(emem_tree_t *se_tree, emem_tree_node_t *node);
1487 static inline void rb_insert_case2(emem_tree_t *se_tree, emem_tree_node_t *node);
/* Standard left rotation about `node`; updates the parent's child link
 * (or the tree root when `node` is the root) and all affected parent
 * pointers. */
1490 rotate_left(emem_tree_t *se_tree, emem_tree_node_t *node)
1493 		if(node->parent->left==node){
1494 			node->parent->left=node->right;
1496 			node->parent->right=node->right;
1499 		se_tree->tree=node->right;
1501 	node->right->parent=node->parent;
1502 	node->parent=node->right;
1503 	node->right=node->right->left;
1505 		node->right->parent=node;
1507 	node->parent->left=node;
/* Mirror image of rotate_left: right rotation about `node`. */
1511 rotate_right(emem_tree_t *se_tree, emem_tree_node_t *node)
1514 		if(node->parent->left==node){
1515 			node->parent->left=node->left;
1517 			node->parent->right=node->left;
1520 		se_tree->tree=node->left;
1522 	node->left->parent=node->parent;
1523 	node->parent=node->left;
1524 	node->left=node->left->right;
1526 		node->left->parent=node;
1528 	node->parent->right=node;
/* RB insert, case 5: node and its red parent are on the same side (both
 * left or both right children).  Recolor parent black and grandparent red,
 * then rotate at the grandparent to restore the red-black invariants. */
1532 rb_insert_case5(emem_tree_t *se_tree, emem_tree_node_t *node)
1534 emem_tree_node_t *grandparent;
1535 emem_tree_node_t *parent;
1537 parent=emem_tree_parent(node);
1538 grandparent=emem_tree_parent(parent);
1539 parent->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1540 grandparent->u.rb_color=EMEM_TREE_RB_COLOR_RED;
1541 if( (node==parent->left) && (parent==grandparent->left) ){
1542 rotate_right(se_tree, grandparent);
1544 rotate_left(se_tree, grandparent);
/* RB insert, case 4: node and its parent are on opposite sides (a "zig-zag").
 * Rotate at the parent to straighten the path into the same-side shape,
 * then finish with case 5.  NOTE(review): after the rotation, `node` is
 * presumably re-pointed at the former parent on an elided line — confirm. */
1549 rb_insert_case4(emem_tree_t *se_tree, emem_tree_node_t *node)
1551 emem_tree_node_t *grandparent;
1552 emem_tree_node_t *parent;
1554 parent=emem_tree_parent(node);
1555 grandparent=emem_tree_parent(parent);
1559 if( (node==parent->right) && (parent==grandparent->left) ){
1560 rotate_left(se_tree, parent);
1562 } else if( (node==parent->left) && (parent==grandparent->right) ){
1563 rotate_right(se_tree, parent);
1566 rb_insert_case5(se_tree, node);
/* RB insert, case 3: if both parent and uncle are red, recolor them black
 * and the grandparent red, then restart fixup at the grandparent (which may
 * itself now violate the invariants).  Otherwise fall through to case 4. */
1570 rb_insert_case3(emem_tree_t *se_tree, emem_tree_node_t *node)
1572 emem_tree_node_t *grandparent;
1573 emem_tree_node_t *parent;
1574 emem_tree_node_t *uncle;
1576 uncle=emem_tree_uncle(node);
1577 if(uncle && (uncle->u.rb_color==EMEM_TREE_RB_COLOR_RED)){
1578 parent=emem_tree_parent(node);
1579 parent->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1580 uncle->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1581 grandparent=emem_tree_grandparent(node);
1582 grandparent->u.rb_color=EMEM_TREE_RB_COLOR_RED;
/* push the potential red-red violation up the tree */
1583 rb_insert_case1(se_tree, grandparent);
1585 rb_insert_case4(se_tree, node);
/* RB insert, case 2: if the parent is black the tree is already valid and
 * we are done; otherwise continue with case 3. */
1590 rb_insert_case2(emem_tree_t *se_tree, emem_tree_node_t *node)
1592 emem_tree_node_t *parent;
1594 parent=emem_tree_parent(node);
1595 /* parent is always non-NULL here */
1596 if(parent->u.rb_color==EMEM_TREE_RB_COLOR_BLACK){
1599 rb_insert_case3(se_tree, node);
/* RB insert, case 1 (entry point of the fixup chain): a node with no parent
 * is the root — paint it black.  Otherwise continue with case 2. */
1603 rb_insert_case1(emem_tree_t *se_tree, emem_tree_node_t *node)
1605 emem_tree_node_t *parent;
1607 parent=emem_tree_parent(node);
1609 node->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1612 rb_insert_case2(se_tree, node);
1615 /* insert a new node in the tree. if this node matches an already existing node
1616 * then just replace the data for that node */
/* Insert `data` under 32-bit `key` in se_tree.  Nodes are allocated through
 * the tree's own allocator (se_alloc / g_malloc, set at tree creation), so
 * their lifetime follows the tree's memory scope. */
1618 emem_tree_insert32(emem_tree_t *se_tree, guint32 key, void *data)
1620 emem_tree_node_t *node;
1624 /* is this the first node ?*/
/* empty tree: the first node becomes the (black) root */
1626 node=se_tree->malloc(sizeof(emem_tree_node_t));
1627 switch(se_tree->type){
1628 case EMEM_TREE_TYPE_RED_BLACK:
1629 node->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1637 node->u.is_subtree = EMEM_TREE_NODE_IS_DATA;
1642 /* it was not the new root so walk the tree until we find where to
1643 * insert this new leaf.
1646 /* this node already exists, so just replace the data pointer*/
1647 if(key==node->key32){
1651 if(key<node->key32) {
1653 /* new node to the left */
1654 emem_tree_node_t *new_node;
1655 new_node=se_tree->malloc(sizeof(emem_tree_node_t));
1656 node->left=new_node;
1657 new_node->parent=node;
1658 new_node->left=NULL;
1659 new_node->right=NULL;
1660 new_node->key32=key;
1661 new_node->data=data;
1662 new_node->u.is_subtree=EMEM_TREE_NODE_IS_DATA;
1669 if(key>node->key32) {
1671 /* new node to the right */
1672 emem_tree_node_t *new_node;
1673 new_node=se_tree->malloc(sizeof(emem_tree_node_t));
1674 node->right=new_node;
1675 new_node->parent=node;
1676 new_node->left=NULL;
1677 new_node->right=NULL;
1678 new_node->key32=key;
1679 new_node->data=data;
1680 new_node->u.is_subtree=EMEM_TREE_NODE_IS_DATA;
1689 /* node will now point to the newly created node */
/* rebalance: new leaves start red, then the case-1..5 chain fixes the tree */
1690 switch(se_tree->type){
1691 case EMEM_TREE_TYPE_RED_BLACK:
1692 node->u.rb_color=EMEM_TREE_RB_COLOR_RED;
1693 rb_insert_case1(se_tree, node);
/* Look up `key`; if absent, insert a new node whose data is produced by
 * calling func(ud).  `is_subtree` marks whether the stored data pointer is a
 * nested emem_tree (EMEM_TREE_NODE_IS_SUBTREE) or plain user data.  Used by
 * emem_tree_insert32_array to lazily build the chain of subtrees.
 * Mirrors emem_tree_insert32 except data comes from func(ud) and an existing
 * node's data is returned rather than replaced. */
1699 lookup_or_insert32(emem_tree_t *se_tree, guint32 key, void*(*func)(void*),void* ud, int is_subtree)
1701 emem_tree_node_t *node;
1705 /* is this the first node ?*/
1707 node=se_tree->malloc(sizeof(emem_tree_node_t));
1708 switch(se_tree->type){
1709 case EMEM_TREE_TYPE_RED_BLACK:
1710 node->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1717 node->data= func(ud);
1718 node->u.is_subtree = is_subtree;
1723 /* it was not the new root so walk the tree until we find where to
1724 * insert this new leaf.
1727 /* this node already exists, so just return the data pointer*/
1728 if(key==node->key32){
1731 if(key<node->key32) {
1733 /* new node to the left */
1734 emem_tree_node_t *new_node;
1735 new_node=se_tree->malloc(sizeof(emem_tree_node_t));
1736 node->left=new_node;
1737 new_node->parent=node;
1738 new_node->left=NULL;
1739 new_node->right=NULL;
1740 new_node->key32=key;
1741 new_node->data= func(ud);
1742 new_node->u.is_subtree = is_subtree;
1749 if(key>node->key32) {
1751 /* new node to the right */
1752 emem_tree_node_t *new_node;
1753 new_node=se_tree->malloc(sizeof(emem_tree_node_t));
1754 node->right=new_node;
1755 new_node->parent=node;
1756 new_node->left=NULL;
1757 new_node->right=NULL;
1758 new_node->key32=key;
1759 new_node->data= func(ud);
1760 new_node->u.is_subtree = is_subtree;
1769 /* node will now point to the newly created node */
1770 switch(se_tree->type){
1771 case EMEM_TREE_TYPE_RED_BLACK:
1772 node->u.rb_color=EMEM_TREE_RB_COLOR_RED;
1773 rb_insert_case1(se_tree, node);
1780 /* When the se data is released, this entire tree will dissapear as if it
1781 * never existed including all metadata associated with the tree.
/* Create a capture-scoped tree: both the tree header and all nodes come from
 * se_alloc, so everything is reclaimed when the se pool is freed.
 * `name` is stored by pointer, not copied — it must outlive the tree. */
1784 se_tree_create_non_persistent(int type, const char *name)
1786 emem_tree_t *tree_list;
1788 tree_list=se_alloc(sizeof(emem_tree_t));
1789 tree_list->next=NULL;
1790 tree_list->type=type;
1791 tree_list->tree=NULL;
1792 tree_list->name=name;
1793 tree_list->malloc=se_alloc;
1798 /* This tree is PErmanent and will never be released
/* Create a permanent tree: header and nodes come from g_malloc/g_new, so the
 * tree lives for the whole process.  `name` is stored by pointer, not copied. */
1801 pe_tree_create(int type, const char *name)
1803 emem_tree_t *tree_list;
1805 tree_list=g_new(emem_tree_t, 1);
1806 tree_list->next=NULL;
1807 tree_list->type=type;
1808 tree_list->tree=NULL;
1809 tree_list->name=name;
/* cast adapts g_malloc's gsize parameter to the tree's size_t allocator slot */
1810 tree_list->malloc=(void *(*)(size_t)) g_malloc;
1815 /* create another (sub)tree using the same memory allocation scope
1816 * as the parent tree.
1818 static emem_tree_t *
1819 emem_tree_create_subtree(emem_tree_t *parent_tree, const char *name)
1821 emem_tree_t *tree_list;
/* allocate through the parent's allocator so subtree lifetime matches parent */
1823 tree_list=parent_tree->malloc(sizeof(emem_tree_t));
1824 tree_list->next=NULL;
1825 tree_list->type=parent_tree->type;
1826 tree_list->tree=NULL;
1827 tree_list->name=name;
1828 tree_list->malloc=parent_tree->malloc;
/* Adapter with the void*(void*) signature lookup_or_insert32 expects:
 * creates a subtree sharing the given tree's allocator. */
1834 create_sub_tree(void* d)
1836 emem_tree_t *se_tree = d;
1837 return emem_tree_create_subtree(se_tree, "subtree");
1840 /* insert a new node in the tree. if this node matches an already existing node
1841 * then just replace the data for that node */
/* Insert `data` under a compound key: an array of emem_tree_key_t terminated
 * by an entry with length 0.  Each guint32 of the key selects/creates a
 * nested subtree; the final guint32 keys the data in the innermost tree. */
1844 emem_tree_insert32_array(emem_tree_t *se_tree, emem_tree_key_t *key, void *data)
1846 emem_tree_t *insert_tree = NULL;
1847 emem_tree_key_t *cur_key;
1848 guint32 i, insert_key32 = 0;
1850 if(!se_tree || !key) return;
1852 for (cur_key = key; cur_key->length > 0; cur_key++) {
/* sanity bound on a single key segment; longer means corrupt caller data */
1853 if(cur_key->length > 100) {
1854 DISSECTOR_ASSERT_NOT_REACHED();
1857 for (i = 0; i < cur_key->length; i++) {
1858 /* Insert using the previous key32 */
/* first iteration starts at the top-level tree */
1860 insert_tree = se_tree;
/* descend one level, creating the subtree on demand */
1862 insert_tree = lookup_or_insert32(insert_tree, insert_key32, create_sub_tree, se_tree, EMEM_TREE_NODE_IS_SUBTREE);
1864 insert_key32 = cur_key->key[i];
1869 /* We didn't get a valid key. Should we return NULL instead? */
1870 DISSECTOR_ASSERT_NOT_REACHED();
/* the last key32 was held back from the descent loop; use it for the data */
1873 emem_tree_insert32(insert_tree, insert_key32, data);
/* Exact-match lookup of a compound (array) key; inverse of
 * emem_tree_insert32_array.  Returns the stored data pointer, or NULL when
 * any subtree along the path is missing. */
1878 emem_tree_lookup32_array(emem_tree_t *se_tree, emem_tree_key_t *key)
1880 emem_tree_t *lookup_tree = NULL;
1881 emem_tree_key_t *cur_key;
1882 guint32 i, lookup_key32 = 0;
1884 if(!se_tree || !key) return NULL; /* prevent searching on NULL pointer */
1886 for (cur_key = key; cur_key->length > 0; cur_key++) {
1887 if(cur_key->length > 100) {
1888 DISSECTOR_ASSERT_NOT_REACHED();
1891 for (i = 0; i < cur_key->length; i++) {
1892 /* Lookup using the previous key32 */
1894 lookup_tree = se_tree;
1896 lookup_tree = emem_tree_lookup32(lookup_tree, lookup_key32);
1901 lookup_key32 = cur_key->key[i];
1906 /* We didn't get a valid key. Should we return NULL instead? */
1907 DISSECTOR_ASSERT_NOT_REACHED();
/* final key32 resolves the data in the innermost subtree */
1910 return emem_tree_lookup32(lookup_tree, lookup_key32);
/* Like emem_tree_lookup32_array but each step uses the less-than-or-equal
 * lookup, i.e. finds the entry with the largest key <= the requested key. */
1914 emem_tree_lookup32_array_le(emem_tree_t *se_tree, emem_tree_key_t *key)
1916 emem_tree_t *lookup_tree = NULL;
1917 emem_tree_key_t *cur_key;
1918 guint32 i, lookup_key32 = 0;
1920 if(!se_tree || !key) return NULL; /* prevent searching on NULL pointer */
1922 for (cur_key = key; cur_key->length > 0; cur_key++) {
1923 if(cur_key->length > 100) {
1924 DISSECTOR_ASSERT_NOT_REACHED();
1927 for (i = 0; i < cur_key->length; i++) {
1928 /* Lookup using the previous key32 */
1930 lookup_tree = se_tree;
1932 lookup_tree = emem_tree_lookup32_le(lookup_tree, lookup_key32);
1937 lookup_key32 = cur_key->key[i];
1942 /* We didn't get a valid key. Should we return NULL instead? */
1943 DISSECTOR_ASSERT_NOT_REACHED();
1946 return emem_tree_lookup32_le(lookup_tree, lookup_key32);
1950 /* Strings are stored as an array of uint32 containing the string characters
1951 with 4 characters in each uint32.
1952 The first byte of the string is stored as the most significant byte.
1953 If the string is not a multiple of 4 characters in length the last
1954 uint32 containing the string bytes are padded with 0 bytes.
1955 After the uint32's containing the string, there is one final terminator
1956 uint32 with the value 0x00000001
/* Insert value `v` under string key `k`.  The string is packed into a
 * guint32 array (big-endian byte order, 0x00000001 terminator, as described
 * above) and stored via emem_tree_insert32_array.  EMEM_TREE_STRING_NOCASE
 * in `flags` selects case-insensitive keying (case folding happens on
 * elided lines).  NOTE(review): `aligned` is g_malloc'ed here; whether it
 * is freed after insertion happens outside the visible lines — confirm. */
1959 emem_tree_insert_string(emem_tree_t* se_tree, const gchar* k, void* v, guint32 flags)
1961 emem_tree_key_t key[2];
1962 guint32 *aligned=NULL;
1963 guint32 len = (guint32) strlen(k);
/* number of guint32s: ceil(len/4) for the chars, +1 for the terminator */
1964 divx = (len+3)/4+1;
1968 aligned = g_malloc(divx * sizeof (guint32));
1970 /* pack the bytes one one by one into guint32s */
1972 for (i = 0;i < len;i++) {
1975 ch = (unsigned char)k[i];
1976 if (flags & EMEM_TREE_STRING_NOCASE) {
1988 /* add required padding to the last uint32 */
1994 aligned[i/4-1] = tmp;
1997 /* add the terminator */
1998 aligned[divx-1] = 0x00000001;
2000 key[0].length = divx;
2001 key[0].key = aligned;
2006 emem_tree_insert32_array(se_tree, key, v);
/* Look up the value stored under string key `k`.  Packs the string into the
 * same guint32-array form as emem_tree_insert_string (same NOCASE handling)
 * and delegates to emem_tree_lookup32_array.  NOTE(review): the g_free of
 * `aligned` after the lookup happens outside the visible lines — confirm. */
2011 emem_tree_lookup_string(emem_tree_t* se_tree, const gchar* k, guint32 flags)
2013 emem_tree_key_t key[2];
2014 guint32 *aligned=NULL;
2015 guint32 len = (guint) strlen(k);
2016 guint32 divx = (len+3)/4+1;
2021 aligned = g_malloc(divx * sizeof (guint32));
2023 /* pack the bytes one one by one into guint32s */
2025 for (i = 0;i < len;i++) {
2028 ch = (unsigned char)k[i];
2029 if (flags & EMEM_TREE_STRING_NOCASE) {
2041 /* add required padding to the last uint32 */
2047 aligned[i/4-1] = tmp;
2050 /* add the terminator */
2051 aligned[divx-1] = 0x00000001;
2053 key[0].length = divx;
2054 key[0].key = aligned;
2059 ret = emem_tree_lookup32_array(se_tree, key);
/* In-order (left, self, right) traversal of a node's subtree.  Subtree nodes
 * recurse into emem_tree_foreach; data nodes invoke the callback.  A TRUE
 * return from the callback stops the whole traversal and propagates up. */
2065 emem_tree_foreach_nodes(emem_tree_node_t* node, tree_foreach_func callback, void *user_data)
2067 gboolean stop_traverse = FALSE;
2073 stop_traverse = emem_tree_foreach_nodes(node->left, callback, user_data);
2074 if (stop_traverse) {
2079 if (node->u.is_subtree == EMEM_TREE_NODE_IS_SUBTREE) {
/* nested tree built by insert32_array: walk it as a whole */
2080 stop_traverse = emem_tree_foreach(node->data, callback, user_data);
2082 stop_traverse = callback(node->data, user_data);
2085 if (stop_traverse) {
2090 stop_traverse = emem_tree_foreach_nodes(node->right, callback, user_data);
2091 if (stop_traverse) {
/* Public entry point: walk every data node of the tree in key order,
 * calling the callback for each; empty trees short-circuit. */
2100 emem_tree_foreach(emem_tree_t* emem_tree, tree_foreach_func callback, void *user_data)
2105 if(!emem_tree->tree)
2108 return emem_tree_foreach_nodes(emem_tree->tree, callback, user_data);
/* Forward declaration: node printing and subtree printing are mutually recursive. */
2111 static void emem_print_subtree(emem_tree_t* emem_tree, guint32 level);
/* Debug helper: print one node (indented by `level`), then recurse into its
 * left/right children and, for subtree nodes, into the nested tree. */
2114 emem_tree_print_nodes(const char *prefix, emem_tree_node_t* node, guint32 level)
2121 for(i=0;i<level;i++){
2125 printf("%sNODE:%p parent:%p left:%p right:%p colour:%s key:%u %s:%p\n", prefix,
2126 (void *)node,(void *)(node->parent),(void *)(node->left),(void *)(node->right),
/* NOTE(review): labels assume nonzero rb_color means Black — verify the
 * EMEM_TREE_RB_COLOR_* enum values match this mapping. */
2127 (node->u.rb_color)?"Black":"Red",(node->key32),(node->u.is_subtree)?"tree":"data",node->data);
2129 emem_tree_print_nodes("L-", node->left, level+1);
2131 emem_tree_print_nodes("R-", node->right, level+1);
2133 if (node->u.is_subtree)
2134 emem_print_subtree(node->data, level+1);
/* Debug helper: print a tree header (indented by `level`) and then all of
 * its nodes starting from the root. */
2138 emem_print_subtree(emem_tree_t* emem_tree, guint32 level)
2145 for(i=0;i<level;i++){
2149 printf("EMEM tree:%p type:%s name:%s root:%p\n",emem_tree,(emem_tree->type==1)?"RedBlack":"unknown",emem_tree->name,(void *)(emem_tree->tree))
2151 emem_tree_print_nodes("Root-", emem_tree->tree, level);
/* Public debug entry point: dump an entire tree starting at indent level 0. */
2155 emem_print_tree(emem_tree_t* emem_tree)
2157 emem_print_subtree(emem_tree, 0);
2165 * Presumably we're using these routines for building strings for the tree.
2166 * Use ITEM_LABEL_LENGTH as the basis for our default lengths.
/* Initial and hard-maximum allocation sizes for ep_strbuf strings. */
2169 #define DEFAULT_STRBUF_LEN (ITEM_LABEL_LENGTH / 10)
2170 #define MAX_STRBUF_LEN 65536
/* Compute the next allocation size: start from DEFAULT_STRBUF_LEN (or the
 * current size), grow geometrically until >= wanted_alloc_len, and clamp to
 * max_alloc_len (itself clamped to MAX_STRBUF_LEN; 0 means "no caller max"). */
2173 next_size(gsize cur_alloc_len, gsize wanted_alloc_len, gsize max_alloc_len)
2175 if (max_alloc_len < 1 || max_alloc_len > MAX_STRBUF_LEN) {
2176 max_alloc_len = MAX_STRBUF_LEN;
2179 if (cur_alloc_len < 1) {
2180 cur_alloc_len = DEFAULT_STRBUF_LEN;
/* growth step (presumably doubling) sits on an elided line inside the loop */
2183 while (cur_alloc_len < wanted_alloc_len) {
2187 return cur_alloc_len < max_alloc_len ? cur_alloc_len : max_alloc_len;
/* Grow strbuf's backing allocation to hold at least wanted_alloc_len bytes.
 * No-op when already big enough or when the cap is reached.  The new buffer
 * comes from ep_alloc, so the old one is simply abandoned to the ep pool. */
2191 ep_strbuf_grow(emem_strbuf_t *strbuf, gsize wanted_alloc_len)
2193 gsize new_alloc_len;
2196 if (!strbuf || (wanted_alloc_len <= strbuf->alloc_len) || (strbuf->alloc_len >= strbuf->max_alloc_len)) {
2200 new_alloc_len = next_size(strbuf->alloc_len, wanted_alloc_len, strbuf->max_alloc_len);
2201 new_str = ep_alloc(new_alloc_len);
/* g_strlcpy always NUL-terminates the copied contents */
2202 g_strlcpy(new_str, strbuf->str, new_alloc_len);
2204 strbuf->alloc_len = new_alloc_len;
2205 strbuf->str = new_str;
/* Allocate a new packet-scoped strbuf with the given initial allocation and
 * growth cap.  max_alloc_len of 0 (or too large) means MAX_STRBUF_LEN;
 * alloc_len is clamped to the cap.  The string starts empty. */
2209 ep_strbuf_sized_new(gsize alloc_len, gsize max_alloc_len)
2211 emem_strbuf_t *strbuf;
2213 strbuf = ep_alloc(sizeof(emem_strbuf_t));
2215 if ((max_alloc_len == 0) || (max_alloc_len > MAX_STRBUF_LEN))
2216 max_alloc_len = MAX_STRBUF_LEN;
2219 else if (alloc_len > max_alloc_len)
2220 alloc_len = max_alloc_len;
2222 strbuf->str = ep_alloc(alloc_len);
2223 strbuf->str[0] = '\0';
2226 strbuf->alloc_len = alloc_len;
2227 strbuf->max_alloc_len = max_alloc_len;
/* Create a strbuf sized to fit `init` (which may be NULL for an empty
 * buffer), then copy `init` in.  len is clamped in case of truncation. */
2233 ep_strbuf_new(const gchar *init)
2235 emem_strbuf_t *strbuf;
2237 strbuf = ep_strbuf_sized_new(next_size(0, init?strlen(init)+1:0, 0), 0); /* +1 for NULL terminator */
2240 full_len = g_strlcpy(strbuf->str, init, strbuf->alloc_len);
2241 strbuf->len = MIN(full_len, strbuf->alloc_len-1);
/* Create a strbuf intended for item labels (cap ITEM_LABEL_LENGTH).  First
 * tries the small default allocation; if `init` doesn't fit, re-creates the
 * strbuf at the exact required size and copies again. */
2248 ep_strbuf_new_label(const gchar *init)
2250 emem_strbuf_t *strbuf;
2253 /* Be optimistic: Allocate default size strbuf string and only */
2254 /* request an increase if needed. */
2255 /* XXX: Is it reasonable to assume that much of the usage of */
2256 /* ep_strbuf_new_label will have init==NULL or */
2257 /* strlen(init) < DEFAULT_STRBUF_LEN) ??? */
2258 strbuf = ep_strbuf_sized_new(DEFAULT_STRBUF_LEN, ITEM_LABEL_LENGTH);
2263 /* full_len does not count the trailing '\0'. */
2264 full_len = g_strlcpy(strbuf->str, init, strbuf->alloc_len);
2265 if (full_len < strbuf->alloc_len) {
2266 strbuf->len += full_len;
/* first copy truncated: reallocate at exact size and retry */
2268 strbuf = ep_strbuf_sized_new(full_len+1, ITEM_LABEL_LENGTH);
2269 full_len = g_strlcpy(strbuf->str, init, strbuf->alloc_len);
2270 strbuf->len = MIN(full_len, strbuf->alloc_len-1);
/* Append `str` to the strbuf, growing it if needed.  NULL/empty inputs are
 * no-ops.  If the buffer has hit its cap the appended text is truncated. */
2277 ep_strbuf_append(emem_strbuf_t *strbuf, const gchar *str)
2279 gsize add_len, full_len;
2281 if (!strbuf || !str || str[0] == '\0') {
2285 /* Be optimistic; try the g_strlcpy first & see if enough room. */
2286 /* Note: full_len doesn't count the trailing '\0'; add_len does allow for same */
2287 add_len = strbuf->alloc_len - strbuf->len;
2288 full_len = g_strlcpy(&strbuf->str[strbuf->len], str, add_len);
2289 if (full_len < add_len) {
2290 strbuf->len += full_len;
/* didn't fit: undo the partial copy, grow, and copy again */
2292 strbuf->str[strbuf->len] = '\0'; /* end string at original length again */
2293 ep_strbuf_grow(strbuf, strbuf->len + full_len + 1);
2294 add_len = strbuf->alloc_len - strbuf->len;
2295 full_len = g_strlcpy(&strbuf->str[strbuf->len], str, add_len);
/* may still be truncated if the growth cap was reached */
2296 strbuf->len += MIN(add_len-1, full_len);
/* Append vsnprintf-formatted output to the strbuf, growing if needed.
 * NOTE(review): `ap2` is presumably a G_VA_COPY of `ap` made on an elided
 * line — a va_list cannot be reused after the first g_vsnprintf; confirm. */
2303 ep_strbuf_append_vprintf(emem_strbuf_t *strbuf, const gchar *format, va_list ap)
2306 gsize add_len, full_len;
2310 /* Be optimistic; try the g_vsnprintf first & see if enough room. */
2311 /* Note: full_len doesn't count the trailing '\0'; add_len does allow for same. */
2312 add_len = strbuf->alloc_len - strbuf->len;
2313 full_len = g_vsnprintf(&strbuf->str[strbuf->len], (gulong) add_len, format, ap);
2314 if (full_len < add_len) {
2315 strbuf->len += full_len;
/* didn't fit: undo the partial write, grow, and format again with the copy */
2317 strbuf->str[strbuf->len] = '\0'; /* end string at original length again */
2318 ep_strbuf_grow(strbuf, strbuf->len + full_len + 1);
2319 add_len = strbuf->alloc_len - strbuf->len;
2320 full_len = g_vsnprintf(&strbuf->str[strbuf->len], (gulong) add_len, format, ap2);
2321 strbuf->len += MIN(add_len-1, full_len);
/* printf-style append: varargs wrapper around ep_strbuf_append_vprintf
 * (the matching va_end is on an elided line). */
2328 ep_strbuf_append_printf(emem_strbuf_t *strbuf, const gchar *format, ...)
2332 va_start(ap, format);
2333 ep_strbuf_append_vprintf(strbuf, format, ap);
/* printf-style replace: presumably resets strbuf->len on an elided line
 * before appending, so the buffer holds only the formatted text — confirm. */
2338 ep_strbuf_printf(emem_strbuf_t *strbuf, const gchar *format, ...)
2347 va_start(ap, format);
2348 ep_strbuf_append_vprintf(strbuf, format, ap);
/* Append a single character.  Grows first if needed; the second size check
 * guards the case where growth was capped and the char cannot fit. */
2353 ep_strbuf_append_c(emem_strbuf_t *strbuf, const gchar c)
2359 /* +1 for the new character & +1 for the trailing '\0'. */
2360 if (strbuf->alloc_len < strbuf->len + 1 + 1) {
2361 ep_strbuf_grow(strbuf, strbuf->len + 1 + 1);
2363 if (strbuf->alloc_len >= strbuf->len + 1 + 1) {
2364 strbuf->str[strbuf->len] = c;
/* NOTE(review): the len increment presumably sits on an elided line between
 * the two stores — confirm against the full source. */
2366 strbuf->str[strbuf->len] = '\0';
/* Append one Unicode code point, UTF-8 encoded (1-6 bytes via
 * g_unichar_to_utf8).  Grows first; skips the append if growth was capped. */
2373 ep_strbuf_append_unichar(emem_strbuf_t *strbuf, const gunichar c)
2382 charlen = g_unichar_to_utf8(c, buf);
2384 /* +charlen for the new character & +1 for the trailing '\0'. */
2385 if (strbuf->alloc_len < strbuf->len + charlen + 1) {
2386 ep_strbuf_grow(strbuf, strbuf->len + charlen + 1);
2388 if (strbuf->alloc_len >= strbuf->len + charlen + 1) {
2389 memcpy(&strbuf->str[strbuf->len], buf, charlen);
2390 strbuf->len += charlen;
2391 strbuf->str[strbuf->len] = '\0';
/* Truncate the strbuf to `len` characters; a no-op when the buffer is NULL
 * or already that short.  (The len field update is on an elided line.) */
2398 ep_strbuf_truncate(emem_strbuf_t *strbuf, gsize len)
2400 if (!strbuf || len >= strbuf->len) {
2404 strbuf->str[len] = '\0';
2416 * indent-tabs-mode: t
2419 * ex: set shiftwidth=8 tabstop=8 noexpandtab:
2420 * :indentSize=8:tabSize=8:noTabs=false: