2 * Wireshark memory management and garbage collection functions
7 * Wireshark - Network traffic analyzer
8 * By Gerald Combs <gerald@wireshark.org>
9 * Copyright 1998 Gerald Combs
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version 2
14 * of the License, or (at your option) any later version.
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
34 #ifdef HAVE_SYS_TIME_H
46 #include "wmem/wmem.h"
49 #include <windows.h> /* VirtualAlloc, VirtualProtect */
50 #include <process.h> /* getpid */
53 /* Print out statistics about our memory allocations? */
54 /*#define SHOW_EMEM_STATS*/
56 /* Do we want to use guardpages? if available */
57 #define WANT_GUARD_PAGES 1
59 #ifdef WANT_GUARD_PAGES
60 /* Add guard pages at each end of our allocated memory */
62 #if defined(HAVE_SYSCONF) && defined(HAVE_MMAP) && defined(HAVE_MPROTECT) && defined(HAVE_STDINT_H)
65 #ifdef HAVE_SYS_TYPES_H
66 #include <sys/types.h>
67 #endif /* HAVE_SYS_TYPES_H */
71 #if defined(MAP_ANONYMOUS)
72 #define ANON_PAGE_MODE (MAP_ANONYMOUS|MAP_PRIVATE)
73 #elif defined(MAP_ANON)
74 #define ANON_PAGE_MODE (MAP_ANON|MAP_PRIVATE)
76 #define ANON_PAGE_MODE (MAP_PRIVATE) /* have to map /dev/zero */
78 #endif /* defined(MAP_ANONYMOUS) */
82 static int dev_zero_fd;
83 #define ANON_FD dev_zero_fd
86 #endif /* NEED_DEV_ZERO */
88 #define USE_GUARD_PAGES 1
89 #endif /* defined(HAVE_SYSCONF) && defined(HAVE_MMAP) && defined(HAVE_MPROTECT) && defined(HAVE_STDINT_H) */
90 #endif /* WANT_GUARD_PAGES */
92 /* When required, allocate more memory from the OS in this size chunks */
93 #define EMEM_PACKET_CHUNK_SIZE (10 * 1024 * 1024)
95 /* The canary between allocations is at least 8 bytes and up to 16 bytes to
96 * allow future allocations to be 4- or 8-byte aligned.
97 * All but the last byte of the canary are randomly generated; the last byte is
98 * NULL to separate the canary and the pointer to the next canary.
100 * For example, if the allocation is a multiple of 8 bytes, the canary and
101 * pointer would look like:
102 * |0|1|2|3|4|5|6|7||0|1|2|3|4|5|6|7|
103 * |c|c|c|c|c|c|c|0||p|p|p|p|p|p|p|p| (64-bit), or:
104 * |c|c|c|c|c|c|c|0||p|p|p|p| (32-bit)
106 * If the allocation was, for example, 12 bytes, the canary would look like:
107 * |0|1|2|3|4|5|6|7||0|1|2|3|4|5|6|7|
108 * [...]|a|a|a|a|c|c|c|c||c|c|c|c|c|c|c|0| (followed by the pointer)
110 #define EMEM_CANARY_SIZE 8
111 #define EMEM_CANARY_DATA_SIZE (EMEM_CANARY_SIZE * 2 - 1)
/* One chunk of backing memory obtained from the OS.
 * NOTE(review): some fields (e.g. the buffer pointer and canary_last, which
 * other functions here reference) are not visible in this view of the file. */
typedef struct _emem_chunk_t {
	struct _emem_chunk_t *next;	/* next chunk in the free or used list */
	unsigned int	amount_free_init;	/* usable bytes when the chunk was created */
	unsigned int	amount_free;	/* usable bytes still unallocated */
	unsigned int	free_offset_init;	/* offset of the first usable byte (past any guard page) */
	unsigned int	free_offset;	/* offset of the next byte to hand out */

/* State for one allocator pool (ep_ = packet lifetime, se_ = capture lifetime). */
typedef struct _emem_pool_t {
	emem_chunk_t *free_list;	/* chunks that still have room */
	emem_chunk_t *used_list;	/* chunks too full to serve further requests */

	emem_tree_t *trees;		/* only used by se_mem allocator */

	guint8 canary[EMEM_CANARY_DATA_SIZE];	/* per-pool random canary pattern */

	void *(*memory_alloc)(size_t size, struct _emem_pool_t *);	/* chunked or glib backend */

	/*
	 * Tools like Valgrind and ElectricFence don't work well with memchunks.
	 * Export the following environment variables to make {ep|se}_alloc() allocate each
	 * object individually.
	 *
	 * WIRESHARK_DEBUG_EP_NO_CHUNKS
	 * WIRESHARK_DEBUG_SE_NO_CHUNKS
	 */
	gboolean debug_use_chunks;

	/* Do we want to use canaries?
	 * Export the following environment variables to disable/enable canaries.
	 *
	 * WIRESHARK_DEBUG_EP_NO_CANARY
	 * For SE memory, use of the canary is off by default because of the
	 * memory overhead.
	 * WIRESHARK_DEBUG_SE_USE_CANARY
	 */
	gboolean debug_use_canary;

	/* Do we want to verify no one is using a pointer to an ep_ or se_
	 * allocated thing where they shouldn't be?
	 *
	 * Export WIRESHARK_EP_VERIFY_POINTERS or WIRESHARK_SE_VERIFY_POINTERS
	 */
	gboolean debug_verify_pointers;
static emem_pool_t ep_packet_mem;	/* packet-lifetime pool */
static emem_pool_t se_packet_mem;	/* capture(session)-lifetime pool */

/*
 * Memory scrubbing is expensive but can be useful to ensure we don't:
 * - use memory before initializing it
 * - use memory after freeing it
 * Export WIRESHARK_DEBUG_SCRUB_MEMORY to turn it on.
 */
static gboolean debug_use_memory_scrubber = FALSE;

static SYSTEM_INFO sysinfo;	/* Win32: for dwPageSize */
static OSVERSIONINFO versinfo;	/* Win32: VirtualProtect may fail on Win9x */
#elif defined(USE_GUARD_PAGES)
static intptr_t pagesize;	/* from sysconf(_SC_PAGESIZE) */
#endif /* _WIN32 / USE_GUARD_PAGES */

/* Forward declarations of the two allocation backends. */
static void *emem_alloc_chunk(size_t size, emem_pool_t *mem);
static void *emem_alloc_glib(size_t size, emem_pool_t *mem);
186 * Set a canary value to be placed between memchunks.
emem_canary_init(guint8 *canary)
	static GRand *rand_state = NULL;

	/* Lazily create the shared PRNG on first use. */
	if (rand_state == NULL) {
		rand_state = g_rand_new();
	/* Fill the canary with random non-zero bytes (1..0xFF); zero is
	 * reserved as the terminator between canary and next-pointer. */
	for (i = 0; i < EMEM_CANARY_DATA_SIZE; i ++) {
		canary[i] = (guint8) g_rand_int_range(rand_state, 1, 0x100);

/* Walk one link of the canary chain: verify the canary bytes in memory
 * against the pool's pattern, then read the pointer to the previous canary
 * stored after the NUL terminator.  If len is non-NULL it receives the
 * number of bytes consumed (canary + NUL + pointer).
 * NOTE(review): the return statements (including the (void *)-1 corruption
 * sentinel that callers test for) are not visible in this view — confirm
 * against the full source. */
emem_canary_next(guint8 *mem_canary, guint8 *canary, int *len)
	/* The first EMEM_CANARY_SIZE-1 bytes must match the pool canary. */
	for (i = 0; i < EMEM_CANARY_SIZE-1; i++)
		if (mem_canary[i] != canary[i])

	/* Scan for the NUL terminator; the next-canary pointer follows it. */
	for (; i < EMEM_CANARY_DATA_SIZE; i++) {
		if (canary[i] == '\0') {
			memcpy(&ptr, &canary[i+1], sizeof(void *));

			*len = i + 1 + (int)sizeof(void *);

		if (mem_canary[i] != canary[i])

/*
 * Given an allocation size, return the amount of room needed for the canary
 * (with a minimum of 8 bytes) while using the canary to pad to an 8-byte
 * boundary.
 */
emem_canary_pad (size_t allocation)
	/* Pad up to the next EMEM_CANARY_SIZE boundary, always leaving at
	 * least EMEM_CANARY_SIZE bytes for the canary itself. */
	pad = EMEM_CANARY_SIZE - (allocation % EMEM_CANARY_SIZE);
	if (pad < EMEM_CANARY_SIZE)
		pad += EMEM_CANARY_SIZE;
/* used for debugging canaries, will block */
#ifdef DEBUG_INTENSE_CANARY_CHECKS
gboolean intense_canary_checking = FALSE;

/* used to intensively check ep canaries
 */
ep_check_canary_integrity(const char* fmt, ...)
	/* 'there' records the call-site label of the previous check so a
	 * corruption can be bracketed between two known points. */
	static gchar there[128] = {
		'L','a','u','n','c','h',0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
		0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
		0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
		0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 };

	emem_chunk_t* npc = NULL;

	if (! intense_canary_checking ) return;

	/* Format the current call-site label into 'here'. */
	g_vsnprintf(here, sizeof(here), fmt, ap);

	/* Walk every chunk's canary chain; emem_canary_next() returns
	 * (void *)-1 on a mismatch. */
	for (npc = ep_packet_mem.free_list; npc != NULL; npc = npc->next) {
		void *canary_next = npc->canary_last;

		while (canary_next != NULL) {
			canary_next = emem_canary_next(ep_packet_mem.canary, canary_next, NULL);
			/* XXX, check if canary_next is inside allocated memory? */
			if (canary_next == (void *) -1)
				g_error("Per-packet memory corrupted\nbetween: %s\nand: %s", there, here);

	/* Remember this call site for the next invocation's error message. */
	g_strlcpy(there, here, sizeof(there));
/* (Re)initialise a pool: generate its canary pattern (if canaries are on)
 * and select the chunked or one-object-per-allocation (glib) backend. */
emem_init_chunk(emem_pool_t *mem)
	if (mem->debug_use_canary)
		emem_canary_init(mem->canary);

	if (mem->debug_use_chunks)
		mem->memory_alloc = emem_alloc_chunk;
		mem->memory_alloc = emem_alloc_glib;	/* NOTE(review): the 'else' for this branch is not visible here */
/* Initialize the packet-lifetime memory allocation pool.
 * This function should be called only once when Wireshark or TShark starts
 * up.
 */
	ep_packet_mem.free_list=NULL;
	ep_packet_mem.used_list=NULL;
	ep_packet_mem.trees=NULL;	/* not used by this allocator */

	/* EP pool: chunking and canaries are ON unless disabled via environment. */
	ep_packet_mem.debug_use_chunks = (getenv("WIRESHARK_DEBUG_EP_NO_CHUNKS") == NULL);
	ep_packet_mem.debug_use_canary = ep_packet_mem.debug_use_chunks && (getenv("WIRESHARK_DEBUG_EP_NO_CANARY") == NULL);
	ep_packet_mem.debug_verify_pointers = (getenv("WIRESHARK_EP_VERIFY_POINTERS") != NULL);

#ifdef DEBUG_INTENSE_CANARY_CHECKS
	intense_canary_checking = (getenv("WIRESHARK_DEBUG_EP_INTENSE_CANARY") != NULL);

	emem_init_chunk(&ep_packet_mem);

/* Initialize the capture-lifetime memory allocation pool.
 * This function should be called only once when Wireshark or TShark starts
 * up.
 */
	se_packet_mem.free_list = NULL;
	se_packet_mem.used_list = NULL;
	se_packet_mem.trees = NULL;

	/* SE pool: canaries are OFF by default (opt-in via ..._USE_CANARY). */
	se_packet_mem.debug_use_chunks = (getenv("WIRESHARK_DEBUG_SE_NO_CHUNKS") == NULL);
	se_packet_mem.debug_use_canary = se_packet_mem.debug_use_chunks && (getenv("WIRESHARK_DEBUG_SE_USE_CANARY") != NULL);
	se_packet_mem.debug_verify_pointers = (getenv("WIRESHARK_SE_VERIFY_POINTERS") != NULL);

	emem_init_chunk(&se_packet_mem);
/* Initialize all the allocators here.
 * This function should be called only once when Wireshark or TShark starts
 * up.
 */
	if (getenv("WIRESHARK_DEBUG_SCRUB_MEMORY"))
		debug_use_memory_scrubber = TRUE;

	/* Set up our guard page info for Win32 */
	GetSystemInfo(&sysinfo);
	pagesize = sysinfo.dwPageSize;

	/* calling GetVersionEx using the OSVERSIONINFO structure.
	 * OSVERSIONINFOEX requires Win NT4 with SP6 or newer NT Versions.
	 * OSVERSIONINFOEX will fail on Win9x and older NT Versions.
	 * See also:
	 * http://msdn.microsoft.com/library/en-us/sysinfo/base/getversionex.asp
	 * http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfo_str.asp
	 * http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfoex_str.asp
	 */
	versinfo.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
	GetVersionEx(&versinfo);

#elif defined(USE_GUARD_PAGES)
	pagesize = sysconf(_SC_PAGESIZE);
		fprintf(stderr, "Warning: call to sysconf() for _SC_PAGESIZE has failed...\n");
	/* NEED_DEV_ZERO path: anonymous mappings require a /dev/zero fd. */
	dev_zero_fd = ws_open("/dev/zero", O_RDWR);
	g_assert(dev_zero_fd != -1);
#endif /* _WIN32 / USE_GUARD_PAGES */
#ifdef SHOW_EMEM_STATS
#define NUM_ALLOC_DIST 10
/* Histogram of SE allocation sizes (powers of two starting at 32). */
static guint allocations[NUM_ALLOC_DIST] = { 0 };
static guint total_no_chunks = 0;

/* Dump usage/waste statistics for both pools to stderr.
 * Only meaningful data comes from the used_list: those chunks are full, so
 * they show real utilisation; free-list chunks would skew the numbers. */
print_alloc_stats(void)
	guint num_chunks = 0;
	guint num_allocs = 0;
	guint total_used = 0;
	guint total_allocation = 0;
	guint used_for_canaries = 0;

	guint total_space_allocated_from_os, total_space_wasted;
	gboolean ep_stat=TRUE;

	fprintf(stderr, "\n-------- EP allocator statistics --------\n");
	fprintf(stderr, "%s chunks, %s canaries, %s memory scrubber\n",
		ep_packet_mem.debug_use_chunks ? "Using" : "Not using",
		ep_packet_mem.debug_use_canary ? "using" : "not using",
		debug_use_memory_scrubber ? "using" : "not using");

	if (! (ep_packet_mem.free_list || !ep_packet_mem.used_list)) {
		fprintf(stderr, "No memory allocated\n");

	if (ep_packet_mem.debug_use_chunks && ep_stat) {
		/* Nothing interesting without chunks */
		/* Only look at the used_list since those chunks are fully
		 * used. Looking at the free list would skew our view of what
		 * we have wasted. */
		for (chunk = ep_packet_mem.used_list; chunk; chunk = chunk->next) {
			total_used += (chunk->amount_free_init - chunk->amount_free);
			total_allocation += chunk->amount_free_init;

		if (num_chunks > 0) {
			fprintf (stderr, "\n");
			fprintf (stderr, "\n---- Buffer space ----\n");
			fprintf (stderr, "\tChunk allocation size: %10u\n", EMEM_PACKET_CHUNK_SIZE);
			fprintf (stderr, "\t*    Number of chunks: %10u\n", num_chunks);
			fprintf (stderr, "\t-------------------------------------------\n");
			fprintf (stderr, "\t= %u (%u including guard pages) total space used for buffers\n",
				total_allocation, EMEM_PACKET_CHUNK_SIZE * num_chunks);
			fprintf (stderr, "\t-------------------------------------------\n");
			total_space_allocated_from_os = total_allocation
				+ sizeof(emem_chunk_t) * num_chunks;
			fprintf (stderr, "Total allocated from OS: %u\n\n",
				total_space_allocated_from_os);
			fprintf (stderr, "No fully used chunks, nothing to do\n");

	/* Reset the accumulators before computing the SE statistics. */
	total_allocation = 0;
	used_for_canaries = 0;

	fprintf(stderr, "\n-------- SE allocator statistics --------\n");
	fprintf(stderr, "Total number of chunk allocations %u\n",
	fprintf(stderr, "%s chunks, %s canaries\n",
		se_packet_mem.debug_use_chunks ? "Using" : "Not using",
		se_packet_mem.debug_use_canary ? "using" : "not using");

	if (! (se_packet_mem.free_list || !se_packet_mem.used_list)) {
		fprintf(stderr, "No memory allocated\n");

	if (!se_packet_mem.debug_use_chunks )
		return; /* Nothing interesting without chunks?? */

	/* Only look at the used_list since those chunks are fully used.
	 * Looking at the free list would skew our view of what we have wasted.
	 */
	for (chunk = se_packet_mem.used_list; chunk; chunk = chunk->next) {
		total_used += (chunk->amount_free_init - chunk->amount_free);
		total_allocation += chunk->amount_free_init;

		if (se_packet_mem.debug_use_canary){
			void *ptr = chunk->canary_last;

			/* Walk the canary chain to total the bytes consumed by
			 * canaries + next-pointers in this chunk. */
			while (ptr != NULL) {
				ptr = emem_canary_next(se_packet_mem.canary, (guint8*)ptr, &len);
				if (ptr == (void *) -1)
					g_error("Memory corrupted");
				used_for_canaries += len;

	if (num_chunks == 0) {
		fprintf (stderr, "No fully used chunks, nothing to do\n");

	fprintf (stderr, "\n");
	fprintf (stderr, "---------- Allocations from the OS ----------\n");
	fprintf (stderr, "---- Headers ----\n");
	fprintf (stderr, "\t(    Chunk header size: %10lu\n",
		 sizeof(emem_chunk_t));
	fprintf (stderr, "\t*    Number of chunks: %10u\n", num_chunks);
	fprintf (stderr, "\t-------------------------------------------\n");

	total_headers = sizeof(emem_chunk_t) * num_chunks;
	fprintf (stderr, "\t= %u bytes used for headers\n", total_headers);
	fprintf (stderr, "\n---- Buffer space ----\n");
	fprintf (stderr, "\tChunk allocation size: %10u\n",
		 EMEM_PACKET_CHUNK_SIZE);
	fprintf (stderr, "\t*    Number of chunks: %10u\n", num_chunks);
	fprintf (stderr, "\t-------------------------------------------\n");
	fprintf (stderr, "\t= %u (%u including guard pages) bytes used for buffers\n",
		total_allocation, EMEM_PACKET_CHUNK_SIZE * num_chunks);
	fprintf (stderr, "\t-------------------------------------------\n");
	total_space_allocated_from_os = (EMEM_PACKET_CHUNK_SIZE * num_chunks)
	fprintf (stderr, "Total bytes allocated from the OS: %u\n\n",
		total_space_allocated_from_os);

	for (i = 0; i < NUM_ALLOC_DIST; i++)
		num_allocs += allocations[i];

	fprintf (stderr, "---------- Allocations from the SE pool ----------\n");
	fprintf (stderr, "           Number of SE allocations: %10u\n",
	fprintf (stderr, "        Bytes used (incl. canaries): %10u\n",
	fprintf (stderr, "            Bytes used for canaries: %10u\n",
	fprintf (stderr, "Bytes unused (wasted, excl. guard pages): %10u\n",
		total_allocation - total_used);
	fprintf (stderr, "Bytes unused (wasted, incl. guard pages): %10u\n\n",
		total_space_allocated_from_os - total_used);

	fprintf (stderr, "---------- Statistics ----------\n");
	fprintf (stderr, "Average SE allocation size (incl. canaries): %6.2f\n",
		(float)total_used/(float)num_allocs);
	fprintf (stderr, "Average SE allocation size (excl. canaries): %6.2f\n",
		(float)(total_used - used_for_canaries)/(float)num_allocs);
	fprintf (stderr, "        Average wasted bytes per allocation: %6.2f\n",
		(total_allocation - total_used)/(float)num_allocs);
	total_space_wasted = (total_allocation - total_used)
		+ (sizeof(emem_chunk_t));
	fprintf (stderr, " Space used for headers + unused allocation: %8u\n",
	fprintf (stderr, "--> %% overhead/waste: %4.2f\n",
		100 * (float)total_space_wasted/(float)total_space_allocated_from_os);

	fprintf (stderr, "\nAllocation distribution (sizes include canaries):\n");
	for (i = 0; i < (NUM_ALLOC_DIST-1); i++)
		fprintf (stderr, "size < %5d: %8u\n", 32<<i, allocations[i]);
	fprintf (stderr, "size > %5d: %8u\n", 32<<i, allocations[i]);
/* Return TRUE if ptr points into the *allocated* region of any chunk in
 * chunk_list (between free_offset_init and the current free_offset). */
emem_verify_pointer_list(const emem_chunk_t *chunk_list, const void *ptr)
	const gchar *cptr = (gchar *)ptr;
	const emem_chunk_t *chunk;

	for (chunk = chunk_list; chunk; chunk = chunk->next) {
		if (cptr >= (chunk->buf + chunk->free_offset_init) && cptr < (chunk->buf + chunk->free_offset))

/* Return TRUE if ptr was handed out by this pool (either list). */
emem_verify_pointer(const emem_pool_t *hdr, const void *ptr)
	return emem_verify_pointer_list(hdr->free_list, ptr) || emem_verify_pointer_list(hdr->used_list, ptr);

/* Public wrapper: only verifies when WIRESHARK_EP_VERIFY_POINTERS is set. */
ep_verify_pointer(const void *ptr)
	if (ep_packet_mem.debug_verify_pointers)
		return emem_verify_pointer(&ep_packet_mem, ptr);

/* Public wrapper: only verifies when WIRESHARK_SE_VERIFY_POINTERS is set. */
se_verify_pointer(const void *ptr)
	if (se_packet_mem.debug_verify_pointers)
		return emem_verify_pointer(&se_packet_mem, ptr);
/* Overwrite a buffer with a recognisable pattern: 0xBADDCAFE on allocation,
 * 0xDEADBEEF on free.  No-op unless WIRESHARK_DEBUG_SCRUB_MEMORY is set. */
emem_scrub_memory(char *buf, size_t size, gboolean alloc)
	guint scrubbed_value;

	if (!debug_use_memory_scrubber)

	if (alloc) /* this memory is being allocated */
		scrubbed_value = 0xBADDCAFE;
	else /* this memory is being freed */
		scrubbed_value = 0xDEADBEEF;

	/* We shouldn't need to check the alignment of the starting address
	 * since this is malloc'd memory (or 'pagesize' bytes into malloc'd
	 * memory).
	 */
	/* XXX - if the above is *NOT* true, we should use memcpy here,
	 * in order to avoid problems on alignment-sensitive platforms, e.g.
	 * http://stackoverflow.com/questions/108866/is-there-memset-that-accepts-integers-larger-than-char
	 */
	/* Write whole guint words while they fit... */
	for (offset = 0; offset + sizeof(guint) <= size; offset += sizeof(guint))
		*(guint*)(void*)(buf+offset) = scrubbed_value;

	/* Initialize the last bytes, if any */
		*(guint8*)(buf+offset) = scrubbed_value >> 24;
		*(guint8*)(buf+offset) = (scrubbed_value >> 16) & 0xFF;
		*(guint8*)(buf+offset) = (scrubbed_value >> 8) & 0xFF;
/* Allocate a new chunk header plus 'size' bytes of backing store.
 * The backing store comes from VirtualAlloc (Win32), mmap (when guard
 * pages are enabled), or g_malloc otherwise.  On allocation failure the
 * function throws OutOfMemoryError (or aborts if
 * WIRESHARK_ABORT_ON_OUT_OF_MEMORY is set). */
static emem_chunk_t *
emem_create_chunk(size_t size)
	npc = g_new(emem_chunk_t, 1);
	npc->canary_last = NULL;

	/*
	 * MSDN documents VirtualAlloc/VirtualProtect at
	 * http://msdn.microsoft.com/library/en-us/memory/base/creating_guard_pages.asp
	 */

	/* XXX - is MEM_COMMIT|MEM_RESERVE correct? */
	npc->buf = (char *)VirtualAlloc(NULL, size,
		MEM_COMMIT|MEM_RESERVE, PAGE_READWRITE);

	if (npc->buf == NULL) {
		if (getenv("WIRESHARK_ABORT_ON_OUT_OF_MEMORY"))
		THROW(OutOfMemoryError);

#elif defined(USE_GUARD_PAGES)
	npc->buf = (char *)mmap(NULL, size,
		PROT_READ|PROT_WRITE, ANON_PAGE_MODE, ANON_FD, 0);

	if (npc->buf == MAP_FAILED) {
		if (getenv("WIRESHARK_ABORT_ON_OUT_OF_MEMORY"))
		THROW(OutOfMemoryError);

#else /* Is there a draft in here? */
	npc->buf = g_malloc(size);
	/* g_malloc() can't fail */

#ifdef SHOW_EMEM_STATS

	/* Whole buffer is initially free, starting at offset 0. */
	npc->amount_free = npc->amount_free_init = (unsigned int) size;
	npc->free_offset = npc->free_offset_init = 0;
/* Like emem_create_chunk(), but additionally turns the first and last
 * page-aligned pages of the buffer into no-access guard pages, then shrinks
 * the chunk's usable region to the span between them. */
static emem_chunk_t *
emem_create_chunk_gp(size_t size)
	char *buf_end, *prot1, *prot2;

#elif defined(USE_GUARD_PAGES)
	char *buf_end, *prot1, *prot2;
#endif /* _WIN32 / USE_GUARD_PAGES */

	npc = emem_create_chunk(size);

	buf_end = npc->buf + size;

	/* Align our guard pages on page-sized boundaries */
	prot1 = (char *) ((((intptr_t) npc->buf + pagesize - 1) / pagesize) * pagesize);
	prot2 = (char *) ((((intptr_t) buf_end - (1 * pagesize)) / pagesize) * pagesize);

	/* Win9x has no VirtualProtect, hence the dwPlatformId escape hatch. */
	ret = VirtualProtect(prot1, pagesize, PAGE_NOACCESS, &oldprot);
	g_assert(ret != 0 || versinfo.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS);
	ret = VirtualProtect(prot2, pagesize, PAGE_NOACCESS, &oldprot);
	g_assert(ret != 0 || versinfo.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS);

	npc->amount_free_init = (unsigned int) (prot2 - prot1 - pagesize);
	npc->free_offset_init = (unsigned int) (prot1 - npc->buf) + pagesize;
#elif defined(USE_GUARD_PAGES)
	buf_end = npc->buf + size;

	/* Align our guard pages on page-sized boundaries */
	prot1 = (char *) ((((intptr_t) npc->buf + pagesize - 1) / pagesize) * pagesize);
	prot2 = (char *) ((((intptr_t) buf_end - (1 * pagesize)) / pagesize) * pagesize);

	ret = mprotect(prot1, pagesize, PROT_NONE);
	ret = mprotect(prot2, pagesize, PROT_NONE);

	/* Usable region is everything strictly between the two guard pages. */
	npc->amount_free_init = (unsigned int)(prot2 - prot1 - pagesize);
	npc->free_offset_init = (unsigned int)((prot1 - npc->buf) + pagesize);
	/* No guard pages: the whole buffer is usable. */
	npc->amount_free_init = size;
	npc->free_offset_init = 0;
#endif /* USE_GUARD_PAGES */

	npc->amount_free = npc->amount_free_init;
	npc->free_offset = npc->free_offset_init;
/* Chunked backend: carve 'size' bytes (plus optional canary and alignment
 * padding) out of the pool's current free chunk, creating a new chunk when
 * necessary.  Returns a pointer to the caller's bytes. */
emem_alloc_chunk(size_t size, emem_pool_t *mem)
	gboolean use_canary = mem->debug_use_canary;

	emem_chunk_t *free_list;

	/* Allocate room for at least 8 bytes of canary plus some padding
	 * so the canary ends on an 8-byte boundary.
	 * But first add the room needed for the pointer to the next canary
	 * (so the entire allocation will end on an 8-byte boundary).
	 */
		asize += sizeof(void *);
		pad = emem_canary_pad(asize);
		/* No canary: just round up to the allocator's alignment. */
		pad = (WS_MEM_ALIGN - (asize & (WS_MEM_ALIGN-1))) & (WS_MEM_ALIGN-1);

#ifdef SHOW_EMEM_STATS
	/* Do this check here so we can include the canary size */
	if (mem == &se_packet_mem) {
		else if (asize < 128)
		else if (asize < 256)
		else if (asize < 512)
		else if (asize < 1024)
		else if (asize < 2048)
		else if (asize < 4096)
		else if (asize < 8192)
		else if (asize < 16384)
			allocations[(NUM_ALLOC_DIST-1)]++;

	/* make sure we don't try to allocate too much (arbitrary limit) */
	DISSECTOR_ASSERT(size<(EMEM_PACKET_CHUNK_SIZE>>2));

		mem->free_list = emem_create_chunk_gp(EMEM_PACKET_CHUNK_SIZE);

	/* oops, we need to allocate more memory to serve this request
	 * than we have free. move this node to the used list and try again
	 */
	if(asize > mem->free_list->amount_free) {
		mem->free_list=mem->free_list->next;
		npc->next=mem->used_list;

		mem->free_list = emem_create_chunk_gp(EMEM_PACKET_CHUNK_SIZE);

	free_list = mem->free_list;

	buf = free_list->buf + free_list->free_offset;

	free_list->amount_free -= (unsigned int) asize;
	free_list->free_offset += (unsigned int) asize;

		/* Write the canary right after the caller's bytes, then the
		 * pointer to the previous canary, and make this canary the
		 * new head of the chunk's canary chain. */
		char *cptr = (char *)buf + size;

		memcpy(cptr, mem->canary, pad-1);
		memcpy(cptr + pad, &free_list->canary_last, sizeof(void *));

		free_list->canary_last = cptr;
/* Non-chunked backend (for Valgrind/ElectricFence): every allocation gets
 * its own g_malloc'd buffer wrapped in a chunk header on the used list. */
emem_alloc_glib(size_t size, emem_pool_t *mem)
	npc=g_new(emem_chunk_t, 1);
	npc->next=mem->used_list;
	npc->buf=(char *)g_malloc(size);
	npc->canary_last = NULL;

	/* There's no padding/alignment involved (from our point of view) when
	 * we fetch the memory directly from the system pool, so WYSIWYG */
	npc->amount_free = npc->free_offset_init = 0;
	npc->free_offset = npc->amount_free_init = (unsigned int) size;
/* allocate 'size' amount of memory. */
emem_alloc(size_t size, emem_pool_t *mem)
	/* For testing wmem, effectively redirects most emem memory to wmem.
	 * You will also have to comment out several assertions in wmem_core.c,
	 * specifically anything g_assert(allocator->in_scope), since it is much
	 * stricter about when it is permitted to be called. */
	if (mem == &ep_packet_mem) {
		return wmem_alloc(wmem_packet_scope(), size);
	else if (mem == &se_packet_mem) {
		return wmem_alloc(wmem_file_scope(), size);

	/* Dispatch to the chunked or glib backend chosen at init time. */
	buf = mem->memory_alloc(size, mem);

	/* XXX - this is a waste of time if the allocator function is going to
	 * memset this straight back to 0.
	 */
	emem_scrub_memory((char *)buf, size, TRUE);
/* allocate 'size' amount of memory with an allocation lifetime until the
 * next packet.
 */
ep_alloc(size_t size)
	return emem_alloc(size, &ep_packet_mem);

/* allocate 'size' amount of memory with an allocation lifetime until the
 * next capture.
 */
se_alloc(size_t size)
	return emem_alloc(size, &se_packet_mem);

/* Zero-filled variants of the two allocators above. */
ep_alloc0(size_t size)
	return memset(ep_alloc(size),'\0',size);

se_alloc0(size_t size)
	return memset(se_alloc(size),'\0',size);

/* Duplicate a NUL-terminated string into memory obtained from 'allocator'. */
emem_strdup(const gchar *src, void *allocator(size_t))
	/* If str is NULL, just return the string "<NULL>" so that the callers don't
	 * have to bother checking it.
	 */
	len = (guint) strlen(src);
	dst = (gchar *)memcpy(allocator(len+1), src, len+1);

ep_strdup(const gchar *src)
	return emem_strdup(src, ep_alloc);

se_strdup(const gchar *src)
	return emem_strdup(src, se_alloc);

/* Duplicate at most 'len' characters of src; result is NUL-terminated. */
emem_strndup(const gchar *src, size_t len, void *allocator(size_t))
	gchar *dst = (gchar *)allocator(len+1);

	/* Copy up to len bytes, stopping early at src's terminator. */
	for (i = 0; (i < len) && src[i]; i++)

ep_strndup(const gchar *src, size_t len)
	return emem_strndup(src, len, ep_alloc);

se_strndup(const gchar *src, size_t len)
	return emem_strndup(src, len, se_alloc);

/* Duplicate 'len' raw bytes into pool memory (not NUL-terminated). */
ep_memdup(const void* src, size_t len)
	return memcpy(ep_alloc(len), src, len);

se_memdup(const void* src, size_t len)
	return memcpy(se_alloc(len), src, len);

/* vprintf into pool memory; uses g_printf_string_upper_bound() to size the
 * buffer and a copied va_list (ap2) for the actual formatting pass. */
emem_strdup_vprintf(const gchar *fmt, va_list ap, void *allocator(size_t))
	len = g_printf_string_upper_bound(fmt, ap);

	dst = (gchar *)allocator(len+1);
	g_vsnprintf (dst, (gulong) len, fmt, ap2);

ep_strdup_vprintf(const gchar *fmt, va_list ap)
	return emem_strdup_vprintf(fmt, ap, ep_alloc);

se_strdup_vprintf(const gchar* fmt, va_list ap)
	return emem_strdup_vprintf(fmt, ap, se_alloc);

/* printf-style wrappers over the vprintf variants above. */
ep_strdup_printf(const gchar *fmt, ...)
	dst = ep_strdup_vprintf(fmt, ap);

se_strdup_printf(const gchar *fmt, ...)
	dst = se_strdup_vprintf(fmt, ap);
/* Split 'string' on separator 'sep' into at most 'max_tokens' tokens,
 * returning a NULL-terminated vector of pointers into an ep_-allocated
 * copy of the string.  A small state machine (AT_START/IN_PAD/IN_TOKEN)
 * walks the copy, where separator bytes have been overwritten.
 * NOTE(review): several lines of this function are not visible here. */
ep_strsplit(const gchar* string, const gchar* sep, int max_tokens)
	enum { AT_START, IN_PAD, IN_TOKEN } state;

	s = splitted = ep_strdup(string);
	str_len = (guint) strlen(splitted);
	sep_len = (guint) strlen(sep);

	if (max_tokens < 1) max_tokens = INT_MAX;

	/* First pass: count separators and blank out each occurrence. */
	while (tokens <= (guint)max_tokens && ( s = strstr(s,sep) )) {
		for(i=0; i < sep_len; i++ )

	vec = ep_alloc_array(gchar*,tokens+1);

	/* Second pass: record the start of each token in the vector. */
	for (i=0; i< str_len; i++) {
		switch(splitted[i]) {
				vec[curr_tok] = &(splitted[i]);
		switch(splitted[i]) {
		switch(splitted[i]) {
				vec[curr_tok] = &(splitted[i]);

	vec[curr_tok] = NULL;
/* Concatenate a NULL-terminated list of strings into one ep_-allocated
 * string.  Walks the va_list twice: first to total the lengths, then to
 * copy each argument with g_stpcpy. */
ep_strconcat(const gchar *string1, ...)
	/* First pass: measure. */
	l = 1 + strlen(string1);
	va_start(args, string1);
	s = va_arg(args, gchar*);
		s = va_arg(args, gchar*);

	concat = (gchar *)ep_alloc(l);

	/* Second pass: copy; g_stpcpy returns the new end pointer. */
	ptr = g_stpcpy(ptr, string1);
	va_start(args, string1);
	s = va_arg(args, gchar*);
		ptr = g_stpcpy(ptr, s);
		s = va_arg(args, gchar*);
/* release all allocated memory back to the pool. */
emem_free_all(emem_pool_t *mem)
	gboolean use_chunks = mem->debug_use_chunks;

	emem_tree_t *tree_list;

	/* move all used chunks over to the free list */
	while(mem->used_list){
		mem->used_list=mem->used_list->next;
		npc->next=mem->free_list;

	/* clear them all out */
	npc = mem->free_list;
	while (npc != NULL) {
		/* Verify the whole canary chain before recycling the chunk. */
		while (npc->canary_last != NULL) {
			npc->canary_last = emem_canary_next(mem->canary, (guint8 *)npc->canary_last, NULL);
			/* XXX, check if canary_last is inside allocated memory? */
			if (npc->canary_last == (void *) -1)
				g_error("Memory corrupted");

		/* Scrub only the region that was actually handed out. */
		emem_scrub_memory((npc->buf + npc->free_offset_init),
				  (npc->free_offset - npc->free_offset_init),

		/* Chunked mode: keep the chunk, just reset its bookkeeping. */
		npc->amount_free = npc->amount_free_init;
		npc->free_offset = npc->free_offset_init;

		/* Glib mode: each "chunk" is one allocation; free it outright. */
		emem_chunk_t *next = npc->next;

		emem_scrub_memory(npc->buf, npc->amount_free_init, FALSE);

	/* We've freed all this memory already */
	mem->free_list = NULL;

	/* release/reset all allocated trees */
	for(tree_list=mem->trees;tree_list;tree_list=tree_list->next){
		tree_list->tree=NULL;

/* release all allocated memory back to the pool. */
	emem_free_all(&ep_packet_mem);

/* release all allocated memory back to the pool. */
#ifdef SHOW_EMEM_STATS
	print_alloc_stats();

	emem_free_all(&se_packet_mem);
/* Create a new ep_-lifetime stack: a pointer to the current (sentinel)
 * frame, doubly linked via above/below. */
ep_stack_new(void) {
	ep_stack_t s = ep_new(struct _ep_stack_frame_t*);
	*s = ep_new0(struct _ep_stack_frame_t);

/* for ep_stack_t we'll keep the popped frames so we reuse them instead
of allocating new ones.
*/
ep_stack_push(ep_stack_t stack, void* data)
	struct _ep_stack_frame_t* frame;
	struct _ep_stack_frame_t* head = (*stack);

	/* Reuse a previously-popped frame above the head if one exists,
	 * otherwise allocate and link a fresh one. */
		frame = head->above;
		frame = ep_new(struct _ep_stack_frame_t);
		head->above = frame;
		frame->below = head;
		frame->above = NULL;

	frame->payload = data;

/* Pop the top frame and return its payload; the frame itself stays linked
 * for reuse by a later push. */
ep_stack_pop(ep_stack_t stack)
	if ((*stack)->below) {
		(*stack) = (*stack)->below;
		return (*stack)->above->payload;
/* Create a capture-lifetime binary tree and register it on the SE pool's
 * tree list so emem_free_all() can reset it.  The header is g_malloc'd
 * (it must outlive individual captures); nodes use se_alloc. */
se_tree_create(int type, const char *name)
	emem_tree_t *tree_list;

	tree_list=(emem_tree_t *)g_malloc(sizeof(emem_tree_t));
	tree_list->next=se_packet_mem.trees;
	tree_list->type=type;
	tree_list->tree=NULL;
	tree_list->name=name;
	tree_list->malloc=se_alloc;
	se_packet_mem.trees=tree_list;

/* Standard binary-search-tree lookup by exact 32-bit key; returns the
 * node's data or (presumably) NULL when absent — the miss path is not
 * visible in this view. */
emem_tree_lookup32(emem_tree_t *se_tree, guint32 key)
	emem_tree_node_t *node;

		if(key==node->key32){
		if(key<node->key32){
		if(key>node->key32){
/* Find the node with the largest key that is <= 'key' (less-or-equal
 * lookup).  First descends like a normal BST search; if no exact match is
 * found, walks back up deciding between the stopping node and its
 * ancestors.  NOTE(review): several branch bodies are missing from this
 * view; verify the walk-up logic against the full source. */
emem_tree_lookup32_le(emem_tree_t *se_tree, guint32 key)
	emem_tree_node_t *node;

		if(key==node->key32){
		if(key<node->key32){
		if(key>node->key32){

	/* If we are still at the root of the tree this means that this node
	 * is either smaller than the search key and then we return this
	 * node or else there is no smaller key available and then
	 * we return NULL.
	 */
		if(key>node->key32){

	if(node->parent->left==node){
		if(key>node->key32){
			/* if this is a left child and its key is smaller than
			 * the search key, then this is the node we want.
			 */
			/* if this is a left child and its key is bigger than
			 * the search key, we have to check if any
			 * of our ancestors are smaller than the search key.
			 */
				if(key>node->key32){

		if(node->key32<key){
			/* if this is the right child and its key is smaller
			 * than the search key then this is the one we want.
			 */
			/* if this is the right child and its key is larger
			 * than the search key then our parent is the one we
			 * want.
			 */
			return node->parent->data;
/* Return the parent of a node (NULL for the root). */
static inline emem_tree_node_t *
emem_tree_parent(emem_tree_node_t *node)
	return node->parent;

/* Return the grandparent of a node, or NULL if the node has no parent. */
static inline emem_tree_node_t *
emem_tree_grandparent(emem_tree_node_t *node)
	emem_tree_node_t *parent;

	parent=emem_tree_parent(node);

	return parent->parent;

/* Return the uncle (the parent's sibling), used by the red-black
 * insertion fix-up; NULL when parent or grandparent is missing. */
static inline emem_tree_node_t *
emem_tree_uncle(emem_tree_node_t *node)
	emem_tree_node_t *parent, *grandparent;

	parent=emem_tree_parent(node);

	grandparent=emem_tree_parent(parent);

	if(parent==grandparent->left){
		return grandparent->right;
	return grandparent->left;
static inline void rb_insert_case1(emem_tree_t *se_tree, emem_tree_node_t *node);
static inline void rb_insert_case2(emem_tree_t *se_tree, emem_tree_node_t *node);

/* Standard left rotation around 'node': node's right child takes node's
 * place and node becomes its left child.  Updates the tree root when the
 * rotation happens at the top. */
rotate_left(emem_tree_t *se_tree, emem_tree_node_t *node)
	if(node->parent->left==node){
		node->parent->left=node->right;
		node->parent->right=node->right;
		/* Rotating at the root: the right child becomes the new root. */
		se_tree->tree=node->right;
	node->right->parent=node->parent;
	node->parent=node->right;
	node->right=node->right->left;
		node->right->parent=node;
	node->parent->left=node;

/* Mirror image of rotate_left: rotate right around 'node'. */
rotate_right(emem_tree_t *se_tree, emem_tree_node_t *node)
	if(node->parent->left==node){
		node->parent->left=node->left;
		node->parent->right=node->left;
		/* Rotating at the root: the left child becomes the new root. */
		se_tree->tree=node->left;
	node->left->parent=node->parent;
	node->parent=node->left;
	node->left=node->left->right;
		node->left->parent=node;
	node->parent->right=node;
1474 rb_insert_case5(emem_tree_t *se_tree, emem_tree_node_t *node)
1476 emem_tree_node_t *grandparent;
1477 emem_tree_node_t *parent;
1479 parent=emem_tree_parent(node);
1480 grandparent=emem_tree_parent(parent);
1481 parent->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1482 grandparent->u.rb_color=EMEM_TREE_RB_COLOR_RED;
1483 if( (node==parent->left) && (parent==grandparent->left) ){
1484 rotate_right(se_tree, grandparent);
1486 rotate_left(se_tree, grandparent);
/* Insert case 4: red parent, black uncle, and the new node is on the
 * "inside" of the grandparent (left-right or right-left).  Rotate at the
 * parent to convert it into an outside configuration, then finish via
 * case 5.
 * NOTE(review): elided extract -- the reassignment of "node" after the
 * rotation (which case 5 relies on) is not visible in these lines. */
1491 rb_insert_case4(emem_tree_t *se_tree, emem_tree_node_t *node)
1493 emem_tree_node_t *grandparent;
1494 emem_tree_node_t *parent;
1496 parent=emem_tree_parent(node);
1497 grandparent=emem_tree_parent(parent);
1501 if( (node==parent->right) && (parent==grandparent->left) ){
1502 rotate_left(se_tree, parent);
1504 } else if( (node==parent->left) && (parent==grandparent->right) ){
1505 rotate_right(se_tree, parent);
1508 rb_insert_case5(se_tree, node);
/* Insert case 3: if the uncle is red, recolour parent and uncle black and
 * the grandparent red, then restart the fixup from the grandparent
 * (case 1).  Otherwise (black or missing uncle) continue with case 4. */
1512 rb_insert_case3(emem_tree_t *se_tree, emem_tree_node_t *node)
1514 emem_tree_node_t *grandparent;
1515 emem_tree_node_t *parent;
1516 emem_tree_node_t *uncle;
1518 uncle=emem_tree_uncle(node);
1519 if(uncle && (uncle->u.rb_color==EMEM_TREE_RB_COLOR_RED)){
1520 parent=emem_tree_parent(node);
1521 parent->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1522 uncle->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1523 grandparent=emem_tree_grandparent(node);
1524 grandparent->u.rb_color=EMEM_TREE_RB_COLOR_RED;
1525 rb_insert_case1(se_tree, grandparent);
1527 rb_insert_case4(se_tree, node);
/* Insert case 2: a black parent means the tree is still valid and the
 * fixup can stop (the early return is in lines elided from this view);
 * otherwise the red-red violation is handled by case 3. */
1532 rb_insert_case2(emem_tree_t *se_tree, emem_tree_node_t *node)
1534 emem_tree_node_t *parent;
1536 parent=emem_tree_parent(node);
1537 /* parent is always non-NULL here */
1538 if(parent->u.rb_color==EMEM_TREE_RB_COLOR_BLACK){
1541 rb_insert_case3(se_tree, node);
/* Insert case 1: if the node is the root (no parent) simply paint it
 * black; otherwise proceed to case 2.  The conditional separating the
 * two paths lies in lines elided from this view. */
1545 rb_insert_case1(emem_tree_t *se_tree, emem_tree_node_t *node)
1547 emem_tree_node_t *parent;
1549 parent=emem_tree_parent(node);
1551 node->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1554 rb_insert_case2(se_tree, node);
1557 /* insert a new node in the tree. if this node matches an already existing node
1558 * then just replace the data for that node */
/* NOTE(review): elided extract -- braces, loop constructs and several
 * statements (e.g. the root assignment and key/data stores on the
 * first-node path) are missing from this view.  Code bytes verbatim. */
1560 emem_tree_insert32(emem_tree_t *se_tree, guint32 key, void *data)
1562 emem_tree_node_t *node;
1566 /* is this the first node ?*/
/* Empty tree: allocate the node from the tree's own allocator and, for a
 * red-black tree, make the root black. */
1568 node=(emem_tree_node_t *)se_tree->malloc(sizeof(emem_tree_node_t));
1569 switch(se_tree->type){
1570 case EMEM_TREE_TYPE_RED_BLACK:
1571 node->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1579 node->u.is_subtree = EMEM_TREE_NODE_IS_DATA;
1584 /* it was not the new root so walk the tree until we find where to
1585 * insert this new leaf.
1588 /* this node already exists, so just replace the data pointer*/
1589 if(key==node->key32){
1593 if(key<node->key32) {
1595 /* new node to the left */
1596 emem_tree_node_t *new_node;
1597 new_node=(emem_tree_node_t *)se_tree->malloc(sizeof(emem_tree_node_t));
1598 node->left=new_node;
1599 new_node->parent=node;
1600 new_node->left=NULL;
1601 new_node->right=NULL;
1602 new_node->key32=key;
1603 new_node->data=data;
1604 new_node->u.is_subtree=EMEM_TREE_NODE_IS_DATA;
1611 if(key>node->key32) {
1613 /* new node to the right */
1614 emem_tree_node_t *new_node;
1615 new_node=(emem_tree_node_t *)se_tree->malloc(sizeof(emem_tree_node_t));
1616 node->right=new_node;
1617 new_node->parent=node;
1618 new_node->left=NULL;
1619 new_node->right=NULL;
1620 new_node->key32=key;
1621 new_node->data=data;
1622 new_node->u.is_subtree=EMEM_TREE_NODE_IS_DATA;
1631 /* node will now point to the newly created node */
/* Newly inserted leaf starts red, then the tree is rebalanced. */
1632 switch(se_tree->type){
1633 case EMEM_TREE_TYPE_RED_BLACK:
1634 node->u.rb_color=EMEM_TREE_RB_COLOR_RED;
1635 rb_insert_case1(se_tree, node);
/* Like emem_tree_insert32(), but the stored data is produced on demand:
 * if the key is absent a new node is created whose data comes from
 * func(ud); if the key already exists the existing data is returned
 * (the return statements live in lines elided from this view).
 * "is_subtree" marks whether the stored data is a nested emem_tree.
 * NOTE(review): elided extract; code bytes left verbatim. */
1641 lookup_or_insert32(emem_tree_t *se_tree, guint32 key, void*(*func)(void*),void* ud, int is_subtree)
1643 emem_tree_node_t *node;
1647 /* is this the first node ?*/
1649 node=(emem_tree_node_t *)se_tree->malloc(sizeof(emem_tree_node_t));
1650 switch(se_tree->type){
1651 case EMEM_TREE_TYPE_RED_BLACK:
1652 node->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1659 node->data= func(ud);
1660 node->u.is_subtree = is_subtree;
1665 /* it was not the new root so walk the tree until we find where to
1666 * insert this new leaf.
1669 /* this node already exists, so just return the data pointer*/
1670 if(key==node->key32){
1673 if(key<node->key32) {
1675 /* new node to the left */
1676 emem_tree_node_t *new_node;
1677 new_node=(emem_tree_node_t *)se_tree->malloc(sizeof(emem_tree_node_t));
1678 node->left=new_node;
1679 new_node->parent=node;
1680 new_node->left=NULL;
1681 new_node->right=NULL;
1682 new_node->key32=key;
1683 new_node->data= func(ud);
1684 new_node->u.is_subtree = is_subtree;
1691 if(key>node->key32) {
1693 /* new node to the right */
1694 emem_tree_node_t *new_node;
1695 new_node=(emem_tree_node_t *)se_tree->malloc(sizeof(emem_tree_node_t));
1696 node->right=new_node;
1697 new_node->parent=node;
1698 new_node->left=NULL;
1699 new_node->right=NULL;
1700 new_node->key32=key;
1701 new_node->data= func(ud);
1702 new_node->u.is_subtree = is_subtree;
1711 /* node will now point to the newly created node */
/* Newly inserted leaf starts red, then the tree is rebalanced. */
1712 switch(se_tree->type){
1713 case EMEM_TREE_TYPE_RED_BLACK:
1714 rb_insert_case1(se_tree, node);
1715 rb_insert_case1(se_tree, node);
1722 /* When the se data is released, this entire tree will disappear as if it
1723 * never existed, including all metadata associated with the tree.
/* Create a tree whose header and nodes are allocated with se_alloc, so
 * the whole tree (metadata included) vanishes when the se scope is
 * released.  The return of tree_list is in lines elided from this view. */
1726 se_tree_create_non_persistent(int type, const char *name)
1728 emem_tree_t *tree_list;
1730 tree_list=(emem_tree_t *)se_alloc(sizeof(emem_tree_t));
1731 tree_list->next=NULL;
1732 tree_list->type=type;
1733 tree_list->tree=NULL;
1734 tree_list->name=name;
1735 tree_list->malloc=se_alloc;
1740 /* This tree is permanent and will never be released
/* Permanent tree: header from g_new, nodes from g_malloc -- lives for the
 * lifetime of the process.  The return is elided from this view. */
1743 pe_tree_create(int type, const char *name)
1745 emem_tree_t *tree_list;
1747 tree_list=g_new(emem_tree_t, 1);
1748 tree_list->next=NULL;
1749 tree_list->type=type;
1750 tree_list->tree=NULL;
1751 tree_list->name=name;
1752 tree_list->malloc=(void *(*)(size_t)) g_malloc;
1757 /* create another (sub)tree using the same memory allocation scope
1758 * as the parent tree.
/* The subtree inherits both the type and the allocator of its parent, so
 * its lifetime matches the parent's.  The return is elided from view. */
1760 static emem_tree_t *
1761 emem_tree_create_subtree(emem_tree_t *parent_tree, const char *name)
1763 emem_tree_t *tree_list;
1765 tree_list=(emem_tree_t *)parent_tree->malloc(sizeof(emem_tree_t));
1766 tree_list->next=NULL;
1767 tree_list->type=parent_tree->type;
1768 tree_list->tree=NULL;
1769 tree_list->name=name;
1770 tree_list->malloc=parent_tree->malloc;
/* Adapter with the void*(*)(void*) signature expected by
 * lookup_or_insert32(): creates a subtree of the tree passed as "d". */
1776 create_sub_tree(void* d)
1778 emem_tree_t *se_tree = (emem_tree_t *)d;
1779 return emem_tree_create_subtree(se_tree, "subtree");
1782 /* insert a new node in the tree. if this node matches an already existing node
1783 * then just replace the data for that node */
/* Composite-key insert: walks the guint32 key array, descending through a
 * chain of subtrees (created on demand via lookup_or_insert32 +
 * create_sub_tree) for every key word except the last, which is inserted
 * into the innermost tree with the actual data.
 * NOTE(review): elided extract -- the guard distinguishing the first key
 * word from subsequent ones is not visible here.  Code bytes verbatim. */
1786 emem_tree_insert32_array(emem_tree_t *se_tree, emem_tree_key_t *key, void *data)
1788 emem_tree_t *insert_tree = NULL;
1789 emem_tree_key_t *cur_key;
1790 guint32 i, insert_key32 = 0;
1792 if(!se_tree || !key) return;
1794 for (cur_key = key; cur_key->length > 0; cur_key++) {
1795 if(cur_key->length > 100) {
/* Sanity limit on a single key segment's length. */
1796 DISSECTOR_ASSERT_NOT_REACHED();
1799 for (i = 0; i < cur_key->length; i++) {
1800 /* Insert using the previous key32 */
1802 insert_tree = se_tree;
1804 insert_tree = (emem_tree_t *)lookup_or_insert32(insert_tree, insert_key32, create_sub_tree, se_tree, EMEM_TREE_NODE_IS_SUBTREE);
1806 insert_key32 = cur_key->key[i];
1811 /* We didn't get a valid key. Should we return NULL instead? */
1812 DISSECTOR_ASSERT_NOT_REACHED();
1815 emem_tree_insert32(insert_tree, insert_key32, data);
/* Composite-key lookup: mirrors emem_tree_insert32_array() but only
 * descends through existing subtrees via emem_tree_lookup32(); the final
 * key word is looked up in the innermost tree.  Returns NULL for a NULL
 * tree/key.  NOTE(review): elided extract -- the NULL check after the
 * intermediate lookup is not visible here.  Code bytes verbatim. */
1820 emem_tree_lookup32_array(emem_tree_t *se_tree, emem_tree_key_t *key)
1822 emem_tree_t *lookup_tree = NULL;
1823 emem_tree_key_t *cur_key;
1824 guint32 i, lookup_key32 = 0;
1826 if(!se_tree || !key) return NULL; /* prevent searching on NULL pointer */
1828 for (cur_key = key; cur_key->length > 0; cur_key++) {
1829 if(cur_key->length > 100) {
1830 DISSECTOR_ASSERT_NOT_REACHED();
1833 for (i = 0; i < cur_key->length; i++) {
1834 /* Lookup using the previous key32 */
1836 lookup_tree = se_tree;
1838 lookup_tree = (emem_tree_t *)emem_tree_lookup32(lookup_tree, lookup_key32);
1843 lookup_key32 = cur_key->key[i];
1848 /* We didn't get a valid key. Should we return NULL instead? */
1849 DISSECTOR_ASSERT_NOT_REACHED();
1852 return emem_tree_lookup32(lookup_tree, lookup_key32);
/* "Less-than-or-equal" variant of emem_tree_lookup32_array(): each step
 * uses emem_tree_lookup32_le(), so the closest key not greater than the
 * requested one is found.  NOTE(review): elided extract; code verbatim. */
1856 emem_tree_lookup32_array_le(emem_tree_t *se_tree, emem_tree_key_t *key)
1858 emem_tree_t *lookup_tree = NULL;
1859 emem_tree_key_t *cur_key;
1860 guint32 i, lookup_key32 = 0;
1862 if(!se_tree || !key) return NULL; /* prevent searching on NULL pointer */
1864 for (cur_key = key; cur_key->length > 0; cur_key++) {
1865 if(cur_key->length > 100) {
1866 DISSECTOR_ASSERT_NOT_REACHED();
1869 for (i = 0; i < cur_key->length; i++) {
1870 /* Lookup using the previous key32 */
1872 lookup_tree = se_tree;
1874 lookup_tree = (emem_tree_t *)emem_tree_lookup32_le(lookup_tree, lookup_key32);
1879 lookup_key32 = cur_key->key[i];
1884 /* We didn't get a valid key. Should we return NULL instead? */
1885 DISSECTOR_ASSERT_NOT_REACHED();
1888 return emem_tree_lookup32_le(lookup_tree, lookup_key32);
1892 /* Strings are stored as an array of uint32 containing the string characters
1893 with 4 characters in each uint32.
1894 The first byte of the string is stored as the most significant byte.
1895 If the string is not a multiple of 4 characters in length, the last
1896 uint32 containing the string bytes is padded with 0 bytes.
1897 After the uint32's containing the string, there is one final terminator
1898 uint32 with the value 0x00000001
/* Insert "v" under string key "k" by converting the string to a guint32
 * array (format described above) and delegating to
 * emem_tree_insert32_array().  With EMEM_TREE_STRING_NOCASE the key is
 * case-folded (the folding statement itself is in elided lines).
 * NOTE(review): elided extract -- the packing/padding arithmetic between
 * the numbered lines is not visible.  Code bytes verbatim. */
1901 emem_tree_insert_string(emem_tree_t* se_tree, const gchar* k, void* v, guint32 flags)
1903 emem_tree_key_t key[2];
1904 guint32 *aligned=NULL;
1905 guint32 len = (guint32) strlen(k);
1906 guint32 divx = (len+3)/4+1;
1910 aligned = (guint32 *)g_malloc(divx * sizeof (guint32));
1912 /* pack the bytes one by one into guint32s */
1914 for (i = 0;i < len;i++) {
1917 ch = (unsigned char)k[i];
1918 if (flags & EMEM_TREE_STRING_NOCASE) {
1930 /* add required padding to the last uint32 */
1936 aligned[i/4-1] = tmp;
1939 /* add the terminator */
1940 aligned[divx-1] = 0x00000001;
1942 key[0].length = divx;
1943 key[0].key = aligned;
1948 emem_tree_insert32_array(se_tree, key, v);
/* Look up the value stored under string key "k": converts the string to
 * the same guint32-array form as emem_tree_insert_string() and delegates
 * to emem_tree_lookup32_array().  NOTE(review): elided extract -- the
 * g_free of "aligned" and the return of "ret" are in lines not visible
 * here.  Code bytes verbatim. */
1953 emem_tree_lookup_string(emem_tree_t* se_tree, const gchar* k, guint32 flags)
1955 emem_tree_key_t key[2];
1956 guint32 *aligned=NULL;
1957 guint32 len = (guint) strlen(k);
1958 guint32 divx = (len+3)/4+1;
1963 aligned = (guint32 *)g_malloc(divx * sizeof (guint32));
1965 /* pack the bytes one by one into guint32s */
1967 for (i = 0;i < len;i++) {
1970 ch = (unsigned char)k[i];
1971 if (flags & EMEM_TREE_STRING_NOCASE) {
1983 /* add required padding to the last uint32 */
1989 aligned[i/4-1] = tmp;
1992 /* add the terminator */
1993 aligned[divx-1] = 0x00000001;
1995 key[0].length = divx;
1996 key[0].key = aligned;
2001 ret = emem_tree_lookup32_array(se_tree, key);
/* In-order traversal helper: recurse left, visit this node (descending
 * into nested trees for subtree nodes, otherwise invoking the callback on
 * the data), recurse right.  A TRUE result from any step stops the whole
 * traversal.  NOTE(review): elided extract -- the early returns after
 * each stop check are in lines not visible here.  Code bytes verbatim. */
2007 emem_tree_foreach_nodes(emem_tree_node_t* node, tree_foreach_func callback, void *user_data)
2009 gboolean stop_traverse = FALSE;
2015 stop_traverse = emem_tree_foreach_nodes(node->left, callback, user_data);
2016 if (stop_traverse) {
2021 if (node->u.is_subtree == EMEM_TREE_NODE_IS_SUBTREE) {
2022 stop_traverse = emem_tree_foreach((emem_tree_t *)node->data, callback, user_data);
2024 stop_traverse = callback(node->data, user_data);
2027 if (stop_traverse) {
2032 stop_traverse = emem_tree_foreach_nodes(node->right, callback, user_data);
2033 if (stop_traverse) {
/* Public traversal entry point: no-op on an empty tree, otherwise walks
 * all nodes (including nested subtrees) via emem_tree_foreach_nodes(). */
2042 emem_tree_foreach(emem_tree_t* emem_tree, tree_foreach_func callback, void *user_data)
2047 if(!emem_tree->tree)
2050 return emem_tree_foreach_nodes(emem_tree->tree, callback, user_data);
2053 static void emem_print_subtree(emem_tree_t* emem_tree, guint32 level);
/* Debug dump of one node: indent by "level", print its pointers, colour,
 * key and payload kind, then recurse into left/right children and, for a
 * subtree node, into the nested tree.  NOTE(review): elided extract --
 * the NULL guards around the recursive calls are not visible here. */
2056 emem_tree_print_nodes(const char *prefix, emem_tree_node_t* node, guint32 level)
2063 for(i=0;i<level;i++){
2067 printf("%sNODE:%p parent:%p left:%p right:%p colour:%s key:%u %s:%p\n", prefix,
2068 (void *)node,(void *)(node->parent),(void *)(node->left),(void *)(node->right),
2069 (node->u.rb_color)?"Black":"Red",(node->key32),(node->u.is_subtree)?"tree":"data",node->data);
2071 emem_tree_print_nodes("L-", node->left, level+1);
2073 emem_tree_print_nodes("R-", node->right, level+1);
2075 if (node->u.is_subtree)
2076 emem_print_subtree((emem_tree_t *)node->data, level+1);
/* Debug dump of a whole (sub)tree header followed by its nodes, indented
 * by "level".  Companion of emem_tree_print_nodes() above. */
2080 emem_print_subtree(emem_tree_t* emem_tree, guint32 level)
2087 for(i=0;i<level;i++){
2091 printf("EMEM tree:%p type:%s name:%s root:%p\n",(void *)emem_tree,(emem_tree->type==1)?"RedBlack":"unknown",emem_tree->name,(void *)(emem_tree->tree))
2093 emem_tree_print_nodes("Root-", emem_tree->tree, level);
/* Public debug entry point: dump the whole tree starting at indent 0. */
2097 emem_print_tree(emem_tree_t* emem_tree)
2099 emem_print_subtree(emem_tree, 0);
2107 * Presumably we're using these routines for building strings for the tree.
2108 * Use ITEM_LABEL_LENGTH as the basis for our default lengths.
/* Initial and absolute-maximum allocation sizes for ep_strbuf strings. */
2111 #define DEFAULT_STRBUF_LEN (ITEM_LABEL_LENGTH / 10)
2112 #define MAX_STRBUF_LEN 65536
/* Compute the next allocation size: start from cur_alloc_len (or the
 * default if unset), grow until it covers wanted_alloc_len (the growth
 * step inside the while loop is in elided lines), and clamp the result
 * to max_alloc_len (itself clamped to MAX_STRBUF_LEN). */
2115 next_size(gsize cur_alloc_len, gsize wanted_alloc_len, gsize max_alloc_len)
2117 if (max_alloc_len < 1 || max_alloc_len > MAX_STRBUF_LEN) {
2118 max_alloc_len = MAX_STRBUF_LEN;
2121 if (cur_alloc_len < 1) {
2122 cur_alloc_len = DEFAULT_STRBUF_LEN;
2125 while (cur_alloc_len < wanted_alloc_len) {
2129 return cur_alloc_len < max_alloc_len ? cur_alloc_len : max_alloc_len;
/* Grow strbuf's storage to at least wanted_alloc_len (subject to its
 * max_alloc_len cap): allocate a fresh ep buffer of the next_size() and
 * copy the current contents over with g_strlcpy.  No-op when strbuf is
 * NULL, already big enough, or already at its cap.  The old buffer is not
 * freed -- it is ep (packet-scope) memory. */
2133 ep_strbuf_grow(emem_strbuf_t *strbuf, gsize wanted_alloc_len)
2135 gsize new_alloc_len;
2138 if (!strbuf || (wanted_alloc_len <= strbuf->alloc_len) || (strbuf->alloc_len >= strbuf->max_alloc_len)) {
2142 new_alloc_len = next_size(strbuf->alloc_len, wanted_alloc_len, strbuf->max_alloc_len);
2143 new_str = (gchar *)ep_alloc(new_alloc_len);
2144 g_strlcpy(new_str, strbuf->str, new_alloc_len);
2146 strbuf->alloc_len = new_alloc_len;
2147 strbuf->str = new_str;
/* Allocate a new packet-scope string buffer with alloc_len bytes of
 * storage (clamped to max_alloc_len; a max of 0 means MAX_STRBUF_LEN)
 * and an empty string.  The minimum-alloc clamp and the return of the
 * strbuf are in lines elided from this view. */
2151 ep_strbuf_sized_new(gsize alloc_len, gsize max_alloc_len)
2153 emem_strbuf_t *strbuf;
2155 strbuf = ep_new(emem_strbuf_t);
2157 if ((max_alloc_len == 0) || (max_alloc_len > MAX_STRBUF_LEN))
2158 max_alloc_len = MAX_STRBUF_LEN;
2161 else if (alloc_len > max_alloc_len)
2162 alloc_len = max_alloc_len;
2164 strbuf->str = (char *)ep_alloc(alloc_len);
2165 strbuf->str[0] = '\0';
2168 strbuf->alloc_len = alloc_len;
2169 strbuf->max_alloc_len = max_alloc_len;
/* Create a strbuf sized to hold "init" (which may be NULL) and copy it
 * in; len records the copied length, capped to the allocation.  The NULL
 * check around the copy and the return are elided from this view. */
2175 ep_strbuf_new(const gchar *init)
2177 emem_strbuf_t *strbuf;
2179 strbuf = ep_strbuf_sized_new(next_size(0, init?strlen(init)+1:0, 0), 0); /* +1 for NULL terminator */
2182 full_len = g_strlcpy(strbuf->str, init, strbuf->alloc_len);
2183 strbuf->len = MIN(full_len, strbuf->alloc_len-1);
/* Create a strbuf intended for item labels (max ITEM_LABEL_LENGTH):
 * optimistically allocate DEFAULT_STRBUF_LEN, try to copy "init", and
 * only reallocate at the exact needed size if the first copy truncated.
 * NOTE(review): elided extract -- the NULL check on init and the return
 * are in lines not visible here.  Code bytes verbatim. */
2190 ep_strbuf_new_label(const gchar *init)
2192 emem_strbuf_t *strbuf;
2195 /* Be optimistic: Allocate default size strbuf string and only */
2196 /* request an increase if needed. */
2197 /* XXX: Is it reasonable to assume that much of the usage of */
2198 /* ep_strbuf_new_label will have init==NULL or */
2199 /* strlen(init) < DEFAULT_STRBUF_LEN) ??? */
2200 strbuf = ep_strbuf_sized_new(DEFAULT_STRBUF_LEN, ITEM_LABEL_LENGTH);
2205 /* full_len does not count the trailing '\0'. */
2206 full_len = g_strlcpy(strbuf->str, init, strbuf->alloc_len);
2207 if (full_len < strbuf->alloc_len) {
2208 strbuf->len += full_len;
2210 strbuf = ep_strbuf_sized_new(full_len+1, ITEM_LABEL_LENGTH);
2211 full_len = g_strlcpy(strbuf->str, init, strbuf->alloc_len);
2212 strbuf->len = MIN(full_len, strbuf->alloc_len-1);
/* Append "str" to the buffer: try a direct g_strlcpy into the remaining
 * space first; if it would truncate, undo the partial write, grow the
 * buffer to fit, and copy again.  NULL/empty arguments are a no-op (the
 * early return is elided from this view). */
2219 ep_strbuf_append(emem_strbuf_t *strbuf, const gchar *str)
2221 gsize add_len, full_len;
2223 if (!strbuf || !str || str[0] == '\0') {
2227 /* Be optimistic; try the g_strlcpy first & see if enough room. */
2228 /* Note: full_len doesn't count the trailing '\0'; add_len does allow for same */
2229 add_len = strbuf->alloc_len - strbuf->len;
2230 full_len = g_strlcpy(&strbuf->str[strbuf->len], str, add_len);
2231 if (full_len < add_len) {
2232 strbuf->len += full_len;
2234 strbuf->str[strbuf->len] = '\0'; /* end string at original length again */
2235 ep_strbuf_grow(strbuf, strbuf->len + full_len + 1);
2236 add_len = strbuf->alloc_len - strbuf->len;
2237 full_len = g_strlcpy(&strbuf->str[strbuf->len], str, add_len);
2238 strbuf->len += MIN(add_len-1, full_len);
/* vprintf-style append: format into the remaining space; if g_vsnprintf
 * reports truncation, undo the partial write, grow the buffer, and format
 * again.  NOTE(review): "ap2" used for the second pass must be a va_copy
 * of "ap" made before the first g_vsnprintf consumed it -- its
 * declaration/va_copy (and va_end) are in lines elided from this view. */
2245 ep_strbuf_append_vprintf(emem_strbuf_t *strbuf, const gchar *format, va_list ap)
2248 gsize add_len, full_len;
2252 /* Be optimistic; try the g_vsnprintf first & see if enough room. */
2253 /* Note: full_len doesn't count the trailing '\0'; add_len does allow for same. */
2254 add_len = strbuf->alloc_len - strbuf->len;
2255 full_len = g_vsnprintf(&strbuf->str[strbuf->len], (gulong) add_len, format, ap);
2256 if (full_len < add_len) {
2257 strbuf->len += full_len;
2259 strbuf->str[strbuf->len] = '\0'; /* end string at original length again */
2260 ep_strbuf_grow(strbuf, strbuf->len + full_len + 1);
2261 add_len = strbuf->alloc_len - strbuf->len;
2262 full_len = g_vsnprintf(&strbuf->str[strbuf->len], (gulong) add_len, format, ap2);
2263 strbuf->len += MIN(add_len-1, full_len);
/* printf-style append: wraps ep_strbuf_append_vprintf() (the va_end is
 * in lines elided from this view). */
2270 ep_strbuf_append_printf(emem_strbuf_t *strbuf, const gchar *format, ...)
2274 va_start(ap, format);
2275 ep_strbuf_append_vprintf(strbuf, format, ap);
/* printf-style replace: also delegates to ep_strbuf_append_vprintf();
 * the reset of strbuf->len/str that makes this overwrite rather than
 * append is in lines elided from this view. */
2280 ep_strbuf_printf(emem_strbuf_t *strbuf, const gchar *format, ...)
2289 va_start(ap, format);
2290 ep_strbuf_append_vprintf(strbuf, format, ap);
/* Append a single character: grow if there isn't room for the character
 * plus the terminating '\0', then store it only if the grow succeeded
 * (growth can be capped by max_alloc_len).  The len increment between
 * storing c and the '\0' is in a line elided from this view. */
2295 ep_strbuf_append_c(emem_strbuf_t *strbuf, const gchar c)
2301 /* +1 for the new character & +1 for the trailing '\0'. */
2302 if (strbuf->alloc_len < strbuf->len + 1 + 1) {
2303 ep_strbuf_grow(strbuf, strbuf->len + 1 + 1);
2305 if (strbuf->alloc_len >= strbuf->len + 1 + 1) {
2306 strbuf->str[strbuf->len] = c;
2308 strbuf->str[strbuf->len] = '\0';
/* Append a Unicode code point: encode it to UTF-8 with
 * g_unichar_to_utf8() (into a local buf, length "charlen"), grow the
 * buffer if the encoded bytes plus '\0' don't fit, and copy them in only
 * if the grow succeeded. */
2315 ep_strbuf_append_unichar(emem_strbuf_t *strbuf, const gunichar c)
2324 charlen = g_unichar_to_utf8(c, buf);
2326 /* +charlen for the new character & +1 for the trailing '\0'. */
2327 if (strbuf->alloc_len < strbuf->len + charlen + 1) {
2328 ep_strbuf_grow(strbuf, strbuf->len + charlen + 1);
2330 if (strbuf->alloc_len >= strbuf->len + charlen + 1) {
2331 memcpy(&strbuf->str[strbuf->len], buf, charlen);
2332 strbuf->len += charlen;
2333 strbuf->str[strbuf->len] = '\0';
/* Truncate the buffer to "len" characters by writing a '\0'; no-op when
 * strbuf is NULL or already no longer than len.  The update of
 * strbuf->len is in a line elided from this view. */
2340 ep_strbuf_truncate(emem_strbuf_t *strbuf, gsize len)
2342 if (!strbuf || len >= strbuf->len) {
2346 strbuf->str[len] = '\0';
2358 * indent-tabs-mode: t
2361 * ex: set shiftwidth=8 tabstop=8 noexpandtab:
2362 * :indentSize=8:tabSize=8:noTabs=false: