2 * Wireshark memory management and garbage collection functions
7 * Wireshark - Network traffic analyzer
8 * By Gerald Combs <gerald@wireshark.org>
9 * Copyright 1998 Gerald Combs
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version 2
14 * of the License, or (at your option) any later version.
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
36 #ifdef HAVE_SYS_TIME_H
47 #include <wiretap/file_util.h>
50 #include <windows.h> /* VirtualAlloc, VirtualProtect */
51 #include <process.h> /* getpid */
56 * Tools like Valgrind and ElectricFence don't work well with memchunks.
57 * Uncomment the defines below to make {ep|se}_alloc() allocate each
58 * object individually.
60 /* #define EP_DEBUG_FREE 1 */
61 /* #define SE_DEBUG_FREE 1 */
63 /* Do we want to use guard pages, if available? */
64 #define WANT_GUARD_PAGES 1
66 /* Do we want to use canaries ? */
67 #define DEBUG_USE_CANARIES 1
69 #ifdef WANT_GUARD_PAGES
70 /* Add guard pages at each end of our allocated memory */
71 #if defined(HAVE_SYSCONF) && defined(HAVE_MMAP) && defined(HAVE_MPROTECT) && defined(HAVE_STDINT_H)
73 #include <sys/types.h>
75 #if defined(MAP_ANONYMOUS)
76 #define ANON_PAGE_MODE (MAP_ANONYMOUS|MAP_PRIVATE)
77 #elif defined(MAP_ANON)
78 #define ANON_PAGE_MODE (MAP_ANON|MAP_PRIVATE)
80 #define ANON_PAGE_MODE (MAP_PRIVATE) /* have to map /dev/zero */
85 static int dev_zero_fd;
86 #define ANON_FD dev_zero_fd
90 #define USE_GUARD_PAGES 1
94 /* When required, allocate more memory from the OS in this size chunks */
95 #define EMEM_PACKET_CHUNK_SIZE 10485760
97 /* The maximum number of allocations per chunk */
98 #define EMEM_ALLOCS_PER_CHUNK (EMEM_PACKET_CHUNK_SIZE / 512)
101 #ifdef DEBUG_USE_CANARIES
102 #define EMEM_CANARY_SIZE 8
103 #define EMEM_CANARY_DATA_SIZE (EMEM_CANARY_SIZE * 2 - 1)
104 guint8 ep_canary[EMEM_CANARY_DATA_SIZE], se_canary[EMEM_CANARY_DATA_SIZE];
105 #endif /* DEBUG_USE_CANARIES */
/* One contiguous allocation chunk. Chunks live on a free or used list and
 * are carved up linearly via free_offset/amount_free; the *_init fields
 * record the post-setup starting state so a chunk can be recycled by
 * resetting the cursors instead of reallocating. */
107 typedef struct _emem_chunk_t {
108 struct _emem_chunk_t *next;
109 unsigned int amount_free_init;
110 unsigned int amount_free;
111 unsigned int free_offset_init;
112 unsigned int free_offset;
/* Canary bookkeeping: one recorded canary pointer + compare length per
 * allocation made from this chunk (checked at free-all time). */
114 #ifdef DEBUG_USE_CANARIES
115 #if ! defined(EP_DEBUG_FREE) && ! defined(SE_DEBUG_FREE)
116 unsigned int c_count;
117 void *canary[EMEM_ALLOCS_PER_CHUNK];
118 guint8 cmp_len[EMEM_ALLOCS_PER_CHUNK];
120 #endif /* DEBUG_USE_CANARIES */
/* Head of a pool: a free list of reusable chunks and a used list of
 * chunks whose current space has been handed out. */
123 typedef struct _emem_header_t {
124 emem_chunk_t *free_list;
125 emem_chunk_t *used_list;
/* ep_*: packet-lifetime pool; se_*: capture-lifetime pool. */
128 emem_header_t ep_packet_mem;
129 emem_header_t se_packet_mem;
131 #if !defined(SE_DEBUG_FREE)
/* Win32: page size and OS version queried once in ep_init_chunk(). */
133 static SYSTEM_INFO sysinfo;
134 static OSVERSIONINFO versinfo;
136 #elif defined(USE_GUARD_PAGES)
137 static intptr_t pagesize;
138 #endif /* _WIN32 / USE_GUARD_PAGES */
139 #endif /* SE_DEBUG_FREE */
141 #ifdef DEBUG_USE_CANARIES
/* Fill 'canary' (EMEM_CANARY_DATA_SIZE bytes) with unpredictable values.
 * Preference order: GLib's PRNG, then /dev/urandom, then srandom()/random()
 * seeded from time and pid as a last resort. */
143 * Set a canary value to be placed between memchunks.
146 emem_canary(guint8 *canary) {
148 #if GLIB_MAJOR_VERSION >= 2
149 static GRand *rand_state = NULL;
153 /* First, use GLib's random function if we have it */
154 #if GLIB_MAJOR_VERSION >= 2
155 if (rand_state == NULL) {
156 rand_state = g_rand_new();
158 for (i = 0; i < EMEM_CANARY_DATA_SIZE; i ++) {
159 canary[i] = (guint8) g_rand_int(rand_state);
165 /* Try /dev/urandom */
166 if ((fp = eth_fopen("/dev/urandom", "r")) != NULL) {
167 sz = fread(canary, 1, EMEM_CANARY_DATA_SIZE, fp);
/* NOTE(review): fread above requests EMEM_CANARY_DATA_SIZE bytes but the
 * success check compares against EMEM_CANARY_SIZE — looks like a bug;
 * confirm the intended comparison is EMEM_CANARY_DATA_SIZE. */
169 if (sz == EMEM_CANARY_SIZE) {
174 /* Our last resort */
175 srandom(time(NULL) | getpid());
176 for (i = 0; i < EMEM_CANARY_DATA_SIZE; i ++) {
177 canary[i] = (guint8) random();
180 #endif /* GLIB_MAJOR_VERSION >= 2 */
183 #if !defined(SE_DEBUG_FREE)
/* Compute padding so each allocation ends on an EMEM_CANARY_SIZE boundary
 * with at least EMEM_CANARY_SIZE bytes reserved for the canary itself
 * (result is in [EMEM_CANARY_SIZE, 2*EMEM_CANARY_SIZE)). */
185 * Given an allocation size, return the amount of padding needed for
189 emem_canary_pad (size_t allocation) {
192 pad = EMEM_CANARY_SIZE - (allocation % EMEM_CANARY_SIZE);
193 if (pad < EMEM_CANARY_SIZE)
194 pad += EMEM_CANARY_SIZE;
199 #endif /* DEBUG_USE_CANARIES */
202 /* Initialize the packet-lifetime memory allocation pool.
203 * This function should be called only once when Wireshark or TShark starts
/* Resets the ep pool lists, seeds the ep canary, and (when guard pages are
 * in use) queries the platform page size needed by emem_create_chunk(). */
209 ep_packet_mem.free_list=NULL;
210 ep_packet_mem.used_list=NULL;
212 #ifdef DEBUG_USE_CANARIES
213 emem_canary(ep_canary);
214 #endif /* DEBUG_USE_CANARIES */
216 #if !defined(SE_DEBUG_FREE)
218 /* Set up our guard page info for Win32 */
219 GetSystemInfo(&sysinfo);
220 pagesize = sysinfo.dwPageSize;
222 /* calling GetVersionEx using the OSVERSIONINFO structure.
223 * OSVERSIONINFOEX requires Win NT4 with SP6 or newer NT Versions.
224 * OSVERSIONINFOEX will fail on Win9x and older NT Versions.
226 * http://msdn.microsoft.com/library/en-us/sysinfo/base/getversionex.asp
227 * http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfo_str.asp
228 * http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfoex_str.asp
230 versinfo.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
231 GetVersionEx(&versinfo);
233 #elif defined(USE_GUARD_PAGES)
234 pagesize = sysconf(_SC_PAGESIZE);
/* Without MAP_ANONYMOUS/MAP_ANON we must map /dev/zero for anonymous pages. */
236 dev_zero_fd = open("/dev/zero", O_RDWR);
237 g_assert(dev_zero_fd != -1);
239 #endif /* _WIN32 / USE_GUARD_PAGES */
240 #endif /* SE_DEBUG_FREE */
244 /* Initialize the capture-lifetime memory allocation pool.
245 * This function should be called only once when Wireshark or TShark starts
/* Same reset as the ep pool, but for se (capture-lifetime) memory. */
251 se_packet_mem.free_list=NULL;
252 se_packet_mem.used_list=NULL;
254 #ifdef DEBUG_USE_CANARIES
255 emem_canary(se_canary);
256 #endif /* DEBUG_USE_CANARIES */
259 #if !defined(SE_DEBUG_FREE)
/* Allocate one EMEM_PACKET_CHUNK_SIZE chunk and link it onto *free_list.
 * With guard pages enabled, the first and last page inside the buffer are
 * made inaccessible so overruns fault immediately; the usable region and
 * its starting offset are recorded in the *_init fields. */
261 emem_create_chunk(emem_chunk_t **free_list) {
264 char *buf_end, *prot1, *prot2;
266 #elif defined(USE_GUARD_PAGES)
268 char *buf_end, *prot1, *prot2;
269 #endif /* _WIN32 / USE_GUARD_PAGES */
270 /* we don't have any free chunk, so we must allocate a new one */
273 npc = g_malloc(sizeof(emem_chunk_t));
275 #ifdef DEBUG_USE_CANARIES
276 #if ! defined(EP_DEBUG_FREE) && ! defined(SE_DEBUG_FREE)
279 #endif /* DEBUG_USE_CANARIES */
284 * MSDN documents VirtualAlloc/VirtualProtect at
285 * http://msdn.microsoft.com/library/en-us/memory/base/creating_guard_pages.asp
288 /* XXX - is MEM_COMMIT|MEM_RESERVE correct? */
289 npc->buf = VirtualAlloc(NULL, EMEM_PACKET_CHUNK_SIZE,
290 MEM_COMMIT|MEM_RESERVE, PAGE_READWRITE);
291 if(npc->buf == NULL) {
292 THROW(OutOfMemoryError);
294 buf_end = npc->buf + EMEM_PACKET_CHUNK_SIZE;
296 /* Align our guard pages on page-sized boundaries */
/* NOTE(review): casting a pointer to (int) truncates on 64-bit Windows;
 * the POSIX branch below correctly uses (intptr_t) — these two lines
 * should match it. */
297 prot1 = (char *) ((((int) npc->buf + pagesize - 1) / pagesize) * pagesize);
298 prot2 = (char *) ((((int) buf_end - (1 * pagesize)) / pagesize) * pagesize);
/* Win9x (VER_PLATFORM_WIN32_WINDOWS) can't protect pages, so only assert
 * success on NT-class systems. */
300 ret = VirtualProtect(prot1, pagesize, PAGE_NOACCESS, &oldprot);
301 g_assert(ret != 0 || versinfo.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS);
302 ret = VirtualProtect(prot2, pagesize, PAGE_NOACCESS, &oldprot);
303 g_assert(ret != 0 || versinfo.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS);
305 npc->amount_free_init = prot2 - prot1 - pagesize;
306 npc->amount_free = npc->amount_free_init;
307 npc->free_offset_init = (prot1 - npc->buf) + pagesize;
308 npc->free_offset = npc->free_offset_init;
310 #elif defined(USE_GUARD_PAGES)
311 npc->buf = mmap(NULL, EMEM_PACKET_CHUNK_SIZE,
312 PROT_READ|PROT_WRITE, ANON_PAGE_MODE, ANON_FD, 0);
313 if(npc->buf == MAP_FAILED) {
314 /* XXX - what do we have to cleanup here? */
315 THROW(OutOfMemoryError);
317 buf_end = npc->buf + EMEM_PACKET_CHUNK_SIZE;
319 /* Align our guard pages on page-sized boundaries */
320 prot1 = (char *) ((((intptr_t) npc->buf + pagesize - 1) / pagesize) * pagesize);
321 prot2 = (char *) ((((intptr_t) buf_end - (1 * pagesize)) / pagesize) * pagesize);
322 ret = mprotect(prot1, pagesize, PROT_NONE);
324 ret = mprotect(prot2, pagesize, PROT_NONE);
327 npc->amount_free_init = prot2 - prot1 - pagesize;
328 npc->amount_free = npc->amount_free_init;
329 npc->free_offset_init = (prot1 - npc->buf) + pagesize;
330 npc->free_offset = npc->free_offset_init;
332 #else /* Is there a draft in here? */
/* No guard-page support: plain malloc, whole chunk usable. */
333 npc->buf = malloc(EMEM_PACKET_CHUNK_SIZE);
334 if(npc->buf == NULL) {
335 THROW(OutOfMemoryError);
337 npc->amount_free_init = EMEM_PACKET_CHUNK_SIZE;
338 npc->amount_free = npc->amount_free_init;
339 npc->free_offset_init = 0;
340 npc->free_offset = npc->free_offset_init;
341 #endif /* USE_GUARD_PAGES */
346 /* allocate 'size' amount of memory with an allocation lifetime until the
/* Bump-pointer allocation from the current ep free-list chunk. If the
 * request (plus canary padding) doesn't fit, the current chunk is retired
 * to the used list and a fresh chunk is created. With EP_DEBUG_FREE each
 * allocation gets its own g_malloc block so tools like Valgrind can track it. */
350 ep_alloc(size_t size)
353 #ifndef EP_DEBUG_FREE
354 #ifdef DEBUG_USE_CANARIES
356 guint8 pad = emem_canary_pad(size);
359 #endif /* DEBUG_USE_CANARIES */
360 emem_chunk_t *free_list;
363 #ifndef EP_DEBUG_FREE
364 /* Round up to an 8 byte boundary. Make sure we have at least
365 * 8 pad bytes for our canary.
369 /* make sure we don't try to allocate too much (arbitrary limit) */
370 DISSECTOR_ASSERT(size<(EMEM_PACKET_CHUNK_SIZE>>2));
/* First-time use of the pool: create the initial chunk. */
372 emem_create_chunk(&ep_packet_mem.free_list);
374 /* oops, we need to allocate more memory to serve this request
375 * than we have free. move this node to the used list and try again
/* A chunk is also retired once its per-chunk canary table is full. */
377 if(size>ep_packet_mem.free_list->amount_free
378 #ifdef DEBUG_USE_CANARIES
379 || ep_packet_mem.free_list->c_count >= EMEM_ALLOCS_PER_CHUNK
380 #endif /* DEBUG_USE_CANARIES */
383 npc=ep_packet_mem.free_list;
384 ep_packet_mem.free_list=ep_packet_mem.free_list->next;
385 npc->next=ep_packet_mem.used_list;
386 ep_packet_mem.used_list=npc;
389 emem_create_chunk(&ep_packet_mem.free_list);
/* Carve the allocation off the front of the chunk's free space. */
391 free_list = ep_packet_mem.free_list;
393 buf = free_list->buf + free_list->free_offset;
395 free_list->amount_free -= size;
396 free_list->free_offset += size;
/* Write the canary into the trailing pad bytes and remember where it is
 * so ep_free_all() can verify it wasn't overwritten. */
398 #ifdef DEBUG_USE_CANARIES
399 cptr = (char *)buf + size - pad;
400 memcpy(cptr, &ep_canary, pad);
401 free_list->canary[free_list->c_count] = cptr;
402 free_list->cmp_len[free_list->c_count] = pad;
403 free_list->c_count++;
404 #endif /* DEBUG_USE_CANARIES */
406 #else /* EP_DEBUG_FREE */
409 npc=g_malloc(sizeof(emem_chunk_t));
410 npc->next=ep_packet_mem.used_list;
411 npc->amount_free=size;
413 npc->buf=g_malloc(size);
415 ep_packet_mem.used_list=npc;
416 #endif /* EP_DEBUG_FREE */
420 /* allocate 'size' amount of memory with an allocation lifetime until the
/* Capture-lifetime twin of ep_alloc(): identical bump-pointer scheme, but
 * drawing from se_packet_mem and stamping the se canary. */
424 se_alloc(size_t size)
427 #ifndef SE_DEBUG_FREE
428 #ifdef DEBUG_USE_CANARIES
430 guint8 pad = emem_canary_pad(size);
433 #endif /* DEBUG_USE_CANARIES */
434 emem_chunk_t *free_list;
437 #ifndef SE_DEBUG_FREE
438 /* Round up to an 8 byte boundary. Make sure we have at least
439 * 8 pad bytes for our canary.
443 /* make sure we don't try to allocate too much (arbitrary limit) */
444 DISSECTOR_ASSERT(size<(EMEM_PACKET_CHUNK_SIZE>>2));
/* First-time use of the pool: create the initial chunk. */
446 emem_create_chunk(&se_packet_mem.free_list);
448 /* oops, we need to allocate more memory to serve this request
449 * than we have free. move this node to the used list and try again
451 if(size>se_packet_mem.free_list->amount_free
452 #ifdef DEBUG_USE_CANARIES
453 || se_packet_mem.free_list->c_count >= EMEM_ALLOCS_PER_CHUNK
454 #endif /* DEBUG_USE_CANARIES */
457 npc=se_packet_mem.free_list;
458 se_packet_mem.free_list=se_packet_mem.free_list->next;
459 npc->next=se_packet_mem.used_list;
460 se_packet_mem.used_list=npc;
463 emem_create_chunk(&se_packet_mem.free_list);
/* Carve the allocation off the front of the chunk's free space. */
465 free_list = se_packet_mem.free_list;
467 buf = free_list->buf + free_list->free_offset;
469 free_list->amount_free -= size;
470 free_list->free_offset += size;
/* Record the canary so se_free_all() can detect overruns. */
472 #ifdef DEBUG_USE_CANARIES
473 cptr = (char *)buf + size - pad;
474 memcpy(cptr, &se_canary, pad);
475 free_list->canary[free_list->c_count] = cptr;
476 free_list->cmp_len[free_list->c_count] = pad;
477 free_list->c_count++;
478 #endif /* DEBUG_USE_CANARIES */
480 #else /* SE_DEBUG_FREE */
483 npc=g_malloc(sizeof(emem_chunk_t));
484 npc->next=se_packet_mem.used_list;
485 npc->amount_free=size;
487 npc->buf=g_malloc(size);
489 se_packet_mem.used_list=npc;
490 #endif /* SE_DEBUG_FREE */
/* ep_alloc() followed by zero-fill. */
496 void* ep_alloc0(size_t size) {
497 return memset(ep_alloc(size),'\0',size);
/* strdup() into packet-lifetime memory. */
500 gchar* ep_strdup(const gchar* src) {
501 guint len = strlen(src);
504 dst = strncpy(ep_alloc(len+1), src, len);
/* strndup() into packet-lifetime memory: copy at most 'len' chars. */
511 gchar* ep_strndup(const gchar* src, size_t len) {
512 gchar* dst = ep_alloc(len+1);
515 for (i = 0; (i < len) && src[i]; i++)
/* memdup() into packet-lifetime memory. */
523 void* ep_memdup(const void* src, size_t len) {
524 return memcpy(ep_alloc(len), src, len);
/* vprintf-style formatting into packet-lifetime memory; buffer is sized
 * with g_printf_string_upper_bound() before formatting. */
527 gchar* ep_strdup_vprintf(const gchar* fmt, va_list ap) {
534 len = g_printf_string_upper_bound(fmt, ap);
536 dst = ep_alloc(len+1);
537 g_vsnprintf (dst, len, fmt, ap2);
/* printf-style wrapper around ep_strdup_vprintf(). */
543 gchar* ep_strdup_printf(const gchar* fmt, ...) {
548 dst = ep_strdup_vprintf(fmt, ap);
/* Split 'string' on separator 'sep' into at most 'max_tokens' tokens,
 * returning a NULL-terminated vector of pointers into an ep-allocated copy.
 * Implementation: count separator occurrences first (to size the vector),
 * then walk the copy with a small state machine, NUL-ing out separators. */
553 gchar** ep_strsplit(const gchar* string, const gchar* sep, int max_tokens) {
561 enum { AT_START, IN_PAD, IN_TOKEN } state;
569 s = splitted = ep_strdup(string);
570 str_len = strlen(splitted);
571 sep_len = strlen(sep);
/* max_tokens < 1 means "unlimited". */
573 if (max_tokens < 1) max_tokens = INT_MAX;
/* First pass: count tokens by replacing each separator run with NULs. */
578 while (tokens <= (guint)max_tokens && ( s = strstr(s,sep) )) {
581 for(i=0; i < sep_len; i++ )
588 vec = ep_alloc_array(gchar*,tokens+1);
/* Second pass: record the start of each token in the vector. */
591 for (i=0; i< str_len; i++) {
594 switch(splitted[i]) {
599 vec[curr_tok] = &(splitted[i]);
605 switch(splitted[i]) {
612 switch(splitted[i]) {
614 vec[curr_tok] = &(splitted[i]);
/* NULL terminator so callers can iterate without a count. */
623 vec[curr_tok] = NULL;
/* se_alloc() followed by zero-fill. */
630 void* se_alloc0(size_t size) {
631 return memset(se_alloc(size),'\0',size);
634 /* If str is NULL, just return the string "<NULL>" so that the callers don't
635 * have to bother checking it.
637 gchar* se_strdup(const gchar* src) {
646 dst = strncpy(se_alloc(len+1), src, len);
/* strndup() into capture-lifetime memory: copy at most 'len' chars. */
653 gchar* se_strndup(const gchar* src, size_t len) {
654 gchar* dst = se_alloc(len+1);
657 for (i = 0; (i < len) && src[i]; i++)
/* memdup() into capture-lifetime memory. */
665 void* se_memdup(const void* src, size_t len) {
666 return memcpy(se_alloc(len), src, len);
/* vprintf-style formatting into capture-lifetime memory. */
669 gchar* se_strdup_vprintf(const gchar* fmt, va_list ap) {
676 len = g_printf_string_upper_bound(fmt, ap);
678 dst = se_alloc(len+1);
679 g_vsnprintf (dst, len, fmt, ap2);
/* printf-style wrapper around se_strdup_vprintf(). */
685 gchar* se_strdup_printf(const gchar* fmt, ...) {
690 dst = se_strdup_vprintf(fmt, ap);
695 /* release all allocated memory back to the pool.
/* Nothing is returned to the OS in the normal build: used chunks are moved
 * back to the free list and their cursors reset, so the memory is reused
 * for the next packet. Canaries of every allocation are verified first.
 * Under EP_DEBUG_FREE each per-allocation block is individually freed. */
701 #ifndef EP_DEBUG_FREE
702 #ifdef DEBUG_USE_CANARIES
704 #endif /* DEBUG_USE_CANARIES */
707 /* move all used chunks over to the free list */
708 while(ep_packet_mem.used_list){
709 npc=ep_packet_mem.used_list;
710 ep_packet_mem.used_list=ep_packet_mem.used_list->next;
711 npc->next=ep_packet_mem.free_list;
712 ep_packet_mem.free_list=npc;
715 /* clear them all out */
716 npc = ep_packet_mem.free_list;
717 while (npc != NULL) {
718 #ifndef EP_DEBUG_FREE
719 #ifdef DEBUG_USE_CANARIES
/* Any stomped canary means some dissector wrote past its allocation. */
720 for (i = 0; i < npc->c_count; i++) {
721 if (memcmp(npc->canary[i], &ep_canary, npc->cmp_len[i]) != 0)
722 g_error("Per-packet memory corrupted.");
725 #endif /* DEBUG_USE_CANARIES */
/* Rewind the chunk to its initial (post-guard-page) state for reuse. */
726 npc->amount_free = npc->amount_free_init;
727 npc->free_offset = npc->free_offset_init;
729 #else /* EP_DEBUG_FREE */
730 emem_chunk_t *next = npc->next;
735 #endif /* EP_DEBUG_FREE */
742 /* release all allocated memory back to the pool.
/* Capture-lifetime twin of ep_free_all(). Additionally resets the root
 * pointer of every registered se tree, since all their nodes lived in the
 * se pool that is being recycled here. */
748 emem_tree_t *se_tree_list;
749 #ifndef SE_DEBUG_FREE
750 #ifdef DEBUG_USE_CANARIES
752 #endif /* DEBUG_USE_CANARIES */
755 /* move all used chunks over to the free list */
756 while(se_packet_mem.used_list){
757 npc=se_packet_mem.used_list;
758 se_packet_mem.used_list=se_packet_mem.used_list->next;
759 npc->next=se_packet_mem.free_list;
760 se_packet_mem.free_list=npc;
763 /* clear them all out */
764 npc = se_packet_mem.free_list;
765 while (npc != NULL) {
766 #ifndef SE_DEBUG_FREE
767 #ifdef DEBUG_USE_CANARIES
/* Any stomped canary means something wrote past its allocation. */
768 for (i = 0; i < npc->c_count; i++) {
769 if (memcmp(npc->canary[i], &se_canary, npc->cmp_len[i]) != 0)
770 g_error("Per-session memory corrupted.");
773 #endif /* DEBUG_USE_CANARIES */
774 npc->amount_free = npc->amount_free_init;
775 npc->free_offset = npc->free_offset_init;
777 #else /* SE_DEBUG_FREE */
778 emem_chunk_t *next = npc->next;
783 #endif /* SE_DEBUG_FREE */
790 /* release/reset all se allocated trees */
791 for(se_tree_list=se_trees;se_tree_list;se_tree_list=se_tree_list->next){
792 se_tree_list->tree=NULL;
/* Create a new packet-lifetime stack: a pointer to the current top frame,
 * initialized with one zeroed sentinel frame. */
797 ep_stack_t ep_stack_new(void) {
798 ep_stack_t s = ep_new(struct _ep_stack_frame_t*);
799 *s = ep_new0(struct _ep_stack_frame_t);
803 /* for ep_stack_t we'll keep the popped frames so we reuse them instead
804 of allocating new ones.
/* Push 'data': reuse the frame above the current head if one was kept from
 * an earlier pop, otherwise allocate a fresh frame. Returns 'data'. */
808 void* ep_stack_push(ep_stack_t stack, void* data) {
809 struct _ep_stack_frame_t* frame;
810 struct _ep_stack_frame_t* head = (*stack);
815 frame = ep_new(struct _ep_stack_frame_t);
821 frame->payload = data;
/* Pop: step down to the frame below and return the payload of the frame
 * just vacated (kept linked above for reuse by the next push). */
827 void* ep_stack_pop(ep_stack_t stack) {
829 if ((*stack)->below) {
830 (*stack) = (*stack)->below;
831 return (*stack)->above->payload;
/* Debug helper: recursively print a red-black tree node and its children,
 * indented by 'level'. */
840 void print_tree_item(emem_tree_node_t *node, int level){
842 for(i=0;i<level;i++){
/* NOTE(review): the (int) casts of node pointers for %08x truncate on
 * 64-bit platforms; %p with the pointers directly would be correct. */
845 printf("%s KEY:0x%08x node:0x%08x parent:0x%08x left:0x%08x right:0x%08x\n",node->u.rb_color==EMEM_TREE_RB_COLOR_BLACK?"BLACK":"RED",node->key32,(int)node,(int)node->parent,(int)node->left,(int)node->right);
847 print_tree_item(node->left,level+1);
849 print_tree_item(node->right,level+1);
/* Debug helper: print an entire tree starting at its root. */
852 void print_tree(emem_tree_node_t *node){
859 print_tree_item(node,0);
865 /* routines to manage se allocated red-black trees */
/* Global registry of persistent se trees; se_free_all() walks this list to
 * reset each tree's root when the se pool is recycled. */
866 emem_tree_t *se_trees=NULL;
/* Create a persistent tree whose nodes are se-allocated. The header itself
 * is heap-allocated and registered on se_trees so it survives se_free_all().
 * NOTE(review): bare malloc() here is unchecked and inconsistent with the
 * g_malloc() used by pe_tree_create() — confirm intended. */
869 se_tree_create(int type, const char *name)
871 emem_tree_t *tree_list;
873 tree_list=malloc(sizeof(emem_tree_t));
874 tree_list->next=se_trees;
875 tree_list->type=type;
876 tree_list->tree=NULL;
877 tree_list->name=name;
878 tree_list->malloc=se_alloc;
/* Exact-match lookup of 'key' in the tree; returns the node's data. */
887 emem_tree_lookup32(emem_tree_t *se_tree, guint32 key)
889 emem_tree_node_t *node;
894 if(key==node->key32){
/* Lookup the largest key that is <= 'key' (floor search): first walk down
 * to where the key would be, then climb back up to find the nearest
 * smaller-or-equal ancestor. */
910 emem_tree_lookup32_le(emem_tree_t *se_tree, guint32 key)
912 emem_tree_node_t *node;
922 if(key==node->key32){
944 /* If we are still at the root of the tree this means that this node
945 * is either smaller than the search key and then we return this
946 * node or else there is no smaller key available and then
957 if(node->parent->left==node){
961 /* if this is a left child and its key is smaller than
962 * the search key, then this is the node we want.
966 /* if this is a left child and its key is bigger than
967 * the search key, we have to check if any
968 * of our ancestors are smaller than the search key.
982 /* if this is the right child and its key is smaller
983 * than the search key then this is the one we want.
987 /* if this is the right child and its key is larger
988 * than the search key then our parent is the one we
991 return node->parent->data;
/* Return a node's parent (NULL for the root). */
998 static inline emem_tree_node_t *
999 emem_tree_parent(emem_tree_node_t *node)
1001 return node->parent;
/* Return a node's grandparent, if any. */
1004 static inline emem_tree_node_t *
1005 emem_tree_grandparent(emem_tree_node_t *node)
1007 emem_tree_node_t *parent;
1009 parent=emem_tree_parent(node);
1011 return parent->parent;
/* Return a node's uncle (the parent's sibling), if any — used by the
 * red-black insert rebalancing cases below. */
1015 static inline emem_tree_node_t *
1016 emem_tree_uncle(emem_tree_node_t *node)
1018 emem_tree_node_t *parent, *grandparent;
1020 parent=emem_tree_parent(node);
1024 grandparent=emem_tree_parent(parent);
1028 if(parent==grandparent->left){
1029 return grandparent->right;
1031 return grandparent->left;
1034 static inline void rb_insert_case1(emem_tree_t *se_tree, emem_tree_node_t *node);
1035 static inline void rb_insert_case2(emem_tree_t *se_tree, emem_tree_node_t *node);
/* Standard red-black left rotation about 'node'; updates the parent's
 * child link (or the tree root if 'node' was the root). */
1038 rotate_left(emem_tree_t *se_tree, emem_tree_node_t *node)
1041 if(node->parent->left==node){
1042 node->parent->left=node->right;
1044 node->parent->right=node->right;
1047 se_tree->tree=node->right;
1049 node->right->parent=node->parent;
1050 node->parent=node->right;
1051 node->right=node->right->left;
1053 node->right->parent=node;
1055 node->parent->left=node;
/* Mirror image of rotate_left(): right rotation about 'node'. */
1059 rotate_right(emem_tree_t *se_tree, emem_tree_node_t *node)
1062 if(node->parent->left==node){
1063 node->parent->left=node->left;
1065 node->parent->right=node->left;
1068 se_tree->tree=node->left;
1070 node->left->parent=node->parent;
1071 node->parent=node->left;
1072 node->left=node->left->right;
1074 node->left->parent=node;
1076 node->parent->right=node;
/* Red-black insert fixup, case 5: parent red, uncle black, node and parent
 * on the same side — recolor and rotate about the grandparent. */
1080 rb_insert_case5(emem_tree_t *se_tree, emem_tree_node_t *node)
1082 emem_tree_node_t *grandparent;
1083 emem_tree_node_t *parent;
1085 parent=emem_tree_parent(node);
1086 grandparent=emem_tree_parent(parent);
1087 parent->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1088 grandparent->u.rb_color=EMEM_TREE_RB_COLOR_RED;
1089 if( (node==parent->left) && (parent==grandparent->left) ){
1090 rotate_right(se_tree, grandparent);
1092 rotate_left(se_tree, grandparent);
/* Case 4: node and parent on opposite sides ("zig-zag") — rotate about the
 * parent to straighten the path, then fall through to case 5. */
1097 rb_insert_case4(emem_tree_t *se_tree, emem_tree_node_t *node)
1099 emem_tree_node_t *grandparent;
1100 emem_tree_node_t *parent;
1102 parent=emem_tree_parent(node);
1103 grandparent=emem_tree_parent(parent);
1107 if( (node==parent->right) && (parent==grandparent->left) ){
1108 rotate_left(se_tree, parent);
1110 } else if( (node==parent->left) && (parent==grandparent->right) ){
1111 rotate_right(se_tree, parent);
1114 rb_insert_case5(se_tree, node);
/* Case 3: red uncle — recolor parent/uncle black and grandparent red, then
 * restart fixup from the grandparent; otherwise go to case 4. */
1118 rb_insert_case3(emem_tree_t *se_tree, emem_tree_node_t *node)
1120 emem_tree_node_t *grandparent;
1121 emem_tree_node_t *parent;
1122 emem_tree_node_t *uncle;
1124 uncle=emem_tree_uncle(node);
1125 if(uncle && (uncle->u.rb_color==EMEM_TREE_RB_COLOR_RED)){
1126 parent=emem_tree_parent(node);
1127 parent->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1128 uncle->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1129 grandparent=emem_tree_grandparent(node);
1130 grandparent->u.rb_color=EMEM_TREE_RB_COLOR_RED;
1131 rb_insert_case1(se_tree, grandparent);
1133 rb_insert_case4(se_tree, node);
/* Case 2: black parent — tree is already valid, nothing to do. */
1138 rb_insert_case2(emem_tree_t *se_tree, emem_tree_node_t *node)
1140 emem_tree_node_t *parent;
1142 parent=emem_tree_parent(node);
1143 /* parent is always non-NULL here */
1144 if(parent->u.rb_color==EMEM_TREE_RB_COLOR_BLACK){
1147 rb_insert_case3(se_tree, node);
/* Case 1 (entry point): node is the root — just color it black. */
1151 rb_insert_case1(emem_tree_t *se_tree, emem_tree_node_t *node)
1153 emem_tree_node_t *parent;
1155 parent=emem_tree_parent(node);
1157 node->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1160 rb_insert_case2(se_tree, node);
1163 /* insert a new node in the tree. if this node matches an already existing node
1164 * then just replace the data for that node */
/* Nodes are allocated via the tree's configured allocator (se_alloc,
 * g_malloc, or a parent tree's allocator). For red-black trees the new leaf
 * is inserted red and rebalanced via rb_insert_case1(). */
1166 emem_tree_insert32(emem_tree_t *se_tree, guint32 key, void *data)
1168 emem_tree_node_t *node;
1172 /* is this the first node ?*/
1174 node=se_tree->malloc(sizeof(emem_tree_node_t));
1175 switch(se_tree->type){
1176 case EMEM_TREE_TYPE_RED_BLACK:
/* The root of a red-black tree is always black. */
1177 node->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1185 node->u.is_subtree = EMEM_TREE_NODE_IS_DATA;
1190 /* it was not the new root so walk the tree until we find where to
1191 * insert this new leaf.
1194 /* this node already exists, so just replace the data pointer*/
1195 if(key==node->key32){
1199 if(key<node->key32) {
1201 /* new node to the left */
1202 emem_tree_node_t *new_node;
1203 new_node=se_tree->malloc(sizeof(emem_tree_node_t));
1204 node->left=new_node;
1205 new_node->parent=node;
1206 new_node->left=NULL;
1207 new_node->right=NULL;
1208 new_node->key32=key;
1209 new_node->data=data;
1210 new_node->u.is_subtree=EMEM_TREE_NODE_IS_DATA;
1217 if(key>node->key32) {
1219 /* new node to the right */
1220 emem_tree_node_t *new_node;
1221 new_node=se_tree->malloc(sizeof(emem_tree_node_t));
1222 node->right=new_node;
1223 new_node->parent=node;
1224 new_node->left=NULL;
1225 new_node->right=NULL;
1226 new_node->key32=key;
1227 new_node->data=data;
1228 new_node->u.is_subtree=EMEM_TREE_NODE_IS_DATA;
1237 /* node will now point to the newly created node */
1238 switch(se_tree->type){
1239 case EMEM_TREE_TYPE_RED_BLACK:
/* New leaves are inserted red, then the tree is rebalanced. */
1240 node->u.rb_color=EMEM_TREE_RB_COLOR_RED;
1241 rb_insert_case1(se_tree, node);
/* Find 'key' in the tree, or — if absent — insert a new node whose data is
 * produced by calling func(ud). Same walk/rebalance structure as
 * emem_tree_insert32(), but existing nodes are returned untouched. Used to
 * lazily create subtrees for multi-word keys. */
1246 static void* lookup_or_insert32(emem_tree_t *se_tree, guint32 key, void*(*func)(void*),void* ud, int is_subtree) {
1247 emem_tree_node_t *node;
1251 /* is this the first node ?*/
1253 node=se_tree->malloc(sizeof(emem_tree_node_t));
1254 switch(se_tree->type){
1255 case EMEM_TREE_TYPE_RED_BLACK:
1256 node->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1263 node->data= func(ud);
1264 node->u.is_subtree = is_subtree;
1269 /* it was not the new root so walk the tree until we find where to
1270 * insert this new leaf.
1273 /* this node already exists, so just return the data pointer*/
1274 if(key==node->key32){
1277 if(key<node->key32) {
1279 /* new node to the left */
1280 emem_tree_node_t *new_node;
1281 new_node=se_tree->malloc(sizeof(emem_tree_node_t));
1282 node->left=new_node;
1283 new_node->parent=node;
1284 new_node->left=NULL;
1285 new_node->right=NULL;
1286 new_node->key32=key;
1287 new_node->data= func(ud);
1288 new_node->u.is_subtree = is_subtree;
1295 if(key>node->key32) {
1297 /* new node to the right */
1298 emem_tree_node_t *new_node;
1299 new_node=se_tree->malloc(sizeof(emem_tree_node_t));
1300 node->right=new_node;
1301 new_node->parent=node;
1302 new_node->left=NULL;
1303 new_node->right=NULL;
1304 new_node->key32=key;
1305 new_node->data= func(ud);
1306 new_node->u.is_subtree = is_subtree;
1315 /* node will now point to the newly created node */
1316 switch(se_tree->type){
1317 case EMEM_TREE_TYPE_RED_BLACK:
1318 node->u.rb_color=EMEM_TREE_RB_COLOR_RED;
1319 rb_insert_case1(se_tree, node);
1326 /* When the se data is released, this entire tree will dissapear as if it
1327 * never existed including all metadata associated with the tree.
/* Tree header is itself se-allocated and NOT registered on se_trees. */
1330 se_tree_create_non_persistent(int type, const char *name)
1332 emem_tree_t *tree_list;
1334 tree_list=se_alloc(sizeof(emem_tree_t));
1335 tree_list->next=NULL;
1336 tree_list->type=type;
1337 tree_list->tree=NULL;
1338 tree_list->name=name;
1339 tree_list->malloc=se_alloc;
1344 /* This tree is permanent and will never be released
/* Header and all nodes come from g_malloc, so nothing here is touched by
 * ep/se pool recycling. */
1347 pe_tree_create(int type, char *name)
1349 emem_tree_t *tree_list;
1351 tree_list=g_malloc(sizeof(emem_tree_t));
1352 tree_list->next=NULL;
1353 tree_list->type=type;
1354 tree_list->tree=NULL;
1355 tree_list->name=name;
1356 tree_list->malloc=(void *(*)(size_t)) g_malloc;
1361 /* create another (sub)tree using the same memory allocation scope
1362 * as the parent tree.
1364 static emem_tree_t *
1365 emem_tree_create_subtree(emem_tree_t *parent_tree, char *name)
1367 emem_tree_t *tree_list;
1369 tree_list=parent_tree->malloc(sizeof(emem_tree_t));
1370 tree_list->next=NULL;
1371 tree_list->type=parent_tree->type;
1372 tree_list->tree=NULL;
1373 tree_list->name=name;
1374 tree_list->malloc=parent_tree->malloc;
/* Adapter for lookup_or_insert32(): 'd' is the parent tree. */
1379 static void* create_sub_tree(void* d) {
1380 emem_tree_t *se_tree = d;
1381 return emem_tree_create_subtree(se_tree, "subtree");
1384 /* insert a new node in the tree. if this node matches an already existing node
1385 * then just replace the data for that node */
/* Multi-word-key insert: 'key' is an array of {length, key-words} segments
 * terminated by a zero-length segment. Each key word selects (creating on
 * demand) a subtree; the final word does a plain emem_tree_insert32().
 * Recurses with the remaining key segments. */
1388 emem_tree_insert32_array(emem_tree_t *se_tree, emem_tree_key_t *key, void *data)
1390 emem_tree_t *next_tree;
/* Sanity limit on a single segment's length (arbitrary upper bound). */
1392 if((key[0].length<1)||(key[0].length>100)){
1393 DISSECTOR_ASSERT_NOT_REACHED();
/* Single remaining word: terminal insert into this tree. */
1395 if((key[0].length==1)&&(key[1].length==0)){
1396 emem_tree_insert32(se_tree, *key[0].key, data);
1400 next_tree=lookup_or_insert32(se_tree, *key[0].key, create_sub_tree, se_tree, EMEM_TREE_NODE_IS_SUBTREE);
1402 if(key[0].length==1){
1408 emem_tree_insert32_array(next_tree, key, data);
/* Multi-word-key lookup, mirroring emem_tree_insert32_array(): descend one
 * subtree per key word; returns NULL semantics of emem_tree_lookup32 at
 * the terminal word. */
1412 emem_tree_lookup32_array(emem_tree_t *se_tree, emem_tree_key_t *key)
1414 emem_tree_t *next_tree;
1416 if((key[0].length<1)||(key[0].length>100)){
1417 DISSECTOR_ASSERT_NOT_REACHED();
1419 if((key[0].length==1)&&(key[1].length==0)){
1420 return emem_tree_lookup32(se_tree, *key[0].key);
1422 next_tree=emem_tree_lookup32(se_tree, *key[0].key);
1426 if(key[0].length==1){
1432 return emem_tree_lookup32_array(next_tree, key);
1436 /* Strings are stored as an array of uint32 containing the string characters
1437 with 4 characters in each uint32.
1438 The first byte of the string is stored as the most significant byte.
1439 If the string is not a multiple of 4 characters in length the last
1440 uint32 containing the string bytes are padded with 0 bytes.
1441 After the uint32's containing the string, there is one final terminator
1442 uint32 with the value 0x00000001
/* Insert string key 'k' with value 'v': pack the string into guint32 words
 * as described above, then hand off to emem_tree_insert32_array(). With
 * EMEM_TREE_STRING_NOCASE the packed bytes are case-folded first. */
1445 emem_tree_insert_string(emem_tree_t* se_tree, const gchar* k, void* v, guint32 flags)
1447 emem_tree_key_t key[2];
1448 guint32 *aligned=NULL;
1449 guint32 len = strlen(k);
/* Number of words: ceil(len/4) for the characters plus one terminator. */
1450 guint32 div = (len+3)/4+1;
1454 aligned = malloc(div * sizeof (guint32));
1456 /* pack the bytes one by one into guint32s */
1458 for (i = 0;i < len;i++) {
1461 ch = (unsigned char)k[i];
1462 if (flags & EMEM_TREE_STRING_NOCASE) {
1474 /* add required padding to the last uint32 */
1480 aligned[i/4-1] = tmp;
1483 /* add the terminator */
1484 aligned[div-1] = 0x00000001;
1486 key[0].length = div;
1487 key[0].key = aligned;
1492 emem_tree_insert32_array(se_tree, key, v);
/* Lookup counterpart of emem_tree_insert_string(): packs 'k' identically
 * and delegates to emem_tree_lookup32_array(). */
1497 emem_tree_lookup_string(emem_tree_t* se_tree, const gchar* k, guint32 flags)
1499 emem_tree_key_t key[2];
1500 guint32 *aligned=NULL;
1501 guint32 len = strlen(k);
1502 guint32 div = (len+3)/4+1;
1507 aligned = malloc(div * sizeof (guint32));
1509 /* pack the bytes one by one into guint32s */
1511 for (i = 0;i < len;i++) {
1514 ch = (unsigned char)k[i];
1515 if (flags & EMEM_TREE_STRING_NOCASE) {
1527 /* add required padding to the last uint32 */
1533 aligned[i/4-1] = tmp;
1536 /* add the terminator */
1537 aligned[div-1] = 0x00000001;
1539 key[0].length = div;
1540 key[0].key = aligned;
1545 ret = emem_tree_lookup32_array(se_tree, key);
/* In-order traversal: recurse left, visit this node (descending into
 * subtree nodes via emem_tree_foreach, otherwise invoking the callback on
 * the data), then recurse right. A TRUE return anywhere stops traversal. */
1551 emem_tree_foreach_nodes(emem_tree_node_t* node, tree_foreach_func callback, void *user_data)
1553 gboolean stop_traverse = FALSE;
1559 stop_traverse = emem_tree_foreach_nodes(node->left, callback, user_data);
1560 if (stop_traverse) {
1565 if (node->u.is_subtree == EMEM_TREE_NODE_IS_SUBTREE) {
1566 stop_traverse = emem_tree_foreach(node->data, callback, user_data);
1568 stop_traverse = callback(node->data, user_data);
1571 if (stop_traverse) {
1576 stop_traverse = emem_tree_foreach_nodes(node->right, callback, user_data);
1577 if (stop_traverse) {
/* Public entry point: walk the whole tree; empty trees are a no-op. */
1586 emem_tree_foreach(emem_tree_t* emem_tree, tree_foreach_func callback, void *user_data)
1591 if(!emem_tree->tree)
1594 return emem_tree_foreach_nodes(emem_tree->tree, callback, user_data)
/* Debug helper: recursively print a node and its children, indented by
 * 'level'. */
1599 emem_tree_print_nodes(emem_tree_node_t* node, int level)
1606 for(i=0;i<level;i++){
/* NOTE(review): "%px" is not a valid conversion (prints the pointer then a
 * literal 'x'); should be %p. Also key32 is a guint32, so %u would be a
 * better match than %d. */
1610 printf("NODE:%p parent:%p left:0x%p right:%px key:%d data:%p\n",node,node->parent,node->left,node->right,node->key32,node->data);
1612 emem_tree_print_nodes(node->left, level+1);
1614 emem_tree_print_nodes(node->right, level+1);
/* Debug helper: print a tree's header fields and then all its nodes. */
1617 emem_print_tree(emem_tree_t* emem_tree)
1622 printf("EMEM tree type:%d name:%s tree:%p\n",emem_tree->type,emem_tree->name,emem_tree->tree);
1624 emem_tree_print_nodes(emem_tree->tree, 0);