2 * Wireshark memory management and garbage collection functions
7 * Wireshark - Network traffic analyzer
8 * By Gerald Combs <gerald@wireshark.org>
9 * Copyright 1998 Gerald Combs
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version 2
14 * of the License, or (at your option) any later version.
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
35 #ifdef HAVE_SYS_TIME_H
44 #include <windows.h> /* VirtualAlloc, VirtualProtect */
45 #include <process.h> /* getpid */
51 #include <wiretap/file_util.h>
55 * Tools like Valgrind and ElectricFence don't work well with memchunks.
56 * Uncomment the defines below to make {ep|se}_alloc() allocate each
57 * object individually.
59 /* #define EP_DEBUG_FREE 1 */
60 /* #define SE_DEBUG_FREE 1 */
62 /* Do we want to use guardpages? if available */
63 #define WANT_GUARD_PAGES 1
65 /* Do we want to use canaries ? */
66 #define DEBUG_USE_CANARIES 1
69 #ifdef WANT_GUARD_PAGES
70 /* Add guard pages at each end of our allocated memory */
71 #if defined(HAVE_SYSCONF) && defined(HAVE_MMAP) && defined(HAVE_MPROTECT) && defined(HAVE_STDINT_H)
73 #include <sys/types.h>
/* Pick the flags for an anonymous mmap(); if neither MAP_ANONYMOUS nor
 * MAP_ANON exists, fall back to a private mapping of /dev/zero (see
 * ANON_FD below). */
75 #if defined(MAP_ANONYMOUS)
76 #define ANON_PAGE_MODE (MAP_ANONYMOUS|MAP_PRIVATE)
77 #elif defined(MAP_ANON)
78 #define ANON_PAGE_MODE (MAP_ANON|MAP_PRIVATE)
80 #define ANON_PAGE_MODE (MAP_PRIVATE) /* have to map /dev/zero */
/* fd for /dev/zero, opened in ep_init_chunk(); only used when anonymous
 * mappings are unavailable. */
85 static int dev_zero_fd;
86 #define ANON_FD dev_zero_fd
90 #define USE_GUARD_PAGES 1
94 /* When required, allocate more memory from the OS in this size chunks */
95 #define EMEM_PACKET_CHUNK_SIZE 10485760 /* 10 MiB per chunk */
97 /* The maximum number of allocations per chunk */
98 #define EMEM_ALLOCS_PER_CHUNK (EMEM_PACKET_CHUNK_SIZE / 512)
101 #ifdef DEBUG_USE_CANARIES
/* A canary is at least EMEM_CANARY_SIZE (8) bytes; emem_canary_pad() can
 * emit up to 15 pad bytes, hence the 15-byte pattern buffer. */
102 #define EMEM_CANARY_SIZE 8
103 #define EMEM_CANARY_DATA_SIZE (EMEM_CANARY_SIZE * 2 - 1)
/* Random canary patterns for the packet-scope (ep) and session-scope (se)
 * pools; filled by emem_canary() at init time. */
104 guint8 ep_canary[EMEM_CANARY_DATA_SIZE], se_canary[EMEM_CANARY_DATA_SIZE];
105 #endif /* DEBUG_USE_CANARIES */
/* One chunk of pool memory.  Allocations are carved off the front of the
 * chunk buffer by advancing free_offset; the *_init fields remember the
 * empty state so ep_free_all()/se_free_all() can recycle a chunk without
 * returning it to the OS. */
107 typedef struct _emem_chunk_t {
108 struct _emem_chunk_t *next;
109 unsigned int amount_free_init; /* bytes available when the chunk is empty */
110 unsigned int amount_free; /* bytes still available */
111 unsigned int free_offset_init; /* first usable offset (past any guard page) */
112 unsigned int free_offset; /* offset of the next allocation */
114 #ifdef DEBUG_USE_CANARIES
115 #if ! defined(EP_DEBUG_FREE) && ! defined(SE_DEBUG_FREE)
116 unsigned int c_count; /* number of canaries recorded below */
117 void *canary[EMEM_ALLOCS_PER_CHUNK]; /* address of each canary written into buf */
118 guint8 cmp_len[EMEM_ALLOCS_PER_CHUNK]; /* byte length of each canary (8..15) */
120 #endif /* DEBUG_USE_CANARIES */
/* Head of one pool: chunks with room left, and chunks already exhausted. */
123 typedef struct _emem_header_t {
124 emem_chunk_t *free_list;
125 emem_chunk_t *used_list;
128 static emem_header_t ep_packet_mem; /* packet-lifetime pool */
129 static emem_header_t se_packet_mem; /* capture/session-lifetime pool */
131 #if !defined(SE_DEBUG_FREE)
/* NOTE(review): a "#if defined(_WIN32)" line appears to be missing from this
 * extract above the Win32-only declarations below — confirm against the
 * original file. */
133 static SYSTEM_INFO sysinfo;
134 static OSVERSIONINFO versinfo;
136 #elif defined(USE_GUARD_PAGES)
137 static intptr_t pagesize; /* system page size, from sysconf() */
138 #endif /* _WIN32 / USE_GUARD_PAGES */
139 #endif /* SE_DEBUG_FREE */
141 #ifdef DEBUG_USE_CANARIES
143 * Set a canary value to be placed between memchunks.
146 emem_canary(guint8 *canary) {
148 #if GLIB_MAJOR_VERSION >= 2
149 static GRand *rand_state = NULL;
153 /* First, use GLib's random function if we have it */
154 #if GLIB_MAJOR_VERSION >= 2
155 if (rand_state == NULL) {
156 rand_state = g_rand_new();
158 for (i = 0; i < EMEM_CANARY_DATA_SIZE; i ++) {
159 canary[i] = (guint8) g_rand_int(rand_state);
165 /* Try /dev/urandom */
166 if ((fp = eth_fopen("/dev/urandom", "r")) != NULL) {
167 sz = fread(canary, EMEM_CANARY_DATA_SIZE, 1, fp);
169 if (sz == EMEM_CANARY_SIZE) {
174 /* Our last resort */
175 srandom(time(NULL) | getpid());
176 for (i = 0; i < EMEM_CANARY_DATA_SIZE; i ++) {
177 canary[i] = (guint8) random();
180 #endif /* GLIB_MAJOR_VERSION >= 2 */
183 #if !defined(SE_DEBUG_FREE)
185 * Given an allocation size, return the amount of padding needed for
/* Returns a pad of 8..15 bytes: enough to round (allocation + pad) up to a
 * multiple of EMEM_CANARY_SIZE while always leaving at least
 * EMEM_CANARY_SIZE bytes of canary after the allocation. */
189 emem_canary_pad (size_t allocation) {
192 pad = EMEM_CANARY_SIZE - (allocation % EMEM_CANARY_SIZE);
/* If the natural padding is under 8 bytes, add a full extra canary's worth. */
193 if (pad < EMEM_CANARY_SIZE)
194 pad += EMEM_CANARY_SIZE;
199 #endif /* DEBUG_USE_CANARIES */
202 /* Initialize the packet-lifetime memory allocation pool.
203 * This function should be called only once when Wireshark or TShark starts
/* Resets both pool lists, picks the random canary pattern, and gathers the
 * platform page-size/guard-page prerequisites. */
209 ep_packet_mem.free_list=NULL;
210 ep_packet_mem.used_list=NULL;
212 #ifdef DEBUG_USE_CANARIES
213 emem_canary(ep_canary);
214 #endif /* DEBUG_USE_CANARIES */
216 #if !defined(SE_DEBUG_FREE)
218 /* Set up our guard page info for Win32 */
219 GetSystemInfo(&sysinfo);
220 pagesize = sysinfo.dwPageSize;
222 /* calling GetVersionEx using the OSVERSIONINFO structure.
223 * OSVERSIONINFOEX requires Win NT4 with SP6 or newer NT Versions.
224 * OSVERSIONINFOEX will fail on Win9x and older NT Versions.
226 * http://msdn.microsoft.com/library/en-us/sysinfo/base/getversionex.asp
227 * http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfo_str.asp
228 * http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfoex_str.asp
230 versinfo.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
231 GetVersionEx(&versinfo);
233 #elif defined(USE_GUARD_PAGES)
234 pagesize = sysconf(_SC_PAGESIZE);
/* /dev/zero is only needed when anonymous mmap() is unavailable; see
 * ANON_PAGE_MODE/ANON_FD above. */
236 dev_zero_fd = open("/dev/zero", O_RDWR);
237 g_assert(dev_zero_fd != -1);
239 #endif /* _WIN32 / USE_GUARD_PAGES */
240 #endif /* SE_DEBUG_FREE */
244 /* Initialize the capture-lifetime memory allocation pool.
245 * This function should be called only once when Wireshark or TShark starts
/* Same as ep_init_chunk() but for the session-scope (se) pool; the platform
 * setup above is not repeated here. */
251 se_packet_mem.free_list=NULL;
252 se_packet_mem.used_list=NULL;
254 #ifdef DEBUG_USE_CANARIES
255 emem_canary(se_canary);
256 #endif /* DEBUG_USE_CANARIES */
259 #if !defined(SE_DEBUG_FREE)
/* Allocate one new EMEM_PACKET_CHUNK_SIZE chunk and push it onto
 * '*free_list'.  With guard pages enabled, one page at each end of the
 * mapping is made inaccessible so over/under-runs fault immediately;
 * amount_free/free_offset are initialized to the usable region between
 * the two guard pages. */
261 emem_create_chunk(emem_chunk_t **free_list) {
264 char *buf_end, *prot1, *prot2;
266 #elif defined(USE_GUARD_PAGES)
268 char *buf_end, *prot1, *prot2;
269 #endif /* _WIN32 / USE_GUARD_PAGES */
270 /* we dont have any free data, so we must allocate a new one */
273 npc = g_malloc(sizeof(emem_chunk_t));
275 #ifdef DEBUG_USE_CANARIES
276 #if ! defined(EP_DEBUG_FREE) && ! defined(SE_DEBUG_FREE)
279 #endif /* DEBUG_USE_CANARIES */
284 * MSDN documents VirtualAlloc/VirtualProtect at
285 * http://msdn.microsoft.com/library/en-us/memory/base/creating_guard_pages.asp
288 /* XXX - is MEM_COMMIT|MEM_RESERVE correct? */
289 npc->buf = VirtualAlloc(NULL, EMEM_PACKET_CHUNK_SIZE,
290 MEM_COMMIT|MEM_RESERVE, PAGE_READWRITE);
291 g_assert(npc->buf != NULL);
292 buf_end = npc->buf + EMEM_PACKET_CHUNK_SIZE;
294 /* Align our guard pages on page-sized boundaries */
/* NOTE(review): casting a pointer through (int) truncates on 64-bit
 * Windows (LLP64, where int is 32 bits while pointers are 64).  The POSIX
 * branch below correctly uses (intptr_t); these two lines should as well. */
295 prot1 = (char *) ((((int) npc->buf + pagesize - 1) / pagesize) * pagesize);
296 prot2 = (char *) ((((int) buf_end - (1 * pagesize)) / pagesize) * pagesize);
298 ret = VirtualProtect(prot1, pagesize, PAGE_NOACCESS, &oldprot);
/* On Win9x (VER_PLATFORM_WIN32_WINDOWS) VirtualProtect may fail; tolerated. */
299 g_assert(ret != 0 || versinfo.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS);
300 ret = VirtualProtect(prot2, pagesize, PAGE_NOACCESS, &oldprot);
301 g_assert(ret != 0 || versinfo.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS);
303 npc->amount_free_init = prot2 - prot1 - pagesize;
304 npc->amount_free = npc->amount_free_init;
305 npc->free_offset_init = (prot1 - npc->buf) + pagesize;
306 npc->free_offset = npc->free_offset_init;
308 #elif defined(USE_GUARD_PAGES)
309 npc->buf = mmap(NULL, EMEM_PACKET_CHUNK_SIZE,
310 PROT_READ|PROT_WRITE, ANON_PAGE_MODE, ANON_FD, 0);
311 g_assert(npc->buf != MAP_FAILED);
312 buf_end = npc->buf + EMEM_PACKET_CHUNK_SIZE;
314 /* Align our guard pages on page-sized boundaries */
315 prot1 = (char *) ((((intptr_t) npc->buf + pagesize - 1) / pagesize) * pagesize);
316 prot2 = (char *) ((((intptr_t) buf_end - (1 * pagesize)) / pagesize) * pagesize);
317 ret = mprotect(prot1, pagesize, PROT_NONE);
319 ret = mprotect(prot2, pagesize, PROT_NONE);
322 npc->amount_free_init = prot2 - prot1 - pagesize;
323 npc->amount_free = npc->amount_free_init;
324 npc->free_offset_init = (prot1 - npc->buf) + pagesize;
325 npc->free_offset = npc->free_offset_init;
327 #else /* Is there a draft in here? */
/* No guard-page support at all: plain heap chunk, whole buffer usable. */
328 npc->amount_free_init = EMEM_PACKET_CHUNK_SIZE;
329 npc->amount_free = npc->amount_free_init;
330 npc->free_offset_init = 0;
331 npc->free_offset = npc->free_offset_init;
332 npc->buf = g_malloc(EMEM_PACKET_CHUNK_SIZE);
333 #endif /* USE_GUARD_PAGES */
338 /* allocate 'size' amount of memory with an allocation lifetime until the
/* Packet-scope allocator: memory is reclaimed en masse by ep_free_all(),
 * never individually.  Normal mode bump-allocates from the head chunk of
 * ep_packet_mem; EP_DEBUG_FREE mode g_malloc()s each request separately so
 * memory debuggers can track them. */
342 ep_alloc(size_t size)
345 #ifndef EP_DEBUG_FREE
346 #ifdef DEBUG_USE_CANARIES
/* pad (8..15 bytes) is added to the request and filled with the canary
 * pattern after the caller's data. */
348 guint8 pad = emem_canary_pad(size);
351 #endif /* DEBUG_USE_CANARIES */
352 emem_chunk_t *free_list;
355 #ifndef EP_DEBUG_FREE
356 /* Round up to an 8 byte boundary. Make sure we have at least
357 * 8 pad bytes for our canary.
361 /* make sure we dont try to allocate too much (arbitrary limit) */
362 DISSECTOR_ASSERT(size<(EMEM_PACKET_CHUNK_SIZE>>2));
364 emem_create_chunk(&ep_packet_mem.free_list);
366 /* oops, we need to allocate more memory to serve this request
367 * than we have free. move this node to the used list and try again
/* Also retire the head chunk when its canary-bookkeeping arrays are full. */
369 if(size>ep_packet_mem.free_list->amount_free
370 #ifdef DEBUG_USE_CANARIES
371 || ep_packet_mem.free_list->c_count >= EMEM_ALLOCS_PER_CHUNK
372 #endif /* DEBUG_USE_CANARIES */
375 npc=ep_packet_mem.free_list;
376 ep_packet_mem.free_list=ep_packet_mem.free_list->next;
377 npc->next=ep_packet_mem.used_list;
378 ep_packet_mem.used_list=npc;
381 emem_create_chunk(&ep_packet_mem.free_list);
383 free_list = ep_packet_mem.free_list;
/* Bump-allocate from the front of the head chunk. */
385 buf = free_list->buf + free_list->free_offset;
387 free_list->amount_free -= size;
388 free_list->free_offset += size;
390 #ifdef DEBUG_USE_CANARIES
/* Write the canary into the pad bytes at the tail of this allocation and
 * record its address/length so ep_free_all() can verify it. */
391 cptr = (char *)buf + size - pad;
392 memcpy(cptr, &ep_canary, pad);
393 free_list->canary[free_list->c_count] = cptr;
394 free_list->cmp_len[free_list->c_count] = pad;
395 free_list->c_count++;
396 #endif /* DEBUG_USE_CANARIES */
398 #else /* EP_DEBUG_FREE */
/* Debug mode: one g_malloc()ed chunk per allocation, tracked on used_list. */
401 npc=g_malloc(sizeof(emem_chunk_t));
402 npc->next=ep_packet_mem.used_list;
403 npc->amount_free=size;
405 npc->buf=g_malloc(size);
407 ep_packet_mem.used_list=npc;
408 #endif /* EP_DEBUG_FREE */
412 /* allocate 'size' amount of memory with an allocation lifetime until the
/* Session-scope allocator: identical algorithm to ep_alloc() but draws from
 * se_packet_mem and is reclaimed by se_free_all() when the capture file is
 * closed. */
416 se_alloc(size_t size)
419 #ifndef SE_DEBUG_FREE
420 #ifdef DEBUG_USE_CANARIES
422 guint8 pad = emem_canary_pad(size);
425 #endif /* DEBUG_USE_CANARIES */
426 emem_chunk_t *free_list;
429 #ifndef SE_DEBUG_FREE
430 /* Round up to an 8 byte boundary. Make sure we have at least
431 * 8 pad bytes for our canary.
435 /* make sure we dont try to allocate too much (arbitrary limit) */
436 DISSECTOR_ASSERT(size<(EMEM_PACKET_CHUNK_SIZE>>2));
438 emem_create_chunk(&se_packet_mem.free_list);
440 /* oops, we need to allocate more memory to serve this request
441 * than we have free. move this node to the used list and try again
443 if(size>se_packet_mem.free_list->amount_free
444 #ifdef DEBUG_USE_CANARIES
445 || se_packet_mem.free_list->c_count >= EMEM_ALLOCS_PER_CHUNK
446 #endif /* DEBUG_USE_CANARIES */
449 npc=se_packet_mem.free_list;
450 se_packet_mem.free_list=se_packet_mem.free_list->next;
451 npc->next=se_packet_mem.used_list;
452 se_packet_mem.used_list=npc;
455 emem_create_chunk(&se_packet_mem.free_list);
457 free_list = se_packet_mem.free_list;
/* Bump-allocate from the front of the head chunk. */
459 buf = free_list->buf + free_list->free_offset;
461 free_list->amount_free -= size;
462 free_list->free_offset += size;
464 #ifdef DEBUG_USE_CANARIES
/* Tail canary, verified later by se_free_all(). */
465 cptr = (char *)buf + size - pad;
466 memcpy(cptr, &se_canary, pad);
467 free_list->canary[free_list->c_count] = cptr;
468 free_list->cmp_len[free_list->c_count] = pad;
469 free_list->c_count++;
470 #endif /* DEBUG_USE_CANARIES */
472 #else /* SE_DEBUG_FREE */
/* Debug mode: one g_malloc()ed chunk per allocation. */
475 npc=g_malloc(sizeof(emem_chunk_t));
476 npc->next=se_packet_mem.used_list;
477 npc->amount_free=size;
479 npc->buf=g_malloc(size);
481 se_packet_mem.used_list=npc;
482 #endif /* SE_DEBUG_FREE */
/* Packet-scope zeroed allocation. */
488 void* ep_alloc0(size_t size) {
489 return memset(ep_alloc(size),'\0',size);
/* Packet-scope strdup(). */
492 gchar* ep_strdup(const gchar* src) {
493 guint len = strlen(src);
/* strncpy() here copies exactly len bytes; the NUL terminator is
 * presumably added explicitly afterwards (line not visible in this
 * extract) — confirm against the original file. */
496 dst = strncpy(ep_alloc(len+1), src, len);
/* Packet-scope strndup(): copy at most len chars plus a terminator. */
503 gchar* ep_strndup(const gchar* src, size_t len) {
504 gchar* dst = ep_alloc(len+1);
/* NOTE(review): the operand order evaluates src[i] before checking
 * i < len, so src[len] is read when the string is not shorter than len;
 * "i < len && src[i]" would be the safe order. */
507 for (i = 0; src[i] && i < len; i++)
/* Packet-scope memdup(): copies len raw bytes. */
515 void* ep_memdup(const void* src, size_t len) {
516 return memcpy(ep_alloc(len), src, len);
/* Packet-scope vasprintf-style formatter. */
519 gchar* ep_strdup_vprintf(const gchar* fmt, va_list ap) {
/* g_printf_string_upper_bound() gives an upper bound INCLUDING the
 * terminating NUL for the formatted result. */
526 len = g_printf_string_upper_bound(fmt, ap);
528 dst = ep_alloc(len+1);
529 g_vsnprintf (dst, len, fmt, ap2);
/* Packet-scope printf-style formatter; wraps ep_strdup_vprintf(). */
535 gchar* ep_strdup_printf(const gchar* fmt, ...) {
540 dst = ep_strdup_vprintf(fmt, ap);
/* Packet-scope g_strsplit() analogue: splits 'string' on 'sep' into at most
 * max_tokens pieces, returning a NULL-terminated vector whose strings all
 * live in the working copy made by ep_strdup() below.  A small state
 * machine (AT_START/IN_PAD/IN_TOKEN) walks the copy after each separator
 * occurrence has been NUL-ed out. */
545 gchar** ep_strsplit(const gchar* string, const gchar* sep, int max_tokens) {
553 enum { AT_START, IN_PAD, IN_TOKEN } state;
561 s = splitted = ep_strdup(string);
562 str_len = strlen(splitted);
563 sep_len = strlen(sep);
/* Non-positive max_tokens means "no limit". */
565 if (max_tokens < 1) max_tokens = INT_MAX;
/* First pass: overwrite every separator occurrence so tokens become
 * NUL-terminated in place. */
570 while (tokens <= (guint)max_tokens && ( s = strstr(s,sep) )) {
573 for(i=0; i < sep_len; i++ )
580 vec = ep_alloc_array(gchar*,tokens+1);
/* Second pass: record the start of each token into the vector. */
583 for (i=0; i< str_len; i++) {
586 switch(splitted[i]) {
591 vec[curr_tok] = &(splitted[i]);
597 switch(splitted[i]) {
604 switch(splitted[i]) {
606 vec[curr_tok] = &(splitted[i]);
615 vec[curr_tok] = NULL;
/* Session-scope zeroed allocation. */
622 void* se_alloc0(size_t size) {
623 return memset(se_alloc(size),'\0',size);
626 /* If str is NULL, just return the string "<NULL>" so that the callers dont
627 * have to bother checking it.
629 gchar* se_strdup(const gchar* src) {
/* See the comment above: a NULL src yields "<NULL>" (handling lines not
 * visible in this extract). */
638 dst = strncpy(se_alloc(len+1), src, len);
/* Session-scope strndup(). */
645 gchar* se_strndup(const gchar* src, size_t len) {
646 gchar* dst = se_alloc(len+1);
/* NOTE(review): same operand-order issue as ep_strndup() — src[i] is
 * evaluated before i < len, reading src[len] for strings of length >= len;
 * "i < len && src[i]" would be the safe order. */
649 for (i = 0; src[i] && i < len; i++)
/* Session-scope memdup(). */
657 void* se_memdup(const void* src, size_t len) {
658 return memcpy(se_alloc(len), src, len);
/* Session-scope vasprintf-style formatter. */
661 gchar* se_strdup_vprintf(const gchar* fmt, va_list ap) {
668 len = g_printf_string_upper_bound(fmt, ap);
670 dst = se_alloc(len+1);
671 g_vsnprintf (dst, len, fmt, ap2);
/* Session-scope printf-style formatter; wraps se_strdup_vprintf(). */
677 gchar* se_strdup_printf(const gchar* fmt, ...) {
682 dst = se_strdup_vprintf(fmt, ap);
687 /* release all allocated memory back to the pool.
/* Called once per packet: recycles every chunk of the ep pool without
 * returning memory to the OS (normal mode), after verifying every recorded
 * canary.  EP_DEBUG_FREE mode instead g_free()s each per-allocation chunk. */
693 #ifndef EP_DEBUG_FREE
694 #ifdef DEBUG_USE_CANARIES
696 #endif /* DEBUG_USE_CANARIES */
699 /* move all used chunks over to the free list */
700 while(ep_packet_mem.used_list){
701 npc=ep_packet_mem.used_list;
702 ep_packet_mem.used_list=ep_packet_mem.used_list->next;
703 npc->next=ep_packet_mem.free_list;
704 ep_packet_mem.free_list=npc;
707 /* clear them all out */
708 npc = ep_packet_mem.free_list;
709 while (npc != NULL) {
710 #ifndef EP_DEBUG_FREE
711 #ifdef DEBUG_USE_CANARIES
/* Any canary that no longer matches the pattern means some allocation
 * overran its buffer during this packet's dissection. */
712 for (i = 0; i < npc->c_count; i++) {
713 if (memcmp(npc->canary[i], &ep_canary, npc->cmp_len[i]) != 0)
714 g_error("Per-packet memory corrupted.");
717 #endif /* DEBUG_USE_CANARIES */
/* Reset the chunk to its empty state for reuse. */
718 npc->amount_free = npc->amount_free_init;
719 npc->free_offset = npc->free_offset_init;
721 #else /* EP_DEBUG_FREE */
722 emem_chunk_t *next = npc->next;
727 #endif /* EP_DEBUG_FREE */
734 /* release all allocated memory back to the pool.
/* Called when a capture file is closed: same chunk-recycling scheme as
 * ep_free_all(), plus resetting every registered se tree (their nodes
 * lived in this pool and are now gone). */
740 se_tree_t *se_tree_list;
741 #ifndef SE_DEBUG_FREE
742 #ifdef DEBUG_USE_CANARIES
744 #endif /* DEBUG_USE_CANARIES */
748 /* move all used chunks over to the free list */
749 while(se_packet_mem.used_list){
750 npc=se_packet_mem.used_list;
751 se_packet_mem.used_list=se_packet_mem.used_list->next;
752 npc->next=se_packet_mem.free_list;
753 se_packet_mem.free_list=npc;
756 /* clear them all out */
757 npc = se_packet_mem.free_list;
758 while (npc != NULL) {
759 #ifndef SE_DEBUG_FREE
760 #ifdef DEBUG_USE_CANARIES
/* A mismatched canary means something overran a session-scope buffer. */
761 for (i = 0; i < npc->c_count; i++) {
762 if (memcmp(npc->canary[i], &se_canary, npc->cmp_len[i]) != 0)
763 g_error("Per-session memory corrupted.");
766 #endif /* DEBUG_USE_CANARIES */
767 npc->amount_free = npc->amount_free_init;
768 npc->free_offset = npc->free_offset_init;
770 #else /* SE_DEBUG_FREE */
771 emem_chunk_t *next = npc->next;
776 #endif /* SE_DEBUG_FREE */
783 /* release/reset all se allocated trees */
/* The tree nodes themselves were se_alloc()ed and are already gone; only
 * the root pointers need clearing. */
784 for(se_tree_list=se_trees;se_tree_list;se_tree_list=se_tree_list->next){
785 se_tree_list->tree=NULL;
/* Create an empty packet-scope stack: a pointer to the current top frame,
 * initialized with one zeroed sentinel frame. */
790 ep_stack_t ep_stack_new(void) {
791 ep_stack_t s = ep_new(struct _ep_stack_frame_t*);
792 *s = ep_new0(struct _ep_stack_frame_t);
796 /* for ep_stack_t we'll keep the popped frames so we reuse them instead
797 of allocating new ones.
/* Push 'data': reuse the frame above the current top if one was kept from
 * an earlier pop, otherwise allocate a fresh frame from the ep pool. */
801 void* ep_stack_push(ep_stack_t stack, void* data) {
802 struct _ep_stack_frame_t* frame;
803 struct _ep_stack_frame_t* head = (*stack);
808 frame = ep_new(struct _ep_stack_frame_t);
814 frame->payload = data;
/* Pop: move the top pointer down one frame (the old frame stays linked
 * above for reuse) and return the popped payload. */
820 void* ep_stack_pop(ep_stack_t stack) {
822 if ((*stack)->below) {
823 (*stack) = (*stack)->below;
824 return (*stack)->above->payload;
/* Debug helper: recursively dump one red-black tree node (color, key and
 * raw pointers), indented by depth. */
833 void print_tree_item(se_tree_node_t *node, int level){
835 for(i=0;i<level;i++){
/* NOTE(review): casting pointers to (int) for "%08x" truncates addresses
 * on 64-bit platforms; "%p" with the pointer itself would be correct. */
838 printf("%s KEY:0x%08x node:0x%08x parent:0x%08x left:0x%08x right:0x%08x\n",node->u.rb_color==SE_TREE_RB_COLOR_BLACK?"BLACK":"RED",node->key32,(int)node,(int)node->parent,(int)node->left,(int)node->right);
840 print_tree_item(node->left,level+1);
842 print_tree_item(node->right,level+1);
/* Debug helper: dump a whole tree starting from its root. */
845 void print_tree(se_tree_node_t *node){
852 print_tree_item(node,0);
858 /* routines to manage se allocated red-black trees */
/* Global registry of persistent se trees, walked by se_free_all() to reset
 * their roots when the session pool is released. */
859 se_tree_t *se_trees=NULL;
/* Create a persistent se tree and register it on se_trees.  The header
 * itself is heap-allocated (survives se_free_all()); only the nodes come
 * from the se pool via the 'malloc' function pointer. */
862 se_tree_create(int type, char *name)
864 se_tree_t *tree_list;
/* NOTE(review): bare malloc() with no NULL check, unlike the g_malloc()
 * used elsewhere in this file; 'name' is stored by pointer, not copied,
 * so callers must pass a string with static lifetime. */
866 tree_list=malloc(sizeof(se_tree_t));
867 tree_list->next=se_trees;
868 tree_list->type=type;
869 tree_list->tree=NULL;
870 tree_list->name=name;
871 tree_list->malloc=se_alloc;
/* Exact-match lookup of a 32-bit key; standard binary-search descent
 * (interior comparison/descent lines not visible in this extract). */
880 se_tree_lookup32(se_tree_t *se_tree, guint32 key)
882 se_tree_node_t *node;
887 if(key==node->key32){
/* Lookup of the largest key that is <= 'key' ("less-or-equal" lookup):
 * descend as for an exact match, then if no exact node exists walk back up
 * toward an ancestor whose key is smaller than the search key. */
903 se_tree_lookup32_le(se_tree_t *se_tree, guint32 key)
905 se_tree_node_t *node;
915 if(key==node->key32){
937 /* If we are still at the root of the tree this means that this node
938 * is either smaller thant the search key and then we return this
939 * node or else there is no smaller key availabel and then
950 if(node->parent->left==node){
954 /* if this is a left child and its key is smaller than
955 * the search key, then this is the node we want.
959 /* if this is a left child and its key is bigger than
960 * the search key, we have to check if any
961 * of our ancestors are smaller than the search key.
975 /* if this is the right child and its key is smaller
976 * than the search key then this is the one we want.
980 /* if this is the right child and its key is larger
981 * than the search key then our parent is the one we
984 return node->parent->data;
/* Navigation helper: a node's parent (NULL for the root). */
991 static inline se_tree_node_t *
992 emem_tree_parent(se_tree_node_t *node)
/* Navigation helper: a node's grandparent, via two parent hops. */
997 static inline se_tree_node_t *
998 emem_tree_grandparent(se_tree_node_t *node)
1000 se_tree_node_t *parent;
1002 parent=emem_tree_parent(node);
1004 return parent->parent;
/* Navigation helper: the parent's sibling, or NULL when there is no
 * grandparent. */
1008 static inline se_tree_node_t *
1009 emem_tree_uncle(se_tree_node_t *node)
1011 se_tree_node_t *parent, *grandparent;
1013 parent=emem_tree_parent(node);
1017 grandparent=emem_tree_parent(parent);
1021 if(parent==grandparent->left){
1022 return grandparent->right;
1024 return grandparent->left;
/* Forward declarations: the insert-rebalance cases recurse into each other. */
1027 static inline void rb_insert_case1(se_tree_t *se_tree, se_tree_node_t *node);
1028 static inline void rb_insert_case2(se_tree_t *se_tree, se_tree_node_t *node);
/* Standard left rotation around 'node'; updates the appropriate child slot
 * of node's parent (or the tree root when node was the root). */
1031 rotate_left(se_tree_t *se_tree, se_tree_node_t *node)
1034 if(node->parent->left==node){
1035 node->parent->left=node->right;
1037 node->parent->right=node->right;
1040 se_tree->tree=node->right;
1042 node->right->parent=node->parent;
1043 node->parent=node->right;
1044 node->right=node->right->left;
1046 node->right->parent=node;
1048 node->parent->left=node;
/* Mirror image of rotate_left(). */
1052 rotate_right(se_tree_t *se_tree, se_tree_node_t *node)
1055 if(node->parent->left==node){
1056 node->parent->left=node->left;
1058 node->parent->right=node->left;
1061 se_tree->tree=node->left;
1063 node->left->parent=node->parent;
1064 node->parent=node->left;
1065 node->left=node->left->right;
1067 node->left->parent=node;
1069 node->parent->right=node;
/* Insert case 5: node and parent are both "outer" children; recolor and
 * rotate the grandparent to restore the red-black invariants. */
1073 rb_insert_case5(se_tree_t *se_tree, se_tree_node_t *node)
1075 se_tree_node_t *grandparent;
1076 se_tree_node_t *parent;
1078 parent=emem_tree_parent(node);
1079 grandparent=emem_tree_parent(parent);
1080 parent->u.rb_color=SE_TREE_RB_COLOR_BLACK;
1081 grandparent->u.rb_color=SE_TREE_RB_COLOR_RED;
1082 if( (node==parent->left) && (parent==grandparent->left) ){
1083 rotate_right(se_tree, grandparent);
1085 rotate_left(se_tree, grandparent);
/* Insert case 4: node is an "inner" child; rotate the parent to transform
 * into case 5. */
1090 rb_insert_case4(se_tree_t *se_tree, se_tree_node_t *node)
1092 se_tree_node_t *grandparent;
1093 se_tree_node_t *parent;
1095 parent=emem_tree_parent(node);
1096 grandparent=emem_tree_parent(parent);
1100 if( (node==parent->right) && (parent==grandparent->left) ){
1101 rotate_left(se_tree, parent);
1103 } else if( (node==parent->left) && (parent==grandparent->right) ){
1104 rotate_right(se_tree, parent);
1107 rb_insert_case5(se_tree, node);
/* Insert case 3: red uncle — recolor parent/uncle black and grandparent
 * red, then restart rebalancing from the grandparent. */
1111 rb_insert_case3(se_tree_t *se_tree, se_tree_node_t *node)
1113 se_tree_node_t *grandparent;
1114 se_tree_node_t *parent;
1115 se_tree_node_t *uncle;
1117 uncle=emem_tree_uncle(node);
1118 if(uncle && (uncle->u.rb_color==SE_TREE_RB_COLOR_RED)){
1119 parent=emem_tree_parent(node);
1120 parent->u.rb_color=SE_TREE_RB_COLOR_BLACK;
1121 uncle->u.rb_color=SE_TREE_RB_COLOR_BLACK;
1122 grandparent=emem_tree_grandparent(node);
1123 grandparent->u.rb_color=SE_TREE_RB_COLOR_RED;
1124 rb_insert_case1(se_tree, grandparent);
1126 rb_insert_case4(se_tree, node);
/* Insert case 2: black parent — tree is still valid, nothing to do. */
1131 rb_insert_case2(se_tree_t *se_tree, se_tree_node_t *node)
1133 se_tree_node_t *parent;
1135 parent=emem_tree_parent(node);
1136 /* parent is always non-NULL here */
1137 if(parent->u.rb_color==SE_TREE_RB_COLOR_BLACK){
1140 rb_insert_case3(se_tree, node);
/* Insert case 1: new root is simply painted black; otherwise fall through
 * to case 2. */
1144 rb_insert_case1(se_tree_t *se_tree, se_tree_node_t *node)
1146 se_tree_node_t *parent;
1148 parent=emem_tree_parent(node);
1150 node->u.rb_color=SE_TREE_RB_COLOR_BLACK;
1153 rb_insert_case2(se_tree, node);
1156 /* insert a new node in the tree. if this node matches an already existing node
1157 * then just replace the data for that node */
/* Nodes are allocated through se_tree->malloc (the se pool), so they are
 * never individually freed — the whole tree vanishes with se_free_all(). */
1159 se_tree_insert32(se_tree_t *se_tree, guint32 key, void *data)
1161 se_tree_node_t *node;
1165 /* is this the first node ?*/
/* Empty tree: the first node becomes a black root. */
1167 node=se_tree->malloc(sizeof(se_tree_node_t));
1168 switch(se_tree->type){
1169 case SE_TREE_TYPE_RED_BLACK:
1170 node->u.rb_color=SE_TREE_RB_COLOR_BLACK;
1182 /* it was not the new root so walk the tree until we find where to
1183 * insert this new leaf.
1186 /* this node already exists, so just replace the data pointer*/
1187 if(key==node->key32){
1191 if(key<node->key32) {
1193 /* new node to the left */
1194 se_tree_node_t *new_node;
1195 new_node=se_tree->malloc(sizeof(se_tree_node_t));
1196 node->left=new_node;
1197 new_node->parent=node;
1198 new_node->left=NULL;
1199 new_node->right=NULL;
1200 new_node->key32=key;
1201 new_node->data=data;
1208 if(key>node->key32) {
1210 /* new node to the right */
1211 se_tree_node_t *new_node;
1212 new_node=se_tree->malloc(sizeof(se_tree_node_t));
1213 node->right=new_node;
1214 new_node->parent=node;
1215 new_node->left=NULL;
1216 new_node->right=NULL;
1217 new_node->key32=key;
1218 new_node->data=data;
1227 /* node will now point to the newly created node */
/* New leaves start red; rb_insert_case1() then rebalances/recolors. */
1228 switch(se_tree->type){
1229 case SE_TREE_TYPE_RED_BLACK:
1230 node->u.rb_color=SE_TREE_RB_COLOR_RED;
1231 rb_insert_case1(se_tree, node);
/* Find the node for 'key', or insert a new one whose data is produced by
 * func(ud); in both cases return the node's data.  Mirrors
 * se_tree_insert32() except that existing data is returned rather than
 * replaced. */
1236 static void* lookup_or_insert32(se_tree_t *se_tree, guint32 key, void*(*func)(void*),void* ud) {
1237 se_tree_node_t *node;
1241 /* is this the first node ?*/
1243 node=se_tree->malloc(sizeof(se_tree_node_t));
1244 switch(se_tree->type){
1245 case SE_TREE_TYPE_RED_BLACK:
1246 node->u.rb_color=SE_TREE_RB_COLOR_BLACK;
1253 node->data= func(ud);
1258 /* it was not the new root so walk the tree until we find where to
1259 * insert this new leaf.
1262 /* this node already exists, so just return the data pointer*/
1263 if(key==node->key32){
1266 if(key<node->key32) {
1268 /* new node to the left */
1269 se_tree_node_t *new_node;
1270 new_node=se_tree->malloc(sizeof(se_tree_node_t));
1271 node->left=new_node;
1272 new_node->parent=node;
1273 new_node->left=NULL;
1274 new_node->right=NULL;
1275 new_node->key32=key;
1276 new_node->data= func(ud);
1283 if(key>node->key32) {
1285 /* new node to the right */
1286 se_tree_node_t *new_node;
1287 new_node=se_tree->malloc(sizeof(se_tree_node_t));
1288 node->right=new_node;
1289 new_node->parent=node;
1290 new_node->left=NULL;
1291 new_node->right=NULL;
1292 new_node->key32=key;
1293 new_node->data= func(ud);
1302 /* node will now point to the newly created node */
1303 switch(se_tree->type){
1304 case SE_TREE_TYPE_RED_BLACK:
1305 node->u.rb_color=SE_TREE_RB_COLOR_RED;
1306 rb_insert_case1(se_tree, node);
1313 /* When the se data is released, this entire tree will dissapear as if it
1314 * never existed including all metadata associated with the tree.
/* Unlike se_tree_create(), the header itself is se_alloc()ed and the tree
 * is NOT linked onto se_trees — it needs no reset because it vanishes with
 * the pool. */
1317 se_tree_create_non_persistent(int type, char *name)
1319 se_tree_t *tree_list;
1321 tree_list=se_alloc(sizeof(se_tree_t));
1322 tree_list->next=NULL;
1323 tree_list->type=type;
1324 tree_list->tree=NULL;
1325 tree_list->name=name;
1326 tree_list->malloc=se_alloc;
/* lookup_or_insert32() constructor callback: builds the next level of a
 * multi-part-key tree. 'd' is the parent se_tree_t. */
1331 static void* create_sub_tree(void* d) {
1332 se_tree_t *se_tree = d;
1333 return se_tree_create_non_persistent(se_tree->type, "subtree");
1336 /* insert a new node in the tree. if this node matches an already existing node
1337 * then just replace the data for that node */
/* Compound-key insert: 'key' is an array of {length, guint32*} segments
 * terminated by a zero-length entry.  Each guint32 of the key selects or
 * creates a nested subtree; the final guint32 stores 'data' with a plain
 * se_tree_insert32(). Recursive. */
1340 se_tree_insert32_array(se_tree_t *se_tree, se_tree_key_t *key, void *data)
1342 se_tree_t *next_tree;
/* Sanity-check the leading segment (1..100 words). */
1344 if((key[0].length<1)||(key[0].length>100)){
1345 DISSECTOR_ASSERT_NOT_REACHED();
/* Single remaining word: store directly in this tree. */
1347 if((key[0].length==1)&&(key[1].length==0)){
1348 se_tree_insert32(se_tree, *key[0].key, data);
1352 next_tree=lookup_or_insert32(se_tree, *key[0].key, create_sub_tree, se_tree);
1354 if(key[0].length==1){
1360 se_tree_insert32_array(next_tree, key, data);
/* Compound-key lookup: mirrors se_tree_insert32_array(), descending one
 * subtree per key word; returns NULL-ish via the underlying lookups when a
 * level is missing. */
1364 se_tree_lookup32_array(se_tree_t *se_tree, se_tree_key_t *key)
1366 se_tree_t *next_tree;
1368 if((key[0].length<1)||(key[0].length>100)){
1369 DISSECTOR_ASSERT_NOT_REACHED();
1371 if((key[0].length==1)&&(key[1].length==0)){
1372 return se_tree_lookup32(se_tree, *key[0].key);
1374 next_tree=se_tree_lookup32(se_tree, *key[0].key);
1378 if(key[0].length==1){
1384 return se_tree_lookup32_array(next_tree, key);
/* String-keyed insert: encodes the string as a compound 32-bit key —
 * [0] the length, [1] the whole 4-byte words of the string, [2] a
 * 'residual' word packing the 1-3 trailing bytes — then delegates to
 * se_tree_insert32_array(). */
1388 void se_tree_insert_string(se_string_hash_t* se_tree, const gchar* k, void* v) {
1389 guint32 len = strlen(k);
/* Number of complete 4-byte words in the string (len is assumed > 0 here;
 * len == 0 would make this underflow — presumably callers never pass ""
 * — TODO confirm). */
1390 guint32 div = (len-1)/4;
1391 guint32 residual = 0;
1392 se_tree_key_t key[] = {
1400 key[1].length = div;
/* NOTE(review): casting gchar* to guint32* assumes suitable alignment and
 * breaks strict aliasing; the tree only needs a consistent encoding, but
 * this is fragile on alignment-picky platforms. */
1401 key[1].key = (guint32*)(&k[0]);
1402 key[2].key = &residual;
/* When the string has no trailing partial word, drop the residual segment
 * by shifting the terminator up. */
1405 key[1].length = key[2].length;
1406 key[1].key = key[2].key;
/* Pack the 1-3 leftover bytes into 'residual' (switch with fallthrough;
 * the switch header and div scaling lines are not visible in this
 * extract). */
1415 residual |= ( k[div+3] << 24 );
1417 residual |= ( k[div+2] << 16 );
1419 residual |= ( k[div+1] << 8 );
1425 se_tree_insert32_array(se_tree,key,v);
/* String-keyed lookup: must build the key EXACTLY like
 * se_tree_insert_string() above, then delegate to
 * se_tree_lookup32_array(). */
1428 void* se_tree_lookup_string(se_string_hash_t* se_tree, const gchar* k) {
1429 guint32 len = strlen(k);
1430 guint32 div = (len-1)/4;
1431 guint32 residual = 0;
1432 se_tree_key_t key[] = {
1440 key[1].length = div;
1441 key[1].key = (guint32*)(&k[0]);
1442 key[2].key = &residual;
1445 key[1].length = key[2].length;
1446 key[1].key = key[2].key;
1455 residual |= k[div+3] << 24;
1457 residual |= k[div+2] << 16;
1459 residual |= k[div+1] << 8;
1465 return se_tree_lookup32_array(se_tree, key);