2 * Ethereal memory management and garbage collection functions
7 * Ethereal - Network traffic analyzer
8 * By Gerald Combs <gerald@ethereal.com>
9 * Copyright 1998 Gerald Combs
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version 2
14 * of the License, or (at your option) any later version.
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
35 #ifdef HAVE_SYS_TIME_H
44 #include <windows.h> /* VirtualAlloc, VirtualProtect */
45 #include <process.h> /* getpid */
51 #include <wiretap/file_util.h>
55 * Tools like Valgrind and ElectricFence don't work well with memchunks.
56 * Uncomment the defines below to make {ep|se}_alloc() allocate each
57 * object individually.
59 /* #define EP_DEBUG_FREE 1 */
60 /* #define SE_DEBUG_FREE 1 */
62 /* Do we want to use guardpages? if available */
63 #define WANT_GUARD_PAGES 1
65 /* Do we want to use canaries ? */
66 #define DEBUG_USE_CANARIES 1
69 #ifdef WANT_GUARD_PAGES
70 /* Add guard pages at each end of our allocated memory */
71 #if defined(HAVE_SYSCONF) && defined(HAVE_MMAP) && defined(HAVE_MPROTECT) && defined(HAVE_STDINT_H)
73 #include <sys/types.h>
75 #define USE_GUARD_PAGES 1
/* Pool tuning constants and the canary byte arrays. */
79 /* When required, allocate more memory from the OS in this size chunks */
80 #define EMEM_PACKET_CHUNK_SIZE 10485760
82 /* The maximum number of allocations per chunk */
83 #define EMEM_ALLOCS_PER_CHUNK (EMEM_PACKET_CHUNK_SIZE / 512)
86 #ifdef DEBUG_USE_CANARIES
87 #define EMEM_CANARY_SIZE 8
88 #define EMEM_CANARY_DATA_SIZE (EMEM_CANARY_SIZE * 2 - 1)
/* Random canary bytes for the ep (packet-lifetime) and se (capture-lifetime)
 * pools; filled in by emem_canary() during pool initialization. */
89 guint8 ep_canary[EMEM_CANARY_DATA_SIZE], se_canary[EMEM_CANARY_DATA_SIZE];
90 #endif /* DEBUG_USE_CANARIES */
/* One OS-level chunk of pool memory, bump-allocated from the front.
 * NOTE(review): this extract is missing lines; the struct appears
 * incomplete here (no visible 'buf' member or closing brace). */
92 typedef struct _emem_chunk_t {
93 	struct _emem_chunk_t *next;            /* singly-linked chunk list */
94 	unsigned int	amount_free_init;      /* usable bytes when chunk is fresh */
95 	unsigned int	amount_free;           /* bytes still available right now */
96 	unsigned int	free_offset_init;      /* first usable offset into buf */
97 	unsigned int	free_offset;           /* current bump-allocation offset */
99 #ifdef DEBUG_USE_CANARIES
100 #if ! defined(EP_DEBUG_FREE) && ! defined(SE_DEBUG_FREE)
101 	unsigned int	c_count;               /* number of canaries planted in this chunk */
102 	void		*canary[EMEM_ALLOCS_PER_CHUNK];   /* address of each planted canary */
103 	guint8		cmp_len[EMEM_ALLOCS_PER_CHUNK];   /* byte length of each canary */
105 #endif /* DEBUG_USE_CANARIES */
/* Per-pool bookkeeping: chunks with space left vs. fully-consumed chunks. */
108 typedef struct _emem_header_t {
109 	emem_chunk_t *free_list;       /* chunks that still have room */
110 	emem_chunk_t *used_list;       /* exhausted chunks, recycled on *_free_all() */
/* The two pools: ep_* lives until the next packet, se_* until the capture ends. */
113 static emem_header_t ep_packet_mem;
114 static emem_header_t se_packet_mem;
117 #ifdef DEBUG_USE_CANARIES
/* Fill 'canary' with EMEM_CANARY_DATA_SIZE random bytes used to detect
 * buffer overruns between allocations.  Randomness source, best first:
 * GLib 2 g_rand, then /dev/urandom, then srandom(time|pid). */
119  * Set a canary value to be placed between memchunks.
122 emem_canary(guint8 *canary) {
124 #if GLIB_MAJOR_VERSION >= 2
125 	static GRand   *rand_state = NULL;
129 	/* First, use GLib's random function if we have it */
130 #if GLIB_MAJOR_VERSION >= 2
131 	if (rand_state == NULL) {
132 		rand_state = g_rand_new();
134 	for (i = 0; i < EMEM_CANARY_DATA_SIZE; i ++) {
135 		canary[i] = (guint8) g_rand_int(rand_state);
141 	/* Try /dev/urandom */
142 	if ((fp = eth_fopen("/dev/urandom", "r")) != NULL) {
/* NOTE(review): fread() returns the number of ITEMS read (1 on success
 * with these arguments), so comparing sz against EMEM_CANARY_SIZE (8)
 * looks wrong — verify against the full source. */
143 		sz = fread(canary, EMEM_CANARY_DATA_SIZE, 1, fp);
145 		if (sz == EMEM_CANARY_SIZE) {
150 	/* Our last resort */
151 	srandom(time(NULL) | getpid());
152 	for (i = 0; i < EMEM_CANARY_DATA_SIZE; i ++) {
153 		canary[i] = (guint8) random();
156 #endif /* GLIB_MAJOR_VERSION >= 2 */
159 #if !defined(SE_DEBUG_FREE)
/* Given an allocation size, return the number of canary pad bytes to append:
 * at least EMEM_CANARY_SIZE, and chosen so that allocation+pad stays aligned
 * to an EMEM_CANARY_SIZE (8-byte) boundary; result is in [8, 15]. */
161  * Given an allocation size, return the amount of padding needed for
165 emem_canary_pad (size_t allocation) {
168 	pad = EMEM_CANARY_SIZE - (allocation % EMEM_CANARY_SIZE);
169 	if (pad < EMEM_CANARY_SIZE)
170 		pad += EMEM_CANARY_SIZE;
175 #endif /* DEBUG_USE_CANARIES */
178 /* Initialize the packet-lifetime memory allocation pool.
179  * This function should be called only once when Ethereal or Tethereal starts
/* NOTE(review): the function signature (presumably ep_init_chunk) falls in
 * lines missing from this extract.  Body: empty both chunk lists and seed
 * the ep canary bytes. */
185 	ep_packet_mem.free_list=NULL;
186 	ep_packet_mem.used_list=NULL;
188 #ifdef DEBUG_USE_CANARIES
189 	emem_canary(ep_canary);
190 #endif /* DEBUG_USE_CANARIES */
192 /* Initialize the capture-lifetime memory allocation pool.
193  * This function should be called only once when Ethereal or Tethereal starts
/* NOTE(review): signature (presumably se_init_chunk) missing from this
 * extract.  Mirrors ep init: empty both lists, seed the se canary. */
199 	se_packet_mem.free_list=NULL;
200 	se_packet_mem.used_list=NULL;
202 #ifdef DEBUG_USE_CANARIES
203 	emem_canary(se_canary);
204 #endif /* DEBUG_USE_CANARIES */
207 #if !defined(SE_DEBUG_FREE)
/* Allocate one new EMEM_PACKET_CHUNK_SIZE chunk and prepend it to *free_list.
 * With guard pages enabled, the first and last page inside the chunk are made
 * inaccessible (VirtualProtect on Win32, mprotect elsewhere) so overruns past
 * either end of the usable region fault immediately.
 * NOTE(review): several lines (return type, struct fields set between the
 * visible statements, list hookup) are missing from this extract. */
209 emem_create_chunk(emem_chunk_t **free_list) {
214 	char *buf_end, *prot1, *prot2;
216 #elif defined(USE_GUARD_PAGES)
217 	intptr_t pagesize = sysconf(_SC_PAGESIZE);
219 	char *buf_end, *prot1, *prot2;
221 	/* we don't have any free data, so we must allocate a new one */
224 	npc = g_malloc(sizeof(emem_chunk_t));
226 #ifdef DEBUG_USE_CANARIES
227 #if ! defined(EP_DEBUG_FREE) && ! defined(SE_DEBUG_FREE)
230 #endif /* DEBUG_USE_CANARIES */
235 	 * MSDN documents VirtualAlloc/VirtualProtect at
236 	 * http://msdn.microsoft.com/library/en-us/memory/base/creating_guard_pages.asp
238 	GetSystemInfo(&sysinfo);
239 	pagesize = sysinfo.dwPageSize;
241 	/* XXX - is MEM_COMMIT|MEM_RESERVE correct? */
242 	npc->buf = VirtualAlloc(NULL, EMEM_PACKET_CHUNK_SIZE,
243 		MEM_COMMIT|MEM_RESERVE, PAGE_READWRITE);
244 	g_assert(npc->buf != NULL);
245 	buf_end = npc->buf + EMEM_PACKET_CHUNK_SIZE;
247 	/* Align our guard pages on page-sized boundaries */
/* NOTE(review): casting a pointer through (int) truncates on 64-bit
 * Windows; a pointer-sized integer (e.g. INT_PTR) should be used here. */
248 	prot1 = (char *) ((((int) npc->buf + pagesize - 1) / pagesize) * pagesize);
249 	prot2 = (char *) ((((int) buf_end - (1 * pagesize)) / pagesize) * pagesize);
251 	ret = VirtualProtect(prot1, pagesize, PAGE_NOACCESS, &oldprot);
252 	g_assert(ret == TRUE);
253 	ret = VirtualProtect(prot2, pagesize, PAGE_NOACCESS, &oldprot);
254 	g_assert(ret == TRUE);
/* Usable region = bytes strictly between the two guard pages. */
256 	npc->amount_free_init = prot2 - prot1 - pagesize;
257 	npc->amount_free = npc->amount_free_init;
258 	npc->free_offset_init = (prot1 - npc->buf) + pagesize;
259 	npc->free_offset = npc->free_offset_init;
261 #elif defined(USE_GUARD_PAGES)
262 	npc->buf = mmap(NULL, EMEM_PACKET_CHUNK_SIZE,
263 		PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
264 	g_assert(npc->buf != MAP_FAILED);
265 	buf_end = npc->buf + EMEM_PACKET_CHUNK_SIZE;
267 	/* Align our guard pages on page-sized boundaries */
268 	prot1 = (char *) ((((intptr_t) npc->buf + pagesize - 1) / pagesize) * pagesize);
269 	prot2 = (char *) ((((intptr_t) buf_end - (1 * pagesize)) / pagesize) * pagesize);
/* NOTE(review): the mprotect() result checks (g_assert on 'ret') fall in
 * lines missing from this extract — confirm they exist in the full file. */
270 	ret = mprotect(prot1, pagesize, PROT_NONE);
272 	ret = mprotect(prot2, pagesize, PROT_NONE);
275 	npc->amount_free_init = prot2 - prot1 - pagesize;
276 	npc->amount_free = npc->amount_free_init;
277 	npc->free_offset_init = (prot1 - npc->buf) + pagesize;
278 	npc->free_offset = npc->free_offset_init;
280 #else /* Is there a draft in here? */
/* No guard pages available: the whole g_malloc'd chunk is usable. */
281 	npc->amount_free_init = EMEM_PACKET_CHUNK_SIZE;
282 	npc->amount_free = npc->amount_free_init;
283 	npc->free_offset_init = 0;
284 	npc->free_offset = npc->free_offset_init;
285 	npc->buf = g_malloc(EMEM_PACKET_CHUNK_SIZE);
286 #endif /* USE_GUARD_PAGES */
291 /* allocate 'size' amount of memory with an allocation lifetime until the
/* Bump-allocate from the head chunk of the ep free list; the memory is
 * reclaimed en masse by ep_free_all().  In canary builds, 'pad' extra bytes
 * of canary are appended to every allocation and recorded in the chunk so
 * ep_free_all() can detect overruns.  With EP_DEBUG_FREE, each allocation is
 * an individual g_malloc (for Valgrind/ElectricFence).
 * NOTE(review): extract is missing lines (size round-up, list checks,
 * return statement); comments below describe only what is visible. */
295 ep_alloc(size_t size)
298 #ifndef EP_DEBUG_FREE
299 #ifdef DEBUG_USE_CANARIES
301 	guint8 pad = emem_canary_pad(size);
304 #endif /* DEBUG_USE_CANARIES */
305 	emem_chunk_t *free_list;
308 #ifndef EP_DEBUG_FREE
309 	/* Round up to an 8 byte boundary.  Make sure we have at least
310 	 * 8 pad bytes for our canary.
314 	/* make sure we don't try to allocate too much (arbitrary limit) */
315 	DISSECTOR_ASSERT(size<(EMEM_PACKET_CHUNK_SIZE>>2));
317 		emem_create_chunk(&ep_packet_mem.free_list);
319 	/* oops, we need to allocate more memory to serve this request
320 	 * than we have free. move this node to the used list and try again
322 	if(size>ep_packet_mem.free_list->amount_free
323 #ifdef DEBUG_USE_CANARIES
324 	    || ep_packet_mem.free_list->c_count >= EMEM_ALLOCS_PER_CHUNK
325 #endif /* DEBUG_USE_CANARIES */
328 		npc=ep_packet_mem.free_list;
329 		ep_packet_mem.free_list=ep_packet_mem.free_list->next;
330 		npc->next=ep_packet_mem.used_list;
331 		ep_packet_mem.used_list=npc;
334 			emem_create_chunk(&ep_packet_mem.free_list);
336 	free_list = ep_packet_mem.free_list;
/* Hand out the next 'size' bytes and advance the bump pointer. */
338 	buf = free_list->buf + free_list->free_offset;
340 	free_list->amount_free -= size;
341 	free_list->free_offset += size;
343 #ifdef DEBUG_USE_CANARIES
/* Plant the canary in the pad bytes at the tail of this allocation. */
344 	cptr = (char *)buf + size - pad;
345 	memcpy(cptr, &ep_canary, pad);
346 	free_list->canary[free_list->c_count] = cptr;
347 	free_list->cmp_len[free_list->c_count] = pad;
348 	free_list->c_count++;
349 #endif /* DEBUG_USE_CANARIES */
351 #else /* EP_DEBUG_FREE */
/* Debug mode: one real malloc per allocation, tracked on the used list. */
354 	npc=g_malloc(sizeof(emem_chunk_t));
355 	npc->next=ep_packet_mem.used_list;
356 	npc->amount_free=size;
358 	npc->buf=g_malloc(size);
360 	ep_packet_mem.used_list=npc;
361 #endif /* EP_DEBUG_FREE */
365 /* allocate 'size' amount of memory with an allocation lifetime until the
/* Capture-lifetime twin of ep_alloc(): identical bump-allocation logic, but
 * against se_packet_mem and se_canary; reclaimed by se_free_all().
 * NOTE(review): extract is missing lines (round-up, return statement). */
369 se_alloc(size_t size)
372 #ifndef SE_DEBUG_FREE
373 #ifdef DEBUG_USE_CANARIES
375 	guint8 pad = emem_canary_pad(size);
378 #endif /* DEBUG_USE_CANARIES */
379 	emem_chunk_t *free_list;
382 #ifndef SE_DEBUG_FREE
383 	/* Round up to an 8 byte boundary.  Make sure we have at least
384 	 * 8 pad bytes for our canary.
388 	/* make sure we don't try to allocate too much (arbitrary limit) */
389 	DISSECTOR_ASSERT(size<(EMEM_PACKET_CHUNK_SIZE>>2));
391 		emem_create_chunk(&se_packet_mem.free_list);
393 	/* oops, we need to allocate more memory to serve this request
394 	 * than we have free. move this node to the used list and try again
396 	if(size>se_packet_mem.free_list->amount_free
397 #ifdef DEBUG_USE_CANARIES
398 	    || se_packet_mem.free_list->c_count >= EMEM_ALLOCS_PER_CHUNK
399 #endif /* DEBUG_USE_CANARIES */
402 		npc=se_packet_mem.free_list;
403 		se_packet_mem.free_list=se_packet_mem.free_list->next;
404 		npc->next=se_packet_mem.used_list;
405 		se_packet_mem.used_list=npc;
408 			emem_create_chunk(&se_packet_mem.free_list);
410 	free_list = se_packet_mem.free_list;
/* Hand out the next 'size' bytes and advance the bump pointer. */
412 	buf = free_list->buf + free_list->free_offset;
414 	free_list->amount_free -= size;
415 	free_list->free_offset += size;
417 #ifdef DEBUG_USE_CANARIES
/* Plant the canary in the pad bytes at the tail of this allocation. */
418 	cptr = (char *)buf + size - pad;
419 	memcpy(cptr, &se_canary, pad);
420 	free_list->canary[free_list->c_count] = cptr;
421 	free_list->cmp_len[free_list->c_count] = pad;
422 	free_list->c_count++;
423 #endif /* DEBUG_USE_CANARIES */
425 #else /* SE_DEBUG_FREE */
/* Debug mode: one real malloc per allocation, tracked on the used list. */
428 	npc=g_malloc(sizeof(emem_chunk_t));
429 	npc->next=se_packet_mem.used_list;
430 	npc->amount_free=size;
432 	npc->buf=g_malloc(size);
434 	se_packet_mem.used_list=npc;
435 #endif /* SE_DEBUG_FREE */
/* Allocate 'size' bytes of packet-lifetime memory, zero-filled.
 * Thin wrapper: ep_alloc() followed by memset(). */
void* ep_alloc0(size_t size) {
	void *buf = ep_alloc(size);
	memset(buf, '\0', size);
	return buf;
}
/* Duplicate NUL-terminated string 'src' into packet-lifetime memory.
 * NOTE(review): extract is missing lines; the explicit NUL-termination of
 * 'dst' and the return presumably fall in the missing lines (strncpy with
 * count==len does not terminate by itself). */
445 gchar* ep_strdup(const gchar* src) {
446 	guint len = strlen(src);
449 	dst = strncpy(ep_alloc(len+1), src, len);
/* Duplicate at most 'len' characters of 'src' (stopping early at NUL) into
 * packet-lifetime memory.  NOTE(review): copy-loop body, terminator and
 * return are in lines missing from this extract. */
456 gchar* ep_strndup(const gchar* src, size_t len) {
457 	gchar* dst = ep_alloc(len+1);
460 	for (i = 0; src[i] && i < len; i++)
/* Duplicate 'len' raw bytes from 'src' into packet-lifetime memory. */
void* ep_memdup(const void* src, size_t len) {
	void *dst = ep_alloc(len);
	memcpy(dst, src, len);
	return dst;
}
/* vprintf-style formatting into packet-lifetime memory.
 * NOTE(review): 'ap2' is presumably a G_VA_COPY of 'ap' made in lines
 * missing from this extract; g_printf_string_upper_bound() sizes the
 * buffer before formatting. */
472 gchar* ep_strdup_vprintf(const gchar* fmt, va_list ap) {
479 	len = g_printf_string_upper_bound(fmt, ap);
481 	dst = ep_alloc(len+1);
482 	g_vsnprintf (dst, len, fmt, ap2);
/* printf-style formatting into packet-lifetime memory; wraps
 * ep_strdup_vprintf() (va_start/va_end lines missing from this extract). */
488 gchar* ep_strdup_printf(const gchar* fmt, ...) {
493 	dst = ep_strdup_vprintf(fmt, ap);
/* Split 'string' on separator 'sep' into at most 'max_tokens' pieces, all
 * allocated from packet-lifetime memory; returns a NULL-terminated vector.
 * Works on an ep_strdup'd copy: separators are overwritten in place and a
 * small state machine collects token start pointers.
 * NOTE(review): many lines (separator overwriting, state transitions,
 * return) are missing from this extract. */
498 gchar** ep_strsplit(const gchar* string, const gchar* sep, int max_tokens) {
506 	enum { AT_START, IN_PAD, IN_TOKEN } state;
514 	s = splitted = ep_strdup(string);
515 	str_len = strlen(splitted);
516 	sep_len = strlen(sep);
518 	if (max_tokens < 1) max_tokens = INT_MAX;
/* First pass: count separators to size the result vector. */
523 	while (tokens <= (guint)max_tokens && ( s = strstr(s,sep) )) {
526 		for(i=0; i < sep_len; i++ )
533 	vec = ep_alloc_array(gchar*,tokens+1);
/* Second pass: walk the mutated copy recording token start addresses. */
536 	for (i=0; i< str_len; i++) {
539 			switch(splitted[i]) {
544 					vec[curr_tok] = &(splitted[i]);
550 			switch(splitted[i]) {
557 			switch(splitted[i]) {
559 					vec[curr_tok] = &(splitted[i]);
568 	vec[curr_tok] = NULL;
/* Allocate 'size' bytes of capture-lifetime memory, zero-filled. */
void* se_alloc0(size_t size) {
	void *buf = se_alloc(size);
	memset(buf, '\0', size);
	return buf;
}
579 /* If str is NULL, just return the string "<NULL>" so that the callers don't
580  * have to bother checking it.
/* Duplicate 'src' into capture-lifetime memory.  NOTE(review): the NULL
 * check, 'len'/'dst' declarations, NUL-termination and return fall in
 * lines missing from this extract. */
582 gchar* se_strdup(const gchar* src) {
591 	dst = strncpy(se_alloc(len+1), src, len);
/* Duplicate at most 'len' characters of 'src' (stopping early at NUL) into
 * capture-lifetime memory.  NOTE(review): copy-loop body, terminator and
 * return are in lines missing from this extract. */
598 gchar* se_strndup(const gchar* src, size_t len) {
599 	gchar* dst = se_alloc(len+1);
602 	for (i = 0; src[i] && i < len; i++)
/* Duplicate 'len' raw bytes from 'src' into capture-lifetime memory. */
void* se_memdup(const void* src, size_t len) {
	void *dst = se_alloc(len);
	memcpy(dst, src, len);
	return dst;
}
/* vprintf-style formatting into capture-lifetime memory; mirrors
 * ep_strdup_vprintf().  NOTE(review): 'ap2' is presumably a G_VA_COPY of
 * 'ap' made in lines missing from this extract. */
614 gchar* se_strdup_vprintf(const gchar* fmt, va_list ap) {
621 	len = g_printf_string_upper_bound(fmt, ap);
623 	dst = se_alloc(len+1);
624 	g_vsnprintf (dst, len, fmt, ap2);
/* printf-style formatting into capture-lifetime memory; wraps
 * se_strdup_vprintf() (va_start/va_end lines missing from this extract). */
630 gchar* se_strdup_printf(const gchar* fmt, ...) {
635 	dst = se_strdup_vprintf(fmt, ap);
640 /* release all allocated memory back to the pool.
/* Reset the ep pool: move every used chunk back onto the free list, verify
 * each recorded canary is intact (aborting via g_error on corruption), and
 * rewind each chunk's bump pointer.  In EP_DEBUG_FREE builds, every
 * individual allocation is g_free'd instead.
 * NOTE(review): the function signature and loop-advance lines are missing
 * from this extract. */
646 #ifndef EP_DEBUG_FREE
647 #ifdef DEBUG_USE_CANARIES
649 #endif /* DEBUG_USE_CANARIES */
652 	/* move all used chunks over to the free list */
653 	while(ep_packet_mem.used_list){
654 		npc=ep_packet_mem.used_list;
655 		ep_packet_mem.used_list=ep_packet_mem.used_list->next;
656 		npc->next=ep_packet_mem.free_list;
657 		ep_packet_mem.free_list=npc;
660 	/* clear them all out */
661 	npc = ep_packet_mem.free_list;
662 	while (npc != NULL) {
663 #ifndef EP_DEBUG_FREE
664 #ifdef DEBUG_USE_CANARIES
665 		for (i = 0; i < npc->c_count; i++) {
666 			if (memcmp(npc->canary[i], &ep_canary, npc->cmp_len[i]) != 0)
667 				g_error("Per-packet memory corrupted.");
670 #endif /* DEBUG_USE_CANARIES */
/* Rewind the chunk so its whole usable region is available again. */
671 		npc->amount_free = npc->amount_free_init;
672 		npc->free_offset = npc->free_offset_init;
674 #else /* EP_DEBUG_FREE */
675 		emem_chunk_t *next = npc->next;
680 #endif /* EP_DEBUG_FREE */
687 /* release all allocated memory back to the pool.
/* Reset the se pool, mirroring ep_free_all(): recycle used chunks, verify
 * canaries, rewind bump pointers; additionally NULL out the root of every
 * registered se tree so the trees are reborn empty.
 * NOTE(review): signature and loop-advance lines missing from extract. */
693 	se_tree_t *se_tree_list;
694 #ifndef SE_DEBUG_FREE
695 #ifdef DEBUG_USE_CANARIES
697 #endif /* DEBUG_USE_CANARIES */
701 	/* move all used chunks over to the free list */
702 	while(se_packet_mem.used_list){
703 		npc=se_packet_mem.used_list;
704 		se_packet_mem.used_list=se_packet_mem.used_list->next;
705 		npc->next=se_packet_mem.free_list;
706 		se_packet_mem.free_list=npc;
709 	/* clear them all out */
710 	npc = se_packet_mem.free_list;
711 	while (npc != NULL) {
712 #ifndef SE_DEBUG_FREE
713 #ifdef DEBUG_USE_CANARIES
714 		for (i = 0; i < npc->c_count; i++) {
715 			if (memcmp(npc->canary[i], &se_canary, npc->cmp_len[i]) != 0)
716 				g_error("Per-session memory corrupted.");
719 #endif /* DEBUG_USE_CANARIES */
/* Rewind the chunk so its whole usable region is available again. */
720 		npc->amount_free = npc->amount_free_init;
721 		npc->free_offset = npc->free_offset_init;
723 #else /* SE_DEBUG_FREE */
724 		emem_chunk_t *next = npc->next;
729 #endif /* SE_DEBUG_FREE */
736 	/* release/reset all se allocated trees */
737 	for(se_tree_list=se_trees;se_tree_list;se_tree_list=se_tree_list->next){
738 		se_tree_list->tree=NULL;
/* Create a new ep-allocated stack: the handle points at a zeroed sentinel
 * frame.  (Return statement is in lines missing from this extract.) */
743 ep_stack_t ep_stack_new(void) {
744 	ep_stack_t s = ep_new(struct _ep_stack_frame_t*);
745 	*s = ep_new0(struct _ep_stack_frame_t);
749 /* for ep_stack_t we'll keep the popped frames so we reuse them instead
750 of allocating new ones.
/* Push 'data' onto the stack.  Reuses the frame above the current head if
 * one was kept from an earlier pop; otherwise allocates a fresh frame.
 * NOTE(review): frame-linking statements and return are in lines missing
 * from this extract. */
754 void* ep_stack_push(ep_stack_t stack, void* data) {
755 	struct _ep_stack_frame_t* frame;
756 	struct _ep_stack_frame_t* head = (*stack);
761 		frame = ep_new(struct _ep_stack_frame_t);
767 	frame->payload = data;
/* Pop the top frame and return its payload; the popped frame stays linked
 * above the new head for reuse by ep_stack_push().  (The empty-stack branch
 * is in lines missing from this extract.) */
773 void* ep_stack_pop(ep_stack_t stack) {
775 	if ((*stack)->below) {
776 		(*stack) = (*stack)->below;
777 		return (*stack)->above->payload;
/* Debug helper: recursively dump one red-black tree node per line,
 * indented by depth, then recurse into both children.
 * NOTE(review): printing pointers via (int) + %08x truncates addresses on
 * 64-bit platforms; %p would be correct. */
786 void print_tree_item(se_tree_node_t *node, int level){
788 	for(i=0;i<level;i++){
791 	printf("%s KEY:0x%08x node:0x%08x parent:0x%08x left:0x%08x right:0x%08x\n",node->rb_color==SE_TREE_RB_COLOR_BLACK?"BLACK":"RED",node->key32,(int)node,(int)node->parent,(int)node->left,(int)node->right);
793 		print_tree_item(node->left,level+1);
795 		print_tree_item(node->right,level+1);
/* Debug helper: dump an entire tree starting at its root.  (The NULL-root
 * guard presumably sits in the lines missing from this extract.) */
798 void print_tree(se_tree_node_t *node){
805 	print_tree_item(node,0);
811 /* routines to manage se allocated red-black trees */
/* Global registry of persistent se trees; se_free_all() walks this list to
 * reset each tree's root. */
812 se_tree_t *se_trees=NULL;
/* Create a persistent (malloc'd) se tree of the given type and register it
 * on se_trees.  'name' is stored as-is, NOT copied — the caller must keep
 * it alive.  NOTE(review): bare malloc() result is unchecked and differs
 * from the g_malloc() used elsewhere in this file; list hookup and return
 * are in lines missing from this extract. */
815 se_tree_create(int type, char *name)
817 	se_tree_t *tree_list;
819 	tree_list=malloc(sizeof(se_tree_t));
820 	tree_list->next=se_trees;
821 	tree_list->type=type;
822 	tree_list->tree=NULL;
823 	tree_list->name=name;
/* Exact-match lookup of 32-bit 'key'; returns the node's data pointer.
 * NOTE(review): the descent loop and miss/return paths fall in lines
 * missing from this extract. */
832 se_tree_lookup32(se_tree_t *se_tree, guint32 key)
834 	se_tree_node_t *node;
839 		if(key==node->key32){
/* Lookup of the entry with the largest key <= 'key' (less-or-equal match).
 * Descends to where 'key' would sit, then walks back up deciding, per the
 * comments below, whether the stopping node or one of its ancestors holds
 * the predecessor.  NOTE(review): most of the descent/ascent code falls in
 * lines missing from this extract. */
855 se_tree_lookup32_le(se_tree_t *se_tree, guint32 key)
857 	se_tree_node_t *node;
867 		if(key==node->key32){
889 	/* If we are still at the root of the tree this means that this node
890 	 * is either smaller than the search key and then we return this
891 	 * node or else there is no smaller key available and then
902 		if(node->parent->left==node){
906 	/* if this is a left child and its key is smaller than
907 	 * the search key, then this is the node we want.
911 	/* if this is a left child and its key is bigger than
912 	 * the search key, we have to check if any
913 	 * of our ancestors are smaller than the search key.
927 	/* if this is the right child and its key is smaller
928 	 * than the search key then this is the one we want.
932 	/* if this is the right child and its key is larger
933 	 * than the search key then our parent is the one we
936 		return node->parent->data;
/* Red-black helpers: parent and grandparent accessors.
 * NOTE(review): function bodies here are incomplete in this extract —
 * se_tree_parent's return and se_tree_grandparent's NULL-parent guard fall
 * in missing lines. */
943 static inline se_tree_node_t *
944 se_tree_parent(se_tree_node_t *node)
949 static inline se_tree_node_t *
950 se_tree_grandparent(se_tree_node_t *node)
952 	se_tree_node_t *parent;
954 	parent=se_tree_parent(node);
956 		return parent->parent;
960 static inline se_tree_node_t *
/* Return the sibling of node's parent (its "uncle"), or — per missing
 * lines, presumably NULL — when there is no parent/grandparent. */
961 se_tree_uncle(se_tree_node_t *node)
963 	se_tree_node_t *parent, *grandparent;
965 	parent=se_tree_parent(node);
969 	grandparent=se_tree_parent(parent);
973 	if(parent==grandparent->left){
974 		return grandparent->right;
976 	return grandparent->left;
/* Forward declarations: the insert-fixup cases call each other. */
979 static inline void rb_insert_case1(se_tree_t *se_tree, se_tree_node_t *node);
980 static inline void rb_insert_case2(se_tree_t *se_tree, se_tree_node_t *node);
/* Standard left rotation about 'node': node->right becomes the subtree
 * root.  Updates the parent's child pointer (or se_tree->tree when node
 * was the root) and all affected parent links.
 * NOTE(review): the root/else branching lines are partially missing from
 * this extract. */
983 rotate_left(se_tree_t *se_tree, se_tree_node_t *node)
986 		if(node->parent->left==node){
987 			node->parent->left=node->right;
989 			node->parent->right=node->right;
992 		se_tree->tree=node->right;
994 	node->right->parent=node->parent;
995 	node->parent=node->right;
996 	node->right=node->right->left;
998 		node->right->parent=node;
1000 	node->parent->left=node;
/* Mirror of rotate_left(): right rotation about 'node', node->left becomes
 * the subtree root; parent/root pointers updated accordingly. */
1004 rotate_right(se_tree_t *se_tree, se_tree_node_t *node)
1007 		if(node->parent->left==node){
1008 			node->parent->left=node->left;
1010 			node->parent->right=node->left;
1013 		se_tree->tree=node->left;
1015 	node->left->parent=node->parent;
1016 	node->parent=node->left;
1017 	node->left=node->left->right;
1019 		node->left->parent=node;
1021 	node->parent->right=node;
/* RB insert fixup, final case: parent red, uncle black, node on the
 * "outside".  Recolor parent black / grandparent red, then rotate the
 * grandparent (right for left-left, else left). */
1025 rb_insert_case5(se_tree_t *se_tree, se_tree_node_t *node)
1027 	se_tree_node_t *grandparent;
1028 	se_tree_node_t *parent;
1030 	parent=se_tree_parent(node);
1031 	grandparent=se_tree_parent(parent);
1032 	parent->rb_color=SE_TREE_RB_COLOR_BLACK;
1033 	grandparent->rb_color=SE_TREE_RB_COLOR_RED;
1034 	if( (node==parent->left) && (parent==grandparent->left) ){
1035 		rotate_right(se_tree, grandparent);
1037 		rotate_left(se_tree, grandparent);
/* RB insert fixup: node is an "inside" grandchild — rotate the parent to
 * convert it to the outside configuration, then fall through to case 5.
 * NOTE(review): the reassignment of 'node' after each rotation falls in
 * lines missing from this extract. */
1042 rb_insert_case4(se_tree_t *se_tree, se_tree_node_t *node)
1044 	se_tree_node_t *grandparent;
1045 	se_tree_node_t *parent;
1047 	parent=se_tree_parent(node);
1048 	grandparent=se_tree_parent(parent);
1052 	if( (node==parent->right) && (parent==grandparent->left) ){
1053 		rotate_left(se_tree, parent);
1055 	} else if( (node==parent->left) && (parent==grandparent->right) ){
1056 		rotate_right(se_tree, parent);
1059 	rb_insert_case5(se_tree, node);
/* RB insert fixup: if the uncle is red, recolor parent+uncle black and
 * grandparent red, then restart the fixup from the grandparent (case 1);
 * otherwise continue with case 4. */
1063 rb_insert_case3(se_tree_t *se_tree, se_tree_node_t *node)
1065 	se_tree_node_t *grandparent;
1066 	se_tree_node_t *parent;
1067 	se_tree_node_t *uncle;
1069 	uncle=se_tree_uncle(node);
1070 	if(uncle && (uncle->rb_color==SE_TREE_RB_COLOR_RED)){
1071 		parent=se_tree_parent(node);
1072 		parent->rb_color=SE_TREE_RB_COLOR_BLACK;
1073 		uncle->rb_color=SE_TREE_RB_COLOR_BLACK;
1074 		grandparent=se_tree_grandparent(node);
1075 		grandparent->rb_color=SE_TREE_RB_COLOR_RED;
1076 		rb_insert_case1(se_tree, grandparent);
1078 		rb_insert_case4(se_tree, node);
/* RB insert fixup: a black parent means no red-red violation — done.
 * Otherwise continue with case 3. */
1083 rb_insert_case2(se_tree_t *se_tree, se_tree_node_t *node)
1085 	se_tree_node_t *parent;
1087 	parent=se_tree_parent(node);
1088 	/* parent is always non-NULL here */
1089 	if(parent->rb_color==SE_TREE_RB_COLOR_BLACK){
1092 	rb_insert_case3(se_tree, node);
/* RB insert fixup entry point: a node with no parent is the root and is
 * simply painted black; otherwise proceed to case 2. */
1096 rb_insert_case1(se_tree_t *se_tree, se_tree_node_t *node)
1098 	se_tree_node_t *parent;
1100 	parent=se_tree_parent(node);
1102 		node->rb_color=SE_TREE_RB_COLOR_BLACK;
1105 	rb_insert_case2(se_tree, node);
1108 /* insert a new node in the tree. if this node matches an already existing node
1109  * then just replace the data for that node */
/* Insert (key -> data) into an se-allocated binary tree.  Empty tree: the
 * new node becomes a black root.  Existing key: data pointer is replaced.
 * Otherwise a new leaf is se_alloc'd on the correct side, and for
 * red-black trees the leaf is painted red and the fixup cascade is run.
 * NOTE(review): the descent loop, key32/data assignments for the first
 * node, and several control-flow lines are missing from this extract. */
1111 se_tree_insert32(se_tree_t *se_tree, guint32 key, void *data)
1113 	se_tree_node_t *node;
1117 	/* is this the first node ?*/
1119 		node=se_alloc(sizeof(se_tree_node_t));
1120 		switch(se_tree->type){
1121 		case SE_TREE_TYPE_RED_BLACK:
1122 			node->rb_color=SE_TREE_RB_COLOR_BLACK;
1134 	/* it was not the new root so walk the tree until we find where to
1135 	 * insert this new leaf.
1138 		/* this node already exists, so just replace the data pointer*/
1139 		if(key==node->key32){
1143 		if(key<node->key32) {
1145 				/* new node to the left */
1146 				se_tree_node_t *new_node;
1147 				new_node=se_alloc(sizeof(se_tree_node_t));
1148 				node->left=new_node;
1149 				new_node->parent=node;
1150 				new_node->left=NULL;
1151 				new_node->right=NULL;
1152 				new_node->key32=key;
1153 				new_node->data=data;
1160 		if(key>node->key32) {
1162 				/* new node to the right */
1163 				se_tree_node_t *new_node;
1164 				new_node=se_alloc(sizeof(se_tree_node_t));
1165 				node->right=new_node;
1166 				new_node->parent=node;
1167 				new_node->left=NULL;
1168 				new_node->right=NULL;
1169 				new_node->key32=key;
1170 				new_node->data=data;
1179 	/* node will now point to the newly created node */
1180 	switch(se_tree->type){
1181 	case SE_TREE_TYPE_RED_BLACK:
1182 		node->rb_color=SE_TREE_RB_COLOR_RED;
1183 		rb_insert_case1(se_tree, node);
/* Like se_tree_insert32(), but: an existing key returns its current data
 * untouched, and a newly created node gets its data from func(ud).  Used
 * to lazily build sub-trees (see create_sub_tree()).
 * NOTE(review): descent loop and several return/assignment lines are
 * missing from this extract. */
1188 static void* lookup_or_insert32(se_tree_t *se_tree, guint32 key, void*(*func)(void*),void* ud) {
1189 	se_tree_node_t *node;
1193 	/* is this the first node ?*/
1195 		node=se_alloc(sizeof(se_tree_node_t));
1196 		switch(se_tree->type){
1197 		case SE_TREE_TYPE_RED_BLACK:
1198 			node->rb_color=SE_TREE_RB_COLOR_BLACK;
1205 		node->data= func(ud);
1210 	/* it was not the new root so walk the tree until we find where to
1211 	 * insert this new leaf.
1214 		/* this node already exists, so just return the data pointer*/
1215 		if(key==node->key32){
1218 		if(key<node->key32) {
1220 				/* new node to the left */
1221 				se_tree_node_t *new_node;
1222 				new_node=se_alloc(sizeof(se_tree_node_t));
1223 				node->left=new_node;
1224 				new_node->parent=node;
1225 				new_node->left=NULL;
1226 				new_node->right=NULL;
1227 				new_node->key32=key;
1228 				new_node->data= func(ud);
1235 		if(key>node->key32) {
1237 				/* new node to the right */
1238 				se_tree_node_t *new_node;
1239 				new_node=se_alloc(sizeof(se_tree_node_t));
1240 				node->right=new_node;
1241 				new_node->parent=node;
1242 				new_node->left=NULL;
1243 				new_node->right=NULL;
1244 				new_node->key32=key;
1245 				new_node->data= func(ud);
1254 	/* node will now point to the newly created node */
1255 	switch(se_tree->type){
1256 	case SE_TREE_TYPE_RED_BLACK:
1257 		node->rb_color=SE_TREE_RB_COLOR_RED;
1258 		rb_insert_case1(se_tree, node);
1265 /* When the se data is released, this entire tree will disappear as if it
1266  * never existed including all metadata associated with the tree.
/* Unlike se_tree_create(), the tree header itself comes from the se pool
 * and is NOT linked onto the global se_trees registry, so se_free_all()
 * reclaims it wholesale.  'name' is stored as-is, not copied.
 * (Return statement is in lines missing from this extract.) */
1269 se_tree_create_non_persistent(int type, char *name)
1271 	se_tree_t *tree_list;
1273 	tree_list=se_alloc(sizeof(se_tree_t));
1274 	tree_list->next=NULL;
1275 	tree_list->type=type;
1276 	tree_list->tree=NULL;
1277 	tree_list->name=name;
1282 static void* create_sub_tree(void* d) {
1283 se_tree_t *se_tree = d;
1284 return se_tree_create_non_persistent(se_tree->type, "subtree");
1287 /* insert a new node in the tree. if this node matches an already existing node
1288  * then just replace the data for that node */
/* Insert under a compound key: 'key' is an array of {length, key-words}
 * segments terminated by a zero-length entry.  A single remaining word is
 * inserted directly; otherwise the first word selects (creating on demand)
 * a sub-tree and we recurse with the rest of the key.
 * NOTE(review): the key-advance bookkeeping between the visible calls is
 * in lines missing from this extract. */
1291 se_tree_insert32_array(se_tree_t *se_tree, se_tree_key_t *key, void *data)
1293 	se_tree_t *next_tree;
/* Sanity-check the leading segment length (1..100). */
1295 	if((key[0].length<1)||(key[0].length>100)){
1296 		DISSECTOR_ASSERT_NOT_REACHED();
1298 	if((key[0].length==1)&&(key[1].length==0)){
1299 		se_tree_insert32(se_tree, *key[0].key, data);
1303 	next_tree=lookup_or_insert32(se_tree, *key[0].key, create_sub_tree, se_tree);
1305 	if(key[0].length==1){
1311 	se_tree_insert32_array(next_tree, key, data);
/* Compound-key lookup mirroring se_tree_insert32_array(): consume the key
 * segments word by word, descending through sub-trees; returns NULL-path
 * handling and key-advance lines are missing from this extract. */
1315 se_tree_lookup32_array(se_tree_t *se_tree, se_tree_key_t *key)
1317 	se_tree_t *next_tree;
1319 	if((key[0].length<1)||(key[0].length>100)){
1320 		DISSECTOR_ASSERT_NOT_REACHED();
1322 	if((key[0].length==1)&&(key[1].length==0)){
1323 		return se_tree_lookup32(se_tree, *key[0].key);
1325 	next_tree=se_tree_lookup32(se_tree, *key[0].key);
1329 	if(key[0].length==1){
1335 	return se_tree_lookup32_array(next_tree, key);
/* Use a string as a tree key: the string is viewed as 'div' whole guint32
 * words plus a 'residual' word built from the 1-3 leftover bytes (and,
 * per missing lines, presumably the length), forming a compound key for
 * se_tree_insert32_array().
 * NOTE(review): (1) the key[] initializer and the switch packing the
 * residual are partially missing here; (2) shifting gchar (often signed)
 * values like k[div+3] << 24 may sign-extend — verify against full source. */
1339 void se_tree_insert_string(se_string_hash_t* se_tree, const gchar* k, void* v) {
1340 	guint32 len = strlen(k);
1341 	guint32 div = (len-1)/4;
1342 	guint32 residual = 0;
1343 	se_tree_key_t key[] = {
1345 		{div,(guint32*)(&k[0])},
1351 		key[1].length = key[2].length;
1352 		key[1].key = key[2].key;
1361 			residual |= ( k[div+3] << 24 );
1363 			residual |= ( k[div+2] << 16 );
1365 			residual |= ( k[div+1] << 8 );
1371 	se_tree_insert32_array(se_tree,key,v);
/* String-keyed lookup: packs 'k' into the same compound key layout as
 * se_tree_insert_string() and delegates to se_tree_lookup32_array().
 * NOTE(review): same caveats as the insert side — partially missing key
 * construction, and possible sign-extension when shifting gchar values. */
1374 void* se_tree_lookup_string(se_string_hash_t* se_tree, const gchar* k) {
1375 	guint32 len = strlen(k);
1376 	guint32 div = (len-1)/4;
1377 	guint32 residual = 0;
1378 	se_tree_key_t key[] = {
1380 		{div,(guint32*)(&k[0])},
1386 		key[1].length = key[2].length;
1387 		key[1].key = key[2].key;
1396 			residual |= k[div+3] << 24;
1398 			residual |= k[div+2] << 16;
1400 			residual |= k[div+1] << 8;
1406 	return se_tree_lookup32_array(se_tree, key);