2 * Ethereal memory management and garbage collection functions
7 * Ethereal - Network traffic analyzer
8 * By Gerald Combs <gerald@ethereal.com>
9 * Copyright 1998 Gerald Combs
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version 2
14 * of the License, or (at your option) any later version.
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
35 #ifdef HAVE_SYS_TIME_H
44 #include <windows.h> /* VirtualAlloc, VirtualProtect */
45 #include <process.h> /* getpid */
51 #include <wiretap/file_util.h>
55 * Tools like Valgrind and ElectricFence don't work well with memchunks.
56 * Uncomment the defines below to make {ep|se}_alloc() allocate each
57 * object individually.
59 /* #define EP_DEBUG_FREE 1 */
60 /* #define SE_DEBUG_FREE 1 */
62 /* Do we want to use guardpages? if available */
63 #define WANT_GUARD_PAGES 1
65 /* Do we want to use canaries ? */
66 #define DEBUG_USE_CANARIES 1
69 #ifdef WANT_GUARD_PAGES
70 /* Add guard pages at each end of our allocated memory */
/* Guard pages need sysconf/mmap/mprotect/stdint on POSIX systems. */
71 #if defined(HAVE_SYSCONF) && defined(HAVE_MMAP) && defined(HAVE_MPROTECT) && defined(HAVE_STDINT_H)
73 #include <sys/types.h>
75 #define USE_GUARD_PAGES 1
79 /* When required, allocate more memory from the OS in this size chunks */
/* 10485760 = 10 MiB per chunk requested from the OS. */
80 #define EMEM_PACKET_CHUNK_SIZE 10485760
82 /* The maximum number of allocations per chunk */
/* Sizes the per-chunk canary bookkeeping arrays in emem_chunk_t. */
83 #define EMEM_ALLOCS_PER_CHUNK (EMEM_PACKET_CHUNK_SIZE / 512)
86 #ifdef DEBUG_USE_CANARIES
/* Each allocation is followed by 8..15 canary bytes taken from these
 * randomly-filled patterns; see emem_canary_pad() and *_free_all(). */
87 #define EMEM_CANARY_SIZE 8
88 #define EMEM_CANARY_DATA_SIZE (EMEM_CANARY_SIZE * 2 - 1)
89 guint8 ep_canary[EMEM_CANARY_DATA_SIZE], se_canary[EMEM_CANARY_DATA_SIZE];
90 #endif /* DEBUG_USE_CANARIES */
92 typedef struct _emem_chunk_t {
/* One OS-level chunk of pool memory, kept on a singly-linked list. */
93 struct _emem_chunk_t *next;
/* The *_init values remember the chunk's post-guard-page geometry so it
 * can be rewound cheaply when the pool is released. */
94 unsigned int amount_free_init;
95 unsigned int amount_free;
96 unsigned int free_offset_init;
97 unsigned int free_offset;
99 #ifdef DEBUG_USE_CANARIES
100 #if ! defined(EP_DEBUG_FREE) && ! defined(SE_DEBUG_FREE)
/* Location and length of the canary written after each allocation,
 * verified in ep_free_all()/se_free_all(). */
101 unsigned int c_count;
102 void *canary[EMEM_ALLOCS_PER_CHUNK];
103 guint8 cmp_len[EMEM_ALLOCS_PER_CHUNK];
105 #endif /* DEBUG_USE_CANARIES */
108 typedef struct _emem_header_t {
/* Head of a pool: chunks with space left, and chunks already consumed. */
109 emem_chunk_t *free_list;
110 emem_chunk_t *used_list;
/* ep_*: packet-lifetime pool; se_*: capture(session)-lifetime pool. */
113 static emem_header_t ep_packet_mem;
114 static emem_header_t se_packet_mem;
116 #if !defined(SE_DEBUG_FREE)
/* Win32 guard pages need the system page size and the OS version
 * (VirtualProtect may fail on Win9x -- see emem_create_chunk()). */
118 static SYSTEM_INFO sysinfo;
119 static OSVERSIONINFO versinfo;
121 #elif defined(USE_GUARD_PAGES)
122 static intptr_t pagesize;
123 #endif /* _WIN32 / USE_GUARD_PAGES */
124 #endif /* SE_DEBUG_FREE */
126 #ifdef DEBUG_USE_CANARIES
128 * Set a canary value to be placed between memchunks.
/* Fill 'canary' with EMEM_CANARY_DATA_SIZE random bytes, preferring
 * GLib's PRNG, then /dev/urandom, then srandom()/random(). */
131 emem_canary(guint8 *canary) {
133 #if GLIB_MAJOR_VERSION >= 2
134 static GRand *rand_state = NULL;
138 /* First, use GLib's random function if we have it */
139 #if GLIB_MAJOR_VERSION >= 2
140 if (rand_state == NULL) {
141 rand_state = g_rand_new();
143 for (i = 0; i < EMEM_CANARY_DATA_SIZE; i ++) {
144 canary[i] = (guint8) g_rand_int(rand_state);
150 /* Try /dev/urandom */
151 if ((fp = eth_fopen("/dev/urandom", "r")) != NULL) {
152 sz = fread(canary, EMEM_CANARY_DATA_SIZE, 1, fp);
/* NOTE(review): fread() with nmemb==1 returns 1 on success, so comparing
 * against EMEM_CANARY_SIZE looks suspect -- confirm the intended check. */
154 if (sz == EMEM_CANARY_SIZE) {
159 /* Our last resort */
160 srandom(time(NULL) | getpid());
161 for (i = 0; i < EMEM_CANARY_DATA_SIZE; i ++) {
162 canary[i] = (guint8) random();
165 #endif /* GLIB_MAJOR_VERSION >= 2 */
168 #if !defined(SE_DEBUG_FREE)
170 * Given an allocation size, return the amount of padding needed for
/* The result is always in [EMEM_CANARY_SIZE, 2*EMEM_CANARY_SIZE-1], which
 * also rounds size+pad up to an 8-byte boundary. */
174 emem_canary_pad (size_t allocation) {
177 pad = EMEM_CANARY_SIZE - (allocation % EMEM_CANARY_SIZE);
178 if (pad < EMEM_CANARY_SIZE)
179 pad += EMEM_CANARY_SIZE;
184 #endif /* DEBUG_USE_CANARIES */
187 /* Initialize the packet-lifetime memory allocation pool.
188 * This function should be called only once when Ethereal or Tethereal starts
/* Resets the ep pool lists, seeds the ep canary pattern, and gathers
 * the page-size/OS-version data needed for guard pages. */
194 ep_packet_mem.free_list=NULL;
195 ep_packet_mem.used_list=NULL;
197 #ifdef DEBUG_USE_CANARIES
198 emem_canary(ep_canary);
199 #endif /* DEBUG_USE_CANARIES */
201 #if !defined(SE_DEBUG_FREE)
203 /* Set up our guard page info for Win32 */
204 GetSystemInfo(&sysinfo);
205 pagesize = sysinfo.dwPageSize;
207 /* calling GetVersionEx using the OSVERSIONINFO structure.
208 * OSVERSIONINFOEX requires Win NT4 with SP6 or newer NT Versions.
209 * OSVERSIONINFOEX will fail on Win9x and older NT Versions.
211 * http://msdn.microsoft.com/library/en-us/sysinfo/base/getversionex.asp
212 * http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfo_str.asp
213 * http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfoex_str.asp
215 versinfo.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
216 GetVersionEx(&versinfo);
218 #elif defined(USE_GUARD_PAGES)
219 pagesize = sysconf(_SC_PAGESIZE);
220 #endif /* _WIN32 / USE_GUARD_PAGES */
221 #endif /* SE_DEBUG_FREE */
225 /* Initialize the capture-lifetime memory allocation pool.
226 * This function should be called only once when Ethereal or Tethereal starts
/* Same as above, for the capture(session)-lifetime pool. */
232 se_packet_mem.free_list=NULL;
233 se_packet_mem.used_list=NULL;
235 #ifdef DEBUG_USE_CANARIES
236 emem_canary(se_canary);
237 #endif /* DEBUG_USE_CANARIES */
240 #if !defined(SE_DEBUG_FREE)
/* Allocate a fresh EMEM_PACKET_CHUNK_SIZE chunk from the OS, optionally
 * bracketing the usable region with inaccessible guard pages, and push
 * it onto *free_list. */
242 emem_create_chunk(emem_chunk_t **free_list) {
245 char *buf_end, *prot1, *prot2;
247 #elif defined(USE_GUARD_PAGES)
249 char *buf_end, *prot1, *prot2;
250 #endif /* _WIN32 / USE_GUARD_PAGES */
251 /* we don't have any free data, so we must allocate a new one */
254 npc = g_malloc(sizeof(emem_chunk_t));
256 #ifdef DEBUG_USE_CANARIES
257 #if ! defined(EP_DEBUG_FREE) && ! defined(SE_DEBUG_FREE)
260 #endif /* DEBUG_USE_CANARIES */
265 * MSDN documents VirtualAlloc/VirtualProtect at
266 * http://msdn.microsoft.com/library/en-us/memory/base/creating_guard_pages.asp
269 /* XXX - is MEM_COMMIT|MEM_RESERVE correct? */
270 npc->buf = VirtualAlloc(NULL, EMEM_PACKET_CHUNK_SIZE,
271 MEM_COMMIT|MEM_RESERVE, PAGE_READWRITE);
272 g_assert(npc->buf != NULL);
273 buf_end = npc->buf + EMEM_PACKET_CHUNK_SIZE;
275 /* Align our guard pages on page-sized boundaries */
/* NOTE(review): casting the pointer through (int) truncates on 64-bit
 * Windows; the POSIX branch below uses (intptr_t) -- this one should too. */
276 prot1 = (char *) ((((int) npc->buf + pagesize - 1) / pagesize) * pagesize);
277 prot2 = (char *) ((((int) buf_end - (1 * pagesize)) / pagesize) * pagesize);
279 ret = VirtualProtect(prot1, pagesize, PAGE_NOACCESS, &oldprot);
/* PAGE_NOACCESS protection is allowed to fail on Win9x, hence the
 * platform-id escape hatch in the assertion. */
280 g_assert(ret != 0 || versinfo.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS);
281 ret = VirtualProtect(prot2, pagesize, PAGE_NOACCESS, &oldprot);
282 g_assert(ret != 0 || versinfo.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS);
/* Usable region is everything strictly between the two guard pages. */
284 npc->amount_free_init = prot2 - prot1 - pagesize;
285 npc->amount_free = npc->amount_free_init;
286 npc->free_offset_init = (prot1 - npc->buf) + pagesize;
287 npc->free_offset = npc->free_offset_init;
289 #elif defined(USE_GUARD_PAGES)
290 npc->buf = mmap(NULL, EMEM_PACKET_CHUNK_SIZE,
291 PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
292 g_assert(npc->buf != MAP_FAILED);
293 buf_end = npc->buf + EMEM_PACKET_CHUNK_SIZE;
295 /* Align our guard pages on page-sized boundaries */
296 prot1 = (char *) ((((intptr_t) npc->buf + pagesize - 1) / pagesize) * pagesize);
297 prot2 = (char *) ((((intptr_t) buf_end - (1 * pagesize)) / pagesize) * pagesize);
298 ret = mprotect(prot1, pagesize, PROT_NONE);
300 ret = mprotect(prot2, pagesize, PROT_NONE);
303 npc->amount_free_init = prot2 - prot1 - pagesize;
304 npc->amount_free = npc->amount_free_init;
305 npc->free_offset_init = (prot1 - npc->buf) + pagesize;
306 npc->free_offset = npc->free_offset_init;
308 #else /* Is there a draft in here? */
/* No guard-page support: the whole chunk is usable. */
309 npc->amount_free_init = EMEM_PACKET_CHUNK_SIZE;
310 npc->amount_free = npc->amount_free_init;
311 npc->free_offset_init = 0;
312 npc->free_offset = npc->free_offset_init;
313 npc->buf = g_malloc(EMEM_PACKET_CHUNK_SIZE);
314 #endif /* USE_GUARD_PAGES */
319 /* allocate 'size' amount of memory with an allocation lifetime until the
/* next call to ep_free_all(); memory is carved bump-pointer style from
 * the chunk at the head of the free list. */
323 ep_alloc(size_t size)
326 #ifndef EP_DEBUG_FREE
327 #ifdef DEBUG_USE_CANARIES
/* 'pad' both rounds the request to 8 bytes and reserves canary space. */
329 guint8 pad = emem_canary_pad(size);
332 #endif /* DEBUG_USE_CANARIES */
333 emem_chunk_t *free_list;
336 #ifndef EP_DEBUG_FREE
337 /* Round up to an 8 byte boundary. Make sure we have at least
338 * 8 pad bytes for our canary.
342 /* make sure we don't try to allocate too much (arbitrary limit) */
343 DISSECTOR_ASSERT(size<(EMEM_PACKET_CHUNK_SIZE>>2));
345 emem_create_chunk(&ep_packet_mem.free_list);
347 /* oops, we need to allocate more memory to serve this request
348 * than we have free. move this node to the used list and try again
350 if(size>ep_packet_mem.free_list->amount_free
351 #ifdef DEBUG_USE_CANARIES
/* A chunk is also retired once its canary table is full. */
352 || ep_packet_mem.free_list->c_count >= EMEM_ALLOCS_PER_CHUNK
353 #endif /* DEBUG_USE_CANARIES */
356 npc=ep_packet_mem.free_list;
357 ep_packet_mem.free_list=ep_packet_mem.free_list->next;
358 npc->next=ep_packet_mem.used_list;
359 ep_packet_mem.used_list=npc;
362 emem_create_chunk(&ep_packet_mem.free_list);
/* Bump-pointer allocation out of the head chunk. */
364 free_list = ep_packet_mem.free_list;
366 buf = free_list->buf + free_list->free_offset;
368 free_list->amount_free -= size;
369 free_list->free_offset += size;
371 #ifdef DEBUG_USE_CANARIES
/* Write the canary bytes directly after the caller's data and record
 * where they are so ep_free_all() can verify them later. */
372 cptr = (char *)buf + size - pad;
373 memcpy(cptr, &ep_canary, pad);
374 free_list->canary[free_list->c_count] = cptr;
375 free_list->cmp_len[free_list->c_count] = pad;
376 free_list->c_count++;
377 #endif /* DEBUG_USE_CANARIES */
379 #else /* EP_DEBUG_FREE */
/* Debug build: one g_malloc() per allocation so tools like Valgrind
 * and ElectricFence can track each object individually. */
382 npc=g_malloc(sizeof(emem_chunk_t));
383 npc->next=ep_packet_mem.used_list;
384 npc->amount_free=size;
386 npc->buf=g_malloc(size);
388 ep_packet_mem.used_list=npc;
389 #endif /* EP_DEBUG_FREE */
393 /* allocate 'size' amount of memory with an allocation lifetime until the
/* next call to se_free_all(); capture-lifetime twin of ep_alloc(). */
397 se_alloc(size_t size)
400 #ifndef SE_DEBUG_FREE
401 #ifdef DEBUG_USE_CANARIES
/* 'pad' both rounds the request to 8 bytes and reserves canary space. */
403 guint8 pad = emem_canary_pad(size);
406 #endif /* DEBUG_USE_CANARIES */
407 emem_chunk_t *free_list;
410 #ifndef SE_DEBUG_FREE
411 /* Round up to an 8 byte boundary. Make sure we have at least
412 * 8 pad bytes for our canary.
416 /* make sure we don't try to allocate too much (arbitrary limit) */
417 DISSECTOR_ASSERT(size<(EMEM_PACKET_CHUNK_SIZE>>2));
419 emem_create_chunk(&se_packet_mem.free_list);
421 /* oops, we need to allocate more memory to serve this request
422 * than we have free. move this node to the used list and try again
424 if(size>se_packet_mem.free_list->amount_free
425 #ifdef DEBUG_USE_CANARIES
/* A chunk is also retired once its canary table is full. */
426 || se_packet_mem.free_list->c_count >= EMEM_ALLOCS_PER_CHUNK
427 #endif /* DEBUG_USE_CANARIES */
430 npc=se_packet_mem.free_list;
431 se_packet_mem.free_list=se_packet_mem.free_list->next;
432 npc->next=se_packet_mem.used_list;
433 se_packet_mem.used_list=npc;
436 emem_create_chunk(&se_packet_mem.free_list);
/* Bump-pointer allocation out of the head chunk. */
438 free_list = se_packet_mem.free_list;
440 buf = free_list->buf + free_list->free_offset;
442 free_list->amount_free -= size;
443 free_list->free_offset += size;
445 #ifdef DEBUG_USE_CANARIES
/* Record the canary so se_free_all() can verify it later. */
446 cptr = (char *)buf + size - pad;
447 memcpy(cptr, &se_canary, pad);
448 free_list->canary[free_list->c_count] = cptr;
449 free_list->cmp_len[free_list->c_count] = pad;
450 free_list->c_count++;
451 #endif /* DEBUG_USE_CANARIES */
453 #else /* SE_DEBUG_FREE */
/* Debug build: one g_malloc() per allocation for Valgrind friendliness. */
456 npc=g_malloc(sizeof(emem_chunk_t));
457 npc->next=se_packet_mem.used_list;
458 npc->amount_free=size;
460 npc->buf=g_malloc(size);
462 se_packet_mem.used_list=npc;
463 #endif /* SE_DEBUG_FREE */
/* Allocate 'size' bytes of packet-lifetime memory, zero-filled. */
void* ep_alloc0(size_t size) {
	void *block = ep_alloc(size);
	memset(block, '\0', size);
	return block;
}
473 gchar* ep_strdup(const gchar* src) {
/* Duplicate a NUL-terminated string into packet-lifetime memory. */
474 guint len = strlen(src);
477 dst = strncpy(ep_alloc(len+1), src, len);
484 gchar* ep_strndup(const gchar* src, size_t len) {
/* Duplicate at most 'len' bytes of 'src' (stopping early at a NUL)
 * into packet-lifetime memory. */
485 gchar* dst = ep_alloc(len+1);
488 for (i = 0; src[i] && i < len; i++)
/* Copy 'len' raw bytes from 'src' into fresh packet-lifetime memory. */
void* ep_memdup(const void* src, size_t len) {
	void *dup = ep_alloc(len);
	return memcpy(dup, src, len);
}
500 gchar* ep_strdup_vprintf(const gchar* fmt, va_list ap) {
/* printf-style formatting into packet-lifetime memory; GLib's upper
 * bound sizes the buffer before formatting. */
507 len = g_printf_string_upper_bound(fmt, ap);
509 dst = ep_alloc(len+1);
510 g_vsnprintf (dst, len, fmt, ap2);
516 gchar* ep_strdup_printf(const gchar* fmt, ...) {
/* Varargs front end for ep_strdup_vprintf(). */
521 dst = ep_strdup_vprintf(fmt, ap);
526 gchar** ep_strsplit(const gchar* string, const gchar* sep, int max_tokens) {
/* Like g_strsplit() but with ep_ lifetime: split 'string' on 'sep' into
 * at most 'max_tokens' pieces; returns a NULL-terminated vector. */
534 enum { AT_START, IN_PAD, IN_TOKEN } state;
/* Work on a private ep copy so separators can be overwritten in place. */
542 s = splitted = ep_strdup(string);
543 str_len = strlen(splitted);
544 sep_len = strlen(sep);
546 if (max_tokens < 1) max_tokens = INT_MAX;
/* First pass: count separator occurrences to size the vector... */
551 while (tokens <= (guint)max_tokens && ( s = strstr(s,sep) )) {
554 for(i=0; i < sep_len; i++ )
561 vec = ep_alloc_array(gchar*,tokens+1);
/* ...then collect token start pointers in a second, state-machine pass. */
564 for (i=0; i< str_len; i++) {
567 switch(splitted[i]) {
572 vec[curr_tok] = &(splitted[i]);
578 switch(splitted[i]) {
585 switch(splitted[i]) {
587 vec[curr_tok] = &(splitted[i]);
596 vec[curr_tok] = NULL;
603 void* se_alloc0(size_t size) {
/* Zero-filled capture-lifetime allocation. */
604 return memset(se_alloc(size),'\0',size);
607 /* If str is NULL, just return the string "<NULL>" so that the callers don't
608 * have to bother checking it.
610 gchar* se_strdup(const gchar* src) {
619 dst = strncpy(se_alloc(len+1), src, len);
626 gchar* se_strndup(const gchar* src, size_t len) {
/* Duplicate at most 'len' bytes of 'src' into capture-lifetime memory. */
627 gchar* dst = se_alloc(len+1);
630 for (i = 0; src[i] && i < len; i++)
638 void* se_memdup(const void* src, size_t len) {
/* Copy 'len' raw bytes into capture-lifetime memory. */
639 return memcpy(se_alloc(len), src, len);
642 gchar* se_strdup_vprintf(const gchar* fmt, va_list ap) {
/* printf-style formatting into capture-lifetime memory. */
649 len = g_printf_string_upper_bound(fmt, ap);
651 dst = se_alloc(len+1);
652 g_vsnprintf (dst, len, fmt, ap2);
658 gchar* se_strdup_printf(const gchar* fmt, ...) {
/* Varargs front end for se_strdup_vprintf(). */
663 dst = se_strdup_vprintf(fmt, ap);
668 /* release all allocated memory back to the pool.
/* ep_free_all(): called once per packet; canaries are checked, chunks
 * are rewound and kept for reuse (non-debug build). */
674 #ifndef EP_DEBUG_FREE
675 #ifdef DEBUG_USE_CANARIES
677 #endif /* DEBUG_USE_CANARIES */
680 /* move all used chunks over to the free list */
681 while(ep_packet_mem.used_list){
682 npc=ep_packet_mem.used_list;
683 ep_packet_mem.used_list=ep_packet_mem.used_list->next;
684 npc->next=ep_packet_mem.free_list;
685 ep_packet_mem.free_list=npc;
688 /* clear them all out */
689 npc = ep_packet_mem.free_list;
690 while (npc != NULL) {
691 #ifndef EP_DEBUG_FREE
692 #ifdef DEBUG_USE_CANARIES
/* Verify every canary written by ep_alloc(); abort on corruption. */
693 for (i = 0; i < npc->c_count; i++) {
694 if (memcmp(npc->canary[i], &ep_canary, npc->cmp_len[i]) != 0)
695 g_error("Per-packet memory corrupted.");
698 #endif /* DEBUG_USE_CANARIES */
/* Chunks are recycled, not returned to the OS: just rewind them. */
699 npc->amount_free = npc->amount_free_init;
700 npc->free_offset = npc->free_offset_init;
702 #else /* EP_DEBUG_FREE */
/* Debug build: each allocation was a separate g_malloc(). */
703 emem_chunk_t *next = npc->next;
708 #endif /* EP_DEBUG_FREE */
715 /* release all allocated memory back to the pool.
/* se_free_all(): called when a capture is closed; also resets the
 * contents pointer of every registered persistent se tree. */
721 se_tree_t *se_tree_list;
722 #ifndef SE_DEBUG_FREE
723 #ifdef DEBUG_USE_CANARIES
725 #endif /* DEBUG_USE_CANARIES */
729 /* move all used chunks over to the free list */
730 while(se_packet_mem.used_list){
731 npc=se_packet_mem.used_list;
732 se_packet_mem.used_list=se_packet_mem.used_list->next;
733 npc->next=se_packet_mem.free_list;
734 se_packet_mem.free_list=npc;
737 /* clear them all out */
738 npc = se_packet_mem.free_list;
739 while (npc != NULL) {
740 #ifndef SE_DEBUG_FREE
741 #ifdef DEBUG_USE_CANARIES
/* Verify every canary written by se_alloc(); abort on corruption. */
742 for (i = 0; i < npc->c_count; i++) {
743 if (memcmp(npc->canary[i], &se_canary, npc->cmp_len[i]) != 0)
744 g_error("Per-session memory corrupted.");
747 #endif /* DEBUG_USE_CANARIES */
/* Chunks are recycled, not returned to the OS: just rewind them. */
748 npc->amount_free = npc->amount_free_init;
749 npc->free_offset = npc->free_offset_init;
751 #else /* SE_DEBUG_FREE */
752 emem_chunk_t *next = npc->next;
757 #endif /* SE_DEBUG_FREE */
764 /* release/reset all se allocated trees */
/* Tree headers on se_trees are malloc()ed and survive; only their
 * se-allocated node contents vanished with the pool. */
765 for(se_tree_list=se_trees;se_tree_list;se_tree_list=se_tree_list->next){
766 se_tree_list->tree=NULL;
771 ep_stack_t ep_stack_new(void) {
/* Create an empty ep-lifetime stack: a pointer to a zeroed sentinel frame. */
772 ep_stack_t s = ep_new(struct _ep_stack_frame_t*);
773 *s = ep_new0(struct _ep_stack_frame_t);
777 /* for ep_stack_t we'll keep the popped frames so we reuse them instead
778 of allocating new ones.
782 void* ep_stack_push(ep_stack_t stack, void* data) {
783 struct _ep_stack_frame_t* frame;
784 struct _ep_stack_frame_t* head = (*stack);
/* No cached frame available above the head: allocate a new one. */
789 frame = ep_new(struct _ep_stack_frame_t);
795 frame->payload = data;
801 void* ep_stack_pop(ep_stack_t stack) {
/* Pop by moving the head down; the old frame stays linked for reuse. */
803 if ((*stack)->below) {
804 (*stack) = (*stack)->below;
805 return (*stack)->above->payload;
814 void print_tree_item(se_tree_node_t *node, int level){
/* Debug helper: dump one node indented by 'level', then recurse. */
816 for(i=0;i<level;i++){
/* NOTE(review): printing pointers via (int)/%08x truncates on 64-bit
 * platforms; %p would be the portable spelling. */
819 printf("%s KEY:0x%08x node:0x%08x parent:0x%08x left:0x%08x right:0x%08x\n",node->u.rb_color==SE_TREE_RB_COLOR_BLACK?"BLACK":"RED",node->key32,(int)node,(int)node->parent,(int)node->left,(int)node->right);
821 print_tree_item(node->left,level+1);
823 print_tree_item(node->right,level+1);
826 void print_tree(se_tree_node_t *node){
/* Debug helper: dump a whole tree starting at 'node'. */
833 print_tree_item(node,0);
839 /* routines to manage se allocated red-black trees */
/* Global list of persistent se trees; se_free_all() walks it to reset
 * each tree's contents pointer after the pool is released. */
840 se_tree_t *se_trees=NULL;
843 se_tree_create(int type, char *name)
/* Create a persistent tree header (malloc()ed, survives se_free_all())
 * and link it onto se_trees.
 * NOTE(review): 'name' is stored, not copied -- caller must keep it alive. */
845 se_tree_t *tree_list;
847 tree_list=malloc(sizeof(se_tree_t));
848 tree_list->next=se_trees;
849 tree_list->type=type;
850 tree_list->tree=NULL;
851 tree_list->name=name;
860 se_tree_lookup32(se_tree_t *se_tree, guint32 key)
/* Exact-match binary search on the 32-bit key; returns the node's data. */
862 se_tree_node_t *node;
867 if(key==node->key32){
883 se_tree_lookup32_le(se_tree_t *se_tree, guint32 key)
/* Find the entry whose key is the largest one <= 'key'. */
885 se_tree_node_t *node;
895 if(key==node->key32){
917 /* If we are still at the root of the tree this means that this node
918 * is either smaller than the search key and then we return this
919 * node or else there is no smaller key available and then
930 if(node->parent->left==node){
934 /* if this is a left child and its key is smaller than
935 * the search key, then this is the node we want.
939 /* if this is a left child and its key is bigger than
940 * the search key, we have to check if any
941 * of our ancestors are smaller than the search key.
955 /* if this is the right child and its key is smaller
956 * than the search key then this is the one we want.
960 /* if this is the right child and its key is larger
961 * than the search key then our parent is the one we
964 return node->parent->data;
971 static inline se_tree_node_t *
/* Accessor for a node's parent pointer. */
972 se_tree_parent(se_tree_node_t *node)
977 static inline se_tree_node_t *
978 se_tree_grandparent(se_tree_node_t *node)
/* Parent's parent (NULL-safe via the parent check). */
980 se_tree_node_t *parent;
982 parent=se_tree_parent(node);
984 return parent->parent;
988 static inline se_tree_node_t *
989 se_tree_uncle(se_tree_node_t *node)
/* The parent's sibling, used by the red-black insert fixup. */
991 se_tree_node_t *parent, *grandparent;
993 parent=se_tree_parent(node);
997 grandparent=se_tree_parent(parent);
1001 if(parent==grandparent->left){
1002 return grandparent->right;
1004 return grandparent->left;
1007 static inline void rb_insert_case1(se_tree_t *se_tree, se_tree_node_t *node);
1008 static inline void rb_insert_case2(se_tree_t *se_tree, se_tree_node_t *node);
1011 rotate_left(se_tree_t *se_tree, se_tree_node_t *node)
/* Standard red-black left rotation around 'node'; re-seats the tree
 * root when 'node' was the root. */
1014 if(node->parent->left==node){
1015 node->parent->left=node->right;
1017 node->parent->right=node->right;
1020 se_tree->tree=node->right;
1022 node->right->parent=node->parent;
1023 node->parent=node->right;
1024 node->right=node->right->left;
1026 node->right->parent=node;
1028 node->parent->left=node;
1032 rotate_right(se_tree_t *se_tree, se_tree_node_t *node)
/* Mirror image of rotate_left(). */
1035 if(node->parent->left==node){
1036 node->parent->left=node->left;
1038 node->parent->right=node->left;
1041 se_tree->tree=node->left;
1043 node->left->parent=node->parent;
1044 node->parent=node->left;
1045 node->left=node->left->right;
1047 node->left->parent=node;
1049 node->parent->right=node;
1053 rb_insert_case5(se_tree_t *se_tree, se_tree_node_t *node)
/* Fixup case 5: node and parent are aligned (both left or both right
 * children) -- recolor and rotate about the grandparent. */
1055 se_tree_node_t *grandparent;
1056 se_tree_node_t *parent;
1058 parent=se_tree_parent(node);
1059 grandparent=se_tree_parent(parent);
1060 parent->u.rb_color=SE_TREE_RB_COLOR_BLACK;
1061 grandparent->u.rb_color=SE_TREE_RB_COLOR_RED;
1062 if( (node==parent->left) && (parent==grandparent->left) ){
1063 rotate_right(se_tree, grandparent);
1065 rotate_left(se_tree, grandparent);
1070 rb_insert_case4(se_tree_t *se_tree, se_tree_node_t *node)
/* Fixup case 4: node and parent zig-zag -- rotate the parent so the
 * aligned case 5 applies, then run it. */
1072 se_tree_node_t *grandparent;
1073 se_tree_node_t *parent;
1075 parent=se_tree_parent(node);
1076 grandparent=se_tree_parent(parent);
1080 if( (node==parent->right) && (parent==grandparent->left) ){
1081 rotate_left(se_tree, parent);
1083 } else if( (node==parent->left) && (parent==grandparent->right) ){
1084 rotate_right(se_tree, parent);
1087 rb_insert_case5(se_tree, node);
1091 rb_insert_case3(se_tree_t *se_tree, se_tree_node_t *node)
/* Fixup case 3: red uncle -- recolor parent/uncle black, grandparent
 * red, and restart the fixup from the grandparent. */
1093 se_tree_node_t *grandparent;
1094 se_tree_node_t *parent;
1095 se_tree_node_t *uncle;
1097 uncle=se_tree_uncle(node);
1098 if(uncle && (uncle->u.rb_color==SE_TREE_RB_COLOR_RED)){
1099 parent=se_tree_parent(node);
1100 parent->u.rb_color=SE_TREE_RB_COLOR_BLACK;
1101 uncle->u.rb_color=SE_TREE_RB_COLOR_BLACK;
1102 grandparent=se_tree_grandparent(node);
1103 grandparent->u.rb_color=SE_TREE_RB_COLOR_RED;
1104 rb_insert_case1(se_tree, grandparent);
1106 rb_insert_case4(se_tree, node);
1111 rb_insert_case2(se_tree_t *se_tree, se_tree_node_t *node)
/* Fixup case 2: black parent -- tree invariants still hold, done. */
1113 se_tree_node_t *parent;
1115 parent=se_tree_parent(node);
1116 /* parent is always non-NULL here */
1117 if(parent->u.rb_color==SE_TREE_RB_COLOR_BLACK){
1120 rb_insert_case3(se_tree, node);
1124 rb_insert_case1(se_tree_t *se_tree, se_tree_node_t *node)
/* Fixup case 1: node is the root -- paint it black; otherwise case 2. */
1126 se_tree_node_t *parent;
1128 parent=se_tree_parent(node);
1130 node->u.rb_color=SE_TREE_RB_COLOR_BLACK;
1133 rb_insert_case2(se_tree, node);
1136 /* insert a new node in the tree. if this node matches an already existing node
1137 * then just replace the data for that node */
1139 se_tree_insert32(se_tree_t *se_tree, guint32 key, void *data)
1141 se_tree_node_t *node;
1145 /* is this the first node ?*/
/* Empty tree: the new node becomes the (black) root. */
1147 node=se_alloc(sizeof(se_tree_node_t));
1148 switch(se_tree->type){
1149 case SE_TREE_TYPE_RED_BLACK:
1150 node->u.rb_color=SE_TREE_RB_COLOR_BLACK;
1162 /* it was not the new root so walk the tree until we find where to
1163 * insert this new leaf.
1166 /* this node already exists, so just replace the data pointer*/
1167 if(key==node->key32){
1171 if(key<node->key32) {
1173 /* new node to the left */
1174 se_tree_node_t *new_node;
1175 new_node=se_alloc(sizeof(se_tree_node_t));
1176 node->left=new_node;
1177 new_node->parent=node;
1178 new_node->left=NULL;
1179 new_node->right=NULL;
1180 new_node->key32=key;
1181 new_node->data=data;
1188 if(key>node->key32) {
1190 /* new node to the right */
1191 se_tree_node_t *new_node;
1192 new_node=se_alloc(sizeof(se_tree_node_t));
1193 node->right=new_node;
1194 new_node->parent=node;
1195 new_node->left=NULL;
1196 new_node->right=NULL;
1197 new_node->key32=key;
1198 new_node->data=data;
1207 /* node will now point to the newly created node */
/* Insert as red, then restore the red-black invariants. */
1208 switch(se_tree->type){
1209 case SE_TREE_TYPE_RED_BLACK:
1210 node->u.rb_color=SE_TREE_RB_COLOR_RED;
1211 rb_insert_case1(se_tree, node);
/* Like se_tree_insert32(), but if the key already exists its data is
 * returned unchanged; otherwise func(ud) supplies the new node's data. */
1216 static void* lookup_or_insert32(se_tree_t *se_tree, guint32 key, void*(*func)(void*),void* ud) {
1217 se_tree_node_t *node;
1221 /* is this the first node ?*/
/* Empty tree: the new node becomes the (black) root. */
1223 node=se_alloc(sizeof(se_tree_node_t));
1224 switch(se_tree->type){
1225 case SE_TREE_TYPE_RED_BLACK:
1226 node->u.rb_color=SE_TREE_RB_COLOR_BLACK;
1233 node->data= func(ud);
1238 /* it was not the new root so walk the tree until we find where to
1239 * insert this new leaf.
1242 /* this node already exists, so just return the data pointer*/
1243 if(key==node->key32){
1246 if(key<node->key32) {
1248 /* new node to the left */
1249 se_tree_node_t *new_node;
1250 new_node=se_alloc(sizeof(se_tree_node_t));
1251 node->left=new_node;
1252 new_node->parent=node;
1253 new_node->left=NULL;
1254 new_node->right=NULL;
1255 new_node->key32=key;
1256 new_node->data= func(ud);
1263 if(key>node->key32) {
1265 /* new node to the right */
1266 se_tree_node_t *new_node;
1267 new_node=se_alloc(sizeof(se_tree_node_t));
1268 node->right=new_node;
1269 new_node->parent=node;
1270 new_node->left=NULL;
1271 new_node->right=NULL;
1272 new_node->key32=key;
1273 new_node->data= func(ud);
1282 /* node will now point to the newly created node */
/* Insert as red, then restore the red-black invariants. */
1283 switch(se_tree->type){
1284 case SE_TREE_TYPE_RED_BLACK:
1285 node->u.rb_color=SE_TREE_RB_COLOR_RED;
1286 rb_insert_case1(se_tree, node);
1293 /* When the se data is released, this entire tree will disappear as if it
1294 * never existed including all metadata associated with the tree.
1297 se_tree_create_non_persistent(int type, char *name)
/* Like se_tree_create() but the header itself is se-allocated and is
 * NOT linked onto se_trees -- it dies with the pool. */
1299 se_tree_t *tree_list;
1301 tree_list=se_alloc(sizeof(se_tree_t));
1302 tree_list->next=NULL;
1303 tree_list->type=type;
1304 tree_list->tree=NULL;
1305 tree_list->name=name;
1310 static void* create_sub_tree(void* d) {
/* lookup_or_insert32() callback: build a sub-tree for a compound key. */
1311 se_tree_t *se_tree = d;
1312 return se_tree_create_non_persistent(se_tree->type, "subtree");
1315 /* insert a new node in the tree. if this node matches an already existing node
1316 * then just replace the data for that node */
1319 se_tree_insert32_array(se_tree_t *se_tree, se_tree_key_t *key, void *data)
/* Compound-key insert: consume key[] one guint32 at a time, descending
 * through chained sub-trees; the key array ends with a zero-length entry. */
1321 se_tree_t *next_tree;
/* 100 is an arbitrary sanity cap on a single key segment's length. */
1323 if((key[0].length<1)||(key[0].length>100)){
1324 DISSECTOR_ASSERT_NOT_REACHED();
/* Exactly one word left: plain 32-bit insert at this level. */
1326 if((key[0].length==1)&&(key[1].length==0)){
1327 se_tree_insert32(se_tree, *key[0].key, data);
1331 next_tree=lookup_or_insert32(se_tree, *key[0].key, create_sub_tree, se_tree);
1333 if(key[0].length==1){
1339 se_tree_insert32_array(next_tree, key, data);
1343 se_tree_lookup32_array(se_tree_t *se_tree, se_tree_key_t *key)
/* Compound-key lookup matching se_tree_insert32_array(). */
1345 se_tree_t *next_tree;
1347 if((key[0].length<1)||(key[0].length>100)){
1348 DISSECTOR_ASSERT_NOT_REACHED();
1350 if((key[0].length==1)&&(key[1].length==0)){
1351 return se_tree_lookup32(se_tree, *key[0].key);
1353 next_tree=se_tree_lookup32(se_tree, *key[0].key);
1357 if(key[0].length==1){
1363 return se_tree_lookup32_array(next_tree, key);
1367 void se_tree_insert_string(se_string_hash_t* se_tree, const gchar* k, void* v) {
/* Store 'v' under string key 'k': whole guint32 words of the string,
 * plus a final word assembled from the 1-3 residual bytes. */
1368 guint32 len = strlen(k);
1369 guint32 div = (len-1)/4;
1370 guint32 residual = 0;
1371 se_tree_key_t key[] = {
/* NOTE(review): reading the string through a guint32* assumes suitable
 * alignment and makes the key host-byte-order dependent -- confirm this
 * is acceptable for an in-memory-only structure. */
1373 {div,(guint32*)(&k[0])},
1379 key[1].length = key[2].length;
1380 key[1].key = key[2].key;
/* Pack the trailing bytes (fallthrough by residual count). */
1389 residual |= ( k[div+3] << 24 );
1391 residual |= ( k[div+2] << 16 );
1393 residual |= ( k[div+1] << 8 );
1399 se_tree_insert32_array(se_tree,key,v);
1402 void* se_tree_lookup_string(se_string_hash_t* se_tree, const gchar* k) {
/* Lookup counterpart of se_tree_insert_string(); must build the
 * compound key in exactly the same way. */
1403 guint32 len = strlen(k);
1404 guint32 div = (len-1)/4;
1405 guint32 residual = 0;
1406 se_tree_key_t key[] = {
1408 {div,(guint32*)(&k[0])},
1414 key[1].length = key[2].length;
1415 key[1].key = key[2].key;
1424 residual |= k[div+3] << 24;
1426 residual |= k[div+2] << 16;
1428 residual |= k[div+1] << 8;
1434 return se_tree_lookup32_array(se_tree, key);