/* Rename set_string_hash... into se_tree_xxx_string, as proposed by Ronnie, for consistency.
 * [obnox/wireshark/wip.git] / epan / emem.c
 */
1 /* emem.c
2  * Ethereal memory management and garbage collection functions
3  * Ronnie Sahlberg 2005
4  *
5  * $Id$
6  *
7  * Ethereal - Network traffic analyzer
8  * By Gerald Combs <gerald@ethereal.com>
9  * Copyright 1998 Gerald Combs
10  *
11  * This program is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU General Public License
13  * as published by the Free Software Foundation; either version 2
14  * of the License, or (at your option) any later version.
15  *
16  * This program is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19  * GNU General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this program; if not, write to the Free Software
23  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
24  */
25 #ifdef HAVE_CONFIG_H
26 #include "config.h"
27 #endif
28
29 #include <stdio.h>
30 #include <stdlib.h>
31 #include <string.h>
32 #include <stdarg.h>
33
34 #include <time.h>
35 #ifdef HAVE_SYS_TIME_H
36 #include <sys/time.h>
37 #endif
38
39 #ifdef HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42
43 #ifdef _WIN32
44 #include <windows.h>    /* VirtualAlloc, VirtualProtect */
45 #include <process.h>    /* getpid */
46 #endif
47
48 #include <glib.h>
49 #include <proto.h>
50 #include "emem.h"
51 #include <wiretap/file_util.h>
52
53 /* Add guard pages at each end of our allocated memory */
54 #if defined(HAVE_SYSCONF) && defined(HAVE_MMAP) && defined(HAVE_MPROTECT) && defined(HAVE_STDINT_H)
55 #include <stdint.h>
56 #include <sys/types.h>
57 #include <sys/mman.h>
58 #define USE_GUARD_PAGES 1
59 #endif
60
61 /* When required, allocate more memory from the OS in this size chunks */
62 #define EMEM_PACKET_CHUNK_SIZE 10485760
63
64 /* The maximum number of allocations per chunk */
65 #define EMEM_ALLOCS_PER_CHUNK (EMEM_PACKET_CHUNK_SIZE / 512)
66
67 /*
68  * Tools like Valgrind and ElectricFence don't work well with memchunks.
69  * Uncomment the defines below to make {ep|se}_alloc() allocate each
70  * object individually.
71  */
72 /* #define EP_DEBUG_FREE 1 */
73 /* #define SE_DEBUG_FREE 1 */
74
/* GLib 2 random-number state used to generate canary bytes */
#if GLIB_MAJOR_VERSION >= 2
GRand   *rand_state = NULL;
#endif

/* A canary is 8..15 random bytes written directly after every allocation;
 * emem_canary_pad() decides how many bytes a given allocation gets. */
#define EMEM_CANARY_SIZE 8
#define EMEM_CANARY_DATA_SIZE (EMEM_CANARY_SIZE * 2 - 1)
/* Random canary patterns, one per pool; filled once by emem_canary(). */
guint8  ep_canary[EMEM_CANARY_DATA_SIZE], se_canary[EMEM_CANARY_DATA_SIZE];

/* One contiguous slab of allocator memory. */
typedef struct _emem_chunk_t {
        struct _emem_chunk_t *next;
        unsigned int    amount_free_init;       /* usable bytes when the chunk is empty */
        unsigned int    amount_free;            /* usable bytes still unallocated */
        unsigned int    free_offset_init;       /* offset of first usable byte in buf */
        unsigned int    free_offset;            /* offset of the next allocation */
        char *buf;
#if ! defined(EP_DEBUG_FREE) && ! defined(SE_DEBUG_FREE)
        unsigned int    c_count;                        /* number of canaries recorded */
        void            *canary[EMEM_ALLOCS_PER_CHUNK]; /* where each canary was written */
        guint8          cmp_len[EMEM_ALLOCS_PER_CHUNK]; /* length of each canary */
#endif
} emem_chunk_t;

/* An allocation pool: chunks with room left, and exhausted chunks. */
typedef struct _emem_header_t {
  emem_chunk_t *free_list;
  emem_chunk_t *used_list;
} emem_header_t;

/* the per-packet (ep_) and per-capture (se_) pools */
static emem_header_t ep_packet_mem;
static emem_header_t se_packet_mem;
104
105 /*
106  * Set a canary value to be placed between memchunks.
107  */
108
109 void
110 emem_canary(guint8 *canary) {
111         int i;
112
113         /* First, use GLib's random function if we have it */
114 #if GLIB_MAJOR_VERSION >= 2
115         if (rand_state == NULL) {
116                 rand_state = g_rand_new();
117         }
118         for (i = 0; i < EMEM_CANARY_DATA_SIZE; i ++) {
119                 canary[i] = (guint8) g_rand_int(rand_state);
120         }
121         return;
122 #else
123         FILE *fp;
124         size_t sz;
125         /* Try /dev/urandom */
126         if ((fp = eth_fopen("/dev/urandom", "r")) != NULL) {
127                 sz = fread(canary, EMEM_CANARY_DATA_SIZE, 1, fp);
128                 fclose(fp);
129                 if (sz == EMEM_CANARY_SIZE) {
130                         return;
131                 }
132         }
133
134         /* Our last resort */
135         srandom(time(NULL) | getpid());
136         for (i = 0; i < EMEM_CANARY_DATA_SIZE; i ++) {
137                 canary[i] = (guint8) random();
138         }
139         return;
140 #endif /* GLIB_MAJOR_VERSION >= 2 */
141 }
142
143 /*
144  * Given an allocation size, return the amount of padding needed for
145  * the canary value.
146  */
147 static guint8
148 emem_canary_pad (size_t allocation) {
149         guint8 pad;
150
151         pad = EMEM_CANARY_SIZE - (allocation % EMEM_CANARY_SIZE);
152         if (pad < EMEM_CANARY_SIZE)
153                 pad += EMEM_CANARY_SIZE;
154
155         return pad;
156 }
157
158 /* Initialize the packet-lifetime memory allocation pool.
159  * This function should be called only once when Ethereal or Tethereal starts
160  * up.
161  */
162 void
163 ep_init_chunk(void)
164 {
165         ep_packet_mem.free_list=NULL;
166         ep_packet_mem.used_list=NULL;
167
168         emem_canary(ep_canary);
169 }
170 /* Initialize the capture-lifetime memory allocation pool.
171  * This function should be called only once when Ethereal or Tethereal starts
172  * up.
173  */
174 void
175 se_init_chunk(void)
176 {
177         se_packet_mem.free_list=NULL;
178         se_packet_mem.used_list=NULL;
179
180         emem_canary(se_canary);
181 }
182
183 static void
184 emem_create_chunk(emem_chunk_t **free_list) {
185 #if defined (_WIN32)
186         SYSTEM_INFO sysinfo;
187         int pagesize;
188         BOOL ret;
189         char *buf_end, *prot1, *prot2;
190         DWORD oldprot;
191 #elif defined(USE_GUARD_PAGES)
192         intptr_t pagesize = sysconf(_SC_PAGESIZE);
193         int ret;
194         char *buf_end, *prot1, *prot2;
195 #endif
196         /* we dont have any free data, so we must allocate a new one */
197         if(!*free_list){
198                 emem_chunk_t *npc;
199                 npc = g_malloc(sizeof(emem_chunk_t));
200                 npc->next = NULL;
201                 npc->c_count = 0;
202                 *free_list = npc;
203 #if defined (_WIN32)
204                 /*
205                  * MSDN documents VirtualAlloc/VirtualProtect at
206                  * http://msdn.microsoft.com/library/en-us/memory/base/creating_guard_pages.asp
207                  */
208                 GetSystemInfo(&sysinfo);
209                 pagesize = sysinfo.dwPageSize;
210
211                 /* XXX - is MEM_COMMIT|MEM_RESERVE correct? */
212                 npc->buf = VirtualAlloc(NULL, EMEM_PACKET_CHUNK_SIZE,
213                         MEM_COMMIT|MEM_RESERVE, PAGE_READWRITE);
214                 g_assert(npc->buf != NULL);
215                 buf_end = npc->buf + EMEM_PACKET_CHUNK_SIZE;
216
217                 /* Align our guard pages on page-sized boundaries */
218                 prot1 = (char *) ((((int) npc->buf + pagesize - 1) / pagesize) * pagesize);
219                 prot2 = (char *) ((((int) buf_end - (1 * pagesize)) / pagesize) * pagesize);
220
221                 ret = VirtualProtect(prot1, pagesize, PAGE_NOACCESS, &oldprot);
222                 g_assert(ret == TRUE);
223                 ret = VirtualProtect(prot2, pagesize, PAGE_NOACCESS, &oldprot);
224                 g_assert(ret == TRUE);
225
226                 npc->amount_free_init = prot2 - prot1 - pagesize;
227                 npc->amount_free = npc->amount_free_init;
228                 npc->free_offset_init = (prot1 - npc->buf) + pagesize;
229                 npc->free_offset = npc->free_offset_init;
230
231 #elif defined(USE_GUARD_PAGES)
232                 npc->buf = mmap(NULL, EMEM_PACKET_CHUNK_SIZE,
233                         PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
234                 g_assert(npc->buf != MAP_FAILED);
235                 buf_end = npc->buf + EMEM_PACKET_CHUNK_SIZE;
236
237                 /* Align our guard pages on page-sized boundaries */
238                 prot1 = (char *) ((((intptr_t) npc->buf + pagesize - 1) / pagesize) * pagesize);
239                 prot2 = (char *) ((((intptr_t) buf_end - (1 * pagesize)) / pagesize) * pagesize);
240                 ret = mprotect(prot1, pagesize, PROT_NONE);
241                 g_assert(ret != -1);
242                 ret = mprotect(prot2, pagesize, PROT_NONE);
243                 g_assert(ret != -1);
244
245                 npc->amount_free_init = prot2 - prot1 - pagesize;
246                 npc->amount_free = npc->amount_free_init;
247                 npc->free_offset_init = (prot1 - npc->buf) + pagesize;
248                 npc->free_offset = npc->free_offset_init;
249
250 #else /* Is there a draft in here? */
251                 npc->amount_free_init = EMEM_PACKET_CHUNK_SIZE;
252                 npc->amount_free = npc->amount_free_init;
253                 npc->free_offset_init = 0;
254                 npc->free_offset = npc->free_offset_init;
255                 npc->buf = g_malloc(EMEM_PACKET_CHUNK_SIZE);
256 #endif /* USE_GUARD_PAGES */
257         }
258 }
259
/* allocate 'size' amount of memory with an allocation lifetime until the
 * next packet.
 */
void *
ep_alloc(size_t size)
{
        void *buf, *cptr;
        guint8 pad = emem_canary_pad(size);
        emem_chunk_t *free_list;

#ifndef EP_DEBUG_FREE
        /* Round up to an 8 byte boundary.  Make sure we have at least
         * 8 pad bytes for our canary.
         */
        size += pad;

        /* make sure we dont try to allocate too much (arbitrary limit) */
        DISSECTOR_ASSERT(size<(EMEM_PACKET_CHUNK_SIZE>>2));

        /* make sure the free list has a head chunk we can inspect */
        emem_create_chunk(&ep_packet_mem.free_list);

        /* oops, we need to allocate more memory to serve this request
         * than we have free. move this node to the used list and try again
         */
        if(size>ep_packet_mem.free_list->amount_free || ep_packet_mem.free_list->c_count >= EMEM_ALLOCS_PER_CHUNK){
                emem_chunk_t *npc;
                npc=ep_packet_mem.free_list;
                ep_packet_mem.free_list=ep_packet_mem.free_list->next;
                npc->next=ep_packet_mem.used_list;
                ep_packet_mem.used_list=npc;
        }

        /* retiring the head may have left the free list empty */
        emem_create_chunk(&ep_packet_mem.free_list);

        free_list = ep_packet_mem.free_list;

        /* carve the allocation (user data + canary pad) off the head chunk */
        buf = free_list->buf + free_list->free_offset;

        free_list->amount_free -= size;
        free_list->free_offset += size;

        /* write the canary right after the user data and remember where it
         * is so ep_free_all() can verify it was not overwritten */
        cptr = (char *)buf + size - pad;
        memcpy(cptr, &ep_canary, pad);
        free_list->canary[free_list->c_count] = cptr;
        free_list->cmp_len[free_list->c_count] = pad;
        free_list->c_count++;

#else /* EP_DEBUG_FREE */
        /* debug mode: one malloc per allocation so Valgrind/EFence can
         * see each object individually */
        emem_chunk_t *npc;

        npc=g_malloc(sizeof(emem_chunk_t));
        npc->next=ep_packet_mem.used_list;
        npc->amount_free=size;
        npc->free_offset=0;
        npc->buf=g_malloc(size);
        buf = npc->buf;
        ep_packet_mem.used_list=npc;
#endif /* EP_DEBUG_FREE */

        return buf;
}
/* allocate 'size' amount of memory with an allocation lifetime until the
 * next capture.
 */
void *
se_alloc(size_t size)
{
        void *buf, *cptr;
        guint8 pad = emem_canary_pad(size);
        emem_chunk_t *free_list;

#ifndef SE_DEBUG_FREE
        /* Round up to an 8 byte boundary.  Make sure we have at least
         * 8 pad bytes for our canary.
         */
        size += pad;

        /* make sure we dont try to allocate too much (arbitrary limit) */
        DISSECTOR_ASSERT(size<(EMEM_PACKET_CHUNK_SIZE>>2));

        /* make sure the free list has a head chunk we can inspect */
        emem_create_chunk(&se_packet_mem.free_list);

        /* oops, we need to allocate more memory to serve this request
         * than we have free. move this node to the used list and try again
         */
        if(size>se_packet_mem.free_list->amount_free || se_packet_mem.free_list->c_count >= EMEM_ALLOCS_PER_CHUNK){
                emem_chunk_t *npc;
                npc=se_packet_mem.free_list;
                se_packet_mem.free_list=se_packet_mem.free_list->next;
                npc->next=se_packet_mem.used_list;
                se_packet_mem.used_list=npc;
        }

        /* retiring the head may have left the free list empty */
        emem_create_chunk(&se_packet_mem.free_list);

        free_list = se_packet_mem.free_list;

        /* carve the allocation (user data + canary pad) off the head chunk */
        buf = free_list->buf + free_list->free_offset;

        free_list->amount_free -= size;
        free_list->free_offset += size;

        /* write the canary right after the user data and remember where it
         * is so se_free_all() can verify it was not overwritten */
        cptr = (char *)buf + size - pad;
        memcpy(cptr, &se_canary, pad);
        free_list->canary[free_list->c_count] = cptr;
        free_list->cmp_len[free_list->c_count] = pad;
        free_list->c_count++;

#else /* SE_DEBUG_FREE */
        /* debug mode: one malloc per allocation so Valgrind/EFence can
         * see each object individually */
        emem_chunk_t *npc;

        npc=g_malloc(sizeof(emem_chunk_t));
        npc->next=se_packet_mem.used_list;
        npc->amount_free=size;
        npc->free_offset=0;
        npc->buf=g_malloc(size);
        buf = npc->buf;
        se_packet_mem.used_list=npc;
#endif /* SE_DEBUG_FREE */

        return buf;
}
382
383
/* Allocate 'size' bytes of packet-lifetime memory, zero-filled. */
void* ep_alloc0(size_t size) {
        void *p = ep_alloc(size);
        memset(p, '\0', size);
        return p;
}
387
388 gchar* ep_strdup(const gchar* src) {
389         guint len = strlen(src);
390         gchar* dst;
391
392         dst = strncpy(ep_alloc(len+1), src, len);
393
394         dst[len] = '\0';
395
396         return dst;
397 }
398
399 gchar* ep_strndup(const gchar* src, size_t len) {
400         gchar* dst = ep_alloc(len+1);
401         guint i;
402
403         for (i = 0; src[i] && i < len; i++)
404                 dst[i] = src[i];
405
406         dst[i] = '\0';
407
408         return dst;
409 }
410
/* Packet-lifetime copy of 'len' raw bytes starting at 'src'. */
void* ep_memdup(const void* src, size_t len) {
        void *dst = ep_alloc(len);
        return memcpy(dst, src, len);
}
414
/* vprintf-style formatting into packet-lifetime memory. */
gchar* ep_strdup_vprintf(const gchar* fmt, va_list ap) {
        va_list ap2;
        guint len;
        gchar* dst;

        /* the arg list is consumed twice: once to size, once to format */
        G_VA_COPY(ap2, ap);

        /* upper bound on the formatted length, including the NUL */
        len = g_printf_string_upper_bound(fmt, ap);

        dst = ep_alloc(len+1);
        g_vsnprintf (dst, len, fmt, ap2);
        va_end(ap2);

        return dst;
}
430
431 gchar* ep_strdup_printf(const gchar* fmt, ...) {
432         va_list ap;
433         gchar* dst;
434
435         va_start(ap,fmt);
436         dst = ep_strdup_vprintf(fmt, ap);
437         va_end(ap);
438         return dst;
439 }
440
/* Split 'string' on separator 'sep' into at most 'max_tokens' tokens,
 * all storage being packet-lifetime.  Returns a NULL-terminated vector of
 * pointers into one modified copy of 'string', or NULL if 'string' or
 * 'sep' is NULL/empty.  Runs of adjacent separators produce no empty
 * tokens - they are skipped.
 */
gchar** ep_strsplit(const gchar* string, const gchar* sep, int max_tokens) {
        gchar* splitted;
        gchar* s;
        guint tokens;
        guint str_len;
        guint sep_len;
        guint i;
        gchar** vec;
        enum { AT_START, IN_PAD, IN_TOKEN } state;
        guint curr_tok = 0;

        if ( ! string
                 || ! sep
                 || ! sep[0])
                return NULL;

        s = splitted = ep_strdup(string);
        str_len = strlen(splitted);
        sep_len = strlen(sep);

        if (max_tokens < 1) max_tokens = INT_MAX;

        /* Pass 1: overwrite each separator occurrence with NULs and count
         * an upper bound on the number of tokens. */
        tokens = 1;


        while (tokens <= (guint)max_tokens && ( s = strstr(s,sep) )) {
                tokens++;

                for(i=0; i < sep_len; i++ )
                        s[i] = '\0';

                s += sep_len;

        }

        /* Pass 2: walk the NUL-punched copy; each run of non-NUL
         * characters becomes one token. */
        vec = ep_alloc_array(gchar*,tokens+1);
        state = AT_START;

        for (i=0; i< str_len; i++) {
                switch(state) {
                        case AT_START:
                                switch(splitted[i]) {
                                        case '\0':
                                                state  = IN_PAD;
                                                continue;
                                        default:
                                                vec[curr_tok] = &(splitted[i]);
                                                curr_tok++;
                                                state = IN_TOKEN;
                                                continue;
                                }
                        case IN_TOKEN:
                                switch(splitted[i]) {
                                        case '\0':
                                                state = IN_PAD;
                                        /* fallthrough: keep scanning */
                                        default:
                                                continue;
                                }
                        case IN_PAD:
                                switch(splitted[i]) {
                                        default:
                                                vec[curr_tok] = &(splitted[i]);
                                                curr_tok++;
                                                state = IN_TOKEN;
                                        /* fallthrough: keep scanning */
                                        case '\0':
                                                continue;
                                }
                }
        }

        /* NULL-terminate the vector for the caller */
        vec[curr_tok] = NULL;

        return vec;
}
515
516
517
/* Allocate 'size' bytes of capture-lifetime memory, zero-filled. */
void* se_alloc0(size_t size) {
        void *p = se_alloc(size);
        memset(p, '\0', size);
        return p;
}
521
522 /* If str is NULL, just return the string "<NULL>" so that the callers dont
523  * have to bother checking it.
524  */
525 gchar* se_strdup(const gchar* src) {
526         guint len;
527         gchar* dst;
528
529         if(!src){
530                 return "<NULL>";
531         }
532
533         len = strlen(src);
534         dst = strncpy(se_alloc(len+1), src, len);
535
536         dst[len] = '\0';
537
538         return dst;
539 }
540
541 gchar* se_strndup(const gchar* src, size_t len) {
542         gchar* dst = se_alloc(len+1);
543         guint i;
544
545         for (i = 0; src[i] && i < len; i++)
546                 dst[i] = src[i];
547
548         dst[i] = '\0';
549
550         return dst;
551 }
552
/* Capture-lifetime copy of 'len' raw bytes starting at 'src'. */
void* se_memdup(const void* src, size_t len) {
        void *dst = se_alloc(len);
        return memcpy(dst, src, len);
}
556
/* vprintf-style formatting into capture-lifetime memory. */
gchar* se_strdup_vprintf(const gchar* fmt, va_list ap) {
        va_list ap2;
        guint len;
        gchar* dst;

        /* the arg list is consumed twice: once to size, once to format */
        G_VA_COPY(ap2, ap);

        /* upper bound on the formatted length, including the NUL */
        len = g_printf_string_upper_bound(fmt, ap);

        dst = se_alloc(len+1);
        g_vsnprintf (dst, len, fmt, ap2);
        va_end(ap2);

        return dst;
}
572
573 gchar* se_strdup_printf(const gchar* fmt, ...) {
574         va_list ap;
575         gchar* dst;
576
577         va_start(ap,fmt);
578         dst = se_strdup_vprintf(fmt, ap);
579         va_end(ap);
580         return dst;
581 }
582
/* release all allocated memory back to the pool.
 */
void
ep_free_all(void)
{
        emem_chunk_t *npc;
        guint i;

        /* move all used chunks over to the free list */
        while(ep_packet_mem.used_list){
                npc=ep_packet_mem.used_list;
                ep_packet_mem.used_list=ep_packet_mem.used_list->next;
                npc->next=ep_packet_mem.free_list;
                ep_packet_mem.free_list=npc;
        }

        /* clear them all out */
        npc = ep_packet_mem.free_list;
        while (npc != NULL) {
#ifndef EP_DEBUG_FREE
                /* verify every canary before recycling the chunk; a
                 * mismatch means some code wrote past the end of an
                 * ep_alloc()ed block */
                for (i = 0; i < npc->c_count; i++) {
                        if (memcmp(npc->canary[i], &ep_canary, npc->cmp_len[i]) != 0)
                                g_error("Per-packet memory corrupted.");
                }
                /* reset the chunk to its pristine (empty) state */
                npc->c_count = 0;
                npc->amount_free = npc->amount_free_init;
                npc->free_offset = npc->free_offset_init;
                npc = npc->next;
#else /* EP_DEBUG_FREE */
                /* debug mode used one malloc per allocation; really free
                 * them so Valgrind/EFence can track the memory */
                emem_chunk_t *next = npc->next;

                g_free(npc->buf);
                g_free(npc);
                npc = next;
#endif /* EP_DEBUG_FREE */
        }

#ifdef EP_DEBUG_FREE
        ep_init_chunk();
#endif
}
/* release all allocated memory back to the pool.
 */
void
se_free_all(void)
{
        emem_chunk_t *npc;
        guint i;
        se_tree_t *se_tree_list;


        /* move all used chunks over to the free list */
        while(se_packet_mem.used_list){
                npc=se_packet_mem.used_list;
                se_packet_mem.used_list=se_packet_mem.used_list->next;
                npc->next=se_packet_mem.free_list;
                se_packet_mem.free_list=npc;
        }

        /* clear them all out */
        npc = se_packet_mem.free_list;
        while (npc != NULL) {
#ifndef SE_DEBUG_FREE
                /* verify every canary before recycling the chunk; a
                 * mismatch means some code wrote past the end of an
                 * se_alloc()ed block */
                for (i = 0; i < npc->c_count; i++) {
                        if (memcmp(npc->canary[i], &se_canary, npc->cmp_len[i]) != 0)
                                g_error("Per-session memory corrupted.");
                }
                /* reset the chunk to its pristine (empty) state */
                npc->c_count = 0;
                npc->amount_free = npc->amount_free_init;
                npc->free_offset = npc->free_offset_init;
                npc = npc->next;
#else /* SE_DEBUG_FREE */
                /* debug mode used one malloc per allocation; really free
                 * them so Valgrind/EFence can track the memory */
                emem_chunk_t *next = npc->next;

                g_free(npc->buf);
                g_free(npc);
                npc = next;
#endif /* SE_DEBUG_FREE */
        }

#ifdef SE_DEBUG_FREE
                se_init_chunk();
#endif

        /* release/reset all se allocated trees */
        for(se_tree_list=se_trees;se_tree_list;se_tree_list=se_tree_list->next){
                se_tree_list->tree=NULL;
        }
}
672
673
674 ep_stack_t ep_stack_new(void) {
675     ep_stack_t s = ep_new(struct _ep_stack_frame_t*);
676     *s = ep_new0(struct _ep_stack_frame_t);
677     return s;
678 }
679
680 /*  for ep_stack_t we'll keep the popped frames so we reuse them instead
681 of allocating new ones.
682 */
683
684
685 void* ep_stack_push(ep_stack_t stack, void* data) {
686     struct _ep_stack_frame_t* frame;
687     struct _ep_stack_frame_t* head = (*stack);
688
689     if (head->above) {
690         frame = head->above;
691     } else {
692        frame = ep_new(struct _ep_stack_frame_t);
693        head->above = frame;
694        frame->below = head;
695        frame->above = NULL;
696     }
697
698     frame->payload = data;
699     (*stack) = frame;
700
701     return data;
702 }
703
704 void* ep_stack_pop(ep_stack_t stack) {
705
706     if ((*stack)->below) {
707         (*stack) = (*stack)->below;
708         return (*stack)->above->payload;
709     } else {
710         return NULL;
711     }
712 }
713
714
715
716
717
/* routines to manage se allocated red-black trees */
/* Head of the linked list of all capture-lifetime trees; se_free_all()
 * walks this list to reset every tree when the capture is released. */
se_tree_t *se_trees=NULL;
720
721 se_tree_t *
722 se_tree_create(int type)
723 {
724         se_tree_t *tree_list;
725
726         tree_list=malloc(sizeof(se_tree_t));
727         tree_list->next=se_trees;
728         tree_list->type=type;
729         tree_list->tree=NULL;
730         se_trees=tree_list;
731
732         return tree_list;
733 }
734
735
736
737 void *
738 se_tree_lookup32(se_tree_t *se_tree, guint32 key)
739 {
740         se_tree_node_t *node;
741
742         node=se_tree->tree;
743
744         while(node){
745                 if(key==node->key32){
746                         return node->data;
747                 }
748                 if(key<node->key32){
749                         node=node->left;
750                         continue;
751                 }
752                 if(key>node->key32){
753                         node=node->right;
754                         continue;
755                 }
756         }
757         return NULL;
758 }
759
760
/* Parent of 'node'; NULL when 'node' is the root. */
static inline se_tree_node_t *
se_tree_parent(se_tree_node_t *node)
{
        return node->parent;
}
766
767 static inline se_tree_node_t *
768 se_tree_grandparent(se_tree_node_t *node)
769 {
770         se_tree_node_t *parent;
771
772         parent=se_tree_parent(node);
773         if(parent){
774                 return parent->parent;
775         }
776         return NULL;
777 }
778 static inline se_tree_node_t *
779 se_tree_uncle(se_tree_node_t *node)
780 {
781         se_tree_node_t *parent, *grandparent;
782
783         parent=se_tree_parent(node);
784         if(!parent){
785                 return NULL;
786         }
787         grandparent=se_tree_parent(parent);
788         if(!grandparent){
789                 return NULL;
790         }
791         if(parent==grandparent->left){
792                 return grandparent->right;
793         }
794         return grandparent->left;
795 }
796
797 static inline void rb_insert_case1(se_tree_t *se_tree, se_tree_node_t *node);
798 static inline void rb_insert_case2(se_tree_t *se_tree, se_tree_node_t *node);
799
#ifdef REMOVED
/* Debug helper: dump one node and, recursively, its subtrees with
 * indentation proportional to depth.  Compiled out in normal builds. */
void print_tree_item(se_tree_node_t *node, int level){
        int i;
        for(i=0;i<level;i++){
                printf("   ");
        }
        printf("%s  KEY:0x%08x node:0x%08x parent:0x%08x left:0x%08x right:0x%08x\n",node->rb_color==SE_TREE_RB_COLOR_BLACK?"BLACK":"RED",node->key32,(int)node,(int)node->parent,(int)node->left,(int)node->right);
        if(node->left)
                print_tree_item(node->left,level+1);
        if(node->right)
                print_tree_item(node->right,level+1);
}

/* Debug helper: climb to the root from any node, then dump the whole
 * tree.  Compiled out in normal builds. */
void print_tree(se_tree_node_t *node){
        while(node->parent){
                node=node->parent;
        }
        print_tree_item(node,0);
}
#endif
820
/* Standard binary-tree left rotation around 'node': node's right child
 * takes node's place and node becomes that child's left child; subtrees
 * are re-hung so key ordering is preserved.  Updates se_tree->tree when
 * rotating at the root.
 */
static inline void
rotate_left(se_tree_t *se_tree, se_tree_node_t *node)
{
        /* point the parent (or the tree root) at node's right child */
        if(node->parent){
                if(node->parent->left==node){
                        node->parent->left=node->right;
                } else {
                        node->parent->right=node->right;
                }
        } else {
                se_tree->tree=node->right;
        }
        /* lift the right child above node and adopt its left subtree */
        node->right->parent=node->parent;
        node->parent=node->right;
        node->right=node->right->left;
        if(node->right){
                node->right->parent=node;
        }
        node->parent->left=node;
}
841
/* Mirror image of rotate_left(): node's left child takes node's place
 * and node becomes that child's right child; subtrees are re-hung so key
 * ordering is preserved.  Updates se_tree->tree when rotating at the
 * root.
 */
static inline void
rotate_right(se_tree_t *se_tree, se_tree_node_t *node)
{
        /* point the parent (or the tree root) at node's left child */
        if(node->parent){
                if(node->parent->left==node){
                        node->parent->left=node->left;
                } else {
                        node->parent->right=node->left;
                }
        } else {
                se_tree->tree=node->left;
        }
        /* lift the left child above node and adopt its right subtree */
        node->left->parent=node->parent;
        node->parent=node->left;
        node->left=node->left->right;
        if(node->left){
                node->left->parent=node;
        }
        node->parent->right=node;
}
862
/* Case 5 of red-black insert fix-up: the red-red path through parent and
 * node is "straight" (both left or both right children); repaint and
 * rotate at the grandparent to restore the red-black invariants.
 */
static inline void
rb_insert_case5(se_tree_t *se_tree, se_tree_node_t *node)
{
        se_tree_node_t *grandparent;
        se_tree_node_t *parent;

        parent=se_tree_parent(node);
        parent->rb_color=SE_TREE_RB_COLOR_BLACK;
        grandparent=se_tree_parent(parent);
        if(!grandparent){
                return;
        }
        grandparent->rb_color=SE_TREE_RB_COLOR_RED;
        if( (node==parent->left) && (parent==grandparent->left) ){
                rotate_right(se_tree, grandparent);
        } else {
                rotate_left(se_tree, grandparent);
        }
}
882
/* Case 4 of red-black insert fix-up: if node and its parent form a
 * "bent" path (node is an inner child), rotate at the parent to
 * straighten the path, then finish with case 5 from the former parent.
 */
static inline void
rb_insert_case4(se_tree_t *se_tree, se_tree_node_t *node)
{
        se_tree_node_t *grandparent;
        se_tree_node_t *parent;

        parent=se_tree_parent(node);
        grandparent=se_tree_parent(parent);
        if(!grandparent){
                return;
        }
        if( (node==parent->right) && (parent==grandparent->left) ){
                rotate_left(se_tree, parent);
                /* continue from the old parent, now the lower node */
                node=node->left;
        } else if( (node==parent->left) && (parent==grandparent->right) ){
                rotate_right(se_tree, parent);
                /* continue from the old parent, now the lower node */
                node=node->right;
        }
        rb_insert_case5(se_tree, node);
}
903
/* Case 3 of red-black insert fix-up: if parent and uncle are both red,
 * repaint them black, and restart the whole fix-up from the grandparent
 * (which may now violate the invariants); otherwise go on to case 4.
 */
static inline void
rb_insert_case3(se_tree_t *se_tree, se_tree_node_t *node)
{
        se_tree_node_t *grandparent;
        se_tree_node_t *parent;
        se_tree_node_t *uncle;

        uncle=se_tree_uncle(node);
        if(uncle && (uncle->rb_color==SE_TREE_RB_COLOR_RED)){
                parent=se_tree_parent(node);
                parent->rb_color=SE_TREE_RB_COLOR_BLACK;
                uncle->rb_color=SE_TREE_RB_COLOR_BLACK;
                grandparent=se_tree_grandparent(node);
                if(grandparent){
                        rb_insert_case1(se_tree, grandparent);
                }
        } else {
                rb_insert_case4(se_tree, node);
        }
}
924
925 static inline void
926 rb_insert_case2(se_tree_t *se_tree, se_tree_node_t *node)
927 {
928         se_tree_node_t *parent;
929
930         parent=se_tree_parent(node);
931         /* parent is always non-NULL here */
932         if(parent->rb_color==SE_TREE_RB_COLOR_BLACK){
933                 return;
934         }
935         rb_insert_case3(se_tree, node);
936 }
937
938 static inline void
939 rb_insert_case1(se_tree_t *se_tree, se_tree_node_t *node)
940 {
941         se_tree_node_t *parent;
942
943         parent=se_tree_parent(node);
944         if(!parent){
945                 node->rb_color=SE_TREE_RB_COLOR_BLACK;
946                 return;
947         }
948         rb_insert_case2(se_tree, node);
949 }
950         
951 /* insert a new node in the tree. if this node matches an already existing node
952  * then just replace the data for that node */
953 void 
954 se_tree_insert32(se_tree_t *se_tree, guint32 key, void *data)
955 {
956         se_tree_node_t *node;
957
958         node=se_tree->tree;
959
960         /* is this the first node ?*/
961         if(!node){
962                 node=se_alloc(sizeof(se_tree_node_t));
963                 switch(se_tree->type){
964                 case SE_TREE_TYPE_RED_BLACK:
965                         node->rb_color=SE_TREE_RB_COLOR_BLACK;
966                         break;
967                 }
968                 node->parent=NULL;
969                 node->left=NULL;
970                 node->right=NULL;
971                 node->key32=key;
972                 node->data=data;
973                 se_tree->tree=node;
974                 return;
975         }
976
977         /* it was not the new root so walk the tree until we find where to
978          * insert this new leaf.
979          */
980         while(1){
981                 /* this node already exists, so just replace the data pointer*/
982                 if(key==node->key32){
983                         node->data=data;
984                         return;
985                 }
986                 if(key<node->key32) {
987                         if(!node->left){
988                                 /* new node to the left */
989                                 se_tree_node_t *new_node;       
990                                 new_node=se_alloc(sizeof(se_tree_node_t));
991                                 node->left=new_node;
992                                 new_node->parent=node;
993                                 new_node->left=NULL;
994                                 new_node->right=NULL;
995                                 new_node->key32=key;
996                                 new_node->data=data;
997                                 node=new_node;
998                                 break;
999                         }
1000                         node=node->left;
1001                         continue;
1002                 }
1003                 if(key>node->key32) {
1004                         if(!node->right){
1005                                 /* new node to the right */
1006                                 se_tree_node_t *new_node;       
1007                                 new_node=se_alloc(sizeof(se_tree_node_t));
1008                                 node->right=new_node;
1009                                 new_node->parent=node;
1010                                 new_node->left=NULL;
1011                                 new_node->right=NULL;
1012                                 new_node->key32=key;
1013                                 new_node->data=data;
1014                                 node=new_node;
1015                                 break;
1016                         }
1017                         node=node->right;
1018                         continue;
1019                 }
1020         }
1021
1022         /* node will now point to the newly created node */
1023         switch(se_tree->type){
1024         case SE_TREE_TYPE_RED_BLACK:
1025                 node->rb_color=SE_TREE_RB_COLOR_RED;
1026                 rb_insert_case1(se_tree, node);
1027                 break;
1028         }
1029 }
1030
1031
1032 /* When the se data is released, this entire tree will dissapear as if it 
1033  * never existed including all metadata associated with the tree.
1034  */
1035 se_tree_t *
1036 se_tree_create_non_persistent(int type)
1037 {
1038         se_tree_t *tree_list;
1039
1040         tree_list=se_alloc(sizeof(se_tree_t));
1041         tree_list->next=NULL;
1042         tree_list->type=type;
1043         tree_list->tree=NULL;
1044
1045         return tree_list;
1046 }
1047
1048 /* insert a new node in the tree. if this node matches an already existing node
1049  * then just replace the data for that node */
1050 void 
1051 se_tree_insert32_array(se_tree_t *se_tree, se_tree_key_t *key, void *data)
1052 {
1053         se_tree_t *next_tree;
1054
1055         if((key[0].length<1)||(key[0].length>100)){
1056                 DISSECTOR_ASSERT_NOT_REACHED();
1057         }
1058         if((key[0].length==1)&&(key[1].length==0)){
1059                 se_tree_insert32(se_tree, *key[0].key, data);
1060                 return;
1061         }
1062         next_tree=se_tree_lookup32(se_tree, *key[0].key);
1063         if(!next_tree){
1064                 next_tree=se_tree_create_non_persistent(se_tree->type);
1065                 se_tree_insert32(se_tree, *key[0].key, next_tree);
1066         }
1067         if(key[0].length==1){
1068                 key++;
1069         } else {
1070                 key[0].length--;
1071                 key[0].key++;
1072         }
1073         se_tree_insert32_array(next_tree, key, data);
1074 }
1075
1076 void *
1077 se_tree_lookup32_array(se_tree_t *se_tree, se_tree_key_t *key)
1078 {
1079         se_tree_t *next_tree;
1080
1081         if((key[0].length<1)||(key[0].length>100)){
1082                 DISSECTOR_ASSERT_NOT_REACHED();
1083         }
1084         if((key[0].length==1)&&(key[1].length==0)){
1085                 return se_tree_lookup32(se_tree, *key[0].key);
1086         }
1087         next_tree=se_tree_lookup32(se_tree, *key[0].key);
1088         if(!next_tree){
1089                 return NULL;
1090         }
1091         if(key[0].length==1){
1092                 key++;
1093         } else {
1094                 key[0].length--;
1095                 key[0].key++;
1096         }
1097         se_tree_lookup32_array(next_tree, key);
1098 }
1099
1100
/* Insert a value under a string key.  The string is encoded as a vector
 * of guint32 words - the length first, then the string body in whole
 * 4-byte chunks (aliased in place), then the trailing 1..4 bytes packed
 * into one "residual" word - and stored via se_tree_insert32_array().
 *
 * NOTE(review): if k is the empty string, len-1 underflows to 0xffffffff
 * and div becomes huge; se_tree_insert32_array() would then trip its
 * length assertion.  Presumably callers never pass "" - TODO confirm.
 */
void se_tree_insert_string(se_string_hash_t* se_tree, const gchar* k, void* v) {
	guint32 len = strlen(k);
	guint32 div = (len-1)/4;	/* number of whole 4-byte chunks (residual excluded) */
	guint32 residual = 0;
	/* key vector: [length][whole chunks][residual word][terminator].
	 * NOTE(review): aliasing the string as guint32* assumes the
	 * platform tolerates this access and makes keys endian-dependent;
	 * lookup builds its keys identically, so matching still works. */
	se_tree_key_t key[] = {
		{1,&len},
		{div,(guint32*)(&k[0])},
		{1,&residual},
		{0,NULL}
	};
	
	/* short string (no whole chunks): shift the residual entry down so
	 * the vector contains no zero-length element before the terminator */
	if (! div) {
		key[1].length = key[2].length;
		key[1].key = key[2].key;
		key[2].length = 0;
		key[2].key = NULL;
	}
	
	div *= 4;	/* convert chunk count to byte offset of the residual bytes */
	
	/* pack the trailing bytes into one word; fallthrough is deliberate
	 * (len%4==0 means exactly 4 residual bytes live at offset div).
	 * NOTE(review): gchar may be signed, so bytes >= 0x80 sign-extend
	 * before shifting; insert and lookup do the same, so keys still match. */
	switch(len%4) {
		case 0:
			residual |= ( k[div+3] << 24 );
			/* FALLTHROUGH */
		case 3:
			residual |= ( k[div+2] << 16 );
			/* FALLTHROUGH */
		case 2:
			residual |= ( k[div+1] << 8  );
			/* FALLTHROUGH */
		case 1:
			residual |= k[div];
			break;
	}
	
	se_tree_insert32_array(se_tree,key,v);
}
1135
/* Look up a value previously stored under a string key.  Builds exactly
 * the same key vector as se_tree_insert_string() - length word, whole
 * 4-byte chunks, packed residual word - and delegates to
 * se_tree_lookup32_array().  Returns NULL when the key is absent.
 *
 * NOTE(review): as with insert, an empty string makes len-1 underflow
 * and would trip the length assertion downstream - TODO confirm callers
 * never pass "".
 */
void* se_tree_lookup_string(se_string_hash_t* se_tree, const gchar* k) {
	guint32 len = strlen(k);
	guint32 div = (len-1)/4;	/* number of whole 4-byte chunks (residual excluded) */
	guint32 residual = 0;
	/* key vector: [length][whole chunks][residual word][terminator];
	 * must mirror se_tree_insert_string() exactly for lookups to hit */
	se_tree_key_t key[] = {
		{1,&len},
		{div,(guint32*)(&k[0])},
		{1,&residual},
		{0,NULL}
	};
	
	/* short string (no whole chunks): shift the residual entry down so
	 * the vector contains no zero-length element before the terminator */
	if (! div) {
		key[1].length = key[2].length;
		key[1].key = key[2].key;
		key[2].length = 0;
		key[2].key = NULL;
	}
	
	div *= 4;	/* convert chunk count to byte offset of the residual bytes */
	
	/* pack the trailing bytes into one word; fallthrough is deliberate
	 * (len%4==0 means exactly 4 residual bytes live at offset div) */
	switch(len%4) {
		case 0:
			residual |= k[div+3] << 24;
			/* FALLTHROUGH */
		case 3:
			residual |= k[div+2] << 16;
			/* FALLTHROUGH */
		case 2:
			residual |= k[div+1] << 8;
			/* FALLTHROUGH */
		case 1:
			residual |= k[div];
			break;
	}
	
	return se_tree_lookup32_array(se_tree, key);
}