make using canaries optional
[obnox/wireshark/wip.git] / epan / emem.c
1 /* emem.c
2  * Ethereal memory management and garbage collection functions
3  * Ronnie Sahlberg 2005
4  *
5  * $Id$
6  *
7  * Ethereal - Network traffic analyzer
8  * By Gerald Combs <gerald@ethereal.com>
9  * Copyright 1998 Gerald Combs
10  *
11  * This program is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU General Public License
13  * as published by the Free Software Foundation; either version 2
14  * of the License, or (at your option) any later version.
15  *
16  * This program is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19  * GNU General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this program; if not, write to the Free Software
23  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
24  */
25 #ifdef HAVE_CONFIG_H
26 #include "config.h"
27 #endif
28
29 #include <stdio.h>
30 #include <stdlib.h>
31 #include <string.h>
32 #include <stdarg.h>
33
34 #include <time.h>
35 #ifdef HAVE_SYS_TIME_H
36 #include <sys/time.h>
37 #endif
38
39 #ifdef HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42
43 #ifdef _WIN32
44 #include <windows.h>    /* VirtualAlloc, VirtualProtect */
45 #include <process.h>    /* getpid */
46 #endif
47
48 #include <glib.h>
49 #include <proto.h>
50 #include "emem.h"
51 #include <wiretap/file_util.h>
52
53
54 /*
55  * Tools like Valgrind and ElectricFence don't work well with memchunks.
56  * Uncomment the defines below to make {ep|se}_alloc() allocate each
57  * object individually.
58  */
59 /* #define EP_DEBUG_FREE 1 */
60 /* #define SE_DEBUG_FREE 1 */
61
62 /* Do we want to use guardpages? if available */
63 #define WANT_GUARD_PAGES 1
64
65 /* Do we want to use canaries ? */
66 #define DEBUG_USE_CANARIES 1
67
68  
69 #ifdef WANT_GUARD_PAGES
70 /* Add guard pages at each end of our allocated memory */
71 #if defined(HAVE_SYSCONF) && defined(HAVE_MMAP) && defined(HAVE_MPROTECT) && defined(HAVE_STDINT_H)
72 #include <stdint.h>
73 #include <sys/types.h>
74 #include <sys/mman.h>
75 #define USE_GUARD_PAGES 1
76 #endif
77 #endif
78
79 /* When required, allocate more memory from the OS in this size chunks */
80 #define EMEM_PACKET_CHUNK_SIZE 10485760
81
82 /* The maximum number of allocations per chunk */
83 #define EMEM_ALLOCS_PER_CHUNK (EMEM_PACKET_CHUNK_SIZE / 512)
84
85
86 #ifdef DEBUG_USE_CANARIES
87 #define EMEM_CANARY_SIZE 8
88 #define EMEM_CANARY_DATA_SIZE (EMEM_CANARY_SIZE * 2 - 1)
89 guint8  ep_canary[EMEM_CANARY_DATA_SIZE], se_canary[EMEM_CANARY_DATA_SIZE];
90 #endif /* DEBUG_USE_CANARIES */
91
/* One chunk of allocator backing store.  Chunks live on either the
 * free_list or the used_list of an emem_header_t.  The *_init fields
 * record the chunk's pristine state so freeing the whole pool can just
 * reset amount_free/free_offset instead of releasing memory.
 */
typedef struct _emem_chunk_t {
	struct _emem_chunk_t *next;		/* next chunk in the free/used list */
	unsigned int	amount_free_init;	/* usable bytes when the chunk is empty */
	unsigned int	amount_free;		/* bytes still available right now */
	unsigned int	free_offset_init;	/* offset of first usable byte (past any guard page) */
	unsigned int	free_offset;		/* offset where the next allocation goes */
	char *buf;				/* the backing buffer itself */
#ifdef DEBUG_USE_CANARIES
#if ! defined(EP_DEBUG_FREE) && ! defined(SE_DEBUG_FREE)
	unsigned int	c_count;		/* number of canaries recorded in this chunk */
	void		*canary[EMEM_ALLOCS_PER_CHUNK];	/* where each allocation's canary was written */
	guint8		cmp_len[EMEM_ALLOCS_PER_CHUNK];	/* length of each canary (pad bytes, 8..15) */
#endif
#endif /* DEBUG_USE_CANARIES */
} emem_chunk_t;
107
/* Head of one allocator pool: chunks with space left (free_list) and
 * chunks whose space has been handed out (used_list).
 */
typedef struct _emem_header_t {
  emem_chunk_t *free_list;	/* chunks still accepting allocations */
  emem_chunk_t *used_list;	/* chunks that are full or retired */
} emem_header_t;

/* Pool for packet-lifetime (ep_*) allocations. */
static emem_header_t ep_packet_mem;
/* Pool for capture-lifetime (se_*) allocations. */
static emem_header_t se_packet_mem;
115
116
117 #ifdef DEBUG_USE_CANARIES
118 /*
119  * Set a canary value to be placed between memchunks.
120  */
121 void
122 emem_canary(guint8 *canary) {
123         int i;
124 #if GLIB_MAJOR_VERSION >= 2
125         static GRand   *rand_state = NULL;
126 #endif
127
128
129         /* First, use GLib's random function if we have it */
130 #if GLIB_MAJOR_VERSION >= 2
131         if (rand_state == NULL) {
132                 rand_state = g_rand_new();
133         }
134         for (i = 0; i < EMEM_CANARY_DATA_SIZE; i ++) {
135                 canary[i] = (guint8) g_rand_int(rand_state);
136         }
137         return;
138 #else
139         FILE *fp;
140         size_t sz;
141         /* Try /dev/urandom */
142         if ((fp = eth_fopen("/dev/urandom", "r")) != NULL) {
143                 sz = fread(canary, EMEM_CANARY_DATA_SIZE, 1, fp);
144                 fclose(fp);
145                 if (sz == EMEM_CANARY_SIZE) {
146                         return;
147                 }
148         }
149
150         /* Our last resort */
151         srandom(time(NULL) | getpid());
152         for (i = 0; i < EMEM_CANARY_DATA_SIZE; i ++) {
153                 canary[i] = (guint8) random();
154         }
155         return;
156 #endif /* GLIB_MAJOR_VERSION >= 2 */
157 }
158
159 #if !defined(SE_DEBUG_FREE)
160 /*
161  * Given an allocation size, return the amount of padding needed for
162  * the canary value.
163  */
164 static guint8
165 emem_canary_pad (size_t allocation) {
166         guint8 pad;
167
168         pad = EMEM_CANARY_SIZE - (allocation % EMEM_CANARY_SIZE);
169         if (pad < EMEM_CANARY_SIZE)
170                 pad += EMEM_CANARY_SIZE;
171
172         return pad;
173 }
174 #endif
175 #endif /* DEBUG_USE_CANARIES */
176
177
178 /* Initialize the packet-lifetime memory allocation pool.
179  * This function should be called only once when Ethereal or Tethereal starts
180  * up.
181  */
182 void
183 ep_init_chunk(void)
184 {
185         ep_packet_mem.free_list=NULL;
186         ep_packet_mem.used_list=NULL;
187
188 #ifdef DEBUG_USE_CANARIES
189         emem_canary(ep_canary);
190 #endif /* DEBUG_USE_CANARIES */
191 }
192 /* Initialize the capture-lifetime memory allocation pool.
193  * This function should be called only once when Ethereal or Tethereal starts
194  * up.
195  */
196 void
197 se_init_chunk(void)
198 {
199         se_packet_mem.free_list=NULL;
200         se_packet_mem.used_list=NULL;
201
202 #ifdef DEBUG_USE_CANARIES
203         emem_canary(se_canary);
204 #endif /* DEBUG_USE_CANARIES */
205 }
206
#if !defined(SE_DEBUG_FREE)
/* Ensure *free_list has at least one chunk: if the list is empty,
 * grab a fresh EMEM_PACKET_CHUNK_SIZE chunk from the OS and, where
 * supported, make the first and last page of the chunk inaccessible
 * guard pages so overruns fault immediately.
 */
static void
emem_create_chunk(emem_chunk_t **free_list) {
#if defined (_WIN32)
	SYSTEM_INFO sysinfo;
	int pagesize;
	BOOL ret;
	char *buf_end, *prot1, *prot2;
	DWORD oldprot;
#elif defined(USE_GUARD_PAGES)
	intptr_t pagesize = sysconf(_SC_PAGESIZE);
	int ret;
	char *buf_end, *prot1, *prot2;
#endif
	/* we dont have any free data, so we must allocate a new one */
	if(!*free_list){
		emem_chunk_t *npc;
		npc = g_malloc(sizeof(emem_chunk_t));
		npc->next = NULL;
#ifdef DEBUG_USE_CANARIES
#if ! defined(EP_DEBUG_FREE) && ! defined(SE_DEBUG_FREE)
		npc->c_count = 0;
#endif
#endif /* DEBUG_USE_CANARIES */

		*free_list = npc;
#if defined (_WIN32)
		/*
		 * MSDN documents VirtualAlloc/VirtualProtect at
		 * http://msdn.microsoft.com/library/en-us/memory/base/creating_guard_pages.asp
		 */
		GetSystemInfo(&sysinfo);
		pagesize = sysinfo.dwPageSize;

		/* XXX - is MEM_COMMIT|MEM_RESERVE correct? */
		npc->buf = VirtualAlloc(NULL, EMEM_PACKET_CHUNK_SIZE,
			MEM_COMMIT|MEM_RESERVE, PAGE_READWRITE);
		g_assert(npc->buf != NULL);
		buf_end = npc->buf + EMEM_PACKET_CHUNK_SIZE;

		/* Align our guard pages on page-sized boundaries */
		/* NOTE(review): the (int) casts truncate pointers on 64-bit
		 * Windows; (intptr_t) as in the mmap branch below would be
		 * safer -- confirm the target ABI. */
		prot1 = (char *) ((((int) npc->buf + pagesize - 1) / pagesize) * pagesize);
		prot2 = (char *) ((((int) buf_end - (1 * pagesize)) / pagesize) * pagesize);

		ret = VirtualProtect(prot1, pagesize, PAGE_NOACCESS, &oldprot);
		g_assert(ret == TRUE);
		ret = VirtualProtect(prot2, pagesize, PAGE_NOACCESS, &oldprot);
		g_assert(ret == TRUE);

		/* usable region is everything strictly between the guard pages */
		npc->amount_free_init = prot2 - prot1 - pagesize;
		npc->amount_free = npc->amount_free_init;
		npc->free_offset_init = (prot1 - npc->buf) + pagesize;
		npc->free_offset = npc->free_offset_init;

#elif defined(USE_GUARD_PAGES)
		npc->buf = mmap(NULL, EMEM_PACKET_CHUNK_SIZE,
			PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
		g_assert(npc->buf != MAP_FAILED);
		buf_end = npc->buf + EMEM_PACKET_CHUNK_SIZE;

		/* Align our guard pages on page-sized boundaries */
		prot1 = (char *) ((((intptr_t) npc->buf + pagesize - 1) / pagesize) * pagesize);
		prot2 = (char *) ((((intptr_t) buf_end - (1 * pagesize)) / pagesize) * pagesize);
		ret = mprotect(prot1, pagesize, PROT_NONE);
		g_assert(ret != -1);
		ret = mprotect(prot2, pagesize, PROT_NONE);
		g_assert(ret != -1);

		/* usable region is everything strictly between the guard pages */
		npc->amount_free_init = prot2 - prot1 - pagesize;
		npc->amount_free = npc->amount_free_init;
		npc->free_offset_init = (prot1 - npc->buf) + pagesize;
		npc->free_offset = npc->free_offset_init;

#else /* Is there a draft in here? */
		/* no guard-page support available: the whole chunk is usable */
		npc->amount_free_init = EMEM_PACKET_CHUNK_SIZE;
		npc->amount_free = npc->amount_free_init;
		npc->free_offset_init = 0;
		npc->free_offset = npc->free_offset_init;
		npc->buf = g_malloc(EMEM_PACKET_CHUNK_SIZE);
#endif /* USE_GUARD_PAGES */
	}
}
#endif
290
/* allocate 'size' amount of memory with an allocation lifetime until the
 * next packet.
 * Returns a pointer into the current free chunk; the memory is never
 * freed individually -- ep_free_all() reclaims the whole pool at once.
 */
void *
ep_alloc(size_t size)
{
	void *buf;
#ifndef EP_DEBUG_FREE
#ifdef DEBUG_USE_CANARIES
	void *cptr;				/* where this allocation's canary goes */
	guint8 pad = emem_canary_pad(size);	/* 8..15 pad bytes holding the canary */
#else
	static guint8 pad=8;			/* fixed 8-byte alignment pad, no canary */
#endif /* DEBUG_USE_CANARIES */
	emem_chunk_t *free_list;
#endif

#ifndef EP_DEBUG_FREE
	/* Round up to an 8 byte boundary.  Make sure we have at least
	 * 8 pad bytes for our canary.
	 */
	size += pad;

	/* make sure we dont try to allocate too much (arbitrary limit) */
	DISSECTOR_ASSERT(size<(EMEM_PACKET_CHUNK_SIZE>>2));

	/* make sure there is at least one chunk to look at */
	emem_create_chunk(&ep_packet_mem.free_list);

	/* oops, we need to allocate more memory to serve this request
	 * than we have free. move this node to the used list and try again
	 */
	if(size>ep_packet_mem.free_list->amount_free
#ifdef DEBUG_USE_CANARIES
	      || ep_packet_mem.free_list->c_count >= EMEM_ALLOCS_PER_CHUNK
#endif /* DEBUG_USE_CANARIES */
	){
		emem_chunk_t *npc;
		npc=ep_packet_mem.free_list;
		ep_packet_mem.free_list=ep_packet_mem.free_list->next;
		npc->next=ep_packet_mem.used_list;
		ep_packet_mem.used_list=npc;
	}

	/* the head may have just been retired; replenish if the list is empty */
	emem_create_chunk(&ep_packet_mem.free_list);

	free_list = ep_packet_mem.free_list;

	/* hand out the next free bytes of the chunk (bump allocation) */
	buf = free_list->buf + free_list->free_offset;

	free_list->amount_free -= size;
	free_list->free_offset += size;

#ifdef DEBUG_USE_CANARIES
	/* write the canary pattern into the pad bytes and remember where it
	 * is so ep_free_all() can verify nothing overran this allocation */
	cptr = (char *)buf + size - pad;
	memcpy(cptr, &ep_canary, pad);
	free_list->canary[free_list->c_count] = cptr;
	free_list->cmp_len[free_list->c_count] = pad;
	free_list->c_count++;
#endif /* DEBUG_USE_CANARIES */

#else /* EP_DEBUG_FREE */
	/* debug build: one g_malloc per allocation so tools like Valgrind
	 * or ElectricFence can track each object individually */
	emem_chunk_t *npc;

	npc=g_malloc(sizeof(emem_chunk_t));
	npc->next=ep_packet_mem.used_list;
	npc->amount_free=size;
	npc->free_offset=0;
	npc->buf=g_malloc(size);
	buf = npc->buf;
	ep_packet_mem.used_list=npc;
#endif /* EP_DEBUG_FREE */

	return buf;
}
/* allocate 'size' amount of memory with an allocation lifetime until the
 * next capture.
 * Returns a pointer into the current free chunk; the memory is never
 * freed individually -- se_free_all() reclaims the whole pool at once.
 */
void *
se_alloc(size_t size)
{
	void *buf;
#ifndef SE_DEBUG_FREE
#ifdef DEBUG_USE_CANARIES
	void *cptr;				/* where this allocation's canary goes */
	guint8 pad = emem_canary_pad(size);	/* 8..15 pad bytes holding the canary */
#else
	static guint8 pad=8;			/* fixed 8-byte alignment pad, no canary */
#endif /* DEBUG_USE_CANARIES */
	emem_chunk_t *free_list;
#endif

#ifndef SE_DEBUG_FREE
	/* Round up to an 8 byte boundary.  Make sure we have at least
	 * 8 pad bytes for our canary.
	 */
	size += pad;

	/* make sure we dont try to allocate too much (arbitrary limit) */
	DISSECTOR_ASSERT(size<(EMEM_PACKET_CHUNK_SIZE>>2));

	/* make sure there is at least one chunk to look at */
	emem_create_chunk(&se_packet_mem.free_list);

	/* oops, we need to allocate more memory to serve this request
	 * than we have free. move this node to the used list and try again
	 */
	if(size>se_packet_mem.free_list->amount_free
#ifdef DEBUG_USE_CANARIES
	|| se_packet_mem.free_list->c_count >= EMEM_ALLOCS_PER_CHUNK
#endif /* DEBUG_USE_CANARIES */
	){
		emem_chunk_t *npc;
		npc=se_packet_mem.free_list;
		se_packet_mem.free_list=se_packet_mem.free_list->next;
		npc->next=se_packet_mem.used_list;
		se_packet_mem.used_list=npc;
	}

	/* the head may have just been retired; replenish if the list is empty */
	emem_create_chunk(&se_packet_mem.free_list);

	free_list = se_packet_mem.free_list;

	/* hand out the next free bytes of the chunk (bump allocation) */
	buf = free_list->buf + free_list->free_offset;

	free_list->amount_free -= size;
	free_list->free_offset += size;

#ifdef DEBUG_USE_CANARIES
	/* write the canary pattern into the pad bytes and remember where it
	 * is so se_free_all() can verify nothing overran this allocation */
	cptr = (char *)buf + size - pad;
	memcpy(cptr, &se_canary, pad);
	free_list->canary[free_list->c_count] = cptr;
	free_list->cmp_len[free_list->c_count] = pad;
	free_list->c_count++;
#endif /* DEBUG_USE_CANARIES */

#else /* SE_DEBUG_FREE */
	/* debug build: one g_malloc per allocation so tools like Valgrind
	 * or ElectricFence can track each object individually */
	emem_chunk_t *npc;

	npc=g_malloc(sizeof(emem_chunk_t));
	npc->next=se_packet_mem.used_list;
	npc->amount_free=size;
	npc->free_offset=0;
	npc->buf=g_malloc(size);
	buf = npc->buf;
	se_packet_mem.used_list=npc;
#endif /* SE_DEBUG_FREE */

	return buf;
}
439
440
/* Allocate packet-lifetime memory and zero-fill it. */
void* ep_alloc0(size_t size) {
	void *p = ep_alloc(size);

	memset(p, 0, size);
	return p;
}
444
445 gchar* ep_strdup(const gchar* src) {
446         guint len = strlen(src);
447         gchar* dst;
448
449         dst = strncpy(ep_alloc(len+1), src, len);
450
451         dst[len] = '\0';
452
453         return dst;
454 }
455
456 gchar* ep_strndup(const gchar* src, size_t len) {
457         gchar* dst = ep_alloc(len+1);
458         guint i;
459
460         for (i = 0; src[i] && i < len; i++)
461                 dst[i] = src[i];
462
463         dst[i] = '\0';
464
465         return dst;
466 }
467
/* Copy "len" raw bytes into packet-lifetime memory. */
void* ep_memdup(const void* src, size_t len) {
	void* dup = ep_alloc(len);

	return memcpy(dup, src, len);
}
471
472 gchar* ep_strdup_vprintf(const gchar* fmt, va_list ap) {
473         va_list ap2;
474         guint len;
475         gchar* dst;
476
477         G_VA_COPY(ap2, ap);
478
479         len = g_printf_string_upper_bound(fmt, ap);
480
481         dst = ep_alloc(len+1);
482         g_vsnprintf (dst, len, fmt, ap2);
483         va_end(ap2);
484
485         return dst;
486 }
487
488 gchar* ep_strdup_printf(const gchar* fmt, ...) {
489         va_list ap;
490         gchar* dst;
491
492         va_start(ap,fmt);
493         dst = ep_strdup_vprintf(fmt, ap);
494         va_end(ap);
495         return dst;
496 }
497
/* Split "string" on each occurrence of "sep" into at most max_tokens
 * tokens (max_tokens < 1 means unlimited), returning a NULL-terminated
 * vector of pointers with packet lifetime.  Unlike g_strsplit(), runs
 * of consecutive separators yield no empty-string entries.  Returns
 * NULL when string or sep is NULL or sep is empty.
 */
gchar** ep_strsplit(const gchar* string, const gchar* sep, int max_tokens) {
	gchar* splitted;
	gchar* s;
	guint tokens;
	guint str_len;
	guint sep_len;
	guint i;
	gchar** vec;
	enum { AT_START, IN_PAD, IN_TOKEN } state;
	guint curr_tok = 0;

	if ( ! string
		 || ! sep
		 || ! sep[0])
		return NULL;

	s = splitted = ep_strdup(string);
	str_len = strlen(splitted);
	sep_len = strlen(sep);

	if (max_tokens < 1) max_tokens = INT_MAX;

	tokens = 1;


	/* first pass: overwrite every separator occurrence with NULs and
	 * count an upper bound on the number of tokens */
	while (tokens <= (guint)max_tokens && ( s = strstr(s,sep) )) {
		tokens++;

		for(i=0; i < sep_len; i++ )
			s[i] = '\0';

		s += sep_len;

	}

	vec = ep_alloc_array(gchar*,tokens+1);
	state = AT_START;

	/* second pass: walk the NUL-punctured copy and record the start of
	 * each non-empty token */
	for (i=0; i< str_len; i++) {
		switch(state) {
			case AT_START:
				switch(splitted[i]) {
					case '\0':
						state  = IN_PAD;
						continue;
					default:
						vec[curr_tok] = &(splitted[i]);
						curr_tok++;
						state = IN_TOKEN;
						continue;
				}
			case IN_TOKEN:
				switch(splitted[i]) {
					case '\0':
						state = IN_PAD;
					/* fallthrough: keep scanning either way */
					default:
						continue;
				}
			case IN_PAD:
				switch(splitted[i]) {
					default:
						vec[curr_tok] = &(splitted[i]);
						curr_tok++;
						state = IN_TOKEN;
					/* fallthrough: keep scanning either way */
					case '\0':
						continue;
				}
		}
	}

	vec[curr_tok] = NULL;

	return vec;
}
572
573
574
/* Allocate capture-lifetime memory and zero-fill it. */
void* se_alloc0(size_t size) {
	void *p = se_alloc(size);

	memset(p, 0, size);
	return p;
}
578
579 /* If str is NULL, just return the string "<NULL>" so that the callers dont
580  * have to bother checking it.
581  */
582 gchar* se_strdup(const gchar* src) {
583         guint len;
584         gchar* dst;
585
586         if(!src){
587                 return "<NULL>";
588         }
589
590         len = strlen(src);
591         dst = strncpy(se_alloc(len+1), src, len);
592
593         dst[len] = '\0';
594
595         return dst;
596 }
597
598 gchar* se_strndup(const gchar* src, size_t len) {
599         gchar* dst = se_alloc(len+1);
600         guint i;
601
602         for (i = 0; src[i] && i < len; i++)
603                 dst[i] = src[i];
604
605         dst[i] = '\0';
606
607         return dst;
608 }
609
/* Copy "len" raw bytes into capture-lifetime memory. */
void* se_memdup(const void* src, size_t len) {
	void* dup = se_alloc(len);

	return memcpy(dup, src, len);
}
613
614 gchar* se_strdup_vprintf(const gchar* fmt, va_list ap) {
615         va_list ap2;
616         guint len;
617         gchar* dst;
618
619         G_VA_COPY(ap2, ap);
620
621         len = g_printf_string_upper_bound(fmt, ap);
622
623         dst = se_alloc(len+1);
624         g_vsnprintf (dst, len, fmt, ap2);
625         va_end(ap2);
626
627         return dst;
628 }
629
630 gchar* se_strdup_printf(const gchar* fmt, ...) {
631         va_list ap;
632         gchar* dst;
633
634         va_start(ap,fmt);
635         dst = se_strdup_vprintf(fmt, ap);
636         va_end(ap);
637         return dst;
638 }
639
/* release all allocated memory back to the pool.
 * Verifies every canary (when enabled) and rewinds each chunk to its
 * pristine state; chunks are recycled, not returned to the OS.
 */
void
ep_free_all(void)
{
	emem_chunk_t *npc;
#ifndef EP_DEBUG_FREE
#ifdef DEBUG_USE_CANARIES
	guint i;
#endif /* DEBUG_USE_CANARIES */
#endif

	/* move all used chunks over to the free list */
	while(ep_packet_mem.used_list){
		npc=ep_packet_mem.used_list;
		ep_packet_mem.used_list=ep_packet_mem.used_list->next;
		npc->next=ep_packet_mem.free_list;
		ep_packet_mem.free_list=npc;
	}

	/* clear them all out */
	npc = ep_packet_mem.free_list;
	while (npc != NULL) {
#ifndef EP_DEBUG_FREE
#ifdef DEBUG_USE_CANARIES
		/* verify no allocation wrote past its end */
		for (i = 0; i < npc->c_count; i++) {
			if (memcmp(npc->canary[i], &ep_canary, npc->cmp_len[i]) != 0)
				g_error("Per-packet memory corrupted.");
		}
		npc->c_count = 0;
#endif /* DEBUG_USE_CANARIES */
		/* rewind the chunk so it can be reused as-is */
		npc->amount_free = npc->amount_free_init;
		npc->free_offset = npc->free_offset_init;
		npc = npc->next;
#else /* EP_DEBUG_FREE */
		/* debug build: each "chunk" holds one allocation; free it */
		emem_chunk_t *next = npc->next;

		g_free(npc->buf);
		g_free(npc);
		npc = next;
#endif /* EP_DEBUG_FREE */
	}

#ifdef EP_DEBUG_FREE
	ep_init_chunk();
#endif
}
/* release all allocated memory back to the pool.
 * Verifies every canary (when enabled), rewinds each chunk for reuse
 * and resets all registered se trees, whose nodes lived in this pool.
 */
void
se_free_all(void)
{
	emem_chunk_t *npc;
	se_tree_t *se_tree_list;
#ifndef SE_DEBUG_FREE
#ifdef DEBUG_USE_CANARIES
	guint i;
#endif /* DEBUG_USE_CANARIES */
#endif


	/* move all used chunks over to the free list */
	while(se_packet_mem.used_list){
		npc=se_packet_mem.used_list;
		se_packet_mem.used_list=se_packet_mem.used_list->next;
		npc->next=se_packet_mem.free_list;
		se_packet_mem.free_list=npc;
	}

	/* clear them all out */
	npc = se_packet_mem.free_list;
	while (npc != NULL) {
#ifndef SE_DEBUG_FREE
#ifdef DEBUG_USE_CANARIES
		/* verify no allocation wrote past its end */
		for (i = 0; i < npc->c_count; i++) {
			if (memcmp(npc->canary[i], &se_canary, npc->cmp_len[i]) != 0)
				g_error("Per-session memory corrupted.");
		}
		npc->c_count = 0;
#endif /* DEBUG_USE_CANARIES */
		/* rewind the chunk so it can be reused as-is */
		npc->amount_free = npc->amount_free_init;
		npc->free_offset = npc->free_offset_init;
		npc = npc->next;
#else /* SE_DEBUG_FREE */
		/* debug build: each "chunk" holds one allocation; free it */
		emem_chunk_t *next = npc->next;

		g_free(npc->buf);
		g_free(npc);
		npc = next;
#endif /* SE_DEBUG_FREE */
	}

#ifdef SE_DEBUG_FREE
		se_init_chunk();
#endif

	/* release/reset all se allocated trees: their nodes came from the
	 * pool that was just reclaimed above */
	for(se_tree_list=se_trees;se_tree_list;se_tree_list=se_tree_list->next){
		se_tree_list->tree=NULL;
	}
}
741
742
743 ep_stack_t ep_stack_new(void) {
744     ep_stack_t s = ep_new(struct _ep_stack_frame_t*);
745     *s = ep_new0(struct _ep_stack_frame_t);
746     return s;
747 }
748
749 /*  for ep_stack_t we'll keep the popped frames so we reuse them instead
750 of allocating new ones.
751 */
752
753
754 void* ep_stack_push(ep_stack_t stack, void* data) {
755     struct _ep_stack_frame_t* frame;
756     struct _ep_stack_frame_t* head = (*stack);
757
758     if (head->above) {
759         frame = head->above;
760     } else {
761        frame = ep_new(struct _ep_stack_frame_t);
762        head->above = frame;
763        frame->below = head;
764        frame->above = NULL;
765     }
766
767     frame->payload = data;
768     (*stack) = frame;
769
770     return data;
771 }
772
773 void* ep_stack_pop(ep_stack_t stack) {
774
775     if ((*stack)->below) {
776         (*stack) = (*stack)->below;
777         return (*stack)->above->payload;
778     } else {
779         return NULL;
780     }
781 }
782
783
784
785
786
/* routines to manage se allocated red-black trees */
/* Head of the linked list of every tree made by se_tree_create();
 * se_free_all() walks this list to reset each tree's root to NULL. */
se_tree_t *se_trees=NULL;
789
790 se_tree_t *
791 se_tree_create(int type)
792 {
793         se_tree_t *tree_list;
794
795         tree_list=malloc(sizeof(se_tree_t));
796         tree_list->next=se_trees;
797         tree_list->type=type;
798         tree_list->tree=NULL;
799         se_trees=tree_list;
800
801         return tree_list;
802 }
803
804
805
806 void *
807 se_tree_lookup32(se_tree_t *se_tree, guint32 key)
808 {
809         se_tree_node_t *node;
810
811         node=se_tree->tree;
812
813         while(node){
814                 if(key==node->key32){
815                         return node->data;
816                 }
817                 if(key<node->key32){
818                         node=node->left;
819                         continue;
820                 }
821                 if(key>node->key32){
822                         node=node->right;
823                         continue;
824                 }
825         }
826         return NULL;
827 }
828
829
830 static inline se_tree_node_t *
831 se_tree_parent(se_tree_node_t *node)
832 {
833         return node->parent;
834 }
835
836 static inline se_tree_node_t *
837 se_tree_grandparent(se_tree_node_t *node)
838 {
839         se_tree_node_t *parent;
840
841         parent=se_tree_parent(node);
842         if(parent){
843                 return parent->parent;
844         }
845         return NULL;
846 }
847 static inline se_tree_node_t *
848 se_tree_uncle(se_tree_node_t *node)
849 {
850         se_tree_node_t *parent, *grandparent;
851
852         parent=se_tree_parent(node);
853         if(!parent){
854                 return NULL;
855         }
856         grandparent=se_tree_parent(parent);
857         if(!grandparent){
858                 return NULL;
859         }
860         if(parent==grandparent->left){
861                 return grandparent->right;
862         }
863         return grandparent->left;
864 }
865
866 static inline void rb_insert_case1(se_tree_t *se_tree, se_tree_node_t *node);
867 static inline void rb_insert_case2(se_tree_t *se_tree, se_tree_node_t *node);
868
#ifdef REMOVED
/* Debug helper (compiled out): print one node and, recursively, its
 * subtrees, indented three spaces per tree level. */
void print_tree_item(se_tree_node_t *node, int level){
	int i;
	for(i=0;i<level;i++){
		printf("   ");
	}
	/* NOTE(review): the (int) pointer casts truncate on 64-bit
	 * platforms; only tolerable because this code is disabled. */
	printf("%s  KEY:0x%08x node:0x%08x parent:0x%08x left:0x%08x right:0x%08x\n",node->rb_color==SE_TREE_RB_COLOR_BLACK?"BLACK":"RED",node->key32,(int)node,(int)node->parent,(int)node->left,(int)node->right);
	if(node->left)
		print_tree_item(node->left,level+1);
	if(node->right)
		print_tree_item(node->right,level+1);
}

/* Debug helper (compiled out): climb to the root from any node, then
 * dump the entire tree. */
void print_tree(se_tree_node_t *node){
	while(node->parent){
		node=node->parent;
	}
	print_tree_item(node,0);
}
#endif
889
/* Left-rotate the subtree rooted at "node": node->right takes node's
 * place and node becomes its left child.  node->right must be non-NULL.
 */
static inline void
rotate_left(se_tree_t *se_tree, se_tree_node_t *node)
{
	/* point node's parent (or the tree root) at node->right */
	if(node->parent){
		if(node->parent->left==node){
			node->parent->left=node->right;
		} else {
			node->parent->right=node->right;
		}
	} else {
		se_tree->tree=node->right;
	}
	/* re-link node beneath its former right child, adopting that
	 * child's old left subtree as node's new right subtree */
	node->right->parent=node->parent;
	node->parent=node->right;
	node->right=node->right->left;
	if(node->right){
		node->right->parent=node;
	}
	node->parent->left=node;
}
910
/* Right-rotate the subtree rooted at "node": node->left takes node's
 * place and node becomes its right child.  node->left must be non-NULL.
 */
static inline void
rotate_right(se_tree_t *se_tree, se_tree_node_t *node)
{
	/* point node's parent (or the tree root) at node->left */
	if(node->parent){
		if(node->parent->left==node){
			node->parent->left=node->left;
		} else {
			node->parent->right=node->left;
		}
	} else {
		se_tree->tree=node->left;
	}
	/* re-link node beneath its former left child, adopting that
	 * child's old right subtree as node's new left subtree */
	node->left->parent=node->parent;
	node->parent=node->left;
	node->left=node->left->right;
	if(node->left){
		node->left->parent=node;
	}
	node->parent->right=node;
}
931
/* Red-black insert fixup, case 5: the parent is red, the uncle is
 * black, and node and parent are both left (or both right) children.
 * Repaint and rotate at the grandparent to restore the invariants.
 */
static inline void
rb_insert_case5(se_tree_t *se_tree, se_tree_node_t *node)
{
	se_tree_node_t *grandparent;
	se_tree_node_t *parent;

	parent=se_tree_parent(node);
	parent->rb_color=SE_TREE_RB_COLOR_BLACK;
	grandparent=se_tree_parent(parent);
	if(!grandparent){
		return;
	}
	grandparent->rb_color=SE_TREE_RB_COLOR_RED;
	/* rotate the now-red grandparent out of the way, on the side
	 * opposite the node/parent pair */
	if( (node==parent->left) && (parent==grandparent->left) ){
		rotate_right(se_tree, grandparent);
	} else {
		rotate_left(se_tree, grandparent);
	}
}
951
/* Red-black insert fixup, case 4: the parent is red, the uncle is
 * black, and node is on the "inner" side (left-right or right-left).
 * Rotate at the parent so the outer-side case 5 applies.
 */
static inline void
rb_insert_case4(se_tree_t *se_tree, se_tree_node_t *node)
{
	se_tree_node_t *grandparent;
	se_tree_node_t *parent;

	parent=se_tree_parent(node);
	grandparent=se_tree_parent(parent);
	if(!grandparent){
		return;
	}
	if( (node==parent->right) && (parent==grandparent->left) ){
		rotate_left(se_tree, parent);
		/* after the rotation the old parent is node's child; continue
		 * the fixup from the lower of the two */
		node=node->left;
	} else if( (node==parent->left) && (parent==grandparent->right) ){
		rotate_right(se_tree, parent);
		node=node->right;
	}
	rb_insert_case5(se_tree, node);
}
972
/* Red-black insert fixup, case 3: when both the parent and the uncle
 * are red, repaint them black, repaint the grandparent red and restart
 * the fixup at the grandparent; otherwise fall through to case 4.
 */
static inline void
rb_insert_case3(se_tree_t *se_tree, se_tree_node_t *node)
{
	se_tree_node_t *grandparent;
	se_tree_node_t *parent;
	se_tree_node_t *uncle;

	uncle=se_tree_uncle(node);
	if(uncle && (uncle->rb_color==SE_TREE_RB_COLOR_RED)){
		parent=se_tree_parent(node);
		parent->rb_color=SE_TREE_RB_COLOR_BLACK;
		uncle->rb_color=SE_TREE_RB_COLOR_BLACK;
		grandparent=se_tree_grandparent(node);
		if(grandparent){
			/* the grandparent is now red; re-run the fixup there */
			rb_insert_case1(se_tree, grandparent);
		}
	} else {
		rb_insert_case4(se_tree, node);
	}
}
993
994 static inline void
995 rb_insert_case2(se_tree_t *se_tree, se_tree_node_t *node)
996 {
997         se_tree_node_t *parent;
998
999         parent=se_tree_parent(node);
1000         /* parent is always non-NULL here */
1001         if(parent->rb_color==SE_TREE_RB_COLOR_BLACK){
1002                 return;
1003         }
1004         rb_insert_case3(se_tree, node);
1005 }
1006
1007 static inline void
1008 rb_insert_case1(se_tree_t *se_tree, se_tree_node_t *node)
1009 {
1010         se_tree_node_t *parent;
1011
1012         parent=se_tree_parent(node);
1013         if(!parent){
1014                 node->rb_color=SE_TREE_RB_COLOR_BLACK;
1015                 return;
1016         }
1017         rb_insert_case2(se_tree, node);
1018 }
1019
1020 /* insert a new node in the tree. if this node matches an already existing node
1021  * then just replace the data for that node */
1022 void
1023 se_tree_insert32(se_tree_t *se_tree, guint32 key, void *data)
1024 {
1025         se_tree_node_t *node;
1026
1027         node=se_tree->tree;
1028
1029         /* is this the first node ?*/
1030         if(!node){
1031                 node=se_alloc(sizeof(se_tree_node_t));
1032                 switch(se_tree->type){
1033                 case SE_TREE_TYPE_RED_BLACK:
1034                         node->rb_color=SE_TREE_RB_COLOR_BLACK;
1035                         break;
1036                 }
1037                 node->parent=NULL;
1038                 node->left=NULL;
1039                 node->right=NULL;
1040                 node->key32=key;
1041                 node->data=data;
1042                 se_tree->tree=node;
1043                 return;
1044         }
1045
1046         /* it was not the new root so walk the tree until we find where to
1047          * insert this new leaf.
1048          */
1049         while(1){
1050                 /* this node already exists, so just replace the data pointer*/
1051                 if(key==node->key32){
1052                         node->data=data;
1053                         return;
1054                 }
1055                 if(key<node->key32) {
1056                         if(!node->left){
1057                                 /* new node to the left */
1058                                 se_tree_node_t *new_node;
1059                                 new_node=se_alloc(sizeof(se_tree_node_t));
1060                                 node->left=new_node;
1061                                 new_node->parent=node;
1062                                 new_node->left=NULL;
1063                                 new_node->right=NULL;
1064                                 new_node->key32=key;
1065                                 new_node->data=data;
1066                                 node=new_node;
1067                                 break;
1068                         }
1069                         node=node->left;
1070                         continue;
1071                 }
1072                 if(key>node->key32) {
1073                         if(!node->right){
1074                                 /* new node to the right */
1075                                 se_tree_node_t *new_node;
1076                                 new_node=se_alloc(sizeof(se_tree_node_t));
1077                                 node->right=new_node;
1078                                 new_node->parent=node;
1079                                 new_node->left=NULL;
1080                                 new_node->right=NULL;
1081                                 new_node->key32=key;
1082                                 new_node->data=data;
1083                                 node=new_node;
1084                                 break;
1085                         }
1086                         node=node->right;
1087                         continue;
1088                 }
1089         }
1090
1091         /* node will now point to the newly created node */
1092         switch(se_tree->type){
1093         case SE_TREE_TYPE_RED_BLACK:
1094                 node->rb_color=SE_TREE_RB_COLOR_RED;
1095                 rb_insert_case1(se_tree, node);
1096                 break;
1097         }
1098 }
1099
1100
1101 /* When the se data is released, this entire tree will dissapear as if it
1102  * never existed including all metadata associated with the tree.
1103  */
1104 se_tree_t *
1105 se_tree_create_non_persistent(int type)
1106 {
1107         se_tree_t *tree_list;
1108
1109         tree_list=se_alloc(sizeof(se_tree_t));
1110         tree_list->next=NULL;
1111         tree_list->type=type;
1112         tree_list->tree=NULL;
1113
1114         return tree_list;
1115 }
1116
1117 /* insert a new node in the tree. if this node matches an already existing node
1118  * then just replace the data for that node */
1119 void
1120 se_tree_insert32_array(se_tree_t *se_tree, se_tree_key_t *key, void *data)
1121 {
1122         se_tree_t *next_tree;
1123
1124         if((key[0].length<1)||(key[0].length>100)){
1125                 DISSECTOR_ASSERT_NOT_REACHED();
1126         }
1127         if((key[0].length==1)&&(key[1].length==0)){
1128                 se_tree_insert32(se_tree, *key[0].key, data);
1129                 return;
1130         }
1131         next_tree=se_tree_lookup32(se_tree, *key[0].key);
1132         if(!next_tree){
1133                 next_tree=se_tree_create_non_persistent(se_tree->type);
1134                 se_tree_insert32(se_tree, *key[0].key, next_tree);
1135         }
1136         if(key[0].length==1){
1137                 key++;
1138         } else {
1139                 key[0].length--;
1140                 key[0].key++;
1141         }
1142         se_tree_insert32_array(next_tree, key, data);
1143 }
1144
1145 void *
1146 se_tree_lookup32_array(se_tree_t *se_tree, se_tree_key_t *key)
1147 {
1148         se_tree_t *next_tree;
1149
1150         if((key[0].length<1)||(key[0].length>100)){
1151                 DISSECTOR_ASSERT_NOT_REACHED();
1152         }
1153         if((key[0].length==1)&&(key[1].length==0)){
1154                 return se_tree_lookup32(se_tree, *key[0].key);
1155         }
1156         next_tree=se_tree_lookup32(se_tree, *key[0].key);
1157         if(!next_tree){
1158                 return NULL;
1159         }
1160         if(key[0].length==1){
1161                 key++;
1162         } else {
1163                 key[0].length--;
1164                 key[0].key++;
1165         }
1166         se_tree_lookup32_array(next_tree, key);
1167 }
1168
1169
/* Insert value v under string key k by packing the string into a guint32
 * key array: [length, full 4-byte words of k..., residual tail word, 0].
 * Must derive keys exactly the same way as se_tree_lookup_string below.
 * NOTE(review): 'gchar' is typically signed, so k[...] << 24 on bytes
 * >= 0x80 shifts a negative int (implementation-defined/UB) — confirm
 * the input is 7-bit ASCII or cast through guchar in both functions.
 * NOTE(review): an empty string gives len==0 and div==(guint32)-1/4,
 * which trips the length assertion in se_tree_insert32_array — verify
 * callers never pass "".
 */
void se_tree_insert_string(se_string_hash_t* se_tree, const gchar* k, void* v) {
	guint32 len = strlen(k);
	guint32 div = (len-1)/4;	/* number of complete 4-byte words, excluding a word-aligned tail */
	guint32 residual = 0;	/* last 1..4 bytes of k packed little-endian-style */
	/* NOTE(review): the (guint32*) cast assumes k is suitably aligned
	 * for 4-byte reads inside the tree code — confirm on strict-
	 * alignment targets. */
	se_tree_key_t key[] = {
		{1,&len},
		{div,(guint32*)(&k[0])},
		{1,&residual},
		{0,NULL}
	};

	/* short string (len <= 4): no complete word run; shift the residual
	 * entry into slot 1 so the key array stays contiguous */
	if (! div) {
		key[1].length = key[2].length;
		key[1].key = key[2].key;
		key[2].length = 0;
		key[2].key = NULL;
	}

	/* div becomes the byte offset of the tail within k */
	div *= 4;

	/* pack the remaining (len%4, or 4 when len%4==0) tail bytes;
	 * each case deliberately falls through to pick up the lower bytes */
	switch(len%4) {
		case 0:
			residual |= ( k[div+3] << 24 );
			/* FALLTHRU */
		case 3:
			residual |= ( k[div+2] << 16 );
			/* FALLTHRU */
		case 2:
			residual |= ( k[div+1] << 8  );
			/* FALLTHRU */
		case 1:
			residual |= k[div];
			break;
	}

	se_tree_insert32_array(se_tree,key,v);
}
1204
/* Look up the value stored under string key k. The key array is derived
 * byte-for-byte identically to se_tree_insert_string above; any change
 * here must be mirrored there (and vice versa).
 * NOTE(review): shares the signed-char shift and empty-string hazards
 * flagged on the insert variant — confirm inputs are non-empty ASCII.
 */
void* se_tree_lookup_string(se_string_hash_t* se_tree, const gchar* k) {
	guint32 len = strlen(k);
	guint32 div = (len-1)/4;	/* number of complete 4-byte words, excluding a word-aligned tail */
	guint32 residual = 0;	/* last 1..4 bytes of k packed into one word */
	se_tree_key_t key[] = {
		{1,&len},
		{div,(guint32*)(&k[0])},
		{1,&residual},
		{0,NULL}
	};

	/* short string (len <= 4): drop the empty word run from the array */
	if (! div) {
		key[1].length = key[2].length;
		key[1].key = key[2].key;
		key[2].length = 0;
		key[2].key = NULL;
	}

	/* div becomes the byte offset of the tail within k */
	div *= 4;

	/* pack the tail bytes; deliberate fallthrough on every case */
	switch(len%4) {
		case 0:
			residual |= k[div+3] << 24;
			/* FALLTHRU */
		case 3:
			residual |= k[div+2] << 16;
			/* FALLTHRU */
		case 2:
			residual |= k[div+1] << 8;
			/* FALLTHRU */
		case 1:
			residual |= k[div];
			break;
	}

	return se_tree_lookup32_array(se_tree, key);
}