1 /* emem.c
2  * Wireshark memory management and garbage collection functions
3  * Ronnie Sahlberg 2005
4  *
5  * $Id$
6  *
7  * Wireshark - Network traffic analyzer
8  * By Gerald Combs <gerald@wireshark.org>
9  * Copyright 1998 Gerald Combs
10  *
11  * This program is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU General Public License
13  * as published by the Free Software Foundation; either version 2
14  * of the License, or (at your option) any later version.
15  *
16  * This program is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19  * GNU General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this program; if not, write to the Free Software
23  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24  */
25 #include "config.h"
26
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <string.h>
30 #include <stdarg.h>
31 #include <ctype.h>
32
33 #include <time.h>
34 #ifdef HAVE_SYS_TIME_H
35 #include <sys/time.h>
36 #endif
37
38 #ifdef HAVE_UNISTD_H
39 #include <unistd.h>
40 #endif
41
42 #include <glib.h>
43
44 #include "proto.h"
45 #include "emem.h"
46
47 #ifdef _WIN32
48 #include <windows.h>    /* VirtualAlloc, VirtualProtect */
49 #include <process.h>    /* getpid */
50 #endif
51
52 /* Print out statistics about our memory allocations? */
53 /*#define SHOW_EMEM_STATS*/
54
55 /* Do we want to use guard pages, if available? */
56 #define WANT_GUARD_PAGES 1
57
58 #ifdef WANT_GUARD_PAGES
59 /* Add guard pages at each end of our allocated memory */
60 #if defined(HAVE_SYSCONF) && defined(HAVE_MMAP) && defined(HAVE_MPROTECT) && defined(HAVE_STDINT_H)
61 #include <stdint.h>
62 #ifdef HAVE_SYS_TYPES_H
63 #include <sys/types.h>
64 #endif
65 #include <sys/mman.h>
66 #if defined(MAP_ANONYMOUS)
67 #define ANON_PAGE_MODE  (MAP_ANONYMOUS|MAP_PRIVATE)
68 #elif defined(MAP_ANON)
69 #define ANON_PAGE_MODE  (MAP_ANON|MAP_PRIVATE)
70 #else
71 #define ANON_PAGE_MODE  (MAP_PRIVATE)   /* have to map /dev/zero */
72 #define NEED_DEV_ZERO
73 #endif
74 #ifdef NEED_DEV_ZERO
75 #include <fcntl.h>
76 static int dev_zero_fd;
77 #define ANON_FD dev_zero_fd
78 #else
79 #define ANON_FD -1
80 #endif
81 #define USE_GUARD_PAGES 1
82 #endif
83 #endif
84
85 /* When required, allocate more memory from the OS in chunks of this size */
86 #define EMEM_PACKET_CHUNK_SIZE (10 * 1024 * 1024)
87
88 /* The canary between allocations is at least 8 bytes and up to 16 bytes to
89  * allow future allocations to be 4- or 8-byte aligned.
90  * All but the last byte of the canary are randomly generated; the last byte is
91  * a NUL ('\0') byte, separating the canary from the pointer to the next canary.
92  *
93  * For example, if the allocation is a multiple of 8 bytes, the canary and
94  * pointer would look like:
95  *   |0|1|2|3|4|5|6|7||0|1|2|3|4|5|6|7|
96  *   |c|c|c|c|c|c|c|0||p|p|p|p|p|p|p|p| (64-bit), or:
97  *   |c|c|c|c|c|c|c|0||p|p|p|p|         (32-bit)
98  *
99  * If the allocation was, for example, 12 bytes, the canary would look like:
100  *        |0|1|2|3|4|5|6|7||0|1|2|3|4|5|6|7|
101  *   [...]|a|a|a|a|c|c|c|c||c|c|c|c|c|c|c|0| (followed by the pointer)
102  */
103 #define EMEM_CANARY_SIZE 8
104 #define EMEM_CANARY_DATA_SIZE (EMEM_CANARY_SIZE * 2 - 1)
105
106 typedef struct _emem_chunk_t {
107         struct _emem_chunk_t *next;
108         char            *buf;
109         unsigned int    amount_free_init;
110         unsigned int    amount_free;
111         unsigned int    free_offset_init;
112         unsigned int    free_offset;
113         void            *canary_last;
114 } emem_chunk_t;
115
116 typedef struct _emem_header_t {
117         emem_chunk_t *free_list;
118         emem_chunk_t *used_list;
119
120         emem_tree_t *trees;             /* only used by se_mem allocator */
121
122         guint8 canary[EMEM_CANARY_DATA_SIZE];
123         void *(*memory_alloc)(size_t size, struct _emem_header_t *);
124
125         /*
126          * Tools like Valgrind and ElectricFence don't work well with memchunks.
127          * Export the following environment variables to make {ep|se}_alloc() allocate each
128          * object individually.
129          *
130          * WIRESHARK_DEBUG_EP_NO_CHUNKS
131          * WIRESHARK_DEBUG_SE_NO_CHUNKS
132          */
133         gboolean debug_use_chunks;
134
135         /* Do we want to use canaries?
136          * Export the following environment variables to disable/enable canaries
137          *
138          * WIRESHARK_DEBUG_EP_NO_CANARY
139  * For SE memory, canaries are off by default because the memory overhead
140          * is considerable.
141          * WIRESHARK_DEBUG_SE_USE_CANARY
142          */
143         gboolean debug_use_canary;
144
145         /*  Do we want to verify no one is using a pointer to an ep_ or se_
146          *  allocated thing where they shouldn't be?
147          *
148          * Export WIRESHARK_EP_VERIFY_POINTERS or WIRESHARK_SE_VERIFY_POINTERS
149          * to turn this on.
150          */
151         gboolean debug_verify_pointers;
152
153 } emem_header_t;
154
155 static emem_header_t ep_packet_mem;
156 static emem_header_t se_packet_mem;
157
158 /*
159  *  Memory scrubbing is expensive but can be useful to ensure we don't:
160  *    - use memory before initializing it
161  *    - use memory after freeing it
162  *  Export WIRESHARK_DEBUG_SCRUB_MEMORY to turn it on.
163  */
164 static gboolean debug_use_memory_scrubber = FALSE;
165
166 #if defined (_WIN32)
167 static SYSTEM_INFO sysinfo;
168 static OSVERSIONINFO versinfo;
169 static int pagesize;
170 #elif defined(USE_GUARD_PAGES)
171 static intptr_t pagesize;
172 #endif /* _WIN32 / USE_GUARD_PAGES */
173
174 static void *emem_alloc_chunk(size_t size, emem_header_t *mem);
175 static void *emem_alloc_glib(size_t size, emem_header_t *mem);
176
177 /*
178  * Set a canary value to be placed between memchunks.
179  */
180 static void
181 emem_canary_init(guint8 *canary)
182 {
183         int i;
184         static GRand *rand_state = NULL;
185
186         if (rand_state == NULL) {
187                 rand_state = g_rand_new();
188         }
189         for (i = 0; i < EMEM_CANARY_DATA_SIZE; i ++) {
190                 canary[i] = (guint8) g_rand_int_range(rand_state, 1, 0x100);
191         }
192         return;
193 }
194
195 static void *
196 emem_canary_next(guint8 *mem_canary, guint8 *canary, int *len)
197 {
198         void *ptr;
199         int i;
200
201         for (i = 0; i < EMEM_CANARY_SIZE-1; i++)
202                 if (mem_canary[i] != canary[i])
203                         return (void *) -1;
204
205         for (; i < EMEM_CANARY_DATA_SIZE; i++) {
206                 if (canary[i] == '\0') {
207                         memcpy(&ptr, &canary[i+1], sizeof(void *));
208
209                         if (len)
210                                 *len = i + 1 + sizeof(void *);
211                         return ptr;
212                 }
213
214                 if (mem_canary[i] != canary[i])
215                         return (void *) -1;
216         }
217
218         return (void *) -1;
219 }
220
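/*
 * A minimal sketch of how a canary chain is typically walked (the same
 * pattern appears in ep_check_canary_integrity() and emem_free_all() below);
 * 'chunk' is assumed to be an emem_chunk_t from one of the pool's chunk
 * lists and 'mem' the owning emem_header_t:
 *
 *   void *cur = chunk->canary_last;
 *   while (cur != NULL) {
 *           cur = emem_canary_next(mem->canary, cur, NULL);
 *           if (cur == (void *) -1)
 *                   g_error("Memory corrupted");
 *   }
 *
 * Each step checks one canary against the pool's reference pattern and then
 * follows the stored pointer to the previously written canary, so an
 * overwritten canary byte anywhere in the chain is detected.
 */
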
221 /*
222  * Given an allocation size, return the amount of room needed for the canary
223  * (with a minimum of 8 bytes) while using the canary to pad to an 8-byte
224  * boundary.
225  */
226 static guint8
227 emem_canary_pad (size_t allocation)
228 {
229         guint8 pad;
230
231         pad = EMEM_CANARY_SIZE - (allocation % EMEM_CANARY_SIZE);
232         if (pad < EMEM_CANARY_SIZE)
233                 pad += EMEM_CANARY_SIZE;
234
235         return pad;
236 }
237
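/*
 * Worked example of the padding rule above (a sketch; the numbers follow
 * from EMEM_CANARY_SIZE == 8):
 *
 *   allocation = 16:  pad = 8 - (16 % 8) = 8            ->  8-byte canary
 *   allocation = 12:  pad = 8 - (12 % 8) = 4; 4 < 8,
 *                     so pad += 8                       -> 12-byte canary
 *
 * In both cases allocation + pad is a multiple of 8, so the following
 * allocation starts 8-byte aligned and the canary is never shorter than
 * EMEM_CANARY_SIZE bytes.
 */
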
238 /* used for debugging canaries, will block */
239 #ifdef DEBUG_INTENSE_CANARY_CHECKS
240 gboolean intense_canary_checking = FALSE;
241
242 /*  used to intensively check ep canaries
243  */
244 void
245 ep_check_canary_integrity(const char* fmt, ...)
246 {
247         va_list ap;
248         static gchar there[128] = {
249                 'L','a','u','n','c','h',0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
250                 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
251                 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
252                 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 };
253         gchar here[128];
254         emem_chunk_t* npc = NULL;
255
256         if (! intense_canary_checking ) return;
257
258         va_start(ap,fmt);
259         g_vsnprintf(here, sizeof(here), fmt, ap);
260         va_end(ap);
261
262         for (npc = ep_packet_mem.free_list; npc != NULL; npc = npc->next) {
263                 void *canary_next = npc->canary_last;
264
265                 while (canary_next != NULL) {
266                         canary_next = emem_canary_next(ep_packet_mem.canary, canary_next, NULL);
267                         /* XXX, check if canary_next is inside allocated memory? */
268
269                         if (canary_next == (void *) -1)
270                                 g_error("Per-packet memory corrupted\nbetween: %s\nand: %s", there, here);
271                 }
272         }
273
274         g_strlcpy(there, here, sizeof(there));
275 }
276 #endif
277
278 static void
279 emem_init_chunk(emem_header_t *mem)
280 {
281         if (mem->debug_use_canary)
282                 emem_canary_init(mem->canary);
283
284         if (mem->debug_use_chunks)
285                 mem->memory_alloc = emem_alloc_chunk;
286         else
287                 mem->memory_alloc = emem_alloc_glib;
288 }
289
290
291 /* Initialize the packet-lifetime memory allocation pool.
292  * This function should be called only once when Wireshark or TShark starts
293  * up.
294  */
295 static void
296 ep_init_chunk(void)
297 {
298         ep_packet_mem.free_list=NULL;
299         ep_packet_mem.used_list=NULL;
300         ep_packet_mem.trees=NULL;       /* not used by this allocator */
301
302         ep_packet_mem.debug_use_chunks = (getenv("WIRESHARK_DEBUG_EP_NO_CHUNKS") == NULL);
303         ep_packet_mem.debug_use_canary = ep_packet_mem.debug_use_chunks && (getenv("WIRESHARK_DEBUG_EP_NO_CANARY") == NULL);
304         ep_packet_mem.debug_verify_pointers = (getenv("WIRESHARK_EP_VERIFY_POINTERS") != NULL);
305
306 #ifdef DEBUG_INTENSE_CANARY_CHECKS
307         intense_canary_checking = (getenv("WIRESHARK_DEBUG_EP_INTENSE_CANARY") != NULL);
308 #endif
309
310         emem_init_chunk(&ep_packet_mem);
311 }
312
313 /* Initialize the capture-lifetime memory allocation pool.
314  * This function should be called only once when Wireshark or TShark starts
315  * up.
316  */
317 static void
318 se_init_chunk(void)
319 {
320         se_packet_mem.free_list = NULL;
321         se_packet_mem.used_list = NULL;
322         se_packet_mem.trees = NULL;
323
324         se_packet_mem.debug_use_chunks = (getenv("WIRESHARK_DEBUG_SE_NO_CHUNKS") == NULL);
325         se_packet_mem.debug_use_canary = se_packet_mem.debug_use_chunks && (getenv("WIRESHARK_DEBUG_SE_USE_CANARY") != NULL);
326         se_packet_mem.debug_verify_pointers = (getenv("WIRESHARK_SE_VERIFY_POINTERS") != NULL);
327
328         emem_init_chunk(&se_packet_mem);
329 }
330
331 /*  Initialize all the allocators here.
332  *  This function should be called only once when Wireshark or TShark starts
333  *  up.
334  */
335 void
336 emem_init(void)
337 {
338         ep_init_chunk();
339         se_init_chunk();
340
341         if (getenv("WIRESHARK_DEBUG_SCRUB_MEMORY"))
342                 debug_use_memory_scrubber  = TRUE;
343
344 #if defined (_WIN32)
345         /* Set up our guard page info for Win32 */
346         GetSystemInfo(&sysinfo);
347         pagesize = sysinfo.dwPageSize;
348
349         /* Call GetVersionEx using the OSVERSIONINFO structure;
350          * OSVERSIONINFOEX requires NT4 SP6 or a newer NT version and
351          * will fail on Win9x and older NT versions.
352          * See also:
353          * http://msdn.microsoft.com/library/en-us/sysinfo/base/getversionex.asp
354          * http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfo_str.asp
355          * http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfoex_str.asp
356          */
357         versinfo.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
358         GetVersionEx(&versinfo);
359
360 #elif defined(USE_GUARD_PAGES)
361         pagesize = sysconf(_SC_PAGESIZE);
362 #ifdef NEED_DEV_ZERO
363         dev_zero_fd = ws_open("/dev/zero", O_RDWR);
364         g_assert(dev_zero_fd != -1);
365 #endif
366 #endif /* _WIN32 / USE_GUARD_PAGES */
367 }
368
369 #ifdef SHOW_EMEM_STATS
370 #define NUM_ALLOC_DIST 10
371 static guint allocations[NUM_ALLOC_DIST] = { 0 };
372 static guint total_no_chunks = 0;
373
374 static void
375 print_alloc_stats()
376 {
377         guint num_chunks = 0;
378         guint num_allocs = 0;
379         guint total_used = 0;
380         guint total_allocation = 0;
381         guint used_for_canaries = 0;
382         guint total_headers;
383         guint i;
384         emem_chunk_t *chunk;
385         guint total_space_allocated_from_os, total_space_wasted;
386         gboolean ep_stat=TRUE;
387
388         fprintf(stderr, "\n-------- EP allocator statistics --------\n");
389         fprintf(stderr, "%s chunks, %s canaries, %s memory scrubber\n",
390                ep_packet_mem.debug_use_chunks ? "Using" : "Not using",
391                ep_packet_mem.debug_use_canary ? "using" : "not using",
392                debug_use_memory_scrubber ? "using" : "not using");
393
394         if (! (ep_packet_mem.free_list || ep_packet_mem.used_list)) {
395                 fprintf(stderr, "No memory allocated\n");
396                 ep_stat = FALSE;
397         }
398         if (ep_packet_mem.debug_use_chunks && ep_stat) {
399                 /* Nothing interesting without chunks */
400                 /*  Only look at the used_list since those chunks are fully
401                  *  used.  Looking at the free list would skew our view of what
402                  *  we have wasted.
403                  */
404                 for (chunk = ep_packet_mem.used_list; chunk; chunk = chunk->next) {
405                         num_chunks++;
406                         total_used += (chunk->amount_free_init - chunk->amount_free);
407                         total_allocation += chunk->amount_free_init;
408                 }
409                 if (num_chunks > 0) {
410                         fprintf (stderr, "\n");
411                         fprintf (stderr, "\n---- Buffer space ----\n");
412                         fprintf (stderr, "\tChunk allocation size: %10u\n", EMEM_PACKET_CHUNK_SIZE);
413                         fprintf (stderr, "\t*    Number of chunks: %10u\n", num_chunks);
414                         fprintf (stderr, "\t-------------------------------------------\n");
415                         fprintf (stderr, "\t= %u (%u including guard pages) total space used for buffers\n",
416                         total_allocation, EMEM_PACKET_CHUNK_SIZE * num_chunks);
417                         fprintf (stderr, "\t-------------------------------------------\n");
418                         total_space_allocated_from_os = total_allocation
419                                 + sizeof(emem_chunk_t) * num_chunks;
420                         fprintf (stderr, "Total allocated from OS: %u\n\n",
421                                 total_space_allocated_from_os);
422                 }else{
423                         fprintf (stderr, "No fully used chunks, nothing to do\n");
424                 }
425                 /* Reset stats */
426                 num_chunks = 0;
427                 num_allocs = 0;
428                 total_used = 0;
429                 total_allocation = 0;
430                 used_for_canaries = 0;
431         }
432
433
434         fprintf(stderr, "\n-------- SE allocator statistics --------\n");
435         fprintf(stderr, "Total number of chunk allocations %u\n",
436                 total_no_chunks);
437         fprintf(stderr, "%s chunks, %s canaries\n",
438                se_packet_mem.debug_use_chunks ? "Using" : "Not using",
439                se_packet_mem.debug_use_canary ? "using" : "not using");
440
441         if (! (se_packet_mem.free_list || se_packet_mem.used_list)) {
442                 fprintf(stderr, "No memory allocated\n");
443                 return;
444         }
445
446         if (!se_packet_mem.debug_use_chunks )
447                 return; /* Nothing interesting without chunks?? */
448
449         /*  Only look at the used_list since those chunks are fully used.
450          *  Looking at the free list would skew our view of what we have wasted.
451          */
452         for (chunk = se_packet_mem.used_list; chunk; chunk = chunk->next) {
453                 num_chunks++;
454                 total_used += (chunk->amount_free_init - chunk->amount_free);
455                 total_allocation += chunk->amount_free_init;
456
457                 if (se_packet_mem.debug_use_canary){
458                         void *ptr = chunk->canary_last;
459                         int len;
460
461                         while (ptr != NULL) {
462                                 ptr = emem_canary_next(se_packet_mem.canary, ptr, &len);
463
464                                 if (ptr == (void *) -1)
465                                         g_error("Memory corrupted");
466                                 used_for_canaries += len;
467                         }
468                 }
469         }
470
471         if (num_chunks == 0) {
472
473                 fprintf (stderr, "No fully used chunks, nothing to do\n");
474                 return;
475         }
476
477         fprintf (stderr, "\n");
478         fprintf (stderr, "---------- Allocations from the OS ----------\n");
479         fprintf (stderr, "---- Headers ----\n");
480         fprintf (stderr, "\t(    Chunk header size: %10lu\n",
481                  sizeof(emem_chunk_t));
482         fprintf (stderr, "\t*     Number of chunks: %10u\n", num_chunks);
483         fprintf (stderr, "\t-------------------------------------------\n");
484
485         total_headers = sizeof(emem_chunk_t) * num_chunks;
486         fprintf (stderr, "\t= %u bytes used for headers\n", total_headers);
487         fprintf (stderr, "\n---- Buffer space ----\n");
488         fprintf (stderr, "\tChunk allocation size: %10u\n",
489                  EMEM_PACKET_CHUNK_SIZE);
490         fprintf (stderr, "\t*    Number of chunks: %10u\n", num_chunks);
491         fprintf (stderr, "\t-------------------------------------------\n");
492         fprintf (stderr, "\t= %u (%u including guard pages) bytes used for buffers\n",
493                 total_allocation, EMEM_PACKET_CHUNK_SIZE * num_chunks);
494         fprintf (stderr, "\t-------------------------------------------\n");
495         total_space_allocated_from_os = (EMEM_PACKET_CHUNK_SIZE * num_chunks)
496                                         + total_headers;
497         fprintf (stderr, "Total bytes allocated from the OS: %u\n\n",
498                 total_space_allocated_from_os);
499
500         for (i = 0; i < NUM_ALLOC_DIST; i++)
501                 num_allocs += allocations[i];
502
503         fprintf (stderr, "---------- Allocations from the SE pool ----------\n");
504         fprintf (stderr, "                Number of SE allocations: %10u\n",
505                  num_allocs);
506         fprintf (stderr, "             Bytes used (incl. canaries): %10u\n",
507                  total_used);
508         fprintf (stderr, "                 Bytes used for canaries: %10u\n",
509                  used_for_canaries);
510         fprintf (stderr, "Bytes unused (wasted, excl. guard pages): %10u\n",
511                  total_allocation - total_used);
512         fprintf (stderr, "Bytes unused (wasted, incl. guard pages): %10u\n\n",
513                  total_space_allocated_from_os - total_used);
514
515         fprintf (stderr, "---------- Statistics ----------\n");
516         fprintf (stderr, "Average SE allocation size (incl. canaries): %6.2f\n",
517                 (float)total_used/(float)num_allocs);
518         fprintf (stderr, "Average SE allocation size (excl. canaries): %6.2f\n",
519                 (float)(total_used - used_for_canaries)/(float)num_allocs);
520         fprintf (stderr, "        Average wasted bytes per allocation: %6.2f\n",
521                 (total_allocation - total_used)/(float)num_allocs);
522         total_space_wasted = (total_allocation - total_used)
523                 + (sizeof(emem_chunk_t));
524         fprintf (stderr, " Space used for headers + unused allocation: %8u\n",
525                 total_space_wasted);
526         fprintf (stderr, "--> %% overhead/waste: %4.2f\n",
527                 100 * (float)total_space_wasted/(float)total_space_allocated_from_os);
528
529         fprintf (stderr, "\nAllocation distribution (sizes include canaries):\n");
530         for (i = 0; i < (NUM_ALLOC_DIST-1); i++)
531                 fprintf (stderr, "size < %5d: %8u\n", 32<<i, allocations[i]);
532         fprintf (stderr, "size > %5d: %8u\n", 32<<i, allocations[i]);
533 }
534 #endif
535
536 static gboolean
537 emem_verify_pointer_list(const emem_chunk_t *chunk_list, const void *ptr)
538 {
539         const gchar *cptr = ptr;
540         const emem_chunk_t *chunk;
541
542         for (chunk = chunk_list; chunk; chunk = chunk->next) {
543                 if (cptr >= (chunk->buf + chunk->free_offset_init) && cptr < (chunk->buf + chunk->free_offset))
544                         return TRUE;
545         }
546         return FALSE;
547 }
548
549 static gboolean
550 emem_verify_pointer(const emem_header_t *hdr, const void *ptr)
551 {
552         return emem_verify_pointer_list(hdr->free_list, ptr) || emem_verify_pointer_list(hdr->used_list, ptr);
553 }
554
555 gboolean
556 ep_verify_pointer(const void *ptr)
557 {
558         if (ep_packet_mem.debug_verify_pointers)
559                 return emem_verify_pointer(&ep_packet_mem, ptr);
560         else
561                 return FALSE;
562 }
563
564 gboolean
565 se_verify_pointer(const void *ptr)
566 {
567         if (se_packet_mem.debug_verify_pointers)
568                 return emem_verify_pointer(&se_packet_mem, ptr);
569         else
570                 return FALSE;
571 }
572
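/*
 * Illustrative sketch (hypothetical caller): with WIRESHARK_EP_VERIFY_POINTERS
 * exported and chunked allocation in use, the check distinguishes pool memory
 * from anything else:
 *
 *   static char global_buf[64];
 *   gboolean a = ep_verify_pointer(ep_strdup("abc"));   TRUE:  ep pool
 *   gboolean b = ep_verify_pointer(global_buf);         FALSE: not ep pool
 *
 * When the environment variable is not set, both calls return FALSE, so the
 * result is only meaningful while pointer verification is enabled.
 */
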
573 static void
574 emem_scrub_memory(char *buf, size_t size, gboolean alloc)
575 {
576         guint scrubbed_value;
577         guint offset;
578
579         if (!debug_use_memory_scrubber)
580                 return;
581
582         if (alloc) /* this memory is being allocated */
583                 scrubbed_value = 0xBADDCAFE;
584         else /* this memory is being freed */
585                 scrubbed_value = 0xDEADBEEF;
586
587         /*  We shouldn't need to check the alignment of the starting address
588          *  since this is malloc'd memory (or 'pagesize' bytes into malloc'd
589          *  memory).
590          */
591
592         /* XXX - if the above is *NOT* true, we should use memcpy here,
593          * in order to avoid problems on alignment-sensitive platforms, e.g.
594          * http://stackoverflow.com/questions/108866/is-there-memset-that-accepts-integers-larger-than-char
595          */
596
597         for (offset = 0; offset + sizeof(guint) <= size; offset += sizeof(guint))
598                 *(guint*)(void*)(buf+offset) = scrubbed_value;
599
600         /* Initialize the last bytes, if any */
601         if (offset < size) {
602                 *(guint8*)(buf+offset) = scrubbed_value >> 24;
603                 offset++;
604                 if (offset < size) {
605                         *(guint8*)(buf+offset) = (scrubbed_value >> 16) & 0xFF;
606                         offset++;
607                         if (offset < size) {
608                                 *(guint8*)(buf+offset) = (scrubbed_value >> 8) & 0xFF;
609                         }
610                 }
611         }
612
613
614 }
615
616 static emem_chunk_t *
617 emem_create_chunk(size_t size)
618 {
619         emem_chunk_t *npc;
620
621         npc = g_new(emem_chunk_t, 1);
622         npc->next = NULL;
623         npc->canary_last = NULL;
624
625 #if defined (_WIN32)
626         /*
627          * MSDN documents VirtualAlloc/VirtualProtect at
628          * http://msdn.microsoft.com/library/en-us/memory/base/creating_guard_pages.asp
629          */
630
631         /* XXX - is MEM_COMMIT|MEM_RESERVE correct? */
632         npc->buf = VirtualAlloc(NULL, size,
633                 MEM_COMMIT|MEM_RESERVE, PAGE_READWRITE);
634
635         if (npc->buf == NULL) {
636                 g_free(npc);
637                 if (getenv("WIRESHARK_ABORT_ON_OUT_OF_MEMORY"))
638                         abort();
639                 else
640                         THROW(OutOfMemoryError);
641         }
642
643 #elif defined(USE_GUARD_PAGES)
644         npc->buf = mmap(NULL, size,
645                 PROT_READ|PROT_WRITE, ANON_PAGE_MODE, ANON_FD, 0);
646
647         if (npc->buf == MAP_FAILED) {
648                 g_free(npc);
649                 if (getenv("WIRESHARK_ABORT_ON_OUT_OF_MEMORY"))
650                         abort();
651                 else
652                         THROW(OutOfMemoryError);
653         }
654
655 #else /* Is there a draft in here? */
656         npc->buf = g_malloc(size);
657         /* g_malloc() can't fail */
658 #endif
659
660 #ifdef SHOW_EMEM_STATS
661         total_no_chunks++;
662 #endif
663
664         npc->amount_free = npc->amount_free_init = (unsigned int) size;
665         npc->free_offset = npc->free_offset_init = 0;
666         return npc;
667 }
668
669 static void
670 emem_destroy_chunk(emem_chunk_t *npc)
671 {
672 #if defined (_WIN32)
673         VirtualFree(npc->buf, 0, MEM_RELEASE);
674 #elif defined(USE_GUARD_PAGES)
675         munmap(npc->buf, npc->amount_free_init);
676 #else
677         g_free(npc->buf);
678 #endif
679 #ifdef SHOW_EMEM_STATS
680         total_no_chunks--;
681 #endif
682         g_free(npc);
683 }
684
685 static emem_chunk_t *
686 emem_create_chunk_gp(size_t size)
687 {
688 #if defined (_WIN32)
689         BOOL ret;
690         char *buf_end, *prot1, *prot2;
691         DWORD oldprot;
692 #elif defined(USE_GUARD_PAGES)
693         int ret;
694         char *buf_end, *prot1, *prot2;
695 #endif /* _WIN32 / USE_GUARD_PAGES */
696         emem_chunk_t *npc;
697
698         npc = emem_create_chunk(size);
699
700 #if defined (_WIN32)
701         buf_end = npc->buf + size;
702
703         /* Align our guard pages on page-sized boundaries */
704         prot1 = (char *) ((((intptr_t) npc->buf + pagesize - 1) / pagesize) * pagesize);
705         prot2 = (char *) ((((intptr_t) buf_end - (1 * pagesize)) / pagesize) * pagesize);
706
707         ret = VirtualProtect(prot1, pagesize, PAGE_NOACCESS, &oldprot);
708         g_assert(ret != 0 || versinfo.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS);
709         ret = VirtualProtect(prot2, pagesize, PAGE_NOACCESS, &oldprot);
710         g_assert(ret != 0 || versinfo.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS);
711
712         npc->amount_free_init = (unsigned int) (prot2 - prot1 - pagesize);
713         npc->free_offset_init = (unsigned int) (prot1 - npc->buf) + pagesize;
714 #elif defined(USE_GUARD_PAGES)
715         buf_end = npc->buf + size;
716
717         /* Align our guard pages on page-sized boundaries */
718         prot1 = (char *) ((((intptr_t) npc->buf + pagesize - 1) / pagesize) * pagesize);
719         prot2 = (char *) ((((intptr_t) buf_end - (1 * pagesize)) / pagesize) * pagesize);
720
721         ret = mprotect(prot1, pagesize, PROT_NONE);
722         g_assert(ret != -1);
723         ret = mprotect(prot2, pagesize, PROT_NONE);
724         g_assert(ret != -1);
725
726         npc->amount_free_init = prot2 - prot1 - pagesize;
727         npc->free_offset_init = (prot1 - npc->buf) + pagesize;
728 #else
729         npc->amount_free_init = size;
730         npc->free_offset_init = 0;
731 #endif /* USE_GUARD_PAGES */
732
733         npc->amount_free = npc->amount_free_init;
734         npc->free_offset = npc->free_offset_init;
735         return npc;
736 }
737
738 static void *
739 emem_alloc_chunk(size_t size, emem_header_t *mem)
740 {
741         void *buf;
742
743         size_t asize = size;
744         gboolean use_canary = mem->debug_use_canary;
745         guint8 pad;
746         emem_chunk_t *free_list;
747
748         /* Allocate room for at least 8 bytes of canary plus some padding
749          * so the canary ends on an 8-byte boundary.
750          * But first add the room needed for the pointer to the next canary
751          * (so the entire allocation will end on an 8-byte boundary).
752          */
753          if (use_canary) {
754                 asize += sizeof(void *);
755                 pad = emem_canary_pad(asize);
756         } else
757                 pad = (WS_MEM_ALIGN - (asize & (WS_MEM_ALIGN-1))) & (WS_MEM_ALIGN-1);
758
759         asize += pad;
760
761 #ifdef SHOW_EMEM_STATS
762         /* Do this check here so we can include the canary size */
763         if (mem == &se_packet_mem) {
764                 if (asize < 32)
765                         allocations[0]++;
766                 else if (asize < 64)
767                         allocations[1]++;
768                 else if (asize < 128)
769                         allocations[2]++;
770                 else if (asize < 256)
771                         allocations[3]++;
772                 else if (asize < 512)
773                         allocations[4]++;
774                 else if (asize < 1024)
775                         allocations[5]++;
776                 else if (asize < 2048)
777                         allocations[6]++;
778                 else if (asize < 4096)
779                         allocations[7]++;
780                 else if (asize < 8192)
781                         allocations[8]++;
782                 else if (asize < 16384)
783                         allocations[9]++;
784                 else
785                         allocations[(NUM_ALLOC_DIST-1)]++;
786         }
787 #endif
788
789         /* make sure we don't try to allocate too much (arbitrary limit) */
790         DISSECTOR_ASSERT(size<(EMEM_PACKET_CHUNK_SIZE>>2));
791
792         if (!mem->free_list)
793                 mem->free_list = emem_create_chunk_gp(EMEM_PACKET_CHUNK_SIZE);
794
795         /* oops, we need to allocate more memory to serve this request
796          * than we have free. move this node to the used list and try again
797          */
798         if(asize > mem->free_list->amount_free) {
799                 emem_chunk_t *npc;
800                 npc=mem->free_list;
801                 mem->free_list=mem->free_list->next;
802                 npc->next=mem->used_list;
803                 mem->used_list=npc;
804
805                 if (!mem->free_list)
806                         mem->free_list = emem_create_chunk_gp(EMEM_PACKET_CHUNK_SIZE);
807         }
808
809         free_list = mem->free_list;
810
811         buf = free_list->buf + free_list->free_offset;
812
813         free_list->amount_free -= (unsigned int) asize;
814         free_list->free_offset += (unsigned int) asize;
815
816         if (use_canary) {
817                 char *cptr = (char *)buf + size;
818
819                 memcpy(cptr, mem->canary, pad-1);
820                 cptr[pad-1] = '\0';
821                 memcpy(cptr + pad, &free_list->canary_last, sizeof(void *));
822
823                 free_list->canary_last = cptr;
824         }
825
826         return buf;
827 }
828
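/*
 * Worked example of the size bookkeeping in emem_alloc_chunk() (a sketch
 * assuming canaries are enabled and sizeof(void *) == 8):
 *
 *   requested size            = 10
 *   + sizeof(void *)          -> asize = 18
 *   pad = emem_canary_pad(18) = 8 - (18 % 8) = 6; 6 < 8 -> pad = 14
 *   asize = 18 + 14           = 32   (a multiple of 8)
 *
 * The caller gets bytes [0,10) of that region; bytes [10,23) hold 13 random
 * canary bytes, byte 23 is the terminating '\0', and bytes [24,32) hold the
 * pointer to the previously written canary in this chunk.
 */
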
829 static void *
830 emem_alloc_glib(size_t size, emem_header_t *mem)
831 {
832         emem_chunk_t *npc;
833
834         npc=g_new(emem_chunk_t, 1);
835         npc->next=mem->used_list;
836         npc->buf=g_malloc(size);
837         npc->canary_last = NULL;
838         mem->used_list=npc;
839         /* There's no padding/alignment involved (from our point of view) when
840          * we fetch the memory directly from the system pool, so WYSIWYG */
841         npc->free_offset = npc->free_offset_init = 0;
842         npc->amount_free = npc->amount_free_init = (unsigned int) size;
843
844         return npc->buf;
845 }
846
847 /* allocate 'size' amount of memory. */
848 static void *
849 emem_alloc(size_t size, emem_header_t *mem)
850 {
851         void *buf = mem->memory_alloc(size, mem);
852
853         /*  XXX - this is a waste of time if the allocator function is going to
854          *  memset this straight back to 0.
855          */
856         emem_scrub_memory(buf, size, TRUE);
857
858         return buf;
859 }
860
861 /* allocate 'size' amount of memory with an allocation lifetime until the
862  * next packet.
863  */
864 void *
865 ep_alloc(size_t size)
866 {
867         return emem_alloc(size, &ep_packet_mem);
868 }
869
870 /* allocate 'size' amount of memory with an allocation lifetime until the
871  * next capture.
872  */
873 void *
874 se_alloc(size_t size)
875 {
876         return emem_alloc(size, &se_packet_mem);
877 }
878
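/*
 * Illustrative lifetime sketch (hypothetical dissector-style code):
 *
 *   guint8 *scratch = ep_alloc(64);   valid until the next ep_free_all(),
 *                                     i.e. roughly for the current packet
 *   guint8 *state   = se_alloc(64);   valid until the next se_free_all(),
 *                                     i.e. roughly for the lifetime of the
 *                                     current capture
 *
 * Neither pointer is ever passed to g_free(); the pools are released
 * wholesale by ep_free_all() and se_free_all() further down in this file.
 */
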
879 void *
880 sl_alloc(struct ws_memory_slab *mem_chunk)
881 {
882         emem_chunk_t *chunk;
883         void *ptr;
884
885         /* XXX, debug_use_slices -> fallback to g_slice_alloc0 */
886
887         if ((mem_chunk->freed != NULL)) {
888                 ptr = mem_chunk->freed;
889                 memcpy(&mem_chunk->freed, ptr, sizeof(void *));
890                 return ptr;
891         }
892
893         if (!(chunk = mem_chunk->chunk_list) || chunk->amount_free < (guint) mem_chunk->item_size) {
894                 size_t alloc_size = mem_chunk->item_size * mem_chunk->count;
895
896                 /* align to page-size */
897 #if defined (_WIN32) || defined(USE_GUARD_PAGES)
898                 alloc_size = (alloc_size + (pagesize - 1)) & ~(pagesize - 1);
899 #endif
900
901                 chunk = emem_create_chunk(alloc_size);  /* NOTE: using version without guard pages! */
902                 chunk->next = mem_chunk->chunk_list;
903                 mem_chunk->chunk_list = chunk;
904         }
905
906         ptr = chunk->buf + chunk->free_offset;
907         chunk->free_offset += mem_chunk->item_size;
908         chunk->amount_free -= mem_chunk->item_size;
909
910         return ptr;
911 }
912
913 void
914 sl_free(struct ws_memory_slab *mem_chunk, gpointer ptr)
915 {
916         /* XXX, debug_use_slices -> fallback to g_slice_free1 */
917
918         /* XXX, abort if ptr not found in emem_verify_pointer_list()? */
919         if (ptr != NULL /* && emem_verify_pointer_list(mem_chunk->chunk_list, ptr) */) {
920                 memcpy(ptr, &(mem_chunk->freed), sizeof(void *));
921                 mem_chunk->freed = ptr;
922         }
923 }
924
925 void *
926 ep_alloc0(size_t size)
927 {
928         return memset(ep_alloc(size),'\0',size);
929 }
930
931 void *
932 se_alloc0(size_t size)
933 {
934         return memset(se_alloc(size),'\0',size);
935 }
936
937 void *
938 sl_alloc0(struct ws_memory_slab *mem_chunk)
939 {
940         return memset(sl_alloc(mem_chunk), '\0', mem_chunk->item_size);
941 }
942
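/*
 * Illustrative slab usage (a sketch; 'frame_slab' is a hypothetical
 * 'struct ws_memory_slab' set up for fixed-size items as described in
 * emem.h, and 'my_t' is a hypothetical item type):
 *
 *   my_t *f = sl_alloc0(&frame_slab);   get one zeroed item
 *   sl_free(&frame_slab, f);            put it on the slab's free list
 *   sl_free_all(&frame_slab);           release every chunk back to the OS
 *
 * Freed items are recycled by the next sl_alloc() before any new chunk is
 * created, so alloc/free cycles of same-sized objects stay cheap.
 */
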
943 static gchar *
944 emem_strdup(const gchar *src, void *allocator(size_t))
945 {
946         guint len;
947         gchar *dst;
948
949         /* If src is NULL, just return the string "<NULL>" so that the callers don't
950          * have to bother checking it.
951          */
952         if(!src)
953                 return "<NULL>";
954
955         len = (guint) strlen(src);
956         dst = memcpy(allocator(len+1), src, len+1);
957
958         return dst;
959 }
960
961 gchar *
962 ep_strdup(const gchar *src)
963 {
964         return emem_strdup(src, ep_alloc);
965 }
966
967 gchar *
968 se_strdup(const gchar *src)
969 {
970         return emem_strdup(src, se_alloc);
971 }
972
973 static gchar *
974 emem_strndup(const gchar *src, size_t len, void *allocator(size_t))
975 {
976         gchar *dst = allocator(len+1);
977         guint i;
978
979         for (i = 0; (i < len) && src[i]; i++)
980                 dst[i] = src[i];
981
982         dst[i] = '\0';
983
984         return dst;
985 }
986
987 gchar *
988 ep_strndup(const gchar *src, size_t len)
989 {
990         return emem_strndup(src, len, ep_alloc);
991 }
992
993 gchar *
994 se_strndup(const gchar *src, size_t len)
995 {
996         return emem_strndup(src, len, se_alloc);
997 }
998
999
1000
1001 void *
1002 ep_memdup(const void* src, size_t len)
1003 {
1004         return memcpy(ep_alloc(len), src, len);
1005 }
1006
1007 void *
1008 se_memdup(const void* src, size_t len)
1009 {
1010         return memcpy(se_alloc(len), src, len);
1011 }
1012
1013 static gchar *
1014 emem_strdup_vprintf(const gchar *fmt, va_list ap, void *allocator(size_t))
1015 {
1016         va_list ap2;
1017         gsize len;
1018         gchar* dst;
1019
1020         G_VA_COPY(ap2, ap);
1021
1022         len = g_printf_string_upper_bound(fmt, ap);
1023
1024         dst = allocator(len+1);
1025         g_vsnprintf (dst, (gulong) len, fmt, ap2);
1026         va_end(ap2);
1027
1028         return dst;
1029 }
1030
1031 gchar *
1032 ep_strdup_vprintf(const gchar *fmt, va_list ap)
1033 {
1034         return emem_strdup_vprintf(fmt, ap, ep_alloc);
1035 }
1036
1037 gchar *
1038 se_strdup_vprintf(const gchar* fmt, va_list ap)
1039 {
1040         return emem_strdup_vprintf(fmt, ap, se_alloc);
1041 }
1042
1043 gchar *
1044 ep_strdup_printf(const gchar *fmt, ...)
1045 {
1046         va_list ap;
1047         gchar *dst;
1048
1049         va_start(ap, fmt);
1050         dst = ep_strdup_vprintf(fmt, ap);
1051         va_end(ap);
1052         return dst;
1053 }
1054
1055 gchar *
1056 se_strdup_printf(const gchar *fmt, ...)
1057 {
1058         va_list ap;
1059         gchar *dst;
1060
1061         va_start(ap, fmt);
1062         dst = se_strdup_vprintf(fmt, ap);
1063         va_end(ap);
1064         return dst;
1065 }
1066
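/*
 * Illustrative use of the printf-style helpers (hypothetical values):
 *
 *   gchar *summary = ep_strdup_printf("Seq=%u Ack=%u", seq, ack);
 *
 * The result lives in the ep pool, so it can be handed to anything that only
 * needs it for the current packet, with no explicit g_free().
 */
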
1067 gchar **
1068 ep_strsplit(const gchar* string, const gchar* sep, int max_tokens)
1069 {
1070         gchar* splitted;
1071         gchar* s;
1072         guint tokens;
1073         guint str_len;
1074         guint sep_len;
1075         guint i;
1076         gchar** vec;
1077         enum { AT_START, IN_PAD, IN_TOKEN } state;
1078         guint curr_tok = 0;
1079
1080         if (    ! string
1081              || ! sep
1082              || ! sep[0])
1083                 return NULL;
1084
1085         s = splitted = ep_strdup(string);
1086         str_len = (guint) strlen(splitted);
1087         sep_len = (guint) strlen(sep);
1088
1089         if (max_tokens < 1) max_tokens = INT_MAX;
1090
1091         tokens = 1;
1092
1093
1094         while (tokens <= (guint)max_tokens && ( s = strstr(s,sep) )) {
1095                 tokens++;
1096
1097                 for(i=0; i < sep_len; i++ )
1098                         s[i] = '\0';
1099
1100                 s += sep_len;
1101
1102         }
1103
1104         vec = ep_alloc_array(gchar*,tokens+1);
1105         state = AT_START;
1106
1107         for (i=0; i< str_len; i++) {
1108                 switch(state) {
1109                         case AT_START:
1110                                 switch(splitted[i]) {
1111                                         case '\0':
1112                                                 state  = IN_PAD;
1113                                                 continue;
1114                                         default:
1115                                                 vec[curr_tok] = &(splitted[i]);
1116                                                 curr_tok++;
1117                                                 state = IN_TOKEN;
1118                                                 continue;
1119                                 }
1120                         case IN_TOKEN:
1121                                 switch(splitted[i]) {
1122                                         case '\0':
1123                                                 state = IN_PAD;
1124                                         default:
1125                                                 continue;
1126                                 }
1127                         case IN_PAD:
1128                                 switch(splitted[i]) {
1129                                         default:
1130                                                 vec[curr_tok] = &(splitted[i]);
1131                                                 curr_tok++;
1132                                                 state = IN_TOKEN;
1133                                         case '\0':
1134                                                 continue;
1135                                 }
1136                 }
1137         }
1138
1139         vec[curr_tok] = NULL;
1140
1141         return vec;
1142 }
1143
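/*
 * Behaviour sketch (hypothetical input): unlike g_strsplit(), empty tokens
 * are skipped rather than returned as empty strings:
 *
 *   gchar **v = ep_strsplit("a,,b", ",", -1);
 *
 * yields v[0] = "a", v[1] = "b", v[2] = NULL, with both the vector and the
 * strings backed by ep memory, so nothing needs to be freed explicitly.
 */
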
1144 gchar *
1145 ep_strconcat(const gchar *string1, ...)
1146 {
1147         gsize   l;
1148         va_list args;
1149         gchar   *s;
1150         gchar   *concat;
1151         gchar   *ptr;
1152
1153         if (!string1)
1154                 return NULL;
1155
1156         l = 1 + strlen(string1);
1157         va_start(args, string1);
1158         s = va_arg(args, gchar*);
1159         while (s) {
1160                 l += strlen(s);
1161                 s = va_arg(args, gchar*);
1162         }
1163         va_end(args);
1164
1165         concat = ep_alloc(l);
1166         ptr = concat;
1167
1168         ptr = g_stpcpy(ptr, string1);
1169         va_start(args, string1);
1170         s = va_arg(args, gchar*);
1171         while (s) {
1172                 ptr = g_stpcpy(ptr, s);
1173                 s = va_arg(args, gchar*);
1174         }
1175         va_end(args);
1176
1177         return concat;
1178 }
1179
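/*
 * Illustrative call (hypothetical strings): as with g_strconcat(), the
 * argument list must be NULL-terminated:
 *
 *   gchar *s = ep_strconcat("foo", ".", "bar", NULL);   yields "foo.bar"
 *
 * The result is ep-allocated and needs no explicit free.
 */
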
1180
1181
1182 /* release all allocated memory back to the pool. */
1183 static void
1184 emem_free_all(emem_header_t *mem)
1185 {
1186         gboolean use_chunks = mem->debug_use_chunks;
1187
1188         emem_chunk_t *npc;
1189         emem_tree_t *tree_list;
1190
1191         /* move all used chunks over to the free list */
1192         while(mem->used_list){
1193                 npc=mem->used_list;
1194                 mem->used_list=mem->used_list->next;
1195                 npc->next=mem->free_list;
1196                 mem->free_list=npc;
1197         }
1198
1199         /* clear them all out */
1200         npc = mem->free_list;
1201         while (npc != NULL) {
1202                 if (use_chunks) {
1203                         while (npc->canary_last != NULL) {
1204                                 npc->canary_last = emem_canary_next(mem->canary, npc->canary_last, NULL);
1205                                 /* XXX, check if canary_last is inside allocated memory? */
1206
1207                                 if (npc->canary_last == (void *) -1)
1208                                         g_error("Memory corrupted");
1209                         }
1210
1211                         emem_scrub_memory((npc->buf + npc->free_offset_init),
1212                                           (npc->free_offset - npc->free_offset_init),
1213                                           FALSE);
1214
1215                         npc->amount_free = npc->amount_free_init;
1216                         npc->free_offset = npc->free_offset_init;
1217                         npc = npc->next;
1218                 } else {
1219                         emem_chunk_t *next = npc->next;
1220
1221                         emem_scrub_memory(npc->buf, npc->amount_free_init, FALSE);
1222
1223                         g_free(npc->buf);
1224                         g_free(npc);
1225                         npc = next;
1226                 }
1227         }
1228
1229         if (!use_chunks) {
1230                 /* We've freed all this memory already */
1231                 mem->free_list = NULL;
1232         }
1233
1234         /* release/reset all allocated trees */
1235         for(tree_list=mem->trees;tree_list;tree_list=tree_list->next){
1236                 tree_list->tree=NULL;
1237         }
1238 }
1239
1240 /* release all allocated memory back to the pool. */
1241 void
1242 ep_free_all(void)
1243 {
1244         emem_free_all(&ep_packet_mem);
1245 }
1246
1247 /* release all allocated memory back to the pool. */
1248 void
1249 se_free_all(void)
1250 {
1251 #ifdef SHOW_EMEM_STATS
1252         print_alloc_stats();
1253 #endif
1254
1255         emem_free_all(&se_packet_mem);
1256 }
1257
1258 void
1259 sl_free_all(struct ws_memory_slab *mem_chunk)
1260 {
1261         emem_chunk_t *chunk_list = mem_chunk->chunk_list;
1262
1263         mem_chunk->chunk_list = NULL;
1264         mem_chunk->freed = NULL;
1265         while (chunk_list) {
1266                 emem_chunk_t *chunk = chunk_list;
1267
1268                 chunk_list = chunk_list->next;
1269                 emem_destroy_chunk(chunk);
1270         }
1271 }
1272
1273 ep_stack_t
1274 ep_stack_new(void) {
1275         ep_stack_t s = ep_new(struct _ep_stack_frame_t*);
1276         *s = ep_new0(struct _ep_stack_frame_t);
1277         return s;
1278 }
1279
1280 /*  for ep_stack_t we'll keep the popped frames so we reuse them instead
1281 of allocating new ones.
1282 */
1283
1284 void *
1285 ep_stack_push(ep_stack_t stack, void* data)
1286 {
1287         struct _ep_stack_frame_t* frame;
1288         struct _ep_stack_frame_t* head = (*stack);
1289
1290         if (head->above) {
1291                 frame = head->above;
1292         } else {
1293                 frame = ep_new(struct _ep_stack_frame_t);
1294                 head->above = frame;
1295                 frame->below = head;
1296                 frame->above = NULL;
1297         }
1298
1299         frame->payload = data;
1300         (*stack) = frame;
1301
1302         return data;
1303 }
1304
1305 void *
1306 ep_stack_pop(ep_stack_t stack)
1307 {
1308
1309         if ((*stack)->below) {
1310                 (*stack) = (*stack)->below;
1311                 return (*stack)->above->payload;
1312         } else {
1313                 return NULL;
1314         }
1315 }
1316
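/*
 * Illustrative stack usage (a sketch; 'item1' and 'item2' are hypothetical
 * pointers, typically ep-allocated themselves):
 *
 *   ep_stack_t st = ep_stack_new();
 *   ep_stack_push(st, item1);
 *   ep_stack_push(st, item2);
 *   void *top = ep_stack_pop(st);    top == item2
 *   top = ep_stack_pop(st);          top == item1
 *   top = ep_stack_pop(st);          top == NULL, the stack is empty
 *
 * All frames come from the ep pool, so the whole stack vanishes at the next
 * ep_free_all(); there is no matching destroy call.
 */
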
1317 emem_tree_t *
1318 se_tree_create(int type, const char *name)
1319 {
1320         emem_tree_t *tree_list;
1321
1322         tree_list=g_malloc(sizeof(emem_tree_t));
1323         tree_list->next=se_packet_mem.trees;
1324         tree_list->type=type;
1325         tree_list->tree=NULL;
1326         tree_list->name=name;
1327         tree_list->malloc=se_alloc;
1328         se_packet_mem.trees=tree_list;
1329
1330         return tree_list;
1331 }
1332
1333 void *
1334 emem_tree_lookup32(emem_tree_t *se_tree, guint32 key)
1335 {
1336         emem_tree_node_t *node;
1337
1338         node=se_tree->tree;
1339
1340         while(node){
1341                 if(key==node->key32){
1342                         return node->data;
1343                 }
1344                 if(key<node->key32){
1345                         node=node->left;
1346                         continue;
1347                 }
1348                 if(key>node->key32){
1349                         node=node->right;
1350                         continue;
1351                 }
1352         }
1353         return NULL;
1354 }
1355
1356 void *
1357 emem_tree_lookup32_le(emem_tree_t *se_tree, guint32 key)
1358 {
1359         emem_tree_node_t *node;
1360
1361         node=se_tree->tree;
1362
1363         if(!node){
1364                 return NULL;
1365         }
1366
1367
1368         while(node){
1369                 if(key==node->key32){
1370                         return node->data;
1371                 }
1372                 if(key<node->key32){
1373                         if(node->left){
1374                                 node=node->left;
1375                                 continue;
1376                         } else {
1377                                 break;
1378                         }
1379                 }
1380                 if(key>node->key32){
1381                         if(node->right){
1382                                 node=node->right;
1383                                 continue;
1384                         } else {
1385                                 break;
1386                         }
1387                 }
1388         }
1389
1390
1391         if(!node){
1392                 return NULL;
1393         }
1394
1395         /* If we are still at the root of the tree, then either this node's
1396          * key is smaller than the search key, in which case we return this
1397          * node, or else no smaller key exists in the tree and we
1398          * return NULL.
1399          */
1400         if(!node->parent){
1401                 if(key>node->key32){
1402                         return node->data;
1403                 } else {
1404                         return NULL;
1405                 }
1406         }
1407
1408         if(node->parent->left==node){
1409                 /* left child */
1410
1411                 if(key>node->key32){
1412                         /* if this is a left child and its key is smaller than
1413                          * the search key, then this is the node we want.
1414                          */
1415                         return node->data;
1416                 } else {
1417                         /* if this is a left child and its key is bigger than
1418                          * the search key, we have to check if any
1419                          * of our ancestors are smaller than the search key.
1420                          */
1421                         while(node){
1422                                 if(key>node->key32){
1423                                         return node->data;
1424                                 }
1425                                 node=node->parent;
1426                         }
1427                         return NULL;
1428                 }
1429         } else {
1430                 /* right child */
1431
1432                 if(node->key32<key){
1433                         /* if this is the right child and its key is smaller
1434                          * than the search key then this is the one we want.
1435                          */
1436                         return node->data;
1437                 } else {
1438                         /* if this is the right child and its key is larger
1439                          * than the search key then our parent is the one we
1440                          * want.
1441                          */
1442                         return node->parent->data;
1443                 }
1444         }
1445
1446 }
1447
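/*
 * Semantics sketch for the "less than or equal" lookup above (keys and data
 * pointers are hypothetical):
 *
 *   emem_tree_insert32(tree, 5,  data_a);
 *   emem_tree_insert32(tree, 10, data_b);
 *
 *   emem_tree_lookup32_le(tree, 7)    returns data_a (largest key <= 7 is 5)
 *   emem_tree_lookup32_le(tree, 10)   returns data_b (exact match)
 *   emem_tree_lookup32_le(tree, 4)    returns NULL   (no key <= 4 exists)
 */
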
1448
1449 static inline emem_tree_node_t *
1450 emem_tree_parent(emem_tree_node_t *node)
1451 {
1452         return node->parent;
1453 }
1454
1455 static inline emem_tree_node_t *
1456 emem_tree_grandparent(emem_tree_node_t *node)
1457 {
1458         emem_tree_node_t *parent;
1459
1460         parent=emem_tree_parent(node);
1461         if(parent){
1462                 return parent->parent;
1463         }
1464         return NULL;
1465 }
1466
1467 static inline emem_tree_node_t *
1468 emem_tree_uncle(emem_tree_node_t *node)
1469 {
1470         emem_tree_node_t *parent, *grandparent;
1471
1472         parent=emem_tree_parent(node);
1473         if(!parent){
1474                 return NULL;
1475         }
1476         grandparent=emem_tree_parent(parent);
1477         if(!grandparent){
1478                 return NULL;
1479         }
1480         if(parent==grandparent->left){
1481                 return grandparent->right;
1482         }
1483         return grandparent->left;
1484 }
1485
1486 static inline void rb_insert_case1(emem_tree_t *se_tree, emem_tree_node_t *node);
1487 static inline void rb_insert_case2(emem_tree_t *se_tree, emem_tree_node_t *node);
1488
1489 static inline void
1490 rotate_left(emem_tree_t *se_tree, emem_tree_node_t *node)
1491 {
1492         if(node->parent){
1493                 if(node->parent->left==node){
1494                         node->parent->left=node->right;
1495                 } else {
1496                         node->parent->right=node->right;
1497                 }
1498         } else {
1499                 se_tree->tree=node->right;
1500         }
1501         node->right->parent=node->parent;
1502         node->parent=node->right;
1503         node->right=node->right->left;
1504         if(node->right){
1505                 node->right->parent=node;
1506         }
1507         node->parent->left=node;
1508 }
1509
1510 static inline void
1511 rotate_right(emem_tree_t *se_tree, emem_tree_node_t *node)
1512 {
1513         if(node->parent){
1514                 if(node->parent->left==node){
1515                         node->parent->left=node->left;
1516                 } else {
1517                         node->parent->right=node->left;
1518                 }
1519         } else {
1520                 se_tree->tree=node->left;
1521         }
1522         node->left->parent=node->parent;
1523         node->parent=node->left;
1524         node->left=node->left->right;
1525         if(node->left){
1526                 node->left->parent=node;
1527         }
1528         node->parent->right=node;
1529 }
1530
1531 static inline void
1532 rb_insert_case5(emem_tree_t *se_tree, emem_tree_node_t *node)
1533 {
1534         emem_tree_node_t *grandparent;
1535         emem_tree_node_t *parent;
1536
1537         parent=emem_tree_parent(node);
1538         grandparent=emem_tree_parent(parent);
1539         parent->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1540         grandparent->u.rb_color=EMEM_TREE_RB_COLOR_RED;
1541         if( (node==parent->left) && (parent==grandparent->left) ){
1542                 rotate_right(se_tree, grandparent);
1543         } else {
1544                 rotate_left(se_tree, grandparent);
1545         }
1546 }
1547
1548 static inline void
1549 rb_insert_case4(emem_tree_t *se_tree, emem_tree_node_t *node)
1550 {
1551         emem_tree_node_t *grandparent;
1552         emem_tree_node_t *parent;
1553
1554         parent=emem_tree_parent(node);
1555         grandparent=emem_tree_parent(parent);
1556         if(!grandparent){
1557                 return;
1558         }
1559         if( (node==parent->right) && (parent==grandparent->left) ){
1560                 rotate_left(se_tree, parent);
1561                 node=node->left;
1562         } else if( (node==parent->left) && (parent==grandparent->right) ){
1563                 rotate_right(se_tree, parent);
1564                 node=node->right;
1565         }
1566         rb_insert_case5(se_tree, node);
1567 }
1568
1569 static inline void
1570 rb_insert_case3(emem_tree_t *se_tree, emem_tree_node_t *node)
1571 {
1572         emem_tree_node_t *grandparent;
1573         emem_tree_node_t *parent;
1574         emem_tree_node_t *uncle;
1575
1576         uncle=emem_tree_uncle(node);
1577         if(uncle && (uncle->u.rb_color==EMEM_TREE_RB_COLOR_RED)){
1578                 parent=emem_tree_parent(node);
1579                 parent->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1580                 uncle->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1581                 grandparent=emem_tree_grandparent(node);
1582                 grandparent->u.rb_color=EMEM_TREE_RB_COLOR_RED;
1583                 rb_insert_case1(se_tree, grandparent);
1584         } else {
1585                 rb_insert_case4(se_tree, node);
1586         }
1587 }
1588
1589 static inline void
1590 rb_insert_case2(emem_tree_t *se_tree, emem_tree_node_t *node)
1591 {
1592         emem_tree_node_t *parent;
1593
1594         parent=emem_tree_parent(node);
1595         /* parent is always non-NULL here */
1596         if(parent->u.rb_color==EMEM_TREE_RB_COLOR_BLACK){
1597                 return;
1598         }
1599         rb_insert_case3(se_tree, node);
1600 }
1601
1602 static inline void
1603 rb_insert_case1(emem_tree_t *se_tree, emem_tree_node_t *node)
1604 {
1605         emem_tree_node_t *parent;
1606
1607         parent=emem_tree_parent(node);
1608         if(!parent){
1609                 node->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1610                 return;
1611         }
1612         rb_insert_case2(se_tree, node);
1613 }
1614
1615 /* insert a new node in the tree. if this node matches an already existing node
1616  * then just replace the data for that node */
1617 void
1618 emem_tree_insert32(emem_tree_t *se_tree, guint32 key, void *data)
1619 {
1620         emem_tree_node_t *node;
1621
1622         node=se_tree->tree;
1623
1624         /* is this the first node ?*/
1625         if(!node){
1626                 node=se_tree->malloc(sizeof(emem_tree_node_t));
1627                 switch(se_tree->type){
1628                 case EMEM_TREE_TYPE_RED_BLACK:
1629                         node->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1630                         break;
1631                 }
1632                 node->parent=NULL;
1633                 node->left=NULL;
1634                 node->right=NULL;
1635                 node->key32=key;
1636                 node->data=data;
1637                 node->u.is_subtree = EMEM_TREE_NODE_IS_DATA;
1638                 se_tree->tree=node;
1639                 return;
1640         }
1641
1642         /* it was not the new root so walk the tree until we find where to
1643          * insert this new leaf.
1644          */
1645         while(1){
1646                 /* this node already exists, so just replace the data pointer*/
1647                 if(key==node->key32){
1648                         node->data=data;
1649                         return;
1650                 }
1651                 if(key<node->key32) {
1652                         if(!node->left){
1653                                 /* new node to the left */
1654                                 emem_tree_node_t *new_node;
1655                                 new_node=se_tree->malloc(sizeof(emem_tree_node_t));
1656                                 node->left=new_node;
1657                                 new_node->parent=node;
1658                                 new_node->left=NULL;
1659                                 new_node->right=NULL;
1660                                 new_node->key32=key;
1661                                 new_node->data=data;
1662                                 new_node->u.is_subtree=EMEM_TREE_NODE_IS_DATA;
1663                                 node=new_node;
1664                                 break;
1665                         }
1666                         node=node->left;
1667                         continue;
1668                 }
1669                 if(key>node->key32) {
1670                         if(!node->right){
1671                                 /* new node to the right */
1672                                 emem_tree_node_t *new_node;
1673                                 new_node=se_tree->malloc(sizeof(emem_tree_node_t));
1674                                 node->right=new_node;
1675                                 new_node->parent=node;
1676                                 new_node->left=NULL;
1677                                 new_node->right=NULL;
1678                                 new_node->key32=key;
1679                                 new_node->data=data;
1680                                 new_node->u.is_subtree=EMEM_TREE_NODE_IS_DATA;
1681                                 node=new_node;
1682                                 break;
1683                         }
1684                         node=node->right;
1685                         continue;
1686                 }
1687         }
1688
1689         /* node will now point to the newly created node */
1690         switch(se_tree->type){
1691         case EMEM_TREE_TYPE_RED_BLACK:
1692                 node->u.rb_color=EMEM_TREE_RB_COLOR_RED;
1693                 rb_insert_case1(se_tree, node);
1694                 break;
1695         }
1696 }
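/* Illustrative usage (a sketch, not part of this file; "pinfo_data" is a
 * hypothetical caller-side pointer):
 *
 *        emem_tree_t *tree;
 *        void *p;
 *
 *        tree = se_tree_create_non_persistent(EMEM_TREE_TYPE_RED_BLACK, "example");
 *        emem_tree_insert32(tree, 42, pinfo_data);    inserts, or replaces, key 42
 *        p = emem_tree_lookup32(tree, 42);            returns pinfo_data
 */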
1697
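/* Look up "key" in the tree; if it is not present, insert a new node whose
 * data is created by calling func(ud).  Either way the node's data pointer
 * is returned.  This is used to build subtrees on demand for the
 * multi-part-key insert below.
 */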
1698 static void *
1699 lookup_or_insert32(emem_tree_t *se_tree, guint32 key, void*(*func)(void*),void* ud, int is_subtree)
1700 {
1701         emem_tree_node_t *node;
1702
1703         node=se_tree->tree;
1704
1705         /* is this the first node ?*/
1706         if(!node){
1707                 node=se_tree->malloc(sizeof(emem_tree_node_t));
1708                 switch(se_tree->type){
1709                         case EMEM_TREE_TYPE_RED_BLACK:
1710                                 node->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1711                                 break;
1712                 }
1713                 node->parent=NULL;
1714                 node->left=NULL;
1715                 node->right=NULL;
1716                 node->key32=key;
1717                 node->data= func(ud);
1718                 node->u.is_subtree = is_subtree;
1719                 se_tree->tree=node;
1720                 return node->data;
1721         }
1722
1723         /* it was not the new root so walk the tree until we find where to
1724          * insert this new leaf.
1725          */
1726         while(1){
1727                 /* this node already exists, so just return the data pointer*/
1728                 if(key==node->key32){
1729                         return node->data;
1730                 }
1731                 if(key<node->key32) {
1732                         if(!node->left){
1733                                 /* new node to the left */
1734                                 emem_tree_node_t *new_node;
1735                                 new_node=se_tree->malloc(sizeof(emem_tree_node_t));
1736                                 node->left=new_node;
1737                                 new_node->parent=node;
1738                                 new_node->left=NULL;
1739                                 new_node->right=NULL;
1740                                 new_node->key32=key;
1741                                 new_node->data= func(ud);
1742                                 new_node->u.is_subtree = is_subtree;
1743                                 node=new_node;
1744                                 break;
1745                         }
1746                         node=node->left;
1747                         continue;
1748                 }
1749                 if(key>node->key32) {
1750                         if(!node->right){
1751                                 /* new node to the right */
1752                                 emem_tree_node_t *new_node;
1753                                 new_node=se_tree->malloc(sizeof(emem_tree_node_t));
1754                                 node->right=new_node;
1755                                 new_node->parent=node;
1756                                 new_node->left=NULL;
1757                                 new_node->right=NULL;
1758                                 new_node->key32=key;
1759                                 new_node->data= func(ud);
1760                                 new_node->u.is_subtree = is_subtree;
1761                                 node=new_node;
1762                                 break;
1763                         }
1764                         node=node->right;
1765                         continue;
1766                 }
1767         }
1768
1769         /* node will now point to the newly created node */
1770         switch(se_tree->type){
1771                 case EMEM_TREE_TYPE_RED_BLACK:
1772                         node->u.rb_color=EMEM_TREE_RB_COLOR_RED;
1773                         rb_insert_case1(se_tree, node);
1774                         break;
1775         }
1776
1777         return node->data;
1778 }
1779
1780 /* When the se data is released, this entire tree will disappear as if it
1781  * had never existed, including all metadata associated with the tree.
1782  */
1783 emem_tree_t *
1784 se_tree_create_non_persistent(int type, const char *name)
1785 {
1786         emem_tree_t *tree_list;
1787
1788         tree_list=se_alloc(sizeof(emem_tree_t));
1789         tree_list->next=NULL;
1790         tree_list->type=type;
1791         tree_list->tree=NULL;
1792         tree_list->name=name;
1793         tree_list->malloc=se_alloc;
1794
1795         return tree_list;
1796 }
1797
1798 /* This tree is permanent and will never be released.
1799  */
1800 emem_tree_t *
1801 pe_tree_create(int type, const char *name)
1802 {
1803         emem_tree_t *tree_list;
1804
1805         tree_list=g_new(emem_tree_t, 1);
1806         tree_list->next=NULL;
1807         tree_list->type=type;
1808         tree_list->tree=NULL;
1809         tree_list->name=name;
1810         tree_list->malloc=(void *(*)(size_t)) g_malloc;
1811
1812         return tree_list;
1813 }
1814
1815 /* create another (sub)tree using the same memory allocation scope
1816  * as the parent tree.
1817  */
1818 static emem_tree_t *
1819 emem_tree_create_subtree(emem_tree_t *parent_tree, const char *name)
1820 {
1821         emem_tree_t *tree_list;
1822
1823         tree_list=parent_tree->malloc(sizeof(emem_tree_t));
1824         tree_list->next=NULL;
1825         tree_list->type=parent_tree->type;
1826         tree_list->tree=NULL;
1827         tree_list->name=name;
1828         tree_list->malloc=parent_tree->malloc;
1829
1830         return tree_list;
1831 }
1832
1833 static void *
1834 create_sub_tree(void* d)
1835 {
1836         emem_tree_t *se_tree = d;
1837         return emem_tree_create_subtree(se_tree, "subtree");
1838 }
1839
1840 /* Insert a new node in the tree. If this node matches an already existing node,
1841  * just replace the data for that node. */
1842
1843 void
1844 emem_tree_insert32_array(emem_tree_t *se_tree, emem_tree_key_t *key, void *data)
1845 {
1846         emem_tree_t *insert_tree = NULL;
1847         emem_tree_key_t *cur_key;
1848         guint32 i, insert_key32 = 0;
1849
1850         if(!se_tree || !key) return;
1851
1852         for (cur_key = key; cur_key->length > 0; cur_key++) {
1853                 if(cur_key->length > 100) {
1854                         DISSECTOR_ASSERT_NOT_REACHED();
1855                 }
1856
1857                 for (i = 0; i < cur_key->length; i++) {
1858                         /* Insert using the previous key32 */
1859                         if (!insert_tree) {
1860                                 insert_tree = se_tree;
1861                         } else {
1862                                 insert_tree = lookup_or_insert32(insert_tree, insert_key32, create_sub_tree, se_tree, EMEM_TREE_NODE_IS_SUBTREE);
1863                         }
1864                         insert_key32 = cur_key->key[i];
1865                 }
1866         }
1867
1868         if(!insert_tree) {
1869                 /* We didn't get a valid key. Should we just return instead? */
1870                 DISSECTOR_ASSERT_NOT_REACHED();
1871         }
1872
1873         emem_tree_insert32(insert_tree, insert_key32, data);
1874
1875 }
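/* Illustrative sketch (not part of this file; "tree", "addr_hi", "addr_lo",
 * "port" and "data" are hypothetical caller-side variables):
 *
 *        guint32 words[3];
 *        emem_tree_key_t key[2];
 *
 *        words[0] = addr_hi;
 *        words[1] = addr_lo;
 *        words[2] = port;
 *        key[0].length = 3;
 *        key[0].key    = words;
 *        key[1].length = 0;            a zero-length entry terminates the key
 *        key[1].key    = NULL;
 *        emem_tree_insert32_array(tree, key, data);
 *        ...
 *        data = emem_tree_lookup32_array(tree, key);
 */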
1876
1877 void *
1878 emem_tree_lookup32_array(emem_tree_t *se_tree, emem_tree_key_t *key)
1879 {
1880         emem_tree_t *lookup_tree = NULL;
1881         emem_tree_key_t *cur_key;
1882         guint32 i, lookup_key32 = 0;
1883
1884         if(!se_tree || !key) return NULL; /* prevent searching on NULL pointer */
1885
1886         for (cur_key = key; cur_key->length > 0; cur_key++) {
1887                 if(cur_key->length > 100) {
1888                         DISSECTOR_ASSERT_NOT_REACHED();
1889                 }
1890
1891                 for (i = 0; i < cur_key->length; i++) {
1892                         /* Lookup using the previous key32 */
1893                         if (!lookup_tree) {
1894                                 lookup_tree = se_tree;
1895                         } else {
1896                                 lookup_tree = emem_tree_lookup32(lookup_tree, lookup_key32);
1897                                 if (!lookup_tree) {
1898                                         return NULL;
1899                                 }
1900                         }
1901                         lookup_key32 = cur_key->key[i];
1902                 }
1903         }
1904
1905         if(!lookup_tree) {
1906                 /* We didn't get a valid key. Should we return NULL instead? */
1907                 DISSECTOR_ASSERT_NOT_REACHED();
1908         }
1909
1910         return emem_tree_lookup32(lookup_tree, lookup_key32);
1911 }
1912
1913 void *
1914 emem_tree_lookup32_array_le(emem_tree_t *se_tree, emem_tree_key_t *key)
1915 {
1916         emem_tree_t *lookup_tree = NULL;
1917         emem_tree_key_t *cur_key;
1918         guint32 i, lookup_key32 = 0;
1919
1920         if(!se_tree || !key) return NULL; /* prevent searching on NULL pointer */
1921
1922         for (cur_key = key; cur_key->length > 0; cur_key++) {
1923                 if(cur_key->length > 100) {
1924                         DISSECTOR_ASSERT_NOT_REACHED();
1925                 }
1926
1927                 for (i = 0; i < cur_key->length; i++) {
1928                         /* Lookup using the previous key32 */
1929                         if (!lookup_tree) {
1930                                 lookup_tree = se_tree;
1931                         } else {
1932                                 lookup_tree = emem_tree_lookup32_le(lookup_tree, lookup_key32);
1933                                 if (!lookup_tree) {
1934                                         return NULL;
1935                                 }
1936                         }
1937                         lookup_key32 = cur_key->key[i];
1938                 }
1939         }
1940
1941         if(!lookup_tree) {
1942                 /* We didn't get a valid key. Should we return NULL instead? */
1943                 DISSECTOR_ASSERT_NOT_REACHED();
1944         }
1945
1946         return emem_tree_lookup32_le(lookup_tree, lookup_key32);
1947
1948 }
1949
1950 /* Strings are stored as an array of uint32 containing the string characters,
1951    with 4 characters in each uint32.
1952    The first byte of the string is stored as the most significant byte.
1953    If the string is not a multiple of 4 characters in length, the last
1954    uint32 containing the string bytes is padded with 0 bytes.
1955    After the uint32s containing the string, there is one final terminator
1956    uint32 with the value 0x00000001.
1957 */
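/* For example, the 5-character string "ABCDE" is packed into the three
 * guint32 words 0x41424344, 0x45000000, 0x00000001, giving a key of
 * length 3.
 */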
1958 void
1959 emem_tree_insert_string(emem_tree_t* se_tree, const gchar* k, void* v, guint32 flags)
1960 {
1961         emem_tree_key_t key[2];
1962         guint32 *aligned=NULL;
1963         guint32 len = (guint32) strlen(k);
1964         guint32 divx = (len+3)/4+1;
1965         guint32 i;
1966         guint32 tmp;
1967
1968         aligned = g_malloc(divx * sizeof (guint32));
1969
1970         /* pack the bytes one by one into guint32s */
1971         tmp = 0;
1972         for (i = 0;i < len;i++) {
1973                 unsigned char ch;
1974
1975                 ch = (unsigned char)k[i];
1976                 if (flags & EMEM_TREE_STRING_NOCASE) {
1977                         if(isupper(ch)) {
1978                                 ch = tolower(ch);
1979                         }
1980                 }
1981                 tmp <<= 8;
1982                 tmp |= ch;
1983                 if (i%4 == 3) {
1984                         aligned[i/4] = tmp;
1985                         tmp = 0;
1986                 }
1987         }
1988         /* add required padding to the last uint32 */
1989         if (i%4 != 0) {
1990                 while (i%4 != 0) {
1991                         i++;
1992                         tmp <<= 8;
1993                 }
1994                 aligned[i/4-1] = tmp;
1995         }
1996
1997         /* add the terminator */
1998         aligned[divx-1] = 0x00000001;
1999
2000         key[0].length = divx;
2001         key[0].key = aligned;
2002         key[1].length = 0;
2003         key[1].key = NULL;
2004
2005
2006         emem_tree_insert32_array(se_tree, key, v);
2007         g_free(aligned);
2008 }
2009
2010 void *
2011 emem_tree_lookup_string(emem_tree_t* se_tree, const gchar* k, guint32 flags)
2012 {
2013         emem_tree_key_t key[2];
2014         guint32 *aligned=NULL;
2015         guint32 len = (guint32) strlen(k);
2016         guint32 divx = (len+3)/4+1;
2017         guint32 i;
2018         guint32 tmp;
2019         void *ret;
2020
2021         aligned = g_malloc(divx * sizeof (guint32));
2022
2023         /* pack the bytes one by one into guint32s */
2024         tmp = 0;
2025         for (i = 0;i < len;i++) {
2026                 unsigned char ch;
2027
2028                 ch = (unsigned char)k[i];
2029                 if (flags & EMEM_TREE_STRING_NOCASE) {
2030                         if(isupper(ch)) {
2031                                 ch = tolower(ch);
2032                         }
2033                 }
2034                 tmp <<= 8;
2035                 tmp |= ch;
2036                 if (i%4 == 3) {
2037                         aligned[i/4] = tmp;
2038                         tmp = 0;
2039                 }
2040         }
2041         /* add required padding to the last uint32 */
2042         if (i%4 != 0) {
2043                 while (i%4 != 0) {
2044                         i++;
2045                         tmp <<= 8;
2046                 }
2047                 aligned[i/4-1] = tmp;
2048         }
2049
2050         /* add the terminator */
2051         aligned[divx-1] = 0x00000001;
2052
2053         key[0].length = divx;
2054         key[0].key = aligned;
2055         key[1].length = 0;
2056         key[1].key = NULL;
2057
2058
2059         ret = emem_tree_lookup32_array(se_tree, key);
2060         g_free(aligned);
2061         return ret;
2062 }
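/* Illustrative usage (a sketch, not part of this file; "tree" and "conv" are
 * hypothetical caller-side variables):
 *
 *        emem_tree_insert_string(tree, "Content-Length", conv, EMEM_TREE_STRING_NOCASE);
 *        conv = emem_tree_lookup_string(tree, "content-length", EMEM_TREE_STRING_NOCASE);
 *
 * Without EMEM_TREE_STRING_NOCASE the two keys above would be distinct.
 */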
2063
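/* In-order traversal helper: visit the left subtree, then this node
 * (recursing into the stored subtree if the node holds one), then the right
 * subtree.  Returns TRUE as soon as a callback asks to stop.
 */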
2064 static gboolean
2065 emem_tree_foreach_nodes(emem_tree_node_t* node, tree_foreach_func callback, void *user_data)
2066 {
2067         gboolean stop_traverse = FALSE;
2068
2069         if (!node)
2070                 return FALSE;
2071
2072         if(node->left) {
2073                 stop_traverse = emem_tree_foreach_nodes(node->left, callback, user_data);
2074                 if (stop_traverse) {
2075                         return TRUE;
2076                 }
2077         }
2078
2079         if (node->u.is_subtree == EMEM_TREE_NODE_IS_SUBTREE) {
2080                 stop_traverse = emem_tree_foreach(node->data, callback, user_data);
2081         } else {
2082                 stop_traverse = callback(node->data, user_data);
2083         }
2084
2085         if (stop_traverse) {
2086                 return TRUE;
2087         }
2088
2089         if(node->right) {
2090                 stop_traverse = emem_tree_foreach_nodes(node->right, callback, user_data);
2091                 if (stop_traverse) {
2092                         return TRUE;
2093                 }
2094         }
2095
2096         return FALSE;
2097 }
2098
2099 gboolean
2100 emem_tree_foreach(emem_tree_t* emem_tree, tree_foreach_func callback, void *user_data)
2101 {
2102         if (!emem_tree)
2103                 return FALSE;
2104
2105         if(!emem_tree->tree)
2106                 return FALSE;
2107
2108         return emem_tree_foreach_nodes(emem_tree->tree, callback, user_data);
2109 }
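/* Illustrative sketch (not part of this file; "tree" is a hypothetical
 * caller-side tree): a callback that counts the values stored in a tree;
 * returning FALSE keeps the traversal going.
 *
 *        static gboolean
 *        count_values(void *value, void *userdata)
 *        {
 *                guint *count = (guint *)userdata;
 *                (void)value;
 *                (*count)++;
 *                return FALSE;
 *        }
 *
 *        guint count = 0;
 *        emem_tree_foreach(tree, count_values, &count);
 */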
2110
2111 static void emem_print_subtree(emem_tree_t* emem_tree, guint32 level);
2112
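/* Recursively dump a node and its children, indented four spaces per tree
 * level, showing the pointers, red-black colour and key of each node and
 * descending into any subtree stored as the node's data.
 */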
2113 static void
2114 emem_tree_print_nodes(const char *prefix, emem_tree_node_t* node, guint32 level)
2115 {
2116         guint32 i;
2117
2118         if (!node)
2119                 return;
2120
2121         for(i=0;i<level;i++){
2122                 printf("    ");
2123         }
2124
2125         printf("%sNODE:%p parent:%p left:%p right:%p colour:%s key:%u %s:%p\n", prefix,
2126                 (void *)node,(void *)(node->parent),(void *)(node->left),(void *)(node->right),
2127                 (node->u.rb_color)?"Black":"Red",(node->key32),(node->u.is_subtree)?"tree":"data",node->data);
2128         if(node->left)
2129                 emem_tree_print_nodes("L-", node->left, level+1);
2130         if(node->right)
2131                 emem_tree_print_nodes("R-", node->right, level+1);
2132
2133         if (node->u.is_subtree)
2134                 emem_print_subtree(node->data, level+1);
2135 }
2136
2137 static void
2138 emem_print_subtree(emem_tree_t* emem_tree, guint32 level)
2139 {
2140         guint32 i;
2141
2142         if (!emem_tree)
2143                 return;
2144
2145         for(i=0;i<level;i++){
2146                 printf("    ");
2147         }
2148
2149         printf("EMEM tree:%p type:%s name:%s root:%p\n",(void *)emem_tree,(emem_tree->type==1)?"RedBlack":"unknown",emem_tree->name,(void *)(emem_tree->tree));
2150         if(emem_tree->tree)
2151                 emem_tree_print_nodes("Root-", emem_tree->tree, level);
2152 }
2153
2154 void
2155 emem_print_tree(emem_tree_t* emem_tree)
2156 {
2157         emem_print_subtree(emem_tree, 0);
2158 }
2159
2160 /*
2161  * String buffers
2162  */
2163
2164 /*
2165  * Presumably we're using these routines for building strings for the tree.
2166  * Use ITEM_LABEL_LENGTH as the basis for our default lengths.
2167  */
2168
2169 #define DEFAULT_STRBUF_LEN (ITEM_LABEL_LENGTH / 10)
2170 #define MAX_STRBUF_LEN 65536
2171
2172 static gsize
2173 next_size(gsize cur_alloc_len, gsize wanted_alloc_len, gsize max_alloc_len)
2174 {
2175         if (max_alloc_len < 1 || max_alloc_len > MAX_STRBUF_LEN) {
2176                 max_alloc_len = MAX_STRBUF_LEN;
2177         }
2178
2179         if (cur_alloc_len < 1) {
2180                 cur_alloc_len = DEFAULT_STRBUF_LEN;
2181         }
2182
2183         while (cur_alloc_len < wanted_alloc_len) {
2184                 cur_alloc_len *= 2;
2185         }
2186
2187         return cur_alloc_len < max_alloc_len ? cur_alloc_len : max_alloc_len;
2188 }
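/* For example, next_size(32, 100, 0) doubles 32 -> 64 -> 128 and returns 128;
 * the result is only clamped once it reaches MAX_STRBUF_LEN.
 */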
2189
2190 static void
2191 ep_strbuf_grow(emem_strbuf_t *strbuf, gsize wanted_alloc_len)
2192 {
2193         gsize new_alloc_len;
2194         gchar *new_str;
2195
2196         if (!strbuf || (wanted_alloc_len <= strbuf->alloc_len) || (strbuf->alloc_len >= strbuf->max_alloc_len)) {
2197                 return;
2198         }
2199
2200         new_alloc_len = next_size(strbuf->alloc_len, wanted_alloc_len, strbuf->max_alloc_len);
2201         new_str = ep_alloc(new_alloc_len);
2202         g_strlcpy(new_str, strbuf->str, new_alloc_len);
2203
2204         strbuf->alloc_len = new_alloc_len;
2205         strbuf->str = new_str;
2206 }
2207
2208 emem_strbuf_t *
2209 ep_strbuf_sized_new(gsize alloc_len, gsize max_alloc_len)
2210 {
2211         emem_strbuf_t *strbuf;
2212
2213         strbuf = ep_alloc(sizeof(emem_strbuf_t));
2214
2215         if ((max_alloc_len == 0) || (max_alloc_len > MAX_STRBUF_LEN))
2216                 max_alloc_len = MAX_STRBUF_LEN;
2217         if (alloc_len == 0)
2218                 alloc_len = 1;
2219         else if (alloc_len > max_alloc_len)
2220                 alloc_len = max_alloc_len;
2221
2222         strbuf->str = ep_alloc(alloc_len);
2223         strbuf->str[0] = '\0';
2224
2225         strbuf->len = 0;
2226         strbuf->alloc_len = alloc_len;
2227         strbuf->max_alloc_len = max_alloc_len;
2228
2229         return strbuf;
2230 }
2231
2232 emem_strbuf_t *
2233 ep_strbuf_new(const gchar *init)
2234 {
2235         emem_strbuf_t *strbuf;
2236
2237         strbuf = ep_strbuf_sized_new(next_size(0, init?strlen(init)+1:0, 0), 0);  /* +1 for NULL terminator */
2238         if (init) {
2239                 gsize full_len;
2240                 full_len = g_strlcpy(strbuf->str, init, strbuf->alloc_len);
2241                 strbuf->len = MIN(full_len, strbuf->alloc_len-1);
2242         }
2243
2244         return strbuf;
2245 }
2246
2247 emem_strbuf_t *
2248 ep_strbuf_new_label(const gchar *init)
2249 {
2250         emem_strbuf_t *strbuf;
2251         gsize full_len;
2252
2253         /* Be optimistic: Allocate default size strbuf string and only      */
2254         /*  request an increase if needed.                                  */
2255         /* XXX: Is it reasonable to assume that much of the usage of        */
2256         /*  ep_strbuf_new_label will have  init==NULL or                    */
2257         /*   strlen(init) < DEFAULT_STRBUF_LEN) ???                         */
2258         strbuf = ep_strbuf_sized_new(DEFAULT_STRBUF_LEN, ITEM_LABEL_LENGTH);
2259
2260         if (!init)
2261                 return strbuf;
2262
2263         /* full_len does not count the trailing '\0'.                       */
2264         full_len = g_strlcpy(strbuf->str, init, strbuf->alloc_len);
2265         if (full_len < strbuf->alloc_len) {
2266                 strbuf->len += full_len;
2267         } else {
2268                 strbuf = ep_strbuf_sized_new(full_len+1, ITEM_LABEL_LENGTH);
2269                 full_len = g_strlcpy(strbuf->str, init, strbuf->alloc_len);
2270                 strbuf->len = MIN(full_len, strbuf->alloc_len-1);
2271         }
2272
2273         return strbuf;
2274 }
2275
2276 emem_strbuf_t *
2277 ep_strbuf_append(emem_strbuf_t *strbuf, const gchar *str)
2278 {
2279         gsize add_len, full_len;
2280
2281         if (!strbuf || !str || str[0] == '\0') {
2282                 return strbuf;
2283         }
2284
2285         /* Be optimistic; try the g_strlcpy first & see if enough room.                 */
2286         /* Note: full_len doesn't count the trailing '\0'; add_len does allow for same  */
2287         add_len = strbuf->alloc_len - strbuf->len;
2288         full_len = g_strlcpy(&strbuf->str[strbuf->len], str, add_len);
2289         if (full_len < add_len) {
2290                 strbuf->len += full_len;
2291         } else {
2292                 strbuf->str[strbuf->len] = '\0'; /* end string at original length again */
2293                 ep_strbuf_grow(strbuf, strbuf->len + full_len + 1);
2294                 add_len = strbuf->alloc_len - strbuf->len;
2295                 full_len = g_strlcpy(&strbuf->str[strbuf->len], str, add_len);
2296                 strbuf->len += MIN(add_len-1, full_len);
2297         }
2298
2299         return strbuf;
2300 }
2301
2302 void
2303 ep_strbuf_append_vprintf(emem_strbuf_t *strbuf, const gchar *format, va_list ap)
2304 {
2305         va_list ap2;
2306         gsize add_len, full_len;
2307
2308         G_VA_COPY(ap2, ap);
2309
2310         /* Be optimistic; try the g_vsnprintf first & see if enough room.               */
2311         /* Note: full_len doesn't count the trailing '\0'; add_len does allow for same. */
2312         add_len = strbuf->alloc_len - strbuf->len;
2313         full_len = g_vsnprintf(&strbuf->str[strbuf->len], (gulong) add_len, format, ap);
2314         if (full_len < add_len) {
2315                 strbuf->len += full_len;
2316         } else {
2317                 strbuf->str[strbuf->len] = '\0'; /* end string at original length again */
2318                 ep_strbuf_grow(strbuf, strbuf->len + full_len + 1);
2319                 add_len = strbuf->alloc_len - strbuf->len;
2320                 full_len = g_vsnprintf(&strbuf->str[strbuf->len], (gulong) add_len, format, ap2);
2321                 strbuf->len += MIN(add_len-1, full_len);
2322         }
2323
2324         va_end(ap2);
2325 }
2326
2327 void
2328 ep_strbuf_append_printf(emem_strbuf_t *strbuf, const gchar *format, ...)
2329 {
2330         va_list ap;
2331
2332         va_start(ap, format);
2333         ep_strbuf_append_vprintf(strbuf, format, ap);
2334         va_end(ap);
2335 }
2336
2337 void
2338 ep_strbuf_printf(emem_strbuf_t *strbuf, const gchar *format, ...)
2339 {
2340         va_list ap;
2341         if (!strbuf) {
2342                 return;
2343         }
2344
2345         strbuf->len = 0;
2346
2347         va_start(ap, format);
2348         ep_strbuf_append_vprintf(strbuf, format, ap);
2349         va_end(ap);
2350 }
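/* Illustrative usage (a sketch, not part of this file; "ti" and "flags" are
 * hypothetical caller-side variables):
 *
 *        emem_strbuf_t *buf;
 *
 *        buf = ep_strbuf_new("Flags:");
 *        ep_strbuf_append_printf(buf, " 0x%04x", flags);
 *        ep_strbuf_append(buf, " (response expected)");
 *        proto_item_append_text(ti, "%s", buf->str);
 *
 * The buffer and its string are ep_ allocated, so they are freed
 * automatically when the current packet dissection ends.
 */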
2351
2352 emem_strbuf_t *
2353 ep_strbuf_append_c(emem_strbuf_t *strbuf, const gchar c)
2354 {
2355         if (!strbuf) {
2356                 return strbuf;
2357         }
2358
2359         /* +1 for the new character & +1 for the trailing '\0'. */
2360         if (strbuf->alloc_len < strbuf->len + 1 + 1) {
2361                 ep_strbuf_grow(strbuf, strbuf->len + 1 + 1);
2362         }
2363         if (strbuf->alloc_len >= strbuf->len + 1 + 1) {
2364                 strbuf->str[strbuf->len] = c;
2365                 strbuf->len++;
2366                 strbuf->str[strbuf->len] = '\0';
2367         }
2368
2369         return strbuf;
2370 }
2371
2372 emem_strbuf_t *
2373 ep_strbuf_append_unichar(emem_strbuf_t *strbuf, const gunichar c)
2374 {
2375         gchar buf[6];
2376         gint charlen;
2377
2378         if (!strbuf) {
2379                 return strbuf;
2380         }
2381
2382         charlen = g_unichar_to_utf8(c, buf);
2383
2384         /* +charlen for the new character & +1 for the trailing '\0'. */
2385         if (strbuf->alloc_len < strbuf->len + charlen + 1) {
2386                 ep_strbuf_grow(strbuf, strbuf->len + charlen + 1);
2387         }
2388         if (strbuf->alloc_len >= strbuf->len + charlen + 1) {
2389                 memcpy(&strbuf->str[strbuf->len], buf, charlen);
2390                 strbuf->len += charlen;
2391                 strbuf->str[strbuf->len] = '\0';
2392         }
2393
2394         return strbuf;
2395 }
2396
2397 emem_strbuf_t *
2398 ep_strbuf_truncate(emem_strbuf_t *strbuf, gsize len)
2399 {
2400         if (!strbuf || len >= strbuf->len) {
2401                 return strbuf;
2402         }
2403
2404         strbuf->str[len] = '\0';
2405         strbuf->len = len;
2406
2407         return strbuf;
2408 }
2409
2410 /*
2411  * Editor modelines
2412  *
2413  * Local Variables:
2414  * c-basic-offset: 8
2415  * tab-width: 8
2416  * indent-tabs-mode: t
2417  * End:
2418  *
2419  * ex: set shiftwidth=8 tabstop=8 noexpandtab:
2420  * :indentSize=8:tabSize=8:noTabs=false:
2421  */