Fix bugs I introduced. Now
[metze/wireshark/wip.git] / epan / emem.c
1 /* emem.c
2  * Wireshark memory management and garbage collection functions
3  * Ronnie Sahlberg 2005
4  *
5  * $Id$
6  *
7  * Wireshark - Network traffic analyzer
8  * By Gerald Combs <gerald@wireshark.org>
9  * Copyright 1998 Gerald Combs
10  *
11  * This program is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU General Public License
13  * as published by the Free Software Foundation; either version 2
14  * of the License, or (at your option) any later version.
15  *
16  * This program is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19  * GNU General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this program; if not, write to the Free Software
23  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24  */
25 #include "config.h"
26
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <string.h>
30 #include <stdarg.h>
31 #include <ctype.h>
32
33 #include <time.h>
34 #ifdef HAVE_SYS_TIME_H
35 #include <sys/time.h>
36 #endif
37
38 #ifdef HAVE_UNISTD_H
39 #include <unistd.h>
40 #endif
41
42 #include <glib.h>
43
44 #include "proto.h"
45 #include "emem.h"
46
47 #ifdef _WIN32
48 #include <windows.h>    /* VirtualAlloc, VirtualProtect */
49 #include <process.h>    /* getpid */
50 #endif
51
52 /* Print out statistics about our memory allocations? */
53 /*#define SHOW_EMEM_STATS*/
54
55 /* Do we want to use guardpages? if available */
56 #define WANT_GUARD_PAGES 1
57
58 #ifdef WANT_GUARD_PAGES
59 /* Add guard pages at each end of our allocated memory */
60
61 #if defined(HAVE_SYSCONF) && defined(HAVE_MMAP) && defined(HAVE_MPROTECT) && defined(HAVE_STDINT_H)
62 #include <stdint.h>
63
64 #ifdef HAVE_SYS_TYPES_H
65 #include <sys/types.h>
66 #endif /* HAVE_SYS_TYPES_H */
67
68 #include <sys/mman.h>
69
70 #if defined(MAP_ANONYMOUS)
71 #define ANON_PAGE_MODE  (MAP_ANONYMOUS|MAP_PRIVATE)
72 #elif defined(MAP_ANON)
73 #define ANON_PAGE_MODE  (MAP_ANON|MAP_PRIVATE)
74 #else
75 #define ANON_PAGE_MODE  (MAP_PRIVATE)   /* have to map /dev/zero */
76 #define NEED_DEV_ZERO
77 #endif /* defined(MAP_ANONYMOUS) */
78
79 #ifdef NEED_DEV_ZERO
80 #include <fcntl.h>
81 static int dev_zero_fd;
82 #define ANON_FD dev_zero_fd
83 #else
84 #define ANON_FD -1
85 #endif /* NEED_DEV_ZERO */
86
87 #define USE_GUARD_PAGES 1
88 #endif /* defined(HAVE_SYSCONF) && defined(HAVE_MMAP) && defined(HAVE_MPROTECT) && defined(HAVE_STDINT_H) */
89 #endif /* WANT_GUARD_PAGES */
90
91 /* When required, allocate more memory from the OS in this size chunks */
92 #define EMEM_PACKET_CHUNK_SIZE (10 * 1024 * 1024)
93
94 /* The canary between allocations is at least 8 bytes and up to 16 bytes to
95  * allow future allocations to be 4- or 8-byte aligned.
96  * All but the last byte of the canary are randomly generated; the last byte is
97  * NULL to separate the canary and the pointer to the next canary.
98  *
99  * For example, if the allocation is a multiple of 8 bytes, the canary and
100  * pointer would look like:
101  *   |0|1|2|3|4|5|6|7||0|1|2|3|4|5|6|7|
102  *   |c|c|c|c|c|c|c|0||p|p|p|p|p|p|p|p| (64-bit), or:
103  *   |c|c|c|c|c|c|c|0||p|p|p|p|         (32-bit)
104  *
105  * If the allocation was, for example, 12 bytes, the canary would look like:
106  *        |0|1|2|3|4|5|6|7||0|1|2|3|4|5|6|7|
107  *   [...]|a|a|a|a|c|c|c|c||c|c|c|c|c|c|c|0| (followed by the pointer)
108  */
109 #define EMEM_CANARY_SIZE 8
110 #define EMEM_CANARY_DATA_SIZE (EMEM_CANARY_SIZE * 2 - 1)
111
/* One chunk of backing store.  Chunks live on the singly-linked
 * free/used lists of an emem_pool_t.  The "free_offset"/"amount_free"
 * pair tracks a simple bump allocator within the chunk. */
typedef struct _emem_chunk_t {
	struct _emem_chunk_t *next;	/* next chunk on the free or used list */
	char		*buf;		/* start of the mapped/allocated buffer */
	char		*org;		/* NOTE(review): not referenced in this part of the file */
	size_t		 size;		/* NOTE(review): not referenced in this part of the file */
	unsigned int	amount_free_init;	/* usable bytes when the chunk was fresh */
	unsigned int	amount_free;		/* usable bytes not yet handed out */
	unsigned int	free_offset_init;	/* offset of first usable byte (past a guard page, if any) */
	unsigned int	free_offset;		/* offset of the next byte to hand out */
	void		*canary_last;		/* most recent canary in this chunk (head of the chain) */
} emem_chunk_t;
123
/* State of one allocator pool; two instances exist below
 * (ep_packet_mem and se_packet_mem). */
typedef struct _emem_pool_t {
	emem_chunk_t *free_list;	/* chunks that still have room */
	emem_chunk_t *used_list;	/* chunks considered full */

	emem_tree_t *trees;		/* only used by se_mem allocator */

	guint8 canary[EMEM_CANARY_DATA_SIZE];	/* this pool's random canary pattern */
	void *(*memory_alloc)(size_t size, struct _emem_pool_t *);	/* chunked or glib allocator */

	/*
	 * Tools like Valgrind and ElectricFence don't work well with memchunks.
	 * Export the following environment variables to make {ep|se}_alloc() allocate each
	 * object individually.
	 *
	 * WIRESHARK_DEBUG_EP_NO_CHUNKS
	 * WIRESHARK_DEBUG_SE_NO_CHUNKS
	 */
	gboolean debug_use_chunks;

	/* Do we want to use canaries?
	 * Export the following environment variables to disable/enable canaries
	 *
	 * WIRESHARK_DEBUG_EP_NO_CANARY
	 * For SE memory use of canary is default off as the memory overhead
	 * is considerable.
	 * WIRESHARK_DEBUG_SE_USE_CANARY
	 */
	gboolean debug_use_canary;

	/*  Do we want to verify no one is using a pointer to an ep_ or se_
	 *  allocated thing where they shouldn't be?
	 *
	 * Export WIRESHARK_EP_VERIFY_POINTERS or WIRESHARK_SE_VERIFY_POINTERS
	 * to turn this on.
	 */
	gboolean debug_verify_pointers;

} emem_pool_t;
162
/* The two global pools: per-packet (ep_) and per-capture (se_) lifetime. */
static emem_pool_t ep_packet_mem;
static emem_pool_t se_packet_mem;

/*
 *  Memory scrubbing is expensive but can be useful to ensure we don't:
 *    - use memory before initializing it
 *    - use memory after freeing it
 *  Export WIRESHARK_DEBUG_SCRUB_MEMORY to turn it on.
 */
static gboolean debug_use_memory_scrubber = FALSE;

/* Page size and (on Windows) system/version info, captured once in
 * emem_init() and used for guard-page placement. */
#if defined (_WIN32)
static SYSTEM_INFO sysinfo;
static OSVERSIONINFO versinfo;
static int pagesize;
#elif defined(USE_GUARD_PAGES)
static intptr_t pagesize;
#endif /* _WIN32 / USE_GUARD_PAGES */

/* Forward declarations of the two allocator back-ends. */
static void *emem_alloc_chunk(size_t size, emem_pool_t *mem);
static void *emem_alloc_glib(size_t size, emem_pool_t *mem);
184
185 /*
186  * Set a canary value to be placed between memchunks.
187  */
188 static void
189 emem_canary_init(guint8 *canary)
190 {
191         int i;
192         static GRand *rand_state = NULL;
193
194         if (rand_state == NULL) {
195                 rand_state = g_rand_new();
196         }
197         for (i = 0; i < EMEM_CANARY_DATA_SIZE; i ++) {
198                 canary[i] = (guint8) g_rand_int_range(rand_state, 1, 0x100);
199         }
200         return;
201 }
202
/*
 * Verify the canary stored at 'canary' against the pool's expected
 * pattern 'mem_canary' and extract the pointer to the previous canary
 * in the chain (stored right after the canary's NUL terminator).
 *
 * Returns that chained pointer (NULL marks the end of the chain) and,
 * if 'len' is non-NULL, stores the number of bytes consumed by the
 * canary plus the trailing pointer.  Returns (void *) -1 if any canary
 * byte does not match, i.e. the memory was overwritten.
 */
static void *
emem_canary_next(guint8 *mem_canary, guint8 *canary, int *len)
{
	void *ptr;
	int i;

	/* The first EMEM_CANARY_SIZE-1 bytes must always match. */
	for (i = 0; i < EMEM_CANARY_SIZE-1; i++)
		if (mem_canary[i] != canary[i])
			return (void *) -1;

	/* The canary is padded up to EMEM_CANARY_DATA_SIZE bytes; it ends
	 * at the first NUL byte, which the stored pointer follows. */
	for (; i < EMEM_CANARY_DATA_SIZE; i++) {
		if (canary[i] == '\0') {
			memcpy(&ptr, &canary[i+1], sizeof(void *));

			if (len)
				*len = i + 1 + sizeof(void *);
			return ptr;
		}

		if (mem_canary[i] != canary[i])
			return (void *) -1;
	}

	/* No NUL terminator found within the maximum canary size. */
	return (void *) -1;
}
228
229 /*
230  * Given an allocation size, return the amount of room needed for the canary
231  * (with a minimum of 8 bytes) while using the canary to pad to an 8-byte
232  * boundary.
233  */
234 static guint8
235 emem_canary_pad (size_t allocation)
236 {
237         guint8 pad;
238
239         pad = EMEM_CANARY_SIZE - (allocation % EMEM_CANARY_SIZE);
240         if (pad < EMEM_CANARY_SIZE)
241                 pad += EMEM_CANARY_SIZE;
242
243         return pad;
244 }
245
/* used for debugging canaries, will block */
#ifdef DEBUG_INTENSE_CANARY_CHECKS
gboolean intense_canary_checking = FALSE;

/*  Intensively check all ep canaries.
 *
 *  Walks the canary chain of every chunk on the ep free list and calls
 *  g_error() (fatal) on the first mismatch, reporting the two call
 *  sites between which the corruption happened: 'there' is the message
 *  formatted on the previous call, 'here' the one from this call.
 */
void
ep_check_canary_integrity(const char* fmt, ...)
{
	va_list ap;
	/* Survives across calls so we can name the previous checkpoint. */
	static gchar there[128] = {
		'L','a','u','n','c','h',0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
		0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
		0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
		0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 };
	gchar here[128];
	emem_chunk_t* npc = NULL;

	if (! intense_canary_checking ) return;

	va_start(ap,fmt);
	g_vsnprintf(here, sizeof(here), fmt, ap);
	va_end(ap);

	for (npc = ep_packet_mem.free_list; npc != NULL; npc = npc->next) {
		void *canary_next = npc->canary_last;

		while (canary_next != NULL) {
			canary_next = emem_canary_next(ep_packet_mem.canary, canary_next, NULL);
			/* XXX, check if canary_next is inside allocated memory? */

			if (canary_next == (void *) -1)
				g_error("Per-packet memory corrupted\nbetween: %s\nand: %s", there, here);
		}
	}

	g_strlcpy(there, here, sizeof(there));
}
#endif
285
286 static void
287 emem_init_chunk(emem_pool_t *mem)
288 {
289         if (mem->debug_use_canary)
290                 emem_canary_init(mem->canary);
291
292         if (mem->debug_use_chunks)
293                 mem->memory_alloc = emem_alloc_chunk;
294         else
295                 mem->memory_alloc = emem_alloc_glib;
296 }
297
298
299 /* Initialize the packet-lifetime memory allocation pool.
300  * This function should be called only once when Wireshark or TShark starts
301  * up.
302  */
303 static void
304 ep_init_chunk(void)
305 {
306         ep_packet_mem.free_list=NULL;
307         ep_packet_mem.used_list=NULL;
308         ep_packet_mem.trees=NULL;       /* not used by this allocator */
309
310         ep_packet_mem.debug_use_chunks = (getenv("WIRESHARK_DEBUG_EP_NO_CHUNKS") == NULL);
311         ep_packet_mem.debug_use_canary = ep_packet_mem.debug_use_chunks && (getenv("WIRESHARK_DEBUG_EP_NO_CANARY") == NULL);
312         ep_packet_mem.debug_verify_pointers = (getenv("WIRESHARK_EP_VERIFY_POINTERS") != NULL);
313
314 #ifdef DEBUG_INTENSE_CANARY_CHECKS
315         intense_canary_checking = (getenv("WIRESHARK_DEBUG_EP_INTENSE_CANARY") != NULL);
316 #endif
317
318         emem_init_chunk(&ep_packet_mem);
319 }
320
321 /* Initialize the capture-lifetime memory allocation pool.
322  * This function should be called only once when Wireshark or TShark starts
323  * up.
324  */
325 static void
326 se_init_chunk(void)
327 {
328         se_packet_mem.free_list = NULL;
329         se_packet_mem.used_list = NULL;
330         se_packet_mem.trees = NULL;
331
332         se_packet_mem.debug_use_chunks = (getenv("WIRESHARK_DEBUG_SE_NO_CHUNKS") == NULL);
333         se_packet_mem.debug_use_canary = se_packet_mem.debug_use_chunks && (getenv("WIRESHARK_DEBUG_SE_USE_CANARY") != NULL);
334         se_packet_mem.debug_verify_pointers = (getenv("WIRESHARK_SE_VERIFY_POINTERS") != NULL);
335
336         emem_init_chunk(&se_packet_mem);
337 }
338
/*  Initialize all the allocators here.
 *  This function should be called only once when Wireshark or TShark starts
 *  up.
 */
void
emem_init(void)
{
	ep_init_chunk();
	se_init_chunk();

	/* Optionally scrub memory on alloc/free with recognizable patterns. */
	if (getenv("WIRESHARK_DEBUG_SCRUB_MEMORY"))
		debug_use_memory_scrubber  = TRUE;

#if defined (_WIN32)
	/* Set up our guard page info for Win32 */
	GetSystemInfo(&sysinfo);
	pagesize = sysinfo.dwPageSize;

	/* calling GetVersionEx using the OSVERSIONINFO structure.
	 * OSVERSIONINFOEX requires Win NT4 with SP6 or newer NT Versions.
	 * OSVERSIONINFOEX will fail on Win9x and older NT Versions.
	 * See also:
	 * http://msdn.microsoft.com/library/en-us/sysinfo/base/getversionex.asp
	 * http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfo_str.asp
	 * http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfoex_str.asp
	 */
	versinfo.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
	GetVersionEx(&versinfo);

#elif defined(USE_GUARD_PAGES)
	pagesize = sysconf(_SC_PAGESIZE);
	if (pagesize == -1)
		fprintf(stderr, "Warning: call to sysconf() for _SC_PAGESIZE has failed...\n");
#ifdef NEED_DEV_ZERO
	/* No anonymous mmap() on this platform; map /dev/zero instead. */
	dev_zero_fd = ws_open("/dev/zero", O_RDWR);
	g_assert(dev_zero_fd != -1);
#endif
#endif /* _WIN32 / USE_GUARD_PAGES */
}
378
#ifdef SHOW_EMEM_STATS
#define NUM_ALLOC_DIST 10
/* Histogram of SE allocation sizes; bucket i roughly covers sizes below
 * 32<<i (see the printout at the bottom of print_alloc_stats()). */
static guint allocations[NUM_ALLOC_DIST] = { 0 };
/* Number of chunks currently obtained from the OS. */
static guint total_no_chunks = 0;

/* Dump EP and SE allocator statistics to stderr.
 * NOTE(review): prototype should be print_alloc_stats(void) in C. */
static void
print_alloc_stats()
{
	guint num_chunks = 0;
	guint num_allocs = 0;
	guint total_used = 0;
	guint total_allocation = 0;
	guint used_for_canaries = 0;
	guint total_headers;
	guint i;
	emem_chunk_t *chunk;
	guint total_space_allocated_from_os, total_space_wasted;
	gboolean ep_stat=TRUE;

	fprintf(stderr, "\n-------- EP allocator statistics --------\n");
	fprintf(stderr, "%s chunks, %s canaries, %s memory scrubber\n",
	       ep_packet_mem.debug_use_chunks ? "Using" : "Not using",
	       ep_packet_mem.debug_use_canary ? "using" : "not using",
	       debug_use_memory_scrubber ? "using" : "not using");

	/* NOTE(review): this reads as "free list empty AND used list
	 * non-empty"; "no memory allocated" would normally be "both lists
	 * empty" -- confirm the intent. */
	if (! (ep_packet_mem.free_list || !ep_packet_mem.used_list)) {
		fprintf(stderr, "No memory allocated\n");
		ep_stat = FALSE;
	}
	if (ep_packet_mem.debug_use_chunks && ep_stat) {
		/* Nothing interesting without chunks */
		/*  Only look at the used_list since those chunks are fully
		 *  used.  Looking at the free list would skew our view of what
		 *  we have wasted.
		 */
		for (chunk = ep_packet_mem.used_list; chunk; chunk = chunk->next) {
			num_chunks++;
			total_used += (chunk->amount_free_init - chunk->amount_free);
			total_allocation += chunk->amount_free_init;
		}
		if (num_chunks > 0) {
			fprintf (stderr, "\n");
			fprintf (stderr, "\n---- Buffer space ----\n");
			fprintf (stderr, "\tChunk allocation size: %10u\n", EMEM_PACKET_CHUNK_SIZE);
			fprintf (stderr, "\t*    Number of chunks: %10u\n", num_chunks);
			fprintf (stderr, "\t-------------------------------------------\n");
			fprintf (stderr, "\t= %u (%u including guard pages) total space used for buffers\n",
			total_allocation, EMEM_PACKET_CHUNK_SIZE * num_chunks);
			fprintf (stderr, "\t-------------------------------------------\n");
			total_space_allocated_from_os = total_allocation
				+ sizeof(emem_chunk_t) * num_chunks;
			fprintf (stderr, "Total allocated from OS: %u\n\n",
				total_space_allocated_from_os);
		}else{
			fprintf (stderr, "No fully used chunks, nothing to do\n");
		}
		/* Reset stats */
		num_chunks = 0;
		num_allocs = 0;
		total_used = 0;
		total_allocation = 0;
		used_for_canaries = 0;
	}


	fprintf(stderr, "\n-------- SE allocator statistics --------\n");
	fprintf(stderr, "Total number of chunk allocations %u\n",
		total_no_chunks);
	fprintf(stderr, "%s chunks, %s canaries\n",
	       se_packet_mem.debug_use_chunks ? "Using" : "Not using",
	       se_packet_mem.debug_use_canary ? "using" : "not using");

	/* NOTE(review): same suspicious condition as the EP case above. */
	if (! (se_packet_mem.free_list || !se_packet_mem.used_list)) {
		fprintf(stderr, "No memory allocated\n");
		return;
	}

	if (!se_packet_mem.debug_use_chunks )
		return; /* Nothing interesting without chunks?? */

	/*  Only look at the used_list since those chunks are fully used.
	 *  Looking at the free list would skew our view of what we have wasted.
	 */
	for (chunk = se_packet_mem.used_list; chunk; chunk = chunk->next) {
		num_chunks++;
		total_used += (chunk->amount_free_init - chunk->amount_free);
		total_allocation += chunk->amount_free_init;

		if (se_packet_mem.debug_use_canary){
			void *ptr = chunk->canary_last;
			int len;

			/* Walk this chunk's canary chain, totalling the bytes
			 * the canaries (plus chain pointers) occupy. */
			while (ptr != NULL) {
				ptr = emem_canary_next(se_packet_mem.canary, ptr, &len);

				if (ptr == (void *) -1)
					g_error("Memory corrupted");
				used_for_canaries += len;
			}
		}
	}

	if (num_chunks == 0) {

		fprintf (stderr, "No fully used chunks, nothing to do\n");
		return;
	}

	fprintf (stderr, "\n");
	fprintf (stderr, "---------- Allocations from the OS ----------\n");
	fprintf (stderr, "---- Headers ----\n");
	fprintf (stderr, "\t(    Chunk header size: %10lu\n",
		 sizeof(emem_chunk_t));
	fprintf (stderr, "\t*     Number of chunks: %10u\n", num_chunks);
	fprintf (stderr, "\t-------------------------------------------\n");

	total_headers = sizeof(emem_chunk_t) * num_chunks;
	fprintf (stderr, "\t= %u bytes used for headers\n", total_headers);
	fprintf (stderr, "\n---- Buffer space ----\n");
	fprintf (stderr, "\tChunk allocation size: %10u\n",
		 EMEM_PACKET_CHUNK_SIZE);
	fprintf (stderr, "\t*    Number of chunks: %10u\n", num_chunks);
	fprintf (stderr, "\t-------------------------------------------\n");
	fprintf (stderr, "\t= %u (%u including guard pages) bytes used for buffers\n",
		total_allocation, EMEM_PACKET_CHUNK_SIZE * num_chunks);
	fprintf (stderr, "\t-------------------------------------------\n");
	total_space_allocated_from_os = (EMEM_PACKET_CHUNK_SIZE * num_chunks)
					+ total_headers;
	fprintf (stderr, "Total bytes allocated from the OS: %u\n\n",
		total_space_allocated_from_os);

	for (i = 0; i < NUM_ALLOC_DIST; i++)
		num_allocs += allocations[i];

	fprintf (stderr, "---------- Allocations from the SE pool ----------\n");
	fprintf (stderr, "                Number of SE allocations: %10u\n",
		 num_allocs);
	fprintf (stderr, "             Bytes used (incl. canaries): %10u\n",
		 total_used);
	fprintf (stderr, "                 Bytes used for canaries: %10u\n",
		 used_for_canaries);
	fprintf (stderr, "Bytes unused (wasted, excl. guard pages): %10u\n",
		 total_allocation - total_used);
	fprintf (stderr, "Bytes unused (wasted, incl. guard pages): %10u\n\n",
		 total_space_allocated_from_os - total_used);

	fprintf (stderr, "---------- Statistics ----------\n");
	fprintf (stderr, "Average SE allocation size (incl. canaries): %6.2f\n",
		(float)total_used/(float)num_allocs);
	fprintf (stderr, "Average SE allocation size (excl. canaries): %6.2f\n",
		(float)(total_used - used_for_canaries)/(float)num_allocs);
	fprintf (stderr, "        Average wasted bytes per allocation: %6.2f\n",
		(total_allocation - total_used)/(float)num_allocs);
	/* NOTE(review): header size is added once, not once per chunk --
	 * possibly a missing "* num_chunks"; confirm before changing. */
	total_space_wasted = (total_allocation - total_used)
		+ (sizeof(emem_chunk_t));
	fprintf (stderr, " Space used for headers + unused allocation: %8u\n",
		total_space_wasted);
	fprintf (stderr, "--> %% overhead/waste: %4.2f\n",
		100 * (float)total_space_wasted/(float)total_space_allocated_from_os);

	fprintf (stderr, "\nAllocation distribution (sizes include canaries):\n");
	for (i = 0; i < (NUM_ALLOC_DIST-1); i++)
		fprintf (stderr, "size < %5d: %8u\n", 32<<i, allocations[i]);
	fprintf (stderr, "size > %5d: %8u\n", 32<<i, allocations[i]);
}
#endif
545
546 static gboolean
547 emem_verify_pointer_list(const emem_chunk_t *chunk_list, const void *ptr)
548 {
549         const gchar *cptr = ptr;
550         const emem_chunk_t *chunk;
551
552         for (chunk = chunk_list; chunk; chunk = chunk->next) {
553                 if (cptr >= (chunk->buf + chunk->free_offset_init) && cptr < (chunk->buf + chunk->free_offset))
554                         return TRUE;
555         }
556         return FALSE;
557 }
558
559 static gboolean
560 emem_verify_pointer(const emem_pool_t *hdr, const void *ptr)
561 {
562         return emem_verify_pointer_list(hdr->free_list, ptr) || emem_verify_pointer_list(hdr->used_list, ptr);
563 }
564
565 gboolean
566 ep_verify_pointer(const void *ptr)
567 {
568         if (ep_packet_mem.debug_verify_pointers)
569                 return emem_verify_pointer(&ep_packet_mem, ptr);
570         else
571                 return FALSE;
572 }
573
574 gboolean
575 se_verify_pointer(const void *ptr)
576 {
577         if (se_packet_mem.debug_verify_pointers)
578                 return emem_verify_pointer(&se_packet_mem, ptr);
579         else
580                 return FALSE;
581 }
582
/*
 * Overwrite 'size' bytes at 'buf' with a recognizable pattern:
 * 0xBADDCAFE when the memory is being handed out (alloc == TRUE),
 * 0xDEADBEEF when it is being released.  No-op unless
 * WIRESHARK_DEBUG_SCRUB_MEMORY was exported (debug_use_memory_scrubber).
 */
static void
emem_scrub_memory(char *buf, size_t size, gboolean alloc)
{
	guint scrubbed_value;
	guint offset;

	if (!debug_use_memory_scrubber)
		return;

	if (alloc) /* this memory is being allocated */
		scrubbed_value = 0xBADDCAFE;
	else /* this memory is being freed */
		scrubbed_value = 0xDEADBEEF;

	/*  We shouldn't need to check the alignment of the starting address
	 *  since this is malloc'd memory (or 'pagesize' bytes into malloc'd
	 *  memory).
	 */

	/* XXX - if the above is *NOT* true, we should use memcpy here,
	 * in order to avoid problems on alignment-sensitive platforms, e.g.
	 * http://stackoverflow.com/questions/108866/is-there-memset-that-accepts-integers-larger-than-char
	 */

	/* Fill as many whole guint-sized words as fit... */
	for (offset = 0; offset + sizeof(guint) <= size; offset += sizeof(guint))
		*(guint*)(void*)(buf+offset) = scrubbed_value;

	/* Initialize the last bytes, if any */
	/* ...then the 1-3 trailing bytes, taking successive high-order
	 * bytes of the pattern. */
	if (offset < size) {
		*(guint8*)(buf+offset) = scrubbed_value >> 24;
		offset++;
		if (offset < size) {
			*(guint8*)(buf+offset) = (scrubbed_value >> 16) & 0xFF;
			offset++;
			if (offset < size) {
				*(guint8*)(buf+offset) = (scrubbed_value >> 8) & 0xFF;
			}
		}
	}


}
625
/*
 * Allocate one chunk with 'size' bytes of backing store plus its header.
 * Uses VirtualAlloc() on Windows and mmap() where guard pages are
 * supported (so page protections can be applied later); otherwise falls
 * back to g_malloc().  On allocation failure it either abort()s (if
 * WIRESHARK_ABORT_ON_OUT_OF_MEMORY is set) or throws OutOfMemoryError.
 */
static emem_chunk_t *
emem_create_chunk(size_t size)
{
	emem_chunk_t *npc;

	npc = g_new(emem_chunk_t, 1);
	npc->next = NULL;
	npc->canary_last = NULL;

#if defined (_WIN32)
	/*
	 * MSDN documents VirtualAlloc/VirtualProtect at
	 * http://msdn.microsoft.com/library/en-us/memory/base/creating_guard_pages.asp
	 */

	/* XXX - is MEM_COMMIT|MEM_RESERVE correct? */
	npc->buf = VirtualAlloc(NULL, size,
		MEM_COMMIT|MEM_RESERVE, PAGE_READWRITE);

	if (npc->buf == NULL) {
		g_free(npc);
		if (getenv("WIRESHARK_ABORT_ON_OUT_OF_MEMORY"))
			abort();
		else
			THROW(OutOfMemoryError);
	}

#elif defined(USE_GUARD_PAGES)
	npc->buf = mmap(NULL, size,
		PROT_READ|PROT_WRITE, ANON_PAGE_MODE, ANON_FD, 0);

	if (npc->buf == MAP_FAILED) {
		g_free(npc);
		if (getenv("WIRESHARK_ABORT_ON_OUT_OF_MEMORY"))
			abort();
		else
			THROW(OutOfMemoryError);
	}

#else /* Is there a draft in here? */
	npc->buf = g_malloc(size);
	/* g_malloc() can't fail */
#endif

#ifdef SHOW_EMEM_STATS
	total_no_chunks++;
#endif

	/* The whole buffer starts out free; guard-page adjustments (if any)
	 * are applied afterwards by emem_create_chunk_gp(). */
	npc->amount_free = npc->amount_free_init = (unsigned int) size;
	npc->free_offset = npc->free_offset_init = 0;
	return npc;
}
678
/*
 * Release a chunk's backing store (matching however emem_create_chunk()
 * obtained it) and then the chunk header itself.
 */
static void
emem_destroy_chunk(emem_chunk_t *npc)
{
#if defined (_WIN32)
	VirtualFree(npc->buf, 0, MEM_RELEASE);
#elif defined(USE_GUARD_PAGES)

	/* we cannot recover from a munmap() failure, but we    */
	/* can print an informative error message to stderr     */

	/* NOTE(review): amount_free_init is the usable size between the
	 * guard pages, which is smaller than the originally mmap()ed size;
	 * this looks like it leaves the guard/edge pages mapped -- confirm. */
	if (munmap(npc->buf, npc->amount_free_init) != 0)
		fprintf(stderr, "Warning: Unable to unmap memory chunk which has address %p and size %u\n",
			npc->buf, npc->amount_free_init);
#else
	g_free(npc->buf);
#endif
#ifdef SHOW_EMEM_STATS
	total_no_chunks--;
#endif
	g_free(npc);
}
700
/*
 * Create a chunk and, where supported, make the first and last whole
 * page of its buffer inaccessible guard pages.  The usable region that
 * lies strictly between the guard pages is recorded in
 * amount_free_init/free_offset_init (and mirrored into the working
 * amount_free/free_offset fields).
 */
static emem_chunk_t *
emem_create_chunk_gp(size_t size)
{
#if defined (_WIN32)
	BOOL ret;
	char *buf_end, *prot1, *prot2;
	DWORD oldprot;
#elif defined(USE_GUARD_PAGES)
	int ret;
	char *buf_end, *prot1, *prot2;
#endif /* _WIN32 / USE_GUARD_PAGES */
	emem_chunk_t *npc;

	npc = emem_create_chunk(size);

#if defined (_WIN32)
	buf_end = npc->buf + size;

	/* Align our guard pages on page-sized boundaries */
	prot1 = (char *) ((((intptr_t) npc->buf + pagesize - 1) / pagesize) * pagesize);
	prot2 = (char *) ((((intptr_t) buf_end - (1 * pagesize)) / pagesize) * pagesize);

	/* The platform check lets the assert pass on Win9x, where
	 * VirtualProtect is not supported. */
	ret = VirtualProtect(prot1, pagesize, PAGE_NOACCESS, &oldprot);
	g_assert(ret != 0 || versinfo.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS);
	ret = VirtualProtect(prot2, pagesize, PAGE_NOACCESS, &oldprot);
	g_assert(ret != 0 || versinfo.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS);

	npc->amount_free_init = (unsigned int) (prot2 - prot1 - pagesize);
	npc->free_offset_init = (unsigned int) (prot1 - npc->buf) + pagesize;
#elif defined(USE_GUARD_PAGES)
	buf_end = npc->buf + size;

	/* Align our guard pages on page-sized boundaries */
	prot1 = (char *) ((((intptr_t) npc->buf + pagesize - 1) / pagesize) * pagesize);
	prot2 = (char *) ((((intptr_t) buf_end - (1 * pagesize)) / pagesize) * pagesize);

	ret = mprotect(prot1, pagesize, PROT_NONE);
	g_assert(ret != -1);
	ret = mprotect(prot2, pagesize, PROT_NONE);
	g_assert(ret != -1);

	/* Usable space: everything after the first guard page, up to the
	 * start of the second one. */
	npc->amount_free_init = prot2 - prot1 - pagesize;
	npc->free_offset_init = (prot1 - npc->buf) + pagesize;
#else
	npc->amount_free_init = size;
	npc->free_offset_init = 0;
#endif /* USE_GUARD_PAGES */

	npc->amount_free = npc->amount_free_init;
	npc->free_offset = npc->free_offset_init;
	return npc;
}
753
/*
 * Chunked allocator back-end: bump-allocate 'size' bytes (rounded up for
 * alignment and, optionally, a trailing canary) out of the pool's first
 * free chunk, creating or retiring chunks as needed.  When canaries are
 * enabled, the canary written after the user data also chains (via an
 * embedded pointer) to the chunk's previous canary.
 */
static void *
emem_alloc_chunk(size_t size, emem_pool_t *mem)
{
	void *buf;

	size_t asize = size;
	gboolean use_canary = mem->debug_use_canary;
	guint8 pad;
	emem_chunk_t *free_list;

	/* Allocate room for at least 8 bytes of canary plus some padding
	 * so the canary ends on an 8-byte boundary.
	 * But first add the room needed for the pointer to the next canary
	 * (so the entire allocation will end on an 8-byte boundary).
	 */
	 if (use_canary) {
		asize += sizeof(void *);
		pad = emem_canary_pad(asize);
	} else
		pad = (WS_MEM_ALIGN - (asize & (WS_MEM_ALIGN-1))) & (WS_MEM_ALIGN-1);

	asize += pad;

#ifdef SHOW_EMEM_STATS
	/* Do this check here so we can include the canary size */
	if (mem == &se_packet_mem) {
		if (asize < 32)
			allocations[0]++;
		else if (asize < 64)
			allocations[1]++;
		else if (asize < 128)
			allocations[2]++;
		else if (asize < 256)
			allocations[3]++;
		else if (asize < 512)
			allocations[4]++;
		else if (asize < 1024)
			allocations[5]++;
		else if (asize < 2048)
			allocations[6]++;
		else if (asize < 4096)
			allocations[7]++;
		else if (asize < 8192)
			allocations[8]++;
		/* NOTE(review): [4096,16384) all lands in bucket 8 -- looks
		 * like a copy/paste slip in the histogram; stats only. */
		else if (asize < 16384)
			allocations[8]++;
		else
			allocations[(NUM_ALLOC_DIST-1)]++;
	}
#endif

	/* make sure we dont try to allocate too much (arbitrary limit) */
	DISSECTOR_ASSERT(size<(EMEM_PACKET_CHUNK_SIZE>>2));

	if (!mem->free_list)
		mem->free_list = emem_create_chunk_gp(EMEM_PACKET_CHUNK_SIZE);

	/* oops, we need to allocate more memory to serve this request
	 * than we have free. move this node to the used list and try again
	 */
	if(asize > mem->free_list->amount_free) {
		emem_chunk_t *npc;
		npc=mem->free_list;
		mem->free_list=mem->free_list->next;
		npc->next=mem->used_list;
		mem->used_list=npc;

		if (!mem->free_list)
			mem->free_list = emem_create_chunk_gp(EMEM_PACKET_CHUNK_SIZE);
	}

	free_list = mem->free_list;

	/* Bump-allocate from the front chunk. */
	buf = free_list->buf + free_list->free_offset;

	free_list->amount_free -= (unsigned int) asize;
	free_list->free_offset += (unsigned int) asize;

	if (use_canary) {
		char *cptr = (char *)buf + size;

		/* pad-1 canary bytes, a NUL terminator, then the pointer to
		 * the chunk's previous canary (the chain walked by
		 * emem_canary_next()). */
		memcpy(cptr, mem->canary, pad-1);
		cptr[pad-1] = '\0';
		memcpy(cptr + pad, &free_list->canary_last, sizeof(void *));

		free_list->canary_last = cptr;
	}

	return buf;
}
844
845 static void *
846 emem_alloc_glib(size_t size, emem_pool_t *mem)
847 {
848         emem_chunk_t *npc;
849
850         npc=g_new(emem_chunk_t, 1);
851         npc->next=mem->used_list;
852         npc->buf=g_malloc(size);
853         npc->canary_last = NULL;
854         mem->used_list=npc;
855         /* There's no padding/alignment involved (from our point of view) when
856          * we fetch the memory directly from the system pool, so WYSIWYG */
857         npc->free_offset = npc->free_offset_init = 0;
858         npc->amount_free = npc->amount_free_init = (unsigned int) size;
859
860         return npc->buf;
861 }
862
863 /* allocate 'size' amount of memory. */
864 static void *
865 emem_alloc(size_t size, emem_pool_t *mem)
866 {
867         void *buf = mem->memory_alloc(size, mem);
868
869         /*  XXX - this is a waste of time if the allocator function is going to
870          *  memset this straight back to 0.
871          */
872         emem_scrub_memory(buf, size, TRUE);
873
874         return buf;
875 }
876
/* allocate 'size' amount of memory with an allocation lifetime until the
 * next packet.
 */
void *
ep_alloc(size_t size)
{
	/* The ep pool is reset as a whole by ep_free_all(). */
	return emem_alloc(size, &ep_packet_mem);
}
885
/* allocate 'size' amount of memory with an allocation lifetime until the
 * next capture.
 */
void *
se_alloc(size_t size)
{
	/* The se pool is reset as a whole by se_free_all(). */
	return emem_alloc(size, &se_packet_mem);
}
894
void *
sl_alloc(struct ws_memory_slab *mem_chunk)
{
	emem_chunk_t *chunk;
	void *ptr;

	/* XXX, debug_use_slices -> fallback to g_slice_alloc0 */

	/* Fast path: pop the head of the freed-item list.  The first
	 * sizeof(void *) bytes of a freed item hold the pointer to the
	 * next freed item (stored there by sl_free() via memcpy). */
	if ((mem_chunk->freed != NULL)) {
		ptr = mem_chunk->freed;
		memcpy(&mem_chunk->freed, ptr, sizeof(void *));
		return ptr;
	}

	/* No recycled item: carve a new one out of the newest chunk,
	 * allocating a fresh chunk if there is none or it is exhausted. */
	if (!(chunk = mem_chunk->chunk_list) || chunk->amount_free < (guint) mem_chunk->item_size) {
		size_t alloc_size = mem_chunk->item_size * mem_chunk->count;

		/* align to page-size */
#if defined (_WIN32) || defined(USE_GUARD_PAGES)
		alloc_size = (alloc_size + (pagesize - 1)) & ~(pagesize - 1);
#endif

		chunk = emem_create_chunk(alloc_size);	/* NOTE: using version without guard pages! */
		chunk->next = mem_chunk->chunk_list;
		mem_chunk->chunk_list = chunk;
	}

	/* Hand out the next item_size bytes of the chunk. */
	ptr = chunk->buf + chunk->free_offset;
	chunk->free_offset += mem_chunk->item_size;
	chunk->amount_free -= mem_chunk->item_size;

	return ptr;
}
928
void
sl_free(struct ws_memory_slab *mem_chunk, gpointer ptr)
{
	/* XXX, debug_use_slices -> fallback to g_slice_free1 */

	/* XXX, abort if ptr not found in emem_verify_pointer_list()? */
	if (ptr != NULL /* && emem_verify_pointer_list(mem_chunk->chunk_list, ptr) */) {
		/* Store the old freed-list head inside the first
		 * sizeof(void *) bytes of the item, then make the item the
		 * new head; sl_alloc() reverses this to recycle it. */
		memcpy(ptr, &(mem_chunk->freed), sizeof(void *));
		mem_chunk->freed = ptr;
	}
}
940
941 void *
942 ep_alloc0(size_t size)
943 {
944         return memset(ep_alloc(size),'\0',size);
945 }
946
947 void *
948 se_alloc0(size_t size)
949 {
950         return memset(se_alloc(size),'\0',size);
951 }
952
953 void *
954 sl_alloc0(struct ws_memory_slab *mem_chunk)
955 {
956         return memset(sl_alloc(mem_chunk), '\0', mem_chunk->item_size);
957 }
958
/* Duplicate 'src' into memory obtained from 'allocator'. */
static gchar *
emem_strdup(const gchar *src, void *allocator(size_t))
{
	guint len;
	gchar *dst;

	/* If str is NULL, just return the string "<NULL>" so that the callers don't
	 * have to bother checking it.
	 * NOTE(review): in that case the result is a string literal, not
	 * pool memory — callers must not modify it.
	 */
	if(!src)
		return "<NULL>";

	/* Copy the terminating nul along with the payload. */
	len = (guint) strlen(src);
	dst = memcpy(allocator(len+1), src, len+1);

	return dst;
}
976
/* Packet-scoped string duplicate; returns "<NULL>" for a NULL src. */
gchar *
ep_strdup(const gchar *src)
{
	return emem_strdup(src, ep_alloc);
}
982
/* Capture-scoped string duplicate; returns "<NULL>" for a NULL src. */
gchar *
se_strdup(const gchar *src)
{
	return emem_strdup(src, se_alloc);
}
988
989 static gchar *
990 emem_strndup(const gchar *src, size_t len, void *allocator(size_t))
991 {
992         gchar *dst = allocator(len+1);
993         guint i;
994
995         for (i = 0; (i < len) && src[i]; i++)
996                 dst[i] = src[i];
997
998         dst[i] = '\0';
999
1000         return dst;
1001 }
1002
/* Packet-scoped copy of at most 'len' characters of 'src'. */
gchar *
ep_strndup(const gchar *src, size_t len)
{
	return emem_strndup(src, len, ep_alloc);
}
1008
/* Capture-scoped copy of at most 'len' characters of 'src'. */
gchar *
se_strndup(const gchar *src, size_t len)
{
	return emem_strndup(src, len, se_alloc);
}
1014
1015
1016
1017 void *
1018 ep_memdup(const void* src, size_t len)
1019 {
1020         return memcpy(ep_alloc(len), src, len);
1021 }
1022
1023 void *
1024 se_memdup(const void* src, size_t len)
1025 {
1026         return memcpy(se_alloc(len), src, len);
1027 }
1028
/* vprintf-style formatting into memory obtained from 'allocator'. */
static gchar *
emem_strdup_vprintf(const gchar *fmt, va_list ap, void *allocator(size_t))
{
	va_list ap2;
	gchar* dst;

	/* The sizing pass below consumes 'ap', so keep a copy for the
	 * actual formatting pass. */
	G_VA_COPY(ap2, ap);

	/* Upper bound on the space needed, including the terminating nul. */
	len = g_printf_string_upper_bound(fmt, ap);

	dst = allocator(len+1);
	g_vsnprintf (dst, (gulong) len, fmt, ap2);
	va_end(ap2);

	return dst;
}
1046
/* vprintf-style formatting into packet-scoped memory. */
gchar *
ep_strdup_vprintf(const gchar *fmt, va_list ap)
{
	return emem_strdup_vprintf(fmt, ap, ep_alloc);
}
1052
/* vprintf-style formatting into capture-scoped memory. */
gchar *
se_strdup_vprintf(const gchar* fmt, va_list ap)
{
	return emem_strdup_vprintf(fmt, ap, se_alloc);
}
1058
/* printf-style formatting into packet-scoped memory. */
gchar *
ep_strdup_printf(const gchar *fmt, ...)
{
	va_list ap;
	gchar *dst;

	va_start(ap, fmt);
	dst = ep_strdup_vprintf(fmt, ap);
	va_end(ap);
	return dst;
}
1070
/* printf-style formatting into capture-scoped memory. */
gchar *
se_strdup_printf(const gchar *fmt, ...)
{
	va_list ap;
	gchar *dst;

	va_start(ap, fmt);
	dst = se_strdup_vprintf(fmt, ap);
	va_end(ap);
	return dst;
}
1082
/* g_strsplit()-like tokenizer returning a NULL-terminated vector of
 * packet-scoped strings.  Returns NULL for a NULL string or an
 * empty/NULL separator. */
gchar **
ep_strsplit(const gchar* string, const gchar* sep, int max_tokens)
{
	gchar* splitted;
	gchar* s;
	guint tokens;
	guint str_len;
	guint sep_len;
	guint i;
	gchar** vec;
	enum { AT_START, IN_PAD, IN_TOKEN } state;
	guint curr_tok = 0;

	if (    ! string
	     || ! sep
	     || ! sep[0])
		return NULL;

	/* Work on a packet-scoped copy; the tokens returned point into it. */
	s = splitted = ep_strdup(string);
	str_len = (guint) strlen(splitted);
	sep_len = (guint) strlen(sep);

	if (max_tokens < 1) max_tokens = INT_MAX;

	/* Pass 1: overwrite each separator occurrence (up to max_tokens)
	 * with nul bytes, counting the resulting tokens. */
	tokens = 1;


	while (tokens <= (guint)max_tokens && ( s = strstr(s,sep) )) {
		tokens++;

		for(i=0; i < sep_len; i++ )
			s[i] = '\0';

		s += sep_len;

	}

	/* Pass 2: walk the nul-riddled copy with a small state machine,
	 * recording where each token starts. */
	vec = ep_alloc_array(gchar*,tokens+1);
	state = AT_START;

	for (i=0; i< str_len; i++) {
		switch(state) {
			case AT_START:
				switch(splitted[i]) {
					case '\0':
						state  = IN_PAD;
						continue;
					default:
						vec[curr_tok] = &(splitted[i]);
						curr_tok++;
						state = IN_TOKEN;
						continue;
				}
			case IN_TOKEN:
				switch(splitted[i]) {
					case '\0':
						state = IN_PAD;
						/* fallthrough */
					default:
						continue;
				}
			case IN_PAD:
				switch(splitted[i]) {
					default:
						vec[curr_tok] = &(splitted[i]);
						curr_tok++;
						state = IN_TOKEN;
						/* fallthrough */
					case '\0':
						continue;
				}
		}
	}

	/* NULL-terminate the vector, g_strsplit()-style. */
	vec[curr_tok] = NULL;

	return vec;
}
1159
1160 gchar *
1161 ep_strconcat(const gchar *string1, ...)
1162 {
1163         gsize   l;
1164         va_list args;
1165         gchar   *s;
1166         gchar   *concat;
1167         gchar   *ptr;
1168
1169         if (!string1)
1170                 return NULL;
1171
1172         l = 1 + strlen(string1);
1173         va_start(args, string1);
1174         s = va_arg(args, gchar*);
1175         while (s) {
1176                 l += strlen(s);
1177                 s = va_arg(args, gchar*);
1178         }
1179         va_end(args);
1180
1181         concat = ep_alloc(l);
1182         ptr = concat;
1183
1184         ptr = g_stpcpy(ptr, string1);
1185         va_start(args, string1);
1186         s = va_arg(args, gchar*);
1187         while (s) {
1188                 ptr = g_stpcpy(ptr, s);
1189                 s = va_arg(args, gchar*);
1190         }
1191         va_end(args);
1192
1193         return concat;
1194 }
1195
1196
1197
/* release all allocated memory back to the pool. */
static void
emem_free_all(emem_pool_t *mem)
{
	gboolean use_chunks = mem->debug_use_chunks;

	emem_chunk_t *npc;
	emem_tree_t *tree_list;

	/* move all used chunks over to the free list */
	while(mem->used_list){
		npc=mem->used_list;
		mem->used_list=mem->used_list->next;
		npc->next=mem->free_list;
		mem->free_list=npc;
	}

	/* clear them all out */
	npc = mem->free_list;
	while (npc != NULL) {
		if (use_chunks) {
			/* Walk the chunk's canary chain backwards: each canary
			 * records the address of the previous one (stored at
			 * allocation time).  emem_canary_next() returns
			 * (void *)-1 when a canary has been overwritten. */
			while (npc->canary_last != NULL) {
				npc->canary_last = emem_canary_next(mem->canary, npc->canary_last, NULL);
				/* XXX, check if canary_last is inside allocated memory? */

				if (npc->canary_last == (void *) -1)
					g_error("Memory corrupted");
			}

			/* Scrub the handed-out region, then rewind the chunk
			 * so it can be reused as-is. */
			emem_scrub_memory((npc->buf + npc->free_offset_init),
					  (npc->free_offset - npc->free_offset_init),
					  FALSE);

			npc->amount_free = npc->amount_free_init;
			npc->free_offset = npc->free_offset_init;
			npc = npc->next;
		} else {
			/* glib-backed pool: each chunk owns a single
			 * g_malloc()ed buffer, so really free it. */
			emem_chunk_t *next = npc->next;

			emem_scrub_memory(npc->buf, npc->amount_free_init, FALSE);

			g_free(npc->buf);
			g_free(npc);
			npc = next;
		}
	}

	if (!use_chunks) {
		/* We've freed all this memory already */
		mem->free_list = NULL;
	}

	/* release/reset all allocated trees */
	for(tree_list=mem->trees;tree_list;tree_list=tree_list->next){
		tree_list->tree=NULL;
	}
}
1255
/* release all allocated memory back to the pool. */
void
ep_free_all(void)
{
	/* Called between packets; invalidates everything from ep_alloc(). */
	emem_free_all(&ep_packet_mem);
}
1262
/* release all allocated memory back to the pool. */
void
se_free_all(void)
{
#ifdef SHOW_EMEM_STATS
	/* Dump accumulated allocation-size statistics before the reset. */
	print_alloc_stats();
#endif

	/* Called between captures; invalidates everything from se_alloc(). */
	emem_free_all(&se_packet_mem);
}
1273
1274 void
1275 sl_free_all(struct ws_memory_slab *mem_chunk)
1276 {
1277         emem_chunk_t *chunk_list = mem_chunk->chunk_list;
1278
1279         mem_chunk->chunk_list = NULL;
1280         mem_chunk->freed = NULL;
1281         while (chunk_list) {
1282                 emem_chunk_t *chunk = chunk_list;
1283
1284                 chunk_list = chunk_list->next;
1285                 emem_destroy_chunk(chunk);
1286         }
1287 }
1288
ep_stack_t
ep_stack_new(void) {
	/* An ep_stack_t is a pointer to the current top frame; start with
	 * a single zeroed sentinel frame (never popped, see ep_stack_pop). */
	ep_stack_t s = ep_new(struct _ep_stack_frame_t*);
	*s = ep_new0(struct _ep_stack_frame_t);
	return s;
}
1295
1296 /*  for ep_stack_t we'll keep the popped frames so we reuse them instead
1297 of allocating new ones.
1298 */
1299
1300 void *
1301 ep_stack_push(ep_stack_t stack, void* data)
1302 {
1303         struct _ep_stack_frame_t* frame;
1304         struct _ep_stack_frame_t* head = (*stack);
1305
1306         if (head->above) {
1307                 frame = head->above;
1308         } else {
1309                 frame = ep_new(struct _ep_stack_frame_t);
1310                 head->above = frame;
1311                 frame->below = head;
1312                 frame->above = NULL;
1313         }
1314
1315         frame->payload = data;
1316         (*stack) = frame;
1317
1318         return data;
1319 }
1320
1321 void *
1322 ep_stack_pop(ep_stack_t stack)
1323 {
1324
1325         if ((*stack)->below) {
1326                 (*stack) = (*stack)->below;
1327                 return (*stack)->above->payload;
1328         } else {
1329                 return NULL;
1330         }
1331 }
1332
1333 emem_tree_t *
1334 se_tree_create(int type, const char *name)
1335 {
1336         emem_tree_t *tree_list;
1337
1338         tree_list=g_malloc(sizeof(emem_tree_t));
1339         tree_list->next=se_packet_mem.trees;
1340         tree_list->type=type;
1341         tree_list->tree=NULL;
1342         tree_list->name=name;
1343         tree_list->malloc=se_alloc;
1344         se_packet_mem.trees=tree_list;
1345
1346         return tree_list;
1347 }
1348
1349 void *
1350 emem_tree_lookup32(emem_tree_t *se_tree, guint32 key)
1351 {
1352         emem_tree_node_t *node;
1353
1354         node=se_tree->tree;
1355
1356         while(node){
1357                 if(key==node->key32){
1358                         return node->data;
1359                 }
1360                 if(key<node->key32){
1361                         node=node->left;
1362                         continue;
1363                 }
1364                 if(key>node->key32){
1365                         node=node->right;
1366                         continue;
1367                 }
1368         }
1369         return NULL;
1370 }
1371
/* Return the data of the node with the largest key that is <= 'key',
 * or NULL if every key in the tree is larger than 'key'. */
void *
emem_tree_lookup32_le(emem_tree_t *se_tree, guint32 key)
{
	emem_tree_node_t *node;

	node=se_tree->tree;

	if(!node){
		return NULL;
	}


	/* Descend as for an exact lookup, but stop at the node where we
	 * would fall off the tree; that node is adjacent to 'key'. */
	while(node){
		if(key==node->key32){
			return node->data;
		}
		if(key<node->key32){
			if(node->left){
				node=node->left;
				continue;
			} else {
				break;
			}
		}
		if(key>node->key32){
			if(node->right){
				node=node->right;
				continue;
			} else {
				break;
			}
		}
	}


	if(!node){
		return NULL;
	}

	/* If we are still at the root of the tree this means that this node
	 * is either smaller than the search key and then we return this
	 * node or else there is no smaller key available and then
	 * we return NULL.
	 */
	if(!node->parent){
		if(key>node->key32){
			return node->data;
		} else {
			return NULL;
		}
	}

	if(node->parent->left==node){
		/* left child */

		if(key>node->key32){
			/* if this is a left child and its key is smaller than
			 * the search key, then this is the node we want.
			 */
			return node->data;
		} else {
			/* if this is a left child and its key is bigger than
			 * the search key, we have to check if any
			 * of our ancestors are smaller than the search key.
			 */
			while(node){
				if(key>node->key32){
					return node->data;
				}
				node=node->parent;
			}
			return NULL;
		}
	} else {
		/* right child */

		if(node->key32<key){
			/* if this is the right child and its key is smaller
			 * than the search key then this is the one we want.
			 */
			return node->data;
		} else {
			/* if this is the right child and its key is larger
			 * than the search key then our parent is the one we
			 * want.
			 */
			return node->parent->data;
		}
	}

}
1463
1464
/* Return the parent of a node; NULL for the root. */
static inline emem_tree_node_t *
emem_tree_parent(emem_tree_node_t *node)
{
	return node->parent;
}
1470
1471 static inline emem_tree_node_t *
1472 emem_tree_grandparent(emem_tree_node_t *node)
1473 {
1474         emem_tree_node_t *parent;
1475
1476         parent=emem_tree_parent(node);
1477         if(parent){
1478                 return parent->parent;
1479         }
1480         return NULL;
1481 }
1482
1483 static inline emem_tree_node_t *
1484 emem_tree_uncle(emem_tree_node_t *node)
1485 {
1486         emem_tree_node_t *parent, *grandparent;
1487
1488         parent=emem_tree_parent(node);
1489         if(!parent){
1490                 return NULL;
1491         }
1492         grandparent=emem_tree_parent(parent);
1493         if(!grandparent){
1494                 return NULL;
1495         }
1496         if(parent==grandparent->left){
1497                 return grandparent->right;
1498         }
1499         return grandparent->left;
1500 }
1501
1502 static inline void rb_insert_case1(emem_tree_t *se_tree, emem_tree_node_t *node);
1503 static inline void rb_insert_case2(emem_tree_t *se_tree, emem_tree_node_t *node);
1504
/* Left-rotate around 'node': node's right child takes node's place and
 * node becomes its left child; updates the tree root when needed.
 * Precondition: node->right is non-NULL.  The statement order below is
 * significant — node->parent and node->right are rewritten in place. */
static inline void
rotate_left(emem_tree_t *se_tree, emem_tree_node_t *node)
{
	/* Re-point the parent's (or the root's) link at node->right. */
	if(node->parent){
		if(node->parent->left==node){
			node->parent->left=node->right;
		} else {
			node->parent->right=node->right;
		}
	} else {
		se_tree->tree=node->right;
	}
	/* Relink: old right child becomes node's parent; its former left
	 * subtree becomes node's new right subtree. */
	node->right->parent=node->parent;
	node->parent=node->right;
	node->right=node->right->left;
	if(node->right){
		node->right->parent=node;
	}
	node->parent->left=node;
}
1525
/* Right-rotate around 'node': mirror image of rotate_left().
 * Precondition: node->left is non-NULL.  The statement order below is
 * significant — node->parent and node->left are rewritten in place. */
static inline void
rotate_right(emem_tree_t *se_tree, emem_tree_node_t *node)
{
	/* Re-point the parent's (or the root's) link at node->left. */
	if(node->parent){
		if(node->parent->left==node){
			node->parent->left=node->left;
		} else {
			node->parent->right=node->left;
		}
	} else {
		se_tree->tree=node->left;
	}
	/* Relink: old left child becomes node's parent; its former right
	 * subtree becomes node's new left subtree. */
	node->left->parent=node->parent;
	node->parent=node->left;
	node->left=node->left->right;
	if(node->left){
		node->left->parent=node;
	}
	node->parent->right=node;
}
1546
1547 static inline void
1548 rb_insert_case5(emem_tree_t *se_tree, emem_tree_node_t *node)
1549 {
1550         emem_tree_node_t *grandparent;
1551         emem_tree_node_t *parent;
1552
1553         parent=emem_tree_parent(node);
1554         grandparent=emem_tree_parent(parent);
1555         parent->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1556         grandparent->u.rb_color=EMEM_TREE_RB_COLOR_RED;
1557         if( (node==parent->left) && (parent==grandparent->left) ){
1558                 rotate_right(se_tree, grandparent);
1559         } else {
1560                 rotate_left(se_tree, grandparent);
1561         }
1562 }
1563
/* Red-black insertion case 4: if node and parent are "zig-zag" children
 * of the grandparent, rotate the parent so the chain becomes a straight
 * line, then apply case 5 to the node that moved down a level. */
static inline void
rb_insert_case4(emem_tree_t *se_tree, emem_tree_node_t *node)
{
	emem_tree_node_t *grandparent;
	emem_tree_node_t *parent;

	parent=emem_tree_parent(node);
	grandparent=emem_tree_parent(parent);
	if(!grandparent){
		return;
	}
	if( (node==parent->right) && (parent==grandparent->left) ){
		rotate_left(se_tree, parent);
		/* the old parent is now the node to fix in case 5 */
		node=node->left;
	} else if( (node==parent->left) && (parent==grandparent->right) ){
		rotate_right(se_tree, parent);
		/* the old parent is now the node to fix in case 5 */
		node=node->right;
	}
	rb_insert_case5(se_tree, node);
}
1584
1585 static inline void
1586 rb_insert_case3(emem_tree_t *se_tree, emem_tree_node_t *node)
1587 {
1588         emem_tree_node_t *grandparent;
1589         emem_tree_node_t *parent;
1590         emem_tree_node_t *uncle;
1591
1592         uncle=emem_tree_uncle(node);
1593         if(uncle && (uncle->u.rb_color==EMEM_TREE_RB_COLOR_RED)){
1594                 parent=emem_tree_parent(node);
1595                 parent->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1596                 uncle->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1597                 grandparent=emem_tree_grandparent(node);
1598                 grandparent->u.rb_color=EMEM_TREE_RB_COLOR_RED;
1599                 rb_insert_case1(se_tree, grandparent);
1600         } else {
1601                 rb_insert_case4(se_tree, node);
1602         }
1603 }
1604
1605 static inline void
1606 rb_insert_case2(emem_tree_t *se_tree, emem_tree_node_t *node)
1607 {
1608         emem_tree_node_t *parent;
1609
1610         parent=emem_tree_parent(node);
1611         /* parent is always non-NULL here */
1612         if(parent->u.rb_color==EMEM_TREE_RB_COLOR_BLACK){
1613                 return;
1614         }
1615         rb_insert_case3(se_tree, node);
1616 }
1617
1618 static inline void
1619 rb_insert_case1(emem_tree_t *se_tree, emem_tree_node_t *node)
1620 {
1621         emem_tree_node_t *parent;
1622
1623         parent=emem_tree_parent(node);
1624         if(!parent){
1625                 node->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1626                 return;
1627         }
1628         rb_insert_case2(se_tree, node);
1629 }
1630
/* insert a new node in the tree. if this node matches an already existing node
 * then just replace the data for that node */
void
emem_tree_insert32(emem_tree_t *se_tree, guint32 key, void *data)
{
	emem_tree_node_t *node;

	node=se_tree->tree;

	/* is this the first node ?*/
	if(!node){
		node=se_tree->malloc(sizeof(emem_tree_node_t));
		switch(se_tree->type){
		case EMEM_TREE_TYPE_RED_BLACK:
			/* the root of a red-black tree is always black */
			node->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
			break;
		}
		node->parent=NULL;
		node->left=NULL;
		node->right=NULL;
		node->key32=key;
		node->data=data;
		node->u.is_subtree = EMEM_TREE_NODE_IS_DATA;
		se_tree->tree=node;
		return;
	}

	/* it was not the new root so walk the tree until we find where to
	 * insert this new leaf.
	 */
	while(1){
		/* this node already exists, so just replace the data pointer*/
		if(key==node->key32){
			node->data=data;
			return;
		}
		if(key<node->key32) {
			if(!node->left){
				/* new node to the left */
				emem_tree_node_t *new_node;
				new_node=se_tree->malloc(sizeof(emem_tree_node_t));
				node->left=new_node;
				new_node->parent=node;
				new_node->left=NULL;
				new_node->right=NULL;
				new_node->key32=key;
				new_node->data=data;
				new_node->u.is_subtree=EMEM_TREE_NODE_IS_DATA;
				node=new_node;
				break;
			}
			node=node->left;
			continue;
		}
		if(key>node->key32) {
			if(!node->right){
				/* new node to the right */
				emem_tree_node_t *new_node;
				new_node=se_tree->malloc(sizeof(emem_tree_node_t));
				node->right=new_node;
				new_node->parent=node;
				new_node->left=NULL;
				new_node->right=NULL;
				new_node->key32=key;
				new_node->data=data;
				new_node->u.is_subtree=EMEM_TREE_NODE_IS_DATA;
				node=new_node;
				break;
			}
			node=node->right;
			continue;
		}
	}

	/* node will now point to the newly created node */
	switch(se_tree->type){
	case EMEM_TREE_TYPE_RED_BLACK:
		/* new nodes are inserted red, then the tree is rebalanced */
		node->u.rb_color=EMEM_TREE_RB_COLOR_RED;
		rb_insert_case1(se_tree, node);
		break;
	}
}
1713
/* Look up 'key'; if absent, insert a new node whose data is produced by
 * calling func(ud).  Returns the (existing or new) data pointer.  Note
 * that func() is only invoked when a node is actually created.
 * 'is_subtree' tags the node as plain data or as a nested tree. */
static void *
lookup_or_insert32(emem_tree_t *se_tree, guint32 key, void*(*func)(void*),void* ud, int is_subtree)
{
	emem_tree_node_t *node;

	node=se_tree->tree;

	/* is this the first node ?*/
	if(!node){
		node=se_tree->malloc(sizeof(emem_tree_node_t));
		switch(se_tree->type){
			case EMEM_TREE_TYPE_RED_BLACK:
				/* the root of a red-black tree is always black */
				node->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
				break;
		}
		node->parent=NULL;
		node->left=NULL;
		node->right=NULL;
		node->key32=key;
		node->data= func(ud);
		node->u.is_subtree = is_subtree;
		se_tree->tree=node;
		return node->data;
	}

	/* it was not the new root so walk the tree until we find where to
		* insert this new leaf.
		*/
	while(1){
		/* this node already exists, so just return the data pointer*/
		if(key==node->key32){
			return node->data;
		}
		if(key<node->key32) {
			if(!node->left){
				/* new node to the left */
				emem_tree_node_t *new_node;
				new_node=se_tree->malloc(sizeof(emem_tree_node_t));
				node->left=new_node;
				new_node->parent=node;
				new_node->left=NULL;
				new_node->right=NULL;
				new_node->key32=key;
				new_node->data= func(ud);
				new_node->u.is_subtree = is_subtree;
				node=new_node;
				break;
			}
			node=node->left;
			continue;
		}
		if(key>node->key32) {
			if(!node->right){
				/* new node to the right */
				emem_tree_node_t *new_node;
				new_node=se_tree->malloc(sizeof(emem_tree_node_t));
				node->right=new_node;
				new_node->parent=node;
				new_node->left=NULL;
				new_node->right=NULL;
				new_node->key32=key;
				new_node->data= func(ud);
				new_node->u.is_subtree = is_subtree;
				node=new_node;
				break;
			}
			node=node->right;
			continue;
		}
	}

	/* node will now point to the newly created node */
	switch(se_tree->type){
		case EMEM_TREE_TYPE_RED_BLACK:
			/* new nodes are inserted red, then the tree is rebalanced */
			node->u.rb_color=EMEM_TREE_RB_COLOR_RED;
			rb_insert_case1(se_tree, node);
			break;
	}

	return node->data;
}
1795
/* When the se data is released, this entire tree will disappear as if it
 * never existed, including all metadata associated with the tree.
 */
1799 emem_tree_t *
1800 se_tree_create_non_persistent(int type, const char *name)
1801 {
1802         emem_tree_t *tree_list;
1803
1804         tree_list=se_alloc(sizeof(emem_tree_t));
1805         tree_list->next=NULL;
1806         tree_list->type=type;
1807         tree_list->tree=NULL;
1808         tree_list->name=name;
1809         tree_list->malloc=se_alloc;
1810
1811         return tree_list;
1812 }
1813
/* This tree is permanent and will never be released
 */
1816 emem_tree_t *
1817 pe_tree_create(int type, const char *name)
1818 {
1819         emem_tree_t *tree_list;
1820
1821         tree_list=g_new(emem_tree_t, 1);
1822         tree_list->next=NULL;
1823         tree_list->type=type;
1824         tree_list->tree=NULL;
1825         tree_list->name=name;
1826         tree_list->malloc=(void *(*)(size_t)) g_malloc;
1827
1828         return tree_list;
1829 }
1830
1831 /* create another (sub)tree using the same memory allocation scope
1832  * as the parent tree.
1833  */
1834 static emem_tree_t *
1835 emem_tree_create_subtree(emem_tree_t *parent_tree, const char *name)
1836 {
1837         emem_tree_t *tree_list;
1838
1839         tree_list=parent_tree->malloc(sizeof(emem_tree_t));
1840         tree_list->next=NULL;
1841         tree_list->type=parent_tree->type;
1842         tree_list->tree=NULL;
1843         tree_list->name=name;
1844         tree_list->malloc=parent_tree->malloc;
1845
1846         return tree_list;
1847 }
1848
1849 static void *
1850 create_sub_tree(void* d)
1851 {
1852         emem_tree_t *se_tree = d;
1853         return emem_tree_create_subtree(se_tree, "subtree");
1854 }
1855
1856 /* insert a new node in the tree. if this node matches an already existing node
1857  * then just replace the data for that node */
1858
1859 void
1860 emem_tree_insert32_array(emem_tree_t *se_tree, emem_tree_key_t *key, void *data)
1861 {
1862         emem_tree_t *insert_tree = NULL;
1863         emem_tree_key_t *cur_key;
1864         guint32 i, insert_key32 = 0;
1865
1866         if(!se_tree || !key) return;
1867
1868         for (cur_key = key; cur_key->length > 0; cur_key++) {
1869                 if(cur_key->length > 100) {
1870                         DISSECTOR_ASSERT_NOT_REACHED();
1871                 }
1872
1873                 for (i = 0; i < cur_key->length; i++) {
1874                         /* Insert using the previous key32 */
1875                         if (!insert_tree) {
1876                                 insert_tree = se_tree;
1877                         } else {
1878                                 insert_tree = lookup_or_insert32(insert_tree, insert_key32, create_sub_tree, se_tree, EMEM_TREE_NODE_IS_SUBTREE);
1879                         }
1880                         insert_key32 = cur_key->key[i];
1881                 }
1882         }
1883
1884         if(!insert_tree) {
1885                 /* We didn't get a valid key. Should we return NULL instead? */
1886                 DISSECTOR_ASSERT_NOT_REACHED();
1887         }
1888
1889         emem_tree_insert32(insert_tree, insert_key32, data);
1890
1891 }
1892
1893 void *
1894 emem_tree_lookup32_array(emem_tree_t *se_tree, emem_tree_key_t *key)
1895 {
1896         emem_tree_t *lookup_tree = NULL;
1897         emem_tree_key_t *cur_key;
1898         guint32 i, lookup_key32 = 0;
1899
1900         if(!se_tree || !key) return NULL; /* prevent searching on NULL pointer */
1901
1902         for (cur_key = key; cur_key->length > 0; cur_key++) {
1903                 if(cur_key->length > 100) {
1904                         DISSECTOR_ASSERT_NOT_REACHED();
1905                 }
1906
1907                 for (i = 0; i < cur_key->length; i++) {
1908                         /* Lookup using the previous key32 */
1909                         if (!lookup_tree) {
1910                                 lookup_tree = se_tree;
1911                         } else {
1912                                 lookup_tree = emem_tree_lookup32(lookup_tree, lookup_key32);
1913                                 if (!lookup_tree) {
1914                                         return NULL;
1915                                 }
1916                         }
1917                         lookup_key32 = cur_key->key[i];
1918                 }
1919         }
1920
1921         if(!lookup_tree) {
1922                 /* We didn't get a valid key. Should we return NULL instead? */
1923                 DISSECTOR_ASSERT_NOT_REACHED();
1924         }
1925
1926         return emem_tree_lookup32(lookup_tree, lookup_key32);
1927 }
1928
1929 void *
1930 emem_tree_lookup32_array_le(emem_tree_t *se_tree, emem_tree_key_t *key)
1931 {
1932         emem_tree_t *lookup_tree = NULL;
1933         emem_tree_key_t *cur_key;
1934         guint32 i, lookup_key32 = 0;
1935
1936         if(!se_tree || !key) return NULL; /* prevent searching on NULL pointer */
1937
1938         for (cur_key = key; cur_key->length > 0; cur_key++) {
1939                 if(cur_key->length > 100) {
1940                         DISSECTOR_ASSERT_NOT_REACHED();
1941                 }
1942
1943                 for (i = 0; i < cur_key->length; i++) {
1944                         /* Lookup using the previous key32 */
1945                         if (!lookup_tree) {
1946                                 lookup_tree = se_tree;
1947                         } else {
1948                                 lookup_tree = emem_tree_lookup32_le(lookup_tree, lookup_key32);
1949                                 if (!lookup_tree) {
1950                                         return NULL;
1951                                 }
1952                         }
1953                         lookup_key32 = cur_key->key[i];
1954                 }
1955         }
1956
1957         if(!lookup_tree) {
1958                 /* We didn't get a valid key. Should we return NULL instead? */
1959                 DISSECTOR_ASSERT_NOT_REACHED();
1960         }
1961
1962         return emem_tree_lookup32_le(lookup_tree, lookup_key32);
1963
1964 }
1965
/* Strings are stored as an array of uint32 values containing the string
   characters, with four characters packed into each uint32.
   The first byte of the string is stored as the most significant byte.
   If the string is not a multiple of 4 characters in length, the last
   uint32 containing the string bytes is padded with 0 bytes.
   After the uint32s containing the string, there is one final terminator
   uint32 with the value 0x00000001
*/
1974 void
1975 emem_tree_insert_string(emem_tree_t* se_tree, const gchar* k, void* v, guint32 flags)
1976 {
1977         emem_tree_key_t key[2];
1978         guint32 *aligned=NULL;
1979         guint32 len = (guint32) strlen(k);
1980         guint32 divx = (len+3)/4+1;
1981         guint32 i;
1982         guint32 tmp;
1983
1984         aligned = g_malloc(divx * sizeof (guint32));
1985
1986         /* pack the bytes one one by one into guint32s */
1987         tmp = 0;
1988         for (i = 0;i < len;i++) {
1989                 unsigned char ch;
1990
1991                 ch = (unsigned char)k[i];
1992                 if (flags & EMEM_TREE_STRING_NOCASE) {
1993                         if(isupper(ch)) {
1994                                 ch = tolower(ch);
1995                         }
1996                 }
1997                 tmp <<= 8;
1998                 tmp |= ch;
1999                 if (i%4 == 3) {
2000                         aligned[i/4] = tmp;
2001                         tmp = 0;
2002                 }
2003         }
2004         /* add required padding to the last uint32 */
2005         if (i%4 != 0) {
2006                 while (i%4 != 0) {
2007                         i++;
2008                         tmp <<= 8;
2009                 }
2010                 aligned[i/4-1] = tmp;
2011         }
2012
2013         /* add the terminator */
2014         aligned[divx-1] = 0x00000001;
2015
2016         key[0].length = divx;
2017         key[0].key = aligned;
2018         key[1].length = 0;
2019         key[1].key = NULL;
2020
2021
2022         emem_tree_insert32_array(se_tree, key, v);
2023         g_free(aligned);
2024 }
2025
2026 void *
2027 emem_tree_lookup_string(emem_tree_t* se_tree, const gchar* k, guint32 flags)
2028 {
2029         emem_tree_key_t key[2];
2030         guint32 *aligned=NULL;
2031         guint32 len = (guint) strlen(k);
2032         guint32 divx = (len+3)/4+1;
2033         guint32 i;
2034         guint32 tmp;
2035         void *ret;
2036
2037         aligned = g_malloc(divx * sizeof (guint32));
2038
2039         /* pack the bytes one one by one into guint32s */
2040         tmp = 0;
2041         for (i = 0;i < len;i++) {
2042                 unsigned char ch;
2043
2044                 ch = (unsigned char)k[i];
2045                 if (flags & EMEM_TREE_STRING_NOCASE) {
2046                         if(isupper(ch)) {
2047                                 ch = tolower(ch);
2048                         }
2049                 }
2050                 tmp <<= 8;
2051                 tmp |= ch;
2052                 if (i%4 == 3) {
2053                         aligned[i/4] = tmp;
2054                         tmp = 0;
2055                 }
2056         }
2057         /* add required padding to the last uint32 */
2058         if (i%4 != 0) {
2059                 while (i%4 != 0) {
2060                         i++;
2061                         tmp <<= 8;
2062                 }
2063                 aligned[i/4-1] = tmp;
2064         }
2065
2066         /* add the terminator */
2067         aligned[divx-1] = 0x00000001;
2068
2069         key[0].length = divx;
2070         key[0].key = aligned;
2071         key[1].length = 0;
2072         key[1].key = NULL;
2073
2074
2075         ret = emem_tree_lookup32_array(se_tree, key);
2076         g_free(aligned);
2077         return ret;
2078 }
2079
2080 static gboolean
2081 emem_tree_foreach_nodes(emem_tree_node_t* node, tree_foreach_func callback, void *user_data)
2082 {
2083         gboolean stop_traverse = FALSE;
2084
2085         if (!node)
2086                 return FALSE;
2087
2088         if(node->left) {
2089                 stop_traverse = emem_tree_foreach_nodes(node->left, callback, user_data);
2090                 if (stop_traverse) {
2091                         return TRUE;
2092                 }
2093         }
2094
2095         if (node->u.is_subtree == EMEM_TREE_NODE_IS_SUBTREE) {
2096                 stop_traverse = emem_tree_foreach(node->data, callback, user_data);
2097         } else {
2098                 stop_traverse = callback(node->data, user_data);
2099         }
2100
2101         if (stop_traverse) {
2102                 return TRUE;
2103         }
2104
2105         if(node->right) {
2106                 stop_traverse = emem_tree_foreach_nodes(node->right, callback, user_data);
2107                 if (stop_traverse) {
2108                         return TRUE;
2109                 }
2110         }
2111
2112         return FALSE;
2113 }
2114
2115 gboolean
2116 emem_tree_foreach(emem_tree_t* emem_tree, tree_foreach_func callback, void *user_data)
2117 {
2118         if (!emem_tree)
2119                 return FALSE;
2120
2121         if(!emem_tree->tree)
2122                 return FALSE;
2123
2124         return emem_tree_foreach_nodes(emem_tree->tree, callback, user_data);
2125 }
2126
2127 static void emem_print_subtree(emem_tree_t* emem_tree, guint32 level);
2128
2129 static void
2130 emem_tree_print_nodes(const char *prefix, emem_tree_node_t* node, guint32 level)
2131 {
2132         guint32 i;
2133
2134         if (!node)
2135                 return;
2136
2137         for(i=0;i<level;i++){
2138                 printf("    ");
2139         }
2140
2141         printf("%sNODE:%p parent:%p left:%p right:%p colour:%s key:%u %s:%p\n", prefix,
2142                 (void *)node,(void *)(node->parent),(void *)(node->left),(void *)(node->right),
2143                 (node->u.rb_color)?"Black":"Red",(node->key32),(node->u.is_subtree)?"tree":"data",node->data);
2144         if(node->left)
2145                 emem_tree_print_nodes("L-", node->left, level+1);
2146         if(node->right)
2147                 emem_tree_print_nodes("R-", node->right, level+1);
2148
2149         if (node->u.is_subtree)
2150                 emem_print_subtree(node->data, level+1);
2151 }
2152
2153 static void
2154 emem_print_subtree(emem_tree_t* emem_tree, guint32 level)
2155 {
2156         guint32 i;
2157
2158         if (!emem_tree)
2159                 return;
2160
2161         for(i=0;i<level;i++){
2162                 printf("    ");
2163         }
2164
2165         printf("EMEM tree:%p type:%s name:%s root:%p\n",emem_tree,(emem_tree->type==1)?"RedBlack":"unknown",emem_tree->name,(void *)(emem_tree->tree));
2166         if(emem_tree->tree)
2167                 emem_tree_print_nodes("Root-", emem_tree->tree, level);
2168 }
2169
/* Debug helper: dump an entire tree, including nested subtrees, to stdout. */
void
emem_print_tree(emem_tree_t* emem_tree)
{
	emem_print_subtree(emem_tree, 0);
}
2175
2176 /*
2177  * String buffers
2178  */
2179
2180 /*
2181  * Presumably we're using these routines for building strings for the tree.
2182  * Use ITEM_LABEL_LENGTH as the basis for our default lengths.
2183  */
2184
2185 #define DEFAULT_STRBUF_LEN (ITEM_LABEL_LENGTH / 10)
2186 #define MAX_STRBUF_LEN 65536
2187
2188 static gsize
2189 next_size(gsize cur_alloc_len, gsize wanted_alloc_len, gsize max_alloc_len)
2190 {
2191         if (max_alloc_len < 1 || max_alloc_len > MAX_STRBUF_LEN) {
2192                 max_alloc_len = MAX_STRBUF_LEN;
2193         }
2194
2195         if (cur_alloc_len < 1) {
2196                 cur_alloc_len = DEFAULT_STRBUF_LEN;
2197         }
2198
2199         while (cur_alloc_len < wanted_alloc_len) {
2200                 cur_alloc_len *= 2;
2201         }
2202
2203         return cur_alloc_len < max_alloc_len ? cur_alloc_len : max_alloc_len;
2204 }
2205
2206 static void
2207 ep_strbuf_grow(emem_strbuf_t *strbuf, gsize wanted_alloc_len)
2208 {
2209         gsize new_alloc_len;
2210         gchar *new_str;
2211
2212         if (!strbuf || (wanted_alloc_len <= strbuf->alloc_len) || (strbuf->alloc_len >= strbuf->max_alloc_len)) {
2213                 return;
2214         }
2215
2216         new_alloc_len = next_size(strbuf->alloc_len, wanted_alloc_len, strbuf->max_alloc_len);
2217         new_str = ep_alloc(new_alloc_len);
2218         g_strlcpy(new_str, strbuf->str, new_alloc_len);
2219
2220         strbuf->alloc_len = new_alloc_len;
2221         strbuf->str = new_str;
2222 }
2223
2224 emem_strbuf_t *
2225 ep_strbuf_sized_new(gsize alloc_len, gsize max_alloc_len)
2226 {
2227         emem_strbuf_t *strbuf;
2228
2229         strbuf = ep_alloc(sizeof(emem_strbuf_t));
2230
2231         if ((max_alloc_len == 0) || (max_alloc_len > MAX_STRBUF_LEN))
2232                 max_alloc_len = MAX_STRBUF_LEN;
2233         if (alloc_len == 0)
2234                 alloc_len = 1;
2235         else if (alloc_len > max_alloc_len)
2236                 alloc_len = max_alloc_len;
2237
2238         strbuf->str = ep_alloc(alloc_len);
2239         strbuf->str[0] = '\0';
2240
2241         strbuf->len = 0;
2242         strbuf->alloc_len = alloc_len;
2243         strbuf->max_alloc_len = max_alloc_len;
2244
2245         return strbuf;
2246 }
2247
2248 emem_strbuf_t *
2249 ep_strbuf_new(const gchar *init)
2250 {
2251         emem_strbuf_t *strbuf;
2252
2253         strbuf = ep_strbuf_sized_new(next_size(0, init?strlen(init)+1:0, 0), 0);  /* +1 for NULL terminator */
2254         if (init) {
2255                 gsize full_len;
2256                 full_len = g_strlcpy(strbuf->str, init, strbuf->alloc_len);
2257                 strbuf->len = MIN(full_len, strbuf->alloc_len-1);
2258         }
2259
2260         return strbuf;
2261 }
2262
2263 emem_strbuf_t *
2264 ep_strbuf_new_label(const gchar *init)
2265 {
2266         emem_strbuf_t *strbuf;
2267         gsize full_len;
2268
2269         /* Be optimistic: Allocate default size strbuf string and only      */
2270         /*  request an increase if needed.                                  */
2271         /* XXX: Is it reasonable to assume that much of the usage of        */
2272         /*  ep_strbuf_new_label will have  init==NULL or                    */
2273         /*   strlen(init) < DEFAULT_STRBUF_LEN) ???                         */
2274         strbuf = ep_strbuf_sized_new(DEFAULT_STRBUF_LEN, ITEM_LABEL_LENGTH);
2275
2276         if (!init)
2277                 return strbuf;
2278
2279         /* full_len does not count the trailing '\0'.                       */
2280         full_len = g_strlcpy(strbuf->str, init, strbuf->alloc_len);
2281         if (full_len < strbuf->alloc_len) {
2282                 strbuf->len += full_len;
2283         } else {
2284                 strbuf = ep_strbuf_sized_new(full_len+1, ITEM_LABEL_LENGTH);
2285                 full_len = g_strlcpy(strbuf->str, init, strbuf->alloc_len);
2286                 strbuf->len = MIN(full_len, strbuf->alloc_len-1);
2287         }
2288
2289         return strbuf;
2290 }
2291
2292 emem_strbuf_t *
2293 ep_strbuf_append(emem_strbuf_t *strbuf, const gchar *str)
2294 {
2295         gsize add_len, full_len;
2296
2297         if (!strbuf || !str || str[0] == '\0') {
2298                 return strbuf;
2299         }
2300
2301         /* Be optimistic; try the g_strlcpy first & see if enough room.                 */
2302         /* Note: full_len doesn't count the trailing '\0'; add_len does allow for same  */
2303         add_len = strbuf->alloc_len - strbuf->len;
2304         full_len = g_strlcpy(&strbuf->str[strbuf->len], str, add_len);
2305         if (full_len < add_len) {
2306                 strbuf->len += full_len;
2307         } else {
2308                 strbuf->str[strbuf->len] = '\0'; /* end string at original length again */
2309                 ep_strbuf_grow(strbuf, strbuf->len + full_len + 1);
2310                 add_len = strbuf->alloc_len - strbuf->len;
2311                 full_len = g_strlcpy(&strbuf->str[strbuf->len], str, add_len);
2312                 strbuf->len += MIN(add_len-1, full_len);
2313         }
2314
2315         return strbuf;
2316 }
2317
2318 void
2319 ep_strbuf_append_vprintf(emem_strbuf_t *strbuf, const gchar *format, va_list ap)
2320 {
2321         va_list ap2;
2322         gsize add_len, full_len;
2323
2324         G_VA_COPY(ap2, ap);
2325
2326         /* Be optimistic; try the g_vsnprintf first & see if enough room.               */
2327         /* Note: full_len doesn't count the trailing '\0'; add_len does allow for same. */
2328         add_len = strbuf->alloc_len - strbuf->len;
2329         full_len = g_vsnprintf(&strbuf->str[strbuf->len], (gulong) add_len, format, ap);
2330         if (full_len < add_len) {
2331                 strbuf->len += full_len;
2332         } else {
2333                 strbuf->str[strbuf->len] = '\0'; /* end string at original length again */
2334                 ep_strbuf_grow(strbuf, strbuf->len + full_len + 1);
2335                 add_len = strbuf->alloc_len - strbuf->len;
2336                 full_len = g_vsnprintf(&strbuf->str[strbuf->len], (gulong) add_len, format, ap2);
2337                 strbuf->len += MIN(add_len-1, full_len);
2338         }
2339
2340         va_end(ap2);
2341 }
2342
/* Append printf-style formatted text; thin wrapper over the va_list variant. */
void
ep_strbuf_append_printf(emem_strbuf_t *strbuf, const gchar *format, ...)
{
	va_list ap;

	va_start(ap, format);
	ep_strbuf_append_vprintf(strbuf, format, ap);
	va_end(ap);
}
2352
2353 void
2354 ep_strbuf_printf(emem_strbuf_t *strbuf, const gchar *format, ...)
2355 {
2356         va_list ap;
2357         if (!strbuf) {
2358                 return;
2359         }
2360
2361         strbuf->len = 0;
2362
2363         va_start(ap, format);
2364         ep_strbuf_append_vprintf(strbuf, format, ap);
2365         va_end(ap);
2366 }
2367
2368 emem_strbuf_t *
2369 ep_strbuf_append_c(emem_strbuf_t *strbuf, const gchar c)
2370 {
2371         if (!strbuf) {
2372                 return strbuf;
2373         }
2374
2375         /* +1 for the new character & +1 for the trailing '\0'. */
2376         if (strbuf->alloc_len < strbuf->len + 1 + 1) {
2377                 ep_strbuf_grow(strbuf, strbuf->len + 1 + 1);
2378         }
2379         if (strbuf->alloc_len >= strbuf->len + 1 + 1) {
2380                 strbuf->str[strbuf->len] = c;
2381                 strbuf->len++;
2382                 strbuf->str[strbuf->len] = '\0';
2383         }
2384
2385         return strbuf;
2386 }
2387
2388 emem_strbuf_t *
2389 ep_strbuf_append_unichar(emem_strbuf_t *strbuf, const gunichar c)
2390 {
2391         gchar buf[6];
2392         gint charlen;
2393
2394         if (!strbuf) {
2395                 return strbuf;
2396         }
2397
2398         charlen = g_unichar_to_utf8(c, buf);
2399
2400         /* +charlen for the new character & +1 for the trailing '\0'. */
2401         if (strbuf->alloc_len < strbuf->len + charlen + 1) {
2402                 ep_strbuf_grow(strbuf, strbuf->len + charlen + 1);
2403         }
2404         if (strbuf->alloc_len >= strbuf->len + charlen + 1) {
2405                 memcpy(&strbuf->str[strbuf->len], buf, charlen);
2406                 strbuf->len += charlen;
2407                 strbuf->str[strbuf->len] = '\0';
2408         }
2409
2410         return strbuf;
2411 }
2412
2413 emem_strbuf_t *
2414 ep_strbuf_truncate(emem_strbuf_t *strbuf, gsize len)
2415 {
2416         if (!strbuf || len >= strbuf->len) {
2417                 return strbuf;
2418         }
2419
2420         strbuf->str[len] = '\0';
2421         strbuf->len = len;
2422
2423         return strbuf;
2424 }
2425
2426 /*
2427  * Editor modelines
2428  *
2429  * Local Variables:
2430  * c-basic-offset: 8
2431  * tab-width: 8
2432  * indent-tabs-mode: t
2433  * End:
2434  *
2435  * ex: set shiftwidth=8 tabstop=8 noexpandtab:
2436  * :indentSize=8:tabSize=8:noTabs=false:
2437  */