Fix some dead code and zero division issues found by Clang scan-build.
[metze/wireshark/wip.git] / epan / emem.c
1 /* emem.c
2  * Wireshark memory management and garbage collection functions
3  * Ronnie Sahlberg 2005
4  *
5  * $Id$
6  *
7  * Wireshark - Network traffic analyzer
8  * By Gerald Combs <gerald@wireshark.org>
9  * Copyright 1998 Gerald Combs
10  *
11  * This program is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU General Public License
13  * as published by the Free Software Foundation; either version 2
14  * of the License, or (at your option) any later version.
15  *
16  * This program is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19  * GNU General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this program; if not, write to the Free Software
23  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
24  */
25 #ifdef HAVE_CONFIG_H
26 #include "config.h"
27 #endif
28
29 #include <stdio.h>
30 #include <stdlib.h>
31 #include <string.h>
32 #include <stdarg.h>
33 #include <ctype.h>
34
35 #include <time.h>
36 #ifdef HAVE_SYS_TIME_H
37 #include <sys/time.h>
38 #endif
39
40 #ifdef HAVE_UNISTD_H
41 #include <unistd.h>
42 #endif
43
44 #include <glib.h>
45
46 #include "proto.h"
47 #include "emem.h"
48
49 #ifdef _WIN32
50 #include <windows.h>    /* VirtualAlloc, VirtualProtect */
51 #include <process.h>    /* getpid */
52 #endif
53
54 /* Print out statistics about our memory allocations? */
55 /*#define SHOW_EMEM_STATS*/
56
57 /* Do we want to use guard pages, if available? */
58 #define WANT_GUARD_PAGES 1
59
60 #ifdef WANT_GUARD_PAGES
61 /* Add guard pages at each end of our allocated memory */
62 #if defined(HAVE_SYSCONF) && defined(HAVE_MMAP) && defined(HAVE_MPROTECT) && defined(HAVE_STDINT_H)
63 #include <stdint.h>
64 #ifdef HAVE_SYS_TYPES_H
65 #include <sys/types.h>
66 #endif
67 #include <sys/mman.h>
68 #if defined(MAP_ANONYMOUS)
69 #define ANON_PAGE_MODE  (MAP_ANONYMOUS|MAP_PRIVATE)
70 #elif defined(MAP_ANON)
71 #define ANON_PAGE_MODE  (MAP_ANON|MAP_PRIVATE)
72 #else
73 #define ANON_PAGE_MODE  (MAP_PRIVATE)   /* have to map /dev/zero */
74 #define NEED_DEV_ZERO
75 #endif
76 #ifdef NEED_DEV_ZERO
77 #include <fcntl.h>
78 static int dev_zero_fd;
79 #define ANON_FD dev_zero_fd
80 #else
81 #define ANON_FD -1
82 #endif
83 #define USE_GUARD_PAGES 1
84 #endif
85 #endif
86
87 /* When required, allocate more memory from the OS in chunks of this size */
88 #define EMEM_PACKET_CHUNK_SIZE (10 * 1024 * 1024)
89
90 #define EMEM_CANARY_SIZE 8
91 #define EMEM_CANARY_DATA_SIZE (EMEM_CANARY_SIZE * 2 - 1)
92
93 typedef struct _emem_chunk_t {
94         struct _emem_chunk_t *next;
95         char            *buf;
96         unsigned int    amount_free_init;
97         unsigned int    amount_free;
98         unsigned int    free_offset_init;
99         unsigned int    free_offset;
100         void            *canary_last;
101 } emem_chunk_t;
102
103 typedef struct _emem_header_t {
104         emem_chunk_t *free_list;
105         emem_chunk_t *used_list;
106
107         emem_tree_t *trees;             /* only used by se_mem allocator */
108
109         guint8 canary[EMEM_CANARY_DATA_SIZE];
110         void *(*memory_alloc)(size_t size, struct _emem_header_t *);
111
112         /*
113          * Tools like Valgrind and ElectricFence don't work well with memchunks.
114          * Export the following environment variables to make {ep|se}_alloc() allocate each
115          * object individually.
116          *
117          * WIRESHARK_DEBUG_EP_NO_CHUNKS
118          * WIRESHARK_DEBUG_SE_NO_CHUNKS
119          */
120         gboolean debug_use_chunks;
121
122         /* Do we want to use canaries?
123          * Export the following environment variables to disable/enable canaries
124          *
125          * WIRESHARK_DEBUG_EP_NO_CANARY
126          * For SE memory, use of the canary is off by default, as the memory overhead
127          * is considerable.
128          * WIRESHARK_DEBUG_SE_USE_CANARY
129          */
130         gboolean debug_use_canary;
131
132         /*  Do we want to verify no one is using a pointer to an ep_ or se_
133          *  allocated thing where they shouldn't be?
134          *
135          * Export WIRESHARK_EP_VERIFY_POINTERS or WIRESHARK_SE_VERIFY_POINTERS
136          * to turn this on.
137          */
138         gboolean debug_verify_pointers;
139
140 } emem_header_t;
141
142 static emem_header_t ep_packet_mem;
143 static emem_header_t se_packet_mem;
144
145 /*
146  *  Memory scrubbing is expensive but can be useful to ensure we don't:
147  *    - use memory before initializing it
148  *    - use memory after freeing it
149  *  Export WIRESHARK_DEBUG_SCRUB_MEMORY to turn it on.
150  */
151 static gboolean debug_use_memory_scrubber = FALSE;
152
153 #if defined (_WIN32)
154 static SYSTEM_INFO sysinfo;
155 static OSVERSIONINFO versinfo;
156 static int pagesize;
157 #elif defined(USE_GUARD_PAGES)
158 static intptr_t pagesize;
159 #endif /* _WIN32 / USE_GUARD_PAGES */
160
161 static void *emem_alloc_chunk(size_t size, emem_header_t *mem);
162 static void *emem_alloc_glib(size_t size, emem_header_t *mem);
163
164 /*
165  * Set a canary value to be placed between memchunks.
166  */
167 static void
168 emem_canary_init(guint8 *canary)
169 {
170         int i;
171         static GRand *rand_state = NULL;
172
173         if (rand_state == NULL) {
174                 rand_state = g_rand_new();
175         }
176         for (i = 0; i < EMEM_CANARY_DATA_SIZE; i ++) {
177                 canary[i] = (guint8) g_rand_int_range(rand_state, 1, 0x100);
178         }
179         return;
180 }
181
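/*
 * Canary layout (as written by emem_alloc_chunk() below): every allocation in
 * a chunk is followed by pad-1 random canary bytes, a NUL byte, and a stored
 * pointer to the previous canary in the same chunk; chunk->canary_last points
 * at the most recent one.  emem_canary_next() verifies one canary against the
 * pool's reference bytes and follows that chain, returning the previous
 * canary's address (NULL at the end of the chain) or (void *) -1 if the bytes
 * do not match, i.e. the memory has been overwritten.
 */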
182 static void *
183 emem_canary_next(guint8 *mem_canary, guint8 *canary, int *len)
184 {
185         void *ptr;
186         int i;
187
188         for (i = 0; i < EMEM_CANARY_SIZE-1; i++)
189                 if (mem_canary[i] != canary[i])
190                         return (void *) -1;
191
192         for (; i < EMEM_CANARY_DATA_SIZE; i++) {
193                 if (canary[i] == '\0') {
194                         memcpy(&ptr, &canary[i+1], sizeof(void *));
195
196                         if (len)
197                                 *len = i + 1 + sizeof(void *);
198                         return ptr;
199                 }
200
201                 if (mem_canary[i] != canary[i])
202                         return (void *) -1;
203         }
204
205         return (void *) -1;
206 }
207
208 /*
209  * Given an allocation size, return the amount of padding needed for
210  * the canary value.
211  */
212 static guint8
213 emem_canary_pad (size_t allocation)
214 {
215         guint8 pad;
216
217         pad = EMEM_CANARY_SIZE - (allocation % EMEM_CANARY_SIZE);
218         if (pad < EMEM_CANARY_SIZE)
219                 pad += EMEM_CANARY_SIZE;
220
221         return pad;
222 }
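/* Worked example (illustrative): for a 13-byte allocation, 13 % 8 == 5, so
 * pad starts at 3 and is bumped to 11; for a 16-byte allocation pad stays at
 * 8.  The result is always in the range 8..15, i.e. at least EMEM_CANARY_SIZE
 * bytes are reserved for the canary.
 */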
223
224 /* used for debugging canaries, will block */
225 #ifdef DEBUG_INTENSE_CANARY_CHECKS
226 gboolean intense_canary_checking = FALSE;
227
228 /*  used to intensively check ep canaries
229  */
230 void
231 ep_check_canary_integrity(const char* fmt, ...)
232 {
233         va_list ap;
234         static gchar there[128] = {
235                 'L','a','u','n','c','h',0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
236                 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
237                 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
238                 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 };
239         gchar here[128];
240         emem_chunk_t* npc = NULL;
241
242         if (! intense_canary_checking ) return;
243
244         va_start(ap,fmt);
245         g_vsnprintf(here, sizeof(here), fmt, ap);
246         va_end(ap);
247
248         for (npc = ep_packet_mem.free_list; npc != NULL; npc = npc->next) {
249                 void *canary_next = npc->canary_last;
250
251                 while (canary_next != NULL) {
252                         canary_next = emem_canary_next(ep_packet_mem.canary, canary_next, NULL);
253                         /* XXX, check if canary_last is inside allocated memory? */
254
255                         if (npc->canary_last == (void *) -1)
256                                 g_error("Per-packet memory corrupted\nbetween: %s\nand: %s", there, here);
257                 }
258         }
259
260         g_strlcpy(there, here, sizeof(there));
261 }
262 #endif
263
264 static void
265 emem_init_chunk(emem_header_t *mem)
266 {
267         if (mem->debug_use_canary)
268                 emem_canary_init(mem->canary);
269
270         if (mem->debug_use_chunks)
271                 mem->memory_alloc = emem_alloc_chunk;
272         else
273                 mem->memory_alloc = emem_alloc_glib;
274 }
275
276
277 /* Initialize the packet-lifetime memory allocation pool.
278  * This function should be called only once when Wireshark or TShark starts
279  * up.
280  */
281 static void
282 ep_init_chunk(void)
283 {
284         ep_packet_mem.free_list=NULL;
285         ep_packet_mem.used_list=NULL;
286         ep_packet_mem.trees=NULL;       /* not used by this allocator */
287
288         ep_packet_mem.debug_use_chunks = (getenv("WIRESHARK_DEBUG_EP_NO_CHUNKS") == NULL);
289         ep_packet_mem.debug_use_canary = ep_packet_mem.debug_use_chunks && (getenv("WIRESHARK_DEBUG_EP_NO_CANARY") == NULL);
290         ep_packet_mem.debug_verify_pointers = (getenv("WIRESHARK_EP_VERIFY_POINTERS") != NULL);
291
292 #ifdef DEBUG_INTENSE_CANARY_CHECKS
293         intense_canary_checking = (getenv("WIRESHARK_DEBUG_EP_INTENSE_CANARY") != NULL);
294 #endif
295
296         emem_init_chunk(&ep_packet_mem);
297 }
298
299 /* Initialize the capture-lifetime memory allocation pool.
300  * This function should be called only once when Wireshark or TShark starts
301  * up.
302  */
303 static void
304 se_init_chunk(void)
305 {
306         se_packet_mem.free_list = NULL;
307         se_packet_mem.used_list = NULL;
308         se_packet_mem.trees = NULL;
309
310         se_packet_mem.debug_use_chunks = (getenv("WIRESHARK_DEBUG_SE_NO_CHUNKS") == NULL);
311         se_packet_mem.debug_use_canary = se_packet_mem.debug_use_chunks && (getenv("WIRESHARK_DEBUG_SE_USE_CANARY") != NULL);
312         se_packet_mem.debug_verify_pointers = (getenv("WIRESHARK_SE_VERIFY_POINTERS") != NULL);
313
314         emem_init_chunk(&se_packet_mem);
315 }
316
317 /*  Initialize all the allocators here.
318  *  This function should be called only once when Wireshark or TShark starts
319  *  up.
320  */
321 void
322 emem_init(void)
323 {
324         ep_init_chunk();
325         se_init_chunk();
326
327         if (getenv("WIRESHARK_DEBUG_SCRUB_MEMORY"))
328                 debug_use_memory_scrubber  = TRUE;
329
330 #if defined (_WIN32)
331         /* Set up our guard page info for Win32 */
332         GetSystemInfo(&sysinfo);
333         pagesize = sysinfo.dwPageSize;
334
335         /* calling GetVersionEx using the OSVERSIONINFO structure.
336          * OSVERSIONINFOEX requires Win NT4 with SP6 or newer NT Versions.
337          * OSVERSIONINFOEX will fail on Win9x and older NT Versions.
338          * See also:
339          * http://msdn.microsoft.com/library/en-us/sysinfo/base/getversionex.asp
340          * http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfo_str.asp
341          * http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfoex_str.asp
342          */
343         versinfo.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
344         GetVersionEx(&versinfo);
345
346 #elif defined(USE_GUARD_PAGES)
347         pagesize = sysconf(_SC_PAGESIZE);
348 #ifdef NEED_DEV_ZERO
349         dev_zero_fd = ws_open("/dev/zero", O_RDWR);
350         g_assert(dev_zero_fd != -1);
351 #endif
352 #endif /* _WIN32 / USE_GUARD_PAGES */
353 }
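/*
 * Typical lifecycle (illustrative sketch, not a complete program):
 *
 *      emem_init();            called once at startup
 *      p = ep_alloc(64);       packet-lifetime scratch memory
 *      ep_free_all();          recycle the ep pool after each packet
 *      se_free_all();          recycle the se pool when the capture is closed
 */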
354
355 #ifdef SHOW_EMEM_STATS
356 #define NUM_ALLOC_DIST 10
357 static guint allocations[NUM_ALLOC_DIST] = { 0 };
358 static guint total_no_chunks = 0;
359
360 static void
361 print_alloc_stats()
362 {
363         guint num_chunks = 0;
364         guint num_allocs = 0;
365         guint total_used = 0;
366         guint total_allocation = 0;
367         guint total_free = 0;
368         guint used_for_canaries = 0;
369         guint total_headers;
370         guint i;
371         emem_chunk_t *chunk;
372         guint total_space_allocated_from_os, total_space_wasted;
373         gboolean ep_stat=TRUE;
374
375         fprintf(stderr, "\n-------- EP allocator statistics --------\n");
376         fprintf(stderr, "%s chunks, %s canaries, %s memory scrubber\n",
377                ep_packet_mem.debug_use_chunks ? "Using" : "Not using",
378                ep_packet_mem.debug_use_canary ? "using" : "not using",
379                debug_use_memory_scrubber ? "using" : "not using");
380
381         if (! (ep_packet_mem.free_list || ep_packet_mem.used_list)) {
382                 fprintf(stderr, "No memory allocated\n");
383                 ep_stat = FALSE;
384         }
385         if (ep_packet_mem.debug_use_chunks && ep_stat) {
386                 /* Nothing interesting without chunks */
387                 /*  Only look at the used_list since those chunks are fully
388                  *  used.  Looking at the free list would skew our view of what
389                  *  we have wasted.
390                  */
391                 for (chunk = ep_packet_mem.used_list; chunk; chunk = chunk->next) {
392                         num_chunks++;
393                         total_used += (chunk->amount_free_init - chunk->amount_free);
394                         total_allocation += chunk->amount_free_init;
395                         total_free += chunk->amount_free;
396                 }
397                 if (num_chunks > 0) {
398                         fprintf (stderr, "\n");
399                         fprintf (stderr, "\n---- Buffer space ----\n");
400                         fprintf (stderr, "\tChunk allocation size: %10u\n", EMEM_PACKET_CHUNK_SIZE);
401                         fprintf (stderr, "\t*    Number of chunks: %10u\n", num_chunks);
402                         fprintf (stderr, "\t-------------------------------------------\n");
403                         fprintf (stderr, "\t= %u (%u including guard pages) total space used for buffers\n",
404                         total_allocation, EMEM_PACKET_CHUNK_SIZE * num_chunks);
405                         fprintf (stderr, "\t-------------------------------------------\n");
406                         total_space_allocated_from_os = total_allocation
407                                 + sizeof(emem_chunk_t) * num_chunks;
408                         fprintf (stderr, "Total allocated from OS: %u\n\n",
409                                 total_space_allocated_from_os);
410                 }else{
411                         fprintf (stderr, "No fully used chunks, nothing to do\n");
412                 }
413                 /* Reset stats */
414                 num_chunks = 0;
415                 num_allocs = 0;
416                 total_used = 0;
417                 total_allocation = 0;
418                 total_free = 0;
419                 used_for_canaries = 0;
420         }
421
422
423         fprintf(stderr, "\n-------- SE allocator statistics --------\n");
424         fprintf(stderr, "Total number of chunk allocations %u\n",
425                 total_no_chunks);
426         fprintf(stderr, "%s chunks, %s canaries\n",
427                se_packet_mem.debug_use_chunks ? "Using" : "Not using",
428                se_packet_mem.debug_use_canary ? "using" : "not using");
429
430         if (! (se_packet_mem.free_list || se_packet_mem.used_list)) {
431                 fprintf(stderr, "No memory allocated\n");
432                 return;
433         }
434
435         if (!se_packet_mem.debug_use_chunks )
436                 return; /* Nothing interesting without chunks?? */
437
438         /*  Only look at the used_list since those chunks are fully used.
439          *  Looking at the free list would skew our view of what we have wasted.
440          */
441         for (chunk = se_packet_mem.used_list; chunk; chunk = chunk->next) {
442                 num_chunks++;
443                 total_used += (chunk->amount_free_init - chunk->amount_free);
444                 total_allocation += chunk->amount_free_init;
445                 total_free += chunk->amount_free;
446
447                 if (se_packet_mem.debug_use_canary){
448                         void *ptr = chunk->canary_last;
449                         int len;
450
451                         while (ptr != NULL) {
452                                 ptr = emem_canary_next(se_packet_mem.canary, ptr, &len);
453
454                                 if (ptr == (void *) -1)
455                                         g_error("Memory corrupted");
456                                 used_for_canaries += len;
457                         }
458                 }
459         }
460
461         if (num_chunks == 0) {
462
463                 fprintf (stderr, "No fully used chunks, nothing to do\n");
464                 return;
465         }
466
467         fprintf (stderr, "\n");
468         fprintf (stderr, "---------- Allocations from the OS ----------\n");
469         fprintf (stderr, "---- Headers ----\n");
470         fprintf (stderr, "\t(    Chunk header size: %10lu\n",
471                  sizeof(emem_chunk_t));
472         fprintf (stderr, "\t*     Number of chunks: %10u\n", num_chunks);
473         fprintf (stderr, "\t-------------------------------------------\n");
474
475         total_headers = sizeof(emem_chunk_t) * num_chunks;
476         fprintf (stderr, "\t= %u bytes used for headers\n", total_headers);
477         fprintf (stderr, "\n---- Buffer space ----\n");
478         fprintf (stderr, "\tChunk allocation size: %10u\n",
479                  EMEM_PACKET_CHUNK_SIZE);
480         fprintf (stderr, "\t*    Number of chunks: %10u\n", num_chunks);
481         fprintf (stderr, "\t-------------------------------------------\n");
482         fprintf (stderr, "\t= %u (%u including guard pages) bytes used for buffers\n",
483                 total_allocation, EMEM_PACKET_CHUNK_SIZE * num_chunks);
484         fprintf (stderr, "\t-------------------------------------------\n");
485         total_space_allocated_from_os = (EMEM_PACKET_CHUNK_SIZE * num_chunks)
486                                         + total_headers;
487         fprintf (stderr, "Total bytes allocated from the OS: %u\n\n",
488                 total_space_allocated_from_os);
489
490         for (i = 0; i < NUM_ALLOC_DIST; i++)
491                 num_allocs += allocations[i];
492
493         fprintf (stderr, "---------- Allocations from the SE pool ----------\n");
494         fprintf (stderr, "                Number of SE allocations: %10u\n",
495                  num_allocs);
496         fprintf (stderr, "             Bytes used (incl. canaries): %10u\n",
497                  total_used);
498         fprintf (stderr, "                 Bytes used for canaries: %10u\n",
499                  used_for_canaries);
500         fprintf (stderr, "Bytes unused (wasted, excl. guard pages): %10u\n",
501                  total_allocation - total_used);
502         fprintf (stderr, "Bytes unused (wasted, incl. guard pages): %10u\n\n",
503                  total_space_allocated_from_os - total_used);
504
505         fprintf (stderr, "---------- Statistics ----------\n");
506         fprintf (stderr, "Average SE allocation size (incl. canaries): %6.2f\n",
507                 (float)total_used/(float)num_allocs);
508         fprintf (stderr, "Average SE allocation size (excl. canaries): %6.2f\n",
509                 (float)(total_used - used_for_canaries)/(float)num_allocs);
510         fprintf (stderr, "        Average wasted bytes per allocation: %6.2f\n",
511                 (total_allocation - total_used)/(float)num_allocs);
512         total_space_wasted = (total_allocation - total_used)
513                 + (sizeof(emem_chunk_t));
514         fprintf (stderr, " Space used for headers + unused allocation: %8u\n",
515                 total_space_wasted);
516         fprintf (stderr, "--> %% overhead/waste: %4.2f\n",
517                 100 * (float)total_space_wasted/(float)total_space_allocated_from_os);
518
519         fprintf (stderr, "\nAllocation distribution (sizes include canaries):\n");
520         for (i = 0; i < (NUM_ALLOC_DIST-1); i++)
521                 fprintf (stderr, "size < %5d: %8u\n", 32<<i, allocations[i]);
522         fprintf (stderr, "size > %5d: %8u\n", 32<<i, allocations[i]);
523 }
524 #endif
525
526 static gboolean
527 emem_verify_pointer(emem_header_t *hdr, const void *ptr)
528 {
529         const gchar *cptr = ptr;
530         emem_chunk_t *used_list[2];
531         guint8 used_list_idx;
532         emem_chunk_t *chunk;
533
534         used_list[0] = hdr->free_list;
535         used_list[1] = hdr->used_list;
536
537         for (used_list_idx=0; used_list_idx < G_N_ELEMENTS(used_list); ++used_list_idx) {
538                 chunk = used_list[used_list_idx];
539                 for ( ; chunk ; chunk = chunk->next) {
540                         if (cptr >= (chunk->buf + chunk->free_offset_init) &&
541                                 cptr < (chunk->buf + chunk->free_offset))
542                                 return TRUE;
543                 }
544         }
545
546         return FALSE;
547 }
548
549 gboolean
550 ep_verify_pointer(const void *ptr)
551 {
552         if (ep_packet_mem.debug_verify_pointers)
553                 return emem_verify_pointer(&ep_packet_mem, ptr);
554         else
555                 return FALSE;
556 }
557
558 gboolean
559 se_verify_pointer(const void *ptr)
560 {
561         if (se_packet_mem.debug_verify_pointers)
562                 return emem_verify_pointer(&se_packet_mem, ptr);
563         else
564                 return FALSE;
565 }
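/* Note: both wrappers above return FALSE whenever the corresponding
 * WIRESHARK_EP_VERIFY_POINTERS / WIRESHARK_SE_VERIFY_POINTERS environment
 * variable is not set, so callers may only treat TRUE as meaningful;
 * FALSE does not prove the pointer lies outside the pool.
 */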
566
567 static void
568 emem_scrub_memory(char *buf, size_t size, gboolean alloc)
569 {
570         guint scrubbed_value;
571         guint offset;
572
573         if (!debug_use_memory_scrubber)
574                 return;
575
576         if (alloc) /* this memory is being allocated */
577                 scrubbed_value = 0xBADDCAFE;
578         else /* this memory is being freed */
579                 scrubbed_value = 0xDEADBEEF;
580
581         /*  We shouldn't need to check the alignment of the starting address
582          *  since this is malloc'd memory (or 'pagesize' bytes into malloc'd
583          *  memory).
584          */
585
586         /* XXX - We might want to use memset here in order to avoid problems on
587          * alignment-sensitive platforms, e.g.
588          * http://stackoverflow.com/questions/108866/is-there-memset-that-accepts-integers-larger-than-char
589          */
590
591         for (offset = 0; offset + sizeof(guint) <= size; offset += sizeof(guint))
592                 *(guint*)(buf+offset) = scrubbed_value;
593
594         /* Initialize the last bytes, if any */
595         if (offset < size) {
596                 *(guint8*)(buf+offset) = scrubbed_value >> 24;
597                 offset++;
598                 if (offset < size) {
599                         *(guint8*)(buf+offset) = (scrubbed_value >> 16) & 0xFF;
600                         offset++;
601                         if (offset < size) {
602                                 *(guint8*)(buf+offset) = (scrubbed_value >> 8) & 0xFF;
603                         }
604                 }
605         }
606
607
608 }
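/* Note: when WIRESHARK_DEBUG_SCRUB_MEMORY is set, freshly allocated memory is
 * filled with 0xBADDCAFE and freed memory with 0xDEADBEEF, so
 * use-before-initialization and use-after-free bugs show up as easily
 * recognizable patterns in a debugger or crash dump.
 */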
609
610 static emem_chunk_t *
611 emem_create_chunk(void) {
612 #if defined (_WIN32)
613         BOOL ret;
614         char *buf_end, *prot1, *prot2;
615         DWORD oldprot;
616 #elif defined(USE_GUARD_PAGES)
617         int ret;
618         char *buf_end, *prot1, *prot2;
619 #endif /* _WIN32 / USE_GUARD_PAGES */
620         emem_chunk_t *npc;
621
622         npc = g_new(emem_chunk_t, 1);
623         npc->next = NULL;
624         npc->canary_last = NULL;
625
626 #if defined (_WIN32)
627         /*
628          * MSDN documents VirtualAlloc/VirtualProtect at
629          * http://msdn.microsoft.com/library/en-us/memory/base/creating_guard_pages.asp
630          */
631
632         /* XXX - is MEM_COMMIT|MEM_RESERVE correct? */
633         npc->buf = VirtualAlloc(NULL, EMEM_PACKET_CHUNK_SIZE,
634                 MEM_COMMIT|MEM_RESERVE, PAGE_READWRITE);
635
636         if (npc->buf == NULL) {
637                 g_free(npc);
638                 THROW(OutOfMemoryError);
639         }
640
641 #elif defined(USE_GUARD_PAGES)
642         npc->buf = mmap(NULL, EMEM_PACKET_CHUNK_SIZE,
643                 PROT_READ|PROT_WRITE, ANON_PAGE_MODE, ANON_FD, 0);
644
645         if (npc->buf == MAP_FAILED) {
646                 g_free(npc);
647                 THROW(OutOfMemoryError);
648         }
649
650 #else /* Is there a draft in here? */
651         npc->buf = g_malloc(EMEM_PACKET_CHUNK_SIZE);
652         /* g_malloc() can't fail */
653 #endif
654
655 #ifdef SHOW_EMEM_STATS
656         total_no_chunks++;
657 #endif
658
659 #if defined (_WIN32)
660         buf_end = npc->buf + EMEM_PACKET_CHUNK_SIZE;
661
662         /* Align our guard pages on page-sized boundaries */
663         prot1 = (char *) ((((int) npc->buf + pagesize - 1) / pagesize) * pagesize);
664         prot2 = (char *) ((((int) buf_end - (1 * pagesize)) / pagesize) * pagesize);
665
666         ret = VirtualProtect(prot1, pagesize, PAGE_NOACCESS, &oldprot);
667         g_assert(ret != 0 || versinfo.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS);
668         ret = VirtualProtect(prot2, pagesize, PAGE_NOACCESS, &oldprot);
669         g_assert(ret != 0 || versinfo.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS);
670
671         npc->amount_free_init = (unsigned int) (prot2 - prot1 - pagesize);
672         npc->free_offset_init = (unsigned int) (prot1 - npc->buf) + pagesize;
673 #elif defined(USE_GUARD_PAGES)
674         buf_end = npc->buf + EMEM_PACKET_CHUNK_SIZE;
675
676         /* Align our guard pages on page-sized boundaries */
677         prot1 = (char *) ((((intptr_t) npc->buf + pagesize - 1) / pagesize) * pagesize);
678         prot2 = (char *) ((((intptr_t) buf_end - (1 * pagesize)) / pagesize) * pagesize);
679
680         ret = mprotect(prot1, pagesize, PROT_NONE);
681         g_assert(ret != -1);
682         ret = mprotect(prot2, pagesize, PROT_NONE);
683         g_assert(ret != -1);
684
685         npc->amount_free_init = prot2 - prot1 - pagesize;
686         npc->free_offset_init = (prot1 - npc->buf) + pagesize;
687 #else
688         npc->amount_free_init = EMEM_PACKET_CHUNK_SIZE;
689         npc->free_offset_init = 0;
690 #endif /* USE_GUARD_PAGES */
691
692         npc->amount_free = npc->amount_free_init;
693         npc->free_offset = npc->free_offset_init;
694         return npc;
695 }
696
697 static void *
698 emem_alloc_chunk(size_t size, emem_header_t *mem)
699 {
700         void *buf;
701
702         size_t asize = size;
703         gboolean use_canary = mem->debug_use_canary;
704         guint8 pad;
705         emem_chunk_t *free_list;
706
707         /* Round up to an 8 byte boundary. Make sure we have at least
708          * 8 pad bytes for our canary.
709          */
710          if (use_canary) {
711                 pad = emem_canary_pad(asize);
712                 asize += sizeof(void *);
713         } else
714                 pad = (G_MEM_ALIGN - (asize & (G_MEM_ALIGN-1))) & (G_MEM_ALIGN-1);
715
716         asize += pad;
717
718 #ifdef SHOW_EMEM_STATS
719         /* Do this check here so we can include the canary size */
720         if (mem == &se_packet_mem) {
721                 if (asize < 32)
722                         allocations[0]++;
723                 else if (asize < 64)
724                         allocations[1]++;
725                 else if (asize < 128)
726                         allocations[2]++;
727                 else if (asize < 256)
728                         allocations[3]++;
729                 else if (asize < 512)
730                         allocations[4]++;
731                 else if (asize < 1024)
732                         allocations[5]++;
733                 else if (asize < 2048)
734                         allocations[6]++;
735                 else if (asize < 4096)
736                         allocations[7]++;
737                 else if (asize < 8192)
738                         allocations[8]++;
739                 else if (asize < 16384)
740                         allocations[9]++;
741                 else
742                         allocations[(NUM_ALLOC_DIST-1)]++;
743         }
744 #endif
745
746         /* make sure we don't try to allocate too much (arbitrary limit) */
747         DISSECTOR_ASSERT(size<(EMEM_PACKET_CHUNK_SIZE>>2));
748
749         if (!mem->free_list)
750                 mem->free_list = emem_create_chunk();
751
752         /* oops, we need to allocate more memory to serve this request
753          * than we have free. move this node to the used list and try again
754          */
755         if(asize > mem->free_list->amount_free) {
756                 emem_chunk_t *npc;
757                 npc=mem->free_list;
758                 mem->free_list=mem->free_list->next;
759                 npc->next=mem->used_list;
760                 mem->used_list=npc;
761
762                 if (!mem->free_list)
763                         mem->free_list = emem_create_chunk();
764         }
765
766         free_list = mem->free_list;
767
768         buf = free_list->buf + free_list->free_offset;
769
770         free_list->amount_free -= (unsigned int) asize;
771         free_list->free_offset += (unsigned int) asize;
772
773         if (use_canary) {
774                 char *cptr = (char *)buf + size;
775
776                 memcpy(cptr, mem->canary, pad-1);
777                 cptr[pad-1] = '\0';
778                 memcpy(cptr + pad, &free_list->canary_last, sizeof(void *));
779
780                 free_list->canary_last = cptr;
781         }
782
783         return buf;
784 }
785
786 static void *
787 emem_alloc_glib(size_t size, emem_header_t *mem)
788 {
789         emem_chunk_t *npc;
790
791         npc=g_new(emem_chunk_t, 1);
792         npc->next=mem->used_list;
793         npc->buf=g_malloc(size);
794         npc->canary_last = NULL;
795         mem->used_list=npc;
796         /* There's no padding/alignment involved (from our point of view) when
797          * we fetch the memory directly from the system pool, so WYSIWYG */
798         npc->free_offset = npc->free_offset_init = 0;
799         npc->amount_free = npc->amount_free_init = (unsigned int) size;
800
801         return npc->buf;
802 }
803
804 /* allocate 'size' amount of memory. */
805 static void *
806 emem_alloc(size_t size, emem_header_t *mem)
807 {
808         void *buf = mem->memory_alloc(size, mem);
809
810         /*  XXX - this is a waste of time if the allocator function is going to
811          *  memset this straight back to 0.
812          */
813         emem_scrub_memory(buf, size, TRUE);
814
815         return buf;
816 }
817
818 /* allocate 'size' amount of memory with an allocation lifetime until the
819  * next packet.
820  */
821 void *
822 ep_alloc(size_t size)
823 {
824         return emem_alloc(size, &ep_packet_mem);
825 }
826
827 /* allocate 'size' amount of memory with an allocation lifetime until the
828  * next capture.
829  */
830 void *
831 se_alloc(size_t size)
832 {
833         return emem_alloc(size, &se_packet_mem);
834 }
835
836 void *
837 ep_alloc0(size_t size)
838 {
839         return memset(ep_alloc(size),'\0',size);
840 }
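/* Illustrative example (hypothetical caller code): a dissector needing a
 * zero-filled, packet-lifetime structure would typically write
 *
 *      struct conv_info *ci = ep_alloc0(sizeof(struct conv_info));
 *
 * and never free it explicitly; the memory is reclaimed wholesale by
 * ep_free_all() (and likewise se_alloc()/se_free_all() for capture lifetime).
 */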
841
842 gchar *
843 ep_strdup(const gchar* src)
844 {
845         guint len = (guint) strlen(src);
846         gchar* dst;
847
848         dst = memcpy(ep_alloc(len+1), src, len+1);
849
850         return dst;
851 }
852
853 gchar *
854 ep_strndup(const gchar* src, size_t len)
855 {
856         gchar* dst = ep_alloc(len+1);
857         guint i;
858
859         for (i = 0; (i < len) && src[i]; i++)
860                 dst[i] = src[i];
861
862         dst[i] = '\0';
863
864         return dst;
865 }
866
867 void *
868 ep_memdup(const void* src, size_t len)
869 {
870         return memcpy(ep_alloc(len), src, len);
871 }
872
873 gchar *
874 ep_strdup_vprintf(const gchar* fmt, va_list ap)
875 {
876         va_list ap2;
877         gsize len;
878         gchar* dst;
879
880         G_VA_COPY(ap2, ap);
881
882         len = g_printf_string_upper_bound(fmt, ap);
883
884         dst = ep_alloc(len+1);
885         g_vsnprintf (dst, (gulong) len, fmt, ap2);
886         va_end(ap2);
887
888         return dst;
889 }
890
891 gchar *
892 ep_strdup_printf(const gchar* fmt, ...)
893 {
894         va_list ap;
895         gchar* dst;
896
897         va_start(ap,fmt);
898         dst = ep_strdup_vprintf(fmt, ap);
899         va_end(ap);
900         return dst;
901 }
902
903 gchar **
904 ep_strsplit(const gchar* string, const gchar* sep, int max_tokens)
905 {
906         gchar* splitted;
907         gchar* s;
908         guint tokens;
909         guint str_len;
910         guint sep_len;
911         guint i;
912         gchar** vec;
913         enum { AT_START, IN_PAD, IN_TOKEN } state;
914         guint curr_tok = 0;
915
916         if (    ! string
917              || ! sep
918              || ! sep[0])
919                 return NULL;
920
921         s = splitted = ep_strdup(string);
922         str_len = (guint) strlen(splitted);
923         sep_len = (guint) strlen(sep);
924
925         if (max_tokens < 1) max_tokens = INT_MAX;
926
927         tokens = 1;
928
929
930         while (tokens <= (guint)max_tokens && ( s = strstr(s,sep) )) {
931                 tokens++;
932
933                 for(i=0; i < sep_len; i++ )
934                         s[i] = '\0';
935
936                 s += sep_len;
937
938         }
939
940         vec = ep_alloc_array(gchar*,tokens+1);
941         state = AT_START;
942
943         for (i=0; i< str_len; i++) {
944                 switch(state) {
945                         case AT_START:
946                                 switch(splitted[i]) {
947                                         case '\0':
948                                                 state  = IN_PAD;
949                                                 continue;
950                                         default:
951                                                 vec[curr_tok] = &(splitted[i]);
952                                                 curr_tok++;
953                                                 state = IN_TOKEN;
954                                                 continue;
955                                 }
956                         case IN_TOKEN:
957                                 switch(splitted[i]) {
958                                         case '\0':
959                                                 state = IN_PAD;
960                                         default:
961                                                 continue;
962                                 }
963                         case IN_PAD:
964                                 switch(splitted[i]) {
965                                         default:
966                                                 vec[curr_tok] = &(splitted[i]);
967                                                 curr_tok++;
968                                                 state = IN_TOKEN;
969                                         case '\0':
970                                                 continue;
971                                 }
972                 }
973         }
974
975         vec[curr_tok] = NULL;
976
977         return vec;
978 }
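/* Illustrative example: unlike g_strsplit(), empty tokens are skipped, so
 *
 *      gchar **v = ep_strsplit("a,,b,c", ",", -1);
 *
 * is expected to yield v[0]="a", v[1]="b", v[2]="c", v[3]=NULL, with the
 * vector and the token strings all allocated from the ep pool.
 */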
979
980
981
982 void *
983 se_alloc0(size_t size)
984 {
985         return memset(se_alloc(size),'\0',size);
986 }
987
988 /* If src is NULL, just return the string "<NULL>" so that the callers don't
989  * have to bother checking it.
990  */
991 gchar *
992 se_strdup(const gchar* src)
993 {
994         guint len;
995         gchar* dst;
996
997         if(!src)
998                 return "<NULL>";
999
1000         len = (guint) strlen(src);
1001         dst = memcpy(se_alloc(len+1), src, len+1);
1002
1003         return dst;
1004 }
1005
1006 gchar *
1007 se_strndup(const gchar* src, size_t len)
1008 {
1009         gchar* dst = se_alloc(len+1);
1010         guint i;
1011
1012         for (i = 0; (i < len) && src[i]; i++)
1013                 dst[i] = src[i];
1014
1015         dst[i] = '\0';
1016
1017         return dst;
1018 }
1019
1020 void *
1021 se_memdup(const void* src, size_t len)
1022 {
1023         return memcpy(se_alloc(len), src, len);
1024 }
1025
1026 gchar *
1027 se_strdup_vprintf(const gchar* fmt, va_list ap)
1028 {
1029         va_list ap2;
1030         gsize len;
1031         gchar* dst;
1032
1033         G_VA_COPY(ap2, ap);
1034
1035         len = g_printf_string_upper_bound(fmt, ap);
1036
1037         dst = se_alloc(len+1);
1038         g_vsnprintf (dst, (gulong) len, fmt, ap2);
1039         va_end(ap2);
1040
1041         return dst;
1042 }
1043
1044 gchar *
1045 se_strdup_printf(const gchar* fmt, ...)
1046 {
1047         va_list ap;
1048         gchar* dst;
1049
1050         va_start(ap,fmt);
1051         dst = se_strdup_vprintf(fmt, ap);
1052         va_end(ap);
1053         return dst;
1054 }
1055
1056 /* release all allocated memory back to the pool. */
1057 static void
1058 emem_free_all(emem_header_t *mem)
1059 {
1060         gboolean use_chunks = mem->debug_use_chunks;
1061
1062         emem_chunk_t *npc;
1063         emem_tree_t *tree_list;
1064
1065         /* move all used chunks over to the free list */
1066         while(mem->used_list){
1067                 npc=mem->used_list;
1068                 mem->used_list=mem->used_list->next;
1069                 npc->next=mem->free_list;
1070                 mem->free_list=npc;
1071         }
1072
1073         /* clear them all out */
1074         npc = mem->free_list;
1075         while (npc != NULL) {
1076                 if (use_chunks) {
1077                         while (npc->canary_last != NULL) {
1078                                 npc->canary_last = emem_canary_next(mem->canary, npc->canary_last, NULL);
1079                                 /* XXX, check if canary_last is inside allocated memory? */
1080
1081                                 if (npc->canary_last == (void *) -1)
1082                                         g_error("Memory corrupted");
1083                         }
1084
1085                         emem_scrub_memory((npc->buf + npc->free_offset_init),
1086                                           (npc->free_offset - npc->free_offset_init),
1087                                           FALSE);
1088
1089                         npc->amount_free = npc->amount_free_init;
1090                         npc->free_offset = npc->free_offset_init;
1091                         npc = npc->next;
1092                 } else {
1093                         emem_chunk_t *next = npc->next;
1094
1095                         emem_scrub_memory(npc->buf, npc->amount_free_init, FALSE);
1096
1097                         g_free(npc->buf);
1098                         g_free(npc);
1099                         npc = next;
1100                 }
1101         }
1102
1103         if (!use_chunks) {
1104                 /* We've freed all this memory already */
1105                 mem->free_list = NULL;
1106         }
1107
1108         /* release/reset all allocated trees */
1109         for(tree_list=mem->trees;tree_list;tree_list=tree_list->next){
1110                 tree_list->tree=NULL;
1111         }
1112 }
1113
1114 /* release all allocated memory back to the pool. */
1115 void
1116 ep_free_all(void)
1117 {
1118         emem_free_all(&ep_packet_mem);
1119 }
1120
1121 /* release all allocated memory back to the pool. */
1122 void
1123 se_free_all(void)
1124 {
1125 #ifdef SHOW_EMEM_STATS
1126         print_alloc_stats();
1127 #endif
1128
1129         emem_free_all(&se_packet_mem);
1130 }
1131
1132 ep_stack_t
1133 ep_stack_new(void) {
1134         ep_stack_t s = ep_new(struct _ep_stack_frame_t*);
1135         *s = ep_new0(struct _ep_stack_frame_t);
1136         return s;
1137 }
1138
1139 /*  for ep_stack_t we'll keep the popped frames so we reuse them instead
1140  *  of allocating new ones.
1141  */
1142
1143 void *
1144 ep_stack_push(ep_stack_t stack, void* data)
1145 {
1146         struct _ep_stack_frame_t* frame;
1147         struct _ep_stack_frame_t* head = (*stack);
1148
1149         if (head->above) {
1150                 frame = head->above;
1151         } else {
1152                 frame = ep_new(struct _ep_stack_frame_t);
1153                 head->above = frame;
1154                 frame->below = head;
1155                 frame->above = NULL;
1156         }
1157
1158         frame->payload = data;
1159         (*stack) = frame;
1160
1161         return data;
1162 }
1163
1164 void *
1165 ep_stack_pop(ep_stack_t stack)
1166 {
1167
1168         if ((*stack)->below) {
1169                 (*stack) = (*stack)->below;
1170                 return (*stack)->above->payload;
1171         } else {
1172                 return NULL;
1173         }
1174 }
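/* Illustrative example (hypothetical caller): the stack and its frames all
 * have packet lifetime, so no explicit cleanup is needed.
 *
 *      ep_stack_t s = ep_stack_new();
 *      ep_stack_push(s, item);
 *      data = ep_stack_pop(s);         returns item, then NULL once empty
 */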
1175
1176 emem_tree_t *
1177 se_tree_create(int type, const char *name)
1178 {
1179         emem_tree_t *tree_list;
1180
1181         tree_list=g_malloc(sizeof(emem_tree_t));
1182         tree_list->next=se_packet_mem.trees;
1183         tree_list->type=type;
1184         tree_list->tree=NULL;
1185         tree_list->name=name;
1186         tree_list->malloc=se_alloc;
1187         se_packet_mem.trees=tree_list;
1188
1189         return tree_list;
1190 }
1191
1192 void *
1193 emem_tree_lookup32(emem_tree_t *se_tree, guint32 key)
1194 {
1195         emem_tree_node_t *node;
1196
1197         node=se_tree->tree;
1198
1199         while(node){
1200                 if(key==node->key32){
1201                         return node->data;
1202                 }
1203                 if(key<node->key32){
1204                         node=node->left;
1205                         continue;
1206                 }
1207                 if(key>node->key32){
1208                         node=node->right;
1209                         continue;
1210                 }
1211         }
1212         return NULL;
1213 }
1214
1215 void *
1216 emem_tree_lookup32_le(emem_tree_t *se_tree, guint32 key)
1217 {
1218         emem_tree_node_t *node;
1219
1220         node=se_tree->tree;
1221
1222         if(!node){
1223                 return NULL;
1224         }
1225
1226
1227         while(node){
1228                 if(key==node->key32){
1229                         return node->data;
1230                 }
1231                 if(key<node->key32){
1232                         if(node->left){
1233                                 node=node->left;
1234                                 continue;
1235                         } else {
1236                                 break;
1237                         }
1238                 }
1239                 if(key>node->key32){
1240                         if(node->right){
1241                                 node=node->right;
1242                                 continue;
1243                         } else {
1244                                 break;
1245                         }
1246                 }
1247         }
1248
1249
1250         if(!node){
1251                 return NULL;
1252         }
1253
1254         /* If we are still at the root of the tree, then either this node's key
1255          * is smaller than the search key, in which case we return this node's
1256          * data, or there is no smaller key available, in which case we
1257          * return NULL.
1258          */
1259         if(!node->parent){
1260                 if(key>node->key32){
1261                         return node->data;
1262                 } else {
1263                         return NULL;
1264                 }
1265         }
1266
1267         if(node->parent->left==node){
1268                 /* left child */
1269
1270                 if(key>node->key32){
1271                         /* if this is a left child and its key is smaller than
1272                          * the search key, then this is the node we want.
1273                          */
1274                         return node->data;
1275                 } else {
1276                         /* if this is a left child and its key is bigger than
1277                          * the search key, we have to check if any
1278                          * of our ancestors are smaller than the search key.
1279                          */
1280                         while(node){
1281                                 if(key>node->key32){
1282                                         return node->data;
1283                                 }
1284                                 node=node->parent;
1285                         }
1286                         return NULL;
1287                 }
1288         } else {
1289                 /* right child */
1290
1291                 if(node->key32<key){
1292                         /* if this is the right child and its key is smaller
1293                          * than the search key then this is the one we want.
1294                          */
1295                         return node->data;
1296                 } else {
1297                         /* if this is the right child and its key is larger
1298                          * than the search key then our parent is the one we
1299                          * want.
1300                          */
1301                         return node->parent->data;
1302                 }
1303         }
1304
1305 }
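/* Worked example (illustrative): in a tree holding keys 5 and 10,
 * emem_tree_lookup32_le(tree, 7) returns the data stored under key 5 (the
 * largest key <= 7), emem_tree_lookup32_le(tree, 12) returns the data stored
 * under key 10, and emem_tree_lookup32_le(tree, 3) returns NULL because no
 * key is <= 3.
 */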
1306
1307
1308 static inline emem_tree_node_t *
1309 emem_tree_parent(emem_tree_node_t *node)
1310 {
1311         return node->parent;
1312 }
1313
1314 static inline emem_tree_node_t *
1315 emem_tree_grandparent(emem_tree_node_t *node)
1316 {
1317         emem_tree_node_t *parent;
1318
1319         parent=emem_tree_parent(node);
1320         if(parent){
1321                 return parent->parent;
1322         }
1323         return NULL;
1324 }
1325
1326 static inline emem_tree_node_t *
1327 emem_tree_uncle(emem_tree_node_t *node)
1328 {
1329         emem_tree_node_t *parent, *grandparent;
1330
1331         parent=emem_tree_parent(node);
1332         if(!parent){
1333                 return NULL;
1334         }
1335         grandparent=emem_tree_parent(parent);
1336         if(!grandparent){
1337                 return NULL;
1338         }
1339         if(parent==grandparent->left){
1340                 return grandparent->right;
1341         }
1342         return grandparent->left;
1343 }
1344
1345 static inline void rb_insert_case1(emem_tree_t *se_tree, emem_tree_node_t *node);
1346 static inline void rb_insert_case2(emem_tree_t *se_tree, emem_tree_node_t *node);
1347
1348 static inline void
1349 rotate_left(emem_tree_t *se_tree, emem_tree_node_t *node)
1350 {
1351         if(node->parent){
1352                 if(node->parent->left==node){
1353                         node->parent->left=node->right;
1354                 } else {
1355                         node->parent->right=node->right;
1356                 }
1357         } else {
1358                 se_tree->tree=node->right;
1359         }
1360         node->right->parent=node->parent;
1361         node->parent=node->right;
1362         node->right=node->right->left;
1363         if(node->right){
1364                 node->right->parent=node;
1365         }
1366         node->parent->left=node;
1367 }
1368
1369 static inline void
1370 rotate_right(emem_tree_t *se_tree, emem_tree_node_t *node)
1371 {
1372         if(node->parent){
1373                 if(node->parent->left==node){
1374                         node->parent->left=node->left;
1375                 } else {
1376                         node->parent->right=node->left;
1377                 }
1378         } else {
1379                 se_tree->tree=node->left;
1380         }
1381         node->left->parent=node->parent;
1382         node->parent=node->left;
1383         node->left=node->left->right;
1384         if(node->left){
1385                 node->left->parent=node;
1386         }
1387         node->parent->right=node;
1388 }
1389
1390 static inline void
1391 rb_insert_case5(emem_tree_t *se_tree, emem_tree_node_t *node)
1392 {
1393         emem_tree_node_t *grandparent;
1394         emem_tree_node_t *parent;
1395
1396         parent=emem_tree_parent(node);
1397         grandparent=emem_tree_parent(parent);
1398         parent->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1399         grandparent->u.rb_color=EMEM_TREE_RB_COLOR_RED;
1400         if( (node==parent->left) && (parent==grandparent->left) ){
1401                 rotate_right(se_tree, grandparent);
1402         } else {
1403                 rotate_left(se_tree, grandparent);
1404         }
1405 }
1406
1407 static inline void
1408 rb_insert_case4(emem_tree_t *se_tree, emem_tree_node_t *node)
1409 {
1410         emem_tree_node_t *grandparent;
1411         emem_tree_node_t *parent;
1412
1413         parent=emem_tree_parent(node);
1414         grandparent=emem_tree_parent(parent);
1415         if(!grandparent){
1416                 return;
1417         }
1418         if( (node==parent->right) && (parent==grandparent->left) ){
1419                 rotate_left(se_tree, parent);
1420                 node=node->left;
1421         } else if( (node==parent->left) && (parent==grandparent->right) ){
1422                 rotate_right(se_tree, parent);
1423                 node=node->right;
1424         }
1425         rb_insert_case5(se_tree, node);
1426 }
1427
1428 static inline void
1429 rb_insert_case3(emem_tree_t *se_tree, emem_tree_node_t *node)
1430 {
1431         emem_tree_node_t *grandparent;
1432         emem_tree_node_t *parent;
1433         emem_tree_node_t *uncle;
1434
1435         uncle=emem_tree_uncle(node);
1436         if(uncle && (uncle->u.rb_color==EMEM_TREE_RB_COLOR_RED)){
1437                 parent=emem_tree_parent(node);
1438                 parent->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1439                 uncle->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1440                 grandparent=emem_tree_grandparent(node);
1441                 grandparent->u.rb_color=EMEM_TREE_RB_COLOR_RED;
1442                 rb_insert_case1(se_tree, grandparent);
1443         } else {
1444                 rb_insert_case4(se_tree, node);
1445         }
1446 }
1447
1448 static inline void
1449 rb_insert_case2(emem_tree_t *se_tree, emem_tree_node_t *node)
1450 {
1451         emem_tree_node_t *parent;
1452
1453         parent=emem_tree_parent(node);
1454         /* parent is always non-NULL here */
1455         if(parent->u.rb_color==EMEM_TREE_RB_COLOR_BLACK){
1456                 return;
1457         }
1458         rb_insert_case3(se_tree, node);
1459 }
1460
1461 static inline void
1462 rb_insert_case1(emem_tree_t *se_tree, emem_tree_node_t *node)
1463 {
1464         emem_tree_node_t *parent;
1465
1466         parent=emem_tree_parent(node);
1467         if(!parent){
1468                 node->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1469                 return;
1470         }
1471         rb_insert_case2(se_tree, node);
1472 }
1473
1474 /* insert a new node in the tree. if this node matches an already existing node
1475  * then just replace the data for that node */
1476 void
1477 emem_tree_insert32(emem_tree_t *se_tree, guint32 key, void *data)
1478 {
1479         emem_tree_node_t *node;
1480
1481         node=se_tree->tree;
1482
1483         /* is this the first node ?*/
1484         if(!node){
1485                 node=se_tree->malloc(sizeof(emem_tree_node_t));
1486                 switch(se_tree->type){
1487                 case EMEM_TREE_TYPE_RED_BLACK:
1488                         node->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1489                         break;
1490                 }
1491                 node->parent=NULL;
1492                 node->left=NULL;
1493                 node->right=NULL;
1494                 node->key32=key;
1495                 node->data=data;
1496                 node->u.is_subtree = EMEM_TREE_NODE_IS_DATA;
1497                 se_tree->tree=node;
1498                 return;
1499         }
1500
1501         /* it was not the new root so walk the tree until we find where to
1502          * insert this new leaf.
1503          */
1504         while(1){
1505                 /* this node already exists, so just replace the data pointer*/
1506                 if(key==node->key32){
1507                         node->data=data;
1508                         return;
1509                 }
1510                 if(key<node->key32) {
1511                         if(!node->left){
1512                                 /* new node to the left */
1513                                 emem_tree_node_t *new_node;
1514                                 new_node=se_tree->malloc(sizeof(emem_tree_node_t));
1515                                 node->left=new_node;
1516                                 new_node->parent=node;
1517                                 new_node->left=NULL;
1518                                 new_node->right=NULL;
1519                                 new_node->key32=key;
1520                                 new_node->data=data;
1521                                 new_node->u.is_subtree=EMEM_TREE_NODE_IS_DATA;
1522                                 node=new_node;
1523                                 break;
1524                         }
1525                         node=node->left;
1526                         continue;
1527                 }
1528                 if(key>node->key32) {
1529                         if(!node->right){
1530                                 /* new node to the right */
1531                                 emem_tree_node_t *new_node;
1532                                 new_node=se_tree->malloc(sizeof(emem_tree_node_t));
1533                                 node->right=new_node;
1534                                 new_node->parent=node;
1535                                 new_node->left=NULL;
1536                                 new_node->right=NULL;
1537                                 new_node->key32=key;
1538                                 new_node->data=data;
1539                                 new_node->u.is_subtree=EMEM_TREE_NODE_IS_DATA;
1540                                 node=new_node;
1541                                 break;
1542                         }
1543                         node=node->right;
1544                         continue;
1545                 }
1546         }
1547
1548         /* node will now point to the newly created node */
1549         switch(se_tree->type){
1550         case EMEM_TREE_TYPE_RED_BLACK:
1551                 node->u.rb_color=EMEM_TREE_RB_COLOR_RED;
1552                 rb_insert_case1(se_tree, node);
1553                 break;
1554         }
1555 }
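/* Illustrative example (hypothetical caller code): a dissector tracking
 * per-id state for the lifetime of a capture might do
 *
 *      static emem_tree_t *state_by_id;
 *      ...
 *      state_by_id = se_tree_create(EMEM_TREE_TYPE_RED_BLACK, "proto state");
 *      emem_tree_insert32(state_by_id, id, state);
 *      state = emem_tree_lookup32(state_by_id, id);
 *
 * Inserting a key that already exists simply replaces the stored data
 * pointer.
 */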
1556
1557 static void *
1558 lookup_or_insert32(emem_tree_t *se_tree, guint32 key, void*(*func)(void*),void* ud, int is_subtree)
1559 {
1560         emem_tree_node_t *node;
1561
1562         node=se_tree->tree;
1563
1564         /* is this the first node ?*/
1565         if(!node){
1566                 node=se_tree->malloc(sizeof(emem_tree_node_t));
1567                 switch(se_tree->type){
1568                         case EMEM_TREE_TYPE_RED_BLACK:
1569                                 node->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1570                                 break;
1571                 }
1572                 node->parent=NULL;
1573                 node->left=NULL;
1574                 node->right=NULL;
1575                 node->key32=key;
1576                 node->data= func(ud);
1577                 node->u.is_subtree = is_subtree;
1578                 se_tree->tree=node;
1579                 return node->data;
1580         }
1581
1582         /* it was not the new root so walk the tree until we find where to
1583          * insert this new leaf.
1584          */
1585         while(1){
1586                 /* this node already exists, so just return the data pointer*/
1587                 if(key==node->key32){
1588                         return node->data;
1589                 }
1590                 if(key<node->key32) {
1591                         if(!node->left){
1592                                 /* new node to the left */
1593                                 emem_tree_node_t *new_node;
1594                                 new_node=se_tree->malloc(sizeof(emem_tree_node_t));
1595                                 node->left=new_node;
1596                                 new_node->parent=node;
1597                                 new_node->left=NULL;
1598                                 new_node->right=NULL;
1599                                 new_node->key32=key;
1600                                 new_node->data= func(ud);
1601                                 new_node->u.is_subtree = is_subtree;
1602                                 node=new_node;
1603                                 break;
1604                         }
1605                         node=node->left;
1606                         continue;
1607                 }
1608                 if(key>node->key32) {
1609                         if(!node->right){
1610                                 /* new node to the right */
1611                                 emem_tree_node_t *new_node;
1612                                 new_node=se_tree->malloc(sizeof(emem_tree_node_t));
1613                                 node->right=new_node;
1614                                 new_node->parent=node;
1615                                 new_node->left=NULL;
1616                                 new_node->right=NULL;
1617                                 new_node->key32=key;
1618                                 new_node->data= func(ud);
1619                                 new_node->u.is_subtree = is_subtree;
1620                                 node=new_node;
1621                                 break;
1622                         }
1623                         node=node->right;
1624                         continue;
1625                 }
1626         }
1627
1628         /* node will now point to the newly created node */
1629         switch(se_tree->type){
1630                 case EMEM_TREE_TYPE_RED_BLACK:
1631                         node->u.rb_color=EMEM_TREE_RB_COLOR_RED;
1632                         rb_insert_case1(se_tree, node);
1633                         break;
1634         }
1635
1636         return node->data;
1637 }
1638
1639 /* When the se data is released, this entire tree will disappear as if it
1640  * never existed, including all metadata associated with the tree.
1641  */
1642 emem_tree_t *
1643 se_tree_create_non_persistent(int type, const char *name)
1644 {
1645         emem_tree_t *tree_list;
1646
1647         tree_list=se_alloc(sizeof(emem_tree_t));
1648         tree_list->next=NULL;
1649         tree_list->type=type;
1650         tree_list->tree=NULL;
1651         tree_list->name=name;
1652         tree_list->malloc=se_alloc;
1653
1654         return tree_list;
1655 }
1656
1657 /* This tree is permanent and will never be released.
1658  */
1659 emem_tree_t *
1660 pe_tree_create(int type, const char *name)
1661 {
1662         emem_tree_t *tree_list;
1663
1664         tree_list=g_new(emem_tree_t, 1);
1665         tree_list->next=NULL;
1666         tree_list->type=type;
1667         tree_list->tree=NULL;
1668         tree_list->name=name;
1669         tree_list->malloc=(void *(*)(size_t)) g_malloc;
1670
1671         return tree_list;
1672 }
1673
1674 /* create another (sub)tree using the same memory allocation scope
1675  * as the parent tree.
1676  */
1677 static emem_tree_t *
1678 emem_tree_create_subtree(emem_tree_t *parent_tree, const char *name)
1679 {
1680         emem_tree_t *tree_list;
1681
1682         tree_list=parent_tree->malloc(sizeof(emem_tree_t));
1683         tree_list->next=NULL;
1684         tree_list->type=parent_tree->type;
1685         tree_list->tree=NULL;
1686         tree_list->name=name;
1687         tree_list->malloc=parent_tree->malloc;
1688
1689         return tree_list;
1690 }
1691
1692 static void *
1693 create_sub_tree(void* d)
1694 {
1695         emem_tree_t *se_tree = d;
1696         return emem_tree_create_subtree(se_tree, "subtree");
1697 }
1698
1699 /* Insert a new node in the tree. If this node matches an already existing node,
1700  * then just replace the data for that node. */
1701
1702 void
1703 emem_tree_insert32_array(emem_tree_t *se_tree, emem_tree_key_t *key, void *data)
1704 {
1705         emem_tree_t *next_tree;
1706
1707         if((key[0].length<1)||(key[0].length>100)){
1708                 DISSECTOR_ASSERT_NOT_REACHED();
1709         }
1710         if((key[0].length==1)&&(key[1].length==0)){
1711                 emem_tree_insert32(se_tree, *key[0].key, data);
1712                 return;
1713         }
1714
1715         next_tree=lookup_or_insert32(se_tree, *key[0].key, create_sub_tree, se_tree, EMEM_TREE_NODE_IS_SUBTREE);
1716
1717         if(key[0].length==1){
1718                 key++;
1719         } else {
1720                 key[0].length--;
1721                 key[0].key++;
1722         }
1723         emem_tree_insert32_array(next_tree, key, data);
1724 }
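
/* For illustration only (not part of the original allocator code): a sketch of
 * how a caller could insert under a compound key built from two guint32 values,
 * e.g. a stream id and a frame number.  The variable names here are made up;
 * the key array must be terminated by a zero-length entry.
 *
 *      emem_tree_key_t key[3];
 *
 *      key[0].length = 1;            (first part: one guint32)
 *      key[0].key    = &stream_id;
 *      key[1].length = 1;            (second part: one guint32)
 *      key[1].key    = &frame_num;
 *      key[2].length = 0;            (terminator)
 *      key[2].key    = NULL;
 *
 *      emem_tree_insert32_array(tree, key, data);
 */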
1725
1726 void *
1727 emem_tree_lookup32_array(emem_tree_t *se_tree, emem_tree_key_t *key)
1728 {
1729         emem_tree_t *next_tree;
1730
1731         if(!se_tree || !key) return NULL; /* prevent searching on NULL pointer */
1732
1733         if((key[0].length<1)||(key[0].length>100)){
1734                 DISSECTOR_ASSERT_NOT_REACHED();
1735         }
1736         if((key[0].length==1)&&(key[1].length==0)){
1737                 return emem_tree_lookup32(se_tree, *key[0].key);
1738         }
1739         next_tree=emem_tree_lookup32(se_tree, *key[0].key);
1740         if(!next_tree){
1741                 return NULL;
1742         }
1743         if(key[0].length==1){
1744                 key++;
1745         } else {
1746                 key[0].length--;
1747                 key[0].key++;
1748         }
1749         return emem_tree_lookup32_array(next_tree, key);
1750 }
1751
1752 void *
1753 emem_tree_lookup32_array_le(emem_tree_t *se_tree, emem_tree_key_t *key)
1754 {
1755         emem_tree_t *next_tree;
1756
1757         if(!se_tree || !key) return NULL; /* prevent searching on NULL pointer */
1758
1759         if((key[0].length<1)||(key[0].length>100)){
1760                 DISSECTOR_ASSERT_NOT_REACHED();
1761         }
1762         if((key[0].length==1)&&(key[1].length==0)){ /* last key in key array */
1763                 return emem_tree_lookup32_le(se_tree, *key[0].key);
1764         }
1765         next_tree=emem_tree_lookup32(se_tree, *key[0].key);
1766         /* key[0].key not found, so fall back to the closest key <= key[0].key at this level */
1767         if(!next_tree)
1768                 return emem_tree_lookup32_le(se_tree, *key[0].key);
1769
1770         /* key[0].key found so inc key pointer and try again */
1771         if(key[0].length==1){
1772                 key++;
1773         } else {
1774                 key[0].length--;
1775                 key[0].key++;
1776         }
1777         return emem_tree_lookup32_array_le(next_tree, key);
1778 }
1779
1780 /* Strings are stored as an array of uint32 containing the string characters
1781    with 4 characters in each uint32.
1782    The first byte of the string is stored as the most significant byte.
1783    If the string is not a multiple of 4 characters in length, the last
1784    uint32 containing the string bytes is padded with 0 bytes.
1785    After the uint32s containing the string, there is one final terminator
1786    uint32 with the value 0x00000001.
1787 */
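/* Worked example (illustrative): the 2-character string "AB" is packed into
 * two guint32 key words.  'A' (0x41) and 'B' (0x42) end up in the high bytes
 * of the first word, the remaining bytes are zero padding, and the terminator
 * word follows:
 *
 *      aligned[0] = 0x41420000;      ("AB" plus two bytes of padding)
 *      aligned[1] = 0x00000001;      (terminator)
 *
 * giving a key of length 2 (divx) pointing at aligned[].
 */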
1788 void
1789 emem_tree_insert_string(emem_tree_t* se_tree, const gchar* k, void* v, guint32 flags)
1790 {
1791         emem_tree_key_t key[2];
1792         guint32 *aligned=NULL;
1793         guint32 len = (guint32) strlen(k);
1794         guint32 divx = (len+3)/4+1;
1795         guint32 i;
1796         guint32 tmp;
1797
1798         aligned = g_malloc(divx * sizeof (guint32));
1799
1800         /* pack the bytes one by one into guint32s */
1801         tmp = 0;
1802         for (i = 0;i < len;i++) {
1803                 unsigned char ch;
1804
1805                 ch = (unsigned char)k[i];
1806                 if (flags & EMEM_TREE_STRING_NOCASE) {
1807                         if(isupper(ch)) {
1808                                 ch = tolower(ch);
1809                         }
1810                 }
1811                 tmp <<= 8;
1812                 tmp |= ch;
1813                 if (i%4 == 3) {
1814                         aligned[i/4] = tmp;
1815                         tmp = 0;
1816                 }
1817         }
1818         /* add required padding to the last uint32 */
1819         if (i%4 != 0) {
1820                 while (i%4 != 0) {
1821                         i++;
1822                         tmp <<= 8;
1823                 }
1824                 aligned[i/4-1] = tmp;
1825         }
1826
1827         /* add the terminator */
1828         aligned[divx-1] = 0x00000001;
1829
1830         key[0].length = divx;
1831         key[0].key = aligned;
1832         key[1].length = 0;
1833         key[1].key = NULL;
1834
1835
1836         emem_tree_insert32_array(se_tree, key, v);
1837         g_free(aligned);
1838 }
1839
1840 void *
1841 emem_tree_lookup_string(emem_tree_t* se_tree, const gchar* k, guint32 flags)
1842 {
1843         emem_tree_key_t key[2];
1844         guint32 *aligned=NULL;
1845         guint32 len = (guint32) strlen(k);
1846         guint32 divx = (len+3)/4+1;
1847         guint32 i;
1848         guint32 tmp;
1849         void *ret;
1850
1851         aligned = g_malloc(divx * sizeof (guint32));
1852
1853         /* pack the bytes one by one into guint32s */
1854         tmp = 0;
1855         for (i = 0;i < len;i++) {
1856                 unsigned char ch;
1857
1858                 ch = (unsigned char)k[i];
1859                 if (flags & EMEM_TREE_STRING_NOCASE) {
1860                         if(isupper(ch)) {
1861                                 ch = tolower(ch);
1862                         }
1863                 }
1864                 tmp <<= 8;
1865                 tmp |= ch;
1866                 if (i%4 == 3) {
1867                         aligned[i/4] = tmp;
1868                         tmp = 0;
1869                 }
1870         }
1871         /* add required padding to the last uint32 */
1872         if (i%4 != 0) {
1873                 while (i%4 != 0) {
1874                         i++;
1875                         tmp <<= 8;
1876                 }
1877                 aligned[i/4-1] = tmp;
1878         }
1879
1880         /* add the terminator */
1881         aligned[divx-1] = 0x00000001;
1882
1883         key[0].length = divx;
1884         key[0].key = aligned;
1885         key[1].length = 0;
1886         key[1].key = NULL;
1887
1888
1889         ret = emem_tree_lookup32_array(se_tree, key);
1890         g_free(aligned);
1891         return ret;
1892 }
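
/* For illustration only: typical use of the string-keyed wrappers.  The tree
 * and value names are invented for this sketch.
 *
 *      emem_tree_insert_string(tree, "content-type", value, EMEM_TREE_STRING_NOCASE);
 *      value = emem_tree_lookup_string(tree, "Content-Type", EMEM_TREE_STRING_NOCASE);
 *
 * With EMEM_TREE_STRING_NOCASE both keys are folded to lower case before
 * being packed, so the lookup finds the value stored by the insert.
 */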
1893
1894 static gboolean
1895 emem_tree_foreach_nodes(emem_tree_node_t* node, tree_foreach_func callback, void *user_data)
1896 {
1897         gboolean stop_traverse = FALSE;
1898
1899         if (!node)
1900                 return FALSE;
1901
1902         if(node->left) {
1903                 stop_traverse = emem_tree_foreach_nodes(node->left, callback, user_data);
1904                 if (stop_traverse) {
1905                         return TRUE;
1906                 }
1907         }
1908
1909         if (node->u.is_subtree == EMEM_TREE_NODE_IS_SUBTREE) {
1910                 stop_traverse = emem_tree_foreach(node->data, callback, user_data);
1911         } else {
1912                 stop_traverse = callback(node->data, user_data);
1913         }
1914
1915         if (stop_traverse) {
1916                 return TRUE;
1917         }
1918
1919         if(node->right) {
1920                 stop_traverse = emem_tree_foreach_nodes(node->right, callback, user_data);
1921                 if (stop_traverse) {
1922                         return TRUE;
1923                 }
1924         }
1925
1926         return FALSE;
1927 }
1928
1929 gboolean
1930 emem_tree_foreach(emem_tree_t* emem_tree, tree_foreach_func callback, void *user_data)
1931 {
1932         if (!emem_tree)
1933                 return FALSE;
1934
1935         if(!emem_tree->tree)
1936                 return FALSE;
1937
1938         return emem_tree_foreach_nodes(emem_tree->tree, callback, user_data);
1939 }
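
/* For illustration only: a callback suitable for emem_tree_foreach().  The
 * function and variable names are made up.  Returning FALSE continues the
 * traversal; returning TRUE stops it early.
 *
 *      static gboolean
 *      count_entries(void *value _U_, void *user_data)
 *      {
 *              guint32 *count = (guint32 *)user_data;
 *              (*count)++;
 *              return FALSE;         (visit every node)
 *      }
 *
 *      guint32 count = 0;
 *      emem_tree_foreach(tree, count_entries, &count);
 */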
1940
1941
1942 static void
1943 emem_tree_print_nodes(emem_tree_node_t* node, int level)
1944 {
1945         int i;
1946
1947         if (!node)
1948                 return;
1949
1950         for(i=0;i<level;i++){
1951                 printf("    ");
1952         }
1953
1954         printf("NODE:%p parent:%p left:%p right:%p key:%u data:%p\n",
1955                 (void *)node,(void *)(node->parent),(void *)(node->left),(void *)(node->right),
1956                 (node->key32),node->data);
1957         if(node->left)
1958                 emem_tree_print_nodes(node->left, level+1);
1959         if(node->right)
1960                 emem_tree_print_nodes(node->right, level+1);
1961 }
1962 void
1963 emem_print_tree(emem_tree_t* emem_tree)
1964 {
1965         if (!emem_tree)
1966                 return;
1967
1968         printf("EMEM tree type:%d name:%s tree:%p\n",emem_tree->type,emem_tree->name,(void *)(emem_tree->tree));
1969         if(emem_tree->tree)
1970                 emem_tree_print_nodes(emem_tree->tree, 0);
1971 }
1972
1973 /*
1974  * String buffers
1975  */
1976
1977 /*
1978  * Presumably we're using these routines for building strings for the tree.
1979  * Use ITEM_LABEL_LENGTH as the basis for our default lengths.
1980  */
1981
1982 #define DEFAULT_STRBUF_LEN (ITEM_LABEL_LENGTH / 10)
1983 #define MAX_STRBUF_LEN 65536
1984
1985 static gsize
1986 next_size(gsize cur_alloc_len, gsize wanted_alloc_len, gsize max_alloc_len)
1987 {
1988         if (max_alloc_len < 1 || max_alloc_len > MAX_STRBUF_LEN) {
1989                 max_alloc_len = MAX_STRBUF_LEN;
1990         }
1991
1992         if (cur_alloc_len < 1) {
1993                 cur_alloc_len = DEFAULT_STRBUF_LEN;
1994         }
1995
1996         while (cur_alloc_len < wanted_alloc_len) {
1997                 cur_alloc_len *= 2;
1998         }
1999
2000         return cur_alloc_len < max_alloc_len ? cur_alloc_len : max_alloc_len;
2001 }
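
/* Example of the growth policy (illustrative, assuming ITEM_LABEL_LENGTH is
 * 240 so DEFAULT_STRBUF_LEN is 24): a request for 100 bytes from a fresh
 * buffer doubles 24 -> 48 -> 96 -> 192 and returns 192, while oversized
 * requests are clamped to MAX_STRBUF_LEN:
 *
 *      next_size(0, 100, 0);         (returns 192)
 *      next_size(0, 100000, 0);      (returns 65536)
 */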
2002
2003 static void
2004 ep_strbuf_grow(emem_strbuf_t *strbuf, gsize wanted_alloc_len)
2005 {
2006         gsize new_alloc_len;
2007         gchar *new_str;
2008
2009         if (!strbuf || (wanted_alloc_len <= strbuf->alloc_len) || (strbuf->alloc_len >= strbuf->max_alloc_len)) {
2010                 return;
2011         }
2012
2013         new_alloc_len = next_size(strbuf->alloc_len, wanted_alloc_len, strbuf->max_alloc_len);
2014         new_str = ep_alloc(new_alloc_len);
2015         g_strlcpy(new_str, strbuf->str, new_alloc_len);
2016
2017         strbuf->alloc_len = new_alloc_len;
2018         strbuf->str = new_str;
2019 }
2020
2021 emem_strbuf_t *
2022 ep_strbuf_sized_new(gsize alloc_len, gsize max_alloc_len)
2023 {
2024         emem_strbuf_t *strbuf;
2025
2026         strbuf = ep_alloc(sizeof(emem_strbuf_t));
2027
2028         if ((max_alloc_len == 0) || (max_alloc_len > MAX_STRBUF_LEN))
2029                 max_alloc_len = MAX_STRBUF_LEN;
2030         if (alloc_len == 0)
2031                 alloc_len = 1;
2032         else if (alloc_len > max_alloc_len)
2033                 alloc_len = max_alloc_len;
2034
2035         strbuf->str = ep_alloc(alloc_len);
2036         strbuf->str[0] = '\0';
2037
2038         strbuf->len = 0;
2039         strbuf->alloc_len = alloc_len;
2040         strbuf->max_alloc_len = max_alloc_len;
2041
2042         return strbuf;
2043 }
2044
2045 emem_strbuf_t *
2046 ep_strbuf_new(const gchar *init)
2047 {
2048         emem_strbuf_t *strbuf;
2049
2050         strbuf = ep_strbuf_sized_new(next_size(0, init?strlen(init)+1:0, 0), 0);  /* +1 for NULL terminator */
2051         if (init) {
2052                 gsize full_len;
2053                 full_len = g_strlcpy(strbuf->str, init, strbuf->alloc_len);
2054                 strbuf->len = MIN(full_len, strbuf->alloc_len-1);
2055         }
2056
2057         return strbuf;
2058 }
2059
2060 emem_strbuf_t *
2061 ep_strbuf_new_label(const gchar *init)
2062 {
2063         emem_strbuf_t *strbuf;
2064         gsize full_len;
2065
2066         /* Be optimistic: Allocate default size strbuf string and only      */
2067         /*  request an increase if needed.                                  */
2068         /* XXX: Is it reasonable to assume that much of the usage of        */
2069 /*  ep_strbuf_new_label will have init==NULL or                     */
2070         /*   strlen(init) < DEFAULT_STRBUF_LEN) ???                         */
2071         strbuf = ep_strbuf_sized_new(DEFAULT_STRBUF_LEN, ITEM_LABEL_LENGTH);
2072
2073         if (!init)
2074                 return strbuf;
2075
2076         /* full_len does not count the trailing '\0'.                       */
2077         full_len = g_strlcpy(strbuf->str, init, strbuf->alloc_len);
2078         if (full_len < strbuf->alloc_len) {
2079                 strbuf->len += full_len;
2080         } else {
2081                 strbuf = ep_strbuf_sized_new(full_len+1, ITEM_LABEL_LENGTH);
2082                 full_len = g_strlcpy(strbuf->str, init, strbuf->alloc_len);
2083                 strbuf->len = MIN(full_len, strbuf->alloc_len-1);
2084         }
2085
2086         return strbuf;
2087 }
2088
2089 emem_strbuf_t *
2090 ep_strbuf_append(emem_strbuf_t *strbuf, const gchar *str)
2091 {
2092         gsize add_len, full_len;
2093
2094         if (!strbuf || !str || str[0] == '\0') {
2095                 return strbuf;
2096         }
2097
2098         /* Be optimistic; try the g_strlcpy first & see if enough room.                 */
2099         /* Note: full_len doesn't count the trailing '\0'; add_len does allow for same  */
2100         add_len = strbuf->alloc_len - strbuf->len;
2101         full_len = g_strlcpy(&strbuf->str[strbuf->len], str, add_len);
2102         if (full_len < add_len) {
2103                 strbuf->len += full_len;
2104         } else {
2105                 strbuf->str[strbuf->len] = '\0'; /* end string at original length again */
2106                 ep_strbuf_grow(strbuf, strbuf->len + full_len + 1);
2107                 add_len = strbuf->alloc_len - strbuf->len;
2108                 full_len = g_strlcpy(&strbuf->str[strbuf->len], str, add_len);
2109                 strbuf->len += MIN(add_len-1, full_len);
2110         }
2111
2112         return strbuf;
2113 }
2114
2115 void
2116 ep_strbuf_append_vprintf(emem_strbuf_t *strbuf, const gchar *format, va_list ap)
2117 {
2118         va_list ap2;
2119         gsize add_len, full_len;
2120
2121         G_VA_COPY(ap2, ap);
2122
2123         /* Be optimistic; try the g_vsnprintf first & see if enough room.               */
2124         /* Note: full_len doesn't count the trailing '\0'; add_len does allow for same. */
2125         add_len = strbuf->alloc_len - strbuf->len;
2126         full_len = g_vsnprintf(&strbuf->str[strbuf->len], (gulong) add_len, format, ap);
2127         if (full_len < add_len) {
2128                 strbuf->len += full_len;
2129         } else {
2130                 strbuf->str[strbuf->len] = '\0'; /* end string at original length again */
2131                 ep_strbuf_grow(strbuf, strbuf->len + full_len + 1);
2132                 add_len = strbuf->alloc_len - strbuf->len;
2133                 full_len = g_vsnprintf(&strbuf->str[strbuf->len], (gulong) add_len, format, ap2);
2134                 strbuf->len += MIN(add_len-1, full_len);
2135         }
2136
2137         va_end(ap2);
2138 }
2139
2140 void
2141 ep_strbuf_append_printf(emem_strbuf_t *strbuf, const gchar *format, ...)
2142 {
2143         va_list ap;
2144
2145         va_start(ap, format);
2146         ep_strbuf_append_vprintf(strbuf, format, ap);
2147         va_end(ap);
2148 }
2149
2150 void
2151 ep_strbuf_printf(emem_strbuf_t *strbuf, const gchar *format, ...)
2152 {
2153         va_list ap;
2154         if (!strbuf) {
2155                 return;
2156         }
2157
2158         strbuf->len = 0;
2159
2160         va_start(ap, format);
2161         ep_strbuf_append_vprintf(strbuf, format, ap);
2162         va_end(ap);
2163 }
2164
2165 emem_strbuf_t *
2166 ep_strbuf_append_c(emem_strbuf_t *strbuf, const gchar c)
2167 {
2168         if (!strbuf) {
2169                 return strbuf;
2170         }
2171
2172         /* +1 for the new character & +1 for the trailing '\0'. */
2173         if (strbuf->alloc_len < strbuf->len + 1 + 1) {
2174                 ep_strbuf_grow(strbuf, strbuf->len + 1 + 1);
2175         }
2176         if (strbuf->alloc_len >= strbuf->len + 1 + 1) {
2177                 strbuf->str[strbuf->len] = c;
2178                 strbuf->len++;
2179                 strbuf->str[strbuf->len] = '\0';
2180         }
2181
2182         return strbuf;
2183 }
2184
2185 emem_strbuf_t *
2186 ep_strbuf_truncate(emem_strbuf_t *strbuf, gsize len)
2187 {
2188         if (!strbuf || len >= strbuf->len) {
2189                 return strbuf;
2190         }
2191
2192         strbuf->str[len] = '\0';
2193         strbuf->len = len;
2194
2195         return strbuf;
2196 }
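
/* For illustration only: building a packet-scoped string with the strbuf API.
 * The id and item variables are invented for this sketch; everything allocated
 * here is released automatically when the ep_ pool is freed.
 *
 *      emem_strbuf_t *buf = ep_strbuf_new("id=");
 *      ep_strbuf_append_printf(buf, "%u", id);
 *      ep_strbuf_append_c(buf, ';');
 *      ep_strbuf_append(buf, " length=128");
 *      proto_item_append_text(item, "%s", buf->str);
 */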
2197
2198 /*
2199  * Editor modelines
2200  *
2201  * Local Variables:
2202  * c-basic-offset: 8
2203  * tab-width: 8
2204  * indent-tabs-mode: t
2205  * End:
2206  *
2207  * ex: set shiftwidth=8 tabstop=8 noexpandtab
2208  * :indentSize=8:tabSize=8:noTabs=false:
2209  */