The rest of the fix for https://bugs.wireshark.org/bugzilla/show_bug.cgi?id=7221
[metze/wireshark/wip.git] / epan / emem.c
1 /* emem.c
2  * Wireshark memory management and garbage collection functions
3  * Ronnie Sahlberg 2005
4  *
5  * $Id$
6  *
7  * Wireshark - Network traffic analyzer
8  * By Gerald Combs <gerald@wireshark.org>
9  * Copyright 1998 Gerald Combs
10  *
11  * This program is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU General Public License
13  * as published by the Free Software Foundation; either version 2
14  * of the License, or (at your option) any later version.
15  *
16  * This program is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19  * GNU General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this program; if not, write to the Free Software
23  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
24  */
25 #ifdef HAVE_CONFIG_H
26 #include "config.h"
27 #endif
28
29 #include <stdio.h>
30 #include <stdlib.h>
31 #include <string.h>
32 #include <stdarg.h>
33 #include <ctype.h>
34
35 #include <time.h>
36 #ifdef HAVE_SYS_TIME_H
37 #include <sys/time.h>
38 #endif
39
40 #ifdef HAVE_UNISTD_H
41 #include <unistd.h>
42 #endif
43
44 #include <glib.h>
45
46 #include "proto.h"
47 #include "emem.h"
48
49 #ifdef _WIN32
50 #include <windows.h>    /* VirtualAlloc, VirtualProtect */
51 #include <process.h>    /* getpid */
52 #endif
53
54 /* Print out statistics about our memory allocations? */
55 /*#define SHOW_EMEM_STATS*/
56
57 /* Do we want to use guardpages? if available */
58 #define WANT_GUARD_PAGES 1
59
60 #ifdef WANT_GUARD_PAGES
61 /* Add guard pages at each end of our allocated memory */
62 #if defined(HAVE_SYSCONF) && defined(HAVE_MMAP) && defined(HAVE_MPROTECT) && defined(HAVE_STDINT_H)
63 #include <stdint.h>
64 #ifdef HAVE_SYS_TYPES_H
65 #include <sys/types.h>
66 #endif
67 #include <sys/mman.h>
68 #if defined(MAP_ANONYMOUS)
69 #define ANON_PAGE_MODE  (MAP_ANONYMOUS|MAP_PRIVATE)
70 #elif defined(MAP_ANON)
71 #define ANON_PAGE_MODE  (MAP_ANON|MAP_PRIVATE)
72 #else
73 #define ANON_PAGE_MODE  (MAP_PRIVATE)   /* have to map /dev/zero */
74 #define NEED_DEV_ZERO
75 #endif
76 #ifdef NEED_DEV_ZERO
77 #include <fcntl.h>
78 static int dev_zero_fd;
79 #define ANON_FD dev_zero_fd
80 #else
81 #define ANON_FD -1
82 #endif
83 #define USE_GUARD_PAGES 1
84 #endif
85 #endif
86
87 /* When required, allocate more memory from the OS in this size chunks */
88 #define EMEM_PACKET_CHUNK_SIZE (10 * 1024 * 1024)
89
90 /* The canary between allocations is at least 8 bytes and up to 16 bytes to
91  * allow future allocations to be 4- or 8-byte aligned.
92  * All but the last byte of the canary are randomly generated; the last byte is
93  * NULL to separate the canary and the pointer to the next canary.
94  *
95  * For example, if the allocation is a multiple of 8 bytes, the canary and
96  * pointer would look like:
97  *   |0|1|2|3|4|5|6|7||0|1|2|3|4|5|6|7|
98  *   |c|c|c|c|c|c|c|0||p|p|p|p|p|p|p|p| (64-bit), or:
99  *   |c|c|c|c|c|c|c|0||p|p|p|p|         (32-bit)
100  *
101  * If the allocation was, for example, 12 bytes, the canary would look like:
102  *        |0|1|2|3|4|5|6|7||0|1|2|3|4|5|6|7|
103  *   [...]|a|a|a|a|c|c|c|c||c|c|c|c|c|c|c|0| (followed by the pointer)
104  */
105 #define EMEM_CANARY_SIZE 8
106 #define EMEM_CANARY_DATA_SIZE (EMEM_CANARY_SIZE * 2 - 1)
107
/* One chunk of allocator backing store, kept on a singly-linked list. */
typedef struct _emem_chunk_t {
        struct _emem_chunk_t *next;     /* next chunk on the free or used list */
        char            *buf;           /* backing store obtained from the OS */
        unsigned int    amount_free_init;       /* usable bytes when the chunk was set up */
        unsigned int    amount_free;    /* usable bytes still unallocated */
        unsigned int    free_offset_init;       /* offset of first usable byte (past any leading guard page) */
        unsigned int    free_offset;    /* offset of the next byte to hand out */
        void            *canary_last;   /* most recently written canary in this chunk, or NULL;
                                         * each canary stores a pointer to the one written before it */
} emem_chunk_t;
117
/* Per-pool allocator state; one instance each for ep_ and se_ memory. */
typedef struct _emem_header_t {
        emem_chunk_t *free_list;        /* chunks that still have room */
        emem_chunk_t *used_list;        /* chunks that could not satisfy a request */

        emem_tree_t *trees;             /* only used by se_mem allocator */

        /* Reference canary pattern; copies of a prefix of this are written
         * after allocations when debug_use_canary is set. */
        guint8 canary[EMEM_CANARY_DATA_SIZE];
        /* Back-end allocator: emem_alloc_chunk or emem_alloc_glib,
         * chosen in emem_init_chunk(). */
        void *(*memory_alloc)(size_t size, struct _emem_header_t *);

        /*
         * Tools like Valgrind and ElectricFence don't work well with memchunks.
         * Export the following environment variables to make {ep|se}_alloc() allocate each
         * object individually.
         *
         * WIRESHARK_DEBUG_EP_NO_CHUNKS
         * WIRESHARK_DEBUG_SE_NO_CHUNKS
         */
        gboolean debug_use_chunks;

        /* Do we want to use canaries?
         * Export the following environment variables to disable/enable canaries
         *
         * WIRESHARK_DEBUG_EP_NO_CANARY
         * For SE memory use of canary is default off as the memory overhead
         * is considerable.
         * WIRESHARK_DEBUG_SE_USE_CANARY
         */
        gboolean debug_use_canary;

        /*  Do we want to verify no one is using a pointer to an ep_ or se_
         *  allocated thing where they shouldn't be?
         *
         * Export WIRESHARK_EP_VERIFY_POINTERS or WIRESHARK_SE_VERIFY_POINTERS
         * to turn this on.
         */
        gboolean debug_verify_pointers;

} emem_header_t;
156
157 static emem_header_t ep_packet_mem;
158 static emem_header_t se_packet_mem;
159
160 /*
161  *  Memory scrubbing is expensive but can be useful to ensure we don't:
162  *    - use memory before initializing it
163  *    - use memory after freeing it
164  *  Export WIRESHARK_DEBUG_SCRUB_MEMORY to turn it on.
165  */
166 static gboolean debug_use_memory_scrubber = FALSE;
167
168 #if defined (_WIN32)
169 static SYSTEM_INFO sysinfo;
170 static OSVERSIONINFO versinfo;
171 static int pagesize;
172 #elif defined(USE_GUARD_PAGES)
173 static intptr_t pagesize;
174 #endif /* _WIN32 / USE_GUARD_PAGES */
175
176 static void *emem_alloc_chunk(size_t size, emem_header_t *mem);
177 static void *emem_alloc_glib(size_t size, emem_header_t *mem);
178
179 /*
180  * Set a canary value to be placed between memchunks.
181  */
182 static void
183 emem_canary_init(guint8 *canary)
184 {
185         int i;
186         static GRand *rand_state = NULL;
187
188         if (rand_state == NULL) {
189                 rand_state = g_rand_new();
190         }
191         for (i = 0; i < EMEM_CANARY_DATA_SIZE; i ++) {
192                 canary[i] = (guint8) g_rand_int_range(rand_state, 1, 0x100);
193         }
194         return;
195 }
196
/*
 * Verify one written canary against the pool's reference pattern
 * 'mem_canary' and follow the chain.
 *
 * Returns the pointer stored after the canary's NUL terminator (the
 * canary written before this one, or NULL at the start of the chain),
 * or (void *)-1 if the canary bytes do not match (memory corruption).
 * If 'len' is non-NULL it receives the canary size including the
 * trailing chain pointer.
 */
static void *
emem_canary_next(guint8 *mem_canary, guint8 *canary, int *len)
{
        void *ptr;
        int i;

        /* The canary is always at least EMEM_CANARY_SIZE bytes, so the
         * first EMEM_CANARY_SIZE-1 bytes must match unconditionally
         * (the pattern contains no NUL bytes). */
        for (i = 0; i < EMEM_CANARY_SIZE-1; i++)
                if (mem_canary[i] != canary[i])
                        return (void *) -1;

        /* Remaining bytes are alignment padding; a NUL marks the end of
         * this canary and the chain pointer immediately follows it. */
        for (; i < EMEM_CANARY_DATA_SIZE; i++) {
                if (canary[i] == '\0') {
                        /* memcpy: the stored pointer need not be aligned
                         * for a direct load. */
                        memcpy(&ptr, &canary[i+1], sizeof(void *));

                        if (len)
                                *len = i + 1 + sizeof(void *);
                        return ptr;
                }

                if (mem_canary[i] != canary[i])
                        return (void *) -1;
        }

        /* No terminator found within the maximum canary size. */
        return (void *) -1;
}
222
223 /*
224  * Given an allocation size, return the amount of room needed for the canary
225  * (with a minimum of 8 bytes) while using the canary to pad to an 8-byte
226  * boundary.
227  */
228 static guint8
229 emem_canary_pad (size_t allocation)
230 {
231         guint8 pad;
232
233         pad = EMEM_CANARY_SIZE - (allocation % EMEM_CANARY_SIZE);
234         if (pad < EMEM_CANARY_SIZE)
235                 pad += EMEM_CANARY_SIZE;
236
237         return pad;
238 }
239
/* used for debugging canaries, will block */
#ifdef DEBUG_INTENSE_CANARY_CHECKS
gboolean intense_canary_checking = FALSE;

/*  used to intensivelly check ep canaries
 *
 *  Walks every canary chain in the ep free list and aborts with g_error()
 *  if any canary no longer matches the reference pattern.  The previous
 *  and current call sites (printf-style 'fmt') are reported so the
 *  corruption can be bracketed between two checkpoints.
 */
void
ep_check_canary_integrity(const char* fmt, ...)
{
        va_list ap;
        /* 'there' remembers the label of the previous successful check;
         * seeded with "Launch" for the very first call. */
        static gchar there[128] = {
                'L','a','u','n','c','h',0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
                0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
                0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
                0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 };
        gchar here[128];
        emem_chunk_t* npc = NULL;

        if (! intense_canary_checking ) return;

        va_start(ap,fmt);
        g_vsnprintf(here, sizeof(here), fmt, ap);
        va_end(ap);

        for (npc = ep_packet_mem.free_list; npc != NULL; npc = npc->next) {
                void *canary_next = npc->canary_last;

                /* Follow the backward chain of canaries in this chunk;
                 * emem_canary_next() returns NULL at the end of the chain
                 * and (void *)-1 on a mismatch. */
                while (canary_next != NULL) {
                        canary_next = emem_canary_next(ep_packet_mem.canary, canary_next, NULL);
                        /* XXX, check if canary_next is inside allocated memory? */

                        if (canary_next == (void *) -1)
                                g_error("Per-packet memory corrupted\nbetween: %s\nand: %s", there, here);
                }
        }

        g_strlcpy(there, here, sizeof(there));
}
#endif
279
280 static void
281 emem_init_chunk(emem_header_t *mem)
282 {
283         if (mem->debug_use_canary)
284                 emem_canary_init(mem->canary);
285
286         if (mem->debug_use_chunks)
287                 mem->memory_alloc = emem_alloc_chunk;
288         else
289                 mem->memory_alloc = emem_alloc_glib;
290 }
291
292
293 /* Initialize the packet-lifetime memory allocation pool.
294  * This function should be called only once when Wireshark or TShark starts
295  * up.
296  */
297 static void
298 ep_init_chunk(void)
299 {
300         ep_packet_mem.free_list=NULL;
301         ep_packet_mem.used_list=NULL;
302         ep_packet_mem.trees=NULL;       /* not used by this allocator */
303
304         ep_packet_mem.debug_use_chunks = (getenv("WIRESHARK_DEBUG_EP_NO_CHUNKS") == NULL);
305         ep_packet_mem.debug_use_canary = ep_packet_mem.debug_use_chunks && (getenv("WIRESHARK_DEBUG_EP_NO_CANARY") == NULL);
306         ep_packet_mem.debug_verify_pointers = (getenv("WIRESHARK_EP_VERIFY_POINTERS") != NULL);
307
308 #ifdef DEBUG_INTENSE_CANARY_CHECKS
309         intense_canary_checking = (getenv("WIRESHARK_DEBUG_EP_INTENSE_CANARY") != NULL);
310 #endif
311
312         emem_init_chunk(&ep_packet_mem);
313 }
314
315 /* Initialize the capture-lifetime memory allocation pool.
316  * This function should be called only once when Wireshark or TShark starts
317  * up.
318  */
319 static void
320 se_init_chunk(void)
321 {
322         se_packet_mem.free_list = NULL;
323         se_packet_mem.used_list = NULL;
324         se_packet_mem.trees = NULL;
325
326         se_packet_mem.debug_use_chunks = (getenv("WIRESHARK_DEBUG_SE_NO_CHUNKS") == NULL);
327         se_packet_mem.debug_use_canary = se_packet_mem.debug_use_chunks && (getenv("WIRESHARK_DEBUG_SE_USE_CANARY") != NULL);
328         se_packet_mem.debug_verify_pointers = (getenv("WIRESHARK_SE_VERIFY_POINTERS") != NULL);
329
330         emem_init_chunk(&se_packet_mem);
331 }
332
/*  Initialize all the allocators here.
 *  This function should be called only once when Wireshark or TShark starts
 *  up.
 */
void
emem_init(void)
{
        ep_init_chunk();
        se_init_chunk();

        if (getenv("WIRESHARK_DEBUG_SCRUB_MEMORY"))
                debug_use_memory_scrubber  = TRUE;

#if defined (_WIN32)
        /* Set up our guard page info for Win32 */
        GetSystemInfo(&sysinfo);
        pagesize = sysinfo.dwPageSize;

        /* calling GetVersionEx using the OSVERSIONINFO structure.
         * OSVERSIONINFOEX requires Win NT4 with SP6 or newer NT Versions.
         * OSVERSIONINFOEX will fail on Win9x and older NT Versions.
         * See also:
         * http://msdn.microsoft.com/library/en-us/sysinfo/base/getversionex.asp
         * http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfo_str.asp
         * http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfoex_str.asp
         */
        versinfo.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
        GetVersionEx(&versinfo);
        /* versinfo is consulted later in emem_create_chunk_gp(), where
         * VirtualProtect() failures are tolerated on Win9x platforms. */

#elif defined(USE_GUARD_PAGES)
        pagesize = sysconf(_SC_PAGESIZE);
#ifdef NEED_DEV_ZERO
        /* No MAP_ANONYMOUS/MAP_ANON on this platform; guard-page chunks
         * will map /dev/zero instead (see ANON_FD). */
        dev_zero_fd = ws_open("/dev/zero", O_RDWR);
        g_assert(dev_zero_fd != -1);
#endif
#endif /* _WIN32 / USE_GUARD_PAGES */
}
370
#ifdef SHOW_EMEM_STATS
#define NUM_ALLOC_DIST 10
static guint allocations[NUM_ALLOC_DIST] = { 0 };
static guint total_no_chunks = 0;

/*
 * Dump statistics for both the EP and SE pools to stderr: chunk counts,
 * bytes used/free, canary overhead and an allocation-size histogram.
 * Debug-only; compiled in when SHOW_EMEM_STATS is defined.
 */
static void
print_alloc_stats(void)
{
        guint num_chunks = 0;
        guint num_allocs = 0;
        guint total_used = 0;
        guint total_allocation = 0;
        guint total_free = 0;
        guint used_for_canaries = 0;
        guint total_headers;
        guint i;
        emem_chunk_t *chunk;
        guint total_space_allocated_from_os, total_space_wasted;
        gboolean ep_stat=TRUE;

        fprintf(stderr, "\n-------- EP allocator statistics --------\n");
        fprintf(stderr, "%s chunks, %s canaries, %s memory scrubber\n",
               ep_packet_mem.debug_use_chunks ? "Using" : "Not using",
               ep_packet_mem.debug_use_canary ? "using" : "not using",
               debug_use_memory_scrubber ? "using" : "not using");

        /* Fixed: was "!(free_list || !used_list)", i.e. "no free chunks but
         * some used chunks" -- the opposite of "no memory allocated". */
        if (!(ep_packet_mem.free_list || ep_packet_mem.used_list)) {
                fprintf(stderr, "No memory allocated\n");
                ep_stat = FALSE;
        }
        if (ep_packet_mem.debug_use_chunks && ep_stat) {
                /* Nothing interesting without chunks */
                /*  Only look at the used_list since those chunks are fully
                 *  used.  Looking at the free list would skew our view of what
                 *  we have wasted.
                 */
                for (chunk = ep_packet_mem.used_list; chunk; chunk = chunk->next) {
                        num_chunks++;
                        total_used += (chunk->amount_free_init - chunk->amount_free);
                        total_allocation += chunk->amount_free_init;
                        total_free += chunk->amount_free;
                }
                if (num_chunks > 0) {
                        fprintf (stderr, "\n");
                        fprintf (stderr, "\n---- Buffer space ----\n");
                        fprintf (stderr, "\tChunk allocation size: %10u\n", EMEM_PACKET_CHUNK_SIZE);
                        fprintf (stderr, "\t*    Number of chunks: %10u\n", num_chunks);
                        fprintf (stderr, "\t-------------------------------------------\n");
                        fprintf (stderr, "\t= %u (%u including guard pages) total space used for buffers\n",
                        total_allocation, EMEM_PACKET_CHUNK_SIZE * num_chunks);
                        fprintf (stderr, "\t-------------------------------------------\n");
                        total_space_allocated_from_os = total_allocation
                                + sizeof(emem_chunk_t) * num_chunks;
                        fprintf (stderr, "Total allocated from OS: %u\n\n",
                                total_space_allocated_from_os);
                }else{
                        fprintf (stderr, "No fully used chunks, nothing to do\n");
                }
                /* Reset stats before the SE pass below */
                num_chunks = 0;
                num_allocs = 0;
                total_used = 0;
                total_allocation = 0;
                total_free = 0;
                used_for_canaries = 0;
        }


        fprintf(stderr, "\n-------- SE allocator statistics --------\n");
        fprintf(stderr, "Total number of chunk allocations %u\n",
                total_no_chunks);
        fprintf(stderr, "%s chunks, %s canaries\n",
               se_packet_mem.debug_use_chunks ? "Using" : "Not using",
               se_packet_mem.debug_use_canary ? "using" : "not using");

        /* Fixed: same inverted "no memory allocated" test as above. */
        if (!(se_packet_mem.free_list || se_packet_mem.used_list)) {
                fprintf(stderr, "No memory allocated\n");
                return;
        }

        if (!se_packet_mem.debug_use_chunks )
                return; /* Nothing interesting without chunks?? */

        /*  Only look at the used_list since those chunks are fully used.
         *  Looking at the free list would skew our view of what we have wasted.
         */
        for (chunk = se_packet_mem.used_list; chunk; chunk = chunk->next) {
                num_chunks++;
                total_used += (chunk->amount_free_init - chunk->amount_free);
                total_allocation += chunk->amount_free_init;
                total_free += chunk->amount_free;

                if (se_packet_mem.debug_use_canary){
                        void *ptr = chunk->canary_last;
                        int len;

                        /* Walk the canary chain to total the canary overhead */
                        while (ptr != NULL) {
                                ptr = emem_canary_next(se_packet_mem.canary, ptr, &len);

                                if (ptr == (void *) -1)
                                        g_error("Memory corrupted");
                                used_for_canaries += len;
                        }
                }
        }

        if (num_chunks == 0) {

                fprintf (stderr, "No fully used chunks, nothing to do\n");
                return;
        }

        fprintf (stderr, "\n");
        fprintf (stderr, "---------- Allocations from the OS ----------\n");
        fprintf (stderr, "---- Headers ----\n");
        /* Cast: sizeof yields size_t, which is not 'long' on LLP64 (Win64) */
        fprintf (stderr, "\t(    Chunk header size: %10lu\n",
                 (unsigned long) sizeof(emem_chunk_t));
        fprintf (stderr, "\t*     Number of chunks: %10u\n", num_chunks);
        fprintf (stderr, "\t-------------------------------------------\n");

        total_headers = sizeof(emem_chunk_t) * num_chunks;
        fprintf (stderr, "\t= %u bytes used for headers\n", total_headers);
        fprintf (stderr, "\n---- Buffer space ----\n");
        fprintf (stderr, "\tChunk allocation size: %10u\n",
                 EMEM_PACKET_CHUNK_SIZE);
        fprintf (stderr, "\t*    Number of chunks: %10u\n", num_chunks);
        fprintf (stderr, "\t-------------------------------------------\n");
        fprintf (stderr, "\t= %u (%u including guard pages) bytes used for buffers\n",
                total_allocation, EMEM_PACKET_CHUNK_SIZE * num_chunks);
        fprintf (stderr, "\t-------------------------------------------\n");
        total_space_allocated_from_os = (EMEM_PACKET_CHUNK_SIZE * num_chunks)
                                        + total_headers;
        fprintf (stderr, "Total bytes allocated from the OS: %u\n\n",
                total_space_allocated_from_os);

        for (i = 0; i < NUM_ALLOC_DIST; i++)
                num_allocs += allocations[i];

        fprintf (stderr, "---------- Allocations from the SE pool ----------\n");
        fprintf (stderr, "                Number of SE allocations: %10u\n",
                 num_allocs);
        fprintf (stderr, "             Bytes used (incl. canaries): %10u\n",
                 total_used);
        fprintf (stderr, "                 Bytes used for canaries: %10u\n",
                 used_for_canaries);
        fprintf (stderr, "Bytes unused (wasted, excl. guard pages): %10u\n",
                 total_allocation - total_used);
        fprintf (stderr, "Bytes unused (wasted, incl. guard pages): %10u\n\n",
                 total_space_allocated_from_os - total_used);

        fprintf (stderr, "---------- Statistics ----------\n");
        fprintf (stderr, "Average SE allocation size (incl. canaries): %6.2f\n",
                (float)total_used/(float)num_allocs);
        fprintf (stderr, "Average SE allocation size (excl. canaries): %6.2f\n",
                (float)(total_used - used_for_canaries)/(float)num_allocs);
        fprintf (stderr, "        Average wasted bytes per allocation: %6.2f\n",
                (total_allocation - total_used)/(float)num_allocs);
        /* NOTE(review): only ONE header size is added here, not
         * total_headers -- looks like it should be sizeof(emem_chunk_t)
         * * num_chunks; confirm intent before changing the report. */
        total_space_wasted = (total_allocation - total_used)
                + (sizeof(emem_chunk_t));
        fprintf (stderr, " Space used for headers + unused allocation: %8u\n",
                total_space_wasted);
        fprintf (stderr, "--> %% overhead/waste: %4.2f\n",
                100 * (float)total_space_wasted/(float)total_space_allocated_from_os);

        fprintf (stderr, "\nAllocation distribution (sizes include canaries):\n");
        for (i = 0; i < (NUM_ALLOC_DIST-1); i++)
                fprintf (stderr, "size < %5d: %8u\n", 32<<i, allocations[i]);
        fprintf (stderr, "size > %5d: %8u\n", 32<<i, allocations[i]);
}
#endif
541
542 static gboolean
543 emem_verify_pointer_list(const emem_chunk_t *chunk_list, const void *ptr)
544 {
545         const gchar *cptr = ptr;
546         const emem_chunk_t *chunk;
547
548         for (chunk = chunk_list; chunk; chunk = chunk->next) {
549                 if (cptr >= (chunk->buf + chunk->free_offset_init) && cptr < (chunk->buf + chunk->free_offset))
550                         return TRUE;
551         }
552         return FALSE;
553 }
554
555 static gboolean
556 emem_verify_pointer(const emem_header_t *hdr, const void *ptr)
557 {
558         return emem_verify_pointer_list(hdr->free_list, ptr) || emem_verify_pointer_list(hdr->used_list, ptr);
559 }
560
561 gboolean
562 ep_verify_pointer(const void *ptr)
563 {
564         if (ep_packet_mem.debug_verify_pointers)
565                 return emem_verify_pointer(&ep_packet_mem, ptr);
566         else
567                 return FALSE;
568 }
569
570 gboolean
571 se_verify_pointer(const void *ptr)
572 {
573         if (se_packet_mem.debug_verify_pointers)
574                 return emem_verify_pointer(&se_packet_mem, ptr);
575         else
576                 return FALSE;
577 }
578
579 static void
580 emem_scrub_memory(char *buf, size_t size, gboolean alloc)
581 {
582         guint scrubbed_value;
583         guint offset;
584
585         if (!debug_use_memory_scrubber)
586                 return;
587
588         if (alloc) /* this memory is being allocated */
589                 scrubbed_value = 0xBADDCAFE;
590         else /* this memory is being freed */
591                 scrubbed_value = 0xDEADBEEF;
592
593         /*  We shouldn't need to check the alignment of the starting address
594          *  since this is malloc'd memory (or 'pagesize' bytes into malloc'd
595          *  memory).
596          */
597
598         /* XXX - if the above is *NOT* true, we should use memcpy here,
599          * in order to avoid problems on alignment-sensitive platforms, e.g.
600          * http://stackoverflow.com/questions/108866/is-there-memset-that-accepts-integers-larger-than-char
601          */
602
603         for (offset = 0; offset + sizeof(guint) <= size; offset += sizeof(guint))
604                 *(guint*)(void*)(buf+offset) = scrubbed_value;
605
606         /* Initialize the last bytes, if any */
607         if (offset < size) {
608                 *(guint8*)(buf+offset) = scrubbed_value >> 24;
609                 offset++;
610                 if (offset < size) {
611                         *(guint8*)(buf+offset) = (scrubbed_value >> 16) & 0xFF;
612                         offset++;
613                         if (offset < size) {
614                                 *(guint8*)(buf+offset) = (scrubbed_value >> 8) & 0xFF;
615                         }
616                 }
617         }
618
619
620 }
621
/*
 * Allocate a fresh chunk header plus 'size' bytes of backing store from
 * the OS (VirtualAlloc on Win32, mmap where guard pages are usable,
 * g_malloc otherwise).  On OS allocation failure the header is freed and
 * we abort() if WIRESHARK_ABORT_ON_OUT_OF_MEMORY is set, else throw
 * OutOfMemoryError.
 */
static emem_chunk_t *
emem_create_chunk(size_t size)
{
        emem_chunk_t *npc;

        npc = g_new(emem_chunk_t, 1);
        npc->next = NULL;
        npc->canary_last = NULL;

#if defined (_WIN32)
        /*
         * MSDN documents VirtualAlloc/VirtualProtect at
         * http://msdn.microsoft.com/library/en-us/memory/base/creating_guard_pages.asp
         */

        /* XXX - is MEM_COMMIT|MEM_RESERVE correct? */
        npc->buf = VirtualAlloc(NULL, size,
                MEM_COMMIT|MEM_RESERVE, PAGE_READWRITE);

        if (npc->buf == NULL) {
                g_free(npc);
                if (getenv("WIRESHARK_ABORT_ON_OUT_OF_MEMORY"))
                        abort();
                else
                        THROW(OutOfMemoryError);
        }

#elif defined(USE_GUARD_PAGES)
        /* Anonymous mapping, or /dev/zero via ANON_FD where no
         * MAP_ANONYMOUS/MAP_ANON flag exists. */
        npc->buf = mmap(NULL, size,
                PROT_READ|PROT_WRITE, ANON_PAGE_MODE, ANON_FD, 0);

        if (npc->buf == MAP_FAILED) {
                g_free(npc);
                if (getenv("WIRESHARK_ABORT_ON_OUT_OF_MEMORY"))
                        abort();
                else
                        THROW(OutOfMemoryError);
        }

#else /* Is there a draft in here? */
        npc->buf = g_malloc(size);
        /* g_malloc() can't fail */
#endif

#ifdef SHOW_EMEM_STATS
        total_no_chunks++;
#endif

        /* Whole buffer usable until emem_create_chunk_gp() carves out
         * guard pages. */
        npc->amount_free = npc->amount_free_init = (unsigned int) size;
        npc->free_offset = npc->free_offset_init = 0;
        return npc;
}
674
/*
 * Return a chunk's backing store and its header to the OS.
 */
static void
emem_destroy_chunk(emem_chunk_t *npc)
{
#if defined (_WIN32)
        VirtualFree(npc->buf, 0, MEM_RELEASE);
#elif defined(USE_GUARD_PAGES)
        /* NOTE(review): for guard-page chunks, amount_free_init was shrunk
         * by emem_create_chunk_gp() and npc->buf is the mapping start, so
         * this munmap() appears to unmap less than was mmap()ed (guard
         * pages and mapping tail stay mapped).  Confirm intended; a proper
         * fix would need the original mapping size stored in the chunk. */
        munmap(npc->buf, npc->amount_free_init);
#else
        g_free(npc->buf);
#endif
#ifdef SHOW_EMEM_STATS
        total_no_chunks--;
#endif
        g_free(npc);
}
690
691 static emem_chunk_t *
692 emem_create_chunk_gp(size_t size)
693 {
694 #if defined (_WIN32)
695         BOOL ret;
696         char *buf_end, *prot1, *prot2;
697         DWORD oldprot;
698 #elif defined(USE_GUARD_PAGES)
699         int ret;
700         char *buf_end, *prot1, *prot2;
701 #endif /* _WIN32 / USE_GUARD_PAGES */
702         emem_chunk_t *npc;
703
704         npc = emem_create_chunk(size);
705
706 #if defined (_WIN32)
707         buf_end = npc->buf + size;
708
709         /* Align our guard pages on page-sized boundaries */
710         prot1 = (char *) ((((int) npc->buf + pagesize - 1) / pagesize) * pagesize);
711         prot2 = (char *) ((((int) buf_end - (1 * pagesize)) / pagesize) * pagesize);
712
713         ret = VirtualProtect(prot1, pagesize, PAGE_NOACCESS, &oldprot);
714         g_assert(ret != 0 || versinfo.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS);
715         ret = VirtualProtect(prot2, pagesize, PAGE_NOACCESS, &oldprot);
716         g_assert(ret != 0 || versinfo.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS);
717
718         npc->amount_free_init = (unsigned int) (prot2 - prot1 - pagesize);
719         npc->free_offset_init = (unsigned int) (prot1 - npc->buf) + pagesize;
720 #elif defined(USE_GUARD_PAGES)
721         buf_end = npc->buf + size;
722
723         /* Align our guard pages on page-sized boundaries */
724         prot1 = (char *) ((((intptr_t) npc->buf + pagesize - 1) / pagesize) * pagesize);
725         prot2 = (char *) ((((intptr_t) buf_end - (1 * pagesize)) / pagesize) * pagesize);
726
727         ret = mprotect(prot1, pagesize, PROT_NONE);
728         g_assert(ret != -1);
729         ret = mprotect(prot2, pagesize, PROT_NONE);
730         g_assert(ret != -1);
731
732         npc->amount_free_init = prot2 - prot1 - pagesize;
733         npc->free_offset_init = (prot1 - npc->buf) + pagesize;
734 #else
735         npc->amount_free_init = size;
736         npc->free_offset_init = 0;
737 #endif /* USE_GUARD_PAGES */
738
739         npc->amount_free = npc->amount_free_init;
740         npc->free_offset = npc->free_offset_init;
741         return npc;
742 }
743
744 static void *
745 emem_alloc_chunk(size_t size, emem_header_t *mem)
746 {
747         void *buf;
748
749         size_t asize = size;
750         gboolean use_canary = mem->debug_use_canary;
751         guint8 pad;
752         emem_chunk_t *free_list;
753
754         /* Allocate room for at least 8 bytes of canary plus some padding
755          * so the canary ends on an 8-byte boundary.
756          * But first add the room needed for the pointer to the next canary
757          * (so the entire allocation will end on an 8-byte boundary).
758          */
759          if (use_canary) {
760                 asize += sizeof(void *);
761                 pad = emem_canary_pad(asize);
762         } else
763                 pad = (WS_MEM_ALIGN - (asize & (WS_MEM_ALIGN-1))) & (WS_MEM_ALIGN-1);
764
765         asize += pad;
766
767 #ifdef SHOW_EMEM_STATS
768         /* Do this check here so we can include the canary size */
769         if (mem == &se_packet_mem) {
770                 if (asize < 32)
771                         allocations[0]++;
772                 else if (asize < 64)
773                         allocations[1]++;
774                 else if (asize < 128)
775                         allocations[2]++;
776                 else if (asize < 256)
777                         allocations[3]++;
778                 else if (asize < 512)
779                         allocations[4]++;
780                 else if (asize < 1024)
781                         allocations[5]++;
782                 else if (asize < 2048)
783                         allocations[6]++;
784                 else if (asize < 4096)
785                         allocations[7]++;
786                 else if (asize < 8192)
787                         allocations[8]++;
788                 else if (asize < 16384)
789                         allocations[8]++;
790                 else
791                         allocations[(NUM_ALLOC_DIST-1)]++;
792         }
793 #endif
794
795         /* make sure we dont try to allocate too much (arbitrary limit) */
796         DISSECTOR_ASSERT(size<(EMEM_PACKET_CHUNK_SIZE>>2));
797
798         if (!mem->free_list)
799                 mem->free_list = emem_create_chunk_gp(EMEM_PACKET_CHUNK_SIZE);
800
801         /* oops, we need to allocate more memory to serve this request
802          * than we have free. move this node to the used list and try again
803          */
804         if(asize > mem->free_list->amount_free) {
805                 emem_chunk_t *npc;
806                 npc=mem->free_list;
807                 mem->free_list=mem->free_list->next;
808                 npc->next=mem->used_list;
809                 mem->used_list=npc;
810
811                 if (!mem->free_list)
812                         mem->free_list = emem_create_chunk_gp(EMEM_PACKET_CHUNK_SIZE);
813         }
814
815         free_list = mem->free_list;
816
817         buf = free_list->buf + free_list->free_offset;
818
819         free_list->amount_free -= (unsigned int) asize;
820         free_list->free_offset += (unsigned int) asize;
821
822         if (use_canary) {
823                 char *cptr = (char *)buf + size;
824
825                 memcpy(cptr, mem->canary, pad-1);
826                 cptr[pad-1] = '\0';
827                 memcpy(cptr + pad, &free_list->canary_last, sizeof(void *));
828
829                 free_list->canary_last = cptr;
830         }
831
832         return buf;
833 }
834
/* Fallback allocator used when chunked allocation is disabled: each request
 * gets its own g_malloc()ed buffer plus a chunk header on mem->used_list so
 * emem_free_all() can find and g_free() it later.
 */
static void *
emem_alloc_glib(size_t size, emem_header_t *mem)
{
	emem_chunk_t *npc;

	npc=g_new(emem_chunk_t, 1);
	npc->next=mem->used_list;
	npc->buf=g_malloc(size);
	npc->canary_last = NULL;	/* no canaries on glib-backed buffers */
	mem->used_list=npc;
	/* There's no padding/alignment involved (from our point of view) when
	 * we fetch the memory directly from the system pool, so WYSIWYG */
	npc->free_offset = npc->free_offset_init = 0;
	npc->amount_free = npc->amount_free_init = (unsigned int) size;

	return npc->buf;
}
852
/* allocate 'size' amount of memory via the configured allocator
 * (chunked or glib), then scrub it so callers never see stale data. */
static void *
emem_alloc(size_t size, emem_header_t *mem)
{
	void *buf = mem->memory_alloc(size, mem);

	/*  XXX - this is a waste of time if the allocator function is going to
	 *  memset this straight back to 0.
	 */
	emem_scrub_memory(buf, size, TRUE);

	return buf;
}
866
/* allocate 'size' amount of memory with an allocation lifetime until the
 * next packet.
 */
void *
ep_alloc(size_t size)
{
	/* ep_packet_mem is the ephemeral (per-packet) pool. */
	return emem_alloc(size, &ep_packet_mem);
}
875
/* allocate 'size' amount of memory with an allocation lifetime until the
 * next capture.
 */
void *
se_alloc(size_t size)
{
	/* se_packet_mem is the seasonal (per-capture) pool. */
	return emem_alloc(size, &se_packet_mem);
}
884
/* Allocate one fixed-size item from a memory slab.  Freed items are reused
 * before any new chunk space is consumed. */
void *
sl_alloc(struct ws_memory_slab *mem_chunk)
{
	emem_chunk_t *chunk;
	void *ptr;

	/* XXX, debug_use_slices -> fallback to g_slice_alloc0 */

	/* The free list is threaded through the freed items themselves:
	 * each freed item stores the pointer to the next freed item in its
	 * first sizeof(void *) bytes (written by sl_free()). */
	if ((mem_chunk->freed != NULL)) {
		ptr = mem_chunk->freed;
		memcpy(&mem_chunk->freed, ptr, sizeof(void *));
		return ptr;
	}

	/* No freed item to reuse: carve one from the newest chunk, creating
	 * a fresh chunk if there is none or it can't hold another item. */
	if (!(chunk = mem_chunk->chunk_list) || chunk->amount_free < (guint) mem_chunk->item_size) {
		size_t alloc_size = mem_chunk->item_size * mem_chunk->count;

		/* align to page-size */
#if defined (_WIN32) || defined(USE_GUARD_PAGES)
		alloc_size = (alloc_size + (pagesize - 1)) & ~(pagesize - 1);
#endif

		chunk = emem_create_chunk(alloc_size);	/* NOTE: using version without guard pages! */
		chunk->next = mem_chunk->chunk_list;
		mem_chunk->chunk_list = chunk;
	}

	ptr = chunk->buf + chunk->free_offset;
	chunk->free_offset += mem_chunk->item_size;
	chunk->amount_free -= mem_chunk->item_size;

	return ptr;
}
918
/* Return one item to its slab by pushing it on the intrusive free list
 * (the old list head is stored inside the freed item itself). */
void
sl_free(struct ws_memory_slab *mem_chunk, gpointer ptr)
{
	/* XXX, debug_use_slices -> fallback to g_slice_free1 */

	/* XXX, abort if ptr not found in emem_verify_pointer_list()? */
	if (ptr != NULL /* && emem_verify_pointer_list(mem_chunk->chunk_list, ptr) */) {
		memcpy(ptr, &(mem_chunk->freed), sizeof(void *));
		mem_chunk->freed = ptr;
	}
}
930
/* Packet-lifetime allocation, zero-filled. */
void *
ep_alloc0(size_t size)
{
	void *ptr = ep_alloc(size);

	memset(ptr, 0, size);
	return ptr;
}
936
/* Capture-lifetime allocation, zero-filled. */
void *
se_alloc0(size_t size)
{
	void *ptr = se_alloc(size);

	memset(ptr, 0, size);
	return ptr;
}
942
943 void *
944 sl_alloc0(struct ws_memory_slab *mem_chunk)
945 {
946         return memset(sl_alloc(mem_chunk), '\0', mem_chunk->item_size);
947 }
948
949 static gchar *
950 emem_strdup(const gchar *src, void *allocator(size_t))
951 {
952         guint len;
953         gchar *dst;
954
955         /* If str is NULL, just return the string "<NULL>" so that the callers don't
956          * have to bother checking it.
957          */
958         if(!src)
959                 return "<NULL>";
960
961         len = (guint) strlen(src);
962         dst = memcpy(allocator(len+1), src, len+1);
963
964         return dst;
965 }
966
/* Packet-lifetime strdup(); returns "<NULL>" for a NULL source. */
gchar *
ep_strdup(const gchar *src)
{
	return emem_strdup(src, ep_alloc);
}
972
/* Capture-lifetime strdup(); returns "<NULL>" for a NULL source. */
gchar *
se_strdup(const gchar *src)
{
	return emem_strdup(src, se_alloc);
}
978
979 static gchar *
980 emem_strndup(const gchar *src, size_t len, void *allocator(size_t))
981 {
982         gchar *dst = allocator(len+1);
983         guint i;
984
985         for (i = 0; (i < len) && src[i]; i++)
986                 dst[i] = src[i];
987
988         dst[i] = '\0';
989
990         return dst;
991 }
992
/* Packet-lifetime strndup(); result is always NUL-terminated. */
gchar *
ep_strndup(const gchar *src, size_t len)
{
	return emem_strndup(src, len, ep_alloc);
}
998
/* Capture-lifetime strndup(); result is always NUL-terminated. */
gchar *
se_strndup(const gchar *src, size_t len)
{
	return emem_strndup(src, len, se_alloc);
}
1004
1005
1006
/* Packet-lifetime copy of an arbitrary 'len'-byte buffer. */
void *
ep_memdup(const void* src, size_t len)
{
	return memcpy(ep_alloc(len), src, len);
}
1012
/* Capture-lifetime copy of an arbitrary 'len'-byte buffer. */
void *
se_memdup(const void* src, size_t len)
{
	return memcpy(se_alloc(len), src, len);
}
1018
/* vprintf into a freshly allocated buffer from the supplied allocator.
 * The va_list is consumed twice (sizing pass + formatting pass), hence
 * the G_VA_COPY.
 */
static gchar *
emem_strdup_vprintf(const gchar *fmt, va_list ap, void *allocator(size_t))
{
	va_list ap2;
	gsize len;
	gchar* dst;

	G_VA_COPY(ap2, ap);

	/* Upper bound on the formatted size, including the terminating NUL. */
	len = g_printf_string_upper_bound(fmt, ap);

	dst = allocator(len+1);
	g_vsnprintf (dst, (gulong) len, fmt, ap2);
	va_end(ap2);

	return dst;
}
1036
/* Packet-lifetime vprintf-style formatted string. */
gchar *
ep_strdup_vprintf(const gchar *fmt, va_list ap)
{
	return emem_strdup_vprintf(fmt, ap, ep_alloc);
}
1042
/* Capture-lifetime vprintf-style formatted string. */
gchar *
se_strdup_vprintf(const gchar* fmt, va_list ap)
{
	return emem_strdup_vprintf(fmt, ap, se_alloc);
}
1048
/* Packet-lifetime printf-style formatted string. */
gchar *
ep_strdup_printf(const gchar *fmt, ...)
{
	va_list ap;
	gchar *dst;

	va_start(ap, fmt);
	dst = ep_strdup_vprintf(fmt, ap);
	va_end(ap);
	return dst;
}
1060
/* Capture-lifetime printf-style formatted string. */
gchar *
se_strdup_printf(const gchar *fmt, ...)
{
	va_list ap;
	gchar *dst;

	va_start(ap, fmt);
	dst = se_strdup_vprintf(fmt, ap);
	va_end(ap);
	return dst;
}
1072
/* g_strsplit()-like tokenizer with packet lifetime: splits 'string' on the
 * multi-character separator 'sep' into at most 'max_tokens' tokens and
 * returns a NULL-terminated vector of pointers.  Works by overwriting each
 * separator occurrence with NULs in an ep-allocated copy, then collecting
 * the token start addresses with a small state machine. */
gchar **
ep_strsplit(const gchar* string, const gchar* sep, int max_tokens)
{
	gchar* splitted;
	gchar* s;
	guint tokens;
	guint str_len;
	guint sep_len;
	guint i;
	gchar** vec;
	enum { AT_START, IN_PAD, IN_TOKEN } state;
	guint curr_tok = 0;

	/* Nothing to split without a string and a non-empty separator. */
	if (    ! string
	     || ! sep
	     || ! sep[0])
		return NULL;

	s = splitted = ep_strdup(string);
	str_len = (guint) strlen(splitted);
	sep_len = (guint) strlen(sep);

	if (max_tokens < 1) max_tokens = INT_MAX;

	tokens = 1;


	/* First pass: replace each separator occurrence with NULs and count
	 * the resulting tokens (upper bound for the vector size). */
	while (tokens <= (guint)max_tokens && ( s = strstr(s,sep) )) {
		tokens++;

		for(i=0; i < sep_len; i++ )
			s[i] = '\0';

		s += sep_len;

	}

	vec = ep_alloc_array(gchar*,tokens+1);
	state = AT_START;

	/* Second pass: scan the NUL-riddled copy and record where each token
	 * begins.  IN_PAD means we are inside a run of NULs between tokens. */
	for (i=0; i< str_len; i++) {
		switch(state) {
			case AT_START:
				switch(splitted[i]) {
					case '\0':
						state  = IN_PAD;
						continue;
					default:
						vec[curr_tok] = &(splitted[i]);
						curr_tok++;
						state = IN_TOKEN;
						continue;
				}
			case IN_TOKEN:
				switch(splitted[i]) {
					case '\0':
						state = IN_PAD;
						/* FALLTHROUGH */
					default:
						continue;
				}
			case IN_PAD:
				switch(splitted[i]) {
					default:
						vec[curr_tok] = &(splitted[i]);
						curr_tok++;
						state = IN_TOKEN;
						/* FALLTHROUGH */
					case '\0':
						continue;
				}
		}
	}

	vec[curr_tok] = NULL;

	return vec;
}
1149
/* g_strconcat()-like concatenation with packet lifetime.  The argument list
 * must be terminated by a NULL pointer.  Two passes over the varargs: first
 * to size the result, then to copy. */
gchar *
ep_strconcat(const gchar *string1, ...)
{
	gsize   l;
	va_list args;
	gchar   *s;
	gchar   *concat;
	gchar   *ptr;

	if (!string1)
		return NULL;

	/* Pass 1: total length, including the terminating NUL. */
	l = 1 + strlen(string1);
	va_start(args, string1);
	s = va_arg(args, gchar*);
	while (s) {
		l += strlen(s);
		s = va_arg(args, gchar*);
	}
	va_end(args);

	concat = ep_alloc(l);
	ptr = concat;

	/* Pass 2: copy each piece; g_stpcpy returns the position of the
	 * written NUL, so the pieces append without re-scanning. */
	ptr = g_stpcpy(ptr, string1);
	va_start(args, string1);
	s = va_arg(args, gchar*);
	while (s) {
		ptr = g_stpcpy(ptr, s);
		s = va_arg(args, gchar*);
	}
	va_end(args);

	return concat;
}
1185
1186
1187
/* release all allocated memory back to the pool.  In chunked mode the chunks
 * are kept and reset for reuse after their canaries are verified; in glib
 * mode every buffer is scrubbed and g_free()d. */
static void
emem_free_all(emem_header_t *mem)
{
	gboolean use_chunks = mem->debug_use_chunks;

	emem_chunk_t *npc;
	emem_tree_t *tree_list;

	/* move all used chunks over to the free list */
	while(mem->used_list){
		npc=mem->used_list;
		mem->used_list=mem->used_list->next;
		npc->next=mem->free_list;
		mem->free_list=npc;
	}

	/* clear them all out */
	npc = mem->free_list;
	while (npc != NULL) {
		if (use_chunks) {
			/* Walk the canary chain backwards from the last
			 * allocation, verifying each canary; a corrupted
			 * canary aborts via g_error(). */
			while (npc->canary_last != NULL) {
				npc->canary_last = emem_canary_next(mem->canary, npc->canary_last, NULL);
				/* XXX, check if canary_last is inside allocated memory? */

				if (npc->canary_last == (void *) -1)
					g_error("Memory corrupted");
			}

			/* Scrub only the region actually handed out, then
			 * rewind the chunk for reuse. */
			emem_scrub_memory((npc->buf + npc->free_offset_init),
					  (npc->free_offset - npc->free_offset_init),
					  FALSE);

			npc->amount_free = npc->amount_free_init;
			npc->free_offset = npc->free_offset_init;
			npc = npc->next;
		} else {
			emem_chunk_t *next = npc->next;

			emem_scrub_memory(npc->buf, npc->amount_free_init, FALSE);

			g_free(npc->buf);
			g_free(npc);
			npc = next;
		}
	}

	if (!use_chunks) {
		/* We've freed all this memory already */
		mem->free_list = NULL;
	}

	/* release/reset all allocated trees */
	for(tree_list=mem->trees;tree_list;tree_list=tree_list->next){
		tree_list->tree=NULL;
	}
}
1245
/* release all packet-lifetime allocated memory back to the pool. */
void
ep_free_all(void)
{
	emem_free_all(&ep_packet_mem);
}
1252
/* release all capture-lifetime allocated memory back to the pool. */
void
se_free_all(void)
{
#ifdef SHOW_EMEM_STATS
	print_alloc_stats();
#endif

	emem_free_all(&se_packet_mem);
}
1263
1264 void
1265 sl_free_all(struct ws_memory_slab *mem_chunk)
1266 {
1267         emem_chunk_t *chunk_list = mem_chunk->chunk_list;
1268
1269         mem_chunk->chunk_list = NULL;
1270         mem_chunk->freed = NULL;
1271         while (chunk_list) {
1272                 emem_chunk_t *chunk = chunk_list;
1273
1274                 chunk_list = chunk_list->next;
1275                 emem_destroy_chunk(chunk);
1276         }
1277 }
1278
/* Create a new packet-lifetime stack; it starts with one zeroed sentinel
 * frame so push/pop never see a NULL head. */
ep_stack_t
ep_stack_new(void) {
	ep_stack_t s = ep_new(struct _ep_stack_frame_t*);
	*s = ep_new0(struct _ep_stack_frame_t);
	return s;
}
1285
/*  for ep_stack_t we'll keep the popped frames so we reuse them instead
of allocating new ones.
*/

/* Push 'data' on the stack and return it.  A previously popped frame above
 * the current head is reused when available. */
void *
ep_stack_push(ep_stack_t stack, void* data)
{
	struct _ep_stack_frame_t* frame;
	struct _ep_stack_frame_t* head = (*stack);

	if (head->above) {
		/* reuse a frame left over from an earlier pop */
		frame = head->above;
	} else {
		frame = ep_new(struct _ep_stack_frame_t);
		head->above = frame;
		frame->below = head;
		frame->above = NULL;
	}

	frame->payload = data;
	(*stack) = frame;

	return data;
}
1310
1311 void *
1312 ep_stack_pop(ep_stack_t stack)
1313 {
1314
1315         if ((*stack)->below) {
1316                 (*stack) = (*stack)->below;
1317                 return (*stack)->above->payload;
1318         } else {
1319                 return NULL;
1320         }
1321 }
1322
/* Create a capture-lifetime tree.  The header itself is g_malloc()ed and
 * registered on se_packet_mem.trees so emem_free_all() can reset it; node
 * memory comes from se_alloc. */
emem_tree_t *
se_tree_create(int type, const char *name)
{
	emem_tree_t *tree_list;

	tree_list=g_malloc(sizeof(emem_tree_t));
	tree_list->next=se_packet_mem.trees;
	tree_list->type=type;
	tree_list->tree=NULL;
	tree_list->name=name;
	tree_list->malloc=se_alloc;
	se_packet_mem.trees=tree_list;

	return tree_list;
}
1338
1339 void *
1340 emem_tree_lookup32(emem_tree_t *se_tree, guint32 key)
1341 {
1342         emem_tree_node_t *node;
1343
1344         node=se_tree->tree;
1345
1346         while(node){
1347                 if(key==node->key32){
1348                         return node->data;
1349                 }
1350                 if(key<node->key32){
1351                         node=node->left;
1352                         continue;
1353                 }
1354                 if(key>node->key32){
1355                         node=node->right;
1356                         continue;
1357                 }
1358         }
1359         return NULL;
1360 }
1361
/* Lookup the entry whose key is the largest one less than or equal to 'key'
 * (per the _le suffix); returns NULL when no such entry exists. */
void *
emem_tree_lookup32_le(emem_tree_t *se_tree, guint32 key)
{
	emem_tree_node_t *node;

	node=se_tree->tree;

	if(!node){
		return NULL;
	}


	/* Descend as for an exact lookup, but stop at the last node visited
	 * instead of falling off the tree. */
	while(node){
		if(key==node->key32){
			return node->data;
		}
		if(key<node->key32){
			if(node->left){
				node=node->left;
				continue;
			} else {
				break;
			}
		}
		if(key>node->key32){
			if(node->right){
				node=node->right;
				continue;
			} else {
				break;
			}
		}
	}


	if(!node){
		return NULL;
	}

	/* If we are still at the root of the tree this means that this node
	 * is either smaller than the search key and then we return this
	 * node or else there is no smaller key available and then
	 * we return NULL.
	 */
	if(!node->parent){
		if(key>node->key32){
			return node->data;
		} else {
			return NULL;
		}
	}

	if(node->parent->left==node){
		/* left child */

		if(key>node->key32){
			/* if this is a left child and its key is smaller than
			 * the search key, then this is the node we want.
			 */
			return node->data;
		} else {
			/* if this is a left child and its key is bigger than
			 * the search key, we have to check if any
			 * of our ancestors are smaller than the search key.
			 */
			while(node){
				if(key>node->key32){
					return node->data;
				}
				node=node->parent;
			}
			return NULL;
		}
	} else {
		/* right child */

		if(node->key32<key){
			/* if this is the right child and its key is smaller
			 * than the search key then this is the one we want.
			 */
			return node->data;
		} else {
			/* if this is the right child and its key is larger
			 * than the search key then our parent is the one we
			 * want.
			 */
			return node->parent->data;
		}
	}

}
1453
1454
/* Parent of a tree node, or NULL at the root. */
static inline emem_tree_node_t *
emem_tree_parent(emem_tree_node_t *node)
{
	return node->parent;
}
1460
1461 static inline emem_tree_node_t *
1462 emem_tree_grandparent(emem_tree_node_t *node)
1463 {
1464         emem_tree_node_t *parent;
1465
1466         parent=emem_tree_parent(node);
1467         if(parent){
1468                 return parent->parent;
1469         }
1470         return NULL;
1471 }
1472
1473 static inline emem_tree_node_t *
1474 emem_tree_uncle(emem_tree_node_t *node)
1475 {
1476         emem_tree_node_t *parent, *grandparent;
1477
1478         parent=emem_tree_parent(node);
1479         if(!parent){
1480                 return NULL;
1481         }
1482         grandparent=emem_tree_parent(parent);
1483         if(!grandparent){
1484                 return NULL;
1485         }
1486         if(parent==grandparent->left){
1487                 return grandparent->right;
1488         }
1489         return grandparent->left;
1490 }
1491
1492 static inline void rb_insert_case1(emem_tree_t *se_tree, emem_tree_node_t *node);
1493 static inline void rb_insert_case2(emem_tree_t *se_tree, emem_tree_node_t *node);
1494
/* Standard left rotation around 'node': node->right becomes node's parent.
 * Assumes node->right is non-NULL (guaranteed by the red-black insert
 * fixup that calls this).  The statement order is significant. */
static inline void
rotate_left(emem_tree_t *se_tree, emem_tree_node_t *node)
{
	/* re-point the parent's (or the root's) child link to node->right */
	if(node->parent){
		if(node->parent->left==node){
			node->parent->left=node->right;
		} else {
			node->parent->right=node->right;
		}
	} else {
		se_tree->tree=node->right;
	}
	node->right->parent=node->parent;
	node->parent=node->right;
	/* node adopts the rotated child's old left subtree */
	node->right=node->right->left;
	if(node->right){
		node->right->parent=node;
	}
	node->parent->left=node;
}
1515
/* Standard right rotation around 'node': node->left becomes node's parent.
 * Mirror image of rotate_left(); assumes node->left is non-NULL. */
static inline void
rotate_right(emem_tree_t *se_tree, emem_tree_node_t *node)
{
	/* re-point the parent's (or the root's) child link to node->left */
	if(node->parent){
		if(node->parent->left==node){
			node->parent->left=node->left;
		} else {
			node->parent->right=node->left;
		}
	} else {
		se_tree->tree=node->left;
	}
	node->left->parent=node->parent;
	node->parent=node->left;
	/* node adopts the rotated child's old right subtree */
	node->left=node->left->right;
	if(node->left){
		node->left->parent=node;
	}
	node->parent->right=node;
}
1536
/* Red-black insert fixup, final case: node and parent are same-side
 * children.  Recolor parent black / grandparent red, then rotate the
 * grandparent toward the opposite side. */
static inline void
rb_insert_case5(emem_tree_t *se_tree, emem_tree_node_t *node)
{
	emem_tree_node_t *grandparent;
	emem_tree_node_t *parent;

	parent=emem_tree_parent(node);
	grandparent=emem_tree_parent(parent);
	parent->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
	grandparent->u.rb_color=EMEM_TREE_RB_COLOR_RED;
	if( (node==parent->left) && (parent==grandparent->left) ){
		rotate_right(se_tree, grandparent);
	} else {
		rotate_left(se_tree, grandparent);
	}
}
1553
/* Red-black insert fixup, case 4: node and parent are opposite-side
 * children ("zig-zag").  Rotate the parent to straighten the path, then
 * fall through to case 5 on the former parent. */
static inline void
rb_insert_case4(emem_tree_t *se_tree, emem_tree_node_t *node)
{
	emem_tree_node_t *grandparent;
	emem_tree_node_t *parent;

	parent=emem_tree_parent(node);
	grandparent=emem_tree_parent(parent);
	if(!grandparent){
		return;
	}
	if( (node==parent->right) && (parent==grandparent->left) ){
		rotate_left(se_tree, parent);
		node=node->left;
	} else if( (node==parent->left) && (parent==grandparent->right) ){
		rotate_right(se_tree, parent);
		node=node->right;
	}
	rb_insert_case5(se_tree, node);
}
1574
/* Red-black insert fixup, case 3: both parent and uncle are red.  Recolor
 * them black, make the grandparent red, and restart the fixup from the
 * grandparent; otherwise continue with case 4. */
static inline void
rb_insert_case3(emem_tree_t *se_tree, emem_tree_node_t *node)
{
	emem_tree_node_t *grandparent;
	emem_tree_node_t *parent;
	emem_tree_node_t *uncle;

	uncle=emem_tree_uncle(node);
	if(uncle && (uncle->u.rb_color==EMEM_TREE_RB_COLOR_RED)){
		parent=emem_tree_parent(node);
		parent->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
		uncle->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
		grandparent=emem_tree_grandparent(node);
		grandparent->u.rb_color=EMEM_TREE_RB_COLOR_RED;
		rb_insert_case1(se_tree, grandparent);
	} else {
		rb_insert_case4(se_tree, node);
	}
}
1594
/* Red-black insert fixup, case 2: a black parent means no red-red
 * violation, so the tree is already valid; otherwise go to case 3. */
static inline void
rb_insert_case2(emem_tree_t *se_tree, emem_tree_node_t *node)
{
	emem_tree_node_t *parent;

	parent=emem_tree_parent(node);
	/* parent is always non-NULL here */
	if(parent->u.rb_color==EMEM_TREE_RB_COLOR_BLACK){
		return;
	}
	rb_insert_case3(se_tree, node);
}
1607
/* Red-black insert fixup, case 1 (entry point): a node with no parent is
 * the root and is simply colored black; otherwise go to case 2. */
static inline void
rb_insert_case1(emem_tree_t *se_tree, emem_tree_node_t *node)
{
	emem_tree_node_t *parent;

	parent=emem_tree_parent(node);
	if(!parent){
		node->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
		return;
	}
	rb_insert_case2(se_tree, node);
}
1620
/* insert a new node in the tree. if this node matches an already existing node
 * then just replace the data for that node.  New nodes are allocated with the
 * tree's own allocator and, for red-black trees, rebalanced afterwards. */
void
emem_tree_insert32(emem_tree_t *se_tree, guint32 key, void *data)
{
	emem_tree_node_t *node;

	node=se_tree->tree;

	/* is this the first node ?*/
	if(!node){
		node=se_tree->malloc(sizeof(emem_tree_node_t));
		switch(se_tree->type){
		case EMEM_TREE_TYPE_RED_BLACK:
			/* the root is always black */
			node->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
			break;
		}
		node->parent=NULL;
		node->left=NULL;
		node->right=NULL;
		node->key32=key;
		node->data=data;
		node->u.is_subtree = EMEM_TREE_NODE_IS_DATA;
		se_tree->tree=node;
		return;
	}

	/* it was not the new root so walk the tree until we find where to
	 * insert this new leaf.
	 */
	while(1){
		/* this node already exists, so just replace the data pointer*/
		if(key==node->key32){
			node->data=data;
			return;
		}
		if(key<node->key32) {
			if(!node->left){
				/* new node to the left */
				emem_tree_node_t *new_node;
				new_node=se_tree->malloc(sizeof(emem_tree_node_t));
				node->left=new_node;
				new_node->parent=node;
				new_node->left=NULL;
				new_node->right=NULL;
				new_node->key32=key;
				new_node->data=data;
				new_node->u.is_subtree=EMEM_TREE_NODE_IS_DATA;
				node=new_node;
				break;
			}
			node=node->left;
			continue;
		}
		if(key>node->key32) {
			if(!node->right){
				/* new node to the right */
				emem_tree_node_t *new_node;
				new_node=se_tree->malloc(sizeof(emem_tree_node_t));
				node->right=new_node;
				new_node->parent=node;
				new_node->left=NULL;
				new_node->right=NULL;
				new_node->key32=key;
				new_node->data=data;
				new_node->u.is_subtree=EMEM_TREE_NODE_IS_DATA;
				node=new_node;
				break;
			}
			node=node->right;
			continue;
		}
	}

	/* node will now point to the newly created node */
	switch(se_tree->type){
	case EMEM_TREE_TYPE_RED_BLACK:
		/* new leaves start red; the fixup restores the RB invariants */
		node->u.rb_color=EMEM_TREE_RB_COLOR_RED;
		rb_insert_case1(se_tree, node);
		break;
	}
}
1703
/* Look up 'key'; if absent, insert a new node whose data is produced by
 * func(ud) (used e.g. to lazily create subtrees).  Returns the existing or
 * newly created data pointer.  Mirrors emem_tree_insert32() except the data
 * comes from the callback and 'is_subtree' tags the node kind. */
static void *
lookup_or_insert32(emem_tree_t *se_tree, guint32 key, void*(*func)(void*),void* ud, int is_subtree)
{
	emem_tree_node_t *node;

	node=se_tree->tree;

	/* is this the first node ?*/
	if(!node){
		node=se_tree->malloc(sizeof(emem_tree_node_t));
		switch(se_tree->type){
			case EMEM_TREE_TYPE_RED_BLACK:
				node->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
				break;
		}
		node->parent=NULL;
		node->left=NULL;
		node->right=NULL;
		node->key32=key;
		node->data= func(ud);
		node->u.is_subtree = is_subtree;
		se_tree->tree=node;
		return node->data;
	}

	/* it was not the new root so walk the tree until we find where to
		* insert this new leaf.
		*/
	while(1){
		/* this node already exists, so just return the data pointer*/
		if(key==node->key32){
			return node->data;
		}
		if(key<node->key32) {
			if(!node->left){
				/* new node to the left */
				emem_tree_node_t *new_node;
				new_node=se_tree->malloc(sizeof(emem_tree_node_t));
				node->left=new_node;
				new_node->parent=node;
				new_node->left=NULL;
				new_node->right=NULL;
				new_node->key32=key;
				new_node->data= func(ud);
				new_node->u.is_subtree = is_subtree;
				node=new_node;
				break;
			}
			node=node->left;
			continue;
		}
		if(key>node->key32) {
			if(!node->right){
				/* new node to the right */
				emem_tree_node_t *new_node;
				new_node=se_tree->malloc(sizeof(emem_tree_node_t));
				node->right=new_node;
				new_node->parent=node;
				new_node->left=NULL;
				new_node->right=NULL;
				new_node->key32=key;
				new_node->data= func(ud);
				new_node->u.is_subtree = is_subtree;
				node=new_node;
				break;
			}
			node=node->right;
			continue;
		}
	}

	/* node will now point to the newly created node */
	switch(se_tree->type){
		case EMEM_TREE_TYPE_RED_BLACK:
			/* new leaves start red; the fixup restores the RB invariants */
			node->u.rb_color=EMEM_TREE_RB_COLOR_RED;
			rb_insert_case1(se_tree, node);
			break;
	}

	return node->data;
}
1785
1786 /* When the se data is released, this entire tree will dissapear as if it
1787  * never existed including all metadata associated with the tree.
1788  */
1789 emem_tree_t *
1790 se_tree_create_non_persistent(int type, const char *name)
1791 {
1792         emem_tree_t *tree_list;
1793
1794         tree_list=se_alloc(sizeof(emem_tree_t));
1795         tree_list->next=NULL;
1796         tree_list->type=type;
1797         tree_list->tree=NULL;
1798         tree_list->name=name;
1799         tree_list->malloc=se_alloc;
1800
1801         return tree_list;
1802 }
1803
1804 /* This tree is PErmanent and will never be released
1805  */
1806 emem_tree_t *
1807 pe_tree_create(int type, const char *name)
1808 {
1809         emem_tree_t *tree_list;
1810
1811         tree_list=g_new(emem_tree_t, 1);
1812         tree_list->next=NULL;
1813         tree_list->type=type;
1814         tree_list->tree=NULL;
1815         tree_list->name=name;
1816         tree_list->malloc=(void *(*)(size_t)) g_malloc;
1817
1818         return tree_list;
1819 }
1820
1821 /* create another (sub)tree using the same memory allocation scope
1822  * as the parent tree.
1823  */
1824 static emem_tree_t *
1825 emem_tree_create_subtree(emem_tree_t *parent_tree, const char *name)
1826 {
1827         emem_tree_t *tree_list;
1828
1829         tree_list=parent_tree->malloc(sizeof(emem_tree_t));
1830         tree_list->next=NULL;
1831         tree_list->type=parent_tree->type;
1832         tree_list->tree=NULL;
1833         tree_list->name=name;
1834         tree_list->malloc=parent_tree->malloc;
1835
1836         return tree_list;
1837 }
1838
1839 static void *
1840 create_sub_tree(void* d)
1841 {
1842         emem_tree_t *se_tree = d;
1843         return emem_tree_create_subtree(se_tree, "subtree");
1844 }
1845
1846 /* insert a new node in the tree. if this node matches an already existing node
1847  * then just replace the data for that node */
1848
1849 void
1850 emem_tree_insert32_array(emem_tree_t *se_tree, emem_tree_key_t *key, void *data)
1851 {
1852         emem_tree_t *next_tree;
1853
1854         if((key[0].length<1)||(key[0].length>100)){
1855                 DISSECTOR_ASSERT_NOT_REACHED();
1856         }
1857         if((key[0].length==1)&&(key[1].length==0)){
1858                 emem_tree_insert32(se_tree, *key[0].key, data);
1859                 return;
1860         }
1861
1862         next_tree=lookup_or_insert32(se_tree, *key[0].key, create_sub_tree, se_tree, EMEM_TREE_NODE_IS_SUBTREE);
1863
1864         if(key[0].length==1){
1865                 key++;
1866         } else {
1867                 key[0].length--;
1868                 key[0].key++;
1869         }
1870         emem_tree_insert32_array(next_tree, key, data);
1871 }
1872
1873 void *
1874 emem_tree_lookup32_array(emem_tree_t *se_tree, emem_tree_key_t *key)
1875 {
1876         emem_tree_t *next_tree;
1877
1878         if(!se_tree || !key) return NULL; /* prevent searching on NULL pointer */
1879
1880         if((key[0].length<1)||(key[0].length>100)){
1881                 DISSECTOR_ASSERT_NOT_REACHED();
1882         }
1883         if((key[0].length==1)&&(key[1].length==0)){
1884                 return emem_tree_lookup32(se_tree, *key[0].key);
1885         }
1886         next_tree=emem_tree_lookup32(se_tree, *key[0].key);
1887         if(!next_tree){
1888                 return NULL;
1889         }
1890         if(key[0].length==1){
1891                 key++;
1892         } else {
1893                 key[0].length--;
1894                 key[0].key++;
1895         }
1896         return emem_tree_lookup32_array(next_tree, key);
1897 }
1898
1899 void *
1900 emem_tree_lookup32_array_le(emem_tree_t *se_tree, emem_tree_key_t *key)
1901 {
1902         emem_tree_t *next_tree;
1903
1904         if(!se_tree || !key) return NULL; /* prevent searching on NULL pointer */
1905
1906         if((key[0].length<1)||(key[0].length>100)){
1907                 DISSECTOR_ASSERT_NOT_REACHED();
1908         }
1909         if((key[0].length==1)&&(key[1].length==0)){ /* last key in key array */
1910                 return emem_tree_lookup32_le(se_tree, *key[0].key);
1911         }
1912         next_tree=emem_tree_lookup32(se_tree, *key[0].key);
1913         /* key[0].key not found so find le and return */
1914         if(!next_tree)
1915                 return emem_tree_lookup32_le(se_tree, *key[0].key);
1916
1917         /* key[0].key found so inc key pointer and try again */
1918         if(key[0].length==1){
1919                 key++;
1920         } else {
1921                 key[0].length--;
1922                 key[0].key++;
1923         }
1924         return emem_tree_lookup32_array_le(next_tree, key);
1925 }
1926
/* Strings are stored as an array of uint32s containing the string characters,
   with 4 characters packed into each uint32.
   The first byte of the string is stored as the most significant byte.
   If the string's length is not a multiple of 4 characters, the last
   uint32 containing string bytes is padded with 0 bytes.
   After the uint32s containing the string, there is one final terminator
   uint32 with the value 0x00000001.
*/
1935 void
1936 emem_tree_insert_string(emem_tree_t* se_tree, const gchar* k, void* v, guint32 flags)
1937 {
1938         emem_tree_key_t key[2];
1939         guint32 *aligned=NULL;
1940         guint32 len = (guint32) strlen(k);
1941         guint32 divx = (len+3)/4+1;
1942         guint32 i;
1943         guint32 tmp;
1944
1945         aligned = g_malloc(divx * sizeof (guint32));
1946
1947         /* pack the bytes one one by one into guint32s */
1948         tmp = 0;
1949         for (i = 0;i < len;i++) {
1950                 unsigned char ch;
1951
1952                 ch = (unsigned char)k[i];
1953                 if (flags & EMEM_TREE_STRING_NOCASE) {
1954                         if(isupper(ch)) {
1955                                 ch = tolower(ch);
1956                         }
1957                 }
1958                 tmp <<= 8;
1959                 tmp |= ch;
1960                 if (i%4 == 3) {
1961                         aligned[i/4] = tmp;
1962                         tmp = 0;
1963                 }
1964         }
1965         /* add required padding to the last uint32 */
1966         if (i%4 != 0) {
1967                 while (i%4 != 0) {
1968                         i++;
1969                         tmp <<= 8;
1970                 }
1971                 aligned[i/4-1] = tmp;
1972         }
1973
1974         /* add the terminator */
1975         aligned[divx-1] = 0x00000001;
1976
1977         key[0].length = divx;
1978         key[0].key = aligned;
1979         key[1].length = 0;
1980         key[1].key = NULL;
1981
1982
1983         emem_tree_insert32_array(se_tree, key, v);
1984         g_free(aligned);
1985 }
1986
1987 void *
1988 emem_tree_lookup_string(emem_tree_t* se_tree, const gchar* k, guint32 flags)
1989 {
1990         emem_tree_key_t key[2];
1991         guint32 *aligned=NULL;
1992         guint32 len = (guint) strlen(k);
1993         guint32 divx = (len+3)/4+1;
1994         guint32 i;
1995         guint32 tmp;
1996         void *ret;
1997
1998         aligned = g_malloc(divx * sizeof (guint32));
1999
2000         /* pack the bytes one one by one into guint32s */
2001         tmp = 0;
2002         for (i = 0;i < len;i++) {
2003                 unsigned char ch;
2004
2005                 ch = (unsigned char)k[i];
2006                 if (flags & EMEM_TREE_STRING_NOCASE) {
2007                         if(isupper(ch)) {
2008                                 ch = tolower(ch);
2009                         }
2010                 }
2011                 tmp <<= 8;
2012                 tmp |= ch;
2013                 if (i%4 == 3) {
2014                         aligned[i/4] = tmp;
2015                         tmp = 0;
2016                 }
2017         }
2018         /* add required padding to the last uint32 */
2019         if (i%4 != 0) {
2020                 while (i%4 != 0) {
2021                         i++;
2022                         tmp <<= 8;
2023                 }
2024                 aligned[i/4-1] = tmp;
2025         }
2026
2027         /* add the terminator */
2028         aligned[divx-1] = 0x00000001;
2029
2030         key[0].length = divx;
2031         key[0].key = aligned;
2032         key[1].length = 0;
2033         key[1].key = NULL;
2034
2035
2036         ret = emem_tree_lookup32_array(se_tree, key);
2037         g_free(aligned);
2038         return ret;
2039 }
2040
2041 static gboolean
2042 emem_tree_foreach_nodes(emem_tree_node_t* node, tree_foreach_func callback, void *user_data)
2043 {
2044         gboolean stop_traverse = FALSE;
2045
2046         if (!node)
2047                 return FALSE;
2048
2049         if(node->left) {
2050                 stop_traverse = emem_tree_foreach_nodes(node->left, callback, user_data);
2051                 if (stop_traverse) {
2052                         return TRUE;
2053                 }
2054         }
2055
2056         if (node->u.is_subtree == EMEM_TREE_NODE_IS_SUBTREE) {
2057                 stop_traverse = emem_tree_foreach(node->data, callback, user_data);
2058         } else {
2059                 stop_traverse = callback(node->data, user_data);
2060         }
2061
2062         if (stop_traverse) {
2063                 return TRUE;
2064         }
2065
2066         if(node->right) {
2067                 stop_traverse = emem_tree_foreach_nodes(node->right, callback, user_data);
2068                 if (stop_traverse) {
2069                         return TRUE;
2070                 }
2071         }
2072
2073         return FALSE;
2074 }
2075
2076 gboolean
2077 emem_tree_foreach(emem_tree_t* emem_tree, tree_foreach_func callback, void *user_data)
2078 {
2079         if (!emem_tree)
2080                 return FALSE;
2081
2082         if(!emem_tree->tree)
2083                 return FALSE;
2084
2085         return emem_tree_foreach_nodes(emem_tree->tree, callback, user_data);
2086 }
2087
2088
2089 static void
2090 emem_tree_print_nodes(emem_tree_node_t* node, int level)
2091 {
2092         int i;
2093
2094         if (!node)
2095                 return;
2096
2097         for(i=0;i<level;i++){
2098                 printf("    ");
2099         }
2100
2101         printf("NODE:%p parent:%p left:0x%p right:%px key:%d data:%p\n",
2102                 (void *)node,(void *)(node->parent),(void *)(node->left),(void *)(node->right),
2103                 (node->key32),node->data);
2104         if(node->left)
2105                 emem_tree_print_nodes(node->left, level+1);
2106         if(node->right)
2107                 emem_tree_print_nodes(node->right, level+1);
2108 }
2109 void
2110 emem_print_tree(emem_tree_t* emem_tree)
2111 {
2112         if (!emem_tree)
2113                 return;
2114
2115         printf("EMEM tree type:%d name:%s tree:%p\n",emem_tree->type,emem_tree->name,(void *)(emem_tree->tree));
2116         if(emem_tree->tree)
2117                 emem_tree_print_nodes(emem_tree->tree, 0);
2118 }
2119
2120 /*
2121  * String buffers
2122  */
2123
2124 /*
2125  * Presumably we're using these routines for building strings for the tree.
2126  * Use ITEM_LABEL_LENGTH as the basis for our default lengths.
2127  */
2128
2129 #define DEFAULT_STRBUF_LEN (ITEM_LABEL_LENGTH / 10)
2130 #define MAX_STRBUF_LEN 65536
2131
2132 static gsize
2133 next_size(gsize cur_alloc_len, gsize wanted_alloc_len, gsize max_alloc_len)
2134 {
2135         if (max_alloc_len < 1 || max_alloc_len > MAX_STRBUF_LEN) {
2136                 max_alloc_len = MAX_STRBUF_LEN;
2137         }
2138
2139         if (cur_alloc_len < 1) {
2140                 cur_alloc_len = DEFAULT_STRBUF_LEN;
2141         }
2142
2143         while (cur_alloc_len < wanted_alloc_len) {
2144                 cur_alloc_len *= 2;
2145         }
2146
2147         return cur_alloc_len < max_alloc_len ? cur_alloc_len : max_alloc_len;
2148 }
2149
2150 static void
2151 ep_strbuf_grow(emem_strbuf_t *strbuf, gsize wanted_alloc_len)
2152 {
2153         gsize new_alloc_len;
2154         gchar *new_str;
2155
2156         if (!strbuf || (wanted_alloc_len <= strbuf->alloc_len) || (strbuf->alloc_len >= strbuf->max_alloc_len)) {
2157                 return;
2158         }
2159
2160         new_alloc_len = next_size(strbuf->alloc_len, wanted_alloc_len, strbuf->max_alloc_len);
2161         new_str = ep_alloc(new_alloc_len);
2162         g_strlcpy(new_str, strbuf->str, new_alloc_len);
2163
2164         strbuf->alloc_len = new_alloc_len;
2165         strbuf->str = new_str;
2166 }
2167
2168 emem_strbuf_t *
2169 ep_strbuf_sized_new(gsize alloc_len, gsize max_alloc_len)
2170 {
2171         emem_strbuf_t *strbuf;
2172
2173         strbuf = ep_alloc(sizeof(emem_strbuf_t));
2174
2175         if ((max_alloc_len == 0) || (max_alloc_len > MAX_STRBUF_LEN))
2176                 max_alloc_len = MAX_STRBUF_LEN;
2177         if (alloc_len == 0)
2178                 alloc_len = 1;
2179         else if (alloc_len > max_alloc_len)
2180                 alloc_len = max_alloc_len;
2181
2182         strbuf->str = ep_alloc(alloc_len);
2183         strbuf->str[0] = '\0';
2184
2185         strbuf->len = 0;
2186         strbuf->alloc_len = alloc_len;
2187         strbuf->max_alloc_len = max_alloc_len;
2188
2189         return strbuf;
2190 }
2191
2192 emem_strbuf_t *
2193 ep_strbuf_new(const gchar *init)
2194 {
2195         emem_strbuf_t *strbuf;
2196
2197         strbuf = ep_strbuf_sized_new(next_size(0, init?strlen(init)+1:0, 0), 0);  /* +1 for NULL terminator */
2198         if (init) {
2199                 gsize full_len;
2200                 full_len = g_strlcpy(strbuf->str, init, strbuf->alloc_len);
2201                 strbuf->len = MIN(full_len, strbuf->alloc_len-1);
2202         }
2203
2204         return strbuf;
2205 }
2206
2207 emem_strbuf_t *
2208 ep_strbuf_new_label(const gchar *init)
2209 {
2210         emem_strbuf_t *strbuf;
2211         gsize full_len;
2212
2213         /* Be optimistic: Allocate default size strbuf string and only      */
2214         /*  request an increase if needed.                                  */
2215         /* XXX: Is it reasonable to assume that much of the usage of        */
2216         /*  ep_strbuf_new_label will have  init==NULL or                    */
2217         /*   strlen(init) < DEFAULT_STRBUF_LEN) ???                         */
2218         strbuf = ep_strbuf_sized_new(DEFAULT_STRBUF_LEN, ITEM_LABEL_LENGTH);
2219
2220         if (!init)
2221                 return strbuf;
2222
2223         /* full_len does not count the trailing '\0'.                       */
2224         full_len = g_strlcpy(strbuf->str, init, strbuf->alloc_len);
2225         if (full_len < strbuf->alloc_len) {
2226                 strbuf->len += full_len;
2227         } else {
2228                 strbuf = ep_strbuf_sized_new(full_len+1, ITEM_LABEL_LENGTH);
2229                 full_len = g_strlcpy(strbuf->str, init, strbuf->alloc_len);
2230                 strbuf->len = MIN(full_len, strbuf->alloc_len-1);
2231         }
2232
2233         return strbuf;
2234 }
2235
2236 emem_strbuf_t *
2237 ep_strbuf_append(emem_strbuf_t *strbuf, const gchar *str)
2238 {
2239         gsize add_len, full_len;
2240
2241         if (!strbuf || !str || str[0] == '\0') {
2242                 return strbuf;
2243         }
2244
2245         /* Be optimistic; try the g_strlcpy first & see if enough room.                 */
2246         /* Note: full_len doesn't count the trailing '\0'; add_len does allow for same  */
2247         add_len = strbuf->alloc_len - strbuf->len;
2248         full_len = g_strlcpy(&strbuf->str[strbuf->len], str, add_len);
2249         if (full_len < add_len) {
2250                 strbuf->len += full_len;
2251         } else {
2252                 strbuf->str[strbuf->len] = '\0'; /* end string at original length again */
2253                 ep_strbuf_grow(strbuf, strbuf->len + full_len + 1);
2254                 add_len = strbuf->alloc_len - strbuf->len;
2255                 full_len = g_strlcpy(&strbuf->str[strbuf->len], str, add_len);
2256                 strbuf->len += MIN(add_len-1, full_len);
2257         }
2258
2259         return strbuf;
2260 }
2261
/*
 * Append vprintf-style formatted text to strbuf, growing it if needed (up to
 * its cap, past which the output is truncated).
 *
 * The format may be rendered twice, so two usable va_lists are needed: 'ap'
 * is consumed by the first g_vsnprintf attempt and 'ap2' (a G_VA_COPY of it)
 * by the retry after growing the buffer.
 * NOTE(review): strbuf is not NULL-checked here, unlike ep_strbuf_printf;
 * callers are presumed to pass a valid buffer.
 */
void
ep_strbuf_append_vprintf(emem_strbuf_t *strbuf, const gchar *format, va_list ap)
{
	va_list ap2;
	gsize add_len, full_len;

	G_VA_COPY(ap2, ap);

	/* Be optimistic; try the g_vsnprintf first & see if enough room.               */
	/* Note: full_len doesn't count the trailing '\0'; add_len does allow for same. */
	add_len = strbuf->alloc_len - strbuf->len;
	full_len = g_vsnprintf(&strbuf->str[strbuf->len], (gulong) add_len, format, ap);
	if (full_len < add_len) {
		strbuf->len += full_len;
	} else {
		/* Output was truncated: restore the original string, grow the
		 * buffer to fit, and format again using the copied va_list. */
		strbuf->str[strbuf->len] = '\0'; /* end string at original length again */
		ep_strbuf_grow(strbuf, strbuf->len + full_len + 1);
		add_len = strbuf->alloc_len - strbuf->len;
		full_len = g_vsnprintf(&strbuf->str[strbuf->len], (gulong) add_len, format, ap2);
		strbuf->len += MIN(add_len-1, full_len);
	}

	va_end(ap2);
}
2286
/* Append printf-style formatted text to strbuf: varargs front end for
 * ep_strbuf_append_vprintf().
 */
void
ep_strbuf_append_printf(emem_strbuf_t *strbuf, const gchar *format, ...)
{
	va_list ap;

	va_start(ap, format);
	ep_strbuf_append_vprintf(strbuf, format, ap);
	va_end(ap);
}
2296
2297 void
2298 ep_strbuf_printf(emem_strbuf_t *strbuf, const gchar *format, ...)
2299 {
2300         va_list ap;
2301         if (!strbuf) {
2302                 return;
2303         }
2304
2305         strbuf->len = 0;
2306
2307         va_start(ap, format);
2308         ep_strbuf_append_vprintf(strbuf, format, ap);
2309         va_end(ap);
2310 }
2311
2312 emem_strbuf_t *
2313 ep_strbuf_append_c(emem_strbuf_t *strbuf, const gchar c)
2314 {
2315         if (!strbuf) {
2316                 return strbuf;
2317         }
2318
2319         /* +1 for the new character & +1 for the trailing '\0'. */
2320         if (strbuf->alloc_len < strbuf->len + 1 + 1) {
2321                 ep_strbuf_grow(strbuf, strbuf->len + 1 + 1);
2322         }
2323         if (strbuf->alloc_len >= strbuf->len + 1 + 1) {
2324                 strbuf->str[strbuf->len] = c;
2325                 strbuf->len++;
2326                 strbuf->str[strbuf->len] = '\0';
2327         }
2328
2329         return strbuf;
2330 }
2331
2332 emem_strbuf_t *
2333 ep_strbuf_truncate(emem_strbuf_t *strbuf, gsize len)
2334 {
2335         if (!strbuf || len >= strbuf->len) {
2336                 return strbuf;
2337         }
2338
2339         strbuf->str[len] = '\0';
2340         strbuf->len = len;
2341
2342         return strbuf;
2343 }
2344
2345 /*
2346  * Editor modelines
2347  *
2348  * Local Variables:
2349  * c-basic-offset: 8
2350  * tab-width: 8
2351  * indent-tabs-mode: t
2352  * End:
2353  *
2354  * ex: set shiftwidth=8 tabstop=8 noexpandtab:
2355  * :indentSize=8:tabSize=8:noTabs=false:
2356  */