1 /* emem.c
2  * Wireshark memory management and garbage collection functions
3  * Ronnie Sahlberg 2005
4  *
5  * $Id$
6  *
7  * Wireshark - Network traffic analyzer
8  * By Gerald Combs <gerald@wireshark.org>
9  * Copyright 1998 Gerald Combs
10  *
11  * This program is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU General Public License
13  * as published by the Free Software Foundation; either version 2
14  * of the License, or (at your option) any later version.
15  *
16  * This program is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19  * GNU General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this program; if not, write to the Free Software
23  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
24  */
25 #ifdef HAVE_CONFIG_H
26 #include "config.h"
27 #endif
28
29 #include <stdio.h>
30 #include <stdlib.h>
31 #include <string.h>
32 #include <stdarg.h>
33 #include <ctype.h>
34
35 #include <time.h>
36 #ifdef HAVE_SYS_TIME_H
37 #include <sys/time.h>
38 #endif
39
40 #ifdef HAVE_UNISTD_H
41 #include <unistd.h>
42 #endif
43
44 #include <glib.h>
45
46 #include "proto.h"
47 #include "emem.h"
48
49 #ifdef _WIN32
50 #include <windows.h>    /* VirtualAlloc, VirtualProtect */
51 #include <process.h>    /* getpid */
52 #endif
53
54 /* Print out statistics about our memory allocations? */
55 /*#define SHOW_EMEM_STATS*/
56
57 /* Do we want to use guard pages, if available? */
58 #define WANT_GUARD_PAGES 1
59
60 #ifdef WANT_GUARD_PAGES
61 /* Add guard pages at each end of our allocated memory */
62 #if defined(HAVE_SYSCONF) && defined(HAVE_MMAP) && defined(HAVE_MPROTECT) && defined(HAVE_STDINT_H)
63 #include <stdint.h>
64 #ifdef HAVE_SYS_TYPES_H
65 #include <sys/types.h>
66 #endif
67 #include <sys/mman.h>
68 #if defined(MAP_ANONYMOUS)
69 #define ANON_PAGE_MODE  (MAP_ANONYMOUS|MAP_PRIVATE)
70 #elif defined(MAP_ANON)
71 #define ANON_PAGE_MODE  (MAP_ANON|MAP_PRIVATE)
72 #else
73 #define ANON_PAGE_MODE  (MAP_PRIVATE)   /* have to map /dev/zero */
74 #define NEED_DEV_ZERO
75 #endif
76 #ifdef NEED_DEV_ZERO
77 #include <fcntl.h>
78 static int dev_zero_fd;
79 #define ANON_FD dev_zero_fd
80 #else
81 #define ANON_FD -1
82 #endif
83 #define USE_GUARD_PAGES 1
84 #endif
85 #endif
86
87 /* When required, allocate more memory from the OS in chunks of this size */
88 #define EMEM_PACKET_CHUNK_SIZE (10 * 1024 * 1024)
89
90 #define EMEM_CANARY_SIZE 8
91 #define EMEM_CANARY_DATA_SIZE (EMEM_CANARY_SIZE * 2 - 1)
92
93 typedef struct _emem_chunk_t {
94         struct _emem_chunk_t *next;
95         char            *buf;
96         unsigned int    amount_free_init;
97         unsigned int    amount_free;
98         unsigned int    free_offset_init;
99         unsigned int    free_offset;
100         void            *canary_last;
101 } emem_chunk_t;
102
103 typedef struct _emem_header_t {
104         emem_chunk_t *free_list;
105         emem_chunk_t *used_list;
106
107         emem_tree_t *trees;             /* only used by se_mem allocator */
108
109         guint8 canary[EMEM_CANARY_DATA_SIZE];
110         void *(*memory_alloc)(size_t size, struct _emem_header_t *);
111
112         /*
113          * Tools like Valgrind and ElectricFence don't work well with memchunks.
114          * Export the following environment variables to make {ep|se}_alloc() allocate each
115          * object individually.
116          *
117          * WIRESHARK_DEBUG_EP_NO_CHUNKS
118          * WIRESHARK_DEBUG_SE_NO_CHUNKS
119          */
120         gboolean debug_use_chunks;
121
122         /* Do we want to use canaries?
123          * Export the following environment variables to disable/enable canaries
124          *
125          * WIRESHARK_DEBUG_EP_NO_CANARY
126          * For SE memory, canaries are off by default, as the memory
127          * overhead is considerable.
128          * WIRESHARK_DEBUG_SE_USE_CANARY
129          */
130         gboolean debug_use_canary;
131
132 } emem_header_t;
133
134 static emem_header_t ep_packet_mem;
135 static emem_header_t se_packet_mem;
136
137 /*
138  *  Memory scrubbing is expensive but can be useful to ensure we don't:
139  *    - use memory before initializing it
140  *    - use memory after freeing it
141  *  Export WIRESHARK_DEBUG_SCRUB_MEMORY to turn it on.
142  */
143 static gboolean debug_use_memory_scrubber = FALSE;
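/*
 * Illustrative sketch (not part of the allocator): the debug switches above
 * are plain environment variables, so a typical way to exercise them is to
 * export them before starting the program, e.g. something like
 *
 *     WIRESHARK_DEBUG_EP_NO_CHUNKS=1 WIRESHARK_DEBUG_SE_NO_CHUNKS=1
 *     WIRESHARK_DEBUG_SCRUB_MEMORY=1 valgrind tshark -r capture.pcap
 *
 * The exact command line is only an example; the variable names are the ones
 * documented in emem_header_t and checked in ep_init_chunk(), se_init_chunk()
 * and emem_init() below.
 */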
144
145 #if defined (_WIN32)
146 static SYSTEM_INFO sysinfo;
147 static OSVERSIONINFO versinfo;
148 static int pagesize;
149 #elif defined(USE_GUARD_PAGES)
150 static intptr_t pagesize;
151 #endif /* _WIN32 / USE_GUARD_PAGES */
152
153 static void *emem_alloc_chunk(size_t size, emem_header_t *mem);
154 static void *emem_alloc_glib(size_t size, emem_header_t *mem);
155
156 /*
157  * Set a canary value to be placed between memchunks.
158  */
159 static void
160 emem_canary_init(guint8 *canary)
161 {
162         int i;
163         static GRand *rand_state = NULL;
164
165         if (rand_state == NULL) {
166                 rand_state = g_rand_new();
167         }
168         for (i = 0; i < EMEM_CANARY_DATA_SIZE; i ++) {
169                 canary[i] = (guint8) g_rand_int_range(rand_state, 1, 0x100);
170         }
171         return;
172 }
173
174 static void *
175 emem_canary_next(guint8 *mem_canary, guint8 *canary, int *len)
176 {
177         void *ptr;
178         int i;
179
180         for (i = 0; i < EMEM_CANARY_SIZE-1; i++)
181                 if (mem_canary[i] != canary[i])
182                         return (void *) -1;
183
184         for (; i < EMEM_CANARY_DATA_SIZE; i++) {
185                 if (canary[i] == '\0') {
186                         memcpy(&ptr, &canary[i+1], sizeof(void *));
187
188                         if (len)
189                                 *len = i + 1 + sizeof(void *);
190                         return ptr;
191                 }
192
193                 if (mem_canary[i] != canary[i])
194                         return (void *) -1;
195         }
196
197         return (void *) -1;
198 }
199
200 /*
201  * Given an allocation size, return the amount of padding needed for
202  * the canary value.
203  */
204 static guint8
205 emem_canary_pad (size_t allocation)
206 {
207         guint8 pad;
208
209         pad = EMEM_CANARY_SIZE - (allocation % EMEM_CANARY_SIZE);
210         if (pad < EMEM_CANARY_SIZE)
211                 pad += EMEM_CANARY_SIZE;
212
213         return pad;
214 }
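/*
 * Worked example of the padding arithmetic above: for allocation == 13,
 * 13 % 8 == 5, so pad starts at 3; since 3 < 8 it becomes 11, and
 * 13 + 11 == 24 is a multiple of EMEM_CANARY_SIZE with at least 8 pad bytes.
 * For allocation == 16 the remainder is 0, so pad is exactly 8.
 */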
215
216 /* used for debugging canaries, will block */
217 #ifdef DEBUG_INTENSE_CANARY_CHECKS
218 gboolean intense_canary_checking = FALSE;
219
220 /*  used to intensively check ep canaries
221  */
222 void
223 ep_check_canary_integrity(const char* fmt, ...)
224 {
225         va_list ap;
226         static gchar there[128] = {
227                 'L','a','u','n','c','h',0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
228                 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
229                 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
230                 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 };
231         gchar here[128];
232         emem_chunk_t* npc = NULL;
233
234         if (! intense_canary_checking ) return;
235
236         va_start(ap,fmt);
237         g_vsnprintf(here, sizeof(here), fmt, ap);
238         va_end(ap);
239
240         for (npc = ep_packet_mem.free_list; npc != NULL; npc = npc->next) {
241                 void *canary_next = npc->canary_last;
242
243                 while (canary_next != NULL) {
244                         canary_next = emem_canary_next(ep_packet_mem.canary, canary_next, NULL);
245                         /* XXX, check if canary_last is inside allocated memory? */
246
247                         if (npc->canary_last == (void *) -1)
248                                 g_error("Per-packet memory corrupted\nbetween: %s\nand: %s", there, here);
249                 }
250         }
251
252         g_strlcpy(there, here, sizeof(there));
253 }
254 #endif
255
256 static void
257 emem_init_chunk(emem_header_t *mem)
258 {
259         if (mem->debug_use_canary)
260                 emem_canary_init(mem->canary);
261
262         if (mem->debug_use_chunks)
263                 mem->memory_alloc = emem_alloc_chunk;
264         else
265                 mem->memory_alloc = emem_alloc_glib;
266 }
267
268
269 /* Initialize the packet-lifetime memory allocation pool.
270  * This function should be called only once when Wireshark or TShark starts
271  * up.
272  */
273 static void
274 ep_init_chunk(void)
275 {
276         ep_packet_mem.free_list=NULL;
277         ep_packet_mem.used_list=NULL;
278         ep_packet_mem.trees=NULL;       /* not used by this allocator */
279
280         ep_packet_mem.debug_use_chunks = (getenv("WIRESHARK_DEBUG_EP_NO_CHUNKS") == NULL);
281         ep_packet_mem.debug_use_canary = ep_packet_mem.debug_use_chunks && (getenv("WIRESHARK_DEBUG_EP_NO_CANARY") == NULL);
282
283 #ifdef DEBUG_INTENSE_CANARY_CHECKS
284         intense_canary_checking = (getenv("WIRESHARK_DEBUG_EP_INTENSE_CANARY") != NULL);
285 #endif
286
287         emem_init_chunk(&ep_packet_mem);
288 }
289
290 /* Initialize the capture-lifetime memory allocation pool.
291  * This function should be called only once when Wireshark or TShark starts
292  * up.
293  */
294 static void
295 se_init_chunk(void)
296 {
297         se_packet_mem.free_list = NULL;
298         se_packet_mem.used_list = NULL;
299         se_packet_mem.trees = NULL;
300
301         se_packet_mem.debug_use_chunks = (getenv("WIRESHARK_DEBUG_SE_NO_CHUNKS") == NULL);
302         se_packet_mem.debug_use_canary = se_packet_mem.debug_use_chunks && (getenv("WIRESHARK_DEBUG_SE_USE_CANARY") != NULL);
303
304         emem_init_chunk(&se_packet_mem);
305 }
306
307 /*  Initialize all the allocators here.
308  *  This function should be called only once when Wireshark or TShark starts
309  *  up.
310  */
311 void
312 emem_init(void)
313 {
314         ep_init_chunk();
315         se_init_chunk();
316
317         if (getenv("WIRESHARK_DEBUG_SCRUB_MEMORY"))
318                 debug_use_memory_scrubber  = TRUE;
319
320 #if defined (_WIN32)
321         /* Set up our guard page info for Win32 */
322         GetSystemInfo(&sysinfo);
323         pagesize = sysinfo.dwPageSize;
324
325         /* calling GetVersionEx using the OSVERSIONINFO structure.
326          * OSVERSIONINFOEX requires Win NT4 with SP6 or newer NT Versions.
327          * OSVERSIONINFOEX will fail on Win9x and older NT Versions.
328          * See also:
329          * http://msdn.microsoft.com/library/en-us/sysinfo/base/getversionex.asp
330          * http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfo_str.asp
331          * http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfoex_str.asp
332          */
333         versinfo.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
334         GetVersionEx(&versinfo);
335
336 #elif defined(USE_GUARD_PAGES)
337         pagesize = sysconf(_SC_PAGESIZE);
338 #ifdef NEED_DEV_ZERO
339         dev_zero_fd = ws_open("/dev/zero", O_RDWR);
340         g_assert(dev_zero_fd != -1);
341 #endif
342 #endif /* _WIN32 / USE_GUARD_PAGES */
343 }
344
345 #ifdef SHOW_EMEM_STATS
346 #define NUM_ALLOC_DIST 10
347 static guint allocations[NUM_ALLOC_DIST] = { 0 };
348 static guint total_no_chunks = 0;
349
350 static void
351 print_alloc_stats(void)
352 {
353         guint num_chunks = 0;
354         guint num_allocs = 0;
355         guint total_used = 0;
356         guint total_allocation = 0;
357         guint total_free = 0;
358         guint used_for_canaries = 0;
359         guint total_headers;
360         guint i;
361         emem_chunk_t *chunk;
362         guint total_space_allocated_from_os, total_space_wasted;
363         gboolean ep_stat=TRUE;
364
365         fprintf(stderr, "\n-------- EP allocator statistics --------\n");
366         fprintf(stderr, "%s chunks, %s canaries, %s memory scrubber\n",
367                ep_packet_mem.debug_use_chunks ? "Using" : "Not using",
368                ep_packet_mem.debug_use_canary ? "using" : "not using",
369                debug_use_memory_scrubber ? "using" : "not using");
370
371         if (!(ep_packet_mem.free_list || ep_packet_mem.used_list)) {
372                 fprintf(stderr, "No memory allocated\n");
373                 ep_stat = FALSE;
374         }
375         if (ep_packet_mem.debug_use_chunks && ep_stat) {
376                 /* Nothing interesting without chunks */
377                 /*  Only look at the used_list since those chunks are fully
378                  *  used.  Looking at the free list would skew our view of what
379                  *  we have wasted.
380                  */
381                 for (chunk = ep_packet_mem.used_list; chunk; chunk = chunk->next) {
382                         num_chunks++;
383                         total_used += (chunk->amount_free_init - chunk->amount_free);
384                         total_allocation += chunk->amount_free_init;
385                         total_free += chunk->amount_free;
386                 }
387                 if (num_chunks > 0) {
388                         fprintf (stderr, "\n");
389                         fprintf (stderr, "\n---- Buffer space ----\n");
390                         fprintf (stderr, "\tChunk allocation size: %10u\n", EMEM_PACKET_CHUNK_SIZE);
391                         fprintf (stderr, "\t*    Number of chunks: %10u\n", num_chunks);
392                         fprintf (stderr, "\t-------------------------------------------\n");
393                         fprintf (stderr, "\t= %u (%u including guard pages) total space used for buffers\n",
394                         total_allocation, EMEM_PACKET_CHUNK_SIZE * num_chunks);
395                         fprintf (stderr, "\t-------------------------------------------\n");
396                         total_space_allocated_from_os = total_allocation
397                                 + sizeof(emem_chunk_t) * num_chunks;
398                         fprintf (stderr, "Total allocated from OS: %u\n\n",
399                                 total_space_allocated_from_os);
400                 }else{
401                         fprintf (stderr, "No fully used chunks, nothing to do\n");
402                 }
403                 /* Reset stats */
404                 num_chunks = 0;
405                 num_allocs = 0;
406                 total_used = 0;
407                 total_allocation = 0;
408                 total_free = 0;
409                 used_for_canaries = 0;
410         }
411
412
413         fprintf(stderr, "\n-------- SE allocator statistics --------\n");
414         fprintf(stderr, "Total number of chunk allocations %u\n",
415                 total_no_chunks);
416         fprintf(stderr, "%s chunks, %s canaries\n",
417                se_packet_mem.debug_use_chunks ? "Using" : "Not using",
418                se_packet_mem.debug_use_canary ? "using" : "not using");
419
420         if (!(se_packet_mem.free_list || se_packet_mem.used_list)) {
421                 fprintf(stderr, "No memory allocated\n");
422                 return;
423         }
424
425         if (!se_packet_mem.debug_use_chunks )
426                 return; /* Nothing interesting without chunks?? */
427
428         /*  Only look at the used_list since those chunks are fully used.
429          *  Looking at the free list would skew our view of what we have wasted.
430          */
431         for (chunk = se_packet_mem.used_list; chunk; chunk = chunk->next) {
432                 num_chunks++;
433                 total_used += (chunk->amount_free_init - chunk->amount_free);
434                 total_allocation += chunk->amount_free_init;
435                 total_free += chunk->amount_free;
436
437                 if (se_packet_mem.debug_use_canary){
438                         void *ptr = chunk->canary_last;
439                         int len;
440
441                         while (ptr != NULL) {
442                                 ptr = emem_canary_next(se_packet_mem.canary, ptr, &len);
443
444                                 if (ptr == (void *) -1)
445                                         g_error("Memory corrupted");
446                                 used_for_canaries += len;
447                         }
448                 }
449         }
450
451         if (num_chunks == 0) {
452
453                 fprintf (stderr, "No fully used chunks, nothing to do\n");
454                 return;
455         }
456
457         fprintf (stderr, "\n");
458         fprintf (stderr, "---------- Allocations from the OS ----------\n");
459         fprintf (stderr, "---- Headers ----\n");
460         fprintf (stderr, "\t(    Chunk header size: %10lu\n",
461                  (unsigned long) sizeof(emem_chunk_t));
462         fprintf (stderr, "\t*     Number of chunks: %10u\n", num_chunks);
463         fprintf (stderr, "\t-------------------------------------------\n");
464
465         total_headers = sizeof(emem_chunk_t) * num_chunks;
466         fprintf (stderr, "\t= %u bytes used for headers\n", total_headers);
467         fprintf (stderr, "\n---- Buffer space ----\n");
468         fprintf (stderr, "\tChunk allocation size: %10u\n",
469                  EMEM_PACKET_CHUNK_SIZE);
470         fprintf (stderr, "\t*    Number of chunks: %10u\n", num_chunks);
471         fprintf (stderr, "\t-------------------------------------------\n");
472         fprintf (stderr, "\t= %u (%u including guard pages) bytes used for buffers\n",
473                 total_allocation, EMEM_PACKET_CHUNK_SIZE * num_chunks);
474         fprintf (stderr, "\t-------------------------------------------\n");
475         total_space_allocated_from_os = (EMEM_PACKET_CHUNK_SIZE * num_chunks)
476                                         + total_headers;
477         fprintf (stderr, "Total bytes allocated from the OS: %u\n\n",
478                 total_space_allocated_from_os);
479
480         for (i = 0; i < NUM_ALLOC_DIST; i++)
481                 num_allocs += allocations[i];
482
483         fprintf (stderr, "---------- Allocations from the SE pool ----------\n");
484         fprintf (stderr, "                Number of SE allocations: %10u\n",
485                  num_allocs);
486         fprintf (stderr, "             Bytes used (incl. canaries): %10u\n",
487                  total_used);
488         fprintf (stderr, "                 Bytes used for canaries: %10u\n",
489                  used_for_canaries);
490         fprintf (stderr, "Bytes unused (wasted, excl. guard pages): %10u\n",
491                  total_allocation - total_used);
492         fprintf (stderr, "Bytes unused (wasted, incl. guard pages): %10u\n\n",
493                  total_space_allocated_from_os - total_used);
494
495         fprintf (stderr, "---------- Statistics ----------\n");
496         fprintf (stderr, "Average SE allocation size (incl. canaries): %6.2f\n",
497                 (float)total_used/(float)num_allocs);
498         fprintf (stderr, "Average SE allocation size (excl. canaries): %6.2f\n",
499                 (float)(total_used - used_for_canaries)/(float)num_allocs);
500         fprintf (stderr, "        Average wasted bytes per allocation: %6.2f\n",
501                 (total_allocation - total_used)/(float)num_allocs);
502         total_space_wasted = (total_allocation - total_used)
503                 + (sizeof(emem_chunk_t));
504         fprintf (stderr, " Space used for headers + unused allocation: %8u\n",
505                 total_space_wasted);
506         fprintf (stderr, "--> %% overhead/waste: %4.2f\n",
507                 100 * (float)total_space_wasted/(float)total_space_allocated_from_os);
508
509         fprintf (stderr, "\nAllocation distribution (sizes include canaries):\n");
510         for (i = 0; i < (NUM_ALLOC_DIST-1); i++)
511                 fprintf (stderr, "size < %5d: %8u\n", 32<<i, allocations[i]);
512         fprintf (stderr, "size >=%5d: %8u\n", 32<<(i-1), allocations[i]);
513 }
514 #endif
515
516 static gboolean
517 emem_verify_pointer(emem_header_t *hdr, const void *ptr)
518 {
519         const gchar *cptr = ptr;
520         emem_chunk_t *used_list[2];
521         guint8 used_list_idx;
522         emem_chunk_t *chunk;
523
524         used_list[0] = hdr->free_list;
525         used_list[1] = hdr->used_list;
526
527         for (used_list_idx=0; used_list_idx < G_N_ELEMENTS(used_list); ++used_list_idx) {
528                 chunk = used_list[used_list_idx];
529                 for ( ; chunk ; chunk = chunk->next) {
530                         if (cptr >= (chunk->buf + chunk->free_offset_init) &&
531                                 cptr < (chunk->buf + chunk->free_offset))
532                                 return TRUE;
533                 }
534         }
535
536         return FALSE;
537 }
538
539 gboolean
540 ep_verify_pointer(const void *ptr)
541 {
542         return emem_verify_pointer(&ep_packet_mem, ptr);
543 }
544
545 gboolean
546 se_verify_pointer(const void *ptr)
547 {
548         return emem_verify_pointer(&se_packet_mem, ptr);
549 }
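/*
 * Usage sketch (illustrative only): these predicates let callers check
 * whether a pointer was handed out by the chunked allocators, e.g.
 *
 *     const char *str = some_pointer;      (some_pointer is hypothetical)
 *     if (!ep_verify_pointer(str))
 *             str = "<not ep-allocated>";
 *
 * Note that they only know about chunked memory: when the
 * WIRESHARK_DEBUG_*_NO_CHUNKS variables force one-object-per-g_malloc()
 * allocations, every chunk has free_offset_init == free_offset == 0, so the
 * check above would return FALSE even for valid ep/se pointers.
 */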
550
551 static void
552 emem_scrub_memory(char *buf, size_t size, gboolean alloc)
553 {
554         guint scrubbed_value;
555         guint offset;
556
557         if (!debug_use_memory_scrubber)
558                 return;
559
560         if (alloc) /* this memory is being allocated */
561                 scrubbed_value = 0xBADDCAFE;
562         else /* this memory is being freed */
563                 scrubbed_value = 0xDEADBEEF;
564
565         /*  We shouldn't need to check the alignment of the starting address
566          *  since this is malloc'd memory (or 'pagesize' bytes into malloc'd
567          *  memory).
568          */
569
570         /* XXX - We might want to use memset here in order to avoid problems on
571          * alignment-sensitive platforms, e.g.
572          * http://stackoverflow.com/questions/108866/is-there-memset-that-accepts-integers-larger-than-char
573          */
574
575         for (offset = 0; offset + sizeof(guint) <= size; offset += sizeof(guint))
576                 *(guint*)(buf+offset) = scrubbed_value;
577
578         /* Initialize the last bytes, if any */
579         if (offset < size) {
580                 *(guint8*)(buf+offset) = scrubbed_value >> 24;
581                 offset++;
582                 if (offset < size) {
583                         *(guint8*)(buf+offset) = (scrubbed_value >> 16) & 0xFF;
584                         offset++;
585                         if (offset < size) {
586                                 *(guint8*)(buf+offset) = (scrubbed_value >> 8) & 0xFF;
587                                 offset++;
588                         }
589                 }
590         }
591
592
593 }
594
595 static emem_chunk_t *
596 emem_create_chunk(void) {
597 #if defined (_WIN32)
598         BOOL ret;
599         char *buf_end, *prot1, *prot2;
600         DWORD oldprot;
601 #elif defined(USE_GUARD_PAGES)
602         int ret;
603         char *buf_end, *prot1, *prot2;
604 #endif /* _WIN32 / USE_GUARD_PAGES */
605         emem_chunk_t *npc;
606
607         npc = g_new(emem_chunk_t, 1);
608         npc->next = NULL;
609         npc->canary_last = NULL;
610
611 #if defined (_WIN32)
612         /*
613          * MSDN documents VirtualAlloc/VirtualProtect at
614          * http://msdn.microsoft.com/library/en-us/memory/base/creating_guard_pages.asp
615          */
616
617         /* XXX - is MEM_COMMIT|MEM_RESERVE correct? */
618         npc->buf = VirtualAlloc(NULL, EMEM_PACKET_CHUNK_SIZE,
619                 MEM_COMMIT|MEM_RESERVE, PAGE_READWRITE);
620
621         if (npc->buf == NULL) {
622                 g_free(npc);
623                 THROW(OutOfMemoryError);
624         }
625
626 #elif defined(USE_GUARD_PAGES)
627         npc->buf = mmap(NULL, EMEM_PACKET_CHUNK_SIZE,
628                 PROT_READ|PROT_WRITE, ANON_PAGE_MODE, ANON_FD, 0);
629
630         if (npc->buf == MAP_FAILED) {
631                 g_free(npc);
632                 THROW(OutOfMemoryError);
633         }
634
635 #else /* Is there a draft in here? */
636         npc->buf = g_malloc(EMEM_PACKET_CHUNK_SIZE);
637         /* g_malloc() can't fail */
638 #endif
639
640 #ifdef SHOW_EMEM_STATS
641         total_no_chunks++;
642 #endif
643
644 #if defined (_WIN32)
645         buf_end = npc->buf + EMEM_PACKET_CHUNK_SIZE;
646
647         /* Align our guard pages on page-sized boundaries */
648         prot1 = (char *) ((((int) npc->buf + pagesize - 1) / pagesize) * pagesize);
649         prot2 = (char *) ((((int) buf_end - (1 * pagesize)) / pagesize) * pagesize);
650
651         ret = VirtualProtect(prot1, pagesize, PAGE_NOACCESS, &oldprot);
652         g_assert(ret != 0 || versinfo.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS);
653         ret = VirtualProtect(prot2, pagesize, PAGE_NOACCESS, &oldprot);
654         g_assert(ret != 0 || versinfo.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS);
655
656         npc->amount_free_init = (unsigned int) (prot2 - prot1 - pagesize);
657         npc->free_offset_init = (unsigned int) (prot1 - npc->buf) + pagesize;
658 #elif defined(USE_GUARD_PAGES)
659         buf_end = npc->buf + EMEM_PACKET_CHUNK_SIZE;
660
661         /* Align our guard pages on page-sized boundaries */
662         prot1 = (char *) ((((intptr_t) npc->buf + pagesize - 1) / pagesize) * pagesize);
663         prot2 = (char *) ((((intptr_t) buf_end - (1 * pagesize)) / pagesize) * pagesize);
664
665         ret = mprotect(prot1, pagesize, PROT_NONE);
666         g_assert(ret != -1);
667         ret = mprotect(prot2, pagesize, PROT_NONE);
668         g_assert(ret != -1);
669
670         npc->amount_free_init = prot2 - prot1 - pagesize;
671         npc->free_offset_init = (prot1 - npc->buf) + pagesize;
672 #else
673         npc->amount_free_init = EMEM_PACKET_CHUNK_SIZE;
674         npc->free_offset_init = 0;
675 #endif /* USE_GUARD_PAGES */
676
677         npc->amount_free = npc->amount_free_init;
678         npc->free_offset = npc->free_offset_init;
679         return npc;
680 }
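/*
 * Layout sketch of a chunk produced above when guard pages are in use
 * (addresses grow to the right; sizes are not to scale):
 *
 *     buf      prot1                 prot2              buf_end
 *      | slack | guard | usable space | guard |  slack  |
 *
 * free_offset_init points just past the first guard page and
 * amount_free_init is the distance between the two guard pages minus one
 * page, so allocations can never run into either PROT_NONE/PAGE_NOACCESS
 * region without faulting.
 */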
681
682 static void *
683 emem_alloc_chunk(size_t size, emem_header_t *mem)
684 {
685         void *buf;
686
687         size_t asize = size;
688         gboolean use_canary = mem->debug_use_canary;
689         guint8 pad;
690         emem_chunk_t *free_list;
691
692         /* Round up to an 8 byte boundary. Make sure we have at least
693          * 8 pad bytes for our canary.
694          */
695          if (use_canary) {
696                 pad = emem_canary_pad(asize);
697                 asize += sizeof(void *);
698         } else
699                 pad = (G_MEM_ALIGN - (asize & (G_MEM_ALIGN-1))) & (G_MEM_ALIGN-1);
700
701         asize += pad;
702
703 #ifdef SHOW_EMEM_STATS
704         /* Do this check here so we can include the canary size */
705         if (mem == &se_packet_mem) {
706                 if (asize < 32)
707                         allocations[0]++;
708                 else if (asize < 64)
709                         allocations[1]++;
710                 else if (asize < 128)
711                         allocations[2]++;
712                 else if (asize < 256)
713                         allocations[3]++;
714                 else if (asize < 512)
715                         allocations[4]++;
716                 else if (asize < 1024)
717                         allocations[5]++;
718                 else if (asize < 2048)
719                         allocations[6]++;
720                 else if (asize < 4096)
721                         allocations[7]++;
722                 else if (asize < 8192)
723                         allocations[8]++;
726                 else
727                         allocations[(NUM_ALLOC_DIST-1)]++;
728         }
729 #endif
730
731         /* make sure we don't try to allocate too much (arbitrary limit) */
732         DISSECTOR_ASSERT(size<(EMEM_PACKET_CHUNK_SIZE>>2));
733
734         if (!mem->free_list)
735                 mem->free_list = emem_create_chunk();
736
737         /* oops, we need to allocate more memory to serve this request
738          * than we have free. move this node to the used list and try again
739          */
740         if(asize > mem->free_list->amount_free) {
741                 emem_chunk_t *npc;
742                 npc=mem->free_list;
743                 mem->free_list=mem->free_list->next;
744                 npc->next=mem->used_list;
745                 mem->used_list=npc;
746
747                 if (!mem->free_list)
748                         mem->free_list = emem_create_chunk();
749         }
750
751         free_list = mem->free_list;
752
753         buf = free_list->buf + free_list->free_offset;
754
755         free_list->amount_free -= (unsigned int) asize;
756         free_list->free_offset += (unsigned int) asize;
757
758         if (use_canary) {
759                 char *cptr = (char *)buf + size;
760
761                 memcpy(cptr, mem->canary, pad-1);
762                 cptr[pad-1] = '\0';
763                 memcpy(cptr + pad, &free_list->canary_last, sizeof(void *));
764
765                 free_list->canary_last = cptr;
766         }
767
768         return buf;
769 }
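/*
 * Canary layout written by the code above, for an allocation of 'size'
 * bytes with 'pad' bytes of padding (pad is at least 8, see
 * emem_canary_pad()):
 *
 *     buf[0..size-1]         caller-visible data
 *     buf[size..size+pad-2]  first pad-1 bytes of mem->canary
 *     buf[size+pad-1]        '\0' terminator marking the end of the canary
 *     buf[size+pad..]        previous value of canary_last (a void *)
 *
 * canary_last therefore forms a singly linked list of canaries inside the
 * chunk, which emem_canary_next() walks when the pool is freed.
 */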
770
771 static void *
772 emem_alloc_glib(size_t size, emem_header_t *mem)
773 {
774         emem_chunk_t *npc;
775
776         npc=g_new(emem_chunk_t, 1);
777         npc->next=mem->used_list;
778         npc->buf=g_malloc(size);
779         npc->canary_last = NULL;
780         mem->used_list=npc;
781         /* There's no padding/alignment involved (from our point of view) when
782          * we fetch the memory directly from the system pool, so WYSIWYG */
783         npc->free_offset = npc->free_offset_init = 0;
784         npc->amount_free = npc->amount_free_init = (unsigned int) size;
785
786         return npc->buf;
787 }
788
789 /* allocate 'size' amount of memory. */
790 static void *
791 emem_alloc(size_t size, emem_header_t *mem)
792 {
793         void *buf = mem->memory_alloc(size, mem);
794
795         /*  XXX - this is a waste of time if the allocator function is going to
796          *  memset this straight back to 0.
797          */
798         emem_scrub_memory(buf, size, TRUE);
799
800         return buf;
801 }
802
803 /* allocate 'size' amount of memory with an allocation lifetime until the
804  * next packet.
805  */
806 void *
807 ep_alloc(size_t size)
808 {
809         return emem_alloc(size, &ep_packet_mem);
810 }
811
812 /* allocate 'size' amount of memory with an allocation lifetime until the
813  * next capture.
814  */
815 void *
816 se_alloc(size_t size)
817 {
818         return emem_alloc(size, &se_packet_mem);
819 }
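/*
 * Usage sketch (illustrative, not taken from a real dissector): the two
 * pools differ only in lifetime.
 *
 *     guint8 *scratch = ep_alloc(64);                   (until next packet)
 *     my_state_t *st  = se_alloc0(sizeof(my_state_t));  (until capture close)
 *
 * my_state_t is a hypothetical per-conversation structure.  Neither pointer
 * is ever passed to free(); ep_free_all() and se_free_all() reclaim
 * everything in one sweep.
 */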
820
821 void *
822 ep_alloc0(size_t size)
823 {
824         return memset(ep_alloc(size),'\0',size);
825 }
826
827 gchar *
828 ep_strdup(const gchar* src)
829 {
830         guint len = (guint) strlen(src);
831         gchar* dst;
832
833         dst = memcpy(ep_alloc(len+1), src, len+1);
834
835         return dst;
836 }
837
838 gchar *
839 ep_strndup(const gchar* src, size_t len)
840 {
841         gchar* dst = ep_alloc(len+1);
842
843         g_strlcpy(dst, src, len+1);
844
845         return dst;
846 }
847
848 void *
849 ep_memdup(const void* src, size_t len)
850 {
851         return memcpy(ep_alloc(len), src, len);
852 }
853
854 gchar *
855 ep_strdup_vprintf(const gchar* fmt, va_list ap)
856 {
857         va_list ap2;
858         gsize len;
859         gchar* dst;
860
861         G_VA_COPY(ap2, ap);
862
863         len = g_printf_string_upper_bound(fmt, ap);
864
865         dst = ep_alloc(len+1);
866         g_vsnprintf (dst, (gulong) len, fmt, ap2);
867         va_end(ap2);
868
869         return dst;
870 }
871
872 gchar *
873 ep_strdup_printf(const gchar* fmt, ...)
874 {
875         va_list ap;
876         gchar* dst;
877
878         va_start(ap,fmt);
879         dst = ep_strdup_vprintf(fmt, ap);
880         va_end(ap);
881         return dst;
882 }
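/*
 * Example (sketch): the printf-style helpers are handy for building
 * short-lived display strings without manual cleanup, e.g.
 *
 *     col_str = ep_strdup_printf("%s port %u", proto_name, port);
 *
 * where col_str, proto_name and port are hypothetical local variables.
 */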
883
884 gchar **
885 ep_strsplit(const gchar* string, const gchar* sep, int max_tokens)
886 {
887         gchar* splitted;
888         gchar* s;
889         guint tokens;
890         guint str_len;
891         guint sep_len;
892         guint i;
893         gchar** vec;
894         enum { AT_START, IN_PAD, IN_TOKEN } state;
895         guint curr_tok = 0;
896
897         if (    ! string
898              || ! sep
899              || ! sep[0])
900                 return NULL;
901
902         s = splitted = ep_strdup(string);
903         str_len = (guint) strlen(splitted);
904         sep_len = (guint) strlen(sep);
905
906         if (max_tokens < 1) max_tokens = INT_MAX;
907
908         tokens = 1;
909
910
911         while (tokens <= (guint)max_tokens && ( s = strstr(s,sep) )) {
912                 tokens++;
913
914                 for(i=0; i < sep_len; i++ )
915                         s[i] = '\0';
916
917                 s += sep_len;
918
919         }
920
921         vec = ep_alloc_array(gchar*,tokens+1);
922         state = AT_START;
923
924         for (i=0; i< str_len; i++) {
925                 switch(state) {
926                         case AT_START:
927                                 switch(splitted[i]) {
928                                         case '\0':
929                                                 state  = IN_PAD;
930                                                 continue;
931                                         default:
932                                                 vec[curr_tok] = &(splitted[i]);
933                                                 curr_tok++;
934                                                 state = IN_TOKEN;
935                                                 continue;
936                                 }
937                         case IN_TOKEN:
938                                 switch(splitted[i]) {
939                                         case '\0':
940                                                 state = IN_PAD;
941                                         default:
942                                                 continue;
943                                 }
944                         case IN_PAD:
945                                 switch(splitted[i]) {
946                                         default:
947                                                 vec[curr_tok] = &(splitted[i]);
948                                                 curr_tok++;
949                                                 state = IN_TOKEN;
950                                         case '\0':
951                                                 continue;
952                                 }
953                 }
954         }
955
956         vec[curr_tok] = NULL;
957
958         return vec;
959 }
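/*
 * Example of the splitter above (a sketch, derived from the state machine):
 *
 *     gchar **v = ep_strsplit("a,,b,c", ",", -1);
 *
 * yields v[0]=="a", v[1]=="b", v[2]=="c", v[3]==NULL; unlike g_strsplit(),
 * runs of separators do not produce empty tokens, and both the vector and
 * the token strings are ep-allocated, so nothing needs to be freed.
 */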
960
961
962
963 void *
964 se_alloc0(size_t size)
965 {
966         return memset(se_alloc(size),'\0',size);
967 }
968
969 /* If src is NULL, just return the string "<NULL>" so that callers don't
970  * have to bother checking it.
971  */
972 gchar *
973 se_strdup(const gchar* src)
974 {
975         guint len;
976         gchar* dst;
977
978         if(!src)
979                 return "<NULL>";
980
981         len = (guint) strlen(src);
982         dst = memcpy(se_alloc(len+1), src, len+1);
983
984         return dst;
985 }
986
987 gchar *
988 se_strndup(const gchar* src, size_t len)
989 {
990         gchar* dst = se_alloc(len+1);
991
992         g_strlcpy(dst, src, len+1);
993
994         return dst;
995 }
996
997 void *
998 se_memdup(const void* src, size_t len)
999 {
1000         return memcpy(se_alloc(len), src, len);
1001 }
1002
1003 gchar *
1004 se_strdup_vprintf(const gchar* fmt, va_list ap)
1005 {
1006         va_list ap2;
1007         gsize len;
1008         gchar* dst;
1009
1010         G_VA_COPY(ap2, ap);
1011
1012         len = g_printf_string_upper_bound(fmt, ap);
1013
1014         dst = se_alloc(len+1);
1015         g_vsnprintf (dst, (gulong) len, fmt, ap2);
1016         va_end(ap2);
1017
1018         return dst;
1019 }
1020
1021 gchar *
1022 se_strdup_printf(const gchar* fmt, ...)
1023 {
1024         va_list ap;
1025         gchar* dst;
1026
1027         va_start(ap,fmt);
1028         dst = se_strdup_vprintf(fmt, ap);
1029         va_end(ap);
1030         return dst;
1031 }
1032
1033 /* release all allocated memory back to the pool. */
1034 static void
1035 emem_free_all(emem_header_t *mem)
1036 {
1037         gboolean use_chunks = mem->debug_use_chunks;
1038
1039         emem_chunk_t *npc;
1040         emem_tree_t *tree_list;
1041
1042         /* move all used chunks over to the free list */
1043         while(mem->used_list){
1044                 npc=mem->used_list;
1045                 mem->used_list=mem->used_list->next;
1046                 npc->next=mem->free_list;
1047                 mem->free_list=npc;
1048         }
1049
1050         /* clear them all out */
1051         npc = mem->free_list;
1052         while (npc != NULL) {
1053                 if (use_chunks) {
1054                         while (npc->canary_last != NULL) {
1055                                 npc->canary_last = emem_canary_next(mem->canary, npc->canary_last, NULL);
1056                                 /* XXX, check if canary_last is inside allocated memory? */
1057
1058                                 if (npc->canary_last == (void *) -1)
1059                                         g_error("Memory corrupted");
1060                         }
1061
1062                         emem_scrub_memory((npc->buf + npc->free_offset_init),
1063                                           (npc->free_offset - npc->free_offset_init),
1064                                           FALSE);
1065
1066                         npc->amount_free = npc->amount_free_init;
1067                         npc->free_offset = npc->free_offset_init;
1068                         npc = npc->next;
1069                 } else {
1070                         emem_chunk_t *next = npc->next;
1071
1072                         emem_scrub_memory(npc->buf, npc->amount_free_init, FALSE);
1073
1074                         g_free(npc->buf);
1075                         g_free(npc);
1076                         npc = next;
1077                 }
1078         }
1079
1080         if (!use_chunks) {
1081                 /* We've freed all this memory already */
1082                 mem->free_list = NULL;
1083         }
1084
1085         /* release/reset all allocated trees */
1086         for(tree_list=mem->trees;tree_list;tree_list=tree_list->next){
1087                 tree_list->tree=NULL;
1088         }
1089 }
1090
1091 /* release all allocated memory back to the pool. */
1092 void
1093 ep_free_all(void)
1094 {
1095         emem_free_all(&ep_packet_mem);
1096 }
1097
1098 /* release all allocated memory back to the pool. */
1099 void
1100 se_free_all(void)
1101 {
1102 #ifdef SHOW_EMEM_STATS
1103         print_alloc_stats();
1104 #endif
1105
1106         emem_free_all(&se_packet_mem);
1107 }
1108
1109 ep_stack_t
1110 ep_stack_new(void) {
1111         ep_stack_t s = ep_new(struct _ep_stack_frame_t*);
1112         *s = ep_new0(struct _ep_stack_frame_t);
1113         return s;
1114 }
1115
1116 /*  for ep_stack_t we'll keep the popped frames so we reuse them instead
1117 of allocating new ones.
1118 */
1119
1120 void *
1121 ep_stack_push(ep_stack_t stack, void* data)
1122 {
1123         struct _ep_stack_frame_t* frame;
1124         struct _ep_stack_frame_t* head = (*stack);
1125
1126         if (head->above) {
1127                 frame = head->above;
1128         } else {
1129                 frame = ep_new(struct _ep_stack_frame_t);
1130                 head->above = frame;
1131                 frame->below = head;
1132                 frame->above = NULL;
1133         }
1134
1135         frame->payload = data;
1136         (*stack) = frame;
1137
1138         return data;
1139 }
1140
1141 void *
1142 ep_stack_pop(ep_stack_t stack)
1143 {
1144
1145         if ((*stack)->below) {
1146                 (*stack) = (*stack)->below;
1147                 return (*stack)->above->payload;
1148         } else {
1149                 return NULL;
1150         }
1151 }
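/*
 * Usage sketch for the stack above (illustrative only):
 *
 *     ep_stack_t s = ep_stack_new();
 *     ep_stack_push(s, item_a);
 *     ep_stack_push(s, item_b);
 *     ep_stack_pop(s);      returns item_b
 *     ep_stack_pop(s);      returns item_a
 *     ep_stack_pop(s);      returns NULL, the stack is empty
 *
 * item_a and item_b are hypothetical pointers; the frames themselves are
 * ep-allocated, so the whole stack vanishes at the next ep_free_all().
 */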
1152
1153 emem_tree_t *
1154 se_tree_create(int type, const char *name)
1155 {
1156         emem_tree_t *tree_list;
1157
1158         tree_list=g_malloc(sizeof(emem_tree_t));
1159         tree_list->next=se_packet_mem.trees;
1160         tree_list->type=type;
1161         tree_list->tree=NULL;
1162         tree_list->name=name;
1163         tree_list->malloc=se_alloc;
1164         se_packet_mem.trees=tree_list;
1165
1166         return tree_list;
1167 }
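/*
 * Sketch of typical tree usage (the names are made up; the type constant and
 * prototypes come from emem.h):
 *
 *     static emem_tree_t *frame_data_tree;
 *
 *     frame_data_tree = se_tree_create(EMEM_TREE_TYPE_RED_BLACK,
 *                                      "my_proto frame data");
 *     emem_tree_insert32(frame_data_tree, frame_num, fd);
 *     fd = emem_tree_lookup32(frame_data_tree, frame_num);
 *
 * Because the tree header comes from g_malloc() but is linked onto
 * se_packet_mem.trees, se_free_all() empties the tree (tree=NULL) without
 * destroying the header, so it can be reused for the next capture.
 */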
1168
1169 void *
1170 emem_tree_lookup32(emem_tree_t *se_tree, guint32 key)
1171 {
1172         emem_tree_node_t *node;
1173
1174         node=se_tree->tree;
1175
1176         while(node){
1177                 if(key==node->key32){
1178                         return node->data;
1179                 }
1180                 if(key<node->key32){
1181                         node=node->left;
1182                         continue;
1183                 }
1184                 if(key>node->key32){
1185                         node=node->right;
1186                         continue;
1187                 }
1188         }
1189         return NULL;
1190 }
1191
1192 void *
1193 emem_tree_lookup32_le(emem_tree_t *se_tree, guint32 key)
1194 {
1195         emem_tree_node_t *node;
1196
1197         node=se_tree->tree;
1198
1199         if(!node){
1200                 return NULL;
1201         }
1202
1203
1204         while(node){
1205                 if(key==node->key32){
1206                         return node->data;
1207                 }
1208                 if(key<node->key32){
1209                         if(node->left){
1210                                 node=node->left;
1211                                 continue;
1212                         } else {
1213                                 break;
1214                         }
1215                 }
1216                 if(key>node->key32){
1217                         if(node->right){
1218                                 node=node->right;
1219                                 continue;
1220                         } else {
1221                                 break;
1222                         }
1223                 }
1224         }
1225
1226
1227         if(!node){
1228                 return NULL;
1229         }
1230
1231         /* If we are still at the root of the tree, then either this node's
1232          * key is smaller than the search key, in which case we return this
1233          * node, or there is no smaller key in the tree, in which case we
1234          * return NULL.
1235          */
1236         if(!node->parent){
1237                 if(key>node->key32){
1238                         return node->data;
1239                 } else {
1240                         return NULL;
1241                 }
1242         }
1243
1244         if(node->parent->left==node){
1245                 /* left child */
1246
1247                 if(key>node->key32){
1248                         /* if this is a left child and its key is smaller than
1249                          * the search key, then this is the node we want.
1250                          */
1251                         return node->data;
1252                 } else {
1253                         /* if this is a left child and its key is bigger than
1254                          * the search key, we have to check if any
1255                          * of our ancestors are smaller than the search key.
1256                          */
1257                         while(node){
1258                                 if(key>node->key32){
1259                                         return node->data;
1260                                 }
1261                                 node=node->parent;
1262                         }
1263                         return NULL;
1264                 }
1265         } else {
1266                 /* right child */
1267
1268                 if(node->key32<key){
1269                         /* if this is the right child and its key is smaller
1270                          * than the search key then this is the one we want.
1271                          */
1272                         return node->data;
1273                 } else {
1274                         /* if this is the right child and its key is larger
1275                          * than the search key then our parent is the one we
1276                          * want.
1277                          */
1278                         return node->parent->data;
1279                 }
1280         }
1281
1282 }
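/*
 * Behavioural sketch of the "less than or equal" lookup above: with keys
 * 5, 10 and 20 in the tree,
 *
 *     emem_tree_lookup32_le(tree, 15)   returns the data stored under 10
 *     emem_tree_lookup32_le(tree, 20)   returns the data stored under 20
 *     emem_tree_lookup32_le(tree, 3)    returns NULL (no key <= 3)
 *
 * i.e. it finds the entry with the largest key that does not exceed the
 * search key, or NULL if every key is larger.
 */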
1283
1284
1285 static inline emem_tree_node_t *
1286 emem_tree_parent(emem_tree_node_t *node)
1287 {
1288         return node->parent;
1289 }
1290
1291 static inline emem_tree_node_t *
1292 emem_tree_grandparent(emem_tree_node_t *node)
1293 {
1294         emem_tree_node_t *parent;
1295
1296         parent=emem_tree_parent(node);
1297         if(parent){
1298                 return parent->parent;
1299         }
1300         return NULL;
1301 }
1302
1303 static inline emem_tree_node_t *
1304 emem_tree_uncle(emem_tree_node_t *node)
1305 {
1306         emem_tree_node_t *parent, *grandparent;
1307
1308         parent=emem_tree_parent(node);
1309         if(!parent){
1310                 return NULL;
1311         }
1312         grandparent=emem_tree_parent(parent);
1313         if(!grandparent){
1314                 return NULL;
1315         }
1316         if(parent==grandparent->left){
1317                 return grandparent->right;
1318         }
1319         return grandparent->left;
1320 }
1321
1322 static inline void rb_insert_case1(emem_tree_t *se_tree, emem_tree_node_t *node);
1323 static inline void rb_insert_case2(emem_tree_t *se_tree, emem_tree_node_t *node);
1324
1325 static inline void
1326 rotate_left(emem_tree_t *se_tree, emem_tree_node_t *node)
1327 {
1328         if(node->parent){
1329                 if(node->parent->left==node){
1330                         node->parent->left=node->right;
1331                 } else {
1332                         node->parent->right=node->right;
1333                 }
1334         } else {
1335                 se_tree->tree=node->right;
1336         }
1337         node->right->parent=node->parent;
1338         node->parent=node->right;
1339         node->right=node->right->left;
1340         if(node->right){
1341                 node->right->parent=node;
1342         }
1343         node->parent->left=node;
1344 }
1345
1346 static inline void
1347 rotate_right(emem_tree_t *se_tree, emem_tree_node_t *node)
1348 {
1349         if(node->parent){
1350                 if(node->parent->left==node){
1351                         node->parent->left=node->left;
1352                 } else {
1353                         node->parent->right=node->left;
1354                 }
1355         } else {
1356                 se_tree->tree=node->left;
1357         }
1358         node->left->parent=node->parent;
1359         node->parent=node->left;
1360         node->left=node->left->right;
1361         if(node->left){
1362                 node->left->parent=node;
1363         }
1364         node->parent->right=node;
1365 }
1366
1367 static inline void
1368 rb_insert_case5(emem_tree_t *se_tree, emem_tree_node_t *node)
1369 {
1370         emem_tree_node_t *grandparent;
1371         emem_tree_node_t *parent;
1372
1373         parent=emem_tree_parent(node);
1374         grandparent=emem_tree_parent(parent);
1375         parent->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1376         grandparent->u.rb_color=EMEM_TREE_RB_COLOR_RED;
1377         if( (node==parent->left) && (parent==grandparent->left) ){
1378                 rotate_right(se_tree, grandparent);
1379         } else {
1380                 rotate_left(se_tree, grandparent);
1381         }
1382 }
1383
1384 static inline void
1385 rb_insert_case4(emem_tree_t *se_tree, emem_tree_node_t *node)
1386 {
1387         emem_tree_node_t *grandparent;
1388         emem_tree_node_t *parent;
1389
1390         parent=emem_tree_parent(node);
1391         grandparent=emem_tree_parent(parent);
1392         if(!grandparent){
1393                 return;
1394         }
1395         if( (node==parent->right) && (parent==grandparent->left) ){
1396                 rotate_left(se_tree, parent);
1397                 node=node->left;
1398         } else if( (node==parent->left) && (parent==grandparent->right) ){
1399                 rotate_right(se_tree, parent);
1400                 node=node->right;
1401         }
1402         rb_insert_case5(se_tree, node);
1403 }
1404
1405 static inline void
1406 rb_insert_case3(emem_tree_t *se_tree, emem_tree_node_t *node)
1407 {
1408         emem_tree_node_t *grandparent;
1409         emem_tree_node_t *parent;
1410         emem_tree_node_t *uncle;
1411
1412         uncle=emem_tree_uncle(node);
1413         if(uncle && (uncle->u.rb_color==EMEM_TREE_RB_COLOR_RED)){
1414                 parent=emem_tree_parent(node);
1415                 parent->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1416                 uncle->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1417                 grandparent=emem_tree_grandparent(node);
1418                 grandparent->u.rb_color=EMEM_TREE_RB_COLOR_RED;
1419                 rb_insert_case1(se_tree, grandparent);
1420         } else {
1421                 rb_insert_case4(se_tree, node);
1422         }
1423 }
1424
1425 static inline void
1426 rb_insert_case2(emem_tree_t *se_tree, emem_tree_node_t *node)
1427 {
1428         emem_tree_node_t *parent;
1429
1430         parent=emem_tree_parent(node);
1431         /* parent is always non-NULL here */
1432         if(parent->u.rb_color==EMEM_TREE_RB_COLOR_BLACK){
1433                 return;
1434         }
1435         rb_insert_case3(se_tree, node);
1436 }
1437
1438 static inline void
1439 rb_insert_case1(emem_tree_t *se_tree, emem_tree_node_t *node)
1440 {
1441         emem_tree_node_t *parent;
1442
1443         parent=emem_tree_parent(node);
1444         if(!parent){
1445                 node->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1446                 return;
1447         }
1448         rb_insert_case2(se_tree, node);
1449 }
1450
1451 /* insert a new node in the tree. if this node matches an already existing node
1452  * then just replace the data for that node */
1453 void
1454 emem_tree_insert32(emem_tree_t *se_tree, guint32 key, void *data)
1455 {
1456         emem_tree_node_t *node;
1457
1458         node=se_tree->tree;
1459
1460         /* is this the first node ?*/
1461         if(!node){
1462                 node=se_tree->malloc(sizeof(emem_tree_node_t));
1463                 switch(se_tree->type){
1464                 case EMEM_TREE_TYPE_RED_BLACK:
1465                         node->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1466                         break;
1467                 }
1468                 node->parent=NULL;
1469                 node->left=NULL;
1470                 node->right=NULL;
1471                 node->key32=key;
1472                 node->data=data;
1473                 node->u.is_subtree = EMEM_TREE_NODE_IS_DATA;
1474                 se_tree->tree=node;
1475                 return;
1476         }
1477
1478         /* it was not the new root so walk the tree until we find where to
1479          * insert this new leaf.
1480          */
1481         while(1){
1482                 /* this node already exists, so just replace the data pointer*/
1483                 if(key==node->key32){
1484                         node->data=data;
1485                         return;
1486                 }
1487                 if(key<node->key32) {
1488                         if(!node->left){
1489                                 /* new node to the left */
1490                                 emem_tree_node_t *new_node;
1491                                 new_node=se_tree->malloc(sizeof(emem_tree_node_t));
1492                                 node->left=new_node;
1493                                 new_node->parent=node;
1494                                 new_node->left=NULL;
1495                                 new_node->right=NULL;
1496                                 new_node->key32=key;
1497                                 new_node->data=data;
1498                                 new_node->u.is_subtree=EMEM_TREE_NODE_IS_DATA;
1499                                 node=new_node;
1500                                 break;
1501                         }
1502                         node=node->left;
1503                         continue;
1504                 }
1505                 if(key>node->key32) {
1506                         if(!node->right){
1507                                 /* new node to the right */
1508                                 emem_tree_node_t *new_node;
1509                                 new_node=se_tree->malloc(sizeof(emem_tree_node_t));
1510                                 node->right=new_node;
1511                                 new_node->parent=node;
1512                                 new_node->left=NULL;
1513                                 new_node->right=NULL;
1514                                 new_node->key32=key;
1515                                 new_node->data=data;
1516                                 new_node->u.is_subtree=EMEM_TREE_NODE_IS_DATA;
1517                                 node=new_node;
1518                                 break;
1519                         }
1520                         node=node->right;
1521                         continue;
1522                 }
1523         }
1524
1525         /* node will now point to the newly created node */
1526         switch(se_tree->type){
1527         case EMEM_TREE_TYPE_RED_BLACK:
1528                 node->u.rb_color=EMEM_TREE_RB_COLOR_RED;
1529                 rb_insert_case1(se_tree, node);
1530                 break;
1531         }
1532 }
1533
1534 static void *
1535 lookup_or_insert32(emem_tree_t *se_tree, guint32 key, void*(*func)(void*),void* ud, int is_subtree)
1536 {
1537         emem_tree_node_t *node;
1538
1539         node=se_tree->tree;
1540
1541         /* is this the first node ?*/
1542         if(!node){
1543                 node=se_tree->malloc(sizeof(emem_tree_node_t));
1544                 switch(se_tree->type){
1545                         case EMEM_TREE_TYPE_RED_BLACK:
1546                                 node->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1547                                 break;
1548                 }
1549                 node->parent=NULL;
1550                 node->left=NULL;
1551                 node->right=NULL;
1552                 node->key32=key;
1553                 node->data= func(ud);
1554                 node->u.is_subtree = is_subtree;
1555                 se_tree->tree=node;
1556                 return node->data;
1557         }
1558
1559         /* it was not the new root so walk the tree until we find where to
1560          * insert this new leaf.
1561          */
1562         while(1){
1563                 /* this node already exists, so just return the data pointer*/
1564                 if(key==node->key32){
1565                         return node->data;
1566                 }
1567                 if(key<node->key32) {
1568                         if(!node->left){
1569                                 /* new node to the left */
1570                                 emem_tree_node_t *new_node;
1571                                 new_node=se_tree->malloc(sizeof(emem_tree_node_t));
1572                                 node->left=new_node;
1573                                 new_node->parent=node;
1574                                 new_node->left=NULL;
1575                                 new_node->right=NULL;
1576                                 new_node->key32=key;
1577                                 new_node->data= func(ud);
1578                                 new_node->u.is_subtree = is_subtree;
1579                                 node=new_node;
1580                                 break;
1581                         }
1582                         node=node->left;
1583                         continue;
1584                 }
1585                 if(key>node->key32) {
1586                         if(!node->right){
1587                                 /* new node to the right */
1588                                 emem_tree_node_t *new_node;
1589                                 new_node=se_tree->malloc(sizeof(emem_tree_node_t));
1590                                 node->right=new_node;
1591                                 new_node->parent=node;
1592                                 new_node->left=NULL;
1593                                 new_node->right=NULL;
1594                                 new_node->key32=key;
1595                                 new_node->data= func(ud);
1596                                 new_node->u.is_subtree = is_subtree;
1597                                 node=new_node;
1598                                 break;
1599                         }
1600                         node=node->right;
1601                         continue;
1602                 }
1603         }
1604
1605         /* node will now point to the newly created node */
1606         switch(se_tree->type){
1607                 case EMEM_TREE_TYPE_RED_BLACK:
1608                         node->u.rb_color=EMEM_TREE_RB_COLOR_RED;
1609                         rb_insert_case1(se_tree, node);
1610                         break;
1611         }
1612
1613         return node->data;
1614 }
1615
1616 /* When the se data is released, this entire tree will disappear as if it
1617  * never existed, including all metadata associated with the tree.
1618  */
1619 emem_tree_t *
1620 se_tree_create_non_persistent(int type, const char *name)
1621 {
1622         emem_tree_t *tree_list;
1623
1624         tree_list=se_alloc(sizeof(emem_tree_t));
1625         tree_list->next=NULL;
1626         tree_list->type=type;
1627         tree_list->tree=NULL;
1628         tree_list->name=name;
1629         tree_list->malloc=se_alloc;
1630
1631         return tree_list;
1632 }
1633
1634 /* This tree is permanent and will never be released.
1635  */
1636 emem_tree_t *
1637 pe_tree_create(int type, const char *name)
1638 {
1639         emem_tree_t *tree_list;
1640
1641         tree_list=g_new(emem_tree_t, 1);
1642         tree_list->next=NULL;
1643         tree_list->type=type;
1644         tree_list->tree=NULL;
1645         tree_list->name=name;
1646         tree_list->malloc=(void *(*)(size_t)) g_malloc;
1647
1648         return tree_list;
1649 }
1650
1651 /* create another (sub)tree using the same memory allocation scope
1652  * as the parent tree.
1653  */
1654 static emem_tree_t *
1655 emem_tree_create_subtree(emem_tree_t *parent_tree, const char *name)
1656 {
1657         emem_tree_t *tree_list;
1658
1659         tree_list=parent_tree->malloc(sizeof(emem_tree_t));
1660         tree_list->next=NULL;
1661         tree_list->type=parent_tree->type;
1662         tree_list->tree=NULL;
1663         tree_list->name=name;
1664         tree_list->malloc=parent_tree->malloc;
1665
1666         return tree_list;
1667 }
1668
1669 static void *
1670 create_sub_tree(void* d)
1671 {
1672         emem_tree_t *se_tree = d;
1673         return emem_tree_create_subtree(se_tree, "subtree");
1674 }
1675
1676 /* insert a new node in the tree. if this node matches an already existing node
1677  * then just replace the data for that node */
1678
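/* A key is an array of emem_tree_key_t parts terminated by an entry with
 * length==0.  A minimal sketch of building such a key (the variable names
 * below are hypothetical, not part of this API):
 *
 *      guint32 addr_word[1];
 *      guint32 port_word[1];
 *      emem_tree_key_t key[3];
 *
 *      addr_word[0] = ip_address;
 *      port_word[0] = tcp_port;
 *      key[0].length = 1;  key[0].key = addr_word;
 *      key[1].length = 1;  key[1].key = port_word;
 *      key[2].length = 0;  key[2].key = NULL;
 *
 *      emem_tree_insert32_array(tree, key, data);
 */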
1679 void
1680 emem_tree_insert32_array(emem_tree_t *se_tree, emem_tree_key_t *key, void *data)
1681 {
1682         emem_tree_t *next_tree;
1683
1684         if((key[0].length<1)||(key[0].length>100)){
1685                 DISSECTOR_ASSERT_NOT_REACHED();
1686         }
1687         if((key[0].length==1)&&(key[1].length==0)){
1688                 emem_tree_insert32(se_tree, *key[0].key, data);
1689                 return;
1690         }
1691
1692         next_tree=lookup_or_insert32(se_tree, *key[0].key, create_sub_tree, se_tree, EMEM_TREE_NODE_IS_SUBTREE);
1693
1694         if(key[0].length==1){
1695                 key++;
1696         } else {
1697                 key[0].length--;
1698                 key[0].key++;
1699         }
1700         emem_tree_insert32_array(next_tree, key, data);
1701 }
1702
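/* Look up a value previously stored with emem_tree_insert32_array, using the
 * same kind of length-0-terminated key array.  Returns NULL if any part of
 * the key is not found.
 */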
1703 void *
1704 emem_tree_lookup32_array(emem_tree_t *se_tree, emem_tree_key_t *key)
1705 {
1706         emem_tree_t *next_tree;
1707
1708         if((key[0].length<1)||(key[0].length>100)){
1709                 DISSECTOR_ASSERT_NOT_REACHED();
1710         }
1711         if((key[0].length==1)&&(key[1].length==0)){
1712                 return emem_tree_lookup32(se_tree, *key[0].key);
1713         }
1714         next_tree=emem_tree_lookup32(se_tree, *key[0].key);
1715         if(!next_tree){
1716                 return NULL;
1717         }
1718         if(key[0].length==1){
1719                 key++;
1720         } else {
1721                 key[0].length--;
1722                 key[0].key++;
1723         }
1724         return emem_tree_lookup32_array(next_tree, key);
1725 }
1726
1727
1728 /* Strings are stored as an array of uint32s containing the string characters,
1729    4 characters in each uint32.
1730    The first byte of the string is stored as the most significant byte.
1731    If the string length is not a multiple of 4 characters, the last
1732    uint32 containing string bytes is padded with 0 bytes.
1733    After the uint32s containing the string, there is one final terminator
1734    uint32 with the value 0x00000001.
1735 */
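/* For example, the 5-character string "ABCDE" packs into three guint32s:
 * 0x41424344, 0x45000000 and the terminator 0x00000001.
 */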
1736 void
1737 emem_tree_insert_string(emem_tree_t* se_tree, const gchar* k, void* v, guint32 flags)
1738 {
1739         emem_tree_key_t key[2];
1740         guint32 *aligned=NULL;
1741         guint32 len = (guint32) strlen(k);
1742         guint32 divx = (len+3)/4+1;
1743         guint32 i;
1744         guint32 tmp;
1745
1746         aligned = g_malloc(divx * sizeof (guint32));
1747
1748         /* pack the bytes one by one into guint32s */
1749         tmp = 0;
1750         for (i = 0;i < len;i++) {
1751                 unsigned char ch;
1752
1753                 ch = (unsigned char)k[i];
1754                 if (flags & EMEM_TREE_STRING_NOCASE) {
1755                         if(isupper(ch)) {
1756                                 ch = tolower(ch);
1757                         }
1758                 }
1759                 tmp <<= 8;
1760                 tmp |= ch;
1761                 if (i%4 == 3) {
1762                         aligned[i/4] = tmp;
1763                         tmp = 0;
1764                 }
1765         }
1766         /* add required padding to the last uint32 */
1767         if (i%4 != 0) {
1768                 while (i%4 != 0) {
1769                         i++;
1770                         tmp <<= 8;
1771                 }
1772                 aligned[i/4-1] = tmp;
1773         }
1774
1775         /* add the terminator */
1776         aligned[divx-1] = 0x00000001;
1777
1778         key[0].length = divx;
1779         key[0].key = aligned;
1780         key[1].length = 0;
1781         key[1].key = NULL;
1782
1783
1784         emem_tree_insert32_array(se_tree, key, v);
1785         g_free(aligned);
1786 }
1787
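/* Look up a string previously stored with emem_tree_insert_string.  The
 * string is packed into guint32s exactly as in emem_tree_insert_string, so
 * the same flags (e.g. EMEM_TREE_STRING_NOCASE) must be used for the lookup
 * to match.
 */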
1788 void *
1789 emem_tree_lookup_string(emem_tree_t* se_tree, const gchar* k, guint32 flags)
1790 {
1791         emem_tree_key_t key[2];
1792         guint32 *aligned=NULL;
1793         guint32 len = (guint32) strlen(k);
1794         guint32 divx = (len+3)/4+1;
1795         guint32 i;
1796         guint32 tmp;
1797         void *ret;
1798
1799         aligned = g_malloc(divx * sizeof (guint32));
1800
1801         /* pack the bytes one by one into guint32s */
1802         tmp = 0;
1803         for (i = 0;i < len;i++) {
1804                 unsigned char ch;
1805
1806                 ch = (unsigned char)k[i];
1807                 if (flags & EMEM_TREE_STRING_NOCASE) {
1808                         if(isupper(ch)) {
1809                                 ch = tolower(ch);
1810                         }
1811                 }
1812                 tmp <<= 8;
1813                 tmp |= ch;
1814                 if (i%4 == 3) {
1815                         aligned[i/4] = tmp;
1816                         tmp = 0;
1817                 }
1818         }
1819         /* add required padding to the last uint32 */
1820         if (i%4 != 0) {
1821                 while (i%4 != 0) {
1822                         i++;
1823                         tmp <<= 8;
1824                 }
1825                 aligned[i/4-1] = tmp;
1826         }
1827
1828         /* add the terminator */
1829         aligned[divx-1] = 0x00000001;
1830
1831         key[0].length = divx;
1832         key[0].key = aligned;
1833         key[1].length = 0;
1834         key[1].key = NULL;
1835
1836
1837         ret = emem_tree_lookup32_array(se_tree, key);
1838         g_free(aligned);
1839         return ret;
1840 }
1841
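/* Recursive worker for emem_tree_foreach: in-order traversal that descends
 * into subtree nodes and stops as soon as the callback returns TRUE.
 */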
1842 static gboolean
1843 emem_tree_foreach_nodes(emem_tree_node_t* node, tree_foreach_func callback, void *user_data)
1844 {
1845         gboolean stop_traverse = FALSE;
1846
1847         if (!node)
1848                 return FALSE;
1849
1850         if(node->left) {
1851                 stop_traverse = emem_tree_foreach_nodes(node->left, callback, user_data);
1852                 if (stop_traverse) {
1853                         return TRUE;
1854                 }
1855         }
1856
1857         if (node->u.is_subtree == EMEM_TREE_NODE_IS_SUBTREE) {
1858                 stop_traverse = emem_tree_foreach(node->data, callback, user_data);
1859         } else {
1860                 stop_traverse = callback(node->data, user_data);
1861         }
1862
1863         if (stop_traverse) {
1864                 return TRUE;
1865         }
1866
1867         if(node->right) {
1868                 stop_traverse = emem_tree_foreach_nodes(node->right, callback, user_data);
1869                 if (stop_traverse) {
1870                         return TRUE;
1871                 }
1872         }
1873
1874         return FALSE;
1875 }
1876
1877 gboolean
1878 emem_tree_foreach(emem_tree_t* emem_tree, tree_foreach_func callback, void *user_data)
1879 {
1880         if (!emem_tree)
1881                 return FALSE;
1882
1883         if(!emem_tree->tree)
1884                 return FALSE;
1885
1886         return emem_tree_foreach_nodes(emem_tree->tree, callback, user_data);
1887 }
1888
1889
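/* Recursive worker for emem_print_tree: print one node, indented by tree
 * depth, then its left and right children.
 */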
1890 static void
1891 emem_tree_print_nodes(emem_tree_node_t* node, int level)
1892 {
1893         int i;
1894
1895         if (!node)
1896                 return;
1897
1898         for(i=0;i<level;i++){
1899                 printf("    ");
1900         }
1901
1902         printf("NODE:%p parent:%p left:%p right:%p key:%u data:%p\n",
1903                 (void *)node,(void *)(node->parent),(void *)(node->left),(void *)(node->right),
1904                 (node->key32),node->data);
1905         if(node->left)
1906                 emem_tree_print_nodes(node->left, level+1);
1907         if(node->right)
1908                 emem_tree_print_nodes(node->right, level+1);
1909 }
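
/* Debug helper: dump the type, name and contents of an emem tree to stdout. */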
1910 void
1911 emem_print_tree(emem_tree_t* emem_tree)
1912 {
1913         if (!emem_tree)
1914                 return;
1915
1916         printf("EMEM tree type:%d name:%s tree:%p\n",emem_tree->type,emem_tree->name,(void *)(emem_tree->tree));
1917         if(emem_tree->tree)
1918                 emem_tree_print_nodes(emem_tree->tree, 0);
1919 }
1920
1921 /*
1922  * String buffers
1923  */
1924
1925 /*
1926  * Presumably we're using these routines for building strings for the tree.
1927  * Use ITEM_LABEL_LENGTH as the basis for our default lengths.
1928  */
1929
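/*
 * A minimal usage sketch (the seq/flag_str/ti names below are hypothetical):
 *
 *      emem_strbuf_t *buf = ep_strbuf_new_label(NULL);
 *      ep_strbuf_append_printf(buf, "seq=%u", seq);
 *      ep_strbuf_append_c(buf, ' ');
 *      ep_strbuf_append(buf, flag_str);
 *      proto_item_append_text(ti, "%s", buf->str);
 *
 * Both the emem_strbuf_t and its string are ep_ allocated, so they are
 * released automatically at the end of the current packet dissection.
 */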
1930 #define DEFAULT_STRBUF_LEN (ITEM_LABEL_LENGTH / 10)
1931 #define MAX_STRBUF_LEN 65536
1932
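/* Compute the next allocation size: start from cur_alloc_len (or the default
 * if it is zero), double until it covers wanted_alloc_len, and clamp the
 * result to max_alloc_len.  For example, assuming ITEM_LABEL_LENGTH is 240
 * (so DEFAULT_STRBUF_LEN is 24), next_size(0, 100, 0) doubles
 * 24 -> 48 -> 96 -> 192 and returns 192.
 */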
1933 static gsize
1934 next_size(gsize cur_alloc_len, gsize wanted_alloc_len, gsize max_alloc_len)
1935 {
1936         if (max_alloc_len < 1 || max_alloc_len > MAX_STRBUF_LEN) {
1937                 max_alloc_len = MAX_STRBUF_LEN;
1938         }
1939
1940         if (cur_alloc_len < 1) {
1941                 cur_alloc_len = DEFAULT_STRBUF_LEN;
1942         }
1943
1944         while (cur_alloc_len < wanted_alloc_len) {
1945                 cur_alloc_len *= 2;
1946         }
1947
1948         return cur_alloc_len < max_alloc_len ? cur_alloc_len : max_alloc_len;
1949 }
1950
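/* Grow the buffer backing an ep_ strbuf so it can hold at least
 * wanted_alloc_len bytes, subject to max_alloc_len.  The old buffer is not
 * freed here; being ep_ allocated, it is reclaimed when the packet scope is
 * released.
 */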
1951 static void
1952 ep_strbuf_grow(emem_strbuf_t *strbuf, gsize wanted_alloc_len)
1953 {
1954         gsize new_alloc_len;
1955         gchar *new_str;
1956
1957         if (!strbuf || (wanted_alloc_len <= strbuf->alloc_len) || (strbuf->alloc_len >= strbuf->max_alloc_len)) {
1958                 return;
1959         }
1960
1961         new_alloc_len = next_size(strbuf->alloc_len, wanted_alloc_len, strbuf->max_alloc_len);
1962         new_str = ep_alloc(new_alloc_len);
1963         g_strlcpy(new_str, strbuf->str, new_alloc_len);
1964
1965         strbuf->alloc_len = new_alloc_len;
1966         strbuf->str = new_str;
1967 }
1968
1969 emem_strbuf_t *
1970 ep_strbuf_sized_new(gsize alloc_len, gsize max_alloc_len)
1971 {
1972         emem_strbuf_t *strbuf;
1973
1974         strbuf = ep_alloc(sizeof(emem_strbuf_t));
1975
1976         if ((max_alloc_len == 0) || (max_alloc_len > MAX_STRBUF_LEN))
1977                 max_alloc_len = MAX_STRBUF_LEN;
1978         if (alloc_len == 0)
1979                 alloc_len = 1;
1980         else if (alloc_len > max_alloc_len)
1981                 alloc_len = max_alloc_len;
1982
1983         strbuf->str = ep_alloc(alloc_len);
1984         strbuf->str[0] = '\0';
1985
1986         strbuf->len = 0;
1987         strbuf->alloc_len = alloc_len;
1988         strbuf->max_alloc_len = max_alloc_len;
1989
1990         return strbuf;
1991 }
1992
1993 emem_strbuf_t *
1994 ep_strbuf_new(const gchar *init)
1995 {
1996         emem_strbuf_t *strbuf;
1997
1998         strbuf = ep_strbuf_sized_new(next_size(0, init?strlen(init):0, 0), 0);
1999         if (init) {
2000                 gsize full_len;
2001                 full_len = g_strlcpy(strbuf->str, init, strbuf->alloc_len);
2002                 strbuf->len = MIN(full_len, strbuf->alloc_len-1);
2003         }
2004
2005         return strbuf;
2006 }
2007
2008 emem_strbuf_t *
2009 ep_strbuf_new_label(const gchar *init)
2010 {
2011         emem_strbuf_t *strbuf;
2012         gsize full_len;
2013
2014         /* Be optimistic: Allocate default size strbuf string and only      */
2015         /*  request an increase if needed.                                  */
2016         /* XXX: Is it reasonable to assume that much of the usage of        */
2017         /*  ep_strbuf_new_label will have init==NULL or                     */
2018         /*   strlen(init) < DEFAULT_STRBUF_LEN ???                          */
2019         strbuf = ep_strbuf_sized_new(DEFAULT_STRBUF_LEN, ITEM_LABEL_LENGTH);
2020
2021         if (!init)
2022                 return strbuf;
2023
2024         /* full_len does not count the trailing '\0'.                       */
2025         full_len = g_strlcpy(strbuf->str, init, strbuf->alloc_len);
2026         if (full_len < strbuf->alloc_len) {
2027                 strbuf->len += full_len;
2028         } else {
2029                 strbuf = ep_strbuf_sized_new(full_len+1, ITEM_LABEL_LENGTH);
2030                 full_len = g_strlcpy(strbuf->str, init, strbuf->alloc_len);
2031                 strbuf->len = MIN(full_len, strbuf->alloc_len-1);
2032         }
2033
2034         return strbuf;
2035 }
2036
2037 emem_strbuf_t *
2038 ep_strbuf_append(emem_strbuf_t *strbuf, const gchar *str)
2039 {
2040         gsize add_len, full_len;
2041
2042         if (!strbuf || !str || str[0] == '\0') {
2043                 return strbuf;
2044         }
2045
2046         /* Be optimistic; try the g_strlcpy first & see if there is enough room.        */
2047         /* Note: full_len doesn't count the trailing '\0'; add_len does allow for it.   */
2048         add_len = strbuf->alloc_len - strbuf->len;
2049         full_len = g_strlcpy(&strbuf->str[strbuf->len], str, add_len);
2050         if (full_len < add_len) {
2051                 strbuf->len += full_len;
2052         } else {
2053                 strbuf->str[strbuf->len] = '\0'; /* end string at original length again */
2054                 ep_strbuf_grow(strbuf, strbuf->len + full_len + 1);
2055                 add_len = strbuf->alloc_len - strbuf->len;
2056                 full_len = g_strlcpy(&strbuf->str[strbuf->len], str, add_len);
2057                 strbuf->len += MIN(add_len-1, full_len);
2058         }
2059
2060         return strbuf;
2061 }
2062
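/* Append a vprintf-style formatted string to the strbuf.  The argument list
 * is copied with G_VA_COPY up front because the first, optimistic
 * g_vsnprintf attempt may consume ap; the copy is used for the retry after
 * growing the buffer.
 */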
2063 void
2064 ep_strbuf_append_vprintf(emem_strbuf_t *strbuf, const gchar *format, va_list ap)
2065 {
2066         va_list ap2;
2067         gsize add_len, full_len;
2068
2069         G_VA_COPY(ap2, ap);
2070
2071         /* Be optimistic; try the g_vsnprintf first & see if there is enough room.      */
2072         /* Note: full_len doesn't count the trailing '\0'; add_len does allow for it.   */
2073         add_len = strbuf->alloc_len - strbuf->len;
2074         full_len = g_vsnprintf(&strbuf->str[strbuf->len], (gulong) add_len, format, ap);
2075         if (full_len < add_len) {
2076                 strbuf->len += full_len;
2077         } else {
2078                 strbuf->str[strbuf->len] = '\0'; /* end string at original length again */
2079                 ep_strbuf_grow(strbuf, strbuf->len + full_len + 1);
2080                 add_len = strbuf->alloc_len - strbuf->len;
2081                 full_len = g_vsnprintf(&strbuf->str[strbuf->len], (gulong) add_len, format, ap2);
2082                 strbuf->len += MIN(add_len-1, full_len);
2083         }
2084
2085         va_end(ap2);
2086 }
2087
2088 void
2089 ep_strbuf_append_printf(emem_strbuf_t *strbuf, const gchar *format, ...)
2090 {
2091         va_list ap;
2092
2093         va_start(ap, format);
2094         ep_strbuf_append_vprintf(strbuf, format, ap);
2095         va_end(ap);
2096 }
2097
2098 void
2099 ep_strbuf_printf(emem_strbuf_t *strbuf, const gchar *format, ...)
2100 {
2101         va_list ap;
2102         if (!strbuf) {
2103                 return;
2104         }
2105
2106         strbuf->len = 0;
2107
2108         va_start(ap, format);
2109         ep_strbuf_append_vprintf(strbuf, format, ap);
2110         va_end(ap);
2111 }
2112
2113 emem_strbuf_t *
2114 ep_strbuf_append_c(emem_strbuf_t *strbuf, const gchar c)
2115 {
2116         if (!strbuf) {
2117                 return strbuf;
2118         }
2119
2120         /* +1 for the new character & +1 for the trailing '\0'. */
2121         if (strbuf->alloc_len < strbuf->len + 1 + 1) {
2122                 ep_strbuf_grow(strbuf, strbuf->len + 1 + 1);
2123         }
2124         if (strbuf->alloc_len >= strbuf->len + 1 + 1) {
2125                 strbuf->str[strbuf->len] = c;
2126                 strbuf->len++;
2127                 strbuf->str[strbuf->len] = '\0';
2128         }
2129
2130         return strbuf;
2131 }
2132
2133 emem_strbuf_t *
2134 ep_strbuf_truncate(emem_strbuf_t *strbuf, gsize len)
2135 {
2136         if (!strbuf || len >= strbuf->len) {
2137                 return strbuf;
2138         }
2139
2140         strbuf->str[len] = '\0';
2141         strbuf->len = len;
2142
2143         return strbuf;
2144 }
2145
2146 /*
2147  * Editor modelines
2148  *
2149  * Local Variables:
2150  * c-basic-offset: 8
2151  * tab-width: 8
2152  * indent-tabs-mode: t
2153  * End:
2154  *
2155  * ex: set shiftwidth=8 tabstop=8 noexpandtab
2156  * :indentSize=8:tabSize=8:noTabs=false:
2157  */