2 * Wireshark memory management and garbage collection functions
7 * Wireshark - Network traffic analyzer
8 * By Gerald Combs <gerald@wireshark.org>
9 * Copyright 1998 Gerald Combs
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version 2
14 * of the License, or (at your option) any later version.
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
36 #ifdef HAVE_SYS_TIME_H
50 #include <windows.h> /* VirtualAlloc, VirtualProtect */
51 #include <process.h> /* getpid */
54 /* Print out statistics about our memory allocations? */
55 /*#define SHOW_EMEM_STATS*/
57 /* Do we want to use guardpages? if available */
58 #define WANT_GUARD_PAGES 1
60 #ifdef WANT_GUARD_PAGES
61 /* Add guard pages at each end of our allocated memory */
62 #if defined(HAVE_SYSCONF) && defined(HAVE_MMAP) && defined(HAVE_MPROTECT) && defined(HAVE_STDINT_H)
64 #ifdef HAVE_SYS_TYPES_H
65 #include <sys/types.h>
68 #if defined(MAP_ANONYMOUS)
69 #define ANON_PAGE_MODE (MAP_ANONYMOUS|MAP_PRIVATE)
70 #elif defined(MAP_ANON)
71 #define ANON_PAGE_MODE (MAP_ANON|MAP_PRIVATE)
73 #define ANON_PAGE_MODE (MAP_PRIVATE) /* have to map /dev/zero */
78 static int dev_zero_fd;
79 #define ANON_FD dev_zero_fd
83 #define USE_GUARD_PAGES 1
87 /* When required, allocate more memory from the OS in this size chunks */
88 #define EMEM_PACKET_CHUNK_SIZE (10 * 1024 * 1024)
90 #define EMEM_CANARY_SIZE 8
91 #define EMEM_CANARY_DATA_SIZE (EMEM_CANARY_SIZE * 2 - 1)
/* One chunk of pool memory obtained from the OS.
 * NOTE(review): this dump is missing lines of the struct (e.g. the data
 * buffer pointer and the closing brace) -- confirm against the full source. */
93 typedef struct _emem_chunk_t {
94 struct _emem_chunk_t *next; /* next chunk in the free/used list */
96 unsigned int amount_free_init; /* usable bytes when the chunk was set up */
97 unsigned int amount_free; /* usable bytes still available */
98 unsigned int free_offset_init; /* offset of the first usable byte */
99 unsigned int free_offset; /* offset where the next allocation goes */
/* Per-pool allocator state; one instance each for ep_ (packet-lifetime)
 * and se_ (capture-lifetime) memory.
 * NOTE(review): fields are missing from this dump (closing brace not
 * visible) -- confirm against the full source. */
103 typedef struct _emem_header_t {
104 emem_chunk_t *free_list; /* chunks with space remaining */
105 emem_chunk_t *used_list; /* chunks that are fully used */
107 emem_tree_t *trees; /* only used by se_mem allocator */
109 guint8 canary[EMEM_CANARY_DATA_SIZE]; /* random canary pattern for this pool */
110 void *(*memory_alloc)(size_t size, struct _emem_header_t *); /* chunked or glib backend */
113 * Tools like Valgrind and ElectricFence don't work well with memchunks.
114 * Export the following environment variables to make {ep|se}_alloc() allocate each
115 * object individually.
117 * WIRESHARK_DEBUG_EP_NO_CHUNKS
118 * WIRESHARK_DEBUG_SE_NO_CHUNKS
120 gboolean debug_use_chunks;
122 /* Do we want to use canaries?
123 * Export the following environment variables to disable/enable canaries
125 * WIRESHARK_DEBUG_EP_NO_CANARY
126 * For SE memory use of canary is default off as the memory overhead
128 * WIRESHARK_DEBUG_SE_USE_CANARY
130 gboolean debug_use_canary;
132 /* Do we want to verify no one is using a pointer to an ep_ or se_
133 * allocated thing where they shouldn't be?
135 * Export WIRESHARK_EP_VERIFY_POINTERS or WIRESHARK_SE_VERIFY_POINTERS
138 gboolean debug_verify_pointers;
142 static emem_header_t ep_packet_mem;
143 static emem_header_t se_packet_mem;
146 * Memory scrubbing is expensive but can be useful to ensure we don't:
147 * - use memory before initializing it
148 * - use memory after freeing it
149 * Export WIRESHARK_DEBUG_SCRUB_MEMORY to turn it on.
151 static gboolean debug_use_memory_scrubber = FALSE;
154 static SYSTEM_INFO sysinfo;
155 static OSVERSIONINFO versinfo;
157 #elif defined(USE_GUARD_PAGES)
158 static intptr_t pagesize;
159 #endif /* _WIN32 / USE_GUARD_PAGES */
161 static void *emem_alloc_chunk(size_t size, emem_header_t *mem);
162 static void *emem_alloc_glib(size_t size, emem_header_t *mem);
165 * Set a canary value to be placed between memchunks.
/* Fill 'canary' with EMEM_CANARY_DATA_SIZE random bytes in 1..0xFF.
 * Zero is excluded so a NUL byte can act as a terminator inside the
 * stored canary data. The GRand state is created lazily and shared
 * across calls (not thread-safe -- single-threaded init assumed). */
168 emem_canary_init(guint8 *canary)
171 static GRand *rand_state = NULL;
173 if (rand_state == NULL) {
174 rand_state = g_rand_new();
176 for (i = 0; i < EMEM_CANARY_DATA_SIZE; i ++) {
177 canary[i] = (guint8) g_rand_int_range(rand_state, 1, 0x100);
/* Walk one link of the in-chunk canary chain: verify that the bytes at
 * 'canary' match the pool's canary pattern 'mem_canary', then read the
 * pointer to the previous canary that was stored right after the NUL
 * terminator. If 'len' is non-NULL it receives the total bytes consumed
 * by this canary (pattern + NUL + stored pointer).
 * NOTE(review): the return statements are missing from this dump;
 * callers compare the result against NULL and (void *)-1 (corruption). */
183 emem_canary_next(guint8 *mem_canary, guint8 *canary, int *len)
188 for (i = 0; i < EMEM_CANARY_SIZE-1; i++)
189 if (mem_canary[i] != canary[i])
192 for (; i < EMEM_CANARY_DATA_SIZE; i++) {
193 if (canary[i] == '\0') {
194 memcpy(&ptr, &canary[i+1], sizeof(void *));
197 *len = i + 1 + sizeof(void *);
201 if (mem_canary[i] != canary[i])
209 * Given an allocation size, return the amount of padding needed for
213 emem_canary_pad (size_t allocation)
217 pad = EMEM_CANARY_SIZE - (allocation % EMEM_CANARY_SIZE);
/* Guarantee at least EMEM_CANARY_SIZE pad bytes so a full canary
 * always fits after the allocation. */
218 if (pad < EMEM_CANARY_SIZE)
219 pad += EMEM_CANARY_SIZE;
224 /* used for debugging canaries, will block */
225 #ifdef DEBUG_INTENSE_CANARY_CHECKS
226 gboolean intense_canary_checking = FALSE;
228 /* used to intensively check ep canaries
231 ep_check_canary_integrity(const char* fmt, ...)
/* 'there' remembers the formatted location of the previous call so a
 * corruption can be reported as "between <there> and <here>". */
234 static gchar there[128] = {
235 'L','a','u','n','c','h',0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
236 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
237 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
238 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 };
240 emem_chunk_t* npc = NULL;
242 if (! intense_canary_checking ) return;
245 g_vsnprintf(here, sizeof(here), fmt, ap);
/* Walk every canary chain in the ep free list; emem_canary_next()
 * flags corruption by yielding (void *)-1. */
248 for (npc = ep_packet_mem.free_list; npc != NULL; npc = npc->next) {
249 void *canary_next = npc->canary_last;
251 while (canary_next != NULL) {
252 canary_next = emem_canary_next(ep_packet_mem.canary, canary_next, NULL);
253 /* XXX, check if canary_last is inside allocated memory? */
255 if (npc->canary_last == (void *) -1)
256 g_error("Per-packet memory corrupted\nbetween: %s\nand: %s", there, here);
260 g_strlcpy(there, here, sizeof(there));
/* Common initialization for a pool header: set up its canary pattern
 * (if canaries are enabled) and pick the allocation backend --
 * chunked OS allocations or one-glib-allocation-per-object. */
265 emem_init_chunk(emem_header_t *mem)
267 if (mem->debug_use_canary)
268 emem_canary_init(mem->canary);
270 if (mem->debug_use_chunks)
271 mem->memory_alloc = emem_alloc_chunk;
273 mem->memory_alloc = emem_alloc_glib;
277 /* Initialize the packet-lifetime memory allocation pool.
278 * This function should be called only once when Wireshark or TShark starts
284 ep_packet_mem.free_list=NULL;
285 ep_packet_mem.used_list=NULL;
286 ep_packet_mem.trees=NULL; /* not used by this allocator */
/* ep defaults: chunks ON, canaries ON; each can be disabled via the
 * WIRESHARK_DEBUG_EP_* environment variables. Canaries require chunks. */
288 ep_packet_mem.debug_use_chunks = (getenv("WIRESHARK_DEBUG_EP_NO_CHUNKS") == NULL);
289 ep_packet_mem.debug_use_canary = ep_packet_mem.debug_use_chunks && (getenv("WIRESHARK_DEBUG_EP_NO_CANARY") == NULL);
290 ep_packet_mem.debug_verify_pointers = (getenv("WIRESHARK_EP_VERIFY_POINTERS") != NULL);
292 #ifdef DEBUG_INTENSE_CANARY_CHECKS
293 intense_canary_checking = (getenv("WIRESHARK_DEBUG_EP_INTENSE_CANARY") != NULL);
296 emem_init_chunk(&ep_packet_mem);
299 /* Initialize the capture-lifetime memory allocation pool.
300 * This function should be called only once when Wireshark or TShark starts
306 se_packet_mem.free_list = NULL;
307 se_packet_mem.used_list = NULL;
308 se_packet_mem.trees = NULL;
/* se defaults differ from ep: canaries are OFF unless explicitly
 * enabled (note the != NULL test on ..._USE_CANARY vs the == NULL
 * test on ep's ..._NO_CANARY). */
310 se_packet_mem.debug_use_chunks = (getenv("WIRESHARK_DEBUG_SE_NO_CHUNKS") == NULL);
311 se_packet_mem.debug_use_canary = se_packet_mem.debug_use_chunks && (getenv("WIRESHARK_DEBUG_SE_USE_CANARY") != NULL);
312 se_packet_mem.debug_verify_pointers = (getenv("WIRESHARK_SE_VERIFY_POINTERS") != NULL);
314 emem_init_chunk(&se_packet_mem);
317 /* Initialize all the allocators here.
318 * This function should be called only once when Wireshark or TShark starts
327 if (getenv("WIRESHARK_DEBUG_SCRUB_MEMORY"))
328 debug_use_memory_scrubber = TRUE;
331 /* Set up our guard page info for Win32 */
332 GetSystemInfo(&sysinfo);
333 pagesize = sysinfo.dwPageSize;
335 /* calling GetVersionEx using the OSVERSIONINFO structure.
336 * OSVERSIONINFOEX requires Win NT4 with SP6 or newer NT Versions.
337 * OSVERSIONINFOEX will fail on Win9x and older NT Versions.
339 * http://msdn.microsoft.com/library/en-us/sysinfo/base/getversionex.asp
340 * http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfo_str.asp
341 * http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfoex_str.asp
343 versinfo.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
344 GetVersionEx(&versinfo);
346 #elif defined(USE_GUARD_PAGES)
347 pagesize = sysconf(_SC_PAGESIZE);
/* Without MAP_ANONYMOUS/MAP_ANON we map /dev/zero to get zero pages;
 * keep the fd open for the lifetime of the process. */
349 dev_zero_fd = ws_open("/dev/zero", O_RDWR);
350 g_assert(dev_zero_fd != -1);
352 #endif /* _WIN32 / USE_GUARD_PAGES */
355 #ifdef SHOW_EMEM_STATS
356 #define NUM_ALLOC_DIST 10
/* Histogram of se allocation sizes (powers of two starting at 32)
 * and total chunk count, maintained by emem_alloc_chunk(). */
357 static guint allocations[NUM_ALLOC_DIST] = { 0 };
358 static guint total_no_chunks = 0;
/* Dump usage/waste statistics for both the ep and se pools to stderr.
 * Only chunks on the used_list are counted: those are fully used, so
 * they give an unskewed view of per-allocation waste.
 * NOTE(review): many lines of this function are missing from this dump. */
363 guint num_chunks = 0;
364 guint num_allocs = 0;
365 guint total_used = 0;
366 guint total_allocation = 0;
367 guint total_free = 0;
368 guint used_for_canaries = 0;
372 guint total_space_allocated_from_os, total_space_wasted;
373 gboolean ep_stat=TRUE;
375 fprintf(stderr, "\n-------- EP allocator statistics --------\n");
376 fprintf(stderr, "%s chunks, %s canaries, %s memory scrubber\n",
377 ep_packet_mem.debug_use_chunks ? "Using" : "Not using",
378 ep_packet_mem.debug_use_canary ? "using" : "not using",
379 debug_use_memory_scrubber ? "using" : "not using");
381 if (! (ep_packet_mem.free_list || !ep_packet_mem.used_list)) {
382 fprintf(stderr, "No memory allocated\n");
385 if (ep_packet_mem.debug_use_chunks && ep_stat) {
386 /* Nothing interesting without chunks */
387 /* Only look at the used_list since those chunks are fully
388 * used. Looking at the free list would skew our view of what
391 for (chunk = ep_packet_mem.used_list; chunk; chunk = chunk->next) {
393 total_used += (chunk->amount_free_init - chunk->amount_free);
394 total_allocation += chunk->amount_free_init;
395 total_free += chunk->amount_free;
397 if (num_chunks > 0) {
398 fprintf (stderr, "\n");
399 fprintf (stderr, "\n---- Buffer space ----\n");
400 fprintf (stderr, "\tChunk allocation size: %10u\n", EMEM_PACKET_CHUNK_SIZE);
401 fprintf (stderr, "\t* Number of chunks: %10u\n", num_chunks);
402 fprintf (stderr, "\t-------------------------------------------\n");
403 fprintf (stderr, "\t= %u (%u including guard pages) total space used for buffers\n",
404 total_allocation, EMEM_PACKET_CHUNK_SIZE * num_chunks);
405 fprintf (stderr, "\t-------------------------------------------\n");
406 total_space_allocated_from_os = total_allocation
407 + sizeof(emem_chunk_t) * num_chunks;
408 fprintf (stderr, "Total allocated from OS: %u\n\n",
409 total_space_allocated_from_os);
411 fprintf (stderr, "No fully used chunks, nothing to do\n");
/* Reset the accumulators before the SE pass. */
417 total_allocation = 0;
419 used_for_canaries = 0;
423 fprintf(stderr, "\n-------- SE allocator statistics --------\n");
424 fprintf(stderr, "Total number of chunk allocations %u\n",
426 fprintf(stderr, "%s chunks, %s canaries\n",
427 se_packet_mem.debug_use_chunks ? "Using" : "Not using",
428 se_packet_mem.debug_use_canary ? "using" : "not using");
430 if (! (se_packet_mem.free_list || !se_packet_mem.used_list)) {
431 fprintf(stderr, "No memory allocated\n");
435 if (!se_packet_mem.debug_use_chunks )
436 return; /* Nothing interesting without chunks?? */
438 /* Only look at the used_list since those chunks are fully used.
439 * Looking at the free list would skew our view of what we have wasted.
441 for (chunk = se_packet_mem.used_list; chunk; chunk = chunk->next) {
443 total_used += (chunk->amount_free_init - chunk->amount_free);
444 total_allocation += chunk->amount_free_init;
445 total_free += chunk->amount_free;
/* Walk the canary chain to measure canary overhead; a (void *)-1
 * link means a canary was overwritten. */
447 if (se_packet_mem.debug_use_canary){
448 void *ptr = chunk->canary_last;
451 while (ptr != NULL) {
452 ptr = emem_canary_next(se_packet_mem.canary, ptr, &len);
454 if (ptr == (void *) -1)
455 g_error("Memory corrupted");
456 used_for_canaries += len;
461 if (num_chunks == 0) {
463 fprintf (stderr, "No fully used chunks, nothing to do\n");
467 fprintf (stderr, "\n");
468 fprintf (stderr, "---------- Allocations from the OS ----------\n");
469 fprintf (stderr, "---- Headers ----\n");
470 fprintf (stderr, "\t( Chunk header size: %10lu\n",
471 sizeof(emem_chunk_t));
472 fprintf (stderr, "\t* Number of chunks: %10u\n", num_chunks);
473 fprintf (stderr, "\t-------------------------------------------\n");
475 total_headers = sizeof(emem_chunk_t) * num_chunks;
476 fprintf (stderr, "\t= %u bytes used for headers\n", total_headers);
477 fprintf (stderr, "\n---- Buffer space ----\n");
478 fprintf (stderr, "\tChunk allocation size: %10u\n",
479 EMEM_PACKET_CHUNK_SIZE);
480 fprintf (stderr, "\t* Number of chunks: %10u\n", num_chunks);
481 fprintf (stderr, "\t-------------------------------------------\n");
482 fprintf (stderr, "\t= %u (%u including guard pages) bytes used for buffers\n",
483 total_allocation, EMEM_PACKET_CHUNK_SIZE * num_chunks);
484 fprintf (stderr, "\t-------------------------------------------\n");
485 total_space_allocated_from_os = (EMEM_PACKET_CHUNK_SIZE * num_chunks)
487 fprintf (stderr, "Total bytes allocated from the OS: %u\n\n",
488 total_space_allocated_from_os);
490 for (i = 0; i < NUM_ALLOC_DIST; i++)
491 num_allocs += allocations[i];
493 fprintf (stderr, "---------- Allocations from the SE pool ----------\n");
494 fprintf (stderr, " Number of SE allocations: %10u\n",
496 fprintf (stderr, " Bytes used (incl. canaries): %10u\n",
498 fprintf (stderr, " Bytes used for canaries: %10u\n",
500 fprintf (stderr, "Bytes unused (wasted, excl. guard pages): %10u\n",
501 total_allocation - total_used);
502 fprintf (stderr, "Bytes unused (wasted, incl. guard pages): %10u\n\n",
503 total_space_allocated_from_os - total_used);
505 fprintf (stderr, "---------- Statistics ----------\n");
506 fprintf (stderr, "Average SE allocation size (incl. canaries): %6.2f\n",
507 (float)total_used/(float)num_allocs);
508 fprintf (stderr, "Average SE allocation size (excl. canaries): %6.2f\n",
509 (float)(total_used - used_for_canaries)/(float)num_allocs);
510 fprintf (stderr, " Average wasted bytes per allocation: %6.2f\n",
511 (total_allocation - total_used)/(float)num_allocs);
512 total_space_wasted = (total_allocation - total_used)
513 + (sizeof(emem_chunk_t));
514 fprintf (stderr, " Space used for headers + unused allocation: %8u\n",
516 fprintf (stderr, "--> %% overhead/waste: %4.2f\n",
517 100 * (float)total_space_wasted/(float)total_space_allocated_from_os);
519 fprintf (stderr, "\nAllocation distribution (sizes include canaries):\n");
520 for (i = 0; i < (NUM_ALLOC_DIST-1); i++)
521 fprintf (stderr, "size < %5d: %8u\n", 32<<i, allocations[i]);
522 fprintf (stderr, "size > %5d: %8u\n", 32<<i, allocations[i]);
/* Return whether 'ptr' lies inside memory handed out by this pool,
 * i.e. within the already-allocated region of any chunk on either
 * the free or used list. Scans both lists linearly.
 * NOTE(review): the return statements are missing from this dump. */
527 emem_verify_pointer(emem_header_t *hdr, const void *ptr)
529 const gchar *cptr = ptr;
530 emem_chunk_t *used_list[2];
531 guint8 used_list_idx;
534 used_list[0] = hdr->free_list;
535 used_list[1] = hdr->used_list;
537 for (used_list_idx=0; used_list_idx < G_N_ELEMENTS(used_list); ++used_list_idx) {
538 chunk = used_list[used_list_idx];
539 for ( ; chunk ; chunk = chunk->next) {
/* Allocated bytes live in [free_offset_init, free_offset). */
540 if (cptr >= (chunk->buf + chunk->free_offset_init) &&
541 cptr < (chunk->buf + chunk->free_offset))
/* Check 'ptr' against the ep pool, but only when pointer verification
 * was enabled via WIRESHARK_EP_VERIFY_POINTERS. */
550 ep_verify_pointer(const void *ptr)
552 if (ep_packet_mem.debug_verify_pointers)
553 return emem_verify_pointer(&ep_packet_mem, ptr);
/* Check 'ptr' against the se pool, but only when pointer verification
 * was enabled via WIRESHARK_SE_VERIFY_POINTERS. */
559 se_verify_pointer(const void *ptr)
561 if (se_packet_mem.debug_verify_pointers)
562 return emem_verify_pointer(&se_packet_mem, ptr);
/* Overwrite 'size' bytes at 'buf' with a recognizable pattern:
 * 0xBADDCAFE when the memory is being handed out (alloc == TRUE),
 * 0xDEADBEEF when it is being returned to the pool. No-op unless
 * WIRESHARK_DEBUG_SCRUB_MEMORY was exported at startup. */
568 emem_scrub_memory(char *buf, size_t size, gboolean alloc)
570 guint scrubbed_value;
573 if (!debug_use_memory_scrubber)
576 if (alloc) /* this memory is being allocated */
577 scrubbed_value = 0xBADDCAFE;
578 else /* this memory is being freed */
579 scrubbed_value = 0xDEADBEEF;
581 /* We shouldn't need to check the alignment of the starting address
582 * since this is malloc'd memory (or 'pagesize' bytes into malloc'd
586 /* XXX - We might want to use memset here in order to avoid problems on
587 * alignment-sensitive platforms, e.g.
588 * http://stackoverflow.com/questions/108866/is-there-memset-that-accepts-integers-larger-than-char
/* Write whole guints first, then patch up the 0-3 trailing bytes. */
591 for (offset = 0; offset + sizeof(guint) <= size; offset += sizeof(guint))
592 *(guint*)(buf+offset) = scrubbed_value;
594 /* Initialize the last bytes, if any */
596 *(guint8*)(buf+offset) = scrubbed_value >> 24;
599 *(guint8*)(buf+offset) = (scrubbed_value >> 16) & 0xFF;
602 *(guint8*)(buf+offset) = (scrubbed_value >> 8) & 0xFF;
/* Allocate one EMEM_PACKET_CHUNK_SIZE chunk from the OS and initialize
 * its bookkeeping. When guard pages are available (Win32 VirtualAlloc/
 * VirtualProtect, or mmap/mprotect elsewhere), the first and last page
 * inside the buffer are made inaccessible so overruns fault instead of
 * silently corrupting; the usable region is what lies between them.
 * Throws OutOfMemoryError if the OS allocation fails. */
610 static emem_chunk_t *
611 emem_create_chunk(void) {
614 char *buf_end, *prot1, *prot2;
616 #elif defined(USE_GUARD_PAGES)
618 char *buf_end, *prot1, *prot2;
619 #endif /* _WIN32 / USE_GUARD_PAGES */
622 npc = g_new(emem_chunk_t, 1);
624 npc->canary_last = NULL;
628 * MSDN documents VirtualAlloc/VirtualProtect at
629 * http://msdn.microsoft.com/library/en-us/memory/base/creating_guard_pages.asp
632 /* XXX - is MEM_COMMIT|MEM_RESERVE correct? */
633 npc->buf = VirtualAlloc(NULL, EMEM_PACKET_CHUNK_SIZE,
634 MEM_COMMIT|MEM_RESERVE, PAGE_READWRITE);
636 if (npc->buf == NULL) {
638 THROW(OutOfMemoryError);
641 #elif defined(USE_GUARD_PAGES)
642 npc->buf = mmap(NULL, EMEM_PACKET_CHUNK_SIZE,
643 PROT_READ|PROT_WRITE, ANON_PAGE_MODE, ANON_FD, 0);
645 if (npc->buf == MAP_FAILED) {
647 THROW(OutOfMemoryError);
650 #else /* Is there a draft in here? */
651 npc->buf = g_malloc(EMEM_PACKET_CHUNK_SIZE);
652 /* g_malloc() can't fail */
655 #ifdef SHOW_EMEM_STATS
660 buf_end = npc->buf + EMEM_PACKET_CHUNK_SIZE;
662 /* Align our guard pages on page-sized boundaries */
663 prot1 = (char *) ((((int) npc->buf + pagesize - 1) / pagesize) * pagesize);
664 prot2 = (char *) ((((int) buf_end - (1 * pagesize)) / pagesize) * pagesize);
/* NOTE(review): the Win32 branch truncates pointers through (int);
 * the POSIX branch below correctly uses (intptr_t). Worth confirming
 * against a 64-bit Windows build of the full source. */
666 ret = VirtualProtect(prot1, pagesize, PAGE_NOACCESS, &oldprot);
667 g_assert(ret != 0 || versinfo.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS);
668 ret = VirtualProtect(prot2, pagesize, PAGE_NOACCESS, &oldprot);
669 g_assert(ret != 0 || versinfo.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS);
671 npc->amount_free_init = (unsigned int) (prot2 - prot1 - pagesize);
672 npc->free_offset_init = (unsigned int) (prot1 - npc->buf) + pagesize;
673 #elif defined(USE_GUARD_PAGES)
674 buf_end = npc->buf + EMEM_PACKET_CHUNK_SIZE;
676 /* Align our guard pages on page-sized boundaries */
677 prot1 = (char *) ((((intptr_t) npc->buf + pagesize - 1) / pagesize) * pagesize);
678 prot2 = (char *) ((((intptr_t) buf_end - (1 * pagesize)) / pagesize) * pagesize);
680 ret = mprotect(prot1, pagesize, PROT_NONE);
682 ret = mprotect(prot2, pagesize, PROT_NONE);
/* Usable area: just past the first guard page up to the second one. */
685 npc->amount_free_init = prot2 - prot1 - pagesize;
686 npc->free_offset_init = (prot1 - npc->buf) + pagesize;
688 npc->amount_free_init = EMEM_PACKET_CHUNK_SIZE;
689 npc->free_offset_init = 0;
690 #endif /* USE_GUARD_PAGES */
692 npc->amount_free = npc->amount_free_init;
693 npc->free_offset = npc->free_offset_init;
/* Chunk-backed allocator: carve 'size' bytes (padded for alignment and,
 * if enabled, for a trailing canary) out of the head chunk of the free
 * list, moving exhausted chunks to the used list and creating a new
 * chunk as needed. Asserts the request is well under the chunk size. */
698 emem_alloc_chunk(size_t size, emem_header_t *mem)
703 gboolean use_canary = mem->debug_use_canary;
705 emem_chunk_t *free_list;
707 /* Round up to an 8 byte boundary. Make sure we have at least
708 * 8 pad bytes for our canary.
711 pad = emem_canary_pad(asize);
/* Reserve room for the pointer to the previous canary stored after it. */
712 asize += sizeof(void *);
714 pad = (G_MEM_ALIGN - (asize & (G_MEM_ALIGN-1))) & (G_MEM_ALIGN-1);
718 #ifdef SHOW_EMEM_STATS
719 /* Do this check here so we can include the canary size */
720 if (mem == &se_packet_mem) {
725 else if (asize < 128)
727 else if (asize < 256)
729 else if (asize < 512)
731 else if (asize < 1024)
733 else if (asize < 2048)
735 else if (asize < 4096)
737 else if (asize < 8192)
739 else if (asize < 16384)
742 allocations[(NUM_ALLOC_DIST-1)]++;
746 /* make sure we dont try to allocate too much (arbitrary limit) */
747 DISSECTOR_ASSERT(size<(EMEM_PACKET_CHUNK_SIZE>>2));
750 mem->free_list = emem_create_chunk();
752 /* oops, we need to allocate more memory to serve this request
753 * than we have free. move this node to the used list and try again
755 if(asize > mem->free_list->amount_free) {
758 mem->free_list=mem->free_list->next;
759 npc->next=mem->used_list;
763 mem->free_list = emem_create_chunk();
766 free_list = mem->free_list;
768 buf = free_list->buf + free_list->free_offset;
770 free_list->amount_free -= (unsigned int) asize;
771 free_list->free_offset += (unsigned int) asize;
/* Write the canary right after the caller's bytes, then chain the
 * previous canary address behind it so free-time verification can
 * walk every allocation in this chunk. */
774 char *cptr = (char *)buf + size;
776 memcpy(cptr, mem->canary, pad-1);
778 memcpy(cptr + pad, &free_list->canary_last, sizeof(void *));
780 free_list->canary_last = cptr;
/* Non-chunked backend (debug mode for Valgrind/ElectricFence): every
 * request gets its own g_malloc()'d buffer plus its own header node
 * on the used list, so each object is individually tracked. */
787 emem_alloc_glib(size_t size, emem_header_t *mem)
791 npc=g_new(emem_chunk_t, 1);
792 npc->next=mem->used_list;
793 npc->buf=g_malloc(size);
794 npc->canary_last = NULL;
796 /* There's no padding/alignment involved (from our point of view) when
797 * we fetch the memory directly from the system pool, so WYSIWYG */
798 npc->free_offset = npc->free_offset_init = 0;
799 npc->amount_free = npc->amount_free_init = (unsigned int) size;
804 /* allocate 'size' amount of memory. */
806 emem_alloc(size_t size, emem_header_t *mem)
/* Dispatch to the backend chosen in emem_init_chunk(). */
808 void *buf = mem->memory_alloc(size, mem);
810 /* XXX - this is a waste of time if the allocator function is going to
811 * memset this straight back to 0.
813 emem_scrub_memory(buf, size, TRUE);
818 /* allocate 'size' amount of memory with an allocation lifetime until the
822 ep_alloc(size_t size)
824 return emem_alloc(size, &ep_packet_mem);
827 /* allocate 'size' amount of memory with an allocation lifetime until the
831 se_alloc(size_t size)
833 return emem_alloc(size, &se_packet_mem);
/* ep_alloc() plus zero-fill; returns the zeroed buffer. */
837 ep_alloc0(size_t size)
839 return memset(ep_alloc(size),'\0',size);
/* Copy of 'src' (including the NUL) in ep memory.
 * NOTE(review): no NULL check visible for 'src' in this dump. */
843 ep_strdup(const gchar* src)
845 guint len = (guint) strlen(src);
848 dst = memcpy(ep_alloc(len+1), src, len+1);
/* Copy at most 'len' characters of 'src' into ep memory, stopping
 * early at a NUL; result is NUL-terminated (termination line missing
 * from this dump). */
854 ep_strndup(const gchar* src, size_t len)
856 gchar* dst = ep_alloc(len+1);
859 for (i = 0; (i < len) && src[i]; i++)
/* Byte-for-byte copy of 'len' bytes at 'src' into ep memory. */
868 ep_memdup(const void* src, size_t len)
870 return memcpy(ep_alloc(len), src, len);
/* printf-style formatting into an ep-allocated string. Sizes the
 * buffer with g_printf_string_upper_bound(), then formats from a
 * copied va_list (copy lines missing from this dump). */
874 ep_strdup_vprintf(const gchar* fmt, va_list ap)
882 len = g_printf_string_upper_bound(fmt, ap);
884 dst = ep_alloc(len+1);
885 g_vsnprintf (dst, (gulong) len, fmt, ap2);
/* Varargs wrapper around ep_strdup_vprintf(). */
892 ep_strdup_printf(const gchar* fmt, ...)
898 dst = ep_strdup_vprintf(fmt, ap);
/* Split 'string' on separator 'sep' into at most 'max_tokens' tokens,
 * returning a NULL-terminated vector of pointers into an ep-allocated
 * copy of the string. Empty fields become NULL entries. Implemented as
 * a small state machine over the copy, with separator bytes replaced
 * by NULs (replacement lines missing from this dump). */
904 ep_strsplit(const gchar* string, const gchar* sep, int max_tokens)
913 enum { AT_START, IN_PAD, IN_TOKEN } state;
921 s = splitted = ep_strdup(string);
922 str_len = (guint) strlen(splitted);
923 sep_len = (guint) strlen(sep);
925 if (max_tokens < 1) max_tokens = INT_MAX;
/* First pass: count separators to size the result vector. */
930 while (tokens <= (guint)max_tokens && ( s = strstr(s,sep) )) {
933 for(i=0; i < sep_len; i++ )
940 vec = ep_alloc_array(gchar*,tokens+1);
/* Second pass: record the start of each token. */
943 for (i=0; i< str_len; i++) {
946 switch(splitted[i]) {
951 vec[curr_tok] = &(splitted[i]);
957 switch(splitted[i]) {
964 switch(splitted[i]) {
966 vec[curr_tok] = &(splitted[i]);
975 vec[curr_tok] = NULL;
/* se_alloc() plus zero-fill; returns the zeroed buffer. */
983 se_alloc0(size_t size)
985 return memset(se_alloc(size),'\0',size);
988 /* If str is NULL, just return the string "<NULL>" so that the callers dont
989 * have to bother checking it.
992 se_strdup(const gchar* src)
1000 len = (guint) strlen(src);
1001 dst = memcpy(se_alloc(len+1), src, len+1);
/* Copy at most 'len' characters of 'src' into se memory, stopping
 * early at a NUL (termination line missing from this dump). */
1007 se_strndup(const gchar* src, size_t len)
1009 gchar* dst = se_alloc(len+1);
1012 for (i = 0; (i < len) && src[i]; i++)
/* Byte-for-byte copy of 'len' bytes at 'src' into se memory. */
1021 se_memdup(const void* src, size_t len)
1023 return memcpy(se_alloc(len), src, len);
/* printf-style formatting into an se-allocated string; mirrors
 * ep_strdup_vprintf(). */
1027 se_strdup_vprintf(const gchar* fmt, va_list ap)
1035 len = g_printf_string_upper_bound(fmt, ap);
1037 dst = se_alloc(len+1);
1038 g_vsnprintf (dst, (gulong) len, fmt, ap2);
/* Varargs wrapper around se_strdup_vprintf(). */
1045 se_strdup_printf(const gchar* fmt, ...)
1051 dst = se_strdup_vprintf(fmt, ap);
1056 /* release all allocated memory back to the pool. */
1058 emem_free_all(emem_header_t *mem)
1060 gboolean use_chunks = mem->debug_use_chunks;
1063 emem_tree_t *tree_list;
1065 /* move all used chunks over to the free list */
1066 while(mem->used_list){
1068 mem->used_list=mem->used_list->next;
1069 npc->next=mem->free_list;
1073 /* clear them all out */
1074 npc = mem->free_list;
1075 while (npc != NULL) {
/* Verify every canary in this chunk before recycling it;
 * (void *)-1 from emem_canary_next() signals corruption. */
1077 while (npc->canary_last != NULL) {
1078 npc->canary_last = emem_canary_next(mem->canary, npc->canary_last, NULL);
1079 /* XXX, check if canary_last is inside allocated memory? */
1081 if (npc->canary_last == (void *) -1)
1082 g_error("Memory corrupted");
1085 emem_scrub_memory((npc->buf + npc->free_offset_init),
1086 (npc->free_offset - npc->free_offset_init),
/* Chunked mode: keep the chunk, just reset its cursors. */
1089 npc->amount_free = npc->amount_free_init;
1090 npc->free_offset = npc->free_offset_init;
/* Glib mode: each node owns one allocation; scrub and free it. */
1093 emem_chunk_t *next = npc->next;
1095 emem_scrub_memory(npc->buf, npc->amount_free_init, FALSE);
1104 /* We've freed all this memory already */
1105 mem->free_list = NULL;
1108 /* release/reset all allocated trees */
1109 for(tree_list=mem->trees;tree_list;tree_list=tree_list->next){
1110 tree_list->tree=NULL;
1114 /* release all allocated memory back to the pool. */
1118 emem_free_all(&ep_packet_mem);
1121 /* release all allocated memory back to the pool. */
1125 #ifdef SHOW_EMEM_STATS
1126 print_alloc_stats();
1129 emem_free_all(&se_packet_mem);
/* Create an empty ep-lifetime stack: a pointer cell holding the
 * (zeroed) sentinel bottom frame. */
1133 ep_stack_new(void) {
1134 ep_stack_t s = ep_new(struct _ep_stack_frame_t*);
1135 *s = ep_new0(struct _ep_stack_frame_t);
1139 /* for ep_stack_t we'll keep the popped frames so we reuse them instead
1140 of allocating new ones.
1144 ep_stack_push(ep_stack_t stack, void* data)
1146 struct _ep_stack_frame_t* frame;
1147 struct _ep_stack_frame_t* head = (*stack);
/* Reuse a previously popped frame if one is cached above the head,
 * otherwise allocate and link a fresh one. */
1150 frame = head->above;
1152 frame = ep_new(struct _ep_stack_frame_t);
1153 head->above = frame;
1154 frame->below = head;
1155 frame->above = NULL;
1158 frame->payload = data;
/* Pop the top payload. The frame itself stays linked above the new
 * head so ep_stack_push() can reuse it. Returns the popped payload;
 * the empty-stack branch is missing from this dump. */
1165 ep_stack_pop(ep_stack_t stack)
1168 if ((*stack)->below) {
1169 (*stack) = (*stack)->below;
1170 return (*stack)->above->payload;
/* Create a persistent (g_malloc'd header) binary tree whose nodes are
 * se-allocated; registered on se_packet_mem.trees so se_free_all()
 * resets it instead of destroying it. */
1177 se_tree_create(int type, const char *name)
1179 emem_tree_t *tree_list;
1181 tree_list=g_malloc(sizeof(emem_tree_t));
1182 tree_list->next=se_packet_mem.trees;
1183 tree_list->type=type;
1184 tree_list->tree=NULL;
1185 tree_list->name=name;
1186 tree_list->malloc=se_alloc;
1187 se_packet_mem.trees=tree_list;
/* Exact-match lookup of 32-bit 'key'; standard binary-search descent.
 * Returns the node's data on a hit (descent/return lines missing
 * from this dump). */
1193 emem_tree_lookup32(emem_tree_t *se_tree, guint32 key)
1195 emem_tree_node_t *node;
1200 if(key==node->key32){
1203 if(key<node->key32){
1207 if(key>node->key32){
/* Lookup of the largest key that is <= 'key' ("less-or-equal" match).
 * First descends like an exact-match search; if no exact hit, walks
 * back up to find the nearest smaller ancestor. */
1216 emem_tree_lookup32_le(emem_tree_t *se_tree, guint32 key)
1218 emem_tree_node_t *node;
1228 if(key==node->key32){
1231 if(key<node->key32){
1239 if(key>node->key32){
1254 /* If we are still at the root of the tree this means that this node
1255 * is either smaller than the search key and then we return this
1256 * node or else there is no smaller key available and then
1260 if(key>node->key32){
1267 if(node->parent->left==node){
1270 if(key>node->key32){
1271 /* if this is a left child and its key is smaller than
1272 * the search key, then this is the node we want.
1276 /* if this is a left child and its key is bigger than
1277 * the search key, we have to check if any
1278 * of our ancestors are smaller than the search key.
1281 if(key>node->key32){
1291 if(node->key32<key){
1292 /* if this is the right child and its key is smaller
1293 * than the search key then this is the one we want.
1297 /* if this is the right child and its key is larger
1298 * than the search key then our parent is the one we
1301 return node->parent->data;
/* Parent accessor; NULL for the root. */
1308 static inline emem_tree_node_t *
1309 emem_tree_parent(emem_tree_node_t *node)
1311 return node->parent;
/* Grandparent accessor; the NULL-parent guard line is missing from
 * this dump. */
1314 static inline emem_tree_node_t *
1315 emem_tree_grandparent(emem_tree_node_t *node)
1317 emem_tree_node_t *parent;
1319 parent=emem_tree_parent(node);
1321 return parent->parent;
/* Uncle accessor: the grandparent's other child, used by the
 * red-black insert fixup cases. */
1326 static inline emem_tree_node_t *
1327 emem_tree_uncle(emem_tree_node_t *node)
1329 emem_tree_node_t *parent, *grandparent;
1331 parent=emem_tree_parent(node);
1335 grandparent=emem_tree_parent(parent);
1339 if(parent==grandparent->left){
1340 return grandparent->right;
1342 return grandparent->left;
1345 static inline void rb_insert_case1(emem_tree_t *se_tree, emem_tree_node_t *node);
1346 static inline void rb_insert_case2(emem_tree_t *se_tree, emem_tree_node_t *node);
/* Standard left rotation around 'node': its right child takes its
 * place (updating the parent link or the tree root), and the child's
 * left subtree becomes node's new right subtree. */
1349 rotate_left(emem_tree_t *se_tree, emem_tree_node_t *node)
1352 if(node->parent->left==node){
1353 node->parent->left=node->right;
1355 node->parent->right=node->right;
1358 se_tree->tree=node->right;
1360 node->right->parent=node->parent;
1361 node->parent=node->right;
1362 node->right=node->right->left;
1364 node->right->parent=node;
1366 node->parent->left=node;
/* Mirror of rotate_left(): right rotation around 'node'. */
1370 rotate_right(emem_tree_t *se_tree, emem_tree_node_t *node)
1373 if(node->parent->left==node){
1374 node->parent->left=node->left;
1376 node->parent->right=node->left;
1379 se_tree->tree=node->left;
1381 node->left->parent=node->parent;
1382 node->parent=node->left;
1383 node->left=node->left->right;
1385 node->left->parent=node;
1387 node->parent->right=node;
/* RB insert fixup, final case: parent red, uncle black, node on the
 * "outside". Recolor parent black / grandparent red, then rotate the
 * grandparent away from the node's side. */
1391 rb_insert_case5(emem_tree_t *se_tree, emem_tree_node_t *node)
1393 emem_tree_node_t *grandparent;
1394 emem_tree_node_t *parent;
1396 parent=emem_tree_parent(node);
1397 grandparent=emem_tree_parent(parent);
1398 parent->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1399 grandparent->u.rb_color=EMEM_TREE_RB_COLOR_RED;
1400 if( (node==parent->left) && (parent==grandparent->left) ){
1401 rotate_right(se_tree, grandparent);
1403 rotate_left(se_tree, grandparent);
/* RB insert fixup, case 4: node is an "inside" grandchild; rotate the
 * parent to turn it into the outside configuration, then fall through
 * to case 5. */
1408 rb_insert_case4(emem_tree_t *se_tree, emem_tree_node_t *node)
1410 emem_tree_node_t *grandparent;
1411 emem_tree_node_t *parent;
1413 parent=emem_tree_parent(node);
1414 grandparent=emem_tree_parent(parent);
1418 if( (node==parent->right) && (parent==grandparent->left) ){
1419 rotate_left(se_tree, parent);
1421 } else if( (node==parent->left) && (parent==grandparent->right) ){
1422 rotate_right(se_tree, parent);
1425 rb_insert_case5(se_tree, node);
/* RB insert fixup, case 3: red uncle -- recolor parent and uncle
 * black, grandparent red, and restart the fixup at the grandparent.
 * Otherwise proceed to case 4. */
1429 rb_insert_case3(emem_tree_t *se_tree, emem_tree_node_t *node)
1431 emem_tree_node_t *grandparent;
1432 emem_tree_node_t *parent;
1433 emem_tree_node_t *uncle;
1435 uncle=emem_tree_uncle(node);
1436 if(uncle && (uncle->u.rb_color==EMEM_TREE_RB_COLOR_RED)){
1437 parent=emem_tree_parent(node);
1438 parent->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1439 uncle->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1440 grandparent=emem_tree_grandparent(node);
1441 grandparent->u.rb_color=EMEM_TREE_RB_COLOR_RED;
1442 rb_insert_case1(se_tree, grandparent);
1444 rb_insert_case4(se_tree, node);
/* RB insert fixup, case 2: black parent means no red-red violation --
 * nothing to do. Otherwise continue with case 3. */
1449 rb_insert_case2(emem_tree_t *se_tree, emem_tree_node_t *node)
1451 emem_tree_node_t *parent;
1453 parent=emem_tree_parent(node);
1454 /* parent is always non-NULL here */
1455 if(parent->u.rb_color==EMEM_TREE_RB_COLOR_BLACK){
1458 rb_insert_case3(se_tree, node);
/* RB insert fixup entry point: a node with no parent is the root and
 * is simply painted black; otherwise continue with case 2. */
1462 rb_insert_case1(emem_tree_t *se_tree, emem_tree_node_t *node)
1464 emem_tree_node_t *parent;
1466 parent=emem_tree_parent(node);
1468 node->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1471 rb_insert_case2(se_tree, node);
1474 /* insert a new node in the tree. if this node matches an already existing node
1475 * then just replace the data for that node */
1477 emem_tree_insert32(emem_tree_t *se_tree, guint32 key, void *data)
1479 emem_tree_node_t *node;
1483 /* is this the first node ?*/
/* Empty tree: allocate the root via the tree's own allocator
 * (se_alloc for se trees) and paint it black for red-black trees. */
1485 node=se_tree->malloc(sizeof(emem_tree_node_t));
1486 switch(se_tree->type){
1487 case EMEM_TREE_TYPE_RED_BLACK:
1488 node->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
1496 node->u.is_subtree = EMEM_TREE_NODE_IS_DATA;
1501 /* it was not the new root so walk the tree until we find where to
1502 * insert this new leaf.
1505 /* this node already exists, so just replace the data pointer*/
1506 if(key==node->key32){
1510 if(key<node->key32) {
1512 /* new node to the left */
1513 emem_tree_node_t *new_node;
1514 new_node=se_tree->malloc(sizeof(emem_tree_node_t));
1515 node->left=new_node;
1516 new_node->parent=node;
1517 new_node->left=NULL;
1518 new_node->right=NULL;
1519 new_node->key32=key;
1520 new_node->data=data;
1521 new_node->u.is_subtree=EMEM_TREE_NODE_IS_DATA;
1528 if(key>node->key32) {
1530 /* new node to the right */
1531 emem_tree_node_t *new_node;
1532 new_node=se_tree->malloc(sizeof(emem_tree_node_t));
1533 node->right=new_node;
1534 new_node->parent=node;
1535 new_node->left=NULL;
1536 new_node->right=NULL;
1537 new_node->key32=key;
1538 new_node->data=data;
1539 new_node->u.is_subtree=EMEM_TREE_NODE_IS_DATA;
1548 /* node will now point to the newly created node */
/* Rebalance: new nodes start red, then the case-1..5 fixup chain
 * restores the red-black invariants. */
1549 switch(se_tree->type){
1550 case EMEM_TREE_TYPE_RED_BLACK:
1551 node->u.rb_color=EMEM_TREE_RB_COLOR_RED;
1552 rb_insert_case1(se_tree, node);
/* Look up key in the tree; if absent, insert a node whose data is
 * produced by func(ud) and whose is_subtree flag is the caller's
 * is_subtree.  Mirrors emem_tree_insert32, but on a key match the
 * EXISTING data pointer is returned instead of being replaced — used
 * by emem_tree_insert32_array to lazily create subtrees.
 * NOTE(review): the descent loop, returns and closing braces are among
 * the lines missing from this view. */
1558 lookup_or_insert32(emem_tree_t *se_tree, guint32 key, void*(*func)(void*),void* ud, int is_subtree)
1560 emem_tree_node_t *node;
1564 /* is this the first node ?*/
1566 node=se_tree->malloc(sizeof(emem_tree_node_t));
1567 switch(se_tree->type){
1568 case EMEM_TREE_TYPE_RED_BLACK:
1569 node->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
/* data is manufactured on demand by the caller-supplied factory */
1576 node->data= func(ud);
1577 node->u.is_subtree = is_subtree;
1582 /* it was not the new root so walk the tree until we find where to
1583 * insert this new leaf.
1586 /* this node already exists, so just return the data pointer*/
1587 if(key==node->key32){
1590 if(key<node->key32) {
1592 /* new node to the left */
1593 emem_tree_node_t *new_node;
1594 new_node=se_tree->malloc(sizeof(emem_tree_node_t));
1595 node->left=new_node;
1596 new_node->parent=node;
1597 new_node->left=NULL;
1598 new_node->right=NULL;
1599 new_node->key32=key;
1600 new_node->data= func(ud);
1601 new_node->u.is_subtree = is_subtree;
1608 if(key>node->key32) {
1610 /* new node to the right */
1611 emem_tree_node_t *new_node;
1612 new_node=se_tree->malloc(sizeof(emem_tree_node_t));
1613 node->right=new_node;
1614 new_node->parent=node;
1615 new_node->left=NULL;
1616 new_node->right=NULL;
1617 new_node->key32=key;
1618 new_node->data= func(ud);
1619 new_node->u.is_subtree = is_subtree;
1628 /* node will now point to the newly created node */
1629 switch(se_tree->type){
1630 case EMEM_TREE_TYPE_RED_BLACK:
1631 node->u.rb_color=EMEM_TREE_RB_COLOR_RED;
1632 rb_insert_case1(se_tree, node);
1639 /* When the se data is released, this entire tree will disappear as if it
1640 * never existed including all metadata associated with the tree.
/* Create a tree whose every allocation (header and nodes) comes from
 * se_alloc, i.e. capture-file-scoped memory: no explicit destroy is
 * needed.  The name is stored by reference, not copied. */
1643 se_tree_create_non_persistent(int type, const char *name)
1645 emem_tree_t *tree_list;
1647 tree_list=se_alloc(sizeof(emem_tree_t));
1648 tree_list->next=NULL;
1649 tree_list->type=type;
1650 tree_list->tree=NULL;
1651 tree_list->name=name;
/* nodes of this tree will be allocated from the same se scope */
1652 tree_list->malloc=se_alloc;
1657 /* This tree is permanent and will never be released
/* Create a tree backed by g_malloc/g_new: it lives for the whole
 * process lifetime (no matching free path is provided). */
1660 pe_tree_create(int type, const char *name)
1662 emem_tree_t *tree_list;
1664 tree_list=g_new(emem_tree_t, 1);
1665 tree_list->next=NULL;
1666 tree_list->type=type;
1667 tree_list->tree=NULL;
1668 tree_list->name=name;
/* cast adapts g_malloc's gsize signature to the tree's allocator slot */
1669 tree_list->malloc=(void *(*)(size_t)) g_malloc;
1674 /* create another (sub)tree using the same memory allocation scope
1675 * as the parent tree.
/* The subtree inherits the parent's type and allocator, so its nodes
 * share the parent's lifetime (se-scoped, permanent, etc.). */
1677 static emem_tree_t *
1678 emem_tree_create_subtree(emem_tree_t *parent_tree, const char *name)
1680 emem_tree_t *tree_list;
1682 tree_list=parent_tree->malloc(sizeof(emem_tree_t));
1683 tree_list->next=NULL;
1684 tree_list->type=parent_tree->type;
1685 tree_list->tree=NULL;
1686 tree_list->name=name;
1687 tree_list->malloc=parent_tree->malloc;
/* Factory callback for lookup_or_insert32: d is the parent emem_tree_t;
 * returns a freshly created subtree sharing the parent's allocator. */
1693 create_sub_tree(void* d)
1695 emem_tree_t *se_tree = d;
1696 return emem_tree_create_subtree(se_tree, "subtree");
1699 /* insert a new node in the tree. if this node matches an already existing node
1700 * then just replace the data for that node */
/* Multi-word-key insert: key is an array of {length, key-words} entries
 * terminated by a zero-length entry.  Each leading key word selects (or
 * lazily creates, via create_sub_tree) a nested subtree; the final word
 * is inserted with emem_tree_insert32.  Recursive, one level per word.
 * The length<1 / length>100 assertion rejects corrupt key arrays. */
1703 emem_tree_insert32_array(emem_tree_t *se_tree, emem_tree_key_t *key, void *data)
1705 emem_tree_t *next_tree;
1707 if((key[0].length<1)||(key[0].length>100)){
1708 DISSECTOR_ASSERT_NOT_REACHED();
/* single remaining word: plain 32-bit insert, no subtree needed */
1710 if((key[0].length==1)&&(key[1].length==0)){
1711 emem_tree_insert32(se_tree, *key[0].key, data);
1715 next_tree=lookup_or_insert32(se_tree, *key[0].key, create_sub_tree, se_tree, EMEM_TREE_NODE_IS_SUBTREE);
1717 if(key[0].length==1){
/* NOTE(review): the key-advancing lines between 1717 and 1723 are not
 * visible in this view */
1723 emem_tree_insert32_array(next_tree, key, data);
/* Exact lookup of a multi-word key (same key layout as
 * emem_tree_insert32_array); returns NULL when any level misses.
 * NOTE(review): the key-advancing and miss-return lines are among the
 * lines missing from this view. */
1727 emem_tree_lookup32_array(emem_tree_t *se_tree, emem_tree_key_t *key)
1729 emem_tree_t *next_tree;
1731 if(!se_tree || !key) return NULL; /* prevent searching on NULL pointer */
1733 if((key[0].length<1)||(key[0].length>100)){
1734 DISSECTOR_ASSERT_NOT_REACHED();
1736 if((key[0].length==1)&&(key[1].length==0)){
1737 return emem_tree_lookup32(se_tree, *key[0].key);
1739 next_tree=emem_tree_lookup32(se_tree, *key[0].key);
1743 if(key[0].length==1){
1749 return emem_tree_lookup32_array(next_tree, key);
/* "Less-than-or-equal" lookup of a multi-word key: leading words must
 * match exactly; only when a word misses, or at the final word, does it
 * fall back to emem_tree_lookup32_le (largest key <= the target).
 * NOTE(review): key-advancing lines are missing from this view. */
1753 emem_tree_lookup32_array_le(emem_tree_t *se_tree, emem_tree_key_t *key)
1755 emem_tree_t *next_tree;
1757 if(!se_tree || !key) return NULL; /* prevent searching on NULL pointer */
1759 if((key[0].length<1)||(key[0].length>100)){
1760 DISSECTOR_ASSERT_NOT_REACHED();
1762 if((key[0].length==1)&&(key[1].length==0)){ /* last key in key array */
1763 return emem_tree_lookup32_le(se_tree, *key[0].key);
1765 next_tree=emem_tree_lookup32(se_tree, *key[0].key);
1766 /* key[0].key not found so find le and return */
1768 return emem_tree_lookup32_le(se_tree, *key[0].key);
1770 /* key[0].key found so inc key pointer and try again */
1771 if(key[0].length==1){
1777 return emem_tree_lookup32_array_le(next_tree, key);
1780 /* Strings are stored as an array of uint32 containing the string characters
1781 with 4 characters in each uint32.
1782 The first byte of the string is stored as the most significant byte.
1783 If the string is not a multiple of 4 characters in length the last
1784 uint32 containing the string bytes are padded with 0 bytes.
1785 After the uint32's containing the string, there is one final terminator
1786 uint32 with the value 0x00000001
/* Insert a string-keyed entry: pack the string into big-endian guint32
 * words per the scheme above, then delegate to emem_tree_insert32_array.
 * EMEM_TREE_STRING_NOCASE folds case during packing.
 * NOTE(review): the packing accumulator, the NOCASE branch body, and
 * whether the g_malloc'd "aligned" buffer is freed afterwards are not
 * visible in this view — verify the free against the full file. */
1789 emem_tree_insert_string(emem_tree_t* se_tree, const gchar* k, void* v, guint32 flags)
1791 emem_tree_key_t key[2];
1792 guint32 *aligned=NULL;
1793 guint32 len = (guint32) strlen(k);
/* one word per 4 chars, rounded up, plus the 0x00000001 terminator */
1794 guint32 divx = (len+3)/4+1;
1798 aligned = g_malloc(divx * sizeof (guint32));
1800 /* pack the bytes one by one into guint32s */
1802 for (i = 0;i < len;i++) {
1805 ch = (unsigned char)k[i];
1806 if (flags & EMEM_TREE_STRING_NOCASE) {
1818 /* add required padding to the last uint32 */
1824 aligned[i/4-1] = tmp;
1827 /* add the terminator */
1828 aligned[divx-1] = 0x00000001;
1830 key[0].length = divx;
1831 key[0].key = aligned;
1836 emem_tree_insert32_array(se_tree, key, v);
/* Look up a string key packed exactly as in emem_tree_insert_string;
 * flags must match those used at insert time.
 * NOTE(review): the cast on len is (guint) here but (guint32) in the
 * insert path — harmless on current platforms but inconsistent.
 * As with insert, the packing internals and the free of "aligned" are
 * not visible in this view. */
1841 emem_tree_lookup_string(emem_tree_t* se_tree, const gchar* k, guint32 flags)
1843 emem_tree_key_t key[2];
1844 guint32 *aligned=NULL;
1845 guint32 len = (guint) strlen(k);
1846 guint32 divx = (len+3)/4+1;
1851 aligned = g_malloc(divx * sizeof (guint32));
1853 /* pack the bytes one by one into guint32s */
1855 for (i = 0;i < len;i++) {
1858 ch = (unsigned char)k[i];
1859 if (flags & EMEM_TREE_STRING_NOCASE) {
1871 /* add required padding to the last uint32 */
1877 aligned[i/4-1] = tmp;
1880 /* add the terminator */
1881 aligned[divx-1] = 0x00000001;
1883 key[0].length = divx;
1884 key[0].key = aligned;
1889 ret = emem_tree_lookup32_array(se_tree, key);
/* In-order traversal helper: visit left subtree, then this node (a
 * subtree node recurses via emem_tree_foreach, a data node invokes the
 * callback), then the right subtree.  A TRUE return from any callback
 * stops the traversal and propagates up.
 * NOTE(review): the NULL-node guard and return statements are among the
 * lines missing from this view. */
1895 emem_tree_foreach_nodes(emem_tree_node_t* node, tree_foreach_func callback, void *user_data)
1897 gboolean stop_traverse = FALSE;
1903 stop_traverse = emem_tree_foreach_nodes(node->left, callback, user_data);
1904 if (stop_traverse) {
1909 if (node->u.is_subtree == EMEM_TREE_NODE_IS_SUBTREE) {
/* a subtree node's data is itself an emem_tree_t — descend into it */
1910 stop_traverse = emem_tree_foreach(node->data, callback, user_data);
1912 stop_traverse = callback(node->data, user_data);
1915 if (stop_traverse) {
1920 stop_traverse = emem_tree_foreach_nodes(node->right, callback, user_data);
1921 if (stop_traverse) {
/* Public traversal entry point: walk every data node in key order,
 * calling callback(data, user_data); an empty tree is a no-op. */
1930 emem_tree_foreach(emem_tree_t* emem_tree, tree_foreach_func callback, void *user_data)
1935 if(!emem_tree->tree)
1938 return emem_tree_foreach_nodes(emem_tree->tree, callback, user_data);
1943 emem_tree_print_nodes(emem_tree_node_t* node, int level)
1950 for(i=0;i<level;i++){
1954 printf("NODE:%p parent:%p left:0x%p right:%px key:%d data:%p\n",
1955 (void *)node,(void *)(node->parent),(void *)(node->left),(void *)(node->right),
1956 (node->key32),node->data);
1958 emem_tree_print_nodes(node->left, level+1);
1960 emem_tree_print_nodes(node->right, level+1);
/* Debug helper: print the tree header (type, name, root pointer) and
 * then every node via emem_tree_print_nodes. */
1963 emem_print_tree(emem_tree_t* emem_tree)
1968 printf("EMEM tree type:%d name:%s tree:%p\n",emem_tree->type,emem_tree->name,(void *)(emem_tree->tree));
1970 emem_tree_print_nodes(emem_tree->tree, 0);
1978 * Presumably we're using these routines for building strings for the tree.
1979 * Use ITEM_LABEL_LENGTH as the basis for our default lengths.
1982 #define DEFAULT_STRBUF_LEN (ITEM_LABEL_LENGTH / 10)
1983 #define MAX_STRBUF_LEN 65536
/* Compute the next strbuf allocation size: start from cur_alloc_len
 * (or DEFAULT_STRBUF_LEN if unset), grow until it covers
 * wanted_alloc_len, and clamp the result to max_alloc_len (itself
 * clamped to MAX_STRBUF_LEN).
 * NOTE(review): the loop body (presumably a doubling step) is among the
 * lines missing from this view — confirm against the full file. */
1986 next_size(gsize cur_alloc_len, gsize wanted_alloc_len, gsize max_alloc_len)
1988 if (max_alloc_len < 1 || max_alloc_len > MAX_STRBUF_LEN) {
1989 max_alloc_len = MAX_STRBUF_LEN;
1992 if (cur_alloc_len < 1) {
1993 cur_alloc_len = DEFAULT_STRBUF_LEN;
1996 while (cur_alloc_len < wanted_alloc_len) {
2000 return cur_alloc_len < max_alloc_len ? cur_alloc_len : max_alloc_len;
/* Grow strbuf's backing storage to at least wanted_alloc_len (capped by
 * max_alloc_len).  No-op if strbuf is NULL, already big enough, or
 * already at its cap.  The old ep-allocated buffer is simply abandoned
 * — ep memory is reclaimed wholesale at end of packet scope, so no
 * explicit free is needed. */
2004 ep_strbuf_grow(emem_strbuf_t *strbuf, gsize wanted_alloc_len)
2006 gsize new_alloc_len;
2009 if (!strbuf || (wanted_alloc_len <= strbuf->alloc_len) || (strbuf->alloc_len >= strbuf->max_alloc_len)) {
2013 new_alloc_len = next_size(strbuf->alloc_len, wanted_alloc_len, strbuf->max_alloc_len);
2014 new_str = ep_alloc(new_alloc_len);
/* g_strlcpy always NUL-terminates, truncating if the cap was hit */
2015 g_strlcpy(new_str, strbuf->str, new_alloc_len);
2017 strbuf->alloc_len = new_alloc_len;
2018 strbuf->str = new_str;
/* Allocate an ep-scoped strbuf with an initial capacity of alloc_len
 * and a hard cap of max_alloc_len (0 => MAX_STRBUF_LEN).  alloc_len is
 * clamped to the cap; the string starts empty. */
2022 ep_strbuf_sized_new(gsize alloc_len, gsize max_alloc_len)
2024 emem_strbuf_t *strbuf;
2026 strbuf = ep_alloc(sizeof(emem_strbuf_t));
2028 if ((max_alloc_len == 0) || (max_alloc_len > MAX_STRBUF_LEN))
2029 max_alloc_len = MAX_STRBUF_LEN;
2032 else if (alloc_len > max_alloc_len)
2033 alloc_len = max_alloc_len;
2035 strbuf->str = ep_alloc(alloc_len);
2036 strbuf->str[0] = '\0';
2039 strbuf->alloc_len = alloc_len;
2040 strbuf->max_alloc_len = max_alloc_len;
/* Create an ep-scoped strbuf initialized from init (sized to fit it).
 * NOTE(review): an "if (init)" guard around the copy is presumably on a
 * line missing from this view — g_strlcpy must not receive NULL. */
2046 ep_strbuf_new(const gchar *init)
2048 emem_strbuf_t *strbuf;
2050 strbuf = ep_strbuf_sized_new(next_size(0, init?strlen(init)+1:0, 0), 0); /* +1 for NULL terminator */
2053 full_len = g_strlcpy(strbuf->str, init, strbuf->alloc_len);
/* g_strlcpy returns the untruncated source length; clamp to capacity */
2054 strbuf->len = MIN(full_len, strbuf->alloc_len-1);
/* Create an ep-scoped strbuf capped at ITEM_LABEL_LENGTH, optimistically
 * sized at DEFAULT_STRBUF_LEN; if init doesn't fit, a second strbuf of
 * the exact needed size is created instead. */
2061 ep_strbuf_new_label(const gchar *init)
2063 emem_strbuf_t *strbuf;
2066 /* Be optimistic: Allocate default size strbuf string and only */
2067 /* request an increase if needed. */
2068 /* XXX: Is it reasonable to assume that much of the usage of */
2069 /* ep_strbuf_new_label will have init==NULL or */
2070 /* strlen(init) < DEFAULT_STRBUF_LEN) ??? */
2071 strbuf = ep_strbuf_sized_new(DEFAULT_STRBUF_LEN, ITEM_LABEL_LENGTH);
2076 /* full_len does not count the trailing '\0'. */
2077 full_len = g_strlcpy(strbuf->str, init, strbuf->alloc_len);
2078 if (full_len < strbuf->alloc_len) {
2079 strbuf->len += full_len;
/* optimistic copy truncated: retry with an exact-fit allocation */
2081 strbuf = ep_strbuf_sized_new(full_len+1, ITEM_LABEL_LENGTH);
2082 full_len = g_strlcpy(strbuf->str, init, strbuf->alloc_len);
2083 strbuf->len = MIN(full_len, strbuf->alloc_len-1);
/* Append str to strbuf, growing if needed (up to max_alloc_len; the
 * result is silently truncated at the cap).  NULL/empty inputs are
 * no-ops. */
2090 ep_strbuf_append(emem_strbuf_t *strbuf, const gchar *str)
2092 gsize add_len, full_len;
2094 if (!strbuf || !str || str[0] == '\0') {
2098 /* Be optimistic; try the g_strlcpy first & see if enough room. */
2099 /* Note: full_len doesn't count the trailing '\0'; add_len does allow for same */
2100 add_len = strbuf->alloc_len - strbuf->len;
2101 full_len = g_strlcpy(&strbuf->str[strbuf->len], str, add_len);
2102 if (full_len < add_len) {
2103 strbuf->len += full_len;
2105 strbuf->str[strbuf->len] = '\0'; /* end string at original length again */
2106 ep_strbuf_grow(strbuf, strbuf->len + full_len + 1);
2107 add_len = strbuf->alloc_len - strbuf->len;
2108 full_len = g_strlcpy(&strbuf->str[strbuf->len], str, add_len);
/* if the grow was capped the copy may still truncate; clamp len */
2109 strbuf->len += MIN(add_len-1, full_len);
/* vprintf-style append: format into the remaining space; if truncated,
 * grow and retry.  NOTE(review): the retry uses ap2, presumably a
 * va_copy of ap declared on lines missing from this view (a va_list
 * must not be consumed twice) — confirm the va_copy/va_end against the
 * full file. */
2116 ep_strbuf_append_vprintf(emem_strbuf_t *strbuf, const gchar *format, va_list ap)
2119 gsize add_len, full_len;
2123 /* Be optimistic; try the g_vsnprintf first & see if enough room. */
2124 /* Note: full_len doesn't count the trailing '\0'; add_len does allow for same. */
2125 add_len = strbuf->alloc_len - strbuf->len;
2126 full_len = g_vsnprintf(&strbuf->str[strbuf->len], (gulong) add_len, format, ap);
2127 if (full_len < add_len) {
2128 strbuf->len += full_len;
2130 strbuf->str[strbuf->len] = '\0'; /* end string at original length again */
2131 ep_strbuf_grow(strbuf, strbuf->len + full_len + 1);
2132 add_len = strbuf->alloc_len - strbuf->len;
2133 full_len = g_vsnprintf(&strbuf->str[strbuf->len], (gulong) add_len, format, ap2);
2134 strbuf->len += MIN(add_len-1, full_len);
/* printf-style append: varargs wrapper over ep_strbuf_append_vprintf.
 * (The matching va_end is on a line missing from this view.) */
2141 ep_strbuf_append_printf(emem_strbuf_t *strbuf, const gchar *format, ...)
2145 va_start(ap, format);
2146 ep_strbuf_append_vprintf(strbuf, format, ap);
/* printf-style replace: presumably truncates strbuf to empty first
 * (the truncate call is on a line missing from this view — confirm),
 * then formats via ep_strbuf_append_vprintf. */
2151 ep_strbuf_printf(emem_strbuf_t *strbuf, const gchar *format, ...)
2160 va_start(ap, format);
2161 ep_strbuf_append_vprintf(strbuf, format, ap);
/* Append a single character, growing if needed; if the buffer is at its
 * cap and cannot grow, the character is silently dropped.
 * (The len increment between storing c and the terminator is on a line
 * missing from this view.) */
2166 ep_strbuf_append_c(emem_strbuf_t *strbuf, const gchar c)
2172 /* +1 for the new character & +1 for the trailing '\0'. */
2173 if (strbuf->alloc_len < strbuf->len + 1 + 1) {
2174 ep_strbuf_grow(strbuf, strbuf->len + 1 + 1);
/* re-check: ep_strbuf_grow may have been capped by max_alloc_len */
2176 if (strbuf->alloc_len >= strbuf->len + 1 + 1) {
2177 strbuf->str[strbuf->len] = c;
2179 strbuf->str[strbuf->len] = '\0';
/* Truncate strbuf to len characters; a no-op if strbuf is NULL or len
 * is not smaller than the current length.  Capacity is unchanged.
 * (The assignment of strbuf->len is on a line missing from this view.) */
2186 ep_strbuf_truncate(emem_strbuf_t *strbuf, gsize len)
2188 if (!strbuf || len >= strbuf->len) {
2192 strbuf->str[len] = '\0';
2204 * indent-tabs-mode: t
2207 * ex: set shiftwidth=8 tabstop=8 noexpandtab
2208 * :indentSize=8:tabSize=8:noTabs=false: