#include <glib.h>
-#include <proto.h>
+#include "proto.h"
#include "emem.h"
#ifdef _WIN32
#include <process.h> /* getpid */
#endif
-
-/*
- * Tools like Valgrind and ElectricFence don't work well with memchunks.
- * Uncomment the defines below to make {ep|se}_alloc() allocate each
- * object individually.
- */
-/* #define EP_DEBUG_FREE 1 */
-/* #define SE_DEBUG_FREE 1 */
+/* Print out statistics about our memory allocations? */
+/*#define SHOW_EMEM_STATS*/
/* Do we want to use guardpages? if available */
#define WANT_GUARD_PAGES 1
-/* Do we want to use canaries ? */
-#define DEBUG_USE_CANARIES 1
-
#ifdef WANT_GUARD_PAGES
/* Add guard pages at each end of our allocated memory */
#if defined(HAVE_SYSCONF) && defined(HAVE_MMAP) && defined(HAVE_MPROTECT) && defined(HAVE_STDINT_H)
#include <stdint.h>
+#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
+#endif
#include <sys/mman.h>
#if defined(MAP_ANONYMOUS)
#define ANON_PAGE_MODE (MAP_ANONYMOUS|MAP_PRIVATE)
#endif
/* When required, allocate more memory from the OS in this size chunks */
-#define EMEM_PACKET_CHUNK_SIZE 10485760
+#define EMEM_PACKET_CHUNK_SIZE (10 * 1024 * 1024)
/* The maximum number of allocations per chunk */
-#define EMEM_ALLOCS_PER_CHUNK (EMEM_PACKET_CHUNK_SIZE / 512)
+#define EMEM_ALLOCS_PER_CHUNK (EMEM_PACKET_CHUNK_SIZE / 64)
-
-#ifdef DEBUG_USE_CANARIES
#define EMEM_CANARY_SIZE 8
#define EMEM_CANARY_DATA_SIZE (EMEM_CANARY_SIZE * 2 - 1)
-/* this should be static, but if it were gdb would had problems finding it */
-guint8 ep_canary[EMEM_CANARY_DATA_SIZE], se_canary[EMEM_CANARY_DATA_SIZE];
-#endif /* DEBUG_USE_CANARIES */
+typedef struct _emem_no_chunk_t {
+ unsigned int c_count;
+ void *canary[EMEM_ALLOCS_PER_CHUNK];
+ guint8 cmp_len[EMEM_ALLOCS_PER_CHUNK];
+} emem_canary_t;
typedef struct _emem_chunk_t {
struct _emem_chunk_t *next;
+ char *buf;
unsigned int amount_free_init;
unsigned int amount_free;
unsigned int free_offset_init;
unsigned int free_offset;
- char *buf;
-#ifdef DEBUG_USE_CANARIES
-#if ! defined(EP_DEBUG_FREE) && ! defined(SE_DEBUG_FREE)
- unsigned int c_count;
- void *canary[EMEM_ALLOCS_PER_CHUNK];
- guint8 cmp_len[EMEM_ALLOCS_PER_CHUNK];
-#endif
-#endif /* DEBUG_USE_CANARIES */
+ emem_canary_t *canary_info;
} emem_chunk_t;
typedef struct _emem_header_t {
emem_chunk_t *free_list;
emem_chunk_t *used_list;
+
+ emem_tree_t *trees; /* only used by se_mem allocator */
+
+ guint8 canary[EMEM_CANARY_DATA_SIZE];
+ void *(*memory_alloc)(size_t size, struct _emem_header_t *);
+
+ /*
+ * Tools like Valgrind and ElectricFence don't work well with memchunks.
+ * Export the following environment variables to make {ep|se}_alloc() allocate each
+ * object individually.
+ *
+ * WIRESHARK_DEBUG_EP_NO_CHUNKS
+ * WIRESHARK_DEBUG_SE_NO_CHUNKS
+ */
+ gboolean debug_use_chunks;
+
+ /* Do we want to use canaries?
+ * Export the following environment variables to disable/enable canaries
+ *
+ * WIRESHARK_DEBUG_EP_NO_CANARY
+ * For SE memory use of canary is default off as the memory overhead
+ * is considerable.
+ * WIRESHARK_DEBUG_SE_USE_CANARY
+ */
+ gboolean debug_use_canary;
+
} emem_header_t;
static emem_header_t ep_packet_mem;
static emem_header_t se_packet_mem;
-#if !defined(SE_DEBUG_FREE)
+/*
+ * Memory scrubbing is expensive but can be useful to ensure we don't:
+ * - use memory before initializing it
+ * - use memory after freeing it
+ * Export WIRESHARK_DEBUG_SCRUB_MEMORY to turn it on.
+ */
+static gboolean debug_use_memory_scrubber = FALSE;
+
#if defined (_WIN32)
static SYSTEM_INFO sysinfo;
static OSVERSIONINFO versinfo;
#elif defined(USE_GUARD_PAGES)
static intptr_t pagesize;
#endif /* _WIN32 / USE_GUARD_PAGES */
-#endif /* SE_DEBUG_FREE */
-#ifdef DEBUG_USE_CANARIES
+static void *emem_alloc_chunk(size_t size, emem_header_t *mem);
+static void *emem_alloc_glib(size_t size, emem_header_t *mem);
+
/*
* Set a canary value to be placed between memchunks.
*/
-void
-emem_canary(guint8 *canary) {
+static void
+emem_canary_init(guint8 *canary)
+{
int i;
- static GRand *rand_state = NULL;
+ static GRand *rand_state = NULL;
if (rand_state == NULL) {
rand_state = g_rand_new();
* the canary value.
*/
static guint8
-emem_canary_pad (size_t allocation) {
+emem_canary_pad (size_t allocation)
+{
guint8 pad;
pad = EMEM_CANARY_SIZE - (allocation % EMEM_CANARY_SIZE);
return pad;
}
-#endif /* DEBUG_USE_CANARIES */
/* used for debugging canaries, will block */
#ifdef DEBUG_INTENSE_CANARY_CHECKS
/* used to intensivelly check ep canaries
*/
-void ep_check_canary_integrity(const char* fmt, ...) {
+void
+ep_check_canary_integrity(const char* fmt, ...)
+{
va_list ap;
static gchar there[128] = {
'L','a','u','n','c','h',0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
for (npc = ep_packet_mem.free_list; npc != NULL; npc = npc->next) {
static unsigned i_ctr;
- if (npc->c_count > 0x00ffffff) {
+ if (npc->canary_info->c_count > 0x00ffffff) {
g_error("ep_packet_mem.free_list was corrupted\nbetween: %s\nand: %s",there, here);
}
- for (i_ctr = 0; i_ctr < npc->c_count; i_ctr++) {
- if (memcmp(npc->canary[i_ctr], &ep_canary, npc->cmp_len[i_ctr]) != 0) {
+ for (i_ctr = 0; i_ctr < npc->canary_info->c_count; i_ctr++) {
+ if (memcmp(npc->canary_info->canary[i_ctr], &ep_canary, npc->canary_info->cmp_len[i_ctr]) != 0) {
g_error("Per-packet memory corrupted\nbetween: %s\nand: %s",there, here);
}
}
}
#endif
+static void
+emem_init_chunk(emem_header_t *mem)
+{
+ if (mem->debug_use_canary)
+ emem_canary_init(mem->canary);
+
+ if (mem->debug_use_chunks)
+ mem->memory_alloc = emem_alloc_chunk;
+ else
+ mem->memory_alloc = emem_alloc_glib;
+}
+
/* Initialize the packet-lifetime memory allocation pool.
* This function should be called only once when Wireshark or TShark starts
* up.
*/
-void
+static void
ep_init_chunk(void)
{
ep_packet_mem.free_list=NULL;
ep_packet_mem.used_list=NULL;
+ ep_packet_mem.trees=NULL; /* not used by this allocator */
+
+ ep_packet_mem.debug_use_chunks = (getenv("WIRESHARK_DEBUG_EP_NO_CHUNKS") == NULL);
+ ep_packet_mem.debug_use_canary = ep_packet_mem.debug_use_chunks && (getenv("WIRESHARK_DEBUG_EP_NO_CANARY") == NULL);
#ifdef DEBUG_INTENSE_CANARY_CHECKS
- intense_canary_checking = (gboolean)getenv("WIRESHARK_DEBUG_EP_CANARY");
+ intense_canary_checking = (gboolean)getenv("WIRESHARK_DEBUG_EP_INTENSE_CANARY");
#endif
-#ifdef DEBUG_USE_CANARIES
- emem_canary(ep_canary);
-#endif /* DEBUG_USE_CANARIES */
+ emem_init_chunk(&ep_packet_mem);
+}
+
+/* Initialize the capture-lifetime memory allocation pool.
+ * This function should be called only once when Wireshark or TShark starts
+ * up.
+ */
+static void
+se_init_chunk(void)
+{
+	se_packet_mem.free_list = NULL;
+	se_packet_mem.used_list = NULL;
+	/* Initialize the SE pool's own tree list (was a copy/paste error that
+	 * cleared ep_packet_mem.trees and left se_packet_mem.trees garbage). */
+	se_packet_mem.trees = NULL;
+
+	se_packet_mem.debug_use_chunks = (getenv("WIRESHARK_DEBUG_SE_NO_CHUNKS") == NULL);
+	se_packet_mem.debug_use_canary = se_packet_mem.debug_use_chunks && (getenv("WIRESHARK_DEBUG_SE_USE_CANARY") != NULL);
+
+	emem_init_chunk(&se_packet_mem);
+}
+
+/* Initialize all the allocators here.
+ * This function should be called only once when Wireshark or TShark starts
+ * up.
+ */
+void
+emem_init(void)
+{
+ ep_init_chunk();
+ se_init_chunk();
+
+ if (getenv("WIRESHARK_DEBUG_SCRUB_MEMORY"))
+ debug_use_memory_scrubber = TRUE;
-#if !defined(SE_DEBUG_FREE)
#if defined (_WIN32)
/* Set up our guard page info for Win32 */
GetSystemInfo(&sysinfo);
g_assert(dev_zero_fd != -1);
#endif
#endif /* _WIN32 / USE_GUARD_PAGES */
-#endif /* SE_DEBUG_FREE */
+}
+
+#ifdef SHOW_EMEM_STATS
+#define NUM_ALLOC_DIST 10
+static guint allocations[NUM_ALLOC_DIST] = { 0 };
+static guint total_no_chunks = 0;
+
+static void
+print_alloc_stats()
+{
+ guint num_chunks = 0;
+ guint num_allocs = 0;
+ guint total_used = 0;
+ guint total_allocation = 0;
+ guint total_free = 0;
+ guint used_for_canaries = 0;
+ guint total_headers;
+ guint i_ctr, i;
+ emem_chunk_t *chunk;
+ guint total_space_allocated_from_os, total_space_wasted;
+ gboolean ep_stat=TRUE;
+
+ fprintf(stderr, "\n-------- EP allocator statistics --------\n");
+ fprintf(stderr, "%s chunks, %s canaries, %s memory scrubber\n",
+ ep_packet_mem.debug_use_chunks ? "Using" : "Not using",
+ ep_packet_mem.debug_use_canary ? "using" : "not using",
+ debug_use_memory_scrubber ? "using" : "not using");
+
+	/* "No memory allocated" means BOTH lists are empty; the previous
+	 * condition (!(free_list || !used_list)) was inverted on used_list and
+	 * fired exactly when memory WAS on the used list. */
+	if (!(ep_packet_mem.free_list || ep_packet_mem.used_list)) {
+		fprintf(stderr, "No memory allocated\n");
+		ep_stat = FALSE;
+	}
+ if (ep_packet_mem.debug_use_chunks && ep_stat) {
+ /* Nothing interesting without chunks */
+ /* Only look at the used_list since those chunks are fully
+ * used. Looking at the free list would skew our view of what
+ * we have wasted.
+ */
+ for (chunk = ep_packet_mem.used_list; chunk; chunk = chunk->next) {
+ num_chunks++;
+ total_used += (chunk->amount_free_init - chunk->amount_free);
+ total_allocation += chunk->amount_free_init;
+ total_free += chunk->amount_free;
+ }
+ if (num_chunks > 0) {
+ fprintf (stderr, "\n");
+ fprintf (stderr, "\n---- Buffer space ----\n");
+ fprintf (stderr, "\tChunk allocation size: %10u\n", EMEM_PACKET_CHUNK_SIZE);
+ fprintf (stderr, "\t* Number of chunks: %10u\n", num_chunks);
+ fprintf (stderr, "\t-------------------------------------------\n");
+ fprintf (stderr, "\t= %u (%u including guard pages) total space used for buffers\n",
+ total_allocation, EMEM_PACKET_CHUNK_SIZE * num_chunks);
+ fprintf (stderr, "\t-------------------------------------------\n");
+ total_space_allocated_from_os = total_allocation
+ + (sizeof(emem_chunk_t) + sizeof(emem_canary_t)) * num_chunks;
+ fprintf (stderr, "Total allocated from OS: %u\n\n",
+ total_space_allocated_from_os);
+ }else{
+ fprintf (stderr, "No fully used chunks, nothing to do\n");
+ }
+ /* Reset stats */
+ num_chunks = 0;
+ num_allocs = 0;
+ total_used = 0;
+ total_allocation = 0;
+ total_free = 0;
+ used_for_canaries = 0;
+ }
+
+ fprintf(stderr, "\n-------- SE allocator statistics --------\n");
+ fprintf(stderr, "Total number of chunk allocations %u\n",
+ total_no_chunks);
+ fprintf(stderr, "%s chunks, %s canaries\n",
+ se_packet_mem.debug_use_chunks ? "Using" : "Not using",
+ se_packet_mem.debug_use_canary ? "using" : "not using");
+	/* Same fix as the EP check: report "No memory allocated" only when
+	 * both the free and used lists are empty. */
+	if (!(se_packet_mem.free_list || se_packet_mem.used_list)) {
+		fprintf(stderr, "No memory allocated\n");
+		return;
+	}
+
+ if (!se_packet_mem.debug_use_chunks )
+ return; /* Nothing interesting without chunks?? */
+
+ /* Only look at the used_list since those chunks are fully used.
+ * Looking at the free list would skew our view of what we have wasted.
+ */
+ for (chunk = se_packet_mem.used_list; chunk; chunk = chunk->next) {
+ num_chunks++;
+ total_used += (chunk->amount_free_init - chunk->amount_free);
+ total_allocation += chunk->amount_free_init;
+ total_free += chunk->amount_free;
+
+ if (se_packet_mem.debug_use_canary){
+ for (i_ctr = 0; i_ctr < chunk->canary_info->c_count; i_ctr++) {
+ used_for_canaries += chunk->canary_info->cmp_len[i_ctr];
+ }
+ }
+ }
+
+ if (num_chunks == 0) {
+
+ fprintf (stderr, "No fully used chunks, nothing to do\n");
+ return;
+ }
+
+ fprintf (stderr, "\n");
+ fprintf (stderr, "---------- Allocations from the OS ----------\n");
+ fprintf (stderr, "---- Headers ----\n");
+ fprintf (stderr, "\t( Chunk header size: %10lu\n",
+ sizeof(emem_chunk_t));
+ if (se_packet_mem.debug_use_canary)
+ fprintf (stderr, "\t + Canary header size: %10lu)\n",
+ sizeof(emem_canary_t));
+ fprintf (stderr, "\t* Number of chunks: %10u\n", num_chunks);
+ fprintf (stderr, "\t-------------------------------------------\n");
+
+ total_headers = sizeof(emem_chunk_t) * num_chunks;
+ if (se_packet_mem.debug_use_canary)
+ total_headers += sizeof(emem_canary_t) * num_chunks;
+ fprintf (stderr, "\t= %u bytes used for headers\n", total_headers);
+ fprintf (stderr, "\n---- Buffer space ----\n");
+ fprintf (stderr, "\tChunk allocation size: %10u\n",
+ EMEM_PACKET_CHUNK_SIZE);
+ fprintf (stderr, "\t* Number of chunks: %10u\n", num_chunks);
+ fprintf (stderr, "\t-------------------------------------------\n");
+ fprintf (stderr, "\t= %u (%u including guard pages) bytes used for buffers\n",
+ total_allocation, EMEM_PACKET_CHUNK_SIZE * num_chunks);
+ fprintf (stderr, "\t-------------------------------------------\n");
+ total_space_allocated_from_os = (EMEM_PACKET_CHUNK_SIZE * num_chunks)
+ + total_headers;
+ fprintf (stderr, "Total bytes allocated from the OS: %u\n\n",
+ total_space_allocated_from_os);
+
+ for (i = 0; i < NUM_ALLOC_DIST; i++)
+ num_allocs += allocations[i];
+
+ fprintf (stderr, "---------- Allocations from the SE pool ----------\n");
+ fprintf (stderr, " Number of SE allocations: %10u\n",
+ num_allocs);
+ fprintf (stderr, " Bytes used (incl. canaries): %10u\n",
+ total_used);
+ fprintf (stderr, " Bytes used for canaries: %10u\n",
+ used_for_canaries);
+ fprintf (stderr, "Bytes unused (wasted, excl. guard pages): %10u\n",
+ total_allocation - total_used);
+ fprintf (stderr, "Bytes unused (wasted, incl. guard pages): %10u\n\n",
+ total_space_allocated_from_os - total_used);
+
+ fprintf (stderr, "---------- Statistics ----------\n");
+ fprintf (stderr, "Average SE allocation size (incl. canaries): %6.2f\n",
+ (float)total_used/(float)num_allocs);
+ fprintf (stderr, "Average SE allocation size (excl. canaries): %6.2f\n",
+ (float)(total_used - used_for_canaries)/(float)num_allocs);
+ fprintf (stderr, " Average wasted bytes per allocation: %6.2f\n",
+ (total_allocation - total_used)/(float)num_allocs);
+ total_space_wasted = (total_allocation - total_used)
+ + (sizeof(emem_chunk_t) + sizeof(emem_canary_t));
+ fprintf (stderr, " Space used for headers + unused allocation: %8u\n",
+ total_space_wasted);
+ fprintf (stderr, "--> %% overhead/waste: %4.2f\n",
+ 100 * (float)total_space_wasted/(float)total_space_allocated_from_os);
+
+ fprintf (stderr, "\nAllocation distribution (sizes include canaries):\n");
+ for (i = 0; i < (NUM_ALLOC_DIST-1); i++)
+ fprintf (stderr, "size < %5d: %8u\n", 32<<i, allocations[i]);
+ fprintf (stderr, "size > %5d: %8u\n", 32<<i, allocations[i]);
}
-/* Initialize the capture-lifetime memory allocation pool.
- * This function should be called only once when Wireshark or TShark starts
- * up.
- */
-void
-se_init_chunk(void)
+#endif
+
+static gboolean
+emem_verify_pointer(emem_header_t *hdr, const void *ptr)
+{
+ const gchar *cptr = ptr;
+ emem_chunk_t *used_list[2];
+ guint8 used_list_idx;
+ emem_chunk_t *chunk;
+
+ used_list[0] = hdr->free_list;
+ used_list[1] = hdr->used_list;
+
+ for (used_list_idx=0; used_list_idx < G_N_ELEMENTS(used_list); ++used_list_idx) {
+ chunk = used_list[used_list_idx];
+ for ( ; chunk ; chunk = chunk->next) {
+ if (cptr >= (chunk->buf + chunk->free_offset_init) &&
+ cptr < (chunk->buf + chunk->free_offset))
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
+gboolean
+ep_verify_pointer(const void *ptr)
+{
+ return emem_verify_pointer(&ep_packet_mem, ptr);
+}
+
+gboolean
+se_verify_pointer(const void *ptr)
+{
+ return emem_verify_pointer(&se_packet_mem, ptr);
+}
+
+static void
+emem_scrub_memory(char *buf, size_t size, gboolean alloc)
{
- se_packet_mem.free_list=NULL;
- se_packet_mem.used_list=NULL;
+ guint scrubbed_value;
+ guint offset;
+
+ if (!debug_use_memory_scrubber)
+ return;
+
+ if (alloc) /* this memory is being allocated */
+ scrubbed_value = 0xBADDCAFE;
+ else /* this memory is being freed */
+ scrubbed_value = 0xDEADBEEF;
+
+ /* We shouldn't need to check the alignment of the starting address
+ * since this is malloc'd memory (or 'pagesize' bytes into malloc'd
+ * memory).
+ */
+
+ /* XXX - We might want to use memset here in order to avoid problems on
+ * alignment-sensitive platforms, e.g.
+ * http://stackoverflow.com/questions/108866/is-there-memset-that-accepts-integers-larger-than-char
+ */
+
+ for (offset = 0; offset + sizeof(guint) <= size; offset += sizeof(guint))
+ *(guint*)(buf+offset) = scrubbed_value;
+
+ /* Initialize the last bytes, if any */
+ if (offset < size) {
+ *(guint8*)(buf+offset) = scrubbed_value >> 24;
+ offset++;
+ if (offset < size) {
+ *(guint8*)(buf+offset) = (scrubbed_value >> 16) & 0xFF;
+ offset++;
+ if (offset < size) {
+ *(guint8*)(buf+offset) = (scrubbed_value >> 8) & 0xFF;
+ offset++;
+ }
+ }
+ }
+
-#ifdef DEBUG_USE_CANARIES
- emem_canary(se_canary);
-#endif /* DEBUG_USE_CANARIES */
}
-#if !defined(SE_DEBUG_FREE)
static void
-emem_create_chunk(emem_chunk_t **free_list) {
+emem_create_chunk(emem_chunk_t **free_list, gboolean use_canary) {
#if defined (_WIN32)
BOOL ret;
char *buf_end, *prot1, *prot2;
int ret;
char *buf_end, *prot1, *prot2;
#endif /* _WIN32 / USE_GUARD_PAGES */
+ emem_chunk_t *npc;
+
/* we dont have any free data, so we must allocate a new one */
- if(!*free_list){
- emem_chunk_t *npc;
- npc = g_malloc(sizeof(emem_chunk_t));
- npc->next = NULL;
-#ifdef DEBUG_USE_CANARIES
-#if ! defined(EP_DEBUG_FREE) && ! defined(SE_DEBUG_FREE)
- npc->c_count = 0;
-#endif
-#endif /* DEBUG_USE_CANARIES */
+ DISSECTOR_ASSERT(!*free_list);
- *free_list = npc;
+ npc = g_new(emem_chunk_t, 1);
+ npc->next = NULL;
+ if (use_canary) {
+ npc->canary_info = g_new(emem_canary_t, 1);
+ npc->canary_info->c_count = 0;
+ }
+ else
+ npc->canary_info = NULL;
+
+ *free_list = npc;
#if defined (_WIN32)
- /*
- * MSDN documents VirtualAlloc/VirtualProtect at
- * http://msdn.microsoft.com/library/en-us/memory/base/creating_guard_pages.asp
- */
+ /*
+ * MSDN documents VirtualAlloc/VirtualProtect at
+ * http://msdn.microsoft.com/library/en-us/memory/base/creating_guard_pages.asp
+ */
- /* XXX - is MEM_COMMIT|MEM_RESERVE correct? */
- npc->buf = VirtualAlloc(NULL, EMEM_PACKET_CHUNK_SIZE,
- MEM_COMMIT|MEM_RESERVE, PAGE_READWRITE);
- if(npc->buf == NULL) {
- THROW(OutOfMemoryError);
- }
- buf_end = npc->buf + EMEM_PACKET_CHUNK_SIZE;
+ /* XXX - is MEM_COMMIT|MEM_RESERVE correct? */
+ npc->buf = VirtualAlloc(NULL, EMEM_PACKET_CHUNK_SIZE,
+ MEM_COMMIT|MEM_RESERVE, PAGE_READWRITE);
+ if(npc->buf == NULL) {
+ THROW(OutOfMemoryError);
+ }
+#ifdef SHOW_EMEM_STATS
+ total_no_chunks++;
+#endif
+ buf_end = npc->buf + EMEM_PACKET_CHUNK_SIZE;
- /* Align our guard pages on page-sized boundaries */
- prot1 = (char *) ((((int) npc->buf + pagesize - 1) / pagesize) * pagesize);
- prot2 = (char *) ((((int) buf_end - (1 * pagesize)) / pagesize) * pagesize);
+ /* Align our guard pages on page-sized boundaries */
+ prot1 = (char *) ((((int) npc->buf + pagesize - 1) / pagesize) * pagesize);
+ prot2 = (char *) ((((int) buf_end - (1 * pagesize)) / pagesize) * pagesize);
- ret = VirtualProtect(prot1, pagesize, PAGE_NOACCESS, &oldprot);
- g_assert(ret != 0 || versinfo.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS);
- ret = VirtualProtect(prot2, pagesize, PAGE_NOACCESS, &oldprot);
- g_assert(ret != 0 || versinfo.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS);
+ ret = VirtualProtect(prot1, pagesize, PAGE_NOACCESS, &oldprot);
+ g_assert(ret != 0 || versinfo.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS);
+ ret = VirtualProtect(prot2, pagesize, PAGE_NOACCESS, &oldprot);
+ g_assert(ret != 0 || versinfo.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS);
- npc->amount_free_init = (unsigned int) (prot2 - prot1 - pagesize);
- npc->amount_free = npc->amount_free_init;
- npc->free_offset_init = (unsigned int) (prot1 - npc->buf) + pagesize;
- npc->free_offset = npc->free_offset_init;
+ npc->amount_free_init = (unsigned int) (prot2 - prot1 - pagesize);
+ npc->amount_free = npc->amount_free_init;
+ npc->free_offset_init = (unsigned int) (prot1 - npc->buf) + pagesize;
+ npc->free_offset = npc->free_offset_init;
#elif defined(USE_GUARD_PAGES)
- npc->buf = mmap(NULL, EMEM_PACKET_CHUNK_SIZE,
- PROT_READ|PROT_WRITE, ANON_PAGE_MODE, ANON_FD, 0);
- if(npc->buf == MAP_FAILED) {
- /* XXX - what do we have to cleanup here? */
- THROW(OutOfMemoryError);
- }
- buf_end = npc->buf + EMEM_PACKET_CHUNK_SIZE;
-
- /* Align our guard pages on page-sized boundaries */
- prot1 = (char *) ((((intptr_t) npc->buf + pagesize - 1) / pagesize) * pagesize);
- prot2 = (char *) ((((intptr_t) buf_end - (1 * pagesize)) / pagesize) * pagesize);
- ret = mprotect(prot1, pagesize, PROT_NONE);
- g_assert(ret != -1);
- ret = mprotect(prot2, pagesize, PROT_NONE);
- g_assert(ret != -1);
-
- npc->amount_free_init = prot2 - prot1 - pagesize;
- npc->amount_free = npc->amount_free_init;
- npc->free_offset_init = (prot1 - npc->buf) + pagesize;
- npc->free_offset = npc->free_offset_init;
+ npc->buf = mmap(NULL, EMEM_PACKET_CHUNK_SIZE,
+ PROT_READ|PROT_WRITE, ANON_PAGE_MODE, ANON_FD, 0);
+ if(npc->buf == MAP_FAILED) {
+ /* XXX - what do we have to cleanup here? */
+ THROW(OutOfMemoryError);
+ }
+ buf_end = npc->buf + EMEM_PACKET_CHUNK_SIZE;
+#ifdef SHOW_EMEM_STATS
+ total_no_chunks++;
+#endif
+ /* Align our guard pages on page-sized boundaries */
+ prot1 = (char *) ((((intptr_t) npc->buf + pagesize - 1) / pagesize) * pagesize);
+ prot2 = (char *) ((((intptr_t) buf_end - (1 * pagesize)) / pagesize) * pagesize);
+ ret = mprotect(prot1, pagesize, PROT_NONE);
+ g_assert(ret != -1);
+ ret = mprotect(prot2, pagesize, PROT_NONE);
+ g_assert(ret != -1);
+
+ npc->amount_free_init = prot2 - prot1 - pagesize;
+ npc->amount_free = npc->amount_free_init;
+ npc->free_offset_init = (prot1 - npc->buf) + pagesize;
+ npc->free_offset = npc->free_offset_init;
#else /* Is there a draft in here? */
- npc->buf = malloc(EMEM_PACKET_CHUNK_SIZE);
- if(npc->buf == NULL) {
- THROW(OutOfMemoryError);
- }
- npc->amount_free_init = EMEM_PACKET_CHUNK_SIZE;
- npc->amount_free = npc->amount_free_init;
- npc->free_offset_init = 0;
- npc->free_offset = npc->free_offset_init;
-#endif /* USE_GUARD_PAGES */
+ npc->buf = g_malloc(EMEM_PACKET_CHUNK_SIZE);
+ if(npc->buf == NULL) {
+ THROW(OutOfMemoryError);
}
-}
+#ifdef SHOW_EMEM_STATS
+ total_no_chunks++;
#endif
+ npc->amount_free_init = EMEM_PACKET_CHUNK_SIZE;
+ npc->amount_free = npc->amount_free_init;
+ npc->free_offset_init = 0;
+ npc->free_offset = npc->free_offset_init;
+#endif /* USE_GUARD_PAGES */
+}
-/* allocate 'size' amount of memory. */
static void *
-emem_alloc(size_t size, gboolean debug_free, emem_header_t *mem, guint8 *canary)
+emem_alloc_chunk(size_t size, emem_header_t *mem)
{
void *buf;
-#ifdef DEBUG_USE_CANARIES
- void *cptr;
- guint8 pad = emem_canary_pad(size);
-#else
- static guint8 pad = 8;
-#endif /* DEBUG_USE_CANARIES */
- emem_chunk_t *free_list;
- if (!debug_free) {
- /* Round up to an 8 byte boundary. Make sure we have at least
- * 8 pad bytes for our canary.
- */
- size += pad;
+ size_t asize = size;
+ gboolean use_canary = mem->debug_use_canary;
+ guint8 pad;
+ emem_chunk_t *free_list;
- /* make sure we dont try to allocate too much (arbitrary limit) */
- DISSECTOR_ASSERT(size<(EMEM_PACKET_CHUNK_SIZE>>2));
+ /* Round up to an 8 byte boundary. Make sure we have at least
+ * 8 pad bytes for our canary.
+ */
+ if (use_canary)
+ pad = emem_canary_pad(asize);
+ else
+ pad = (G_MEM_ALIGN - (asize & (G_MEM_ALIGN-1))) & (G_MEM_ALIGN-1);
+
+ asize += pad;
+
+#ifdef SHOW_EMEM_STATS
+ /* Do this check here so we can include the canary size */
+ if (mem == &se_packet_mem) {
+ if (asize < 32)
+ allocations[0]++;
+ else if (asize < 64)
+ allocations[1]++;
+ else if (asize < 128)
+ allocations[2]++;
+ else if (asize < 256)
+ allocations[3]++;
+ else if (asize < 512)
+ allocations[4]++;
+ else if (asize < 1024)
+ allocations[5]++;
+	else if (asize < 2048)
+		allocations[6]++;
+	else if (asize < 4096)
+		allocations[7]++;
+	else if (asize < 8192)
+		allocations[8]++;
+	else
+		/* Everything >= 8192 goes in the final overflow bucket. The old
+		 * "asize < 16384" branch incremented allocations[8] a second
+		 * time, silently folding 8192..16383 into the "< 8192" bucket. */
+		allocations[(NUM_ALLOC_DIST-1)]++;
+ }
+#endif
- emem_create_chunk(&mem->free_list);
+ /* make sure we dont try to allocate too much (arbitrary limit) */
+ DISSECTOR_ASSERT(size<(EMEM_PACKET_CHUNK_SIZE>>2));
- /* oops, we need to allocate more memory to serve this request
- * than we have free. move this node to the used list and try again
- */
- if(size>mem->free_list->amount_free
-#ifdef DEBUG_USE_CANARIES
- || mem->free_list->c_count >= EMEM_ALLOCS_PER_CHUNK
-#endif /* DEBUG_USE_CANARIES */
- ) {
- emem_chunk_t *npc;
- npc=mem->free_list;
- mem->free_list=mem->free_list->next;
- npc->next=mem->used_list;
- mem->used_list=npc;
- }
+ if (!mem->free_list)
+ emem_create_chunk(&mem->free_list, use_canary);
- emem_create_chunk(&mem->free_list);
+ /* oops, we need to allocate more memory to serve this request
+ * than we have free. move this node to the used list and try again
+ */
+ if(asize > mem->free_list->amount_free ||
+ (use_canary &&
+ mem->free_list->canary_info->c_count >= EMEM_ALLOCS_PER_CHUNK)) {
+ emem_chunk_t *npc;
+ npc=mem->free_list;
+ mem->free_list=mem->free_list->next;
+ npc->next=mem->used_list;
+ mem->used_list=npc;
- free_list = mem->free_list;
+ if (!mem->free_list)
+ emem_create_chunk(&mem->free_list, use_canary);
+ }
- buf = free_list->buf + free_list->free_offset;
+ free_list = mem->free_list;
- free_list->amount_free -= (unsigned int) size;
- free_list->free_offset += (unsigned int) size;
+ buf = free_list->buf + free_list->free_offset;
-#ifdef DEBUG_USE_CANARIES
- cptr = (char *)buf + size - pad;
- memcpy(cptr, canary, pad);
- free_list->canary[free_list->c_count] = cptr;
- free_list->cmp_len[free_list->c_count] = pad;
- free_list->c_count++;
-#endif /* DEBUG_USE_CANARIES */
- } else {
- emem_chunk_t *npc;
+ free_list->amount_free -= (unsigned int) asize;
+ free_list->free_offset += (unsigned int) asize;
- npc=g_malloc(sizeof(emem_chunk_t));
- npc->next=mem->used_list;
- npc->amount_free=size;
- npc->free_offset=0;
- npc->buf=g_malloc(size);
- buf = npc->buf;
- mem->used_list=npc;
+ if (use_canary) {
+ void *cptr = (char *)buf + size;
+ memcpy(cptr, mem->canary, pad);
+ free_list->canary_info->canary[free_list->canary_info->c_count] = cptr;
+ free_list->canary_info->cmp_len[free_list->canary_info->c_count] = pad;
+ free_list->canary_info->c_count++;
}
return buf;
}
+static void *
+emem_alloc_glib(size_t size, emem_header_t *mem)
+{
+ emem_chunk_t *npc;
+
+ npc=g_new(emem_chunk_t, 1);
+ npc->next=mem->used_list;
+ npc->buf=g_malloc(size);
+ npc->canary_info = NULL;
+ mem->used_list=npc;
+ /* There's no padding/alignment involved (from our point of view) when
+ * we fetch the memory directly from the system pool, so WYSIWYG */
+ npc->free_offset = npc->free_offset_init = 0;
+ npc->amount_free = npc->amount_free_init = (unsigned int) size;
+
+ return npc->buf;
+}
+
+/* allocate 'size' amount of memory. */
+static void *
+emem_alloc(size_t size, emem_header_t *mem)
+{
+ void *buf = mem->memory_alloc(size, mem);
+
+ /* XXX - this is a waste of time if the allocator function is going to
+ * memset this straight back to 0.
+ */
+ emem_scrub_memory(buf, size, TRUE);
+
+ return buf;
+}
+
/* allocate 'size' amount of memory with an allocation lifetime until the
* next packet.
*/
void *
ep_alloc(size_t size)
{
-#ifdef EP_DEBUG_FREE
- return emem_alloc(size, TRUE, &ep_packet_mem, ep_canary);
-#else
- return emem_alloc(size, FALSE, &ep_packet_mem, ep_canary);
-#endif
+ return emem_alloc(size, &ep_packet_mem);
}
/* allocate 'size' amount of memory with an allocation lifetime until the
void *
se_alloc(size_t size)
{
-#ifdef SE_DEBUG_FREE
- return emem_alloc(size, TRUE, &se_packet_mem, se_canary);
-#else
- return emem_alloc(size, FALSE, &se_packet_mem, se_canary);
-#endif
+ return emem_alloc(size, &se_packet_mem);
}
-void* ep_alloc0(size_t size) {
+void *
+ep_alloc0(size_t size)
+{
return memset(ep_alloc(size),'\0',size);
}
-gchar* ep_strdup(const gchar* src) {
+gchar *
+ep_strdup(const gchar* src)
+{
guint len = (guint) strlen(src);
gchar* dst;
- dst = strncpy(ep_alloc(len+1), src, len);
-
- dst[len] = '\0';
+ dst = memcpy(ep_alloc(len+1), src, len+1);
return dst;
}
-gchar* ep_strndup(const gchar* src, size_t len) {
+gchar *
+ep_strndup(const gchar* src, size_t len)
+{
gchar* dst = ep_alloc(len+1);
- guint i;
- for (i = 0; (i < len) && src[i]; i++)
- dst[i] = src[i];
-
- dst[i] = '\0';
+ g_strlcpy(dst, src, len+1);
return dst;
}
-void* ep_memdup(const void* src, size_t len) {
+void *
+ep_memdup(const void* src, size_t len)
+{
return memcpy(ep_alloc(len), src, len);
}
-gchar* ep_strdup_vprintf(const gchar* fmt, va_list ap) {
+gchar *
+ep_strdup_vprintf(const gchar* fmt, va_list ap)
+{
va_list ap2;
gsize len;
gchar* dst;
return dst;
}
-gchar* ep_strdup_printf(const gchar* fmt, ...) {
+gchar *
+ep_strdup_printf(const gchar* fmt, ...)
+{
va_list ap;
gchar* dst;
return dst;
}
-gchar** ep_strsplit(const gchar* string, const gchar* sep, int max_tokens) {
+gchar **
+ep_strsplit(const gchar* string, const gchar* sep, int max_tokens)
+{
gchar* splitted;
gchar* s;
guint tokens;
-void* se_alloc0(size_t size) {
+void *
+se_alloc0(size_t size)
+{
return memset(se_alloc(size),'\0',size);
}
/* If str is NULL, just return the string "<NULL>" so that the callers dont
* have to bother checking it.
*/
-gchar* se_strdup(const gchar* src) {
+gchar *
+se_strdup(const gchar* src)
+{
guint len;
gchar* dst;
- if(!src){
+ if(!src)
return "<NULL>";
- }
len = (guint) strlen(src);
- dst = strncpy(se_alloc(len+1), src, len);
-
- dst[len] = '\0';
+ dst = memcpy(se_alloc(len+1), src, len+1);
return dst;
}
-gchar* se_strndup(const gchar* src, size_t len) {
+gchar *
+se_strndup(const gchar* src, size_t len)
+{
gchar* dst = se_alloc(len+1);
- guint i;
-
- for (i = 0; (i < len) && src[i]; i++)
- dst[i] = src[i];
- dst[i] = '\0';
+ g_strlcpy(dst, src, len+1);
return dst;
}
-void* se_memdup(const void* src, size_t len) {
+void *
+se_memdup(const void* src, size_t len)
+{
return memcpy(se_alloc(len), src, len);
}
-gchar* se_strdup_vprintf(const gchar* fmt, va_list ap) {
+gchar *
+se_strdup_vprintf(const gchar* fmt, va_list ap)
+{
va_list ap2;
gsize len;
gchar* dst;
return dst;
}
-gchar* se_strdup_printf(const gchar* fmt, ...) {
+gchar *
+se_strdup_printf(const gchar* fmt, ...)
+{
va_list ap;
gchar* dst;
/* release all allocated memory back to the pool. */
static void
-emem_free_all(gboolean debug_free, emem_header_t *mem, guint8 *canary, emem_tree_t *trees, const char *error_msg)
+emem_free_all(emem_header_t *mem)
{
+ gboolean use_chunks = mem->debug_use_chunks;
+ guint8 *canary = (mem->debug_use_canary) ? mem->canary : NULL;
+
emem_chunk_t *npc;
emem_tree_t *tree_list;
-#ifdef DEBUG_USE_CANARIES
guint i;
-#endif /* DEBUG_USE_CANARIES */
/* move all used chunks over to the free list */
while(mem->used_list){
/* clear them all out */
npc = mem->free_list;
while (npc != NULL) {
- if (!debug_free) {
-#ifdef DEBUG_USE_CANARIES
- for (i = 0; i < npc->c_count; i++) {
- if (memcmp(npc->canary[i], canary, npc->cmp_len[i]) != 0)
- g_error(error_msg);
+ if (use_chunks) {
+ if (canary) {
+ for (i = 0; i < npc->canary_info->c_count; i++) {
+ if (memcmp(npc->canary_info->canary[i], canary, npc->canary_info->cmp_len[i]) != 0)
+ g_error("Memory corrupted");
+ }
+ npc->canary_info->c_count = 0;
}
- npc->c_count = 0;
-#endif /* DEBUG_USE_CANARIES */
+
+ emem_scrub_memory((npc->buf + npc->free_offset_init),
+ (npc->free_offset - npc->free_offset_init),
+ FALSE);
+
npc->amount_free = npc->amount_free_init;
npc->free_offset = npc->free_offset_init;
npc = npc->next;
} else {
emem_chunk_t *next = npc->next;
+ emem_scrub_memory(npc->buf, npc->amount_free_init, FALSE);
+
g_free(npc->buf);
g_free(npc);
npc = next;
}
}
+ if (!use_chunks) {
+ /* We've freed all this memory already */
+ mem->free_list = NULL;
+ }
+
/* release/reset all allocated trees */
- for(tree_list=trees;tree_list;tree_list=tree_list->next){
+ for(tree_list=mem->trees;tree_list;tree_list=tree_list->next){
tree_list->tree=NULL;
}
}
void
ep_free_all(void)
{
-#ifdef EP_DEBUG_FREE
- emem_free_all(TRUE, &ep_packet_mem, ep_canary, NULL, "Per-packet memory corrupted.");
-#else
- emem_free_all(FALSE, &ep_packet_mem, ep_canary, NULL, "Per-packet memory corrupted.");
-#endif
-
-#ifdef EP_DEBUG_FREE
- ep_init_chunk();
-#endif
+ emem_free_all(&ep_packet_mem);
}
/* release all allocated memory back to the pool. */
+/* se_-scoped (per-capture-session) memory: optionally print allocation
+ * statistics first, then recycle the pool. */
void
se_free_all(void)
{
-#ifdef SE_DEBUG_FREE
-	emem_free_all(TRUE, &se_packet_mem, se_canary, se_trees, "Per-session memory corrupted.");
-#else
-	emem_free_all(FALSE, &se_packet_mem, se_canary, se_trees, "Per-session memory corrupted.");
+#ifdef SHOW_EMEM_STATS
+	print_alloc_stats();
#endif
-#ifdef SE_DEBUG_FREE
-	se_init_chunk();
-#endif
+	/* emem_free_all() also walks se_packet_mem.trees and resets every
+	 * registered se_ tree, so no separate tree cleanup is needed. */
+	emem_free_all(&se_packet_mem);
}
-ep_stack_t ep_stack_new(void) {
+ep_stack_t
+ep_stack_new(void) {
ep_stack_t s = ep_new(struct _ep_stack_frame_t*);
*s = ep_new0(struct _ep_stack_frame_t);
return s;
of allocating new ones.
*/
-
-void* ep_stack_push(ep_stack_t stack, void* data) {
+void *
+ep_stack_push(ep_stack_t stack, void* data)
+{
struct _ep_stack_frame_t* frame;
struct _ep_stack_frame_t* head = (*stack);
return data;
}
-void* ep_stack_pop(ep_stack_t stack) {
+void *
+ep_stack_pop(ep_stack_t stack)
+{
if ((*stack)->below) {
(*stack) = (*stack)->below;
}
}
-
-
-#ifdef REMOVED
-void print_tree_item(emem_tree_node_t *node, int level){
- int i;
- for(i=0;i<level;i++){
- printf(" ");
- }
- printf("%s KEY:0x%08x node:0x%08x parent:0x%08x left:0x%08x right:0x%08x\n",node->u.rb_color==EMEM_TREE_RB_COLOR_BLACK?"BLACK":"RED",node->key32,(int)node,(int)node->parent,(int)node->left,(int)node->right);
- if(node->left)
- print_tree_item(node->left,level+1);
- if(node->right)
- print_tree_item(node->right,level+1);
-}
-
-void print_tree(emem_tree_node_t *node){
- if(!node){
- return;
- }
- while(node->parent){
- node=node->parent;
- }
- print_tree_item(node,0);
-}
-#endif
-
-
-
-/* routines to manage se allocated red-black trees */
-emem_tree_t *se_trees=NULL;
-
+/* Create a red-black tree with se_ (per-capture-session) lifetime and
+ * register it on the se pool's tree list so se_free_all() can reset it. */
emem_tree_t *
se_tree_create(int type, const char *name)
{
	emem_tree_t *tree_list;
-	tree_list=malloc(sizeof(emem_tree_t));
-	tree_list->next=se_trees;
+	/* g_malloc() aborts on allocation failure, so no NULL check is needed
+	 * (the old plain malloc() result was never checked — a latent bug). */
+	tree_list=g_malloc(sizeof(emem_tree_t));
+	/* Registered trees now hang off the pool instead of a separate
+	 * file-scope se_trees global. */
+	tree_list->next=se_packet_mem.trees;
	tree_list->type=type;
	tree_list->tree=NULL;
+	/* NOTE(review): the name string is stored, not copied — the caller must
+	 * keep it valid for the lifetime of the tree (callers pass literals). */
	tree_list->name=name;
	tree_list->malloc=se_alloc;
	se_packet_mem.trees=tree_list;
	return tree_list;
}
-
-
void *
emem_tree_lookup32(emem_tree_t *se_tree, guint32 key)
{
}
+ if(!node){
+ return NULL;
+ }
+
/* If we are still at the root of the tree this means that this node
* is either smaller than the search key and then we return this
* node or else there is no smaller key available and then
}
return NULL;
}
+
static inline emem_tree_node_t *
emem_tree_uncle(emem_tree_node_t *node)
{
}
}
-static void* lookup_or_insert32(emem_tree_t *se_tree, guint32 key, void*(*func)(void*),void* ud, int is_subtree) {
+static void *
+lookup_or_insert32(emem_tree_t *se_tree, guint32 key, void*(*func)(void*),void* ud, int is_subtree)
+{
emem_tree_node_t *node;
node=se_tree->tree;
{
emem_tree_t *tree_list;
- tree_list=g_malloc(sizeof(emem_tree_t));
+ tree_list=g_new(emem_tree_t, 1);
tree_list->next=NULL;
tree_list->type=type;
tree_list->tree=NULL;
return tree_list;
}
-static void* create_sub_tree(void* d) {
+static void *
+create_sub_tree(void* d)
+{
emem_tree_t *se_tree = d;
return emem_tree_create_subtree(se_tree, "subtree");
}
guint32 i;
guint32 tmp;
- aligned = malloc(div * sizeof (guint32));
+ aligned = g_malloc(div * sizeof (guint32));
/* pack the bytes one one by one into guint32s */
tmp = 0;
emem_tree_insert32_array(se_tree, key, v);
- free(aligned);
+ g_free(aligned);
}
void *
guint32 tmp;
void *ret;
- aligned = malloc(div * sizeof (guint32));
+ aligned = g_malloc(div * sizeof (guint32));
/* pack the bytes one one by one into guint32s */
tmp = 0;
ret = emem_tree_lookup32_array(se_tree, key);
- free(aligned);
+ g_free(aligned);
return ret;
}
#define MAX_STRBUF_LEN 65536
static gsize
-next_size(gsize cur_alloc_len, gsize wanted_alloc_len, gsize max_alloc_len) {
+next_size(gsize cur_alloc_len, gsize wanted_alloc_len, gsize max_alloc_len)
+{
if (max_alloc_len < 1 || max_alloc_len > MAX_STRBUF_LEN) {
max_alloc_len = MAX_STRBUF_LEN;
}
}
static void
-ep_strbuf_grow(emem_strbuf_t *strbuf, gsize wanted_alloc_len) {
+ep_strbuf_grow(emem_strbuf_t *strbuf, gsize wanted_alloc_len)
+{
gsize new_alloc_len;
gchar *new_str;
}
emem_strbuf_t *
-ep_strbuf_sized_new(gsize alloc_len, gsize max_alloc_len) {
+ep_strbuf_sized_new(gsize alloc_len, gsize max_alloc_len)
+{
emem_strbuf_t *strbuf;
strbuf = ep_alloc(sizeof(emem_strbuf_t));
}
emem_strbuf_t *
-ep_strbuf_new(const gchar *init) {
+ep_strbuf_new(const gchar *init)
+{
emem_strbuf_t *strbuf;
strbuf = ep_strbuf_sized_new(next_size(0, init?strlen(init):0, 0), 0);
}
emem_strbuf_t *
-ep_strbuf_new_label(const gchar *init) {
+ep_strbuf_new_label(const gchar *init)
+{
emem_strbuf_t *strbuf;
gsize full_len;
}
emem_strbuf_t *
-ep_strbuf_append(emem_strbuf_t *strbuf, const gchar *str) {
+ep_strbuf_append(emem_strbuf_t *strbuf, const gchar *str)
+{
gsize add_len, full_len;
if (!strbuf || !str || str[0] == '\0') {
}
/* Be optimistic; try the g_strlcpy first & see if enough room. */
- /* Note: full_len doesn't count the trailing '\0'; add_len does allow for same */
+ /* Note: full_len doesn't count the trailing '\0'; add_len does allow for same */
add_len = strbuf->alloc_len - strbuf->len;
full_len = g_strlcpy(&strbuf->str[strbuf->len], str, add_len);
if (full_len < add_len) {
}
void
-ep_strbuf_append_vprintf(emem_strbuf_t *strbuf, const gchar *format, va_list ap) {
+ep_strbuf_append_vprintf(emem_strbuf_t *strbuf, const gchar *format, va_list ap)
+{
va_list ap2;
gsize add_len, full_len;
G_VA_COPY(ap2, ap);
/* Be optimistic; try the g_vsnprintf first & see if enough room. */
- /* Note: full_len doesn't count the trailing '\0'; add_len does allow for same. */
+ /* Note: full_len doesn't count the trailing '\0'; add_len does allow for same. */
add_len = strbuf->alloc_len - strbuf->len;
full_len = g_vsnprintf(&strbuf->str[strbuf->len], (gulong) add_len, format, ap);
if (full_len < add_len) {
}
void
-ep_strbuf_append_printf(emem_strbuf_t *strbuf, const gchar *format, ...) {
+ep_strbuf_append_printf(emem_strbuf_t *strbuf, const gchar *format, ...)
+{
va_list ap;
va_start(ap, format);
}
void
-ep_strbuf_printf(emem_strbuf_t *strbuf, const gchar *format, ...) {
+ep_strbuf_printf(emem_strbuf_t *strbuf, const gchar *format, ...)
+{
va_list ap;
if (!strbuf) {
return;
}
emem_strbuf_t *
-ep_strbuf_append_c(emem_strbuf_t *strbuf, const gchar c) {
+ep_strbuf_append_c(emem_strbuf_t *strbuf, const gchar c)
+{
if (!strbuf) {
return strbuf;
}
}
emem_strbuf_t *
-ep_strbuf_truncate(emem_strbuf_t *strbuf, gsize len) {
+ep_strbuf_truncate(emem_strbuf_t *strbuf, gsize len)
+{
if (!strbuf || len >= strbuf->len) {
return strbuf;
}