2 * Ethereal memory management and garbage collection functions
7 * Ethereal - Network traffic analyzer
8 * By Gerald Combs <gerald@ethereal.com>
9 * Copyright 1998 Gerald Combs
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version 2
14 * of the License, or (at your option) any later version.
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
35 #ifdef HAVE_SYS_TIME_H
44 #include <windows.h> /* VirtualAlloc, VirtualProtect */
45 #include <process.h> /* getpid */
51 #include <wiretap/file_util.h>
53 /* Add guard pages at each end of our allocated memory */
54 #if defined(HAVE_SYSCONF) && defined(HAVE_MMAP) && defined(HAVE_MPROTECT) && defined(HAVE_STDINT_H)
56 #include <sys/types.h>
58 #define USE_GUARD_PAGES 1
/* Tunable constants and globals for the ep_/se_ chunked allocators. */
61 /* When required, allocate more memory from the OS in chunks of this size */
62 #define EMEM_PACKET_CHUNK_SIZE 10485760
/* 10485760 bytes == 10 MiB per chunk. */
64 /* The maximum number of allocations per chunk */
65 #define EMEM_ALLOCS_PER_CHUNK (EMEM_PACKET_CHUNK_SIZE / 512)
/* One canary-bookkeeping slot per 512 bytes of chunk space. */
68 * Tools like Valgrind and ElectricFence don't work well with memchunks.
69 * Uncomment the defines below to make {ep|se}_alloc() allocate each
70 * object individually.
72 /* #define EP_DEBUG_FREE 1 */
73 /* #define SE_DEBUG_FREE 1 */
75 #if GLIB_MAJOR_VERSION >= 2
/* GLib 2 PRNG state, lazily created in emem_canary(). */
76 GRand *rand_state = NULL;
79 #define EMEM_CANARY_SIZE 8
/* Pads are 8..15 bytes long (see emem_canary_pad()), so the stored canary
 * pattern must hold 15 random bytes for the longest possible comparison. */
80 #define EMEM_CANARY_DATA_SIZE (EMEM_CANARY_SIZE * 2 - 1)
/* Random canary bytes for the packet-scope (ep_) and capture-scope (se_)
 * pools; filled in by emem_canary() at pool-init time. */
81 guint8 ep_canary[EMEM_CANARY_DATA_SIZE], se_canary[EMEM_CANARY_DATA_SIZE];
/*
 * One chunk of allocator memory.  Allocations are carved sequentially out
 * of the chunk; free_offset/amount_free track the carve point and are
 * reset to their *_init values by *_free_all() so the chunk can be reused.
 */
83 typedef struct _emem_chunk_t {
84 struct _emem_chunk_t *next;
/* Initial values, remembered so the chunk can be recycled after a free. */
85 unsigned int amount_free_init;
86 unsigned int amount_free;
87 unsigned int free_offset_init;
88 unsigned int free_offset;
90 #if ! defined(EP_DEBUG_FREE) && ! defined(SE_DEBUG_FREE)
/* Per-allocation canary bookkeeping: the address of each allocation's
 * trailing canary pad and the pad's length, verified at free time. */
92 void *canary[EMEM_ALLOCS_PER_CHUNK];
93 guint8 cmp_len[EMEM_ALLOCS_PER_CHUNK];
/*
 * Head of one allocator pool: chunks with room left (free_list) and
 * exhausted chunks (used_list).  *_free_all() moves used back to free.
 */
97 typedef struct _emem_header_t {
98 emem_chunk_t *free_list;
99 emem_chunk_t *used_list;
/* The two pools: packet-lifetime (ep_) and capture-lifetime (se_). */
102 static emem_header_t ep_packet_mem;
103 static emem_header_t se_packet_mem;
106 * Set a canary value to be placed between memchunks.
/*
 * Fill 'canary' with EMEM_CANARY_DATA_SIZE random bytes, preferring the
 * strongest source available: GLib 2's PRNG, then /dev/urandom, then
 * srandom()/random() seeded from the wall clock and pid.
 */
110 emem_canary(guint8 *canary) {
113 /* First, use GLib's random function if we have it */
114 #if GLIB_MAJOR_VERSION >= 2
115 if (rand_state == NULL) {
116 rand_state = g_rand_new();
118 for (i = 0; i < EMEM_CANARY_DATA_SIZE; i ++) {
119 canary[i] = (guint8) g_rand_int(rand_state);
125 /* Try /dev/urandom */
126 if ((fp = eth_fopen("/dev/urandom", "r")) != NULL) {
127 sz = fread(canary, EMEM_CANARY_DATA_SIZE, 1, fp);
/* NOTE(review): with nmemb == 1, fread() returns the number of *items*
 * read (0 or 1), never EMEM_CANARY_SIZE (8), so this success test can
 * never be true and the /dev/urandom bytes are always discarded in
 * favor of the weaker fallback below.  Compare sz == 1 instead (or swap
 * the size/nmemb arguments and compare against EMEM_CANARY_DATA_SIZE). */
129 if (sz == EMEM_CANARY_SIZE) {
134 /* Our last resort */
/* Weak fallback seed: wall-clock time mixed with the pid. */
135 srandom(time(NULL) | getpid());
136 for (i = 0; i < EMEM_CANARY_DATA_SIZE; i ++) {
137 canary[i] = (guint8) random();
140 #endif /* GLIB_MAJOR_VERSION >= 2 */
144 * Given an allocation size, return the amount of padding needed for
/*
 * Pad the allocation up past the next 8-byte boundary so there is always
 * room for at least a full 8-byte canary: the result is in 8..15.
 */
148 emem_canary_pad (size_t allocation) {
151 pad = EMEM_CANARY_SIZE - (allocation % EMEM_CANARY_SIZE);
152 if (pad < EMEM_CANARY_SIZE)
153 pad += EMEM_CANARY_SIZE;
158 /* Initialize the packet-lifetime memory allocation pool.
159 * This function should be called only once when Ethereal or Tethereal starts
/* Start with empty free and used lists; chunks are created on demand by
 * emem_create_chunk(), and the pool's canary pattern is randomized here. */
165 ep_packet_mem.free_list=NULL;
166 ep_packet_mem.used_list=NULL;
168 emem_canary(ep_canary);
170 /* Initialize the capture-lifetime memory allocation pool.
171 * This function should be called only once when Ethereal or Tethereal starts
/* Same initialization for the capture-lifetime (se_) pool. */
177 se_packet_mem.free_list=NULL;
178 se_packet_mem.used_list=NULL;
180 emem_canary(se_canary);
/*
 * Allocate a fresh EMEM_PACKET_CHUNK_SIZE chunk and hook it onto
 * *free_list.  Where guard-page support exists (Windows VirtualProtect,
 * or POSIX mmap/mprotect under USE_GUARD_PAGES) the first and last page
 * inside the chunk are made inaccessible so buffer over/underruns fault
 * immediately instead of silently corrupting neighboring allocations.
 */
184 emem_create_chunk(emem_chunk_t **free_list) {
189 char *buf_end, *prot1, *prot2;
191 #elif defined(USE_GUARD_PAGES)
192 intptr_t pagesize = sysconf(_SC_PAGESIZE);
194 char *buf_end, *prot1, *prot2;
196 /* we don't have a free chunk available, so we must allocate a new one */
199 npc = g_malloc(sizeof(emem_chunk_t));
205 * MSDN documents VirtualAlloc/VirtualProtect at
206 * http://msdn.microsoft.com/library/en-us/memory/base/creating_guard_pages.asp
208 GetSystemInfo(&sysinfo);
209 pagesize = sysinfo.dwPageSize;
211 /* XXX - is MEM_COMMIT|MEM_RESERVE correct? */
212 npc->buf = VirtualAlloc(NULL, EMEM_PACKET_CHUNK_SIZE,
213 MEM_COMMIT|MEM_RESERVE, PAGE_READWRITE);
214 g_assert(npc->buf != NULL);
215 buf_end = npc->buf + EMEM_PACKET_CHUNK_SIZE;
217 /* Align our guard pages on page-sized boundaries */
/* NOTE(review): casting a pointer through (int) truncates on 64-bit
 * Windows; the mmap branch below correctly rounds via intptr_t and this
 * branch should do the same. */
218 prot1 = (char *) ((((int) npc->buf + pagesize - 1) / pagesize) * pagesize);
219 prot2 = (char *) ((((int) buf_end - (1 * pagesize)) / pagesize) * pagesize);
/* Revoke all access to the first and last full page of the chunk. */
221 ret = VirtualProtect(prot1, pagesize, PAGE_NOACCESS, &oldprot);
222 g_assert(ret == TRUE);
223 ret = VirtualProtect(prot2, pagesize, PAGE_NOACCESS, &oldprot);
224 g_assert(ret == TRUE);
/* The usable region is everything strictly between the two guard pages. */
226 npc->amount_free_init = prot2 - prot1 - pagesize;
227 npc->amount_free = npc->amount_free_init;
228 npc->free_offset_init = (prot1 - npc->buf) + pagesize;
229 npc->free_offset = npc->free_offset_init;
231 #elif defined(USE_GUARD_PAGES)
232 npc->buf = mmap(NULL, EMEM_PACKET_CHUNK_SIZE,
233 PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
234 g_assert(npc->buf != MAP_FAILED);
235 buf_end = npc->buf + EMEM_PACKET_CHUNK_SIZE;
237 /* Align our guard pages on page-sized boundaries */
238 prot1 = (char *) ((((intptr_t) npc->buf + pagesize - 1) / pagesize) * pagesize);
239 prot2 = (char *) ((((intptr_t) buf_end - (1 * pagesize)) / pagesize) * pagesize);
240 ret = mprotect(prot1, pagesize, PROT_NONE);
242 ret = mprotect(prot2, pagesize, PROT_NONE);
/* Usable region lies strictly between the two protected pages. */
245 npc->amount_free_init = prot2 - prot1 - pagesize;
246 npc->amount_free = npc->amount_free_init;
247 npc->free_offset_init = (prot1 - npc->buf) + pagesize;
248 npc->free_offset = npc->free_offset_init;
250 #else /* Is there a draft in here? */
/* No guard-page support: the entire g_malloc'd chunk is usable. */
251 npc->amount_free_init = EMEM_PACKET_CHUNK_SIZE;
252 npc->amount_free = npc->amount_free_init;
253 npc->free_offset_init = 0;
254 npc->free_offset = npc->free_offset_init;
255 npc->buf = g_malloc(EMEM_PACKET_CHUNK_SIZE);
256 #endif /* USE_GUARD_PAGES */
260 /* allocate 'size' amount of memory with an allocation lifetime until the
/*
 * Carve 'size' bytes (padded per emem_canary_pad()) out of the current
 * free-list chunk.  A random canary is written into the pad bytes after
 * the caller's data and recorded in the chunk, so ep_free_all() can
 * detect overruns.  Freed en masse at the next packet dissection.
 */
264 ep_alloc(size_t size)
267 guint8 pad = emem_canary_pad(size);
268 emem_chunk_t *free_list;
270 #ifndef EP_DEBUG_FREE
271 /* Round up to an 8 byte boundary. Make sure we have at least
272 * 8 pad bytes for our canary.
276 /* make sure we don't try to allocate too much (arbitrary limit) */
277 DISSECTOR_ASSERT(size<(EMEM_PACKET_CHUNK_SIZE>>2));
/* Make sure at least one chunk exists to carve from. */
279 emem_create_chunk(&ep_packet_mem.free_list);
281 /* oops, we need to allocate more memory to serve this request
282 * than we have free. move this node to the used list and try again
/* The chunk is also retired when its canary-slot table is full. */
284 if(size>ep_packet_mem.free_list->amount_free || ep_packet_mem.free_list->c_count >= EMEM_ALLOCS_PER_CHUNK){
286 npc=ep_packet_mem.free_list;
287 ep_packet_mem.free_list=ep_packet_mem.free_list->next;
288 npc->next=ep_packet_mem.used_list;
289 ep_packet_mem.used_list=npc;
292 emem_create_chunk(&ep_packet_mem.free_list);
294 free_list = ep_packet_mem.free_list;
/* Hand out the next 'size' bytes of the chunk. */
296 buf = free_list->buf + free_list->free_offset;
298 free_list->amount_free -= size;
299 free_list->free_offset += size;
/* Write the canary into the trailing pad bytes and record its address
 * and length so ep_free_all() can verify it later. */
301 cptr = (char *)buf + size - pad;
302 memcpy(cptr, &ep_canary, pad);
303 free_list->canary[free_list->c_count] = cptr;
304 free_list->cmp_len[free_list->c_count] = pad;
305 free_list->c_count++;
307 #else /* EP_DEBUG_FREE */
/* Debug mode: one g_malloc per allocation so Valgrind/ElectricFence can
 * track each object individually. */
310 npc=g_malloc(sizeof(emem_chunk_t));
311 npc->next=ep_packet_mem.used_list;
312 npc->amount_free=size;
314 npc->buf=g_malloc(size);
316 ep_packet_mem.used_list=npc;
317 #endif /* EP_DEBUG_FREE */
321 /* allocate 'size' amount of memory with an allocation lifetime until the
/*
 * Capture-lifetime twin of ep_alloc(): carve 'size' bytes (padded per
 * emem_canary_pad()) out of the se_ pool's current chunk, appending a
 * random canary that se_free_all() verifies.  Freed en masse when the
 * capture is closed.
 */
325 se_alloc(size_t size)
328 guint8 pad = emem_canary_pad(size);
329 emem_chunk_t *free_list;
331 #ifndef SE_DEBUG_FREE
332 /* Round up to an 8 byte boundary. Make sure we have at least
333 * 8 pad bytes for our canary.
337 /* make sure we don't try to allocate too much (arbitrary limit) */
338 DISSECTOR_ASSERT(size<(EMEM_PACKET_CHUNK_SIZE>>2));
/* Make sure at least one chunk exists to carve from. */
340 emem_create_chunk(&se_packet_mem.free_list);
342 /* oops, we need to allocate more memory to serve this request
343 * than we have free. move this node to the used list and try again
/* The chunk is also retired when its canary-slot table is full. */
345 if(size>se_packet_mem.free_list->amount_free || se_packet_mem.free_list->c_count >= EMEM_ALLOCS_PER_CHUNK){
347 npc=se_packet_mem.free_list;
348 se_packet_mem.free_list=se_packet_mem.free_list->next;
349 npc->next=se_packet_mem.used_list;
350 se_packet_mem.used_list=npc;
353 emem_create_chunk(&se_packet_mem.free_list);
355 free_list = se_packet_mem.free_list;
/* Hand out the next 'size' bytes of the chunk. */
357 buf = free_list->buf + free_list->free_offset;
359 free_list->amount_free -= size;
360 free_list->free_offset += size;
/* Write the canary into the trailing pad bytes and record its address
 * and length so se_free_all() can verify it later. */
362 cptr = (char *)buf + size - pad;
363 memcpy(cptr, &se_canary, pad);
364 free_list->canary[free_list->c_count] = cptr;
365 free_list->cmp_len[free_list->c_count] = pad;
366 free_list->c_count++;
368 #else /* SE_DEBUG_FREE */
/* Debug mode: one g_malloc per allocation so Valgrind/ElectricFence can
 * track each object individually. */
371 npc=g_malloc(sizeof(emem_chunk_t));
372 npc->next=se_packet_mem.used_list;
373 npc->amount_free=size;
375 npc->buf=g_malloc(size);
377 se_packet_mem.used_list=npc;
378 #endif /* SE_DEBUG_FREE */
/* ep_alloc() followed by a zero-fill of the whole allocation. */
384 void* ep_alloc0(size_t size) {
385 return memset(ep_alloc(size),'\0',size);
/* Duplicate a NUL-terminated string into packet-scope memory.
 * (The terminating NUL of dst is presumably written on a line not shown
 * in this view — strncpy() with count == len does not add it.) */
388 gchar* ep_strdup(const gchar* src) {
389 guint len = strlen(src);
392 dst = strncpy(ep_alloc(len+1), src, len);
/* Copy at most 'len' characters of src into packet-scope memory,
 * stopping early at a NUL. */
399 gchar* ep_strndup(const gchar* src, size_t len) {
400 gchar* dst = ep_alloc(len+1);
/* NOTE(review): the conditions are ordered so that src[i] is evaluated
 * before i < len; when src is exactly 'len' non-NUL characters with no
 * terminator, this reads src[len], one byte past the caller's buffer.
 * The tests should be swapped: i < len && src[i]. */
403 for (i = 0; src[i] && i < len; i++)
/* Duplicate 'len' raw bytes into packet-scope memory. */
411 void* ep_memdup(const void* src, size_t len) {
412 return memcpy(ep_alloc(len), src, len);
/* vprintf-style formatting into packet-scope memory.
 * (ap2 is presumably a copy of ap made on a line not shown here, since
 * ap is consumed by g_printf_string_upper_bound().) */
415 gchar* ep_strdup_vprintf(const gchar* fmt, va_list ap) {
/* Upper-bound the formatted length, then format into a fresh buffer.
 * NOTE(review): g_vsnprintf is passed 'len' as the buffer size while
 * len+1 bytes were allocated — confirm against the GLib version in use
 * whether g_printf_string_upper_bound() already counts the NUL. */
422 len = g_printf_string_upper_bound(fmt, ap);
424 dst = ep_alloc(len+1);
425 g_vsnprintf (dst, len, fmt, ap2);
/* printf-style formatting into packet-scope memory; thin varargs wrapper
 * around ep_strdup_vprintf(). */
431 gchar* ep_strdup_printf(const gchar* fmt, ...) {
436 dst = ep_strdup_vprintf(fmt, ap);
/*
 * Split 'string' on separator 'sep' into at most 'max_tokens' tokens,
 * returning a NULL-terminated vector of pointers into a packet-scope
 * copy of the string.  (Several lines of the tokenizing state machine
 * are not visible in this view; comments describe only what is shown.)
 */
441 gchar** ep_strsplit(const gchar* string, const gchar* sep, int max_tokens) {
449 enum { AT_START, IN_PAD, IN_TOKEN } state;
/* Work on an ep_ copy so the returned tokens share its lifetime. */
457 s = splitted = ep_strdup(string);
458 str_len = strlen(splitted);
459 sep_len = strlen(sep);
/* Non-positive max_tokens means "no limit". */
461 if (max_tokens < 1) max_tokens = INT_MAX;
/* First pass: locate each separator occurrence and count tokens (the
 * body of the inner per-character loop is not shown in this view). */
466 while (tokens <= (guint)max_tokens && ( s = strstr(s,sep) )) {
469 for(i=0; i < sep_len; i++ )
/* One extra slot for the NULL terminator of the vector. */
476 vec = ep_alloc_array(gchar*,tokens+1);
/* Second pass: walk the copy and record the start of each token. */
479 for (i=0; i< str_len; i++) {
482 switch(splitted[i]) {
487 vec[curr_tok] = &(splitted[i]);
493 switch(splitted[i]) {
500 switch(splitted[i]) {
502 vec[curr_tok] = &(splitted[i]);
511 vec[curr_tok] = NULL;
/* se_alloc() followed by a zero-fill of the whole allocation. */
518 void* se_alloc0(size_t size) {
519 return memset(se_alloc(size),'\0',size);
522 /* If str is NULL, just return the string "<NULL>" so that the callers don't
523 * have to bother checking it.
/* Duplicate a NUL-terminated string into capture-scope memory.
 * (As with ep_strdup(), the terminating NUL of dst is presumably written
 * on a line not shown in this view.) */
525 gchar* se_strdup(const gchar* src) {
534 dst = strncpy(se_alloc(len+1), src, len);
/* Copy at most 'len' characters of src into capture-scope memory,
 * stopping early at a NUL. */
541 gchar* se_strndup(const gchar* src, size_t len) {
542 gchar* dst = se_alloc(len+1);
/* NOTE(review): same issue as ep_strndup() — src[i] is evaluated before
 * i < len, so an unterminated 'len'-character src causes a one-byte
 * over-read; the tests should be swapped: i < len && src[i]. */
545 for (i = 0; src[i] && i < len; i++)
/* Duplicate 'len' raw bytes into capture-scope memory. */
553 void* se_memdup(const void* src, size_t len) {
554 return memcpy(se_alloc(len), src, len);
/* vprintf-style formatting into capture-scope memory.
 * (ap2 is presumably a copy of ap made on a line not shown here.) */
557 gchar* se_strdup_vprintf(const gchar* fmt, va_list ap) {
/* NOTE(review): as in ep_strdup_vprintf(), g_vsnprintf is passed 'len'
 * while len+1 bytes were allocated — confirm the GLib semantics of
 * g_printf_string_upper_bound() for the version in use. */
564 len = g_printf_string_upper_bound(fmt, ap);
566 dst = se_alloc(len+1);
567 g_vsnprintf (dst, len, fmt, ap2);
/* printf-style formatting into capture-scope memory; thin varargs wrapper
 * around se_strdup_vprintf(). */
573 gchar* se_strdup_printf(const gchar* fmt, ...) {
578 dst = se_strdup_vprintf(fmt, ap);
583 /* release all allocated memory back to the pool.
/*
 * Recycle every ep_ chunk: move used chunks back to the free list,
 * verify each recorded canary (aborting via g_error() on corruption),
 * and rewind the carve offsets so the chunks can be reused.  In
 * EP_DEBUG_FREE mode the per-allocation buffers are freed individually.
 */
591 /* move all used chunks over to the free list */
592 while(ep_packet_mem.used_list){
593 npc=ep_packet_mem.used_list;
594 ep_packet_mem.used_list=ep_packet_mem.used_list->next;
595 npc->next=ep_packet_mem.free_list;
596 ep_packet_mem.free_list=npc;
599 /* clear them all out */
600 npc = ep_packet_mem.free_list;
601 while (npc != NULL) {
602 #ifndef EP_DEBUG_FREE
/* Check every allocation's trailing canary before reusing the chunk. */
603 for (i = 0; i < npc->c_count; i++) {
604 if (memcmp(npc->canary[i], &ep_canary, npc->cmp_len[i]) != 0)
605 g_error("Per-packet memory corrupted.");
/* Rewind the chunk so it can be carved from again. */
608 npc->amount_free = npc->amount_free_init;
609 npc->free_offset = npc->free_offset_init;
611 #else /* EP_DEBUG_FREE */
/* Debug mode: free each individually-allocated buffer outright. */
612 emem_chunk_t *next = npc->next;
617 #endif /* EP_DEBUG_FREE */
624 /* release all allocated memory back to the pool.
/*
 * Capture-scope twin of ep_free_all(): recycle every se_ chunk, verify
 * each recorded canary (aborting via g_error() on corruption), and
 * rewind the carve offsets.  In SE_DEBUG_FREE mode the per-allocation
 * buffers are freed individually.
 */
632 /* move all used chunks over to the free list */
633 while(se_packet_mem.used_list){
634 npc=se_packet_mem.used_list;
635 se_packet_mem.used_list=se_packet_mem.used_list->next;
636 npc->next=se_packet_mem.free_list;
637 se_packet_mem.free_list=npc;
640 /* clear them all out */
641 npc = se_packet_mem.free_list;
642 while (npc != NULL) {
643 #ifndef SE_DEBUG_FREE
/* Check every allocation's trailing canary before reusing the chunk. */
644 for (i = 0; i < npc->c_count; i++) {
645 if (memcmp(npc->canary[i], &se_canary, npc->cmp_len[i]) != 0)
646 g_error("Per-session memory corrupted.");
/* Rewind the chunk so it can be carved from again. */
649 npc->amount_free = npc->amount_free_init;
650 npc->free_offset = npc->free_offset_init;
652 #else /* SE_DEBUG_FREE */
/* Debug mode: free each individually-allocated buffer outright. */
653 emem_chunk_t *next = npc->next;
658 #endif /* SE_DEBUG_FREE */
/* Create a new stack in packet-scope memory: a pointer to its (initially
 * zeroed) top frame. */
667 ep_stack_t ep_stack_new(void) {
668 ep_stack_t s = ep_new(struct _ep_stack_frame_t*);
669 *s = ep_new0(struct _ep_stack_frame_t);
673 /* for ep_stack_t we'll keep the popped frames so we reuse them instead
674 of allocating new ones.
/* Push 'data' onto the stack, allocating a new frame when no previously
 * popped frame is available for reuse (the reuse branch and the linking
 * of 'frame' above 'head' are on lines not shown in this view). */
678 void* ep_stack_push(ep_stack_t stack, void* data) {
679 struct _ep_stack_frame_t* frame;
680 struct _ep_stack_frame_t* head = (*stack);
685 frame = ep_new(struct _ep_stack_frame_t);
691 frame->payload = data;
697 void* ep_stack_pop(ep_stack_t stack) {
699 if ((*stack)->below) {
700 (*stack) = (*stack)->below;
701 return (*stack)->above->payload;