2 * Ethereal memory management and garbage collection functions
7 * Ethereal - Network traffic analyzer
8 * By Gerald Combs <gerald@ethereal.com>
9 * Copyright 1998 Gerald Combs
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version 2
14 * of the License, or (at your option) any later version.
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
35 #ifdef HAVE_SYS_TIME_H
44 #include <process.h> /* getpid */
50 #include <wiretap/file_util.h>
/* When required, allocate more memory from the OS in this size chunks */
#define EMEM_PACKET_CHUNK_SIZE 10485760

/* The maximum number of allocations per chunk */
#define EMEM_ALLOCS_PER_CHUNK (EMEM_PACKET_CHUNK_SIZE / 512)

/*
 * Tools like Valgrind and ElectricFence don't work well with memchunks.
 * Uncomment the defines below to make {ep|se}_alloc() allocate each
 * object individually.
 */
/* #define EP_DEBUG_FREE 1 */
/* #define SE_DEBUG_FREE 1 */

#if GLIB_MAJOR_VERSION >= 2
/* Lazily-created GLib PRNG state, used by emem_canary() to generate
 * random canary bytes when GLib 2 is available. */
GRand *rand_state = NULL;

/* Size of the canary written after every allocation, and the size of the
 * random pattern pool the canary bytes are drawn from (padding can be up
 * to 2*EMEM_CANARY_SIZE - 1 bytes long). */
#define EMEM_CANARY_SIZE 8
#define EMEM_CANARY_DATA_SIZE (EMEM_CANARY_SIZE * 2 - 1)
/* One random canary pattern per pool: ep_ (packet scope), se_ (capture scope) */
guint8 ep_canary[EMEM_CANARY_DATA_SIZE], se_canary[EMEM_CANARY_DATA_SIZE];
/* One chunk of pool memory.  Allocations are carved sequentially out of a
 * single big buffer; each allocation's canary location and length are
 * recorded so *_free_all() can verify nothing overran its bounds. */
typedef struct _emem_chunk_t {
	struct _emem_chunk_t *next;	/* next chunk on the free or used list */
	unsigned int	amount_free;	/* bytes still available in this chunk */
	unsigned int	free_offset;	/* offset of the next allocation in buf */
	/* NOTE(review): the buffer pointer (buf) and canary count (c_count)
	 * members referenced by ep_alloc()/se_alloc() are not visible in
	 * this excerpt — confirm against the full declaration. */
	void		*canary[EMEM_ALLOCS_PER_CHUNK];	/* start of each allocation's canary */
	guint8		cmp_len[EMEM_ALLOCS_PER_CHUNK];	/* length of each canary, in bytes */
/* Header for one allocation pool: chunks with space left, and exhausted
 * chunks waiting for the next *_free_all() to recycle them. */
typedef struct _emem_header_t {
	emem_chunk_t *free_list;	/* chunks that can still satisfy requests */
	emem_chunk_t *used_list;	/* full chunks, reclaimed by *_free_all() */

/* packet-lifetime pool: freed after each packet is dissected */
static emem_header_t ep_packet_mem;
/* capture-lifetime pool: freed when the capture file is closed */
static emem_header_t se_packet_mem;
93 * Set a canary value to be placed between memchunks.
97 emem_canary(guint8 *canary) {
100 /* First, use GLib's random function if we have it */
101 #if GLIB_MAJOR_VERSION >= 2
102 if (rand_state == NULL) {
103 rand_state = g_rand_new();
105 for (i = 0; i < EMEM_CANARY_DATA_SIZE; i ++) {
106 canary[i] = (guint8) g_rand_int(rand_state);
112 /* Try /dev/urandom */
113 if (fp = eth_fopen("/dev/urandom", 0)) {
114 sz = fread(canary, EMEM_CANARY_DATA_SIZE, 1, fd);
116 if (sz == EMEM_CANARY_SIZE) {
121 /* Our last resort */
122 srandom(time(NULL) | getpid());
123 for (i = 0; i < EMEM_CANARY_DATA_SIZE; i ++) {
124 canary[i] = (guint8) random();
127 #endif /* GLIB_MAJOR_VERSION >= 2 */
131 * Given an allocation size, return the amount of padding needed for
135 emem_canary_pad (size_t allocation) {
138 pad = EMEM_CANARY_SIZE - (allocation % EMEM_CANARY_SIZE);
139 if (pad < EMEM_CANARY_SIZE)
140 pad += EMEM_CANARY_SIZE;
/* Initialize the packet-lifetime memory allocation pool.
 * This function should be called only once when Ethereal or Tethereal starts
 * up.
 */
	/* start with empty free and used chunk lists */
	ep_packet_mem.free_list=NULL;
	ep_packet_mem.used_list=NULL;

	/* choose a fresh random canary pattern for this run */
	emem_canary(ep_canary);
/* Initialize the capture-lifetime memory allocation pool.
 * This function should be called only once when Ethereal or Tethereal starts
 * up.
 */
	/* start with empty free and used chunk lists */
	se_packet_mem.free_list=NULL;
	se_packet_mem.used_list=NULL;

	/* choose a fresh random canary pattern for this run */
	emem_canary(se_canary);
/*
 * Allocate a fresh EMEM_PACKET_CHUNK_SIZE-byte chunk from the OS and make
 * it the head of FREE_LIST.  Used by ep_alloc()/se_alloc() when the free
 * list is empty or the head chunk cannot satisfy a request.
 * NOTE(review): the lines linking 'npc' into FREE_LIST and resetting its
 * canary count are not visible in this excerpt.
 */
#define EMEM_CREATE_CHUNK(FREE_LIST) \
	/* we dont have any free data, so we must allocate a new one */ \
	npc=g_malloc(sizeof(emem_chunk_t)); \
	npc->amount_free=EMEM_PACKET_CHUNK_SIZE; \
	npc->free_offset=0; \
	npc->buf=g_malloc(EMEM_PACKET_CHUNK_SIZE); \
/* allocate 'size' amount of memory with an allocation lifetime until the
 * next packet.  Memory is carved sequentially out of the head free-list
 * chunk; a random canary is appended after each allocation and verified
 * by ep_free_all().
 */
ep_alloc(size_t size)
{
	guint8 pad = emem_canary_pad(size);	/* canary/alignment padding for this request */
	emem_chunk_t *free_list;

#ifndef EP_DEBUG_FREE
	/* Round up to an 8 byte boundary.  Make sure we have at least
	 * 8 pad bytes for our canary.
	 */

	/* make sure we dont try to allocate too much (arbitrary limit) */
	DISSECTOR_ASSERT(size<(EMEM_PACKET_CHUNK_SIZE>>2));

	/* lazily create the first chunk if the free list is empty */
	EMEM_CREATE_CHUNK(ep_packet_mem.free_list);

	/* oops, we need to allocate more memory to serve this request
	 * than we have free. move this node to the used list and try again
	 * (a chunk is also retired once its canary table is full)
	 */
	if(size>ep_packet_mem.free_list->amount_free || ep_packet_mem.free_list->c_count >= EMEM_ALLOCS_PER_CHUNK){
		/* unlink the exhausted chunk and push it on the used list */
		npc=ep_packet_mem.free_list;
		ep_packet_mem.free_list=ep_packet_mem.free_list->next;
		npc->next=ep_packet_mem.used_list;
		ep_packet_mem.used_list=npc;

	/* allocate a replacement chunk if the retirement emptied the list */
	EMEM_CREATE_CHUNK(ep_packet_mem.free_list);

	free_list = ep_packet_mem.free_list;

	/* hand out the next free bytes of the head chunk */
	buf = free_list->buf + free_list->free_offset;

	free_list->amount_free -= size;
	free_list->free_offset += size;

	/* write the canary pattern into the trailing pad bytes and record
	 * where it is, so ep_free_all() can check it was not overwritten */
	cptr = (char *)buf + size - pad;
	memcpy(cptr, &ep_canary, pad);
	free_list->canary[free_list->c_count] = cptr;
	free_list->cmp_len[free_list->c_count] = pad;
	free_list->c_count++;

#else /* EP_DEBUG_FREE */
	/* debug build: one g_malloc per allocation so Valgrind and
	 * ElectricFence can track every object individually */
	npc=g_malloc(sizeof(emem_chunk_t));
	npc->next=ep_packet_mem.used_list;
	npc->amount_free=size;

	npc->buf=g_malloc(size);

	ep_packet_mem.used_list=npc;
#endif /* EP_DEBUG_FREE */
/* allocate 'size' amount of memory with an allocation lifetime until the
 * capture file is closed.  Mirrors ep_alloc() but draws from the
 * capture-scope pool and uses the se_ canary pattern.
 */
se_alloc(size_t size)
{
	guint8 pad = emem_canary_pad(size);	/* canary/alignment padding for this request */
	emem_chunk_t *free_list;

#ifndef SE_DEBUG_FREE
	/* Round up to an 8 byte boundary.  Make sure we have at least
	 * 8 pad bytes for our canary.
	 */

	/* make sure we dont try to allocate too much (arbitrary limit) */
	DISSECTOR_ASSERT(size<(EMEM_PACKET_CHUNK_SIZE>>2));

	/* lazily create the first chunk if the free list is empty */
	EMEM_CREATE_CHUNK(se_packet_mem.free_list);

	/* oops, we need to allocate more memory to serve this request
	 * than we have free. move this node to the used list and try again
	 * (a chunk is also retired once its canary table is full)
	 */
	if(size>se_packet_mem.free_list->amount_free || se_packet_mem.free_list->c_count >= EMEM_ALLOCS_PER_CHUNK){
		/* unlink the exhausted chunk and push it on the used list */
		npc=se_packet_mem.free_list;
		se_packet_mem.free_list=se_packet_mem.free_list->next;
		npc->next=se_packet_mem.used_list;
		se_packet_mem.used_list=npc;

	/* allocate a replacement chunk if the retirement emptied the list */
	EMEM_CREATE_CHUNK(se_packet_mem.free_list);

	free_list = se_packet_mem.free_list;

	/* hand out the next free bytes of the head chunk */
	buf = free_list->buf + free_list->free_offset;

	free_list->amount_free -= size;
	free_list->free_offset += size;

	/* write the canary pattern into the trailing pad bytes and record
	 * where it is, so se_free_all() can check it was not overwritten */
	cptr = (char *)buf + size - pad;
	memcpy(cptr, &se_canary, pad);
	free_list->canary[free_list->c_count] = cptr;
	free_list->cmp_len[free_list->c_count] = pad;
	free_list->c_count++;

#else /* SE_DEBUG_FREE */
	/* debug build: one g_malloc per allocation so Valgrind and
	 * ElectricFence can track every object individually */
	npc=g_malloc(sizeof(emem_chunk_t));
	npc->next=se_packet_mem.used_list;
	npc->amount_free=size;

	npc->buf=g_malloc(size);

	se_packet_mem.used_list=npc;
#endif /* SE_DEBUG_FREE */
/* Allocate 'size' bytes of packet-lifetime memory, zero-initialized. */
void* ep_alloc0(size_t size) {
	void *block = ep_alloc(size);

	memset(block, 0, size);
	return block;
}
311 gchar* ep_strdup(const gchar* src) {
312 guint len = strlen(src);
315 dst = strncpy(ep_alloc(len+1), src, len);
/* Return a packet-lifetime copy of at most 'len' characters of 'src',
 * always NUL-terminated. */
gchar* ep_strndup(const gchar* src, size_t len) {
	gchar* dst = ep_alloc(len+1);

	/* copy until the source NUL or 'len' characters, whichever first.
	 * NOTE(review): src[i] is tested before i < len, so 'src' must be
	 * NUL-terminated — confirm callers never pass unterminated buffers. */
	for (i = 0; src[i] && i < len; i++)
334 guint8* ep_memdup(const guint8* src, size_t len) {
335 return memcpy(ep_alloc(len), src, len);
/* Format 'fmt' with the arguments in 'ap' into packet-lifetime memory
 * and return the resulting string. */
gchar* ep_strdup_vprintf(const gchar* fmt, va_list ap) {

	/* upper bound on the formatted length (GLib includes the NUL) */
	len = g_printf_string_upper_bound(fmt, ap);

	dst = ep_alloc(len+1);
	/* NOTE(review): formats from 'ap2' — presumably a copy of 'ap' made
	 * on a line not visible in this excerpt; confirm va_copy/G_VA_COPY
	 * is used so 'ap' remains valid for the caller. */
	g_vsnprintf (dst, len, fmt, ap2);
/* printf-style formatting into packet-lifetime memory; varargs wrapper
 * around ep_strdup_vprintf(). */
gchar* ep_strdup_printf(const gchar* fmt, ...) {

	/* NOTE(review): va_start/va_end lines are not visible in this excerpt */
	dst = ep_strdup_vprintf(fmt, ap);
/* Split 'string' on separator 'sep' into at most 'max_tokens' tokens,
 * returning a NULL-terminated vector of pointers into a packet-lifetime
 * copy of the string.  Both the vector and the copy live in the ep pool. */
gchar** ep_strsplit(const gchar* string, const gchar* sep, int max_tokens) {
	/* scanner states: before any token / inside separator padding /
	 * inside a token */
	enum { AT_START, IN_PAD, IN_TOKEN } state;

	/* work on a pool-allocated copy so we can overwrite separators */
	s = splitted = ep_strdup(string);
	str_len = strlen(splitted);
	sep_len = strlen(sep);

	/* non-positive max_tokens means "no limit" */
	if (max_tokens < 1) max_tokens = INT_MAX;

	/* first pass: count tokens and blank out each separator occurrence */
	while (tokens <= (guint)max_tokens && ( s = strstr(s,sep) )) {
		for(i=0; i < sep_len; i++ )

	/* one slot per token plus the terminating NULL */
	vec = ep_alloc_array(gchar*,tokens+1);

	/* second pass: record the start of each token in the blanked copy.
	 * NOTE(review): the case labels and state transitions of this
	 * switch-based scanner are not visible in this excerpt. */
	for (i=0; i< str_len; i++) {
		switch(splitted[i]) {
			vec[curr_tok] = &(splitted[i]);
		switch(splitted[i]) {
		switch(splitted[i]) {
			vec[curr_tok] = &(splitted[i]);

	/* NULL-terminate the vector */
	vec[curr_tok] = NULL;
/* Allocate 'size' bytes of capture-lifetime memory, zero-initialized. */
void* se_alloc0(size_t size) {
	void *block = se_alloc(size);

	memset(block, 0, size);
	return block;
}
/* If str is NULL, just return the string "<NULL>" so that the callers dont
 * have to bother checking it.
 */
/* Return a capture-lifetime copy of 'src' (or "<NULL>" for a NULL input). */
gchar* se_strdup(const gchar* src) {

	/* NOTE(review): the NULL check, 'len'/'dst' declarations and the
	 * NUL-termination/return lines are not visible in this excerpt;
	 * strncpy with count 'len' does not terminate by itself. */
	dst = strncpy(se_alloc(len+1), src, len);
/* Return a capture-lifetime copy of at most 'len' characters of 'src',
 * always NUL-terminated. */
gchar* se_strndup(const gchar* src, size_t len) {
	gchar* dst = se_alloc(len+1);

	/* copy until the source NUL or 'len' characters, whichever first.
	 * NOTE(review): src[i] is tested before i < len, so 'src' must be
	 * NUL-terminated — confirm callers never pass unterminated buffers. */
	for (i = 0; src[i] && i < len; i++)
476 guint8* se_memdup(const guint8* src, size_t len) {
477 return memcpy(se_alloc(len), src, len);
/* Format 'fmt' with the arguments in 'ap' into capture-lifetime memory
 * and return the resulting string. */
gchar* se_strdup_vprintf(const gchar* fmt, va_list ap) {

	/* upper bound on the formatted length (GLib includes the NUL) */
	len = g_printf_string_upper_bound(fmt, ap);

	dst = se_alloc(len+1);
	/* NOTE(review): formats from 'ap2' — presumably a copy of 'ap' made
	 * on a line not visible in this excerpt; confirm va_copy/G_VA_COPY
	 * is used so 'ap' remains valid for the caller. */
	g_vsnprintf (dst, len, fmt, ap2);
/* printf-style formatting into capture-lifetime memory; varargs wrapper
 * around se_strdup_vprintf(). */
gchar* se_strdup_printf(const gchar* fmt, ...) {

	/* NOTE(review): va_start/va_end lines are not visible in this excerpt */
	dst = se_strdup_vprintf(fmt, ap);
/* release all allocated memory back to the pool.
 * Called after each packet: verifies every canary, then recycles the
 * chunks for reuse instead of returning them to the OS.
 */

	/* move all used chunks over to the free list */
	while(ep_packet_mem.used_list){
		npc=ep_packet_mem.used_list;
		ep_packet_mem.used_list=ep_packet_mem.used_list->next;
		npc->next=ep_packet_mem.free_list;
		ep_packet_mem.free_list=npc;

	/* clear them all out */
	npc = ep_packet_mem.free_list;
	while (npc != NULL) {
#ifndef EP_DEBUG_FREE
		/* every canary must still hold the pattern ep_alloc() wrote;
		 * a mismatch means some dissector overran its allocation */
		for (i = 0; i < npc->c_count; i++) {
			g_assert(memcmp(npc->canary[i], &ep_canary, npc->cmp_len[i]) == 0);

		/* reset the chunk so its whole buffer is available again */
		npc->amount_free=EMEM_PACKET_CHUNK_SIZE;

#else /* EP_DEBUG_FREE */
		/* debug build: each allocation is its own chunk — free it */
		emem_chunk_t *next = npc->next;

#endif /* EP_DEBUG_FREE */
/* release all allocated memory back to the pool.
 * Called when the capture file is closed: verifies every canary, then
 * recycles the chunks for reuse instead of returning them to the OS.
 */

	/* move all used chunks over to the free list */
	while(se_packet_mem.used_list){
		npc=se_packet_mem.used_list;
		se_packet_mem.used_list=se_packet_mem.used_list->next;
		npc->next=se_packet_mem.free_list;
		se_packet_mem.free_list=npc;

	/* clear them all out */
	npc = se_packet_mem.free_list;
	while (npc != NULL) {
#ifndef SE_DEBUG_FREE
		/* every canary must still hold the pattern se_alloc() wrote;
		 * a mismatch means some caller overran its allocation */
		for (i = 0; i < npc->c_count; i++) {
			g_assert(memcmp(npc->canary[i], &se_canary, npc->cmp_len[i]) == 0);

		/* reset the chunk so its whole buffer is available again */
		npc->amount_free=EMEM_PACKET_CHUNK_SIZE;

#else /* SE_DEBUG_FREE */
		/* debug build: each allocation is its own chunk — free it */
		emem_chunk_t *next = npc->next;

#endif /* SE_DEBUG_FREE */
/* Create a new packet-lifetime stack: a pool-allocated pointer to the
 * current head frame, initialized with one zeroed sentinel frame. */
ep_stack_t ep_stack_new(void) {
	ep_stack_t s = ep_new(struct _ep_stack_frame_t*);

	*s = ep_new0(struct _ep_stack_frame_t);
/* for ep_stack_t we'll keep the popped frames so we reuse them instead
   of allocating new ones.
*/

/* Push 'data' onto 'stack' and return it. */
void* ep_stack_push(ep_stack_t stack, void* data) {
	struct _ep_stack_frame_t* frame;
	struct _ep_stack_frame_t* head = (*stack);

	/* NOTE(review): presumably a previously popped frame above 'head'
	 * is reused when available — that branch is not visible in this
	 * excerpt; this path allocates a fresh frame from the ep pool. */
	frame = ep_new(struct _ep_stack_frame_t);

	frame->payload = data;
/* Pop the top frame from 'stack' and return its payload.  The popped
 * frame stays linked above the new head so ep_stack_push() can reuse it. */
void* ep_stack_pop(ep_stack_t stack) {

	/* only move down if there is a frame below the head */
	if ((*stack)->below) {
		(*stack) = (*stack)->below;
		/* the old head is now (*stack)->above; hand back its payload */
		return (*stack)->above->payload;