2 * Copyright (c) 2010 Kungliga Tekniska Högskolan
3 * (Royal Institute of Technology, Stockholm, Sweden).
6 * Portions Copyright (c) 2010 Apple Inc. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
19 * 3. Neither the name of the Institute nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
/* Next type id handed out for dynamically created types. */
static heim_base_atomic_integer_type tidglobal = HEIM_TID_USER;

    heim_base_atomic_integer_type ref_cnt;  /* reference count; the max value marks a permanent object */
    HEIM_TAILQ_ENTRY(heim_base) autorel;    /* linkage on an autorelease pool list */
    heim_auto_release_t autorelpool;        /* pool this object currently sits on, or NULL */
    uintptr_t isaextra[3];                  /* private per-type scratch slots (see _heim_get_isaextra) */
/* specialized version of base: header for heim_alloc() objects, which carry an ad-hoc destructor */
struct heim_base_mem {
    heim_base_atomic_integer_type ref_cnt;  /* reference count, as in struct heim_base */
    HEIM_TAILQ_ENTRY(heim_base) autorel;    /* autorelease pool linkage */
    heim_auto_release_t autorelpool;        /* owning autorelease pool, or NULL */
    void (*dealloc)(void *);                /* user-supplied destructor; may be NULL */
    uintptr_t isaextra[1];                  /* one scratch slot remains after the dealloc pointer */
/*
 * The heim_base header sits immediately before the user-visible pointer;
 * these macros translate between the two representations.
 */
#define PTR2BASE(ptr) (((struct heim_base *)ptr) - 1)
#define BASE2PTR(ptr) ((void *)(((struct heim_base *)ptr) + 1))

#ifdef HEIM_BASE_NEED_ATOMIC_MUTEX
/* Global lock backing the atomic helpers on platforms without native atomics. */
HEIMDAL_MUTEX _heim_base_mutex = HEIMDAL_MUTEX_INITIALIZER;
 * Auto release structure
struct heim_auto_release {
    HEIM_TAILQ_HEAD(, heim_base) pool;      /* objects awaiting release */
    HEIMDAL_MUTEX pool_mutex;               /* protects the pool list */
    struct heim_auto_release *parent;       /* enclosing pool on this thread, or NULL */
79 * Retain object (i.e., take a reference)
 * @param object to be retained, NULL is ok
83 * @return the same object as passed in
heim_retain(void *ptr)
    struct heim_base *p = NULL;
    if (ptr == NULL || heim_base_is_tagged(ptr))    /* tagged (immediate) values carry no refcount */
    if (heim_base_atomic_load(&p->ref_cnt) == heim_base_atomic_integer_max)  /* permanent: count never changes */
    if ((heim_base_atomic_inc(&p->ref_cnt) - 1) == 0)
	heim_abort("resurection");  /* previous count was 0: retain of an already-freed object (sic: typo in message kept) */
105 * Release object, free if reference count reaches zero
107 * @param object to be released
heim_release(void *ptr)
    heim_base_atomic_integer_type old;
    struct heim_base *p = NULL;
    if (ptr == NULL || heim_base_is_tagged(ptr))    /* tagged (immediate) values are never freed */
    if (heim_base_atomic_load(&p->ref_cnt) == heim_base_atomic_integer_max)  /* permanent object: ignore */
    old = heim_base_atomic_dec(&p->ref_cnt) + 1;    /* old = count before the decrement */
	heim_auto_release_t ar = p->autorelpool;
	/* remove from autorel pool list */
	p->autorelpool = NULL;
	HEIMDAL_MUTEX_lock(&ar->pool_mutex);
	HEIM_TAILQ_REMOVE(&ar->pool, p, autorel);
	HEIMDAL_MUTEX_unlock(&ar->pool_mutex);
	p->isa->dealloc(ptr);   /* last reference gone: run the type-specific destructor */
	heim_abort("over release");     /* count went below zero */
 * If used, the returned object must be wrapped in an autorelease pool
heim_description(heim_object_t ptr)
    struct heim_base *p = PTR2BASE(ptr);
    if (p->isa->desc == NULL)
	return heim_auto_release(heim_string_ref_create(p->isa->name, NULL));   /* fall back to the type's name */
    return heim_auto_release(p->isa->desc(ptr));    /* type-specific description */
_heim_make_permanent(heim_object_t ptr)
    struct heim_base *p = PTR2BASE(ptr);
    /* Pin the refcount at the max so retain/release become no-ops. */
    heim_base_atomic_store(&p->ref_cnt, heim_base_atomic_integer_max);
/* isa table for tagged (immediate) object types, indexed by tagged tid */
static heim_type_t tagged_isa[9] = {
    &_heim_number_object,
_heim_get_isa(heim_object_t ptr)
    if (heim_base_is_tagged(ptr)) {
	if (heim_base_is_tagged_object(ptr))
	    return tagged_isa[heim_base_tagged_object_tid(ptr)];    /* tagged objects map to a static isa */
	heim_abort("not a supported tagged type");
195 * Get type ID of object
197 * @param object object to get type id of
199 * @return type id of object
heim_get_tid(heim_object_t ptr)
    heim_type_t isa = _heim_get_isa(ptr);   /* works for both tagged and heap objects */
210 * Get hash value of object
212 * @param object object to get hash value for
214 * @return a hash value
heim_get_hash(heim_object_t ptr)
    heim_type_t isa = _heim_get_isa(ptr);
	return isa->hash(ptr);  /* type provides its own hash function */
    return (unsigned long)ptr;  /* fallback: hash on object identity */
 * Compare two objects, returns 0 if equal, can be used for qsort()
 * @param a first object to compare
 * @param b second object to compare
 * @return 0 if objects are equal
heim_cmp(heim_object_t a, heim_object_t b)
    ta = heim_get_tid(a);
    tb = heim_get_tid(b);
    isa = _heim_get_isa(a);     /* same tid: defer to the type's comparator when present */
    return isa->cmp(a, b);
    return (uintptr_t)a - (uintptr_t)b;     /* fallback: order by address */
 * Private - allocates a memory object
memory_dealloc(void *ptr)
    struct heim_base_mem *p = (struct heim_base_mem *)PTR2BASE(ptr);    /* recover header to reach the user destructor */

/* isa shared by all objects created with heim_alloc() */
struct heim_type_data memory_object = {
280 * Allocate memory for an object of anonymous type
282 * @param size size of object to be allocated
283 * @param name name of ad-hoc type
284 * @param dealloc destructor function
286 * Objects allocated with this interface do not serialize.
288 * @return allocated object
heim_alloc(size_t size, const char *name, heim_type_dealloc dealloc)
    /* XXX use posix_memalign */
    struct heim_base_mem *p = calloc(1, size + sizeof(*p));     /* header + zeroed payload in one allocation */
    p->isa = &memory_object;
    p->dealloc = dealloc;   /* may be NULL: then no user destructor runs */
_heim_create_type(const char *name,
		  heim_type_dealloc dealloc,
		  heim_type_description desc)
    type = calloc(1, sizeof(*type));
    type->tid = heim_base_atomic_inc(&tidglobal);   /* hand out a fresh, unique type id */
    type->dealloc = dealloc;
_heim_alloc_object(heim_type_t type, size_t size)
    /* XXX should use posix_memalign */
    struct heim_base *p = calloc(1, size + sizeof(*p));     /* header precedes the zeroed payload */
_heim_get_isaextra(heim_object_t ptr, size_t idx)
    struct heim_base *p = NULL;
    heim_assert(ptr != NULL, "internal error");
    p = (struct heim_base *)PTR2BASE(ptr);
    if (p->isa == &memory_object)   /* heim_alloc() objects use a slot for dealloc, leaving fewer free */
    heim_assert(idx < 3, "invalid private heim_base extra data index");
    return &p->isaextra[idx];
_heim_type_get_tid(heim_type_t type)    /* accessor for a type's numeric id */
#if !defined(WIN32) && !defined(HAVE_DISPATCH_DISPATCH_H) && defined(ENABLE_PTHREAD_SUPPORT)
/* TSD key used to smuggle the callback argument into once_callback_caller(). */
static pthread_once_t once_arg_key_once = PTHREAD_ONCE_INIT;
static pthread_key_t once_arg_key;

once_arg_key_once_init(void)
    errno = pthread_key_create(&once_arg_key, NULL);
	    "Error: pthread_key_create() failed, cannot continue: %s\n",
/* Carries the user callback and its argument through pthread_once(). */
struct once_callback {

once_callback_caller(void)
    struct once_callback *once_callback = pthread_getspecific(once_arg_key);

    if (once_callback == NULL) {
	fprintf(stderr, "Error: pthread_once() calls callback on "
		"different thread?! Cannot continue.\n");
    once_callback->fn(once_callback->data);
401 * Call func once and only once
403 * @param once pointer to a heim_base_once_t
404 * @param ctx context passed to func
405 * @param func function to be called
heim_base_once_f(heim_base_once_t *once, void *ctx, void (*func)(void *))
     * With a libroken wrapper for some CAS function and a libroken yield()
     * wrapper we could make this the default implementation when we have
     * neither Grand Central nor POSIX threads.
     *
     * We could also adapt the double-checked lock pattern with CAS
     * providing the necessary memory barriers in the absence of
     * portable explicit memory barrier APIs.
     *
     * We use CAS operations in large part to provide implied memory
     *
     * State 0 means that func() has never executed.
     * State 1 means that func() is executing.
     * State 2 means that func() has completed execution.
    if (InterlockedCompareExchange(once, 1L, 0L) == 0L) {
	/* We won the race: run func() and publish completion. */
	(void)InterlockedExchange(once, 2L);
	/*
	 * The InterlockedCompareExchange is being used to fetch
	 * the current state under a full memory barrier. As long
	 * as the current state is 1 continue to spin.
	 */
	while (InterlockedCompareExchange(once, 2L, 0L) == 1L)
#elif defined(HAVE_DISPATCH_DISPATCH_H)
    dispatch_once_f(once, ctx, func);	/* GCD supplies the once semantics directly */
#elif defined(ENABLE_PTHREAD_SUPPORT)
    struct once_callback once_callback;

    once_callback.fn = func;
    once_callback.data = ctx;

    /* pthread_once() takes no argument, so pass the callback via TSD. */
    errno = pthread_once(&once_arg_key_once, once_arg_key_once_init);
	fprintf(stderr, "Error: pthread_once() failed, cannot continue: %s\n",
    errno = pthread_setspecific(once_arg_key, &once_callback);
		"Error: pthread_setspecific() failed, cannot continue: %s\n",
    errno = pthread_once(once, once_callback_caller);
	fprintf(stderr, "Error: pthread_once() failed, cannot continue: %s\n",
    /* Fallback: mutex-protected state machine, states as documented above. */
    static HEIMDAL_MUTEX mutex = HEIMDAL_MUTEX_INITIALIZER;
    HEIMDAL_MUTEX_lock(&mutex);
	HEIMDAL_MUTEX_unlock(&mutex);
	HEIMDAL_MUTEX_lock(&mutex);
	HEIMDAL_MUTEX_unlock(&mutex);
    } else if (*once == 2) {
	HEIMDAL_MUTEX_unlock(&mutex);
	HEIMDAL_MUTEX_unlock(&mutex);
	    struct timeval tv = { 0, 1000 };
	    select(0, NULL, NULL, NULL, &tv);	/* sleep ~1ms, then re-check the state */
	    HEIMDAL_MUTEX_lock(&mutex);
	HEIMDAL_MUTEX_unlock(&mutex);
498 * Abort and log the failure (using syslog)
heim_abort(const char *fmt, ...)
    heim_abortv(fmt, ap);   /* forward the varargs; heim_abortv() does not return */
511 * Abort and log the failure (using syslog)
heim_abortv(const char *fmt, va_list ap)
    static char str[1024];  /* static: avoid stack use while dying */
    vsnprintf(str, sizeof(str), fmt, ap);
    syslog(LOG_ERR, "heim_abort: %s", str);     /* log the reason before aborting */
/* Per-thread autorelease pool bookkeeping. */
static int ar_created = 0;
static HEIMDAL_thread_key ar_key;

    struct heim_auto_release *head;     /* outermost pool created on this thread */
    struct heim_auto_release *current;  /* innermost (top-of-stack) pool */
    HEIMDAL_MUTEX tls_mutex;            /* protects head/current */
ar_tls_delete(void *ptr)
    struct ar_tls *tls = ptr;
    heim_auto_release_t next = NULL;
    /* Thread exit: unwind all pools left on this thread, innermost first. */
    for (; tls->current != NULL; tls->current = next) {
	next = tls->current->parent;
	heim_release(tls->current);
init_ar_tls(void *ptr)
    /* Create the thread-specific key; ar_tls_delete runs at thread exit. */
    HEIMDAL_key_create(&ar_key, ar_tls_delete, ret);
static struct ar_tls *
    static heim_base_once_t once = HEIM_BASE_ONCE_INIT;
    heim_base_once_f(&once, NULL, init_ar_tls);     /* create the TSD key exactly once */
    arp = HEIMDAL_getspecific(ar_key);
	arp = calloc(1, sizeof(*arp));  /* first use on this thread: allocate the TLS record */
	HEIMDAL_setspecific(ar_key, arp, ret);
autorel_dealloc(void *ptr)
    heim_auto_release_t ar = ptr;
	heim_abort("autorelease pool released on thread w/o autorelease inited");
    heim_auto_release_drain(ar);    /* release everything still on the pool */
    if (!HEIM_TAILQ_EMPTY(&ar->pool))
	heim_abort("pool not empty after draining");
    HEIMDAL_MUTEX_lock(&tls->tls_mutex);
    if (tls->current != ptr)    /* pools must be destroyed innermost-first */
	heim_abort("autorelease not releaseing top pool");  /* (sic: typo in message kept byte-identical) */
    tls->current = ar->parent;  /* pop this pool off the thread's stack */
    HEIMDAL_MUTEX_unlock(&tls->tls_mutex);
autorel_cmp(void *a, void *b)   /* pools compare by identity */

autorel_hash(void *ptr)
    return (unsigned long)ptr;  /* identity hash */

/* isa for autorelease pool objects themselves */
static struct heim_type_data _heim_autorel_object = {
    HEIM_TID_AUTORELEASE,
636 * Create thread-specific object auto-release pool
638 * Objects placed on the per-thread auto-release pool (with
639 * heim_auto_release()) can be released in one fell swoop by calling
640 * heim_auto_release_drain().
heim_auto_release_create(void)
    struct ar_tls *tls = autorel_tls();
    heim_auto_release_t ar;
	heim_abort("Failed to create/get autorelease head");
    ar = _heim_alloc_object(&_heim_autorel_object, sizeof(struct heim_auto_release));
	HEIMDAL_MUTEX_lock(&tls->tls_mutex);
	if (tls->head == NULL)
	ar->parent = tls->current;  /* push the new pool onto the thread's pool stack */
	HEIMDAL_MUTEX_unlock(&tls->tls_mutex);
666 * Place the current object on the thread's auto-release pool
heim_auto_release(heim_object_t ptr)
    struct heim_base *p = NULL;
    struct ar_tls *tls = autorel_tls();
    heim_auto_release_t ar;

    if (ptr == NULL || heim_base_is_tagged(ptr))    /* tagged values need no pooling */

    /* drop from old pool */
    if ((ar = p->autorelpool) != NULL) {
	HEIMDAL_MUTEX_lock(&ar->pool_mutex);
	HEIM_TAILQ_REMOVE(&ar->pool, p, autorel);
	p->autorelpool = NULL;
	HEIMDAL_MUTEX_unlock(&ar->pool_mutex);

    if (tls == NULL || (ar = tls->current) == NULL)
	heim_abort("no auto relase pool in place, would leak");  /* (sic: typo in message kept byte-identical) */

    /* insert into the thread's innermost pool */
    HEIMDAL_MUTEX_lock(&ar->pool_mutex);
    HEIM_TAILQ_INSERT_HEAD(&ar->pool, p, autorel);
    HEIMDAL_MUTEX_unlock(&ar->pool_mutex);
703 * Release all objects on the given auto-release pool
heim_auto_release_drain(heim_auto_release_t autorel)
    /* release all elements on the tail queue */
    HEIMDAL_MUTEX_lock(&autorel->pool_mutex);
    while(!HEIM_TAILQ_EMPTY(&autorel->pool)) {
	obj = HEIM_TAILQ_FIRST(&autorel->pool);
	/* drop the lock around the release: heim_release() re-enters pool bookkeeping */
	HEIMDAL_MUTEX_unlock(&autorel->pool_mutex);
	heim_release(BASE2PTR(obj));
	HEIMDAL_MUTEX_lock(&autorel->pool_mutex);
    HEIMDAL_MUTEX_unlock(&autorel->pool_mutex);
724 * Helper for heim_path_vget() and heim_path_delete(). On success
725 * outputs the node named by the path and the parent node and key
726 * (useful for heim_path_delete()).
heim_path_vget2(heim_object_t ptr, heim_object_t *parent, heim_object_t *key,
		heim_error_t *error, va_list ap)
    heim_object_t path_element;
    heim_object_t node, next_node;
    heim_tid_t node_type;

    /* Consume one path element per iteration, descending into container nodes. */
    for (node = ptr; node != NULL; ) {
	path_element = va_arg(ap, heim_object_t);
	if (path_element == NULL) {
	node_type = heim_get_tid(node);
	    heim_abort("heim_path_get() only operates on container types");
	if (node_type == HEIM_TID_DICT) {
	    next_node = heim_dict_get_value(node, path_element);
	} else if (node_type == HEIM_TID_DB) {
	    next_node = _heim_db_get_value(node, NULL, path_element, NULL);
	} else if (node_type == HEIM_TID_ARRAY) {
	    /* Array path elements must be non-negative numbers. */
	    if (heim_get_tid(path_element) == HEIM_TID_NUMBER)
		idx = heim_number_get_int(path_element);
		    *error = heim_error_create(EINVAL,
					       "heim_path_get() path elements "
					       "for array nodes must be "
					       "numeric and positive");
	    next_node = heim_array_get_value(node, idx);
		*error = heim_error_create(EINVAL,
					   "heim_path_get() node in path "
					   "not a container type");
793 * Get a node in a heim_object tree by path
796 * @param error error (output)
797 * @param ap NULL-terminated va_list of heim_object_ts that form a path
799 * @return object (not retained) if found
801 * @addtogroup heimbase
heim_path_vget(heim_object_t ptr, heim_error_t *error, va_list ap)
    /* Discard the parent/key outputs; caller only wants the node itself. */
    return heim_path_vget2(ptr, &p, &k, error, ap);
813 * Get a node in a tree by path, with retained reference
816 * @param error error (output)
817 * @param ap NULL-terminated va_list of heim_object_ts that form a path
819 * @return retained object if found
821 * @addtogroup heimbase
heim_path_vcopy(heim_object_t ptr, heim_error_t *error, va_list ap)
    /* Same lookup as heim_path_vget(), but hand back a retained reference. */
    return heim_retain(heim_path_vget2(ptr, &p, &k, error, ap));
833 * Get a node in a tree by path
836 * @param error error (output)
837 * @param ... NULL-terminated va_list of heim_object_ts that form a path
839 * @return object (not retained) if found
841 * @addtogroup heimbase
heim_path_get(heim_object_t ptr, heim_error_t *error, ...)
    o = heim_path_vget2(ptr, &p, &k, error, ap);    /* borrowed (unretained) reference */
861 * Get a node in a tree by path, with retained reference
864 * @param error error (output)
865 * @param ... NULL-terminated va_list of heim_object_ts that form a path
867 * @return retained object if found
869 * @addtogroup heimbase
heim_path_copy(heim_object_t ptr, heim_error_t *error, ...)
    o = heim_retain(heim_path_vget2(ptr, &p, &k, error, ap));   /* retained for the caller */
889 * Create a path in a heim_object_t tree
891 * @param ptr the tree
892 * @param size the size of the heim_dict_t nodes to be created
893 * @param leaf leaf node to be added, if any
894 * @param error error (output)
895 * @param ap NULL-terminated of path component objects
897 * Create a path of heim_dict_t interior nodes in a given heim_object_t
898 * tree, as necessary, and set/replace a leaf, if given (if leaf is NULL
899 * then the leaf is not deleted).
901 * @return 0 on success, else a system error
903 * @addtogroup heimbase
heim_path_vcreate(heim_object_t ptr, size_t size, heim_object_t leaf,
		  heim_error_t *error, va_list ap)
    heim_object_t path_element = va_arg(ap, heim_object_t);
    heim_object_t next_path_element = NULL;
    heim_object_t node = ptr;
    heim_object_t next_node = NULL;
    heim_tid_t node_type;

	heim_abort("heim_path_vcreate() does not create root nodes");

    /* Walk the path, creating missing interior dict nodes as we go. */
    while (path_element != NULL) {
	next_path_element = va_arg(ap, heim_object_t);
	node_type = heim_get_tid(node);

	if (node_type == HEIM_TID_DICT) {
	    next_node = heim_dict_get_value(node, path_element);
	} else if (node_type == HEIM_TID_ARRAY) {
	    /* Array path elements must be numeric and within range. */
	    if (heim_get_tid(path_element) == HEIM_TID_NUMBER)
		idx = heim_number_get_int(path_element);
		    *error = heim_error_create(EINVAL,
					       "heim_path() path elements for "
					       "array nodes must be numeric "
	    if (idx < heim_array_get_length(node))
		next_node = heim_array_get_value(node, idx);
	} else if (node_type == HEIM_TID_DB && next_path_element != NULL) {
	    /* DBs may only appear as the final interior node. */
		*error = heim_error_create(EINVAL, "Interior node is a DB");

	if (next_path_element == NULL)

	/* Create missing interior node */
	if (next_node == NULL) {
	    next_node = heim_dict_create(size); /* no arrays or DBs, just dicts */
	    if (next_node == NULL) {
	    if (node_type == HEIM_TID_DICT) {
		ret = heim_dict_set_value(node, path_element, next_node);
	    } else if (node_type == HEIM_TID_ARRAY &&
		       heim_number_get_int(path_element) <= heim_array_get_length(node)) {
		ret = heim_array_insert_value(node,
					      heim_number_get_int(path_element),
		    *error = heim_error_create(ret, "Node in path not a "
		heim_release(next_node);    /* container now owns (or we failed); drop our ref */

	path_element = next_path_element;

    if (path_element == NULL)

    /* Finally set/insert the leaf in the last interior node reached. */
    if (node_type == HEIM_TID_DICT)
	ret = heim_dict_set_value(node, path_element, leaf);
	ret = heim_array_insert_value(node,
				      heim_number_get_int(path_element),
    if (error && !*error) {
	    *error = heim_error_create_enomem();
	    *error = heim_error_create(ret, "Could not set "
1009 * Create a path in a heim_object_t tree
1011 * @param ptr the tree
1012 * @param size the size of the heim_dict_t nodes to be created
1013 * @param leaf leaf node to be added, if any
1014 * @param error error (output)
1015 * @param ... NULL-terminated list of path component objects
1017 * Create a path of heim_dict_t interior nodes in a given heim_object_t
1018 * tree, as necessary, and set/replace a leaf, if given (if leaf is NULL
1019 * then the leaf is not deleted).
1021 * @return 0 on success, else a system error
1023 * @addtogroup heimbase
heim_path_create(heim_object_t ptr, size_t size, heim_object_t leaf,
		 heim_error_t *error, ...)
    va_start(ap, error);
    ret = heim_path_vcreate(ptr, size, leaf, error, ap);    /* variadic front-end */
1040 * Delete leaf node named by a path in a heim_object_t tree
1042 * @param ptr the tree
1043 * @param error error (output)
1044 * @param ap NULL-terminated list of path component objects
1046 * @addtogroup heimbase
heim_path_vdelete(heim_object_t ptr, heim_error_t *error, va_list ap)
    heim_object_t parent, key, child;

    child = heim_path_vget2(ptr, &parent, &key, error, ap);
    if (child != NULL) {
	/* Remove the leaf from whichever container type holds it. */
	if (heim_get_tid(parent) == HEIM_TID_DICT)
	    heim_dict_delete_key(parent, key);
	else if (heim_get_tid(parent) == HEIM_TID_DB)
	    heim_db_delete_key(parent, NULL, key, error);
	else if (heim_get_tid(parent) == HEIM_TID_ARRAY)
	    heim_array_delete_value(parent, heim_number_get_int(key));
	heim_release(child);    /* NOTE(review): balances a reference held on child in this path — verify against heim_path_vget2's retain behavior */
1067 * Delete leaf node named by a path in a heim_object_t tree
1069 * @param ptr the tree
1070 * @param error error (output)
1071 * @param ap NULL-terminated list of path component objects
1073 * @addtogroup heimbase
heim_path_delete(heim_object_t ptr, heim_error_t *error, ...)
    va_start(ap, error);
    heim_path_vdelete(ptr, error, ap);  /* variadic front-end */