static void tdb_munmap(TDB_CONTEXT *tdb)
{
+ if (tdb->flags & TDB_INTERNAL)
+ return;
+
#ifdef HAVE_MMAP
- if (tdb->map_ptr) munmap(tdb->map_ptr, tdb->map_size);
+ if (tdb->map_ptr)
+ munmap(tdb->map_ptr, tdb->map_size);
#endif
tdb->map_ptr = NULL;
}
static void tdb_mmap(TDB_CONTEXT *tdb)
{
+ if (tdb->flags & TDB_INTERNAL)
+ return;
+
#ifdef HAVE_MMAP
if (!(tdb->flags & TDB_NOMMAP)) {
tdb->map_ptr = mmap(NULL, tdb->map_size,
static void *convert(void *buf, u32 size)
{
u32 i, *p = buf;
- for (i = 0; i < size / 4; i++) p[i] = TDB_BYTEREV(p[i]);
+ for (i = 0; i < size / 4; i++)
+ p[i] = TDB_BYTEREV(p[i]);
return buf;
}
#define DOCONV() (tdb->flags & TDB_CONVERT)
{
struct flock fl;
- if (tdb->flags & TDB_NOLOCK) return 0;
- if (tdb->read_only) return -1;
+ if (tdb->flags & TDB_NOLOCK)
+ return 0;
+ if (tdb->read_only)
+ return -1;
fl.l_type = rw_type;
fl.l_whence = SEEK_SET;
list, ltype));
return -1;
}
- if (tdb->flags & TDB_NOLOCK) return 0;
+ if (tdb->flags & TDB_NOLOCK)
+ return 0;
/* Since fcntl locks don't nest, we do a lock for the first one,
and simply bump the count for future ones */
/* unlock the database: returns void because it's too late for errors. */
static void tdb_unlock(TDB_CONTEXT *tdb, int list, int ltype)
{
- if (tdb->flags & TDB_NOLOCK) return;
+ if (tdb->flags & TDB_NOLOCK)
+ return;
/* Sanity checks */
- if (list < -1 || list >= (int)tdb->header.hash_size) return;
- if (tdb->locked[list+1].count==0) return;
+ if (list < -1 || list >= (int)tdb->header.hash_size)
+ return;
+ if (tdb->locked[list+1].count==0)
+ return;
if (tdb->locked[list+1].count == 1) {
/* Down to last nested lock: unlock underneath */
- if (tdb->header.rwlocks) tdb_spinunlock(tdb, list, ltype);
- else tdb_brlock(tdb, FREELIST_TOP+4*list, F_UNLCK, F_SETLKW, 0);
+ if (tdb->header.rwlocks)
+ tdb_spinunlock(tdb, list, ltype);
+ else
+ tdb_brlock(tdb, FREELIST_TOP+4*list, F_UNLCK, F_SETLKW, 0);
}
tdb->locked[list+1].count--;
}
static int tdb_oob(TDB_CONTEXT *tdb, tdb_off len, int probe)
{
struct stat st;
- if (len <= tdb->map_size) return 0;
- if (tdb->flags & TDB_INTERNAL) return 0;
+ if (len <= tdb->map_size)
+ return 0;
+ if (tdb->flags & TDB_INTERNAL)
+ return 0;
if (fstat(tdb->fd, &st) == -1)
return TDB_ERRCODE(TDB_ERR_IO, -1);
/* write a lump of data at a specified offset */
static int tdb_write(TDB_CONTEXT *tdb, tdb_off off, void *buf, tdb_len len)
{
- if (tdb_oob(tdb, off + len, 0) != 0) return -1;
+ if (tdb_oob(tdb, off + len, 0) != 0)
+ return -1;
- if (tdb->map_ptr) memcpy(off + (char *)tdb->map_ptr, buf, len);
+ if (tdb->map_ptr)
+ memcpy(off + (char *)tdb->map_ptr, buf, len);
else if (lseek(tdb->fd, off, SEEK_SET) != off
|| write(tdb->fd, buf, len) != (ssize_t)len) {
TDB_LOG((tdb, 0,"tdb_write failed at %d len=%d (%s)\n",
/* read a lump of data at a specified offset, maybe convert */
static int tdb_read(TDB_CONTEXT *tdb,tdb_off off,void *buf,tdb_len len,int cv)
{
- if (tdb_oob(tdb, off + len, 0) != 0) return -1;
+ if (tdb_oob(tdb, off + len, 0) != 0)
+ return -1;
- if (tdb->map_ptr) memcpy(buf, off + (char *)tdb->map_ptr, len);
+ if (tdb->map_ptr)
+ memcpy(buf, off + (char *)tdb->map_ptr, len);
else if (lseek(tdb->fd, off, SEEK_SET) != off
|| read(tdb->fd, buf, len) != (ssize_t)len) {
TDB_LOG((tdb, 0,"tdb_read failed at %d len=%d (%s)\n",
off, len, strerror(errno)));
return TDB_ERRCODE(TDB_ERR_IO, -1);
}
- if (cv) convert(buf, len);
+ if (cv)
+ convert(buf, len);
return 0;
}
/* read/write a record */
static int rec_read(TDB_CONTEXT *tdb, tdb_off offset, struct list_struct *rec)
{
- if (tdb_read(tdb, offset, rec, sizeof(*rec),DOCONV()) == -1) return -1;
+ if (tdb_read(tdb, offset, rec, sizeof(*rec),DOCONV()) == -1)
+ return -1;
if (TDB_BAD_MAGIC(rec)) {
TDB_LOG((tdb, 0,"rec_read bad magic 0x%x at offset=%d\n", rec->magic, offset));
return TDB_ERRCODE(TDB_ERR_CORRUPT, -1);
/* read a freelist record and check for simple errors */
static int rec_free_read(TDB_CONTEXT *tdb, tdb_off off, struct list_struct *rec)
{
- if (tdb_read(tdb, off, rec, sizeof(*rec),DOCONV()) == -1) return -1;
+ if (tdb_read(tdb, off, rec, sizeof(*rec),DOCONV()) == -1)
+ return -1;
if (rec->magic != TDB_FREE_MAGIC) {
TDB_LOG((tdb, 0,"rec_free_read bad magic 0x%x at offset=%d\n",
rec->magic, off));
return TDB_ERRCODE(TDB_ERR_CORRUPT, -1);
}
- if (tdb_oob(tdb, rec->next+sizeof(*rec), 0) != 0) return -1;
+ if (tdb_oob(tdb, rec->next+sizeof(*rec), 0) != 0)
+ return -1;
return 0;
}
return;
}
- if (rec_ptr) printf("hash=%d\n", i);
+ if (rec_ptr)
+ printf("hash=%d\n", i);
while (rec_ptr) {
rec_ptr = tdb_dump_record(tdb, rec_ptr);
tdb_off right, left;
/* Allocation and tailer lock */
- if (tdb_lock(tdb, -1, F_WRLCK) != 0) return -1;
+ if (tdb_lock(tdb, -1, F_WRLCK) != 0)
+ return -1;
/* set an initial tailer, so if we fail we don't leave a bogus record */
update_tailer(tdb, offset, rec);
/* now fill the file with something. This ensures that the file isn't sparse, which would be
very bad if we ran out of disk. This must be done with write, not via mmap */
memset(buf, 0x42, sizeof(buf));
- if (lseek(tdb->fd, size, SEEK_SET) != size) return -1;
+ if (lseek(tdb->fd, size, SEEK_SET) != size)
+ return -1;
while (addition) {
int n = addition>sizeof(buf)?sizeof(buf):addition;
int ret = write(tdb->fd, buf, n);
/* expand the file itself */
if (!(tdb->flags & TDB_INTERNAL)) {
- if (expand_file(tdb, tdb->map_size, size) != 0) goto fail;
+ if (expand_file(tdb, tdb->map_size, size) != 0)
+ goto fail;
}
tdb->map_size += size;
if (tdb->flags & TDB_INTERNAL)
tdb->map_ptr = realloc(tdb->map_ptr, tdb->map_size);
+ else {
+ /*
+ * We must ensure the file is remapped before adding the space
+ * to ensure consistency with systems like OpenBSD where
+ * writes and mmaps are not consistent.
+ */
- /*
- * We must ensure the file is remapped before adding the space
- * to ensure consistency with systems like OpenBSD where
- * writes and mmaps are not consistent.
- */
-
- /* We're ok if the mmap fails as we'll fallback to read/write */
- tdb_mmap(tdb);
+ /* We're ok if the mmap fails as we'll fallback to read/write */
+ tdb_mmap(tdb);
+ }
/* form a new freelist record */
memset(&rec,'\0',sizeof(rec));
/* link it into the free list */
offset = tdb->map_size - size;
- if (tdb_free(tdb, offset, &rec) == -1) goto fail;
+ if (tdb_free(tdb, offset, &rec) == -1)
+ goto fail;
tdb_unlock(tdb, -1, F_WRLCK);
return 0;
tdb_off rec_ptr, last_ptr, newrec_ptr;
struct list_struct newrec;
- if (tdb_lock(tdb, -1, F_WRLCK) == -1) return 0;
+ if (tdb_lock(tdb, -1, F_WRLCK) == -1)
+ return 0;
/* Extra bytes required for tailer */
length += sizeof(tdb_off);
last_ptr = FREELIST_TOP;
/* read in the freelist top */
- if (ofs_read(tdb, FREELIST_TOP, &rec_ptr) == -1) goto fail;
+ if (ofs_read(tdb, FREELIST_TOP, &rec_ptr) == -1)
+ goto fail;
/* keep looking until we find a freelist record big enough */
while (rec_ptr) {
- if (rec_free_read(tdb, rec_ptr, rec) == -1) goto fail;
+ if (rec_free_read(tdb, rec_ptr, rec) == -1)
+ goto fail;
if (rec->rec_len >= length) {
/* found it - now possibly split it up */
}
/* we didn't find enough space. See if we can expand the
database and if we can then try again */
- if (tdb_expand(tdb, length + sizeof(*rec)) == 0) goto again;
+ if (tdb_expand(tdb, length + sizeof(*rec)) == 0)
+ goto again;
fail:
tdb_unlock(tdb, -1, F_WRLCK);
return 0;
static int tdb_new_database(TDB_CONTEXT *tdb, int hash_size)
{
struct tdb_header *newdb;
- int size, ret;
+ int size, ret = -1;
/* We make it up in memory, then write it out if not internal */
size = sizeof(struct tdb_header) + (hash_size+1)*sizeof(tdb_off);
- if (!(newdb = calloc(size, 1))) return TDB_ERRCODE(TDB_ERR_OOM, -1);
+ if (!(newdb = calloc(size, 1)))
+ return TDB_ERRCODE(TDB_ERR_OOM, -1);
- /* Fill in the header */
- newdb->version = TDB_VERSION;
- newdb->hash_size = hash_size;
+ /* Fill in the header */
+ newdb->version = TDB_VERSION;
+ newdb->hash_size = hash_size;
#ifdef USE_SPINLOCKS
newdb->rwlocks = size;
#endif
CONVERT(*newdb);
return 0;
}
- lseek(tdb->fd, 0, SEEK_SET);
- ftruncate(tdb->fd, 0);
+ if (lseek(tdb->fd, 0, SEEK_SET) == -1)
+ goto fail;
+
+ if (ftruncate(tdb->fd, 0) == -1)
+ goto fail;
+
/* This creates an endian-converted header, as if read from disk */
CONVERT(*newdb);
memcpy(&tdb->header, newdb, sizeof(tdb->header));
/* Don't endian-convert the magic food! */
memcpy(newdb->magic_food, TDB_MAGIC_FOOD, strlen(TDB_MAGIC_FOOD)+1);
- if (write(tdb->fd, newdb, size) != size) ret = -1;
- else ret = tdb_create_rwlocks(tdb->fd, hash_size);
+ if (write(tdb->fd, newdb, size) != size)
+ ret = -1;
+ else
+ ret = tdb_create_rwlocks(tdb->fd, hash_size);
+ fail:
free(newdb);
return ret;
}
tdb_off rec_ptr;
/* read in the hash top */
- if (ofs_read(tdb, TDB_HASH_TOP(hash), &rec_ptr) == -1) return 0;
+ if (ofs_read(tdb, TDB_HASH_TOP(hash), &rec_ptr) == -1)
+ return 0;
/* keep looking until we find the right record */
while (rec_ptr) {
- if (rec_read(tdb, rec_ptr, r) == -1) return 0;
+ if (rec_read(tdb, rec_ptr, r) == -1)
+ return 0;
if (!TDB_DEAD(r) && hash==r->full_hash && key.dsize==r->key_len) {
char *k;
/* a very likely hit - read the key */
k = tdb_alloc_read(tdb, rec_ptr + sizeof(*r),
r->key_len);
- if (!k) return 0;
+ if (!k)
+ return 0;
if (memcmp(key.dptr, k, key.dsize) == 0) {
free(k);
static int tdb_keylocked(TDB_CONTEXT *tdb, u32 hash)
{
u32 i;
- if (!tdb->lockedkeys) return 1;
+ if (!tdb->lockedkeys)
+ return 1;
for (i = 0; i < tdb->lockedkeys[0]; i++)
- if (tdb->lockedkeys[i+1] == hash) return 1;
+ if (tdb->lockedkeys[i+1] == hash)
+ return 1;
return TDB_ERRCODE(TDB_ERR_NOLOCK, 0);
}
u32 hash, rec_ptr;
hash = tdb_hash(&key);
- if (!tdb_keylocked(tdb, hash)) return 0;
- if (tdb_lock(tdb, BUCKET(hash), locktype) == -1) return 0;
+ if (!tdb_keylocked(tdb, hash))
+ return 0;
+ if (tdb_lock(tdb, BUCKET(hash), locktype) == -1)
+ return 0;
if (!(rec_ptr = tdb_find(tdb, key, hash, rec)))
tdb_unlock(tdb, BUCKET(hash), locktype);
return rec_ptr;
{
u32 i;
for (i = 0; i < sizeof(emap) / sizeof(struct tdb_errname); i++)
- if (tdb->ecode == emap[i].ecode) return emap[i].estring;
+ if (tdb->ecode == emap[i].ecode)
+ return emap[i].estring;
return "Invalid error code";
}
int ret = -1;
/* find entry */
- if (!(rec_ptr = tdb_find_lock(tdb, key, F_WRLCK, &rec))) return -1;
+ if (!(rec_ptr = tdb_find_lock(tdb, key, F_WRLCK, &rec)))
+ return -1;
/* must be long enough key, data and tailer */
if (rec.rec_len < key.dsize + dbuf.dsize + sizeof(tdb_off)) {
/* update size */
rec.data_len = dbuf.dsize;
ret = rec_write(tdb, rec_ptr, &rec);
- }
- else ret = 0;
+ } else
+ ret = 0;
out:
tdb_unlock(tdb, BUCKET(rec.full_hash), F_WRLCK);
return ret;
TDB_DATA ret;
/* find which hash bucket it is in */
- if (!(rec_ptr = tdb_find_lock(tdb,key,F_RDLCK,&rec))) return tdb_null;
+ if (!(rec_ptr = tdb_find_lock(tdb,key,F_RDLCK,&rec)))
+ return tdb_null;
ret.dptr = tdb_alloc_read(tdb, rec_ptr + sizeof(rec) + rec.key_len,
rec.data_len);
{
struct list_struct rec;
- if (tdb_find_lock(tdb, key, F_RDLCK, &rec) == 0) return 0;
+ if (tdb_find_lock(tdb, key, F_RDLCK, &rec) == 0)
+ return 0;
tdb_unlock(tdb, BUCKET(rec.full_hash), F_RDLCK);
return 1;
}
static int write_lock_record(TDB_CONTEXT *tdb, tdb_off off)
{
struct tdb_traverse_lock *i;
- for (i = &tdb->travlocks; i; i = i->next) if (i->off == off) return -1;
+ for (i = &tdb->travlocks; i; i = i->next)
+ if (i->off == off)
+ return -1;
return tdb_brlock(tdb, off, F_WRLCK, F_SETLK, 1);
}
static int write_unlock_record(TDB_CONTEXT *tdb, tdb_off off)
struct tdb_traverse_lock *i;
u32 count = 0;
- if (off == 0) return 0;
- for (i = &tdb->travlocks; i; i = i->next) if (i->off == off) count++;
+ if (off == 0)
+ return 0;
+ for (i = &tdb->travlocks; i; i = i->next)
+ if (i->off == off)
+ count++;
return (count == 1 ? tdb_brlock(tdb, off, F_UNLCK, F_SETLKW, 0) : 0);
}
write_unlock_record(tdb, rec_ptr);
/* find previous record in hash chain */
- if (ofs_read(tdb, TDB_HASH_TOP(rec->full_hash), &i) == -1) return -1;
+ if (ofs_read(tdb, TDB_HASH_TOP(rec->full_hash), &i) == -1)
+ return -1;
for (last_ptr = 0; i != rec_ptr; last_ptr = i, i = lastrec.next)
- if (rec_read(tdb, i, &lastrec) == -1) return -1;
+ if (rec_read(tdb, i, &lastrec) == -1)
+ return -1;
/* unlink it: next ptr is at start of record. */
- if (last_ptr == 0) last_ptr = TDB_HASH_TOP(rec->full_hash);
- if (ofs_write(tdb, last_ptr, &rec->next) == -1) return -1;
+ if (last_ptr == 0)
+ last_ptr = TDB_HASH_TOP(rec->full_hash);
+ if (ofs_write(tdb, last_ptr, &rec->next) == -1)
+ return -1;
/* recover the space */
- if (tdb_free(tdb, rec_ptr, rec) == -1) return -1;
+ if (tdb_free(tdb, rec_ptr, rec) == -1)
+ return -1;
return 0;
}
int want_next = (tlock->off != 0);
/* No traversal allows if you've called tdb_lockkeys() */
- if (tdb->lockedkeys) return TDB_ERRCODE(TDB_ERR_NOLOCK, -1);
+ if (tdb->lockedkeys)
+ return TDB_ERRCODE(TDB_ERR_NOLOCK, -1);
/* Lock each chain from the start one. */
for (; tlock->hash < tdb->header.hash_size; tlock->hash++) {
- if (tdb_lock(tdb, tlock->hash, F_WRLCK) == -1) return -1;
+ if (tdb_lock(tdb, tlock->hash, F_WRLCK) == -1)
+ return -1;
/* No previous record? Start at top of chain. */
if (!tlock->off) {
if (want_next) {
/* We have offset of old record: grab next */
- if (rec_read(tdb, tlock->off, rec) == -1) goto fail;
+ if (rec_read(tdb, tlock->off, rec) == -1)
+ goto fail;
tlock->off = rec->next;
}
/* Iterate through chain */
while( tlock->off) {
tdb_off current;
- if (rec_read(tdb, tlock->off, rec) == -1) goto fail;
+ if (rec_read(tdb, tlock->off, rec) == -1)
+ goto fail;
if (!TDB_DEAD(rec)) {
/* Woohoo: we found one! */
lock_record(tdb, tlock->off);
unlock_record(tdb, tdb->travlocks.off);
tdb->travlocks.off = tdb->travlocks.hash = 0;
- if (tdb_next_lock(tdb, &tdb->travlocks, &rec) <= 0) return tdb_null;
+ if (tdb_next_lock(tdb, &tdb->travlocks, &rec) <= 0)
+ return tdb_null;
/* now read the key */
key.dsize = rec.key_len;
key.dptr =tdb_alloc_read(tdb,tdb->travlocks.off+sizeof(rec),key.dsize);
/* Is locked key the old key? If so, traverse will be reliable. */
if (tdb->travlocks.off) {
- if (tdb_lock(tdb,tdb->travlocks.hash,F_WRLCK)) return tdb_null;
+ if (tdb_lock(tdb,tdb->travlocks.hash,F_WRLCK))
+ return tdb_null;
if (rec_read(tdb, tdb->travlocks.off, &rec) == -1
|| !(k = tdb_alloc_read(tdb,tdb->travlocks.off+sizeof(rec),
rec.key_len))
if (!tdb->travlocks.off) {
/* No previous element: do normal find, and lock record */
tdb->travlocks.off = tdb_find_lock(tdb, oldkey, F_WRLCK, &rec);
- if (!tdb->travlocks.off) return tdb_null;
+ if (!tdb->travlocks.off)
+ return tdb_null;
tdb->travlocks.hash = BUCKET(rec.full_hash);
lock_record(tdb, tdb->travlocks.off);
}
struct list_struct rec;
int ret;
- if (!(rec_ptr = tdb_find_lock(tdb, key, F_WRLCK, &rec))) return -1;
+ if (!(rec_ptr = tdb_find_lock(tdb, key, F_WRLCK, &rec)))
+ return -1;
ret = do_delete(tdb, rec_ptr, &rec);
tdb_unlock(tdb, BUCKET(rec.full_hash), F_WRLCK);
return ret;
/* find which hash bucket it is in */
hash = tdb_hash(&key);
- if (!tdb_keylocked(tdb, hash)) return -1;
- if (tdb_lock(tdb, BUCKET(hash), F_WRLCK) == -1) return -1;
+ if (!tdb_keylocked(tdb, hash))
+ return -1;
+ if (tdb_lock(tdb, BUCKET(hash), F_WRLCK) == -1)
+ return -1;
/* check for it existing, on insert. */
if (flag == TDB_INSERT) {
}
} else {
/* first try in-place update, on modify or replace. */
- if (tdb_update(tdb, key, dbuf) == 0) goto out;
+ if (tdb_update(tdb, key, dbuf) == 0)
+ goto out;
if (flag == TDB_MODIFY && tdb->ecode == TDB_ERR_NOEXIST)
goto fail;
}
/* delete any existing record - if it doesn't exist we don't
care. Doing this first reduces fragmentation, and avoids
coalescing with `allocated' block before it's updated. */
- if (flag != TDB_INSERT) tdb_delete(tdb, key);
+ if (flag != TDB_INSERT)
+ tdb_delete(tdb, key);
/* Copy key+value *before* allocating free space in case malloc
fails and we are left with a dead spot in the tdb. */
goto fail;
/* Read hash top into next ptr */
- if (ofs_read(tdb, TDB_HASH_TOP(hash), &rec.next) == -1) goto fail;
+ if (ofs_read(tdb, TDB_HASH_TOP(hash), &rec.next) == -1)
+ goto fail;
rec.key_len = key.dsize;
rec.data_len = dbuf.dsize;
tdb.lockedkeys = NULL;
tdb.flags = tdb_flags;
- if ((open_flags & O_ACCMODE) == O_WRONLY) goto fail;
- if (hash_size == 0) hash_size = DEFAULT_HASH_SIZE;
+ if ((open_flags & O_ACCMODE) == O_WRONLY)
+ goto fail;
+ if (hash_size == 0)
+ hash_size = DEFAULT_HASH_SIZE;
if ((open_flags & O_ACCMODE) == O_RDONLY) {
tdb.read_only = 1;
/* read only databases don't do locking */
goto internal;
}
- if ((tdb.fd = open(name, open_flags, mode)) == -1) goto fail;
+ if ((tdb.fd = open(name, open_flags, mode)) == -1)
+ goto fail;
/* ensure there is only one process initialising at once */
- tdb_brlock(&tdb, GLOBAL_LOCK, F_WRLCK, F_SETLKW, 0);
-
+ if (tdb_brlock(&tdb, GLOBAL_LOCK, F_WRLCK, F_SETLKW, 0) == -1)
+ goto fail;
+
/* we need to zero database if we are the only one with it open */
if ((locked = (tdb_brlock(&tdb, ACTIVE_LOCK, F_WRLCK, F_SETLK, 0) == 0))
&& (tdb_flags & TDB_CLEAR_IF_FIRST)) {
open_flags |= O_CREAT;
- ftruncate(tdb.fd, 0);
+ if (ftruncate(tdb.fd, 0) == -1)
+ goto fail;
}
if (read(tdb.fd, &tdb.header, sizeof(tdb.header)) != sizeof(tdb.header)
|| (tdb.header.version != TDB_VERSION
&& !(rev = (tdb.header.version==TDB_BYTEREV(TDB_VERSION))))) {
/* its not a valid database - possibly initialise it */
- if (!(open_flags & O_CREAT)
- || tdb_new_database(&tdb, hash_size) == -1) goto fail;
+ if (!(open_flags & O_CREAT) || tdb_new_database(&tdb, hash_size) == -1)
+ goto fail;
rev = (tdb.flags & TDB_CONVERT);
}
- if (!rev) tdb.flags &= ~TDB_CONVERT;
+ if (!rev)
+ tdb.flags &= ~TDB_CONVERT;
else {
tdb.flags |= TDB_CONVERT;
convert(&tdb.header, sizeof(tdb.header));
}
- fstat(tdb.fd, &st);
+ if (fstat(tdb.fd, &st) == -1)
+ goto fail;
+
/* Is it already in the open list? If so, fail. */
for (i = tdbs; i; i = i->next) {
if (i->device == st.st_dev && i->inode == st.st_ino) {
/* map the database and fill in the return structure */
tdb.name = (char *)strdup(name);
+ if (!tdb.name)
+ goto fail;
tdb.map_size = st.st_size;
tdb.device = st.st_dev;
tdb.inode = st.st_ino;
- tdb.locked = calloc(tdb.header.hash_size+1, sizeof(tdb.locked[0]));
- if (!tdb.locked) goto fail;
+ tdb.locked = calloc(tdb.header.hash_size+1, sizeof(tdb.locked[0]));
+ if (!tdb.locked)
+ goto fail;
tdb_mmap(&tdb);
if (locked) {
tdb_clear_spinlocks(&tdb);
- tdb_brlock(&tdb, ACTIVE_LOCK, F_UNLCK, F_SETLK, 0);
+ if (tdb_brlock(&tdb, ACTIVE_LOCK, F_UNLCK, F_SETLK, 0) == -1)
+ goto fail;
}
/* leave this lock in place to indicate it's in use */
- tdb_brlock(&tdb, ACTIVE_LOCK, F_RDLCK, F_SETLKW, 0);
+ if (tdb_brlock(&tdb, ACTIVE_LOCK, F_RDLCK, F_SETLKW, 0) == -1)
+ goto fail;
internal:
- if (!(ret = malloc(sizeof(tdb)))) goto fail;
+ if (!(ret = malloc(sizeof(tdb))))
+ goto fail;
*ret = tdb;
- tdb_brlock(&tdb, GLOBAL_LOCK, F_UNLCK, F_SETLKW, 0);
+ if (tdb_brlock(&tdb, GLOBAL_LOCK, F_UNLCK, F_SETLKW, 0) == -1) {
+ /* ret was malloc'ed above and is not freed by the fail: path */
+ free(ret);
+ goto fail;
+ }
ret->next = tdbs;
tdbs = ret;
return ret;
fail:
- if (tdb.name) free(tdb.name);
- if (tdb.fd != -1) close(tdb.fd);
- tdb_munmap(&tdb);
+ if (tdb.map_ptr) {
+ if (tdb.flags & TDB_INTERNAL)
+ free(tdb.map_ptr);
+ else
+ tdb_munmap(&tdb);
+ }
+ if (tdb.name)
+ free(tdb.name);
+ if (tdb.fd != -1)
+ close(tdb.fd);
+ if (tdb.locked)
+ free(tdb.locked);
return NULL;
}
int ret = 0;
if (tdb->map_ptr) {
- if (tdb->flags & TDB_INTERNAL) free(tdb->map_ptr);
- else tdb_munmap(tdb);
+ if (tdb->flags & TDB_INTERNAL)
+ free(tdb->map_ptr);
+ else
+ tdb_munmap(tdb);
}
- if (tdb->name) free(tdb->name);
- if (tdb->fd != -1) {
+ if (tdb->name)
+ free(tdb->name);
+ if (tdb->fd != -1)
ret = close(tdb->fd);
- }
- if (tdb->locked) free(tdb->locked);
- if (tdb->lockedkeys) free(tdb->lockedkeys);
+ if (tdb->locked)
+ free(tdb->locked);
+ if (tdb->lockedkeys)
+ free(tdb->lockedkeys);
/* Remove from contexts list */
for (i = &tdbs; *i; i = &(*i)->next) {
u32 i;
/* There are no locks on read-only dbs */
- if (tdb->read_only) return TDB_ERRCODE(TDB_ERR_LOCK, -1);
- if (tdb->lockedkeys) return TDB_ERRCODE(TDB_ERR_NOLOCK, -1);
+ if (tdb->read_only)
+ return TDB_ERRCODE(TDB_ERR_LOCK, -1);
+ if (tdb->lockedkeys)
+ return TDB_ERRCODE(TDB_ERR_NOLOCK, -1);
for (i = 0; i < tdb->header.hash_size; i++)
if (tdb_lock(tdb, i, F_WRLCK))
break;
void tdb_unlockall(TDB_CONTEXT *tdb)
{
u32 i;
- for (i=0; i < tdb->header.hash_size; i++) tdb_unlock(tdb, i, F_WRLCK);
+ for (i=0; i < tdb->header.hash_size; i++)
+ tdb_unlock(tdb, i, F_WRLCK);
}
int tdb_lockkeys(TDB_CONTEXT *tdb, u32 number, TDB_DATA keys[])
u32 i, j, hash;
/* Can't lock more keys if already locked */
- if (tdb->lockedkeys) return TDB_ERRCODE(TDB_ERR_NOLOCK, -1);
+ if (tdb->lockedkeys)
+ return TDB_ERRCODE(TDB_ERR_NOLOCK, -1);
if (!(tdb->lockedkeys = malloc(sizeof(u32) * (number+1))))
return TDB_ERRCODE(TDB_ERR_OOM, -1);
/* First number in array is # keys */
/* Insertion sort by bucket */
for (i = 0; i < number; i++) {
hash = tdb_hash(&keys[i]);
- for (j = 0;
- j < i && BUCKET(tdb->lockedkeys[j+1]) < BUCKET(hash);
- j++);
- memmove(&tdb->lockedkeys[j+2], &tdb->lockedkeys[j+1],
- sizeof(u32) * (i-j));
+ for (j = 0;
+ j < i && BUCKET(tdb->lockedkeys[j+1]) < BUCKET(hash);
+ j++)
+ ; /* empty body: loop only advances j to the insertion point */
+ memmove(&tdb->lockedkeys[j+2], &tdb->lockedkeys[j+1],
+ sizeof(u32) * (i-j));
tdb->lockedkeys[j+1] = hash;
}
/* Finally, lock in order */