/*
   Trivial Database 2: free list/block handling
   Copyright (C) Rusty Russell 2010

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "private.h"
#include <ccan/likely/likely.h>
#include <ccan/ilog/ilog.h>
#include <assert.h>

/* The highest set bit, 1-based: fls64(1) == 1, fls64(255) == 8. */
static unsigned fls64(uint64_t val)
{
	return ilog64(val);
}
/* In which bucket would we find a particular record size? (ignoring header) */
unsigned int size_to_bucket(ntdb_len_t data_len)
{
	unsigned int bucket;

	/* We can't have records smaller than this. */
	assert(data_len >= NTDB_MIN_DATA_LEN);

	/* Ignoring the header... */
	if (data_len - NTDB_MIN_DATA_LEN <= 64) {
		/* 0 in bucket 0, 8 in bucket 1... 64 in bucket 8. */
		bucket = (data_len - NTDB_MIN_DATA_LEN) / 8;
	} else {
		/* After that we go power of 2. */
		bucket = fls64(data_len - NTDB_MIN_DATA_LEN) + 2;
	}

	if (unlikely(bucket >= NTDB_FREE_BUCKETS))
		bucket = NTDB_FREE_BUCKETS - 1;
	return bucket;
}
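/*
 * Worked example (a sketch; NTDB_MIN_DATA_LEN comes from private.h):
 *   size_to_bucket(NTDB_MIN_DATA_LEN + 24)  == 3  (linear region, 24 / 8)
 *   size_to_bucket(NTDB_MIN_DATA_LEN + 100) == 9  (fls64(100) == 7, plus 2)
 * In the power-of-2 region, doubling a size moves it up just one bucket.
 */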
ntdb_off_t first_ftable(struct ntdb_context *ntdb)
{
	return ntdb_read_off(ntdb, offsetof(struct ntdb_header, free_table));
}

ntdb_off_t next_ftable(struct ntdb_context *ntdb, ntdb_off_t ftable)
{
	return ntdb_read_off(ntdb, ftable + offsetof(struct ntdb_freetable, next));
}
enum NTDB_ERROR ntdb_ftable_init(struct ntdb_context *ntdb)
{
	/* Use reservoir sampling algorithm to select a free list at random. */
	unsigned int rnd, max = 0, count = 0;
	ntdb_off_t off;

	ntdb->ftable_off = off = first_ftable(ntdb);
	ntdb->ftable = 0;

	while (off) {
		if (NTDB_OFF_IS_ERR(off)) {
			return NTDB_OFF_TO_ERR(off);
		}

		rnd = random();
		if (rnd >= max) {
			ntdb->ftable_off = off;
			ntdb->ftable = count;
			max = rnd;
		}

		off = next_ftable(ntdb, off);
		count++;
	}
	return NTDB_SUCCESS;
}
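/*
 * Why the loop above picks roughly uniformly (informal argument): the
 * i'th table's random draw beats the running maximum with probability
 * about 1/(i+1), so after walking N tables each one ends up selected
 * with probability about 1/N.  That spreads writers across the free
 * tables without needing to count them first.
 */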
/* Offset of a given bucket. */
ntdb_off_t bucket_off(ntdb_off_t ftable_off, unsigned bucket)
{
	return ftable_off + offsetof(struct ntdb_freetable, buckets)
		+ bucket * sizeof(ntdb_off_t);
}
/* Returns NTDB_FREE_BUCKETS if none, the bucket number to search, or -ve
 * error. */
static ntdb_off_t find_free_head(struct ntdb_context *ntdb,
				 ntdb_off_t ftable_off,
				 ntdb_off_t bucket)
{
	/* Speculatively search for a non-zero bucket. */
	return ntdb_find_nonzero_off(ntdb, bucket_off(ftable_off, 0),
				     bucket, NTDB_FREE_BUCKETS);
}
static void check_list(struct ntdb_context *ntdb, ntdb_off_t b_off)
{
#ifdef CCAN_NTDB_DEBUG
	ntdb_off_t off, prev = 0, first;
	struct ntdb_free_record r;

	first = off = (ntdb_read_off(ntdb, b_off) & NTDB_OFF_MASK);
	while (off != 0) {
		ntdb_read_convert(ntdb, off, &r, sizeof(r));
		if (frec_magic(&r) != NTDB_FREE_MAGIC)
			abort();
		if (prev && frec_prev(&r) != prev)
			abort();
		prev = off;
		off = r.next;
	}

	if (first) {
		ntdb_read_convert(ntdb, first, &r, sizeof(r));
		if (frec_prev(&r) != prev)
			abort();
	}
#endif
}
/* Remove from free bucket. */
static enum NTDB_ERROR remove_from_list(struct ntdb_context *ntdb,
					ntdb_off_t b_off, ntdb_off_t r_off,
					const struct ntdb_free_record *r)
{
	ntdb_off_t off, prev_next, head;
	enum NTDB_ERROR ecode;

	/* Is this the only element in the list?  Zero out bucket, and
	 * we're done. */
	if (frec_prev(r) == r_off)
		return ntdb_write_off(ntdb, b_off, 0);

	/* off = &r->prev->next */
	off = frec_prev(r) + offsetof(struct ntdb_free_record, next);

	/* Get prev->next */
	prev_next = ntdb_read_off(ntdb, off);
	if (NTDB_OFF_IS_ERR(prev_next))
		return NTDB_OFF_TO_ERR(prev_next);

	/* If prev->next == 0, we were head: update bucket to point to next. */
	if (prev_next == 0) {
		/* We must preserve upper bits. */
		head = ntdb_read_off(ntdb, b_off);
		if (NTDB_OFF_IS_ERR(head))
			return NTDB_OFF_TO_ERR(head);

		if ((head & NTDB_OFF_MASK) != r_off) {
			return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT,
					   NTDB_LOG_ERROR,
					   "remove_from_list:"
					   " %llu head %llu on list %llu",
					   (long long)r_off,
					   (long long)head,
					   (long long)b_off);
		}
		head = ((head & ~NTDB_OFF_MASK) | r->next);
		ecode = ntdb_write_off(ntdb, b_off, head);
		if (ecode != NTDB_SUCCESS)
			return ecode;
	} else {
		/* r->prev->next = r->next */
		ecode = ntdb_write_off(ntdb, off, r->next);
		if (ecode != NTDB_SUCCESS)
			return ecode;
	}

	/* If we were the tail, off = &head->prev. */
	if (r->next == 0) {
		head = ntdb_read_off(ntdb, b_off);
		if (NTDB_OFF_IS_ERR(head))
			return NTDB_OFF_TO_ERR(head);
		head &= NTDB_OFF_MASK;
		off = head + offsetof(struct ntdb_free_record, magic_and_prev);
	} else {
		/* off = &r->next->prev */
		off = r->next + offsetof(struct ntdb_free_record,
					 magic_and_prev);
	}

#ifdef CCAN_NTDB_DEBUG
	/* *off should point back at r. */
	if ((ntdb_read_off(ntdb, off) & NTDB_OFF_MASK) != r_off) {
		return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR,
				   "remove_from_list:"
				   " %llu bad prev in list %llu",
				   (long long)r_off, (long long)b_off);
	}
#endif
	/* r->next->prev = r->prev */
	return ntdb_write_off(ntdb, off, r->magic_and_prev);
}
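/*
 * List shape this relies on (a sketch): the bucket header stores the
 * head offset, each record's "next" is 0 at the tail, and the "prev"
 * pointers wrap around so the head's prev names the tail.  Hence a
 * record whose prev is itself must be the sole element, and
 * "prev->next == 0" identifies the head without comparing against the
 * bucket header first.
 */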
/* Enqueue in this free bucket: sets coalesce if we've added 128
 * entries to it. */
static enum NTDB_ERROR enqueue_in_free(struct ntdb_context *ntdb,
				       ntdb_off_t b_off,
				       ntdb_off_t off,
				       ntdb_len_t len,
				       bool *coalesce)
{
	struct ntdb_free_record new;
	enum NTDB_ERROR ecode;
	ntdb_off_t prev, head;
	uint64_t magic = (NTDB_FREE_MAGIC << (64 - NTDB_OFF_UPPER_STEAL));

	head = ntdb_read_off(ntdb, b_off);
	if (NTDB_OFF_IS_ERR(head))
		return NTDB_OFF_TO_ERR(head);

	/* We only need to set ftable_and_len here; the list pointers are
	 * filled in below as we link the record in. */
	new.ftable_and_len = ((uint64_t)ntdb->ftable
			      << (64 - NTDB_OFF_UPPER_STEAL))
		| len;

	/* new->next = head. */
	new.next = (head & NTDB_OFF_MASK);

	/* First element?  Prev points to ourselves. */
	if (!new.next) {
		new.magic_and_prev = (magic | off);
	} else {
		/* new->prev = next->prev */
		prev = ntdb_read_off(ntdb,
				     new.next
				     + offsetof(struct ntdb_free_record,
						magic_and_prev));
		new.magic_and_prev = prev;
		if (frec_magic(&new) != NTDB_FREE_MAGIC) {
			return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT,
					   NTDB_LOG_ERROR,
					   "enqueue_in_free: %llu bad head"
					   " prev %llu",
					   (long long)new.next,
					   (long long)prev);
		}
		/* next->prev = new. */
		ecode = ntdb_write_off(ntdb, new.next
				       + offsetof(struct ntdb_free_record,
						  magic_and_prev),
				       off | magic);
		if (ecode != NTDB_SUCCESS) {
			return ecode;
		}

#ifdef CCAN_NTDB_DEBUG
		/* The tail's next pointer must still be zero. */
		prev = ntdb_read_off(ntdb, frec_prev(&new)
				     + offsetof(struct ntdb_free_record,
						next));
		if (prev != 0) {
			return ntdb_logerr(ntdb, NTDB_ERR_CORRUPT,
					   NTDB_LOG_ERROR,
					   "enqueue_in_free:"
					   " %llu bad tail next ptr %llu",
					   (long long)frec_prev(&new)
					   + offsetof(struct ntdb_free_record,
						      next),
					   (long long)prev);
		}
#endif
	}

	/* Update enqueue count, but don't set high bit: see NTDB_OFF_IS_ERR */
	if (*coalesce)
		head += (1ULL << (64 - NTDB_OFF_UPPER_STEAL));
	head &= ~(NTDB_OFF_MASK | (1ULL << 63));
	head |= off;

	ecode = ntdb_write_off(ntdb, b_off, head);
	if (ecode != NTDB_SUCCESS) {
		return ecode;
	}

	/* It's time to coalesce if counter wrapped. */
	if (*coalesce)
		*coalesce = ((head & ~NTDB_OFF_MASK) == 0);

	return ntdb_write_convert(ntdb, off, &new, sizeof(new));
}
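/*
 * The bucket header does double duty: the bits in NTDB_OFF_MASK hold
 * the head offset, the bits above them count enqueues, and bit 63 is
 * kept clear so the header never looks like an error offset (see
 * NTDB_OFF_IS_ERR).  Assuming NTDB_OFF_UPPER_STEAL leaves 7 usable
 * counter bits, the counter wraps to zero every 128 additions, which
 * is what flips *coalesce above.
 */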
static ntdb_off_t ftable_offset(struct ntdb_context *ntdb, unsigned int ftable)
{
	ntdb_off_t off;
	unsigned int i;

	if (likely(ntdb->ftable == ftable))
		return ntdb->ftable_off;

	off = first_ftable(ntdb);
	for (i = 0; i < ftable; i++) {
		if (NTDB_OFF_IS_ERR(off)) {
			break;
		}
		off = next_ftable(ntdb, off);
	}
	return off;
}
/* Note: we unlock the current bucket if we fail (-ve), or if we coalesce
 * (+ve) and need to blatt the *protect record (which is set to an error). */
static ntdb_len_t coalesce(struct ntdb_context *ntdb,
			   ntdb_off_t off, ntdb_off_t b_off,
			   ntdb_len_t data_len,
			   ntdb_off_t *protect)
{
	ntdb_off_t end;
	struct ntdb_free_record rec;
	enum NTDB_ERROR ecode;

	ntdb->stats.alloc_coalesce_tried++;
	end = off + sizeof(struct ntdb_used_record) + data_len;

	while (end < ntdb->file->map_size) {
		const struct ntdb_free_record *r;
		ntdb_off_t nb_off;
		unsigned ftable, bucket;

		r = ntdb_access_read(ntdb, end, sizeof(*r), true);
		if (NTDB_PTR_IS_ERR(r)) {
			ecode = NTDB_PTR_ERR(r);
			goto err;
		}

		if (frec_magic(r) != NTDB_FREE_MAGIC
		    || frec_ftable(r) == NTDB_FTABLE_NONE) {
			ntdb_access_release(ntdb, r);
			break;
		}

		ftable = frec_ftable(r);
		bucket = size_to_bucket(frec_len(r));
		nb_off = ftable_offset(ntdb, ftable);
		if (NTDB_OFF_IS_ERR(nb_off)) {
			ntdb_access_release(ntdb, r);
			ecode = NTDB_OFF_TO_ERR(nb_off);
			goto err;
		}
		nb_off = bucket_off(nb_off, bucket);
		ntdb_access_release(ntdb, r);

		/* We may be violating lock order here, so best effort. */
		if (ntdb_lock_free_bucket(ntdb, nb_off, NTDB_LOCK_NOWAIT)
		    != NTDB_SUCCESS) {
			ntdb->stats.alloc_coalesce_lockfail++;
			break;
		}

		/* Now we have lock, re-check. */
		ecode = ntdb_read_convert(ntdb, end, &rec, sizeof(rec));
		if (ecode != NTDB_SUCCESS) {
			ntdb_unlock_free_bucket(ntdb, nb_off);
			goto err;
		}

		if (unlikely(frec_magic(&rec) != NTDB_FREE_MAGIC)) {
			ntdb->stats.alloc_coalesce_race++;
			ntdb_unlock_free_bucket(ntdb, nb_off);
			break;
		}

		if (unlikely(frec_ftable(&rec) != ftable)
		    || unlikely(size_to_bucket(frec_len(&rec)) != bucket)) {
			ntdb->stats.alloc_coalesce_race++;
			ntdb_unlock_free_bucket(ntdb, nb_off);
			break;
		}

		/* Did we just mess up a record you were hoping to use? */
		if (end == *protect) {
			ntdb->stats.alloc_coalesce_iterate_clash++;
			*protect = NTDB_ERR_TO_OFF(NTDB_ERR_NOEXIST);
		}

		ecode = remove_from_list(ntdb, nb_off, end, &rec);
		check_list(ntdb, nb_off);
		if (ecode != NTDB_SUCCESS) {
			ntdb_unlock_free_bucket(ntdb, nb_off);
			goto err;
		}

		end += sizeof(struct ntdb_used_record) + frec_len(&rec);
		ntdb_unlock_free_bucket(ntdb, nb_off);
		ntdb->stats.alloc_coalesce_num_merged++;
	}

	/* Didn't find any adjacent free? */
	if (end == off + sizeof(struct ntdb_used_record) + data_len)
		return 0;

	/* Before we expand, check this isn't one you wanted protected? */
	if (off == *protect) {
		*protect = NTDB_ERR_TO_OFF(NTDB_ERR_EXISTS);
		ntdb->stats.alloc_coalesce_iterate_clash++;
	}

	/* OK, expand the initial record. */
	ecode = ntdb_read_convert(ntdb, off, &rec, sizeof(rec));
	if (ecode != NTDB_SUCCESS) {
		goto err;
	}

	if (frec_len(&rec) != data_len) {
		ecode = ntdb_logerr(ntdb, NTDB_ERR_CORRUPT, NTDB_LOG_ERROR,
				    "coalesce: expected data len %zu not %zu",
				    (size_t)data_len, (size_t)frec_len(&rec));
		goto err;
	}

	ecode = remove_from_list(ntdb, b_off, off, &rec);
	check_list(ntdb, b_off);
	if (ecode != NTDB_SUCCESS) {
		goto err;
	}

	/* Try locking violation first.  We don't allow coalesce recursion! */
	ecode = add_free_record(ntdb, off, end - off, NTDB_LOCK_NOWAIT, false);
	if (ecode != NTDB_SUCCESS) {
		/* Need to drop lock.  Can't rely on anything stable. */
		ntdb->stats.alloc_coalesce_lockfail++;
		*protect = NTDB_ERR_TO_OFF(NTDB_ERR_CORRUPT);

		/* We have to drop this to avoid deadlocks, so make sure record
		 * doesn't get coalesced by someone else! */
		rec.ftable_and_len = (NTDB_FTABLE_NONE
				      << (64 - NTDB_OFF_UPPER_STEAL))
			| (end - off - sizeof(struct ntdb_used_record));
		ecode = ntdb_write_off(ntdb,
				       off + offsetof(struct ntdb_free_record,
						      ftable_and_len),
				       rec.ftable_and_len);
		if (ecode != NTDB_SUCCESS) {
			goto err;
		}

		ntdb_unlock_free_bucket(ntdb, b_off);

		ecode = add_free_record(ntdb, off, end - off, NTDB_LOCK_WAIT,
					false);
		if (ecode != NTDB_SUCCESS) {
			return NTDB_ERR_TO_OFF(ecode);
		}
	} else if (NTDB_OFF_IS_ERR(*protect)) {
		/* For simplicity, we always drop the lock if they can't
		 * continue. */
		ntdb_unlock_free_bucket(ntdb, b_off);
	}
	ntdb->stats.alloc_coalesce_succeeded++;

	/* Return usable length. */
	return end - off - sizeof(struct ntdb_used_record);

err:
	/* To unify error paths, we *always* unlock bucket on error. */
	ntdb_unlock_free_bucket(ntdb, b_off);
	return NTDB_ERR_TO_OFF(ecode);
}
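/*
 * Illustrative walk: given adjacent regions [free A][free B][used C],
 * coalescing A locks B's bucket, unlinks B and advances "end" past it,
 * then stops at C because C's header fails the NTDB_FREE_MAGIC test.
 * Only after the walk is A itself unlinked and re-added at the merged
 * length, so concurrent allocators never see a half-merged record.
 */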
/* List is locked: we unlock it. */
static enum NTDB_ERROR coalesce_list(struct ntdb_context *ntdb,
				     ntdb_off_t ftable_off,
				     ntdb_off_t b_off,
				     unsigned int limit)
{
	enum NTDB_ERROR ecode;
	ntdb_off_t off;

	off = ntdb_read_off(ntdb, b_off);
	if (NTDB_OFF_IS_ERR(off)) {
		ecode = NTDB_OFF_TO_ERR(off);
		goto unlock_err;
	}
	/* A little bit of paranoia: counter should be 0. */
	off &= NTDB_OFF_MASK;

	while (off && limit--) {
		struct ntdb_free_record rec;
		ntdb_len_t coal;
		ntdb_off_t next;

		ecode = ntdb_read_convert(ntdb, off, &rec, sizeof(rec));
		if (ecode != NTDB_SUCCESS)
			goto unlock_err;

		next = rec.next;
		coal = coalesce(ntdb, off, b_off, frec_len(&rec), &next);
		if (NTDB_OFF_IS_ERR(coal)) {
			/* This has already unlocked on error. */
			return NTDB_OFF_TO_ERR(coal);
		}
		if (NTDB_OFF_IS_ERR(next)) {
			/* Coalescing had to unlock, so stop. */
			return NTDB_SUCCESS;
		}
		/* Keep going if we're doing well... */
		limit += size_to_bucket(coal / 16 + NTDB_MIN_DATA_LEN);
		off = next;
	}

	/* Now, move those elements to the tail of the list so we get something
	 * else next time. */
	if (off) {
		struct ntdb_free_record oldhrec, newhrec, oldtrec, newtrec;
		ntdb_off_t oldhoff, oldtoff, newtoff;

		/* The record we were up to is the new head. */
		ecode = ntdb_read_convert(ntdb, off, &newhrec,
					  sizeof(newhrec));
		if (ecode != NTDB_SUCCESS)
			goto unlock_err;

		/* Get the new tail. */
		newtoff = frec_prev(&newhrec);
		ecode = ntdb_read_convert(ntdb, newtoff, &newtrec,
					  sizeof(newtrec));
		if (ecode != NTDB_SUCCESS)
			goto unlock_err;

		/* Get the old head. */
		oldhoff = ntdb_read_off(ntdb, b_off);
		if (NTDB_OFF_IS_ERR(oldhoff)) {
			ecode = NTDB_OFF_TO_ERR(oldhoff);
			goto unlock_err;
		}

		/* This could happen if they all coalesced away. */
		if (oldhoff == off)
			goto out;

		ecode = ntdb_read_convert(ntdb, oldhoff, &oldhrec,
					  sizeof(oldhrec));
		if (ecode != NTDB_SUCCESS)
			goto unlock_err;

		/* Get the old tail. */
		oldtoff = frec_prev(&oldhrec);
		ecode = ntdb_read_convert(ntdb, oldtoff, &oldtrec,
					  sizeof(oldtrec));
		if (ecode != NTDB_SUCCESS)
			goto unlock_err;

		/* Old tail's next points to old head. */
		oldtrec.next = oldhoff;

		/* Old head's prev points to old tail. */
		oldhrec.magic_and_prev
			= (NTDB_FREE_MAGIC << (64 - NTDB_OFF_UPPER_STEAL))
			| oldtoff;

		/* New tail's next is 0. */
		newtrec.next = 0;

		/* Write out the modified versions. */
		ecode = ntdb_write_convert(ntdb, oldtoff, &oldtrec,
					   sizeof(oldtrec));
		if (ecode != NTDB_SUCCESS)
			goto unlock_err;

		ecode = ntdb_write_convert(ntdb, oldhoff, &oldhrec,
					   sizeof(oldhrec));
		if (ecode != NTDB_SUCCESS)
			goto unlock_err;

		ecode = ntdb_write_convert(ntdb, newtoff, &newtrec,
					   sizeof(newtrec));
		if (ecode != NTDB_SUCCESS)
			goto unlock_err;

		/* And finally link in new head. */
		ecode = ntdb_write_off(ntdb, b_off, off);
		if (ecode != NTDB_SUCCESS)
			goto unlock_err;
	}
out:
	ntdb_unlock_free_bucket(ntdb, b_off);
	return NTDB_SUCCESS;

unlock_err:
	ntdb_unlock_free_bucket(ntdb, b_off);
	return ecode;
}
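/*
 * Rotation sketch: if the list was H -> A -> B -> T and the limit ran
 * out at B, the rewiring above produces B -> T -> H -> A.  The entries
 * already tried (H, A) end up behind the untried ones, so the next
 * coalesce pass starts on fresh records.
 */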
/* List must not be locked if coalesce_ok is set. */
enum NTDB_ERROR add_free_record(struct ntdb_context *ntdb,
				ntdb_off_t off, ntdb_len_t len_with_header,
				enum ntdb_lock_flags waitflag,
				bool coalesce_ok)
{
	ntdb_off_t b_off;
	ntdb_len_t len;
	enum NTDB_ERROR ecode;

	assert(len_with_header >= sizeof(struct ntdb_free_record));

	len = len_with_header - sizeof(struct ntdb_used_record);

	b_off = bucket_off(ntdb->ftable_off, size_to_bucket(len));
	ecode = ntdb_lock_free_bucket(ntdb, b_off, waitflag);
	if (ecode != NTDB_SUCCESS) {
		return ecode;
	}

	ecode = enqueue_in_free(ntdb, b_off, off, len, &coalesce_ok);
	check_list(ntdb, b_off);

	/* Coalescing unlocks free list. */
	if (!ecode && coalesce_ok)
		ecode = coalesce_list(ntdb, ntdb->ftable_off, b_off, 2);
	else
		ntdb_unlock_free_bucket(ntdb, b_off);
	return ecode;
}
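/*
 * Length accounting (illustrative numbers): a freed region includes its
 * record header, but once on the free list only
 * sizeof(struct ntdb_used_record) of it stays reserved, so the stored
 * length is len_with_header minus that header.  Freeing, say, a 64-byte
 * region with a 16-byte header files it in the bucket for 48 usable
 * bytes.
 */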
static size_t adjust_size(size_t keylen, size_t datalen)
{
	size_t size = keylen + datalen;

	if (size < NTDB_MIN_DATA_LEN)
		size = NTDB_MIN_DATA_LEN;

	/* Round to next uint64_t boundary. */
	return (size + (sizeof(uint64_t) - 1ULL)) & ~(sizeof(uint64_t) - 1ULL);
}
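/*
 * Example: assuming NTDB_MIN_DATA_LEN is below 15, adjust_size(5, 10)
 * leaves 15 unclamped and rounds it up to 16.  Anything smaller than
 * NTDB_MIN_DATA_LEN is clamped first, then rounded to 8 bytes.
 */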
/* If we have enough left over to be useful, split that off. */
static size_t record_leftover(size_t keylen, size_t datalen,
			      bool want_extra, size_t total_len)
{
	ssize_t leftover;

	if (want_extra)
		datalen += datalen / 2;
	leftover = total_len - adjust_size(keylen, datalen);

	if (leftover < (ssize_t)sizeof(struct ntdb_free_record))
		return 0;

	return leftover;
}
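/*
 * Illustration: asking for keylen 8, datalen 16 out of a 256-byte block
 * with want_extra set treats the data as 24 bytes, so adjust_size()
 * yields 32 and the leftover is 224: well above the free-record header
 * size, so the caller splits it off instead of wasting it as padding.
 */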
/* We need 'size' bytes to put our key and data in. */
static ntdb_off_t lock_and_alloc(struct ntdb_context *ntdb,
				 ntdb_off_t ftable_off,
				 ntdb_off_t bucket,
				 size_t keylen, size_t datalen,
				 bool want_extra,
				 unsigned magic,
				 unsigned hashlow)
{
	ntdb_off_t off, b_off, best_off;
	struct ntdb_free_record best = { 0 };
	double multiplier;
	size_t size = adjust_size(keylen, datalen);
	enum NTDB_ERROR ecode;

	ntdb->stats.allocs++;
	b_off = bucket_off(ftable_off, bucket);

	/* FIXME: Try non-blocking wait first, to measure contention. */
	/* Lock this bucket. */
	ecode = ntdb_lock_free_bucket(ntdb, b_off, NTDB_LOCK_WAIT);
	if (ecode != NTDB_SUCCESS) {
		return NTDB_ERR_TO_OFF(ecode);
	}

	best.ftable_and_len = -1ULL;
	best_off = 0;

	/* Get slack if we're after extra. */
	if (want_extra)
		multiplier = 1.5;
	else
		multiplier = 1.0;

	/* Walk the list to see if any are large enough, getting less fussy
	 * as we go. */
	off = ntdb_read_off(ntdb, b_off);
	if (NTDB_OFF_IS_ERR(off)) {
		ecode = NTDB_OFF_TO_ERR(off);
		goto unlock_err;
	}
	off &= NTDB_OFF_MASK;

	while (off) {
		const struct ntdb_free_record *r;
		ntdb_off_t next;

		r = ntdb_access_read(ntdb, off, sizeof(*r), true);
		if (NTDB_PTR_IS_ERR(r)) {
			ecode = NTDB_PTR_ERR(r);
			goto unlock_err;
		}

		if (frec_magic(r) != NTDB_FREE_MAGIC) {
			ecode = ntdb_logerr(ntdb, NTDB_ERR_CORRUPT,
					    NTDB_LOG_ERROR,
					    "lock_and_alloc:"
					    " %llu non-free 0x%llx",
					    (long long)off,
					    (long long)r->magic_and_prev);
			ntdb_access_release(ntdb, r);
			goto unlock_err;
		}

		if (frec_len(r) >= size && frec_len(r) < frec_len(&best)) {
			best_off = off;
			best = *r;
		}

		if (frec_len(&best) <= size * multiplier && best_off) {
			ntdb_access_release(ntdb, r);
			break;
		}

		multiplier *= 1.01;

		next = r->next;
		ntdb_access_release(ntdb, r);
		off = next;
	}

	/* If we found anything at all, use it. */
	if (best_off) {
		struct ntdb_used_record rec;
		size_t leftover;

		/* We're happy with this size: take it. */
		ecode = remove_from_list(ntdb, b_off, best_off, &best);
		check_list(ntdb, b_off);
		if (ecode != NTDB_SUCCESS) {
			goto unlock_err;
		}

		leftover = record_leftover(keylen, datalen, want_extra,
					   frec_len(&best));

		assert(keylen + datalen + leftover <= frec_len(&best));
		/* We need to mark non-free before we drop lock, otherwise
		 * coalesce() could try to merge it! */
		ecode = set_header(ntdb, &rec, magic, keylen, datalen,
				   frec_len(&best) - leftover, hashlow);
		if (ecode != NTDB_SUCCESS) {
			goto unlock_err;
		}

		ecode = ntdb_write_convert(ntdb, best_off, &rec, sizeof(rec));
		if (ecode != NTDB_SUCCESS) {
			goto unlock_err;
		}

		/* For futureproofing, we put a 0 in any unused space. */
		if (rec_extra_padding(&rec)) {
			ecode = ntdb->io->twrite(ntdb, best_off + sizeof(rec)
						 + keylen + datalen, "", 1);
			if (ecode != NTDB_SUCCESS) {
				goto unlock_err;
			}
		}

		/* Bucket of leftover will be <= current bucket, so nested
		 * locking is allowed. */
		if (leftover) {
			ntdb->stats.alloc_leftover++;
			ecode = add_free_record(ntdb,
						best_off + sizeof(rec)
						+ frec_len(&best) - leftover,
						leftover, NTDB_LOCK_WAIT,
						false);
			if (ecode != NTDB_SUCCESS) {
				best_off = NTDB_ERR_TO_OFF(ecode);
			}
		}
		ntdb_unlock_free_bucket(ntdb, b_off);

		return best_off;
	}

	ntdb_unlock_free_bucket(ntdb, b_off);
	return 0;

unlock_err:
	ntdb_unlock_free_bucket(ntdb, b_off);
	return NTDB_ERR_TO_OFF(ecode);
}
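/*
 * The walk above is a bounded best-fit: it remembers the smallest
 * record that is >= size, and stops early once that candidate is within
 * size * multiplier.  Starting at 1.0 (1.5 when extra space is wanted)
 * and growing by 1% per record examined, the multiplier trades fit
 * quality for bounded search time on long lists.
 */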
/* Get a free block from current free list, or 0 if none, -ve on error. */
static ntdb_off_t get_free(struct ntdb_context *ntdb,
			   size_t keylen, size_t datalen, bool want_extra,
			   unsigned magic, unsigned hashlow)
{
	ntdb_off_t off, ftable_off;
	ntdb_off_t start_b, b, ftable;
	bool wrapped = false;

	/* If they are growing, add 50% to get to higher bucket. */
	if (want_extra)
		start_b = size_to_bucket(adjust_size(keylen,
						     datalen + datalen / 2));
	else
		start_b = size_to_bucket(adjust_size(keylen, datalen));

	ftable_off = ntdb->ftable_off;
	ftable = ntdb->ftable;
	while (!wrapped || ftable_off != ntdb->ftable_off) {
		/* Start at exact size bucket, and search up... */
		for (b = find_free_head(ntdb, ftable_off, start_b);
		     b < NTDB_FREE_BUCKETS;
		     b = find_free_head(ntdb, ftable_off, b + 1)) {
			/* Try getting one from list. */
			off = lock_and_alloc(ntdb, ftable_off,
					     b, keylen, datalen, want_extra,
					     magic, hashlow);
			if (NTDB_OFF_IS_ERR(off))
				return off;
			if (off != 0) {
				if (b == start_b)
					ntdb->stats.alloc_bucket_exact++;
				if (b == NTDB_FREE_BUCKETS - 1)
					ntdb->stats.alloc_bucket_max++;
				/* Worked?  Stay using this list. */
				ntdb->ftable_off = ftable_off;
				ntdb->ftable = ftable;
				return off;
			}
			/* Didn't work.  Try next bucket. */
		}

		if (NTDB_OFF_IS_ERR(b)) {
			return b;
		}

		/* Hmm, try next table. */
		ftable_off = next_ftable(ntdb, ftable_off);
		if (NTDB_OFF_IS_ERR(ftable_off)) {
			return ftable_off;
		}
		ftable++;

		if (ftable_off == 0) {
			wrapped = true;
			ftable_off = first_ftable(ntdb);
			if (NTDB_OFF_IS_ERR(ftable_off)) {
				return ftable_off;
			}
			ftable = 0;
		}
	}

	return 0;
}
enum NTDB_ERROR set_header(struct ntdb_context *ntdb,
			   struct ntdb_used_record *rec,
			   unsigned magic, uint64_t keylen, uint64_t datalen,
			   uint64_t actuallen, unsigned hashlow)
{
	uint64_t keybits = (fls64(keylen) + 1) / 2;

	/* Use bottom bits of hash, so it's independent of hash table size. */
	rec->magic_and_meta = (hashlow & ((1 << 11)-1))
		| ((actuallen - (keylen + datalen)) << 11)
		| (keybits << 43)
		| ((uint64_t)magic << 48);
	rec->key_and_data_len = (keylen | (datalen << (keybits*2)));

	/* Encoding can fail on big values. */
	if (rec_key_length(rec) != keylen
	    || rec_data_length(rec) != datalen
	    || rec_extra_padding(rec) != actuallen - (keylen + datalen)) {
		return ntdb_logerr(ntdb, NTDB_ERR_IO, NTDB_LOG_ERROR,
				   "Could not encode k=%llu,d=%llu,a=%llu",
				   (long long)keylen, (long long)datalen,
				   (long long)actuallen);
	}
	return NTDB_SUCCESS;
}
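/*
 * Layout sketch of magic_and_meta, matching the shifts above:
 *   bits  0-10  low bits of the hash
 *   bits 11-42  extra padding (actuallen - keylen - datalen)
 *   bits 43-47  keybits
 *   bits 48-63  magic
 * key_and_data_len packs keylen into the low 2*keybits bits: e.g.
 * keylen 10 gives keybits 2, so the key takes 4 bits and datalen is
 * shifted left by 4.
 */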
/* You need 'size' bytes; this tells you how much you should expand by. */
ntdb_off_t ntdb_expand_adjust(ntdb_off_t map_size, ntdb_off_t size)
{
	ntdb_off_t new_size, top_size;

	/* Limit the growth, to avoid using up huge amounts of memory for
	 * in-memory tdbs if an oddball huge record creeps in. */
	if (size > 100 * 1024) {
		top_size = map_size + size * 2;
	} else {
		top_size = map_size + size * 100;
	}

	/* Always make room for at least top_size more records, and at
	 * least 25% more space if the DB is smaller than 100MiB;
	 * otherwise grow it by 10% only. */
	if (map_size > 100 * 1024 * 1024) {
		new_size = map_size * 1.10;
	} else {
		new_size = map_size * 1.25;
	}

	if (new_size < top_size)
		new_size = top_size;

	/* We always make the file a multiple of the transaction page
	 * size.  This guarantees that the transaction recovery area
	 * is always aligned; otherwise the transaction code can overwrite
	 * itself. */
	new_size = (new_size + NTDB_PGSIZE-1) & ~(NTDB_PGSIZE-1);
	return new_size - map_size;
}
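/*
 * Worked example (assuming NTDB_PGSIZE is 16KiB): for map_size 1MiB and
 * size 1KiB, top_size = 1MiB + 100KiB and new_size = 1.25MiB; new_size
 * wins, is already a page multiple, and the function returns 256KiB of
 * growth.
 */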
/* Expand the database. */
static enum NTDB_ERROR ntdb_expand(struct ntdb_context *ntdb, ntdb_len_t size)
{
	uint64_t old_size;
	ntdb_len_t wanted;
	enum NTDB_ERROR ecode;

	/* Need to hold a hash lock to expand DB: transactions rely on it. */
	if (!(ntdb->flags & NTDB_NOLOCK)
	    && !ntdb->file->allrecord_lock.count
	    && !ntdb_has_hash_locks(ntdb)) {
		return ntdb_logerr(ntdb, NTDB_ERR_LOCK, NTDB_LOG_ERROR,
				   "ntdb_expand: must hold lock during expand");
	}

	/* Only one person can expand file at a time. */
	ecode = ntdb_lock_expand(ntdb, F_WRLCK);
	if (ecode != NTDB_SUCCESS) {
		return ecode;
	}

	/* Someone else may have expanded the file, so retry. */
	old_size = ntdb->file->map_size;
	ntdb_oob(ntdb, ntdb->file->map_size, 1, true);
	if (ntdb->file->map_size != old_size) {
		ntdb_unlock_expand(ntdb, F_WRLCK);
		return NTDB_SUCCESS;
	}

	/* We need room for the record header too. */
	size = adjust_size(0, sizeof(struct ntdb_used_record) + size);
	/* Overallocate. */
	wanted = ntdb_expand_adjust(old_size, size);

	ecode = ntdb->io->expand_file(ntdb, wanted);
	if (ecode != NTDB_SUCCESS) {
		ntdb_unlock_expand(ntdb, F_WRLCK);
		return ecode;
	}

	/* We need to drop this lock before adding the free record. */
	ntdb_unlock_expand(ntdb, F_WRLCK);

	ntdb->stats.expands++;
	return add_free_record(ntdb, old_size, wanted, NTDB_LOCK_WAIT, true);
}
/* This won't fail: it will expand the database if it has to. */
ntdb_off_t alloc(struct ntdb_context *ntdb, size_t keylen, size_t datalen,
		 uint64_t hash, unsigned magic, bool growing)
{
	ntdb_off_t off;

	/* We can't hold pointers during this: we could unmap! */
	assert(!ntdb->direct_access);

	for (;;) {
		enum NTDB_ERROR ecode;
		off = get_free(ntdb, keylen, datalen, growing, magic, hash);
		if (likely(off != 0))
			break;

		ecode = ntdb_expand(ntdb, adjust_size(keylen, datalen));
		if (ecode != NTDB_SUCCESS) {
			return NTDB_ERR_TO_OFF(ecode);
		}
	}

	return off;
}
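/*
 * Typical caller's view (a sketch; the real callers live in the hash
 * and record code): alloc() hands back an offset whose header has
 * already been written via set_header(), so the caller just writes the
 * key and data behind the header and links the offset into its hash
 * chain.  An error return decodes via NTDB_OFF_TO_ERR().
 */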