Btrfs: rework allocation clustering
fs/btrfs/free-space-cache.c
/*
 * Copyright (C) 2008 Red Hat.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"

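/*
 * Each free extent is tracked by one of these.  The same entry is linked
 * into two rbtrees in the owning block group: one indexed by offset (for
 * merging and offset lookups) and one indexed by size (for best-fit
 * searches), so a single region can be found either way.
 */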
struct btrfs_free_space {
        struct rb_node bytes_index;
        struct rb_node offset_index;
        u64 offset;
        u64 bytes;
};

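/*
 * Insert a free space entry into the offset-indexed tree.  Offsets are
 * unique, so hitting an existing offset returns -EEXIST and the caller
 * treats it as corruption of the free space cache.
 */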
static int tree_insert_offset(struct rb_root *root, u64 offset,
                              struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_free_space *info;

        while (*p) {
                parent = *p;
                info = rb_entry(parent, struct btrfs_free_space, offset_index);

                if (offset < info->offset)
                        p = &(*p)->rb_left;
                else if (offset > info->offset)
                        p = &(*p)->rb_right;
                else
                        return -EEXIST;
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);

        return 0;
}

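/*
 * Insert a free space entry into the size-indexed tree.  Unlike offsets,
 * many entries can share the same size, so equal keys simply go to the
 * right and this never fails.
 */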
static int tree_insert_bytes(struct rb_root *root, u64 bytes,
                             struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_free_space *info;

        while (*p) {
                parent = *p;
                info = rb_entry(parent, struct btrfs_free_space, bytes_index);

                if (bytes < info->bytes)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);

        return 0;
}

/*
 * searches the tree for the given offset.
 *
 * fuzzy == 1: this is used for allocations where we are given a hint of where
 * to look for free space.  Because the hint may not be completely on an offset
 * mark, or the hint may no longer point to free space, we need to fudge our
 * results a bit.  So we look for free space starting at or after offset with at
 * least bytes size.  We prefer to find as close to the given offset as we can.
 * Also, if the offset is within a free space range, then we will return the
 * free space that contains the given offset, which means we can return a free
 * space chunk with an offset before the provided offset.
 *
 * fuzzy == 0: this is just a normal tree search.  Give us the free space that
 * starts at the given offset and is at least bytes in size, and if it's not
 * there return NULL.
 */
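/*
 * Illustrative example (not from the original source): with entries
 * [0, 4k) and [8k, 16k) in the tree, a fuzzy search for offset 5k and
 * bytes 2k returns the [8k, 16k) entry, and a fuzzy search for offset
 * 9k returns that same entry because 9k falls inside it; an exact
 * (fuzzy == 0) search for offset 5k returns NULL.
 */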
static struct btrfs_free_space *tree_search_offset(struct rb_root *root,
                                                   u64 offset, u64 bytes,
                                                   int fuzzy)
{
        struct rb_node *n = root->rb_node;
        struct btrfs_free_space *entry, *ret = NULL;

        while (n) {
                entry = rb_entry(n, struct btrfs_free_space, offset_index);

                if (offset < entry->offset) {
                        if (fuzzy &&
                            (!ret || entry->offset < ret->offset) &&
                            (bytes <= entry->bytes))
                                ret = entry;
                        n = n->rb_left;
                } else if (offset > entry->offset) {
                        if (fuzzy &&
                            (entry->offset + entry->bytes - 1) >= offset &&
                            bytes <= entry->bytes) {
                                ret = entry;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        if (bytes > entry->bytes) {
                                n = n->rb_right;
                                continue;
                        }
                        ret = entry;
                        break;
                }
        }

        return ret;
}

/*
 * return a chunk at least bytes in size, as close to offset as we can get.
 */
static struct btrfs_free_space *tree_search_bytes(struct rb_root *root,
                                                  u64 offset, u64 bytes)
{
        struct rb_node *n = root->rb_node;
        struct btrfs_free_space *entry, *ret = NULL;

        while (n) {
                entry = rb_entry(n, struct btrfs_free_space, bytes_index);

                if (bytes < entry->bytes) {
                        /*
                         * We prefer to get a hole size as close to the size we
                         * are asking for so we don't take small slivers out of
                         * huge holes, but we also want to get as close to the
                         * offset as possible so we don't have a whole lot of
                         * fragmentation.
                         */
                        if (offset <= entry->offset) {
                                if (!ret)
                                        ret = entry;
                                else if (entry->bytes < ret->bytes)
                                        ret = entry;
                                else if (entry->offset < ret->offset)
                                        ret = entry;
                        }
                        n = n->rb_left;
                } else if (bytes > entry->bytes) {
                        n = n->rb_right;
                } else {
                        /*
                         * OK, we may have multiple chunks of the wanted size,
                         * so we don't want to take the first one we find, we
                         * want to take the one closest to our given offset, so
                         * keep searching just in case there's a better match.
                         */
                        n = n->rb_right;
                        if (offset > entry->offset)
                                continue;
                        else if (!ret || entry->offset < ret->offset)
                                ret = entry;
                }
        }

        return ret;
}

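/*
 * Drop an entry from both indexes.  The caller must hold
 * block_group->tree_lock and still owns the entry afterwards.
 */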
static void unlink_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_free_space *info)
{
        rb_erase(&info->offset_index, &block_group->free_space_offset);
        rb_erase(&info->bytes_index, &block_group->free_space_bytes);
}

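/*
 * Add an entry to both indexes.  Fails with -EEXIST if the offset is
 * already present; tree_lock must be held here as well.
 */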
static int link_free_space(struct btrfs_block_group_cache *block_group,
                           struct btrfs_free_space *info)
{
        int ret = 0;

        BUG_ON(!info->bytes);
        ret = tree_insert_offset(&block_group->free_space_offset, info->offset,
                                 &info->offset_index);
        if (ret)
                return ret;

        ret = tree_insert_bytes(&block_group->free_space_bytes, info->bytes,
                                &info->bytes_index);
        return ret;
}

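/*
 * Return a freed range to the cache, merging it with any free space
 * entries that end or begin exactly at its edges so the trees always
 * hold maximal contiguous regions.
 */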
int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
                         u64 offset, u64 bytes)
{
        struct btrfs_free_space *right_info;
        struct btrfs_free_space *left_info;
        struct btrfs_free_space *info = NULL;
        int ret = 0;

        info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
        if (!info)
                return -ENOMEM;

        info->offset = offset;
        info->bytes = bytes;

        spin_lock(&block_group->tree_lock);

        /*
         * first we want to see if there is free space adjacent to the range we
         * are adding.  If there is, remove that struct and add a new one to
         * cover the entire range.
         */
        right_info = tree_search_offset(&block_group->free_space_offset,
                                        offset + bytes, 0, 0);
        left_info = tree_search_offset(&block_group->free_space_offset,
                                       offset - 1, 0, 1);

        if (right_info) {
                unlink_free_space(block_group, right_info);
                info->bytes += right_info->bytes;
                kfree(right_info);
        }

        if (left_info && left_info->offset + left_info->bytes == offset) {
                unlink_free_space(block_group, left_info);
                info->offset = left_info->offset;
                info->bytes += left_info->bytes;
                kfree(left_info);
        }

        ret = link_free_space(block_group, info);
        if (ret)
                kfree(info);

        spin_unlock(&block_group->tree_lock);

        if (ret) {
                printk(KERN_ERR "btrfs: unable to add free space: %d\n", ret);
                BUG_ON(ret == -EEXIST);
        }

        return ret;
}

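/*
 * Carve 'bytes' at 'offset' out of the cache.  The range normally sits at
 * the front of an entry, but during tree log replay it can land in the
 * middle of one, in which case the entry is split around the hole.
 */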
int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
                            u64 offset, u64 bytes)
{
        struct btrfs_free_space *info;
        int ret = 0;

        spin_lock(&block_group->tree_lock);

        info = tree_search_offset(&block_group->free_space_offset, offset, 0,
                                  1);
        if (info && info->offset == offset) {
                if (info->bytes < bytes) {
                        printk(KERN_ERR "Found free space at %llu, size %llu, "
                               "trying to use %llu\n",
                               (unsigned long long)info->offset,
                               (unsigned long long)info->bytes,
                               (unsigned long long)bytes);
                        WARN_ON(1);
                        ret = -EINVAL;
                        spin_unlock(&block_group->tree_lock);
                        goto out;
                }
                unlink_free_space(block_group, info);

                if (info->bytes == bytes) {
                        kfree(info);
                        spin_unlock(&block_group->tree_lock);
                        goto out;
                }

                info->offset += bytes;
                info->bytes -= bytes;

                ret = link_free_space(block_group, info);
                spin_unlock(&block_group->tree_lock);
                BUG_ON(ret);
        } else if (info && info->offset < offset &&
                   info->offset + info->bytes >= offset + bytes) {
                u64 old_start = info->offset;
                /*
                 * we're freeing space in the middle of the info,
                 * this can happen during tree log replay
                 *
                 * first unlink the old info and then
                 * insert it again after the hole we're creating
                 */
                unlink_free_space(block_group, info);
                if (offset + bytes < info->offset + info->bytes) {
                        u64 old_end = info->offset + info->bytes;

                        info->offset = offset + bytes;
                        info->bytes = old_end - info->offset;
                        ret = link_free_space(block_group, info);
                        BUG_ON(ret);
                } else {
                        /* the hole we're creating ends at the end
                         * of the info struct, just free the info
                         */
                        kfree(info);
                }
                spin_unlock(&block_group->tree_lock);
                /* step two, insert a new info struct to cover anything
                 * before the hole
                 */
                ret = btrfs_add_free_space(block_group, old_start,
                                           offset - old_start);
                BUG_ON(ret);
        } else {
                spin_unlock(&block_group->tree_lock);
                if (!info) {
                        printk(KERN_ERR "couldn't find space %llu to free\n",
                               (unsigned long long)offset);
                        printk(KERN_ERR "cached is %d, offset %llu bytes %llu\n",
                               block_group->cached,
                               (unsigned long long)block_group->key.objectid,
                               (unsigned long long)block_group->key.offset);
                        btrfs_dump_free_space(block_group, bytes);
                } else {
                        printk(KERN_ERR "hmm, found offset=%llu bytes=%llu, "
                               "but wanted offset=%llu bytes=%llu\n",
                               (unsigned long long)info->offset,
                               (unsigned long long)info->bytes,
                               (unsigned long long)offset,
                               (unsigned long long)bytes);
                }
                WARN_ON(1);
        }
out:
        return ret;
}

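/*
 * Debugging helper: walk the offset tree, print every entry, and count
 * how many entries could satisfy an allocation of 'bytes'.
 */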
void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
                           u64 bytes)
{
        struct btrfs_free_space *info;
        struct rb_node *n;
        int count = 0;

        for (n = rb_first(&block_group->free_space_offset); n; n = rb_next(n)) {
                info = rb_entry(n, struct btrfs_free_space, offset_index);
                if (info->bytes >= bytes)
                        count++;
                printk(KERN_ERR "entry offset %llu, bytes %llu\n",
                       (unsigned long long)info->offset,
                       (unsigned long long)info->bytes);
        }
        printk(KERN_INFO "%d blocks of free space at or bigger than %llu "
               "bytes\n", count, (unsigned long long)bytes);
}

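/*
 * Sum every cached free extent to get the total unused space in the
 * block group.
 */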
u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group)
{
        struct btrfs_free_space *info;
        struct rb_node *n;
        u64 ret = 0;

        for (n = rb_first(&block_group->free_space_offset); n;
             n = rb_next(n)) {
                info = rb_entry(n, struct btrfs_free_space, offset_index);
                ret += info->bytes;
        }

        return ret;
}

/*
 * for a given cluster, put all of its extents back into the free
 * space cache.  If the block group passed doesn't match the block group
 * pointed to by the cluster, someone else raced in and freed the
 * cluster already.  In that case, we just return without changing anything.
 */
static int
__btrfs_return_cluster_to_free_space(
                             struct btrfs_block_group_cache *block_group,
                             struct btrfs_free_cluster *cluster)
{
        struct btrfs_free_space *entry;
        struct rb_node *node;

        spin_lock(&cluster->lock);
        if (cluster->block_group != block_group)
                goto out;

        cluster->window_start = 0;
        node = rb_first(&cluster->root);
        while (node) {
                entry = rb_entry(node, struct btrfs_free_space, offset_index);
                node = rb_next(&entry->offset_index);
                rb_erase(&entry->offset_index, &cluster->root);
                link_free_space(block_group, entry);
        }
        list_del_init(&cluster->block_group_list);

        btrfs_put_block_group(cluster->block_group);
        cluster->block_group = NULL;
        cluster->root.rb_node = NULL;
out:
        spin_unlock(&cluster->lock);
        return 0;
}

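/*
 * Tear down the whole cache for a block group: return any clusters still
 * attached to it, then free every remaining entry, dropping the lock
 * periodically so we don't hog the CPU.
 */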
void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
{
        struct btrfs_free_space *info;
        struct rb_node *node;
        struct btrfs_free_cluster *cluster;
        struct btrfs_free_cluster *safe;

        spin_lock(&block_group->tree_lock);

        list_for_each_entry_safe(cluster, safe, &block_group->cluster_list,
                                 block_group_list) {

                WARN_ON(cluster->block_group != block_group);
                __btrfs_return_cluster_to_free_space(block_group, cluster);
        }

        while ((node = rb_last(&block_group->free_space_bytes)) != NULL) {
                info = rb_entry(node, struct btrfs_free_space, bytes_index);
                unlink_free_space(block_group, info);
                kfree(info);
                if (need_resched()) {
                        spin_unlock(&block_group->tree_lock);
                        cond_resched();
                        spin_lock(&block_group->tree_lock);
                }
        }
        spin_unlock(&block_group->tree_lock);
}

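/*
 * Try to satisfy a single allocation straight from the free space cache,
 * without using a cluster.  'empty_size' asks for extra slack when
 * choosing an entry, but only 'bytes' is actually consumed.  Returns the
 * start of the allocation, or 0 if nothing big enough was found.
 */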
u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
                               u64 offset, u64 bytes, u64 empty_size)
{
        struct btrfs_free_space *entry = NULL;
        u64 ret = 0;

        spin_lock(&block_group->tree_lock);
        entry = tree_search_offset(&block_group->free_space_offset, offset,
                                   bytes + empty_size, 1);
        if (!entry)
                entry = tree_search_bytes(&block_group->free_space_bytes,
                                          offset, bytes + empty_size);
        if (entry) {
                unlink_free_space(block_group, entry);
                ret = entry->offset;
                entry->offset += bytes;
                entry->bytes -= bytes;

                if (!entry->bytes)
                        kfree(entry);
                else
                        link_free_space(block_group, entry);
        }
        spin_unlock(&block_group->tree_lock);

        return ret;
}

/*
 * given a cluster, put all of its extents back into the free space
 * cache.  If a block group is passed, this function will only free
 * a cluster that belongs to the passed block group.
 *
 * Otherwise, it'll get a reference on the block group pointed to by the
 * cluster and remove the cluster from it.
 */
int btrfs_return_cluster_to_free_space(
                               struct btrfs_block_group_cache *block_group,
                               struct btrfs_free_cluster *cluster)
{
        int ret;

        /* first, get a safe pointer to the block group */
        spin_lock(&cluster->lock);
        if (!block_group) {
                block_group = cluster->block_group;
                if (!block_group) {
                        spin_unlock(&cluster->lock);
                        return 0;
                }
        } else if (cluster->block_group != block_group) {
                /* someone else has already freed it, don't redo their work */
                spin_unlock(&cluster->lock);
                return 0;
        }
        atomic_inc(&block_group->count);
        spin_unlock(&cluster->lock);

        /* now return any extents the cluster had on it */
        spin_lock(&block_group->tree_lock);
        ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
        spin_unlock(&block_group->tree_lock);

        /* finally drop our ref */
        btrfs_put_block_group(block_group);
        return ret;
}

/*
 * given a cluster, try to allocate 'bytes' from it.  Returns 0
 * if it couldn't find anything suitably large, or a logical disk offset
 * if things worked out.
 */
u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
                             struct btrfs_free_cluster *cluster, u64 bytes,
                             u64 min_start)
{
        struct btrfs_free_space *entry = NULL;
        struct rb_node *node;
        u64 ret = 0;

        spin_lock(&cluster->lock);
        if (bytes > cluster->max_size)
                goto out;

        if (cluster->block_group != block_group)
                goto out;

        node = rb_first(&cluster->root);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_free_space, offset_index);

        while (1) {
                if (entry->bytes < bytes || entry->offset < min_start) {
                        node = rb_next(&entry->offset_index);
                        if (!node)
                                break;
                        entry = rb_entry(node, struct btrfs_free_space,
                                         offset_index);
                        continue;
                }
                ret = entry->offset;

                entry->offset += bytes;
                entry->bytes -= bytes;

                if (entry->bytes == 0) {
                        rb_erase(&entry->offset_index, &cluster->root);
                        kfree(entry);
                }
                break;
        }
out:
        spin_unlock(&cluster->lock);
        return ret;
}

/*
 * here we try to find a cluster of blocks in a block group.  The goal
 * is to find at least bytes free and up to empty_size + bytes free.
 * We might not find them all in one contiguous area.
 *
 * returns zero and sets up cluster if things worked out, otherwise
 * it returns -ENOSPC
 */
int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
                             struct btrfs_block_group_cache *block_group,
                             struct btrfs_free_cluster *cluster,
                             u64 offset, u64 bytes, u64 empty_size)
{
        struct btrfs_free_space *entry = NULL;
        struct rb_node *node;
        struct btrfs_free_space *next;
        struct btrfs_free_space *last;
        u64 min_bytes;
        u64 window_start;
        u64 window_free;
        u64 max_extent = 0;
        int total_retries = 0;
        int ret;

        /* for metadata, allow allocations with more holes */
        if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
                /*
                 * we want to do larger allocations when we are
                 * flushing out the delayed refs, it helps prevent
                 * making more work as we go along.
                 */
                if (trans->transaction->delayed_refs.flushing)
                        min_bytes = max(bytes, (bytes + empty_size) >> 1);
                else
                        min_bytes = max(bytes, (bytes + empty_size) >> 4);
        } else
                min_bytes = max(bytes, (bytes + empty_size) >> 2);

        spin_lock(&block_group->tree_lock);
        spin_lock(&cluster->lock);

        /* someone already found a cluster, hooray */
        if (cluster->block_group) {
                ret = 0;
                goto out;
        }
again:
        min_bytes = min(min_bytes, bytes + empty_size);
        entry = tree_search_bytes(&block_group->free_space_bytes,
                                  offset, min_bytes);
        if (!entry) {
                ret = -ENOSPC;
                goto out;
        }
        window_start = entry->offset;
        window_free = entry->bytes;
        last = entry;
        max_extent = entry->bytes;

        while (1) {
                /* our window is just right, let's fill it */
                if (window_free >= bytes + empty_size)
                        break;

                node = rb_next(&last->offset_index);
                if (!node) {
                        ret = -ENOSPC;
                        goto out;
                }
                next = rb_entry(node, struct btrfs_free_space, offset_index);

                /*
                 * we haven't filled the empty size and the window is
                 * very large.  reset and try again
                 */
                if (next->offset - window_start > (bytes + empty_size) * 2) {
                        entry = next;
                        window_start = entry->offset;
                        window_free = entry->bytes;
                        last = entry;
                        max_extent = 0;
                        total_retries++;
                        if (total_retries % 256 == 0) {
                                if (min_bytes >= (bytes + empty_size)) {
                                        ret = -ENOSPC;
                                        goto out;
                                }
                                /*
                                 * grow our allocation a bit, we're not having
                                 * much luck
                                 */
                                min_bytes *= 2;
                                goto again;
                        }
                } else {
                        last = next;
                        window_free += next->bytes;
                        if (next->bytes > max_extent)
                                max_extent = next->bytes;
                }
        }

        cluster->window_start = entry->offset;

        /*
         * now we've found our entries, pull them out of the free space
         * cache and put them into the cluster rbtree
         *
         * The cluster includes an rbtree, but only uses the offset index
         * of each free space cache entry.
         */
        while (1) {
                node = rb_next(&entry->offset_index);
                unlink_free_space(block_group, entry);
                ret = tree_insert_offset(&cluster->root, entry->offset,
                                         &entry->offset_index);
                BUG_ON(ret);

                if (!node || entry == last)
                        break;

                entry = rb_entry(node, struct btrfs_free_space, offset_index);
        }
        ret = 0;
        cluster->max_size = max_extent;
        atomic_inc(&block_group->count);
        list_add_tail(&cluster->block_group_list, &block_group->cluster_list);
        cluster->block_group = block_group;
out:
        spin_unlock(&cluster->lock);
        spin_unlock(&block_group->tree_lock);

        return ret;
}

/*
 * simple code to zero out a cluster
 */
void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
{
        spin_lock_init(&cluster->lock);
        spin_lock_init(&cluster->refill_lock);
        cluster->root.rb_node = NULL;
        cluster->max_size = 0;
        INIT_LIST_HEAD(&cluster->block_group_list);
        cluster->block_group = NULL;
}