// SPDX-License-Identifier: GPL-2.0

#include "misc.h"
#include "ctree.h"
#include "space-info.h"
#include "sysfs.h"
#include "volumes.h"
#include "free-space-cache.h"
#include "ordered-data.h"
#include "transaction.h"
#include "block-group.h"
#include "zoned.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"

/*
 * HOW DOES SPACE RESERVATION WORK
 *
 * If you want to know about delalloc specifically, there is a separate comment
 * for that with the delalloc code.  This comment is about how the whole system
 * works generally.
 *
 * BASIC CONCEPTS
 *
 *   1) space_info.  This is the ultimate arbiter of how much space we can use.
 *   There's a description of the bytes_ fields with the struct declaration,
 *   refer to that for specifics on each field.  Suffice it to say that for
 *   reservations we care about total_bytes - SUM(space_info->bytes_) when
 *   determining if there is space to make an allocation.  There is a space_info
 *   for METADATA, SYSTEM, and DATA areas.
 *
 *   2) block_rsv's.  These are basically buckets for every different type of
 *   metadata reservation we have.  You can see the comment in the block_rsv
 *   code on the rules for each type, but generally block_rsv->reserved is how
 *   much space is accounted for in space_info->bytes_may_use.
 *
 *   3) btrfs_calc*_size.  These are the worst case calculations we use based
 *   on the number of items we will want to modify.  We have one for changing
 *   items, and one for inserting new items.  Generally we use these helpers to
 *   determine the size of the block reserves, and then use the actual bytes
 *   values to adjust the space_info counters.
 *
 * MAKING RESERVATIONS, THE NORMAL CASE
 *
 *   We call into either btrfs_reserve_data_bytes() or
 *   btrfs_reserve_metadata_bytes(), depending on which we're looking for, with
 *   num_bytes we want to reserve.
 *
 *   ->reserve
 *     space_info->bytes_may_use += num_bytes
 *
 *   ->extent allocation
 *     Call btrfs_add_reserved_bytes() which does
 *     space_info->bytes_may_use -= num_bytes
 *     space_info->bytes_reserved += extent_bytes
 *
 *   ->insert reference
 *     Call btrfs_update_block_group() which does
 *     space_info->bytes_reserved -= extent_bytes
 *     space_info->bytes_used += extent_bytes
 *
 * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority)
 *
 *   Assume we are unable to simply make the reservation because we do not have
 *   enough space
 *
 *   -> __reserve_bytes
 *     create a reserve_ticket with ->bytes set to our reservation, add it to
 *     the tail of space_info->tickets, kick async flush thread
 *
 *   ->handle_reserve_ticket
 *     wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be set
 *     on the ticket.
 *
 *   -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space
 *     Flushes various things attempting to free up space.
 *
 *   -> btrfs_try_granting_tickets()
 *     This is called by anything that either subtracts space from
 *     space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the
 *     space_info->total_bytes.  This loops through the ->priority_tickets and
 *     then the ->tickets list checking to see if the reservation can be
 *     completed.  If it can the space is added to space_info->bytes_may_use and
 *     the ticket is woken up.
 *
 *   -> ticket wakeup
 *     Check if ->bytes == 0; if it is, we got our reservation and we can carry
 *     on, if not return the appropriate error (ENOSPC, but can be EINTR if we
 *     were interrupted.)
 *
 * MAKING RESERVATIONS, FLUSHING HIGH PRIORITY
 *
 *   Same as the above, except we add ourselves to the
 *   space_info->priority_tickets, and we do not use ticket->wait, we simply
 *   call flush_space() ourselves for the states that are safe for us to call
 *   without deadlocking and hope for the best.
 *
 * THE FLUSHING STATES
 *
 *   Generally speaking we will have two cases for each state, a "nice" state
 *   and an "ALL THE THINGS" state.  In btrfs we delay a lot of work in order to
 *   reduce the locking overhead on the various trees, and even to keep from
 *   doing any work at all in the case of delayed refs.  Each of these delayed
 *   things however hold reservations, and so letting them run allows us to
 *   reclaim space so we can make new reservations.
 *
 *   FLUSH_DELAYED_ITEMS
 *     Every inode has a delayed item to update the inode.  Take a simple write
 *     for example, we would update the inode item at write time to update the
 *     mtime, and then again at finish_ordered_io() time in order to update the
 *     isize or bytes.  We keep these delayed items to coalesce these operations
 *     into a single operation done on demand.  These are an easy way to reclaim
 *     metadata space.
 *
 *   FLUSH_DELALLOC
 *     Look at the delalloc comment to get an idea of how much space is reserved
 *     for delayed allocation.  We can reclaim some of this space simply by
 *     running delalloc, but usually we need to wait for ordered extents to
 *     reclaim the bulk of this space.
 *
 *   FLUSH_DELAYED_REFS
 *     We have a block reserve for the outstanding delayed refs space, and every
 *     delayed ref operation holds a reservation.  Running these is a quick way
 *     to reclaim space, but we want to hold this until the end because COW can
 *     churn a lot and we can avoid making some extent tree modifications if we
 *     are able to delay for as long as possible.
 *
 *   ALLOC_CHUNK
 *     We will skip this the first time through space reservation, because of
 *     overcommit and we don't want to have a lot of useless metadata space when
 *     our worst case reservations will likely never come true.
 *
 *   RUN_DELAYED_IPUTS
 *     If we're freeing inodes we're likely freeing checksums, file extent
 *     items, and extent tree items.  Loads of space could be freed up by these
 *     operations, however they won't be usable until the transaction commits.
 *
 *   COMMIT_TRANS
 *     This will commit the transaction.  Historically we had a lot of logic
 *     surrounding whether or not we'd commit the transaction, but this was born
 *     out of a pre-tickets era where we could end up committing the transaction
 *     thousands of times in a row without making progress.  Now thanks to our
 *     ticketing system we know if we're not making progress and can error
 *     everybody out after a few commits rather than burning the disk hoping for
 *     a different answer.
 *
 * OVERCOMMIT
 *
 *   Because we hold so many reservations for metadata we will allow you to
 *   reserve more space than is currently free in the currently allocated
 *   metadata space.  This only happens with metadata, data does not allow
 *   overcommitting.
 *
 *   You can see the current logic for when we allow overcommit in
 *   btrfs_can_overcommit(), but it only applies to unallocated space.  If there
 *   is no unallocated space to be had, all reservations are kept within the
 *   free space in the allocated metadata chunks.
 *
 *   Because of overcommitting, you generally want to use the
 *   btrfs_can_overcommit() logic for metadata allocations, as it does the right
 *   thing with or without extra unallocated space.
 */

u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
			  bool may_use_included)
{
	ASSERT(s_info);
	return s_info->bytes_used + s_info->bytes_reserved +
		s_info->bytes_pinned + s_info->bytes_readonly +
		s_info->bytes_zone_unusable +
		(may_use_included ? s_info->bytes_may_use : 0);
}
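
/*
 * Illustrative sketch (not part of the kernel tree): the reservation math
 * described in the header comment reduces to the check below.  The helper
 * name is hypothetical; btrfs_try_granting_tickets() performs the real
 * version of this test.
 */
#if 0
static bool example_reservation_would_fit(struct btrfs_space_info *s_info,
					  u64 num_bytes)
{
	/* Everything not accounted in one of the bytes_ counters is free. */
	u64 used = btrfs_space_info_used(s_info, true);

	return used + num_bytes <= s_info->total_bytes;
}
#endif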

/*
 * After adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	list_for_each_entry(found, head, list)
		found->full = 0;
}

/*
 * Block groups with more than this value (percent) of unusable space will be
 * scheduled for background reclaim.
 */
#define BTRFS_DEFAULT_ZONED_RECLAIM_THRESH			(75)

/*
 * Calculate chunk size depending on volume type (regular or zoned).
 */
static u64 calc_chunk_size(const struct btrfs_fs_info *fs_info, u64 flags)
{
	if (btrfs_is_zoned(fs_info))
		return fs_info->zone_size;

	ASSERT(flags & BTRFS_BLOCK_GROUP_TYPE_MASK);

	if (flags & BTRFS_BLOCK_GROUP_DATA)
		return BTRFS_MAX_DATA_CHUNK_SIZE;
	else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		return SZ_32M;

	/* Handle BTRFS_BLOCK_GROUP_METADATA */
	if (fs_info->fs_devices->total_rw_bytes > 50ULL * SZ_1G)
		return SZ_1G;

	return SZ_256M;
}
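
/*
 * Worked example for the non-zoned metadata case above: a filesystem with
 * 100GiB of writable space gets 1GiB metadata chunks, while one at or below
 * the 50GiB threshold gets 256MiB chunks.  Data and system chunks are sized
 * at BTRFS_MAX_DATA_CHUNK_SIZE and 32MiB respectively, regardless of
 * filesystem size.
 */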

/*
 * Update default chunk size.
 */
void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info,
					u64 chunk_size)
{
	WRITE_ONCE(space_info->chunk_size, chunk_size);
}

static int create_space_info(struct btrfs_fs_info *info, u64 flags)
{
	struct btrfs_space_info *space_info;
	int i;
	int ret;

	space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
	if (!space_info)
		return -ENOMEM;

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		INIT_LIST_HEAD(&space_info->block_groups[i]);
	init_rwsem(&space_info->groups_sem);
	spin_lock_init(&space_info->lock);
	space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
	INIT_LIST_HEAD(&space_info->ro_bgs);
	INIT_LIST_HEAD(&space_info->tickets);
	INIT_LIST_HEAD(&space_info->priority_tickets);
	space_info->clamp = 1;
	btrfs_update_space_info_chunk_size(space_info, calc_chunk_size(info, flags));

	if (btrfs_is_zoned(info))
		space_info->bg_reclaim_threshold = BTRFS_DEFAULT_ZONED_RECLAIM_THRESH;

	ret = btrfs_sysfs_add_space_info_type(info, space_info);
	if (ret)
		return ret;

	list_add(&space_info->list, &info->space_info);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		info->data_sinfo = space_info;

	return ret;
}

int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
	struct btrfs_super_block *disk_super;
	u64 features;
	u64 flags;
	int mixed = 0;
	int ret;

	disk_super = fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		return -EINVAL;

	features = btrfs_super_incompat_flags(disk_super);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	flags = BTRFS_BLOCK_GROUP_SYSTEM;
	ret = create_space_info(fs_info, flags);
	if (ret)
		goto out;

	if (mixed) {
		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	} else {
		flags = BTRFS_BLOCK_GROUP_METADATA;
		ret = create_space_info(fs_info, flags);
		if (ret)
			goto out;

		flags = BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	}
out:
	return ret;
}

void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
				struct btrfs_block_group *block_group)
{
	struct btrfs_space_info *found;
	int factor, index;

	factor = btrfs_bg_type_to_factor(block_group->flags);

	found = btrfs_find_space_info(info, block_group->flags);
	ASSERT(found);
	spin_lock(&found->lock);
	found->total_bytes += block_group->length;
	found->disk_total += block_group->length * factor;
	found->bytes_used += block_group->used;
	found->disk_used += block_group->used * factor;
	found->bytes_readonly += block_group->bytes_super;
	found->bytes_zone_unusable += block_group->zone_unusable;
	if (block_group->length > 0)
		found->full = 0;
	btrfs_try_granting_tickets(info, found);
	spin_unlock(&found->lock);

	block_group->space_info = found;

	index = btrfs_bg_flags_to_raid_index(block_group->flags);
	down_write(&found->groups_sem);
	list_add_tail(&block_group->list, &found->block_groups[index]);
	up_write(&found->groups_sem);
}

struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
					       u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	list_for_each_entry(found, head, list) {
		if (found->flags & flags)
			return found;
	}
	return NULL;
}

static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
			  struct btrfs_space_info *space_info,
			  enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_space_info *data_sinfo;
	u64 profile;
	u64 avail;
	u64 data_chunk_size;
	int factor;

	if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
		profile = btrfs_system_alloc_profile(fs_info);
	else
		profile = btrfs_metadata_alloc_profile(fs_info);

	avail = atomic64_read(&fs_info->free_chunk_space);

	/*
	 * If we have dup, raid1 or raid10 then only half of the free
	 * space is actually usable.  For raid56, the space info used
	 * doesn't include the parity drive, so we don't have to
	 * change the math.
	 */
	factor = btrfs_bg_type_to_factor(profile);
	avail = div_u64(avail, factor);
	if (avail == 0)
		return 0;

	/*
	 * Calculate the data_chunk_size.  space_info->chunk_size is the
	 * "optimal" chunk size based on the fs size.  However when we actually
	 * allocate the chunk we will strip this down further, making it no
	 * more than 10% of the disk or 1G, whichever is smaller.
	 */
	data_sinfo = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
	data_chunk_size = min(data_sinfo->chunk_size,
			      mult_perc(fs_info->fs_devices->total_rw_bytes, 10));
	data_chunk_size = min_t(u64, data_chunk_size, SZ_1G);

	/*
	 * Since data allocations immediately use block groups as part of the
	 * reservation, because we assume that data reservations will == actual
	 * usage, we could potentially overcommit and then immediately have that
	 * available space used by a data allocation, which could put us in a
	 * bind when we get close to filling the file system.
	 *
	 * To handle this simply remove the data_chunk_size from the available
	 * space.  If we are relatively empty this won't affect our ability to
	 * overcommit much, and if we're very close to full it'll keep us from
	 * getting into a position where we've given ourselves very little
	 * metadata wiggle room.
	 */
	if (avail <= data_chunk_size)
		return 0;
	avail -= data_chunk_size;

	/*
	 * If we aren't flushing all things, let us overcommit up to
	 * half of the space.  If we can flush, don't let us overcommit
	 * too much, let it overcommit up to 1/8 of the space.
	 */
	if (flush == BTRFS_RESERVE_FLUSH_ALL)
		avail >>= 3;
	else
		avail >>= 1;
	return avail;
}
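
/*
 * Worked example for the final scaling above (numbers are illustrative):
 * with 17GiB of unallocated space on a single device (factor 1) and a 1GiB
 * data chunk reserve, avail starts at 16GiB.  A BTRFS_RESERVE_FLUSH_ALL
 * caller may then overcommit by 16GiB >> 3 = 2GiB, while a caller that
 * cannot flush everything may overcommit by 16GiB >> 1 = 8GiB.
 */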

int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
			 struct btrfs_space_info *space_info, u64 bytes,
			 enum btrfs_reserve_flush_enum flush)
{
	u64 avail;
	u64 used;

	/* Don't overcommit for data, including mixed block groups. */
	if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
		return 0;

	used = btrfs_space_info_used(space_info, true);
	avail = calc_available_free_space(fs_info, space_info, flush);

	if (used + bytes < space_info->total_bytes + avail)
		return 1;
	return 0;
}
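
/*
 * Sketch (illustrative only) of how callers compose this with the used/total
 * check; it mirrors the test in btrfs_try_granting_tickets() below.  The
 * function name is hypothetical.
 */
#if 0
static void example_reserve_check(struct btrfs_fs_info *fs_info,
				  struct btrfs_space_info *space_info,
				  u64 bytes, enum btrfs_reserve_flush_enum flush)
{
	spin_lock(&space_info->lock);
	if (btrfs_space_info_used(space_info, true) + bytes <=
	    space_info->total_bytes ||
	    btrfs_can_overcommit(fs_info, space_info, bytes, flush)) {
		btrfs_space_info_update_bytes_may_use(fs_info, space_info, bytes);
		/* The reservation succeeded. */
	}
	spin_unlock(&space_info->lock);
}
#endif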

static void remove_ticket(struct btrfs_space_info *space_info,
			  struct reserve_ticket *ticket)
{
	if (!list_empty(&ticket->list)) {
		list_del_init(&ticket->list);
		ASSERT(space_info->reclaim_size >= ticket->bytes);
		space_info->reclaim_size -= ticket->bytes;
	}
}

/*
 * This is for space we already have accounted in space_info->bytes_may_use, so
 * basically when we're returning space from block_rsv's.
 */
void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info)
{
	struct list_head *head;
	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;

	lockdep_assert_held(&space_info->lock);

	head = &space_info->priority_tickets;
again:
	while (!list_empty(head)) {
		struct reserve_ticket *ticket;
		u64 used = btrfs_space_info_used(space_info, true);

		ticket = list_first_entry(head, struct reserve_ticket, list);

		/* Check and see if our ticket can be satisfied now. */
		if ((used + ticket->bytes <= space_info->total_bytes) ||
		    btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
					 flush)) {
			btrfs_space_info_update_bytes_may_use(fs_info,
							      space_info,
							      ticket->bytes);
			remove_ticket(space_info, ticket);
			ticket->bytes = 0;
			space_info->tickets_id++;
			wake_up(&ticket->wait);
		} else {
			break;
		}
	}

	if (head == &space_info->priority_tickets) {
		head = &space_info->tickets;
		flush = BTRFS_RESERVE_FLUSH_ALL;
		goto again;
	}
}
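
/*
 * Typical caller pattern (sketch, not a tree function): return space into
 * one of the bytes_ counters under the lock, then let waiting tickets
 * consume it.  btrfs_space_info_update_bytes_pinned() is assumed to be the
 * generated update helper from space-info.h.
 */
#if 0
static void example_unpin_and_grant(struct btrfs_fs_info *fs_info,
				    struct btrfs_space_info *space_info,
				    u64 num_bytes)
{
	spin_lock(&space_info->lock);
	/* Give num_bytes back from ->bytes_pinned, then satisfy waiters. */
	btrfs_space_info_update_bytes_pinned(fs_info, space_info, -(s64)num_bytes);
	btrfs_try_granting_tickets(fs_info, space_info);
	spin_unlock(&space_info->lock);
}
#endif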

#define DUMP_BLOCK_RSV(fs_info, rsv_name)				\
do {									\
	struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name;		\
	spin_lock(&__rsv->lock);					\
	btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu",	\
		   __rsv->size, __rsv->reserved);			\
	spin_unlock(&__rsv->lock);					\
} while (0)

static const char *space_info_flag_to_str(const struct btrfs_space_info *space_info)
{
	switch (space_info->flags) {
	case BTRFS_BLOCK_GROUP_SYSTEM:
		return "SYSTEM";
	case BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA:
		return "DATA+METADATA";
	case BTRFS_BLOCK_GROUP_DATA:
		return "DATA";
	case BTRFS_BLOCK_GROUP_METADATA:
		return "METADATA";
	default:
		return "UNKNOWN";
	}
}

static void dump_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	DUMP_BLOCK_RSV(fs_info, global_block_rsv);
	DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
	DUMP_BLOCK_RSV(fs_info, chunk_block_rsv);
	DUMP_BLOCK_RSV(fs_info, delayed_block_rsv);
	DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);
}

static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
				    struct btrfs_space_info *info)
{
	const char *flag_str = space_info_flag_to_str(info);

	lockdep_assert_held(&info->lock);

	/* The free space could be negative in case of overcommit */
	btrfs_info(fs_info, "space_info %s has %lld free, is %sfull",
		   flag_str,
		   (s64)(info->total_bytes - btrfs_space_info_used(info, true)),
		   info->full ? "" : "not ");
	btrfs_info(fs_info,
"space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu",
		info->total_bytes, info->bytes_used, info->bytes_pinned,
		info->bytes_reserved, info->bytes_may_use,
		info->bytes_readonly, info->bytes_zone_unusable);
}

void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
			   struct btrfs_space_info *info, u64 bytes,
			   int dump_block_groups)
{
	struct btrfs_block_group *cache;
	u64 total_avail = 0;
	int index = 0;

	spin_lock(&info->lock);
	__btrfs_dump_space_info(fs_info, info);
	dump_global_block_rsv(fs_info);
	spin_unlock(&info->lock);

	if (!dump_block_groups)
		return;

	down_read(&info->groups_sem);
again:
	list_for_each_entry(cache, &info->block_groups[index], list) {
		u64 avail;

		spin_lock(&cache->lock);
		avail = cache->length - cache->used - cache->pinned -
			cache->reserved - cache->delalloc_bytes -
			cache->bytes_super - cache->zone_unusable;
		btrfs_info(fs_info,
"block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu delalloc %llu super %llu zone_unusable (%llu bytes available) %s",
			   cache->start, cache->length, cache->used, cache->pinned,
			   cache->reserved, cache->delalloc_bytes,
			   cache->bytes_super, cache->zone_unusable,
			   avail, cache->ro ? "[readonly]" : "");
		spin_unlock(&cache->lock);
		btrfs_dump_free_space(cache, bytes);
		total_avail += avail;
	}
	if (++index < BTRFS_NR_RAID_TYPES)
		goto again;
	up_read(&info->groups_sem);

	btrfs_info(fs_info, "%llu bytes available across all block groups", total_avail);
}
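
/*
 * Example debugging use (illustrative sketch, hypothetical function name):
 * dump the metadata space info and all of its block groups when a
 * reservation unexpectedly fails.
 */
#if 0
static void example_dump_metadata_info(struct btrfs_fs_info *fs_info,
				       u64 num_bytes)
{
	struct btrfs_space_info *sinfo =
		btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

	btrfs_dump_space_info(fs_info, sinfo, num_bytes, 1);
}
#endif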

static inline u64 calc_reclaim_items_nr(const struct btrfs_fs_info *fs_info,
					u64 to_reclaim)
{
	u64 bytes;
	u64 nr;

	bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
	nr = div64_u64(to_reclaim, bytes);
	if (!nr)
		nr = 1;
	return nr;
}
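
/*
 * E.g. if btrfs_calc_insert_metadata_size(fs_info, 1) came to 256KiB (the
 * exact value depends on node size and tree height), reclaiming 1MiB maps
 * to 4 items; FLUSH_DELAYED_ITEMS_NR in flush_space() then doubles that to 8.
 */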

#define EXTENT_SIZE_PER_ITEM	SZ_256K

/*
 * Shrink metadata reservation for delalloc.
 */
static void shrink_delalloc(struct btrfs_fs_info *fs_info,
			    struct btrfs_space_info *space_info,
			    u64 to_reclaim, bool wait_ordered,
			    bool for_preempt)
{
	struct btrfs_trans_handle *trans;
	u64 delalloc_bytes;
	u64 ordered_bytes;
	u64 items;
	long time_left;
	int loops;

	delalloc_bytes = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
	ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes);
	if (delalloc_bytes == 0 && ordered_bytes == 0)
		return;

	/* Calculate the number of pages we need to flush for the reservation. */
	if (to_reclaim == U64_MAX) {
		items = U64_MAX;
	} else {
		/*
		 * to_reclaim is set to however much metadata we need to
		 * reclaim, but reclaiming that much data doesn't really track
		 * exactly.  What we really want to do is reclaim full inodes'
		 * worth of reservations, however that's not available to us
		 * here.  We will take a fraction of the delalloc bytes for our
		 * flushing loops and hope for the best.  Delalloc will expand
		 * the amount we write to cover an entire dirty extent, which
		 * will reclaim the metadata reservation for that range.  If
		 * it's not enough subsequent flush stages will be more
		 * aggressive.
		 */
		to_reclaim = max(to_reclaim, delalloc_bytes >> 3);
		items = calc_reclaim_items_nr(fs_info, to_reclaim) * 2;
	}

	trans = current->journal_info;

	/*
	 * If we are doing more ordered than delalloc we need to just wait on
	 * ordered extents, otherwise we'll waste time trying to flush delalloc
	 * that likely won't give us the space back we need.
	 */
	if (ordered_bytes > delalloc_bytes && !for_preempt)
		wait_ordered = true;

	loops = 0;
	while ((delalloc_bytes || ordered_bytes) && loops < 3) {
		u64 temp = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
		long nr_pages = min_t(u64, temp, LONG_MAX);
		int async_pages;

		btrfs_start_delalloc_roots(fs_info, nr_pages, true);

		/*
		 * We need to make sure any outstanding async pages are now
		 * processed before we continue.  This is because things like
		 * sync_inode() try to be smart and skip writing if the inode is
		 * marked clean.  We don't use filemap_fdatawrite for flushing
		 * because we want to control how many pages we write out at a
		 * time, thus this is the only safe way to make sure we've
		 * waited for outstanding compressed workers to have started
		 * their jobs and thus have ordered extents set up properly.
		 *
		 * This exists because we do not want to wait for each
		 * individual inode to finish its async work, we simply want to
		 * start the IO on everybody, and then come back here and wait
		 * for all of the async work to catch up.  Once we're done with
		 * that we know we'll have ordered extents for everything and we
		 * can decide if we wait for that or not.
		 *
		 * If we choose to replace this in the future, make absolutely
		 * sure that the proper waiting is being done in the async case,
		 * as there have been bugs in that area before.
		 */
		async_pages = atomic_read(&fs_info->async_delalloc_pages);
		if (!async_pages)
			goto skip_async;

		/*
		 * We don't want to wait forever, if we wrote fewer pages in
		 * this loop than we have outstanding, only wait for that number
		 * of pages, otherwise we can wait for all async pages to finish
		 * before continuing.
		 */
		if (async_pages > nr_pages)
			async_pages -= nr_pages;
		else
			async_pages = 0;
		wait_event(fs_info->async_submit_wait,
			   atomic_read(&fs_info->async_delalloc_pages) <=
			   async_pages);
skip_async:
		loops++;
		if (wait_ordered && !trans) {
			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
		} else {
			time_left = schedule_timeout_killable(1);
			if (time_left)
				break;
		}

		/*
		 * If we are for preemption we just want a one-shot of delalloc
		 * flushing so we can stop flushing if we decide we don't need
		 * to anymore.
		 */
		if (for_preempt)
			break;

		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets) &&
		    list_empty(&space_info->priority_tickets)) {
			spin_unlock(&space_info->lock);
			break;
		}
		spin_unlock(&space_info->lock);

		delalloc_bytes = percpu_counter_sum_positive(
						&fs_info->delalloc_bytes);
		ordered_bytes = percpu_counter_sum_positive(
						&fs_info->ordered_bytes);
	}
}

/*
 * Try to flush some data based on policy set by @state. This is only advisory
 * and may fail for various reasons. The caller is supposed to examine the
 * state of @space_info to detect the outcome.
 */
static void flush_space(struct btrfs_fs_info *fs_info,
		       struct btrfs_space_info *space_info, u64 num_bytes,
		       enum btrfs_flush_state state, bool for_preempt)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_trans_handle *trans;
	int nr;
	int ret = 0;

	switch (state) {
	case FLUSH_DELAYED_ITEMS_NR:
	case FLUSH_DELAYED_ITEMS:
		if (state == FLUSH_DELAYED_ITEMS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
		else
			nr = -1;

		trans = btrfs_join_transaction_nostart(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			if (ret == -ENOENT)
				ret = 0;
			break;
		}
		ret = btrfs_run_delayed_items_nr(trans, nr);
		btrfs_end_transaction(trans);
		break;
	case FLUSH_DELALLOC:
	case FLUSH_DELALLOC_WAIT:
	case FLUSH_DELALLOC_FULL:
		if (state == FLUSH_DELALLOC_FULL)
			num_bytes = U64_MAX;
		shrink_delalloc(fs_info, space_info, num_bytes,
				state != FLUSH_DELALLOC, for_preempt);
		break;
	case FLUSH_DELAYED_REFS_NR:
	case FLUSH_DELAYED_REFS:
		trans = btrfs_join_transaction_nostart(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			if (ret == -ENOENT)
				ret = 0;
			break;
		}
		if (state == FLUSH_DELAYED_REFS_NR)
			btrfs_run_delayed_refs(trans, num_bytes);
		else
			btrfs_run_delayed_refs(trans, 0);
		btrfs_end_transaction(trans);
		break;
	case ALLOC_CHUNK:
	case ALLOC_CHUNK_FORCE:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_chunk_alloc(trans,
				btrfs_get_alloc_profile(fs_info, space_info->flags),
				(state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE :
					CHUNK_ALLOC_FORCE);
		btrfs_end_transaction(trans);

		if (ret > 0 || ret == -ENOSPC)
			ret = 0;
		break;
	case RUN_DELAYED_IPUTS:
		/*
		 * If we have pending delayed iputs then we could free up a
		 * bunch of pinned space, so make sure we run the iputs before
		 * we do our pinned bytes check below.
		 */
		btrfs_run_delayed_iputs(fs_info);
		btrfs_wait_on_delayed_iputs(fs_info);
		break;
	case COMMIT_TRANS:
		ASSERT(current->journal_info == NULL);
		/*
		 * We don't want to start a new transaction, just attach to the
		 * current one or wait for it to fully commit in case its commit
		 * is happening at the moment.  Note: we don't use a nostart
		 * join because that does not wait for a transaction to fully
		 * commit (only for it to be unblocked, state
		 * TRANS_STATE_UNBLOCKED).
		 */
		trans = btrfs_attach_transaction_barrier(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			if (ret == -ENOENT)
				ret = 0;
			break;
		}
		ret = btrfs_commit_transaction(trans);
		break;
	default:
		ret = -ENOSPC;
		break;
	}

	trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
				ret, for_preempt);
}

static inline u64
btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info)
{
	u64 used;
	u64 avail;
	u64 to_reclaim = space_info->reclaim_size;

	lockdep_assert_held(&space_info->lock);

	avail = calc_available_free_space(fs_info, space_info,
					  BTRFS_RESERVE_FLUSH_ALL);
	used = btrfs_space_info_used(space_info, true);

	/*
	 * We may be flushing because suddenly we have less space than we had
	 * before, and now we're well over-committed based on our current free
	 * space.  If that's the case add in our overage so we make sure to put
	 * appropriate pressure on the flushing state machine.
	 */
	if (space_info->total_bytes + avail < used)
		to_reclaim += used - (space_info->total_bytes + avail);

	return to_reclaim;
}

static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
				    struct btrfs_space_info *space_info)
{
	const u64 global_rsv_size = btrfs_block_rsv_reserved(&fs_info->global_block_rsv);
	u64 ordered, delalloc;
	u64 thresh;
	u64 used;

	thresh = mult_perc(space_info->total_bytes, 90);

	lockdep_assert_held(&space_info->lock);

	/* If we're just plain full then async reclaim just slows us down. */
	if ((space_info->bytes_used + space_info->bytes_reserved +
	     global_rsv_size) >= thresh)
		return false;

	used = space_info->bytes_may_use + space_info->bytes_pinned;

	/* The total flushable belongs to the global rsv, don't flush. */
	if (global_rsv_size >= used)
		return false;

	/*
	 * 128MiB is 1/4 of the maximum global rsv size.  If we have less than
	 * that devoted to other reservations then there's no sense in flushing,
	 * we don't have a lot of things that need flushing.
	 */
	if (used - global_rsv_size <= SZ_128M)
		return false;

	/*
	 * We have tickets queued, bail so we don't compete with the async
	 * flushers.
	 */
	if (space_info->reclaim_size)
		return false;

	/*
	 * If we have over half of the free space occupied by reservations or
	 * pinned then we want to start flushing.
	 *
	 * We do not do the traditional thing here, which is to say
	 *
	 *   if (used >= ((total_bytes + avail) / 2))
	 *     return 1;
	 *
	 * because this doesn't quite work how we want.  If we had more than 50%
	 * of the space_info used by bytes_used and we had 0 available we'd just
	 * constantly run the background flusher.  Instead we want it to kick in
	 * if our reclaimable space exceeds our clamped free space.
	 *
	 * Our clamping range is 2^1 -> 2^8.  Practically speaking that means
	 * the following:
	 *
	 * Amount of free space Minimum threshold       Maximum threshold
	 *
	 *        256GiB                     1GiB                  128GiB
	 *        128GiB                   512MiB                   64GiB
	 *         64GiB                   256MiB                   32GiB
	 *         32GiB                   128MiB                   16GiB
	 *         16GiB                    64MiB                    8GiB
	 *
	 * This is the range our thresholds will fall in, corresponding to how
	 * much delalloc we need for the background flusher to kick in.
	 */

	thresh = calc_available_free_space(fs_info, space_info,
					   BTRFS_RESERVE_FLUSH_ALL);
	used = space_info->bytes_used + space_info->bytes_reserved +
	       space_info->bytes_readonly + global_rsv_size;
	if (used < space_info->total_bytes)
		thresh += space_info->total_bytes - used;
	thresh >>= space_info->clamp;

	used = space_info->bytes_pinned;

	/*
	 * If we have more ordered bytes than delalloc bytes then we're either
	 * doing a lot of DIO, or we simply don't have a lot of delalloc waiting
	 * around.  Preemptive flushing is only useful in that it can free up
	 * space before tickets need to wait for things to finish.  In the case
	 * of ordered extents, preemptively waiting on ordered extents gets us
	 * nothing, if our reservations are tied up in ordered extents we'll
	 * simply have to slow down writers by forcing them to wait on ordered
	 * extents.
	 *
	 * In the case that ordered is larger than delalloc, only include the
	 * block reserves that we would actually be able to directly reclaim
	 * from.  In this case if we're heavy on metadata operations this will
	 * clearly be heavy enough to warrant preemptive flushing.  In the case
	 * of heavy DIO or ordered reservations, preemptive flushing will just
	 * waste time and cause us to slow down.
	 *
	 * We want to make sure we truly are maxed out on ordered however, so
	 * cut ordered in half, and if it's still higher than delalloc then we
	 * can keep flushing.  This is to avoid the case where we start
	 * flushing, and now delalloc == ordered and we stop preemptively
	 * flushing when we could still have several gigs of delalloc to flush.
	 */
	ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1;
	delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes);
	if (ordered >= delalloc)
		used += btrfs_block_rsv_reserved(&fs_info->delayed_refs_rsv) +
			btrfs_block_rsv_reserved(&fs_info->delayed_block_rsv);
	else
		used += space_info->bytes_may_use - global_rsv_size;

	return (used >= thresh && !btrfs_fs_closing(fs_info) &&
		!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
}

static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
				  struct btrfs_space_info *space_info,
				  struct reserve_ticket *ticket)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 min_bytes;

	if (!ticket->steal)
		return false;

	if (global_rsv->space_info != space_info)
		return false;

	spin_lock(&global_rsv->lock);
	min_bytes = mult_perc(global_rsv->size, 10);
	if (global_rsv->reserved < min_bytes + ticket->bytes) {
		spin_unlock(&global_rsv->lock);
		return false;
	}
	global_rsv->reserved -= ticket->bytes;
	remove_ticket(space_info, ticket);
	ticket->bytes = 0;
	wake_up(&ticket->wait);
	space_info->tickets_id++;
	if (global_rsv->reserved < global_rsv->size)
		global_rsv->full = 0;
	spin_unlock(&global_rsv->lock);

	return true;
}
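
/*
 * E.g. with a 512MiB global reserve, stealing always leaves at least ~51MiB
 * (10% of ->size) behind: a 100MiB ticket is only satisfied this way if
 * ->reserved is currently at least ~151MiB.
 */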

/*
 * We've exhausted our flushing, start failing tickets.
 *
 * @fs_info:    fs_info for this fs
 * @space_info: the space info we were flushing
 *
 * We call this when we've exhausted our flushing ability and haven't made
 * progress in satisfying tickets.  The reservation code handles tickets in
 * order, so if there is a large ticket first and then smaller ones we could
 * very well satisfy the smaller tickets.  This will attempt to wake up any
 * tickets in the list to catch this case.
 *
 * This function returns true if it was able to make progress by clearing out
 * other tickets, or if it stumbles across a ticket that was smaller than the
 * first ticket.
 */
static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
				   struct btrfs_space_info *space_info)
{
	struct reserve_ticket *ticket;
	u64 tickets_id = space_info->tickets_id;
	const bool aborted = BTRFS_FS_ERROR(fs_info);

	trace_btrfs_fail_all_tickets(fs_info, space_info);

	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		btrfs_info(fs_info, "cannot satisfy tickets, dumping space info");
		__btrfs_dump_space_info(fs_info, space_info);
	}

	while (!list_empty(&space_info->tickets) &&
	       tickets_id == space_info->tickets_id) {
		ticket = list_first_entry(&space_info->tickets,
					  struct reserve_ticket, list);

		if (!aborted && steal_from_global_rsv(fs_info, space_info, ticket))
			return true;

		if (!aborted && btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_info(fs_info, "failing ticket with %llu bytes",
				   ticket->bytes);

		remove_ticket(space_info, ticket);
		if (aborted)
			ticket->error = -EIO;
		else
			ticket->error = -ENOSPC;
		wake_up(&ticket->wait);

		/*
		 * We're just throwing tickets away, so more flushing may not
		 * trip over btrfs_try_granting_tickets, so we need to call it
		 * here to see if we can make progress with the next ticket in
		 * the list.
		 */
		if (!aborted)
			btrfs_try_granting_tickets(fs_info, space_info);
	}
	return (tickets_id != space_info->tickets_id);
}

/*
 * This is for normal flushers, we can wait all goddamned day if we want to.  We
 * will loop and continuously try to flush as long as we are making progress.
 * We count progress as clearing off tickets each time we have to loop.
 */
static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	u64 to_reclaim;
	enum btrfs_flush_state flush_state;
	int commit_cycles = 0;
	u64 last_tickets_id;

	fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
	if (!to_reclaim) {
		space_info->flush = 0;
		spin_unlock(&space_info->lock);
		return;
	}
	last_tickets_id = space_info->tickets_id;
	spin_unlock(&space_info->lock);

	flush_state = FLUSH_DELAYED_ITEMS_NR;
	do {
		flush_space(fs_info, space_info, to_reclaim, flush_state, false);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}
		to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
							      space_info);
		if (last_tickets_id == space_info->tickets_id) {
			flush_state++;
		} else {
			last_tickets_id = space_info->tickets_id;
			flush_state = FLUSH_DELAYED_ITEMS_NR;
			if (commit_cycles)
				commit_cycles--;
		}

		/*
		 * We do not want to empty the system of delalloc unless we're
		 * under heavy pressure, so allow one trip through the flushing
		 * logic before we start doing a FLUSH_DELALLOC_FULL.
		 */
		if (flush_state == FLUSH_DELALLOC_FULL && !commit_cycles)
			flush_state++;

		/*
		 * We don't want to force a chunk allocation until we've tried
		 * pretty hard to reclaim space.  Think of the case where we
		 * freed up a bunch of space and so have a lot of pinned space
		 * to reclaim.  We would rather use that than possibly create an
		 * underutilized metadata chunk.  So if this is our first run
		 * through the flushing state machine skip ALLOC_CHUNK_FORCE and
		 * commit the transaction.  If nothing has changed the next go
		 * around then we can force a chunk allocation.
		 */
		if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
			flush_state++;

		if (flush_state > COMMIT_TRANS) {
			commit_cycles++;
			if (commit_cycles > 2) {
				if (maybe_fail_all_tickets(fs_info, space_info)) {
					flush_state = FLUSH_DELAYED_ITEMS_NR;
					commit_cycles--;
				} else {
					space_info->flush = 0;
				}
			} else {
				flush_state = FLUSH_DELAYED_ITEMS_NR;
			}
		}
		spin_unlock(&space_info->lock);
	} while (flush_state <= COMMIT_TRANS);
}

/*
 * This handles pre-flushing of metadata space before we get to the point that
 * we need to start blocking threads on tickets.  The logic here is different
 * from the other flush paths because it doesn't rely on tickets to tell us how
 * much we need to flush, instead it attempts to keep us below the 90% full
 * watermark of space by flushing whichever reservation pool is currently the
 * largest.
 */
static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	struct btrfs_block_rsv *delayed_block_rsv;
	struct btrfs_block_rsv *delayed_refs_rsv;
	struct btrfs_block_rsv *global_rsv;
	struct btrfs_block_rsv *trans_rsv;
	int loops = 0;

	fs_info = container_of(work, struct btrfs_fs_info,
			       preempt_reclaim_work);
	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	delayed_block_rsv = &fs_info->delayed_block_rsv;
	delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	global_rsv = &fs_info->global_block_rsv;
	trans_rsv = &fs_info->trans_block_rsv;

	spin_lock(&space_info->lock);
	while (need_preemptive_reclaim(fs_info, space_info)) {
		enum btrfs_flush_state flush;
		u64 delalloc_size = 0;
		u64 to_reclaim, block_rsv_size;
		const u64 global_rsv_size = btrfs_block_rsv_reserved(global_rsv);

		loops++;

		/*
		 * We don't have a precise counter for the metadata being
		 * reserved for delalloc, so we'll approximate it by subtracting
		 * out the block rsv's space from the bytes_may_use.  If that
		 * amount is higher than the individual reserves, then we can
		 * assume it's tied up in delalloc reservations.
		 */
		block_rsv_size = global_rsv_size +
			btrfs_block_rsv_reserved(delayed_block_rsv) +
			btrfs_block_rsv_reserved(delayed_refs_rsv) +
			btrfs_block_rsv_reserved(trans_rsv);
		if (block_rsv_size < space_info->bytes_may_use)
			delalloc_size = space_info->bytes_may_use - block_rsv_size;

		/*
		 * We don't want to include the global_rsv in our calculation,
		 * because that's space we can't touch.  Subtract it from the
		 * block_rsv_size for the next checks.
		 */
		block_rsv_size -= global_rsv_size;

		/*
		 * We really want to avoid flushing delalloc too much, as it
		 * could result in poor allocation patterns, so only flush it if
		 * it's larger than the rest of the pools combined.
		 */
		if (delalloc_size > block_rsv_size) {
			to_reclaim = delalloc_size;
			flush = FLUSH_DELALLOC;
		} else if (space_info->bytes_pinned >
			   (btrfs_block_rsv_reserved(delayed_block_rsv) +
			    btrfs_block_rsv_reserved(delayed_refs_rsv))) {
			to_reclaim = space_info->bytes_pinned;
			flush = COMMIT_TRANS;
		} else if (btrfs_block_rsv_reserved(delayed_block_rsv) >
			   btrfs_block_rsv_reserved(delayed_refs_rsv)) {
			to_reclaim = btrfs_block_rsv_reserved(delayed_block_rsv);
			flush = FLUSH_DELAYED_ITEMS_NR;
		} else {
			to_reclaim = btrfs_block_rsv_reserved(delayed_refs_rsv);
			flush = FLUSH_DELAYED_REFS_NR;
		}

		spin_unlock(&space_info->lock);

		/*
		 * We don't want to reclaim everything, just a portion, so scale
		 * down the to_reclaim by 1/4.  If it takes us down to 0,
		 * reclaim 1 item's worth.
		 */
		to_reclaim >>= 2;
		if (!to_reclaim)
			to_reclaim = btrfs_calc_insert_metadata_size(fs_info, 1);
		flush_space(fs_info, space_info, to_reclaim, flush, true);
		cond_resched();
		spin_lock(&space_info->lock);
	}

	/* We only went through once, back off our clamping. */
	if (loops == 1 && !space_info->reclaim_size)
		space_info->clamp = max(1, space_info->clamp - 1);
	trace_btrfs_done_preemptive_reclaim(fs_info, space_info);
	spin_unlock(&space_info->lock);
}
1244
1245 /*
1246  * FLUSH_DELALLOC_WAIT:
1247  *   Space is freed from flushing delalloc in one of two ways.
1248  *
1249  *   1) compression is on and we allocate less space than we reserved
1250  *   2) we are overwriting existing space
1251  *
1252  *   For #1 that extra space is reclaimed as soon as the delalloc pages are
1253  *   COWed, by way of btrfs_add_reserved_bytes() which adds the actual extent
1254  *   length to ->bytes_reserved, and subtracts the reserved space from
1255  *   ->bytes_may_use.
1256  *
1257  *   For #2 this is trickier.  Once the ordered extent runs we will drop the
1258  *   extent in the range we are overwriting, which creates a delayed ref for
1259  *   that freed extent.  This however is not reclaimed until the transaction
1260  *   commits, thus the next stages.
1261  *
1262  * RUN_DELAYED_IPUTS:
1263  *   If we are freeing inodes, we want to make sure all delayed iputs have
1264  *   completed, because they could have been on an inode with i_nlink == 0, and
1265  *   thus have been truncated and freed up space.  But again this space is not
1266  *   immediately re-usable; it comes in the form of a delayed ref, which must be
1267  *   run and then the transaction must be committed.
1268  *
1269  * COMMIT_TRANS:
1270  *   This is where we reclaim all of the pinned space generated by running the
1271  *   iputs.
1272  *
1273  * ALLOC_CHUNK_FORCE:
1274  *   For data we start with forced chunk allocation; however, we could have been
1275  *   full before, and the transaction commit could have freed new block groups,
1276  *   so if we now have space to allocate, do the forced chunk allocation.
1277  */
1278 static const enum btrfs_flush_state data_flush_states[] = {
1279         FLUSH_DELALLOC_FULL,
1280         RUN_DELAYED_IPUTS,
1281         COMMIT_TRANS,
1282         ALLOC_CHUNK_FORCE,
1283 };
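
/*
 * Illustrative sketch (added for exposition; the example_* names are
 * hypothetical, not btrfs symbols) of the progress-driven walk the async
 * reclaim workers make over a state array such as data_flush_states[]:
 * a pass that made progress (a ticket was granted, observed as a changed
 * tickets_id) restarts from the first, cheapest state, and only a pass
 * that freed nothing escalates to the next, more expensive one.
 */
static int example_try_flush(enum btrfs_flush_state state,
			     unsigned long *tickets_id)
{
	/* Pretend only a transaction commit frees enough to grant a ticket. */
	if (state == COMMIT_TRANS) {
		(*tickets_id)++;
		return 1;
	}
	return 0;
}

static void example_walk_flush_states(unsigned int nr_tickets)
{
	unsigned long tickets_id = 0, last_tickets_id = 0;
	int flush_state = 0;

	while (nr_tickets && flush_state < ARRAY_SIZE(data_flush_states)) {
		if (example_try_flush(data_flush_states[flush_state],
				      &tickets_id))
			nr_tickets--;		/* a ticket was satisfied */
		if (tickets_id == last_tickets_id) {
			flush_state++;		/* no progress: escalate */
		} else {
			last_tickets_id = tickets_id;
			flush_state = 0;	/* progress: retry cheap states */
		}
	}
}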
1284
1285 static void btrfs_async_reclaim_data_space(struct work_struct *work)
1286 {
1287         struct btrfs_fs_info *fs_info;
1288         struct btrfs_space_info *space_info;
1289         u64 last_tickets_id;
1290         enum btrfs_flush_state flush_state = 0;
1291
1292         fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work);
1293         space_info = fs_info->data_sinfo;
1294
1295         spin_lock(&space_info->lock);
1296         if (list_empty(&space_info->tickets)) {
1297                 space_info->flush = 0;
1298                 spin_unlock(&space_info->lock);
1299                 return;
1300         }
1301         last_tickets_id = space_info->tickets_id;
1302         spin_unlock(&space_info->lock);
1303
1304         while (!space_info->full) {
1305                 flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
1306                 spin_lock(&space_info->lock);
1307                 if (list_empty(&space_info->tickets)) {
1308                         space_info->flush = 0;
1309                         spin_unlock(&space_info->lock);
1310                         return;
1311                 }
1312
1313                 /* Something happened, fail everything and bail. */
1314                 if (BTRFS_FS_ERROR(fs_info))
1315                         goto aborted_fs;
1316                 last_tickets_id = space_info->tickets_id;
1317                 spin_unlock(&space_info->lock);
1318         }
1319
1320         while (flush_state < ARRAY_SIZE(data_flush_states)) {
1321                 flush_space(fs_info, space_info, U64_MAX,
1322                             data_flush_states[flush_state], false);
1323                 spin_lock(&space_info->lock);
1324                 if (list_empty(&space_info->tickets)) {
1325                         space_info->flush = 0;
1326                         spin_unlock(&space_info->lock);
1327                         return;
1328                 }
1329
1330                 if (last_tickets_id == space_info->tickets_id) {
1331                         flush_state++;
1332                 } else {
1333                         last_tickets_id = space_info->tickets_id;
1334                         flush_state = 0;
1335                 }
1336
1337                 if (flush_state >= ARRAY_SIZE(data_flush_states)) {
1338                         if (space_info->full) {
1339                                 if (maybe_fail_all_tickets(fs_info, space_info))
1340                                         flush_state = 0;
1341                                 else
1342                                         space_info->flush = 0;
1343                         } else {
1344                                 flush_state = 0;
1345                         }
1346
1347                         /* Something happened, fail everything and bail. */
1348                         if (BTRFS_FS_ERROR(fs_info))
1349                                 goto aborted_fs;
1351                 }
1352                 spin_unlock(&space_info->lock);
1353         }
1354         return;
1355
1356 aborted_fs:
1357         maybe_fail_all_tickets(fs_info, space_info);
1358         space_info->flush = 0;
1359         spin_unlock(&space_info->lock);
1360 }
1361
1362 void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info)
1363 {
1364         INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space);
1365         INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space);
1366         INIT_WORK(&fs_info->preempt_reclaim_work,
1367                   btrfs_preempt_reclaim_metadata_space);
1368 }
1369
1370 static const enum btrfs_flush_state priority_flush_states[] = {
1371         FLUSH_DELAYED_ITEMS_NR,
1372         FLUSH_DELAYED_ITEMS,
1373         ALLOC_CHUNK,
1374 };
1375
1376 static const enum btrfs_flush_state evict_flush_states[] = {
1377         FLUSH_DELAYED_ITEMS_NR,
1378         FLUSH_DELAYED_ITEMS,
1379         FLUSH_DELAYED_REFS_NR,
1380         FLUSH_DELAYED_REFS,
1381         FLUSH_DELALLOC,
1382         FLUSH_DELALLOC_WAIT,
1383         FLUSH_DELALLOC_FULL,
1384         ALLOC_CHUNK,
1385         COMMIT_TRANS,
1386 };
1387
1388 static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
1389                                 struct btrfs_space_info *space_info,
1390                                 struct reserve_ticket *ticket,
1391                                 const enum btrfs_flush_state *states,
1392                                 int states_nr)
1393 {
1394         u64 to_reclaim;
1395         int flush_state = 0;
1396
1397         spin_lock(&space_info->lock);
1398         to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
1399         /*
1400          * This is the priority reclaim path, so to_reclaim could be >0 still
1401          * because we may have only satisfied the priority tickets and still
1402          * left non priority tickets on the list.  We would then have
1403          * left non-priority tickets on the list.  We would then have
1404          */
1405         if (ticket->bytes == 0) {
1406                 spin_unlock(&space_info->lock);
1407                 return;
1408         }
1409
1410         while (flush_state < states_nr) {
1411                 spin_unlock(&space_info->lock);
1412                 flush_space(fs_info, space_info, to_reclaim, states[flush_state],
1413                             false);
1414                 flush_state++;
1415                 spin_lock(&space_info->lock);
1416                 if (ticket->bytes == 0) {
1417                         spin_unlock(&space_info->lock);
1418                         return;
1419                 }
1420         }
1421
1422         /*
1423          * Attempt to steal from the global rsv if we can, except if the fs was
1424          * turned into error mode due to a transaction abort when flushing space
1425          * above, in that case fail with the abort error instead of returning
1426          * success to the caller if we can steal from the global rsv - this is
1427          * just to have the caller fail immediately instead of later when trying to
1428          * modify the fs, making it easier to debug -ENOSPC problems.
1429          */
1430         if (BTRFS_FS_ERROR(fs_info)) {
1431                 ticket->error = BTRFS_FS_ERROR(fs_info);
1432                 remove_ticket(space_info, ticket);
1433         } else if (!steal_from_global_rsv(fs_info, space_info, ticket)) {
1434                 ticket->error = -ENOSPC;
1435                 remove_ticket(space_info, ticket);
1436         }
1437
1438         /*
1439          * We must run try_granting_tickets here because we could be a large
1440          * ticket in front of a smaller ticket that can now be satisfied with
1441          * the available space.
1442          */
1443         btrfs_try_granting_tickets(fs_info, space_info);
1444         spin_unlock(&space_info->lock);
1445 }
1446
1447 static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info,
1448                                         struct btrfs_space_info *space_info,
1449                                         struct reserve_ticket *ticket)
1450 {
1451         spin_lock(&space_info->lock);
1452
1453         /* We could have been granted before we got here. */
1454         if (ticket->bytes == 0) {
1455                 spin_unlock(&space_info->lock);
1456                 return;
1457         }
1458
1459         while (!space_info->full) {
1460                 spin_unlock(&space_info->lock);
1461                 flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
1462                 spin_lock(&space_info->lock);
1463                 if (ticket->bytes == 0) {
1464                         spin_unlock(&space_info->lock);
1465                         return;
1466                 }
1467         }
1468
1469         ticket->error = -ENOSPC;
1470         remove_ticket(space_info, ticket);
1471         btrfs_try_granting_tickets(fs_info, space_info);
1472         spin_unlock(&space_info->lock);
1473 }
1474
1475 static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
1476                                 struct btrfs_space_info *space_info,
1477                                 struct reserve_ticket *ticket)
1479 {
1480         DEFINE_WAIT(wait);
1481         int ret = 0;
1482
1483         spin_lock(&space_info->lock);
1484         while (ticket->bytes > 0 && ticket->error == 0) {
1485                 ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
1486                 if (ret) {
1487                         /*
1488                          * Delete us from the list. After we unlock the space
1489                          * info, we don't want the async reclaim job to reserve
1490                          * space for this ticket. If that would happen, then the
1491                          * ticket's task would not know that space was reserved
1492                          * despite getting an error, resulting in a space leak
1493                          * (bytes_may_use counter of our space_info).
1494                          */
1495                         remove_ticket(space_info, ticket);
1496                         ticket->error = -EINTR;
1497                         break;
1498                 }
1499                 spin_unlock(&space_info->lock);
1500
1501                 schedule();
1502
1503                 finish_wait(&ticket->wait, &wait);
1504                 spin_lock(&space_info->lock);
1505         }
1506         spin_unlock(&space_info->lock);
1507 }
1508
1509 /*
1510  * Do the appropriate flushing and waiting for a ticket.
1511  *
1512  * @fs_info:    the filesystem
1513  * @space_info: space info for the reservation
1514  * @ticket:     ticket for the reservation
1515  * @start_ns:   timestamp when the reservation started
1516  * @orig_bytes: amount of bytes originally reserved
1517  * @flush:      how aggressively we are allowed to flush
1518  *
1519  * This does the work of figuring out how to flush for the ticket, waiting for
1520  * the reservation, and returning the appropriate error if there is one.
1521  */
1522 static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
1523                                  struct btrfs_space_info *space_info,
1524                                  struct reserve_ticket *ticket,
1525                                  u64 start_ns, u64 orig_bytes,
1526                                  enum btrfs_reserve_flush_enum flush)
1527 {
1528         int ret;
1529
1530         switch (flush) {
1531         case BTRFS_RESERVE_FLUSH_DATA:
1532         case BTRFS_RESERVE_FLUSH_ALL:
1533         case BTRFS_RESERVE_FLUSH_ALL_STEAL:
1534                 wait_reserve_ticket(fs_info, space_info, ticket);
1535                 break;
1536         case BTRFS_RESERVE_FLUSH_LIMIT:
1537                 priority_reclaim_metadata_space(fs_info, space_info, ticket,
1538                                                 priority_flush_states,
1539                                                 ARRAY_SIZE(priority_flush_states));
1540                 break;
1541         case BTRFS_RESERVE_FLUSH_EVICT:
1542                 priority_reclaim_metadata_space(fs_info, space_info, ticket,
1543                                                 evict_flush_states,
1544                                                 ARRAY_SIZE(evict_flush_states));
1545                 break;
1546         case BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE:
1547                 priority_reclaim_data_space(fs_info, space_info, ticket);
1548                 break;
1549         default:
1550                 ASSERT(0);
1551                 break;
1552         }
1553
1554         ret = ticket->error;
1555         ASSERT(list_empty(&ticket->list));
1556         /*
1557          * Check that we can't have an error set if the reservation succeeded,
1558          * as that would confuse tasks and lead them to error out without
1559          * releasing reserved space (if an error happens the expectation is that
1560          * space wasn't reserved at all).
1561          */
1562         ASSERT(!(ticket->bytes == 0 && ticket->error));
1563         trace_btrfs_reserve_ticket(fs_info, space_info->flags, orig_bytes,
1564                                    start_ns, flush, ticket->error);
1565         return ret;
1566 }
1567
1568 /*
1569  * This returns true if this flush state will go through the ordinary flushing
1570  * code.
1571  */
1572 static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush)
1573 {
1574         return  (flush == BTRFS_RESERVE_FLUSH_ALL) ||
1575                 (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
1576 }
1577
1578 static inline void maybe_clamp_preempt(struct btrfs_fs_info *fs_info,
1579                                        struct btrfs_space_info *space_info)
1580 {
1581         u64 ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes);
1582         u64 delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
1583
1584         /*
1585          * If we're heavy on ordered operations then clamping won't help us.  We
1586          * need to clamp specifically to keep up with dirtying buffered
1587          * writers, because there's not a 1:1 correlation of writing delalloc
1588          * and freeing space, like there is with flushing delayed refs or
1589          * delayed nodes.  If we're already more ordered than delalloc then
1590          * we're keeping up, otherwise we aren't and should probably clamp.
1591          */
1592         if (ordered < delalloc)
1593                 space_info->clamp = min(space_info->clamp + 1, 8);
1594 }
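
/*
 * Worked example (illustrative numbers) of how the clamp evolves: it starts
 * at 1; each time a ticket forces the slow flusher awake while delalloc is
 * outpacing ordered bytes, the helper above bumps it by one, capped at 8;
 * the preemptive worker backs it off by one, floored at 1, after a pass
 * that needed only a single loop with nothing left in reclaim_size.  So
 * three forced tickets in a row take a fresh fs from 1 to 4, and one easy
 * preemptive pass then relaxes it back to 3.
 */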
1595
1596 static inline bool can_steal(enum btrfs_reserve_flush_enum flush)
1597 {
1598         return (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
1599                 flush == BTRFS_RESERVE_FLUSH_EVICT);
1600 }
1601
1602 /*
1603  * NO_FLUSH and FLUSH_EMERGENCY don't want to create a ticket; they just want
1604  * to fail as quickly as possible.
1605  */
1606 static inline bool can_ticket(enum btrfs_reserve_flush_enum flush)
1607 {
1608         return (flush != BTRFS_RESERVE_NO_FLUSH &&
1609                 flush != BTRFS_RESERVE_FLUSH_EMERGENCY);
1610 }
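
/*
 * Taken together, the three helpers above classify the flush modes they
 * name (the remaining modes take a ticket but neither steal from the
 * global rsv nor count as normal flushing):
 *
 *   flush mode                       ticket?  steal rsv?  normal flushing?
 *   BTRFS_RESERVE_NO_FLUSH           no       no          no
 *   BTRFS_RESERVE_FLUSH_EMERGENCY    no       no          no
 *   BTRFS_RESERVE_FLUSH_ALL          yes      no          yes
 *   BTRFS_RESERVE_FLUSH_ALL_STEAL    yes      yes         yes
 *   BTRFS_RESERVE_FLUSH_EVICT        yes      yes         no
 */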
1611
1612 /*
1613  * Try to reserve bytes from the block_rsv's space.
1614  *
1615  * @fs_info:    the filesystem
1616  * @space_info: space info we want to allocate from
1617  * @orig_bytes: number of bytes we want
1618  * @flush:      whether or not we can flush to make our reservation
1619  *
1620  * This will reserve orig_bytes number of bytes from the space info associated
1621  * with the block_rsv.  If there is not enough space it will make an attempt to
1622  * flush out space to make room.  It will do this by flushing delalloc if
1623  * possible or committing the transaction.  If flush is 0 then no attempts to
1624  * regain reservations will be made and this will fail if there is not enough
1625  * space already.
1626  */
1627 static int __reserve_bytes(struct btrfs_fs_info *fs_info,
1628                            struct btrfs_space_info *space_info, u64 orig_bytes,
1629                            enum btrfs_reserve_flush_enum flush)
1630 {
1631         struct work_struct *async_work;
1632         struct reserve_ticket ticket;
1633         u64 start_ns = 0;
1634         u64 used;
1635         int ret = -ENOSPC;
1636         bool pending_tickets;
1637
1638         ASSERT(orig_bytes);
1639         /*
1640          * If we have a transaction handle (current->journal_info != NULL), then
1641          * the flush method must be neither BTRFS_RESERVE_FLUSH_ALL* nor
1642          * BTRFS_RESERVE_FLUSH_EVICT, as we could deadlock because those
1643          * flushing methods can trigger transaction commits.
1644          */
1645         if (current->journal_info) {
1646                 /* One assert per line for easier debugging. */
1647                 ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL);
1648                 ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL_STEAL);
1649                 ASSERT(flush != BTRFS_RESERVE_FLUSH_EVICT);
1650         }
1651
1652         if (flush == BTRFS_RESERVE_FLUSH_DATA)
1653                 async_work = &fs_info->async_data_reclaim_work;
1654         else
1655                 async_work = &fs_info->async_reclaim_work;
1656
1657         spin_lock(&space_info->lock);
1658         used = btrfs_space_info_used(space_info, true);
1659
1660         /*
1661          * We don't want NO_FLUSH allocations to jump everybody; they can
1662          * generally handle ENOSPC in a different way, so treat them the same as
1663          * normal flushers when it comes to skipping pending tickets.
1664          */
1665         if (is_normal_flushing(flush) || (flush == BTRFS_RESERVE_NO_FLUSH))
1666                 pending_tickets = !list_empty(&space_info->tickets) ||
1667                         !list_empty(&space_info->priority_tickets);
1668         else
1669                 pending_tickets = !list_empty(&space_info->priority_tickets);
1670
1671         /*
1672          * Carry on if we have enough space (short-circuit) OR call
1673          * can_overcommit() to ensure we can overcommit to continue.
1674          */
1675         if (!pending_tickets &&
1676             ((used + orig_bytes <= space_info->total_bytes) ||
1677              btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
1678                 btrfs_space_info_update_bytes_may_use(fs_info, space_info,
1679                                                       orig_bytes);
1680                 ret = 0;
1681         }
1682
1683         /*
1684          * Things are dire, we need to make a reservation so we don't abort.  We
1685          * will let this reservation go through as long as we have actual space
1686          * left to allocate for the block.
1687          */
1688         if (ret && unlikely(flush == BTRFS_RESERVE_FLUSH_EMERGENCY)) {
1689                 used = btrfs_space_info_used(space_info, false);
1690                 if (used + orig_bytes <= space_info->total_bytes) {
1691                         btrfs_space_info_update_bytes_may_use(fs_info, space_info,
1692                                                               orig_bytes);
1693                         ret = 0;
1694                 }
1695         }
1696
1697         /*
1698          * If we couldn't make a reservation then set up our reservation ticket
1699          * and kick the async worker if it's not already running.
1700          *
1701          * If we are a priority flusher then we just need to add our ticket to
1702          * the list and we will do our own flushing further down.
1703          */
1704         if (ret && can_ticket(flush)) {
1705                 ticket.bytes = orig_bytes;
1706                 ticket.error = 0;
1707                 space_info->reclaim_size += ticket.bytes;
1708                 init_waitqueue_head(&ticket.wait);
1709                 ticket.steal = can_steal(flush);
1710                 if (trace_btrfs_reserve_ticket_enabled())
1711                         start_ns = ktime_get_ns();
1712
1713                 if (flush == BTRFS_RESERVE_FLUSH_ALL ||
1714                     flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
1715                     flush == BTRFS_RESERVE_FLUSH_DATA) {
1716                         list_add_tail(&ticket.list, &space_info->tickets);
1717                         if (!space_info->flush) {
1718                                 /*
1719                                  * We were forced to add a reserve ticket, so
1720                                  * our preemptive flushing is unable to keep
1721                                  * up.  Clamp down on the threshold for the
1722                                  * preemptive flushing in order to keep up with
1723                                  * the workload.
1724                                  */
1725                                 maybe_clamp_preempt(fs_info, space_info);
1726
1727                                 space_info->flush = 1;
1728                                 trace_btrfs_trigger_flush(fs_info,
1729                                                           space_info->flags,
1730                                                           orig_bytes, flush,
1731                                                           "enospc");
1732                                 queue_work(system_unbound_wq, async_work);
1733                         }
1734                 } else {
1735                         list_add_tail(&ticket.list,
1736                                       &space_info->priority_tickets);
1737                 }
1738         } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
1739                 /*
1740                  * We will do the space reservation dance during log replay,
1741                  * which means we won't have fs_info->fs_root set, so don't do
1742                  * the async reclaim as we will panic.
1743                  */
1744                 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
1745                     !work_busy(&fs_info->preempt_reclaim_work) &&
1746                     need_preemptive_reclaim(fs_info, space_info)) {
1747                         trace_btrfs_trigger_flush(fs_info, space_info->flags,
1748                                                   orig_bytes, flush, "preempt");
1749                         queue_work(system_unbound_wq,
1750                                    &fs_info->preempt_reclaim_work);
1751                 }
1752         }
1753         spin_unlock(&space_info->lock);
1754         if (!ret || !can_ticket(flush))
1755                 return ret;
1756
1757         return handle_reserve_ticket(fs_info, space_info, &ticket, start_ns,
1758                                      orig_bytes, flush);
1759 }
1760
1761 /*
1762  * Try to reserve metadata bytes from the block_rsv's space.
1763  *
1764  * @fs_info:    the filesystem
1765  * @space_info: the space_info we're allocating for
1766  * @orig_bytes: number of bytes we want
1767  * @flush:      whether or not we can flush to make our reservation
1768  *
1769  * This will reserve orig_bytes number of bytes from the space info associated
1770  * with the block_rsv.  If there is not enough space it will make an attempt to
1771  * flush out space to make room.  It will do this by flushing delalloc if
1772  * possible or committing the transaction.  If flush is 0 then no attempts to
1773  * regain reservations will be made and this will fail if there is not enough
1774  * space already.
1775  */
1776 int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
1777                                  struct btrfs_space_info *space_info,
1778                                  u64 orig_bytes,
1779                                  enum btrfs_reserve_flush_enum flush)
1780 {
1781         int ret;
1782
1783         ret = __reserve_bytes(fs_info, space_info, orig_bytes, flush);
1784         if (ret == -ENOSPC) {
1785                 trace_btrfs_space_reservation(fs_info, "space_info:enospc",
1786                                               space_info->flags, orig_bytes, 1);
1787
1788                 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
1789                         btrfs_dump_space_info(fs_info, space_info, orig_bytes, 0);
1790         }
1791         return ret;
1792 }
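
/*
 * Illustrative caller sketch (example_* is hypothetical, not a caller in
 * this file): reserve the worst-case cost of inserting one item, then give
 * the reservation back if nothing ends up consuming it.  The release helper
 * btrfs_space_info_free_bytes_may_use() is assumed from space-info.h.
 * BTRFS_RESERVE_FLUSH_LIMIT is chosen because, per the asserts in
 * __reserve_bytes(), it is safe while holding a transaction handle.
 */
static int example_reserve_one_item(struct btrfs_fs_info *fs_info,
				    struct btrfs_space_info *space_info)
{
	u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
	int ret;

	ret = btrfs_reserve_metadata_bytes(fs_info, space_info, num_bytes,
					   BTRFS_RESERVE_FLUSH_LIMIT);
	if (ret)
		return ret;

	/* ... modify the tree; on an unrelated failure, undo the charge: */
	btrfs_space_info_free_bytes_may_use(fs_info, space_info, num_bytes);
	return 0;
}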
1793
1794 /*
1795  * Try to reserve data bytes for an allocation.
1796  *
1797  * @fs_info: the filesystem
1798  * @bytes:   number of bytes we need
1799  * @flush:   how we are allowed to flush
1800  *
1801  * This will reserve bytes from the data space info.  If there is not enough
1802  * space then we will attempt to flush space as specified by flush.
1803  */
1804 int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
1805                              enum btrfs_reserve_flush_enum flush)
1806 {
1807         struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
1808         int ret;
1809
1810         ASSERT(flush == BTRFS_RESERVE_FLUSH_DATA ||
1811                flush == BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE ||
1812                flush == BTRFS_RESERVE_NO_FLUSH);
1813         ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA);
1814
1815         ret = __reserve_bytes(fs_info, data_sinfo, bytes, flush);
1816         if (ret == -ENOSPC) {
1817                 trace_btrfs_space_reservation(fs_info, "space_info:enospc",
1818                                               data_sinfo->flags, bytes, 1);
1819                 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
1820                         btrfs_dump_space_info(fs_info, data_sinfo, bytes, 0);
1821         }
1822         return ret;
1823 }
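
/*
 * Illustrative caller sketch (example_* is hypothetical): the flush mode
 * for a data reservation is picked from context.  Per the assert above,
 * BTRFS_RESERVE_FLUSH_DATA must not be used while holding a transaction
 * handle, as it may commit the transaction; such a caller would fall back
 * to NO_FLUSH and handle -ENOSPC itself.
 */
static int example_reserve_data(struct btrfs_fs_info *fs_info, u64 bytes)
{
	enum btrfs_reserve_flush_enum flush = current->journal_info ?
		BTRFS_RESERVE_NO_FLUSH : BTRFS_RESERVE_FLUSH_DATA;

	return btrfs_reserve_data_bytes(fs_info, bytes, flush);
}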
1824
1825 /* Dump all the space infos when we abort a transaction due to ENOSPC. */
1826 __cold void btrfs_dump_space_info_for_trans_abort(struct btrfs_fs_info *fs_info)
1827 {
1828         struct btrfs_space_info *space_info;
1829
1830         btrfs_info(fs_info, "dumping space info:");
1831         list_for_each_entry(space_info, &fs_info->space_info, list) {
1832                 spin_lock(&space_info->lock);
1833                 __btrfs_dump_space_info(fs_info, space_info);
1834                 spin_unlock(&space_info->lock);
1835         }
1836         dump_global_block_rsv(fs_info);
1837 }
1838
1839 /*
1840  * Account the unused space of all the readonly block groups in the space_info,
1841  * taking mirrors into account.
1842  */
1843 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
1844 {
1845         struct btrfs_block_group *block_group;
1846         u64 free_bytes = 0;
1847         int factor;
1848
1849         /* It's df, we don't care if it's racy */
1850         if (list_empty(&sinfo->ro_bgs))
1851                 return 0;
1852
1853         spin_lock(&sinfo->lock);
1854         list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
1855                 spin_lock(&block_group->lock);
1856
1857                 if (!block_group->ro) {
1858                         spin_unlock(&block_group->lock);
1859                         continue;
1860                 }
1861
1862                 factor = btrfs_bg_type_to_factor(block_group->flags);
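		/*
		 * E.g. a RAID1 block group (factor 2) with a 1G length and
		 * 256M used contributes (1G - 256M) * 2 = 1.5G of raw free
		 * space.  (Illustrative numbers.)
		 */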
1863                 free_bytes += (block_group->length -
1864                                block_group->used) * factor;
1865
1866                 spin_unlock(&block_group->lock);
1867         }
1868         spin_unlock(&sinfo->lock);
1869
1870         return free_bytes;
1871 }