// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/falloc.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/btrfs.h>
#include <linux/uio.h>
#include <linux/iversion.h>
#include <linux/fsverity.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "volumes.h"
#include "qgroup.h"
#include "compression.h"
#include "delalloc-space.h"
#include "reflink.h"
#include "subpage.h"

static struct kmem_cache *btrfs_inode_defrag_cachep;
/*
 * When auto defrag is enabled we queue up these defrag structs to remember
 * which inodes need defragging passes.
 */
struct inode_defrag {
        struct rb_node rb_node;
        /* objectid */
        u64 ino;
        /*
         * transid where the defrag was added, we search for
         * extents newer than this
         */
        u64 transid;

        /* root objectid */
        u64 root;

        /*
         * The extent size threshold for autodefrag.
         *
         * This value is different for compressed/non-compressed extents,
         * thus needs to be passed from higher layer.
         * (aka, inode_should_defrag())
         */
        u32 extent_thresh;
};

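/*
 * Compare two defrag records, ordering by root objectid first and then by
 * inode number, which is the order the defrag rbtree is kept in.
 */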
static int __compare_inode_defrag(struct inode_defrag *defrag1,
                                  struct inode_defrag *defrag2)
{
        if (defrag1->root > defrag2->root)
                return 1;
        else if (defrag1->root < defrag2->root)
                return -1;
        else if (defrag1->ino > defrag2->ino)
                return 1;
        else if (defrag1->ino < defrag2->ino)
                return -1;
        else
                return 0;
}

/* Insert a record for an inode into the defrag tree.  The lock
 * must be held already.
 *
 * If you're inserting a record for an older transid than an
 * existing record, the transid already in the tree is lowered.
 *
 * If an existing record is found, the defrag item you
 * pass in is freed.
 */
static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
                                    struct inode_defrag *defrag)
{
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        struct inode_defrag *entry;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        int ret;

        p = &fs_info->defrag_inodes.rb_node;
        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct inode_defrag, rb_node);

                ret = __compare_inode_defrag(defrag, entry);
                if (ret < 0)
                        p = &parent->rb_left;
                else if (ret > 0)
                        p = &parent->rb_right;
                else {
                        /* if we're reinserting an entry for
                         * an old defrag run, make sure to
                         * lower the transid of our existing record
                         */
                        if (defrag->transid < entry->transid)
                                entry->transid = defrag->transid;
                        entry->extent_thresh = min(defrag->extent_thresh,
                                                   entry->extent_thresh);
                        return -EEXIST;
                }
        }
        set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
        rb_link_node(&defrag->rb_node, parent, p);
        rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
        return 0;
}

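/*
 * Check that autodefrag is still worth running: the mount option must be set
 * and the filesystem must not be in the middle of closing down.
 */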
static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
{
        if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
                return 0;

        if (btrfs_fs_closing(fs_info))
                return 0;

        return 1;
}

/*
 * insert a defrag record for this inode if auto defrag is
 * enabled
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
                           struct btrfs_inode *inode, u32 extent_thresh)
{
        struct btrfs_root *root = inode->root;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct inode_defrag *defrag;
        u64 transid;
        int ret;

        if (!__need_auto_defrag(fs_info))
                return 0;

        if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
                return 0;

        if (trans)
                transid = trans->transid;
        else
                transid = inode->root->last_trans;

        defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
        if (!defrag)
                return -ENOMEM;

        defrag->ino = btrfs_ino(inode);
        defrag->transid = transid;
        defrag->root = root->root_key.objectid;
        defrag->extent_thresh = extent_thresh;

        spin_lock(&fs_info->defrag_inodes_lock);
        if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
                /*
                 * If we set the IN_DEFRAG flag, then the inode is evicted
                 * from memory and re-read, the new in-memory inode won't
                 * have the flag set. In that case we may find an existing
                 * defrag record for it in the tree.
                 */
                ret = __btrfs_add_inode_defrag(inode, defrag);
                if (ret)
                        kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
        } else {
                kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
        }
        spin_unlock(&fs_info->defrag_inodes_lock);
        return 0;
}

/*
 * Pick the defraggable inode that we want; if it doesn't exist, we will get
 * the next one.
 */
static struct inode_defrag *
btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
{
        struct inode_defrag *entry = NULL;
        struct inode_defrag tmp;
        struct rb_node *p;
        struct rb_node *parent = NULL;
        int ret;

        tmp.ino = ino;
        tmp.root = root;

        spin_lock(&fs_info->defrag_inodes_lock);
        p = fs_info->defrag_inodes.rb_node;
        while (p) {
                parent = p;
                entry = rb_entry(parent, struct inode_defrag, rb_node);

                ret = __compare_inode_defrag(&tmp, entry);
                if (ret < 0)
                        p = parent->rb_left;
                else if (ret > 0)
                        p = parent->rb_right;
                else
                        goto out;
        }

        if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
                parent = rb_next(parent);
                if (parent)
                        entry = rb_entry(parent, struct inode_defrag, rb_node);
                else
                        entry = NULL;
        }
out:
        if (entry)
                rb_erase(parent, &fs_info->defrag_inodes);
        spin_unlock(&fs_info->defrag_inodes_lock);
        return entry;
}

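/*
 * Remove and free every record in the defrag rbtree, dropping the lock
 * periodically via cond_resched_lock() so a large tree doesn't hog the CPU.
 */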
void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
{
        struct inode_defrag *defrag;
        struct rb_node *node;

        spin_lock(&fs_info->defrag_inodes_lock);
        node = rb_first(&fs_info->defrag_inodes);
        while (node) {
                rb_erase(node, &fs_info->defrag_inodes);
                defrag = rb_entry(node, struct inode_defrag, rb_node);
                kmem_cache_free(btrfs_inode_defrag_cachep, defrag);

                cond_resched_lock(&fs_info->defrag_inodes_lock);

                node = rb_first(&fs_info->defrag_inodes);
        }
        spin_unlock(&fs_info->defrag_inodes_lock);
}

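/*
 * Limit on how much work one pass of __btrfs_run_defrag_inode() hands to
 * btrfs_defrag_file() before it loops around and rechecks filesystem state.
 */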
#define BTRFS_DEFRAG_BATCH      1024

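/*
 * Defrag one inode from the defrag rbtree, a BTRFS_DEFRAG_BATCH sized chunk
 * at a time, bailing out between chunks if the filesystem started remounting
 * or no longer wants autodefrag. The defrag record is freed before returning.
 */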
static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
                                    struct inode_defrag *defrag)
{
        struct btrfs_root *inode_root;
        struct inode *inode;
        struct btrfs_ioctl_defrag_range_args range;
        int ret = 0;
        u64 cur = 0;

again:
        if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
                goto cleanup;
        if (!__need_auto_defrag(fs_info))
                goto cleanup;

        /* get the inode */
        inode_root = btrfs_get_fs_root(fs_info, defrag->root, true);
        if (IS_ERR(inode_root)) {
                ret = PTR_ERR(inode_root);
                goto cleanup;
        }

        inode = btrfs_iget(fs_info->sb, defrag->ino, inode_root);
        btrfs_put_root(inode_root);
        if (IS_ERR(inode)) {
                ret = PTR_ERR(inode);
                goto cleanup;
        }

        if (cur >= i_size_read(inode)) {
                iput(inode);
                goto cleanup;
        }

        /* do a chunk of defrag */
        clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
        memset(&range, 0, sizeof(range));
        range.len = (u64)-1;
        range.start = cur;
        range.extent_thresh = defrag->extent_thresh;

        sb_start_write(fs_info->sb);
        ret = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
                                       BTRFS_DEFRAG_BATCH);
        sb_end_write(fs_info->sb);
        iput(inode);

        if (ret < 0)
                goto cleanup;

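        /*
         * btrfs_defrag_file() updates range.start to where its scan ended;
         * taking the max with cur + sectorsize guarantees forward progress
         * even when nothing in the chunk was defragged.
         */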
        cur = max(cur + fs_info->sectorsize, range.start);
        goto again;

cleanup:
        kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
        return ret;
}

/*
 * run through the list of inodes in the FS that need
 * defragging
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
        struct inode_defrag *defrag;
        u64 first_ino = 0;
        u64 root_objectid = 0;

        atomic_inc(&fs_info->defrag_running);
        while (1) {
                /* Pause the auto defragger. */
                if (test_bit(BTRFS_FS_STATE_REMOUNTING,
                             &fs_info->fs_state))
                        break;

                if (!__need_auto_defrag(fs_info))
                        break;

                /* find an inode to defrag */
                defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
                                                 first_ino);
                if (!defrag) {
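                        /*
                         * No record at or after the cursor. If the cursor
                         * had advanced, wrap around once to pick up records
                         * inserted behind it; otherwise the tree is empty
                         * and we are done.
                         */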
                        if (root_objectid || first_ino) {
                                root_objectid = 0;
                                first_ino = 0;
                                continue;
                        } else {
                                break;
                        }
                }

                first_ino = defrag->ino + 1;
                root_objectid = defrag->root;

                __btrfs_run_defrag_inode(fs_info, defrag);
        }
        atomic_dec(&fs_info->defrag_running);

        /*
         * during unmount, we use the transaction_wait queue to
         * wait for the defragger to stop
         */
        wake_up(&fs_info->transaction_wait);
        return 0;
}

/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
                                         struct page **prepared_pages,
                                         struct iov_iter *i)
{
        size_t copied = 0;
        size_t total_copied = 0;
        int pg = 0;
        int offset = offset_in_page(pos);

        while (write_bytes > 0) {
                size_t count = min_t(size_t,
                                     PAGE_SIZE - offset, write_bytes);
                struct page *page = prepared_pages[pg];
                /*
                 * Copy data from userspace to the current page
                 */
                copied = copy_page_from_iter_atomic(page, offset, count, i);

                /* Flush processor's dcache for this page */
                flush_dcache_page(page);

                /*
                 * if we get a partial write, we can end up with
                 * partially up to date pages.  These add
                 * a lot of complexity, so make sure they don't
                 * happen by forcing this copy to be retried.
                 *
                 * The rest of the btrfs_file_write code will fall
                 * back to page at a time copies after we return 0.
                 */
                if (unlikely(copied < count)) {
                        if (!PageUptodate(page)) {
                                iov_iter_revert(i, copied);
                                copied = 0;
                        }
                        if (!copied)
                                break;
                }

                write_bytes -= copied;
                total_copied += copied;
                offset += copied;
                if (offset == PAGE_SIZE) {
                        pg++;
                        offset = 0;
                }
        }
        return total_copied;
}

/*
 * unlocks pages after btrfs_file_write is done with them
 */
static void btrfs_drop_pages(struct btrfs_fs_info *fs_info,
                             struct page **pages, size_t num_pages,
                             u64 pos, u64 copied)
{
        size_t i;
        u64 block_start = round_down(pos, fs_info->sectorsize);
        u64 block_len = round_up(pos + copied, fs_info->sectorsize) - block_start;

        ASSERT(block_len <= U32_MAX);
        for (i = 0; i < num_pages; i++) {
                /*
                 * The "checked" page flag is some magic around finding pages
                 * that have been modified without going through
                 * btrfs_set_page_dirty; clear it here. There should be no
                 * need to mark the pages accessed, as prepare_pages() should
                 * have marked them accessed via find_or_create_page().
                 */
                btrfs_page_clamp_clear_checked(fs_info, pages[i], block_start,
                                               block_len);
                unlock_page(pages[i]);
                put_page(pages[i]);
        }
}

/*
 * After btrfs_copy_from_user(), update the following things for delalloc:
 * - Mark newly dirtied pages as DELALLOC in the io tree.
 *   Used to advise which range is to be written back.
 * - Mark modified pages as Uptodate/Dirty and not needing COW fixup
 * - Update inode size for past EOF write
 */
int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
                      size_t num_pages, loff_t pos, size_t write_bytes,
                      struct extent_state **cached, bool noreserve)
{
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        int err = 0;
        int i;
        u64 num_bytes;
        u64 start_pos;
        u64 end_of_last_block;
        u64 end_pos = pos + write_bytes;
        loff_t isize = i_size_read(&inode->vfs_inode);
        unsigned int extra_bits = 0;

        if (write_bytes == 0)
                return 0;

        if (noreserve)
                extra_bits |= EXTENT_NORESERVE;

        start_pos = round_down(pos, fs_info->sectorsize);
        num_bytes = round_up(write_bytes + pos - start_pos,
                             fs_info->sectorsize);
        ASSERT(num_bytes <= U32_MAX);

        end_of_last_block = start_pos + num_bytes - 1;

        /*
         * The pages may have already been dirty, clear out old accounting so
         * we can set things up properly
         */
        clear_extent_bit(&inode->io_tree, start_pos, end_of_last_block,
                         EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
                         0, 0, cached);

        err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
                                        extra_bits, cached);
        if (err)
                return err;

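        /*
         * Use the subpage clamp helpers so that on subpage filesystems only
         * the written block range inside each page gets its bits updated,
         * not the whole page.
         */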
        for (i = 0; i < num_pages; i++) {
                struct page *p = pages[i];

                btrfs_page_clamp_set_uptodate(fs_info, p, start_pos, num_bytes);
                btrfs_page_clamp_clear_checked(fs_info, p, start_pos, num_bytes);
                btrfs_page_clamp_set_dirty(fs_info, p, start_pos, num_bytes);
        }

        /*
         * we've only changed i_size in ram, and we haven't updated
         * the disk i_size.  There is no need to log the inode
         * at this time.
         */
        if (end_pos > isize)
                i_size_write(&inode->vfs_inode, end_pos);
        return 0;
}

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end,
                             int skip_pinned)
{
        struct extent_map *em;
        struct extent_map *split = NULL;
        struct extent_map *split2 = NULL;
        struct extent_map_tree *em_tree = &inode->extent_tree;
        u64 len = end - start + 1;
        u64 gen;
        int ret;
        int testend = 1;
        unsigned long flags;
        int compressed = 0;
        bool modified;

        WARN_ON(end < start);
        if (end == (u64)-1) {
                len = (u64)-1;
                testend = 0;
        }
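        /*
         * Each iteration may need up to two preallocated extent maps:
         * dropping the middle of an existing mapping leaves a piece on each
         * side. The maps are allocated before taking the tree lock; if
         * allocation fails we still remove the overlapping mappings, just
         * without splitting them.
         */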
        while (1) {
                int no_splits = 0;

                modified = false;
                if (!split)
                        split = alloc_extent_map();
                if (!split2)
                        split2 = alloc_extent_map();
                if (!split || !split2)
                        no_splits = 1;

                write_lock(&em_tree->lock);
                em = lookup_extent_mapping(em_tree, start, len);
                if (!em) {
                        write_unlock(&em_tree->lock);
                        break;
                }
                flags = em->flags;
                gen = em->generation;
                if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
                        if (testend && em->start + em->len >= start + len) {
                                free_extent_map(em);
                                write_unlock(&em_tree->lock);
                                break;
                        }
                        start = em->start + em->len;
                        if (testend)
                                len = start + len - (em->start + em->len);
                        free_extent_map(em);
                        write_unlock(&em_tree->lock);
                        continue;
                }
                compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
                clear_bit(EXTENT_FLAG_PINNED, &em->flags);
                clear_bit(EXTENT_FLAG_LOGGING, &flags);
                modified = !list_empty(&em->list);
                if (no_splits)
                        goto next;

                if (em->start < start) {
                        split->start = em->start;
                        split->len = start - em->start;

                        if (em->block_start < EXTENT_MAP_LAST_BYTE) {
                                split->orig_start = em->orig_start;
                                split->block_start = em->block_start;

                                if (compressed)
                                        split->block_len = em->block_len;
                                else
                                        split->block_len = split->len;
                                split->orig_block_len = max(split->block_len,
                                                em->orig_block_len);
                                split->ram_bytes = em->ram_bytes;
                        } else {
                                split->orig_start = split->start;
                                split->block_len = 0;
                                split->block_start = em->block_start;
                                split->orig_block_len = 0;
                                split->ram_bytes = split->len;
                        }

                        split->generation = gen;
                        split->flags = flags;
                        split->compress_type = em->compress_type;
                        replace_extent_mapping(em_tree, em, split, modified);
                        free_extent_map(split);
                        split = split2;
                        split2 = NULL;
                }
                if (testend && em->start + em->len > start + len) {
                        u64 diff = start + len - em->start;

                        split->start = start + len;
                        split->len = em->start + em->len - (start + len);
                        split->flags = flags;
                        split->compress_type = em->compress_type;
                        split->generation = gen;

                        if (em->block_start < EXTENT_MAP_LAST_BYTE) {
                                split->orig_block_len = max(em->block_len,
                                                    em->orig_block_len);

                                split->ram_bytes = em->ram_bytes;
                                if (compressed) {
                                        split->block_len = em->block_len;
                                        split->block_start = em->block_start;
                                        split->orig_start = em->orig_start;
                                } else {
                                        split->block_len = split->len;
                                        split->block_start = em->block_start
                                                + diff;
                                        split->orig_start = em->orig_start;
                                }
                        } else {
                                split->ram_bytes = split->len;
                                split->orig_start = split->start;
                                split->block_len = 0;
                                split->block_start = em->block_start;
                                split->orig_block_len = 0;
                        }

                        if (extent_map_in_tree(em)) {
                                replace_extent_mapping(em_tree, em, split,
                                                       modified);
                        } else {
                                ret = add_extent_mapping(em_tree, split,
                                                         modified);
                                ASSERT(ret == 0); /* Logic error */
                        }
                        free_extent_map(split);
                        split = NULL;
                }
next:
                if (extent_map_in_tree(em))
                        remove_extent_mapping(em_tree, em);
                write_unlock(&em_tree->lock);

                /* once for us */
                free_extent_map(em);
                /* once for the tree */
                free_extent_map(em);
        }
        if (split)
                free_extent_map(split);
        if (split2)
                free_extent_map(split2);
}

/*
 * This is very complex, but the basic idea is to drop all extents
 * in the range start - end.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 *
 * Note: the VFS' inode number of bytes is not updated, it's up to the caller
 * to deal with that. We set the field 'bytes_found' of the arguments structure
 * with the number of allocated bytes found in the target range, so that the
 * caller can update the inode's number of bytes in an atomic way when
 * replacing extents in a range to avoid races with stat(2).
 */
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root, struct btrfs_inode *inode,
                       struct btrfs_drop_extents_args *args)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct extent_buffer *leaf;
        struct btrfs_file_extent_item *fi;
        struct btrfs_ref ref = { 0 };
        struct btrfs_key key;
        struct btrfs_key new_key;
        u64 ino = btrfs_ino(inode);
        u64 search_start = args->start;
        u64 disk_bytenr = 0;
        u64 num_bytes = 0;
        u64 extent_offset = 0;
        u64 extent_end = 0;
        u64 last_end = args->start;
        int del_nr = 0;
        int del_slot = 0;
        int extent_type;
        int recow;
        int ret;
        int modify_tree = -1;
        int update_refs;
        int found = 0;
        struct btrfs_path *path = args->path;

        args->bytes_found = 0;
        args->extent_inserted = false;

        /* Must always have a path if ->replace_extent is true */
        ASSERT(!(args->replace_extent && !args->path));

        if (!path) {
                path = btrfs_alloc_path();
                if (!path) {
                        ret = -ENOMEM;
                        goto out;
                }
        }

        if (args->drop_cache)
                btrfs_drop_extent_cache(inode, args->start, args->end - 1, 0);

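        /*
         * If the range starts at or beyond disk_i_size there are no file
         * extent items to modify, so the tree search can be read-only unless
         * we also need to make room for a replacement extent item.
         */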
        if (args->start >= inode->disk_i_size && !args->replace_extent)
                modify_tree = 0;

        update_refs = (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID);
        while (1) {
                recow = 0;
                ret = btrfs_lookup_file_extent(trans, root, path, ino,
                                               search_start, modify_tree);
                if (ret < 0)
                        break;
                if (ret > 0 && path->slots[0] > 0 && search_start == args->start) {
                        leaf = path->nodes[0];
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
                        if (key.objectid == ino &&
                            key.type == BTRFS_EXTENT_DATA_KEY)
                                path->slots[0]--;
                }
                ret = 0;
next_slot:
                leaf = path->nodes[0];
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        BUG_ON(del_nr > 0);
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                break;
                        if (ret > 0) {
                                ret = 0;
                                break;
                        }
                        leaf = path->nodes[0];
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

                if (key.objectid > ino)
                        break;
                if (WARN_ON_ONCE(key.objectid < ino) ||
                    key.type < BTRFS_EXTENT_DATA_KEY) {
                        ASSERT(del_nr == 0);
                        path->slots[0]++;
                        goto next_slot;
                }
                if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= args->end)
                        break;

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);
                extent_type = btrfs_file_extent_type(leaf, fi);

                if (extent_type == BTRFS_FILE_EXTENT_REG ||
                    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
                        disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
                        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
                        extent_offset = btrfs_file_extent_offset(leaf, fi);
                        extent_end = key.offset +
                                btrfs_file_extent_num_bytes(leaf, fi);
                } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                        extent_end = key.offset +
                                btrfs_file_extent_ram_bytes(leaf, fi);
                } else {
                        /* can't happen */
                        BUG();
                }

                /*
                 * Don't skip extent items representing 0 byte lengths. They
                 * used to be created (bug) if while punching holes we hit
                 * -ENOSPC condition. So if we find one here, just ensure we
                 * delete it, otherwise we would insert a new file extent item
                 * with the same key (offset) as that 0 bytes length file
                 * extent item in the call to setup_items_for_insert() later
                 * in this function.
                 */
                if (extent_end == key.offset && extent_end >= search_start) {
                        last_end = extent_end;
                        goto delete_extent_item;
                }

                if (extent_end <= search_start) {
                        path->slots[0]++;
                        goto next_slot;
                }

                found = 1;
                search_start = max(key.offset, args->start);
                if (recow || !modify_tree) {
                        modify_tree = -1;
                        btrfs_release_path(path);
                        continue;
                }

                /*
                 *     | - range to drop - |
                 *  | -------- extent -------- |
                 */
                if (args->start > key.offset && args->end < extent_end) {
                        BUG_ON(del_nr > 0);
                        if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                                ret = -EOPNOTSUPP;
                                break;
                        }

                        memcpy(&new_key, &key, sizeof(new_key));
                        new_key.offset = args->start;
                        ret = btrfs_duplicate_item(trans, root, path,
                                                   &new_key);
                        if (ret == -EAGAIN) {
                                btrfs_release_path(path);
                                continue;
                        }
                        if (ret < 0)
                                break;

                        leaf = path->nodes[0];
                        fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        args->start - key.offset);

                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);

                        extent_offset += args->start - key.offset;
                        btrfs_set_file_extent_offset(leaf, fi, extent_offset);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - args->start);
                        btrfs_mark_buffer_dirty(leaf);

                        if (update_refs && disk_bytenr > 0) {
                                btrfs_init_generic_ref(&ref,
                                                BTRFS_ADD_DELAYED_REF,
                                                disk_bytenr, num_bytes, 0);
                                btrfs_init_data_ref(&ref,
                                                root->root_key.objectid,
                                                new_key.objectid,
                                                args->start - extent_offset,
                                                0, false);
                                ret = btrfs_inc_extent_ref(trans, &ref);
                                BUG_ON(ret); /* -ENOMEM */
                        }
                        key.offset = args->start;
                }
                /*
                 * From here on out we will have actually dropped something, so
                 * last_end can be updated.
                 */
                last_end = extent_end;

                /*
                 *  | ---- range to drop ----- |
                 *      | -------- extent -------- |
                 */
                if (args->start <= key.offset && args->end < extent_end) {
                        if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                                ret = -EOPNOTSUPP;
                                break;
                        }

                        memcpy(&new_key, &key, sizeof(new_key));
                        new_key.offset = args->end;
                        btrfs_set_item_key_safe(fs_info, path, &new_key);

                        extent_offset += args->end - key.offset;
                        btrfs_set_file_extent_offset(leaf, fi, extent_offset);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - args->end);
                        btrfs_mark_buffer_dirty(leaf);
                        if (update_refs && disk_bytenr > 0)
                                args->bytes_found += args->end - key.offset;
                        break;
                }

                search_start = extent_end;
                /*
                 *       | ---- range to drop ----- |
                 *  | -------- extent -------- |
                 */
                if (args->start > key.offset && args->end >= extent_end) {
                        BUG_ON(del_nr > 0);
                        if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                                ret = -EOPNOTSUPP;
                                break;
                        }

                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        args->start - key.offset);
                        btrfs_mark_buffer_dirty(leaf);
                        if (update_refs && disk_bytenr > 0)
                                args->bytes_found += extent_end - args->start;
                        if (args->end == extent_end)
                                break;

                        path->slots[0]++;
                        goto next_slot;
                }

                /*
                 *  | ---- range to drop ----- |
                 *    | ------ extent ------ |
                 */
                if (args->start <= key.offset && args->end >= extent_end) {
delete_extent_item:
                        if (del_nr == 0) {
                                del_slot = path->slots[0];
                                del_nr = 1;
                        } else {
                                BUG_ON(del_slot + del_nr != path->slots[0]);
                                del_nr++;
                        }

                        if (update_refs &&
                            extent_type == BTRFS_FILE_EXTENT_INLINE) {
                                args->bytes_found += extent_end - key.offset;
                                extent_end = ALIGN(extent_end,
                                                   fs_info->sectorsize);
                        } else if (update_refs && disk_bytenr > 0) {
                                btrfs_init_generic_ref(&ref,
                                                BTRFS_DROP_DELAYED_REF,
                                                disk_bytenr, num_bytes, 0);
                                btrfs_init_data_ref(&ref,
                                                root->root_key.objectid,
                                                key.objectid,
                                                key.offset - extent_offset, 0,
                                                false);
                                ret = btrfs_free_extent(trans, &ref);
                                BUG_ON(ret); /* -ENOMEM */
                                args->bytes_found += extent_end - key.offset;
                        }

                        if (args->end == extent_end)
                                break;

                        if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
                                path->slots[0]++;
                                goto next_slot;
                        }

                        ret = btrfs_del_items(trans, root, path, del_slot,
                                              del_nr);
                        if (ret) {
                                btrfs_abort_transaction(trans, ret);
                                break;
                        }

                        del_nr = 0;
                        del_slot = 0;

                        btrfs_release_path(path);
                        continue;
                }

                BUG();
        }

        if (!ret && del_nr > 0) {
                /*
                 * Set path->slots[0] to the first slot, so that after the
                 * delete, if items are moved off from our leaf to its
                 * immediate left or right neighbour leaves, we end up with a
                 * correct and adjusted path->slots[0] for our insertion (if
                 * args->replace_extent).
                 */
                path->slots[0] = del_slot;
                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
                if (ret)
                        btrfs_abort_transaction(trans, ret);
        }
980
981         leaf = path->nodes[0];
982         /*
983          * If btrfs_del_items() was called, it might have deleted a leaf, in
984          * which case it unlocked our path, so check path->locks[0] matches a
985          * write lock.
986          */
987         if (!ret && args->replace_extent &&
988             path->locks[0] == BTRFS_WRITE_LOCK &&
989             btrfs_leaf_free_space(leaf) >=
990             sizeof(struct btrfs_item) + args->extent_item_size) {
991
992                 key.objectid = ino;
993                 key.type = BTRFS_EXTENT_DATA_KEY;
994                 key.offset = args->start;
995                 if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
996                         struct btrfs_key slot_key;
997
998                         btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
999                         if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
1000                                 path->slots[0]++;
1001                 }
1002                 btrfs_setup_item_for_insert(root, path, &key, args->extent_item_size);
1003                 args->extent_inserted = true;
1004         }
1005
1006         if (!args->path)
1007                 btrfs_free_path(path);
1008         else if (!args->extent_inserted)
1009                 btrfs_release_path(path);
1010 out:
1011         args->drop_end = found ? min(args->end, last_end) : args->end;
1012
1013         return ret;
1014 }
1015
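/*
 * Check whether the file extent item at @slot can be merged with a
 * neighbouring extent that maps to disk extent @bytenr at original offset
 * @orig_offset: it must be a regular, uncompressed, unencrypted extent and
 * match the optional *start/*end constraints. On success the item's file
 * range is returned via *start and *end.
 */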
static int extent_mergeable(struct extent_buffer *leaf, int slot,
                            u64 objectid, u64 bytenr, u64 orig_offset,
                            u64 *start, u64 *end)
{
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        u64 extent_end;

        if (slot < 0 || slot >= btrfs_header_nritems(leaf))
                return 0;

        btrfs_item_key_to_cpu(leaf, &key, slot);
        if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
                return 0;

        fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
        if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
            btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
            btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
            btrfs_file_extent_compression(leaf, fi) ||
            btrfs_file_extent_encryption(leaf, fi) ||
            btrfs_file_extent_other_encoding(leaf, fi))
                return 0;

        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
        if ((*start && *start != key.offset) || (*end && *end != extent_end))
                return 0;

        *start = key.offset;
        *end = extent_end;
        return 1;
}

/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'. If only
 * part of extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
                              struct btrfs_inode *inode, u64 start, u64 end)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_root *root = inode->root;
        struct extent_buffer *leaf;
        struct btrfs_path *path;
        struct btrfs_file_extent_item *fi;
        struct btrfs_ref ref = { 0 };
        struct btrfs_key key;
        struct btrfs_key new_key;
        u64 bytenr;
        u64 num_bytes;
        u64 extent_end;
        u64 orig_offset;
        u64 other_start;
        u64 other_end;
        u64 split;
        int del_nr = 0;
        int del_slot = 0;
        int recow;
        int ret = 0;
        u64 ino = btrfs_ino(inode);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
again:
        recow = 0;
        split = start;
        key.objectid = ino;
        key.type = BTRFS_EXTENT_DATA_KEY;
        key.offset = split;

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0)
                goto out;
        if (ret > 0 && path->slots[0] > 0)
                path->slots[0]--;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        if (key.objectid != ino ||
            key.type != BTRFS_EXTENT_DATA_KEY) {
                ret = -EINVAL;
                btrfs_abort_transaction(trans, ret);
                goto out;
        }
        fi = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);
        if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC) {
                ret = -EINVAL;
                btrfs_abort_transaction(trans, ret);
                goto out;
        }
        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
        if (key.offset > start || extent_end < end) {
                ret = -EINVAL;
                btrfs_abort_transaction(trans, ret);
                goto out;
        }

        bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
        orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
        memcpy(&new_key, &key, sizeof(new_key));

        if (start == key.offset && end < extent_end) {
                other_start = 0;
                other_end = start;
                if (extent_mergeable(leaf, path->slots[0] - 1,
                                     ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        new_key.offset = end;
                        btrfs_set_item_key_safe(fs_info, path, &new_key);
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - end);
                        btrfs_set_file_extent_offset(leaf, fi,
                                                     end - orig_offset);
                        fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        end - other_start);
                        btrfs_mark_buffer_dirty(leaf);
                        goto out;
                }
        }

        if (start > key.offset && end == extent_end) {
                other_start = end;
                other_end = 0;
                if (extent_mergeable(leaf, path->slots[0] + 1,
                                     ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        path->slots[0]++;
                        new_key.offset = start;
                        btrfs_set_item_key_safe(fs_info, path, &new_key);

                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        other_end - start);
                        btrfs_set_file_extent_offset(leaf, fi,
                                                     start - orig_offset);
                        btrfs_mark_buffer_dirty(leaf);
                        goto out;
                }
        }

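        /*
         * The written range is strictly inside the prealloc extent and could
         * not be merged with a neighbour: split the item at the front and/or
         * back boundary, taking an extra reference for each new item, until
         * the item at path->slots[0] covers exactly [start, end).
         */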
        while (start > key.offset || end < extent_end) {
                if (key.offset == start)
                        split = end;

                new_key.offset = split;
                ret = btrfs_duplicate_item(trans, root, path, &new_key);
                if (ret == -EAGAIN) {
                        btrfs_release_path(path);
                        goto again;
                }
                if (ret < 0) {
                        btrfs_abort_transaction(trans, ret);
                        goto out;
                }

                leaf = path->nodes[0];
                fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                    struct btrfs_file_extent_item);
                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                split - key.offset);

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);

                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                extent_end - split);
                btrfs_mark_buffer_dirty(leaf);

                btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
                                       num_bytes, 0);
                btrfs_init_data_ref(&ref, root->root_key.objectid, ino,
                                    orig_offset, 0, false);
                ret = btrfs_inc_extent_ref(trans, &ref);
                if (ret) {
                        btrfs_abort_transaction(trans, ret);
                        goto out;
                }

                if (split == start) {
                        key.offset = start;
                } else {
                        if (start != key.offset) {
                                ret = -EINVAL;
                                btrfs_abort_transaction(trans, ret);
                                goto out;
                        }
                        path->slots[0]--;
                        extent_end = end;
                }
                recow = 1;
        }

        other_start = end;
        other_end = 0;
        btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
                               num_bytes, 0);
        btrfs_init_data_ref(&ref, root->root_key.objectid, ino, orig_offset,
                            0, false);
        if (extent_mergeable(leaf, path->slots[0] + 1,
                             ino, bytenr, orig_offset,
                             &other_start, &other_end)) {
                if (recow) {
                        btrfs_release_path(path);
                        goto again;
                }
                extent_end = other_end;
                del_slot = path->slots[0] + 1;
                del_nr++;
                ret = btrfs_free_extent(trans, &ref);
                if (ret) {
                        btrfs_abort_transaction(trans, ret);
                        goto out;
                }
        }
        other_start = 0;
        other_end = start;
        if (extent_mergeable(leaf, path->slots[0] - 1,
                             ino, bytenr, orig_offset,
                             &other_start, &other_end)) {
                if (recow) {
                        btrfs_release_path(path);
                        goto again;
                }
                key.offset = other_start;
                del_slot = path->slots[0];
                del_nr++;
                ret = btrfs_free_extent(trans, &ref);
                if (ret) {
                        btrfs_abort_transaction(trans, ret);
                        goto out;
                }
        }
        if (del_nr == 0) {
                fi = btrfs_item_ptr(leaf, path->slots[0],
                           struct btrfs_file_extent_item);
                btrfs_set_file_extent_type(leaf, fi,
                                           BTRFS_FILE_EXTENT_REG);
                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_mark_buffer_dirty(leaf);
        } else {
                fi = btrfs_item_ptr(leaf, del_slot - 1,
                           struct btrfs_file_extent_item);
                btrfs_set_file_extent_type(leaf, fi,
                                           BTRFS_FILE_EXTENT_REG);
                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                extent_end - key.offset);
                btrfs_mark_buffer_dirty(leaf);

                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
                if (ret < 0) {
                        btrfs_abort_transaction(trans, ret);
                        goto out;
                }
        }
out:
        btrfs_free_path(path);
        return ret;
}

/*
 * On error we return an unlocked page and the error value; on success we
 * return a locked page and 0.
 */
static int prepare_uptodate_page(struct inode *inode,
                                 struct page *page, u64 pos,
                                 bool force_uptodate)
{
        int ret = 0;

        if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
            !PageUptodate(page)) {
                ret = btrfs_readpage(NULL, page);
                if (ret)
                        return ret;
                lock_page(page);
                if (!PageUptodate(page)) {
                        unlock_page(page);
                        return -EIO;
                }

                /*
                 * Since btrfs_readpage() will unlock the page before it
                 * returns, there is a window where btrfs_releasepage() can be
                 * called to release the page.  Here we check both inode
                 * mapping and PagePrivate() to make sure the page was not
                 * released.
                 *
                 * The private flag check is essential for subpage as we need
                 * to store extra bitmap using page->private.
                 */
                if (page->mapping != inode->i_mapping || !PagePrivate(page)) {
                        unlock_page(page);
                        return -EAGAIN;
                }
        }
        return 0;
}

1341 /*
1342  * this just gets pages into the page cache and locks them down.
1343  */
1344 static noinline int prepare_pages(struct inode *inode, struct page **pages,
1345                                   size_t num_pages, loff_t pos,
1346                                   size_t write_bytes, bool force_uptodate)
1347 {
1348         int i;
1349         unsigned long index = pos >> PAGE_SHIFT;
1350         gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
1351         int err = 0;
1352         int faili;
1353
1354         for (i = 0; i < num_pages; i++) {
1355 again:
1356                 pages[i] = find_or_create_page(inode->i_mapping, index + i,
1357                                                mask | __GFP_WRITE);
1358                 if (!pages[i]) {
1359                         faili = i - 1;
1360                         err = -ENOMEM;
1361                         goto fail;
1362                 }
1363
1364                 err = set_page_extent_mapped(pages[i]);
1365                 if (err < 0) {
1366                         faili = i;
1367                         goto fail;
1368                 }
1369
1370                 if (i == 0)
1371                         err = prepare_uptodate_page(inode, pages[i], pos,
1372                                                     force_uptodate);
1373                 if (!err && i == num_pages - 1)
1374                         err = prepare_uptodate_page(inode, pages[i],
1375                                                     pos + write_bytes, false);
1376                 if (err) {
1377                         put_page(pages[i]);
1378                         if (err == -EAGAIN) {
1379                                 err = 0;
1380                                 goto again;
1381                         }
1382                         faili = i - 1;
1383                         goto fail;
1384                 }
1385                 wait_on_page_writeback(pages[i]);
1386         }
1387
1388         return 0;
1389 fail:
1390         while (faili >= 0) {
1391                 unlock_page(pages[faili]);
1392                 put_page(pages[faili]);
1393                 faili--;
1394         }
1395         return err;
1396
1397 }
1398
1399 /*
1400  * This function locks the extent and properly waits for data=ordered extents
1401  * to finish before allowing the pages to be modified if needed.
1402  *
1403  * The return value:
1404  * 1 - the extent is locked
1405  * 0 - the extent is not locked, and everything is OK
1406  * -EAGAIN - the pages need to be re-prepared
1407  * any other negative value - something went wrong
1408  */
1409 static noinline int
1410 lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
1411                                 size_t num_pages, loff_t pos,
1412                                 size_t write_bytes,
1413                                 u64 *lockstart, u64 *lockend,
1414                                 struct extent_state **cached_state)
1415 {
1416         struct btrfs_fs_info *fs_info = inode->root->fs_info;
1417         u64 start_pos;
1418         u64 last_pos;
1419         int i;
1420         int ret = 0;
1421
1422         start_pos = round_down(pos, fs_info->sectorsize);
1423         last_pos = round_up(pos + write_bytes, fs_info->sectorsize) - 1;
1424
1425         if (start_pos < inode->vfs_inode.i_size) {
1426                 struct btrfs_ordered_extent *ordered;
1427
1428                 lock_extent_bits(&inode->io_tree, start_pos, last_pos,
1429                                 cached_state);
1430                 ordered = btrfs_lookup_ordered_range(inode, start_pos,
1431                                                      last_pos - start_pos + 1);
1432                 if (ordered &&
1433                     ordered->file_offset + ordered->num_bytes > start_pos &&
1434                     ordered->file_offset <= last_pos) {
1435                         unlock_extent_cached(&inode->io_tree, start_pos,
1436                                         last_pos, cached_state);
1437                         for (i = 0; i < num_pages; i++) {
1438                                 unlock_page(pages[i]);
1439                                 put_page(pages[i]);
1440                         }
1441                         btrfs_start_ordered_extent(ordered, 1);
1442                         btrfs_put_ordered_extent(ordered);
1443                         return -EAGAIN;
1444                 }
1445                 if (ordered)
1446                         btrfs_put_ordered_extent(ordered);
1447
1448                 *lockstart = start_pos;
1449                 *lockend = last_pos;
1450                 ret = 1;
1451         }
1452
1453         /*
1454          * We should be called after prepare_pages() which should have locked
1455          * all pages in the range.
1456          */
1457         for (i = 0; i < num_pages; i++)
1458                 WARN_ON(!PageLocked(pages[i]));
1459
1460         return ret;
1461 }
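
/*
 * Illustrative sketch, not part of the original file: how a caller is
 * expected to act on each return value of lock_and_cleanup_extent_if_need().
 * It mirrors the handling in btrfs_buffered_write() below; the names are
 * only for illustration.
 *
 *	extents_locked = lock_and_cleanup_extent_if_need(inode, pages,
 *			num_pages, pos, write_bytes, &lockstart, &lockend,
 *			&cached_state);
 *	if (extents_locked < 0) {
 *		if (extents_locked == -EAGAIN)
 *			goto again;		(redo prepare_pages())
 *		return extents_locked;		(hard error)
 *	}
 *	(... copy the data ...)
 *	if (extents_locked)			(1: we hold the extent lock)
 *		unlock_extent_cached(&inode->io_tree, lockstart, lockend,
 *				     &cached_state);
 *	else					(0: start_pos >= i_size)
 *		free_extent_state(cached_state);
 */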
1462
1463 static int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
1464                            size_t *write_bytes, bool nowait)
1465 {
1466         struct btrfs_fs_info *fs_info = inode->root->fs_info;
1467         struct btrfs_root *root = inode->root;
1468         u64 lockstart, lockend;
1469         u64 num_bytes;
1470         int ret;
1471
1472         if (!(inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
1473                 return 0;
1474
1475         if (!nowait && !btrfs_drew_try_write_lock(&root->snapshot_lock))
1476                 return -EAGAIN;
1477
1478         lockstart = round_down(pos, fs_info->sectorsize);
1479         lockend = round_up(pos + *write_bytes,
1480                            fs_info->sectorsize) - 1;
1481         num_bytes = lockend - lockstart + 1;
1482
1483         if (nowait) {
1484                 struct btrfs_ordered_extent *ordered;
1485
1486                 if (!try_lock_extent(&inode->io_tree, lockstart, lockend))
1487                         return -EAGAIN;
1488
1489                 ordered = btrfs_lookup_ordered_range(inode, lockstart,
1490                                                      num_bytes);
1491                 if (ordered) {
1492                         btrfs_put_ordered_extent(ordered);
1493                         ret = -EAGAIN;
1494                         goto out_unlock;
1495                 }
1496         } else {
1497                 btrfs_lock_and_flush_ordered_range(inode, lockstart,
1498                                                    lockend, NULL);
1499         }
1500
1501         ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
1502                         NULL, NULL, NULL, false);
1503         if (ret <= 0) {
1504                 ret = 0;
1505                 if (!nowait)
1506                         btrfs_drew_write_unlock(&root->snapshot_lock);
1507         } else {
1508                 *write_bytes = min_t(size_t, *write_bytes,
1509                                      num_bytes - pos + lockstart);
1510         }
1511 out_unlock:
1512         unlock_extent(&inode->io_tree, lockstart, lockend);
1513
1514         return ret;
1515 }
1516
1517 static int check_nocow_nolock(struct btrfs_inode *inode, loff_t pos,
1518                               size_t *write_bytes)
1519 {
1520         return check_can_nocow(inode, pos, write_bytes, true);
1521 }
1522
1523 /*
1524  * Check if we can do nocow write into the range [@pos, @pos + @write_bytes)
1525  *
1526  * @pos:         File offset
1527  * @write_bytes: The length to write, will be updated to the nocow writeable
1528  *               range
1529  *
1530  * This function will flush ordered extents in the range to ensure proper
1531  * nocow checks.
1532  *
1533  * Return:
1534  * >0           and update @write_bytes if we can do nocow write
1535  *  0           if we can't do nocow write
1536  * -EAGAIN      if we can't get the needed lock or there are ordered extents
1537  *              in the range (only for the nowait == true case)
1538  * <0           if other error happened
1539  *
1540  * NOTE: Callers need to release the lock by btrfs_check_nocow_unlock().
1541  */
1542 int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
1543                            size_t *write_bytes)
1544 {
1545         return check_can_nocow(inode, pos, write_bytes, false);
1546 }
1547
1548 void btrfs_check_nocow_unlock(struct btrfs_inode *inode)
1549 {
1550         btrfs_drew_write_unlock(&inode->root->snapshot_lock);
1551 }
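
/*
 * Illustrative sketch, not in the original source: the expected pairing of
 * btrfs_check_nocow_lock() and btrfs_check_nocow_unlock() around a NOCOW
 * write attempt.  This helper is hypothetical and only documents the
 * contract; the real usage is in btrfs_buffered_write() below.
 */
static inline bool example_try_nocow_write(struct btrfs_inode *inode,
                                           loff_t pos, size_t *write_bytes)
{
        /* > 0 means NOCOW is possible and *write_bytes may have been shrunk */
        if (btrfs_check_nocow_lock(inode, pos, write_bytes) <= 0)
                return false;

        /*
         * The caller would now reserve metadata only (no data space needed),
         * do the write, and then drop the snapshot drew lock:
         */
        btrfs_check_nocow_unlock(inode);
        return true;
}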
1552
1553 static void update_time_for_write(struct inode *inode)
1554 {
1555         struct timespec64 now;
1556
1557         if (IS_NOCMTIME(inode))
1558                 return;
1559
1560         now = current_time(inode);
1561         if (!timespec64_equal(&inode->i_mtime, &now))
1562                 inode->i_mtime = now;
1563
1564         if (!timespec64_equal(&inode->i_ctime, &now))
1565                 inode->i_ctime = now;
1566
1567         if (IS_I_VERSION(inode))
1568                 inode_inc_iversion(inode);
1569 }
1570
1571 static int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from,
1572                              size_t count)
1573 {
1574         struct file *file = iocb->ki_filp;
1575         struct inode *inode = file_inode(file);
1576         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1577         loff_t pos = iocb->ki_pos;
1578         int ret;
1579         loff_t oldsize;
1580         loff_t start_pos;
1581
1582         if (iocb->ki_flags & IOCB_NOWAIT) {
1583                 size_t nocow_bytes = count;
1584
1585                 /* We will allocate space in case nodatacow is not set, so bail */
1586                 if (check_nocow_nolock(BTRFS_I(inode), pos, &nocow_bytes) <= 0)
1587                         return -EAGAIN;
1588                 /*
1589                  * There are holes in the range or parts of the range that must
1590                  * be COWed (shared extents, RO block groups, etc), so just bail
1591                  * out.
1592                  */
1593                 if (nocow_bytes < count)
1594                         return -EAGAIN;
1595         }
1596
1597         current->backing_dev_info = inode_to_bdi(inode);
1598         ret = file_remove_privs(file);
1599         if (ret)
1600                 return ret;
1601
1602         /*
1603          * We reserve space for updating the inode when we reserve space for the
1604          * extent we are going to write, so we will get ENOSPC there.  We don't
1605          * need to start yet another transaction to update the inode as we will
1606          * update the inode when we finish writing whatever data we write.
1607          */
1608         update_time_for_write(inode);
1609
1610         start_pos = round_down(pos, fs_info->sectorsize);
1611         oldsize = i_size_read(inode);
1612         if (start_pos > oldsize) {
1613                 /* Expand hole size to cover write data, preventing empty gap */
1614                 loff_t end_pos = round_up(pos + count, fs_info->sectorsize);
1615
1616                 ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, end_pos);
1617                 if (ret) {
1618                         current->backing_dev_info = NULL;
1619                         return ret;
1620                 }
1621         }
1622
1623         return 0;
1624 }
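
/*
 * Illustrative note, not in the original source: both write paths run the
 * same prologue before touching any pages or extents.  A condensed sketch:
 *
 *	ret = btrfs_inode_lock(inode, ilock_flags);
 *	ret = generic_write_checks(iocb, from);		(clamps the byte count)
 *	if (ret <= 0)
 *		goto out;
 *	ret = btrfs_write_check(iocb, from, ret);	(privs, times, EOF hole)
 *	if (ret < 0)
 *		goto out;
 *
 * See btrfs_buffered_write() and btrfs_direct_write() below for the real
 * error handling around this sequence.
 */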
1625
1626 static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
1627                                                struct iov_iter *i)
1628 {
1629         struct file *file = iocb->ki_filp;
1630         loff_t pos;
1631         struct inode *inode = file_inode(file);
1632         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1633         struct page **pages = NULL;
1634         struct extent_changeset *data_reserved = NULL;
1635         u64 release_bytes = 0;
1636         u64 lockstart;
1637         u64 lockend;
1638         size_t num_written = 0;
1639         int nrptrs;
1640         ssize_t ret;
1641         bool only_release_metadata = false;
1642         bool force_page_uptodate = false;
1643         loff_t old_isize = i_size_read(inode);
1644         unsigned int ilock_flags = 0;
1645
1646         if (iocb->ki_flags & IOCB_NOWAIT)
1647                 ilock_flags |= BTRFS_ILOCK_TRY;
1648
1649         ret = btrfs_inode_lock(inode, ilock_flags);
1650         if (ret < 0)
1651                 return ret;
1652
1653         ret = generic_write_checks(iocb, i);
1654         if (ret <= 0)
1655                 goto out;
1656
1657         ret = btrfs_write_check(iocb, i, ret);
1658         if (ret < 0)
1659                 goto out;
1660
1661         pos = iocb->ki_pos;
1662         nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
1663                         PAGE_SIZE / (sizeof(struct page *)));
1664         nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
1665         nrptrs = max(nrptrs, 8);
1666         pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
1667         if (!pages) {
1668                 ret = -ENOMEM;
1669                 goto out;
1670         }
1671
1672         while (iov_iter_count(i) > 0) {
1673                 struct extent_state *cached_state = NULL;
1674                 size_t offset = offset_in_page(pos);
1675                 size_t sector_offset;
1676                 size_t write_bytes = min(iov_iter_count(i),
1677                                          nrptrs * (size_t)PAGE_SIZE -
1678                                          offset);
1679                 size_t num_pages;
1680                 size_t reserve_bytes;
1681                 size_t dirty_pages;
1682                 size_t copied;
1683                 size_t dirty_sectors;
1684                 size_t num_sectors;
1685                 int extents_locked;
1686
1687                 /*
1688                  * Fault in the pages before locking them in prepare_pages()
1689                  * to avoid a recursive lock
1690                  */
1691                 if (unlikely(fault_in_iov_iter_readable(i, write_bytes))) {
1692                         ret = -EFAULT;
1693                         break;
1694                 }
1695
1696                 only_release_metadata = false;
1697                 sector_offset = pos & (fs_info->sectorsize - 1);
1698
1699                 extent_changeset_release(data_reserved);
1700                 ret = btrfs_check_data_free_space(BTRFS_I(inode),
1701                                                   &data_reserved, pos,
1702                                                   write_bytes);
1703                 if (ret < 0) {
1704                         /*
1705                          * If we don't have to COW at the offset, reserve
1706                          * metadata only. write_bytes may get smaller than
1707                          * requested here.
1708                          */
1709                         if (btrfs_check_nocow_lock(BTRFS_I(inode), pos,
1710                                                    &write_bytes) > 0)
1711                                 only_release_metadata = true;
1712                         else
1713                                 break;
1714                 }
1715
1716                 num_pages = DIV_ROUND_UP(write_bytes + offset, PAGE_SIZE);
1717                 WARN_ON(num_pages > nrptrs);
1718                 reserve_bytes = round_up(write_bytes + sector_offset,
1719                                          fs_info->sectorsize);
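                /*
                 * Worked example, illustrative only, assuming 4K sectors and
                 * 4K pages: pos = 6144 and write_bytes = 5000 give
                 * offset = sector_offset = 2048, so
                 * num_pages = DIV_ROUND_UP(5000 + 2048, 4096) = 2 and
                 * reserve_bytes = round_up(5000 + 2048, 4096) = 8192.
                 */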
1720                 WARN_ON(reserve_bytes == 0);
1721                 ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
1722                                                       reserve_bytes,
1723                                                       reserve_bytes);
1724                 if (ret) {
1725                         if (!only_release_metadata)
1726                                 btrfs_free_reserved_data_space(BTRFS_I(inode),
1727                                                 data_reserved, pos,
1728                                                 write_bytes);
1729                         else
1730                                 btrfs_check_nocow_unlock(BTRFS_I(inode));
1731                         break;
1732                 }
1733
1734                 release_bytes = reserve_bytes;
1735 again:
1736                 /*
1737                  * This is going to set up the pages array with the number of
1738                  * pages we want, so we don't really need to worry about the
1739                  * contents of pages from loop to loop
1740                  */
1741                 ret = prepare_pages(inode, pages, num_pages,
1742                                     pos, write_bytes,
1743                                     force_page_uptodate);
1744                 if (ret) {
1745                         btrfs_delalloc_release_extents(BTRFS_I(inode),
1746                                                        reserve_bytes);
1747                         break;
1748                 }
1749
1750                 extents_locked = lock_and_cleanup_extent_if_need(
1751                                 BTRFS_I(inode), pages,
1752                                 num_pages, pos, write_bytes, &lockstart,
1753                                 &lockend, &cached_state);
1754                 if (extents_locked < 0) {
1755                         if (extents_locked == -EAGAIN)
1756                                 goto again;
1757                         btrfs_delalloc_release_extents(BTRFS_I(inode),
1758                                                        reserve_bytes);
1759                         ret = extents_locked;
1760                         break;
1761                 }
1762
1763                 copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
1764
1765                 num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
1766                 dirty_sectors = round_up(copied + sector_offset,
1767                                         fs_info->sectorsize);
1768                 dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);
1769
1770                 /*
1771                  * if we have trouble faulting in the pages, fall
1772                  * back to one page at a time
1773                  */
1774                 if (copied < write_bytes)
1775                         nrptrs = 1;
1776
1777                 if (copied == 0) {
1778                         force_page_uptodate = true;
1779                         dirty_sectors = 0;
1780                         dirty_pages = 0;
1781                 } else {
1782                         force_page_uptodate = false;
1783                         dirty_pages = DIV_ROUND_UP(copied + offset,
1784                                                    PAGE_SIZE);
1785                 }
1786
1787                 if (num_sectors > dirty_sectors) {
1788                         /* release everything except the sectors we dirtied */
1789                         release_bytes -= dirty_sectors << fs_info->sectorsize_bits;
1790                         if (only_release_metadata) {
1791                                 btrfs_delalloc_release_metadata(BTRFS_I(inode),
1792                                                         release_bytes, true);
1793                         } else {
1794                                 u64 __pos;
1795
1796                                 __pos = round_down(pos,
1797                                                    fs_info->sectorsize) +
1798                                         (dirty_pages << PAGE_SHIFT);
1799                                 btrfs_delalloc_release_space(BTRFS_I(inode),
1800                                                 data_reserved, __pos,
1801                                                 release_bytes, true);
1802                         }
1803                 }
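                /*
                 * Worked example, illustrative only, with 4K sectors
                 * (sectorsize_bits = 12): reserve_bytes = 8192
                 * (num_sectors = 2), sector_offset = 2048 and a short copy
                 * of copied = 1000 bytes give
                 * dirty_sectors = round_up(1000 + 2048, 4096) >> 12 = 1.
                 * We then release 8192 - (1 << 12) = 4096 bytes for the
                 * sector we never dirtied and keep 4096 reserved for the
                 * dirty one (release_bytes is recomputed just below).
                 */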
1804
1805                 release_bytes = round_up(copied + sector_offset,
1806                                         fs_info->sectorsize);
1807
1808                 ret = btrfs_dirty_pages(BTRFS_I(inode), pages,
1809                                         dirty_pages, pos, copied,
1810                                         &cached_state, only_release_metadata);
1811
1812                 /*
1813                  * If we have not locked the extent range, because the range's
1814                  * start offset is >= i_size, we might still have a non-NULL
1815                  * cached extent state, acquired while marking the extent range
1816                  * as delalloc through btrfs_dirty_pages(). Therefore free any
1817                  * possible cached extent state to avoid a memory leak.
1818                  */
1819                 if (extents_locked)
1820                         unlock_extent_cached(&BTRFS_I(inode)->io_tree,
1821                                              lockstart, lockend, &cached_state);
1822                 else
1823                         free_extent_state(cached_state);
1824
1825                 btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
1826                 if (ret) {
1827                         btrfs_drop_pages(fs_info, pages, num_pages, pos, copied);
1828                         break;
1829                 }
1830
1831                 release_bytes = 0;
1832                 if (only_release_metadata)
1833                         btrfs_check_nocow_unlock(BTRFS_I(inode));
1834
1835                 btrfs_drop_pages(fs_info, pages, num_pages, pos, copied);
1836
1837                 cond_resched();
1838
1839                 balance_dirty_pages_ratelimited(inode->i_mapping);
1840
1841                 pos += copied;
1842                 num_written += copied;
1843         }
1844
1845         kfree(pages);
1846
1847         if (release_bytes) {
1848                 if (only_release_metadata) {
1849                         btrfs_check_nocow_unlock(BTRFS_I(inode));
1850                         btrfs_delalloc_release_metadata(BTRFS_I(inode),
1851                                         release_bytes, true);
1852                 } else {
1853                         btrfs_delalloc_release_space(BTRFS_I(inode),
1854                                         data_reserved,
1855                                         round_down(pos, fs_info->sectorsize),
1856                                         release_bytes, true);
1857                 }
1858         }
1859
1860         extent_changeset_free(data_reserved);
1861         if (num_written > 0) {
1862                 pagecache_isize_extended(inode, old_isize, iocb->ki_pos);
1863                 iocb->ki_pos += num_written;
1864         }
1865 out:
1866         btrfs_inode_unlock(inode, ilock_flags);
1867         return num_written ? num_written : ret;
1868 }
1869
1870 static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
1871                                const struct iov_iter *iter, loff_t offset)
1872 {
1873         const u32 blocksize_mask = fs_info->sectorsize - 1;
1874
1875         if (offset & blocksize_mask)
1876                 return -EINVAL;
1877
1878         if (iov_iter_alignment(iter) & blocksize_mask)
1879                 return -EINVAL;
1880
1881         return 0;
1882 }
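
/*
 * Illustrative example, assuming a 4K sector size: blocksize_mask is 4095,
 * so a DIO at offset 8192 passes while one at offset 6144 fails with
 * -EINVAL (6144 & 4095 == 2048).  iov_iter_alignment() applies the same
 * test to the user buffer addresses and segment lengths.
 */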
1883
1884 static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
1885 {
1886         const bool is_sync_write = (iocb->ki_flags & IOCB_DSYNC);
1887         struct file *file = iocb->ki_filp;
1888         struct inode *inode = file_inode(file);
1889         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1890         loff_t pos;
1891         ssize_t written = 0;
1892         ssize_t written_buffered;
1893         size_t prev_left = 0;
1894         loff_t endbyte;
1895         ssize_t err;
1896         unsigned int ilock_flags = 0;
1897
1898         if (iocb->ki_flags & IOCB_NOWAIT)
1899                 ilock_flags |= BTRFS_ILOCK_TRY;
1900
1901         /* If the write DIO is within EOF, use a shared lock */
1902         if (iocb->ki_pos + iov_iter_count(from) <= i_size_read(inode))
1903                 ilock_flags |= BTRFS_ILOCK_SHARED;
1904
1905 relock:
1906         err = btrfs_inode_lock(inode, ilock_flags);
1907         if (err < 0)
1908                 return err;
1909
1910         err = generic_write_checks(iocb, from);
1911         if (err <= 0) {
1912                 btrfs_inode_unlock(inode, ilock_flags);
1913                 return err;
1914         }
1915
1916         err = btrfs_write_check(iocb, from, err);
1917         if (err < 0) {
1918                 btrfs_inode_unlock(inode, ilock_flags);
1919                 goto out;
1920         }
1921
1922         pos = iocb->ki_pos;
1923         /*
1924          * Re-check since file size may have changed just before taking the
1925          * lock or pos may have changed because of O_APPEND in generic_write_checks()
1926          */
1927         if ((ilock_flags & BTRFS_ILOCK_SHARED) &&
1928             pos + iov_iter_count(from) > i_size_read(inode)) {
1929                 btrfs_inode_unlock(inode, ilock_flags);
1930                 ilock_flags &= ~BTRFS_ILOCK_SHARED;
1931                 goto relock;
1932         }
1933
1934         if (check_direct_IO(fs_info, from, pos)) {
1935                 btrfs_inode_unlock(inode, ilock_flags);
1936                 goto buffered;
1937         }
1938
1939         /*
1940          * We remove IOCB_DSYNC so that we don't deadlock when iomap_dio_rw()
1941          * calls generic_write_sync() (through iomap_dio_complete()), because
1942          * that results in calling fsync (btrfs_sync_file()) which will try to
1943          * lock the inode in exclusive/write mode.
1944          */
1945         if (is_sync_write)
1946                 iocb->ki_flags &= ~IOCB_DSYNC;
1947
1948         /*
1949          * The iov_iter can be mapped to the same file range we are writing to.
1950          * If that's the case, then we will deadlock in the iomap code, because
1951          * it first calls our callback btrfs_dio_iomap_begin(), which will create
1952          * an ordered extent, and after that it will fault in the pages that the
1953          * iov_iter refers to. During the fault in we end up in the readahead
1954          * pages code (starting at btrfs_readahead()), which will lock the range,
1955          * find that ordered extent and then wait for it to complete (at
1956          * btrfs_lock_and_flush_ordered_range()), resulting in a deadlock since
1957          * obviously the ordered extent can never complete as we didn't submit
1958          * yet the respective bio(s). This always happens when the buffer is
1959          * memory mapped to the same file range, since the iomap DIO code always
1960          * invalidates pages in the target file range (after starting and waiting
1961          * for any writeback).
1962          *
1963          * So here we disable page faults in the iov_iter and then retry if we
1964          * got -EFAULT, faulting in the pages before the retry.
1965          */
1966 again:
1967         from->nofault = true;
1968         err = iomap_dio_rw(iocb, from, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
1969                            IOMAP_DIO_PARTIAL, written);
1970         from->nofault = false;
1971
1972         /* No increment (+=) because iomap returns a cumulative value. */
1973         if (err > 0)
1974                 written = err;
1975
1976         if (iov_iter_count(from) > 0 && (err == -EFAULT || err > 0)) {
1977                 const size_t left = iov_iter_count(from);
1978                 /*
1979                  * We have more data left to write. Try to fault in as many as
1980                  * possible of the remainder pages and retry. We do this without
1981                  * releasing and locking again the inode, to prevent races with
1982                  * truncate.
1983                  *
1984                  * Also, in case the iov refers to pages in the file range of the
1985                  * file we want to write to (due to a mmap), we could enter an
1986                  * infinite loop if we retry after faulting the pages in, since
1987                  * iomap will invalidate any pages in the range early on, before
1988                  * it tries to fault in the pages of the iov. So we keep track of
1989          * how much was left of iov in the previous EFAULT and fall back
1990                  * to buffered IO in case we haven't made any progress.
1991                  */
1992                 if (left == prev_left) {
1993                         err = -ENOTBLK;
1994                 } else {
1995                         fault_in_iov_iter_readable(from, left);
1996                         prev_left = left;
1997                         goto again;
1998                 }
1999         }
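
        /*
         * Illustrative progression, hypothetical numbers: for a 16384 byte
         * write, a first pass may return -EFAULT with 8192 bytes left; we
         * fault those pages in (prev_left = 8192) and retry.  If the retry
         * completes, err becomes the cumulative 16384.  If it instead
         * returns -EFAULT with left still equal to prev_left, we made no
         * progress and convert the error to -ENOTBLK to fall back to
         * buffered IO below.
         */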
2000
2001         btrfs_inode_unlock(inode, ilock_flags);
2002
2003         /*
2004          * Add back IOCB_DSYNC. Our caller, btrfs_file_write_iter(), will do
2005          * the fsync (call generic_write_sync()).
2006          */
2007         if (is_sync_write)
2008                 iocb->ki_flags |= IOCB_DSYNC;
2009
2010         /* If 'err' is -ENOTBLK then it means we must fall back to buffered IO. */
2011         if ((err < 0 && err != -ENOTBLK) || !iov_iter_count(from))
2012                 goto out;
2013
2014 buffered:
2015         pos = iocb->ki_pos;
2016         written_buffered = btrfs_buffered_write(iocb, from);
2017         if (written_buffered < 0) {
2018                 err = written_buffered;
2019                 goto out;
2020         }
2021         /*
2022          * Ensure all data is persisted. We want the next direct IO read to be
2023          * able to read what was just written.
2024          */
2025         endbyte = pos + written_buffered - 1;
2026         err = btrfs_fdatawrite_range(inode, pos, endbyte);
2027         if (err)
2028                 goto out;
2029         err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
2030         if (err)
2031                 goto out;
2032         written += written_buffered;
2033         iocb->ki_pos = pos + written_buffered;
2034         invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
2035                                  endbyte >> PAGE_SHIFT);
2036 out:
2037         return err < 0 ? err : written;
2038 }
2039
2040 static ssize_t btrfs_encoded_write(struct kiocb *iocb, struct iov_iter *from,
2041                         const struct btrfs_ioctl_encoded_io_args *encoded)
2042 {
2043         struct file *file = iocb->ki_filp;
2044         struct inode *inode = file_inode(file);
2045         loff_t count;
2046         ssize_t ret;
2047
2048         btrfs_inode_lock(inode, 0);
2049         count = encoded->len;
2050         ret = generic_write_checks_count(iocb, &count);
2051         if (ret == 0 && count != encoded->len) {
2052                 /*
2053                  * The write got truncated by generic_write_checks_count(). We
2054                  * can't do a partial encoded write.
2055                  */
2056                 ret = -EFBIG;
2057         }
2058         if (ret || encoded->len == 0)
2059                 goto out;
2060
2061         ret = btrfs_write_check(iocb, from, encoded->len);
2062         if (ret < 0)
2063                 goto out;
2064
2065         ret = btrfs_do_encoded_write(iocb, from, encoded);
2066 out:
2067         btrfs_inode_unlock(inode, 0);
2068         return ret;
2069 }
2070
2071 ssize_t btrfs_do_write_iter(struct kiocb *iocb, struct iov_iter *from,
2072                             const struct btrfs_ioctl_encoded_io_args *encoded)
2073 {
2074         struct file *file = iocb->ki_filp;
2075         struct btrfs_inode *inode = BTRFS_I(file_inode(file));
2076         ssize_t num_written, num_sync;
2077         const bool sync = iocb->ki_flags & IOCB_DSYNC;
2078
2079         /*
2080          * If the fs flips readonly due to some unexpected error, even though
2081          * we have opened the file as writable, we have to stop this write operation
2082          * to ensure consistency.
2083          */
2084         if (BTRFS_FS_ERROR(inode->root->fs_info))
2085                 return -EROFS;
2086
2087         if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
2088                 return -EOPNOTSUPP;
2089
2090         if (sync)
2091                 atomic_inc(&inode->sync_writers);
2092
2093         if (encoded) {
2094                 num_written = btrfs_encoded_write(iocb, from, encoded);
2095                 num_sync = encoded->len;
2096         } else if (iocb->ki_flags & IOCB_DIRECT) {
2097                 num_written = num_sync = btrfs_direct_write(iocb, from);
2098         } else {
2099                 num_written = num_sync = btrfs_buffered_write(iocb, from);
2100         }
2101
2102         btrfs_set_inode_last_sub_trans(inode);
2103
2104         if (num_sync > 0) {
2105                 num_sync = generic_write_sync(iocb, num_sync);
2106                 if (num_sync < 0)
2107                         num_written = num_sync;
2108         }
2109
2110         if (sync)
2111                 atomic_dec(&inode->sync_writers);
2112
2113         current->backing_dev_info = NULL;
2114         return num_written;
2115 }
2116
2117 static ssize_t btrfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2118 {
2119         return btrfs_do_write_iter(iocb, from, NULL);
2120 }
2121
2122 int btrfs_release_file(struct inode *inode, struct file *filp)
2123 {
2124         struct btrfs_file_private *private = filp->private_data;
2125
2126         if (private)
2127                 kfree(private->filldir_buf);
2128         kfree(private);
2129         filp->private_data = NULL;
2130
2131         /*
2132          * Set by setattr when we are about to truncate a file from a non-zero
2133          * size to a zero size.  This tries to flush down new bytes that may
2134          * have been written if the application were using truncate to replace
2135          * a file in place.
2136          */
2137         if (test_and_clear_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
2138                                &BTRFS_I(inode)->runtime_flags))
2139                 filemap_flush(inode->i_mapping);
2140         return 0;
2141 }
2142
2143 static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
2144 {
2145         int ret;
2146         struct blk_plug plug;
2147
2148         /*
2149          * This is only called in fsync, which would do synchronous writes, so
2150          * a plug can merge adjacent IOs as much as possible.  Esp. in case of
2151          * a plug can merge adjacent IOs as much as possible.  Especially in
2152          * case of multiple disks using a raid profile, a large IO can be split into
2153          */
2154         blk_start_plug(&plug);
2155         atomic_inc(&BTRFS_I(inode)->sync_writers);
2156         ret = btrfs_fdatawrite_range(inode, start, end);
2157         atomic_dec(&BTRFS_I(inode)->sync_writers);
2158         blk_finish_plug(&plug);
2159
2160         return ret;
2161 }
2162
2163 static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx)
2164 {
2165         struct btrfs_inode *inode = BTRFS_I(ctx->inode);
2166         struct btrfs_fs_info *fs_info = inode->root->fs_info;
2167
2168         if (btrfs_inode_in_log(inode, fs_info->generation) &&
2169             list_empty(&ctx->ordered_extents))
2170                 return true;
2171
2172         /*
2173          * If we are doing a fast fsync we cannot bail out if the inode's
2174          * last_trans is <= the last committed transaction, because we only
2175          * update the last_trans of the inode during ordered extent completion,
2176          * and for a fast fsync we don't wait for that, we only wait for the
2177          * writeback to complete.
2178          */
2179         if (inode->last_trans <= fs_info->last_trans_committed &&
2180             (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) ||
2181              list_empty(&ctx->ordered_extents)))
2182                 return true;
2183
2184         return false;
2185 }
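
/*
 * Recap of the two skip cases above, illustrative only, no new logic:
 *
 * 1) The inode was fully logged in the current transaction generation and
 *    the context has no ordered extents attached: the log is already up to
 *    date.
 *
 * 2) The inode's last_trans is not newer than the last committed transaction
 *    and either a full sync is pending (last_trans is reliable then) or
 *    there are no ordered extents to attach: there is nothing new to log.
 */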
2186
2187 /*
2188  * fsync call for both files and directories.  This logs the inode into
2189  * the tree log instead of forcing full commits whenever possible.
2190  *
2191  * It needs to call filemap_fdatawait so that all ordered extent updates in
2192  * the metadata btree are up to date for copying to the log.
2193  *
2194  * It drops the inode mutex before doing the tree log commit.  This is an
2195  * important optimization for directories because holding the mutex prevents
2196  * new operations on the dir while we write to disk.
2197  */
2198 int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
2199 {
2200         struct dentry *dentry = file_dentry(file);
2201         struct inode *inode = d_inode(dentry);
2202         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2203         struct btrfs_root *root = BTRFS_I(inode)->root;
2204         struct btrfs_trans_handle *trans;
2205         struct btrfs_log_ctx ctx;
2206         int ret = 0, err;
2207         u64 len;
2208         bool full_sync;
2209
2210         trace_btrfs_sync_file(file, datasync);
2211
2212         btrfs_init_log_ctx(&ctx, inode);
2213
2214         /*
2215          * Always set the range to a full range, otherwise we can get into
2216          * several problems, from missing file extent items to represent holes
2217          * when not using the NO_HOLES feature, to log tree corruption due to
2218          * races between hole detection during logging and completion of ordered
2219          * extents outside the range, to missing checksums due to ordered extents
2220          * for which we flushed only a subset of their pages.
2221          */
2222         start = 0;
2223         end = LLONG_MAX;
2224         len = (u64)LLONG_MAX + 1;
2225
2226         /*
2227          * outside of the ->i_mutex, so that multiple tasks can flush dirty
2228          * pages concurrently and improve performance.  See
2229          * multi-task, and make the performance up.  See
2230          * btrfs_wait_ordered_range for an explanation of the ASYNC check.
2231          */
2232         ret = start_ordered_ops(inode, start, end);
2233         if (ret)
2234                 goto out;
2235
2236         btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
2237
2238         atomic_inc(&root->log_batch);
2239
2240         /*
2241          * Always check for the full sync flag while holding the inode's lock,
2242          * to avoid races with other tasks. The flag must be either set all the
2243          * time during logging or always off all the time while logging.
2244          */
2245         full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2246                              &BTRFS_I(inode)->runtime_flags);
2247
2248         /*
2249          * Before we acquired the inode's lock and the mmap lock, someone may
2250          * have dirtied more pages in the target range. We need to make sure
2251          * that writeback for any such pages does not start while we are logging
2252          * the inode, because if it does, any of the following might happen when
2253          * we are not doing a full inode sync:
2254          *
2255          * 1) We log an extent after its writeback finishes but before its
2256          *    checksums are added to the csum tree, leading to -EIO errors
2257          *    when attempting to read the extent after a log replay.
2258          *
2259          * 2) We can end up logging an extent before its writeback finishes.
2260          *    Therefore after the log replay we will have a file extent item
2261          *    pointing to an unwritten extent (and no data checksums as well).
2262          *
2263          * So trigger writeback for any eventual new dirty pages and then we
2264          * wait for all ordered extents to complete below.
2265          */
2266         ret = start_ordered_ops(inode, start, end);
2267         if (ret) {
2268                 btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
2269                 goto out;
2270         }
2271
2272         /*
2273          * We have to do this here to avoid the priority inversion of waiting on
2274          * IO of a lower priority task while holding a transaction open.
2275          *
2276          * For a full fsync we wait for the ordered extents to complete while
2277          * for a fast fsync we wait just for writeback to complete, and then
2278          * attach the ordered extents to the transaction so that a transaction
2279          * commit waits for their completion, to avoid data loss if we fsync,
2280          * the current transaction commits before the ordered extents complete
2281          * and a power failure happens right after that.
2282          *
2283          * For zoned filesystem, if a write IO uses a ZONE_APPEND command, the
2284          * logical address recorded in the ordered extent may change. We need
2285          * to wait for the IO to stabilize the logical address.
2286          */
2287         if (full_sync || btrfs_is_zoned(fs_info)) {
2288                 ret = btrfs_wait_ordered_range(inode, start, len);
2289         } else {
2290                 /*
2291                  * Get our ordered extents as soon as possible to avoid doing
2292                  * checksum lookups in the csum tree, and use instead the
2293                  * checksums attached to the ordered extents.
2294                  */
2295                 btrfs_get_ordered_extents_for_logging(BTRFS_I(inode),
2296                                                       &ctx.ordered_extents);
2297                 ret = filemap_fdatawait_range(inode->i_mapping, start, end);
2298         }
2299
2300         if (ret)
2301                 goto out_release_extents;
2302
2303         atomic_inc(&root->log_batch);
2304
2305         smp_mb();
2306         if (skip_inode_logging(&ctx)) {
2307                 /*
2308                  * We've had everything committed since the last time we were
2309                  * modified so clear this flag in case it was set for whatever
2310                  * reason, it's no longer relevant.
2311                  */
2312                 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2313                           &BTRFS_I(inode)->runtime_flags);
2314                 /*
2315                  * An ordered extent might have started before and completed
2316                  * already with io errors, in which case the inode was not
2317                  * updated and we end up here. So check the inode's mapping
2318                  * for any errors that might have happened since we last
2319                  * checked called fsync.
2320          * called fsync.
2321                 ret = filemap_check_wb_err(inode->i_mapping, file->f_wb_err);
2322                 goto out_release_extents;
2323         }
2324
2325         /*
2326          * We use start here because we will need to wait on the IO to complete
2327          * in btrfs_sync_log, which could require joining a transaction (for
2328          * example checking cross references in the nocow path).  If we use join
2329          * here we could get into a situation where we're waiting on IO to
2330          * happen that is blocked on a transaction trying to commit.  With start
2331          * we inc the extwriter counter, so we wait for all extwriters to exit
2332          * before we start blocking joiners.  This comment is to keep somebody
2333          * from thinking they are super smart and changing this to
2334          * btrfs_join_transaction *cough*Josef*cough*.
2335          */
2336         trans = btrfs_start_transaction(root, 0);
2337         if (IS_ERR(trans)) {
2338                 ret = PTR_ERR(trans);
2339                 goto out_release_extents;
2340         }
2341         trans->in_fsync = true;
2342
2343         ret = btrfs_log_dentry_safe(trans, dentry, &ctx);
2344         btrfs_release_log_ctx_extents(&ctx);
2345         if (ret < 0) {
2346                 /* Fallthrough and commit/free transaction. */
2347                 ret = 1;
2348         }
2349
2350         /* we've logged all the items and now have a consistent
2351          * version of the file in the log.  It is possible that
2352          * someone will come in and modify the file, but that's
2353          * fine because the log is consistent on disk, and we
2354          * have references to all of the file's extents
2355          *
2356          * It is possible that someone will come in and log the
2357          * file again, but that will end up using the synchronization
2358          * inside btrfs_sync_log to keep things safe.
2359          */
2360         btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
2361
2362         if (ret != BTRFS_NO_LOG_SYNC) {
2363                 if (!ret) {
2364                         ret = btrfs_sync_log(trans, root, &ctx);
2365                         if (!ret) {
2366                                 ret = btrfs_end_transaction(trans);
2367                                 goto out;
2368                         }
2369                 }
2370                 if (!full_sync) {
2371                         ret = btrfs_wait_ordered_range(inode, start, len);
2372                         if (ret) {
2373                                 btrfs_end_transaction(trans);
2374                                 goto out;
2375                         }
2376                 }
2377                 ret = btrfs_commit_transaction(trans);
2378         } else {
2379                 ret = btrfs_end_transaction(trans);
2380         }
2381 out:
2382         ASSERT(list_empty(&ctx.list));
2383         err = file_check_and_advance_wb_err(file);
2384         if (!ret)
2385                 ret = err;
2386         return ret > 0 ? -EIO : ret;
2387
2388 out_release_extents:
2389         btrfs_release_log_ctx_extents(&ctx);
2390         btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
2391         goto out;
2392 }
2393
2394 static const struct vm_operations_struct btrfs_file_vm_ops = {
2395         .fault          = filemap_fault,
2396         .map_pages      = filemap_map_pages,
2397         .page_mkwrite   = btrfs_page_mkwrite,
2398 };
2399
2400 static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
2401 {
2402         struct address_space *mapping = filp->f_mapping;
2403
2404         if (!mapping->a_ops->readpage)
2405                 return -ENOEXEC;
2406
2407         file_accessed(filp);
2408         vma->vm_ops = &btrfs_file_vm_ops;
2409
2410         return 0;
2411 }
2412
2413 static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf,
2414                           int slot, u64 start, u64 end)
2415 {
2416         struct btrfs_file_extent_item *fi;
2417         struct btrfs_key key;
2418
2419         if (slot < 0 || slot >= btrfs_header_nritems(leaf))
2420                 return 0;
2421
2422         btrfs_item_key_to_cpu(leaf, &key, slot);
2423         if (key.objectid != btrfs_ino(inode) ||
2424             key.type != BTRFS_EXTENT_DATA_KEY)
2425                 return 0;
2426
2427         fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2428
2429         if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2430                 return 0;
2431
2432         if (btrfs_file_extent_disk_bytenr(leaf, fi))
2433                 return 0;
2434
2435         if (key.offset == end)
2436                 return 1;
2437         if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
2438                 return 1;
2439         return 0;
2440 }
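
/*
 * Illustrative example, hypothetical numbers: a hole being punched at
 * [4096, 8192) is mergeable with an existing hole extent item (REG type,
 * disk_bytenr == 0) at key.offset == 8192, since the hole continues
 * forward, or with one at key.offset == 0 and num_bytes == 4096, since the
 * hole continues backward.
 */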
2441
2442 static int fill_holes(struct btrfs_trans_handle *trans,
2443                 struct btrfs_inode *inode,
2444                 struct btrfs_path *path, u64 offset, u64 end)
2445 {
2446         struct btrfs_fs_info *fs_info = trans->fs_info;
2447         struct btrfs_root *root = inode->root;
2448         struct extent_buffer *leaf;
2449         struct btrfs_file_extent_item *fi;
2450         struct extent_map *hole_em;
2451         struct extent_map_tree *em_tree = &inode->extent_tree;
2452         struct btrfs_key key;
2453         int ret;
2454
2455         if (btrfs_fs_incompat(fs_info, NO_HOLES))
2456                 goto out;
2457
2458         key.objectid = btrfs_ino(inode);
2459         key.type = BTRFS_EXTENT_DATA_KEY;
2460         key.offset = offset;
2461
2462         ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2463         if (ret <= 0) {
2464                 /*
2465                  * We should have dropped this offset, so if we find it then
2466                  * something has gone horribly wrong.
2467                  */
2468                 if (ret == 0)
2469                         ret = -EINVAL;
2470                 return ret;
2471         }
2472
2473         leaf = path->nodes[0];
2474         if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
2475                 u64 num_bytes;
2476
2477                 path->slots[0]--;
2478                 fi = btrfs_item_ptr(leaf, path->slots[0],
2479                                     struct btrfs_file_extent_item);
2480                 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
2481                         end - offset;
2482                 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2483                 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2484                 btrfs_set_file_extent_offset(leaf, fi, 0);
2485                 btrfs_mark_buffer_dirty(leaf);
2486                 goto out;
2487         }
2488
2489         if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) {
2490                 u64 num_bytes;
2491
2492                 key.offset = offset;
2493                 btrfs_set_item_key_safe(fs_info, path, &key);
2494                 fi = btrfs_item_ptr(leaf, path->slots[0],
2495                                     struct btrfs_file_extent_item);
2496                 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
2497                         offset;
2498                 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2499                 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2500                 btrfs_set_file_extent_offset(leaf, fi, 0);
2501                 btrfs_mark_buffer_dirty(leaf);
2502                 goto out;
2503         }
2504         btrfs_release_path(path);
2505
2506         ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode),
2507                         offset, 0, 0, end - offset, 0, end - offset, 0, 0, 0);
2508         if (ret)
2509                 return ret;
2510
2511 out:
2512         btrfs_release_path(path);
2513
2514         hole_em = alloc_extent_map();
2515         if (!hole_em) {
2516                 btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2517                 btrfs_set_inode_full_sync(inode);
2518         } else {
2519                 hole_em->start = offset;
2520                 hole_em->len = end - offset;
2521                 hole_em->ram_bytes = hole_em->len;
2522                 hole_em->orig_start = offset;
2523
2524                 hole_em->block_start = EXTENT_MAP_HOLE;
2525                 hole_em->block_len = 0;
2526                 hole_em->orig_block_len = 0;
2527                 hole_em->compress_type = BTRFS_COMPRESS_NONE;
2528                 hole_em->generation = trans->transid;
2529
2530                 do {
2531                         btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2532                         write_lock(&em_tree->lock);
2533                         ret = add_extent_mapping(em_tree, hole_em, 1);
2534                         write_unlock(&em_tree->lock);
2535                 } while (ret == -EEXIST);
2536                 free_extent_map(hole_em);
2537                 if (ret)
2538                         btrfs_set_inode_full_sync(inode);
2539         }
2540
2541         return 0;
2542 }
2543
2544 /*
2545  * Find a hole extent on the given inode and change start/len to the end of
2546  * the hole extent (a hole/vacuum extent whose em->start <= start &&
2547  *                  em->start + em->len > start).
2548  * When a hole extent is found, return 1 and modify start/len.
2549  */
2550 static int find_first_non_hole(struct btrfs_inode *inode, u64 *start, u64 *len)
2551 {
2552         struct btrfs_fs_info *fs_info = inode->root->fs_info;
2553         struct extent_map *em;
2554         int ret = 0;
2555
2556         em = btrfs_get_extent(inode, NULL, 0,
2557                               round_down(*start, fs_info->sectorsize),
2558                               round_up(*len, fs_info->sectorsize));
2559         if (IS_ERR(em))
2560                 return PTR_ERR(em);
2561
2562         /* Hole or vacuum extent (vacuum only exists in NO_HOLES mode) */
2563         if (em->block_start == EXTENT_MAP_HOLE) {
2564                 ret = 1;
2565                 *len = em->start + em->len > *start + *len ?
2566                        0 : *start + *len - em->start - em->len;
2567                 *start = em->start + em->len;
2568         }
2569         free_extent_map(em);
2570         return ret;
2571 }
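
/*
 * Illustrative example, hypothetical numbers: for a hole extent map
 * covering [0, 8192) and a caller range of *start = 4096, *len = 8192, we
 * return 1 with *start = 8192 and *len = 4096, the remaining part of the
 * range.  If the hole covered [0, 16384) instead, *len would become 0
 * because the whole range is a hole.
 */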
2572
2573 static int btrfs_punch_hole_lock_range(struct inode *inode,
2574                                        const u64 lockstart,
2575                                        const u64 lockend,
2576                                        struct extent_state **cached_state)
2577 {
2578         /*
2579          * For the subpage case, if the range is not at a page boundary, we
2580          * could have pages at the leading/trailing parts of the range.
2581          * This could lead to an infinite loop since filemap_range_has_page()
2582          * would always return true.
2583          * So here we need to do extra page alignment for
2584          * filemap_range_has_page().
2585          */
2586         const u64 page_lockstart = round_up(lockstart, PAGE_SIZE);
2587         const u64 page_lockend = round_down(lockend + 1, PAGE_SIZE) - 1;
2588
2589         while (1) {
2590                 struct btrfs_ordered_extent *ordered;
2591                 int ret;
2592
2593                 truncate_pagecache_range(inode, lockstart, lockend);
2594
2595                 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2596                                  cached_state);
2597                 ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode),
2598                                                             lockend);
2599
2600                 /*
2601                  * We need to make sure we have no ordered extents in this range
2602                  * and nobody raced in and read a page in this range, if we did
2603                  * we need to try again.
2604                  */
2605                 if ((!ordered ||
2606                     (ordered->file_offset + ordered->num_bytes <= lockstart ||
2607                      ordered->file_offset > lockend)) &&
2608                      !filemap_range_has_page(inode->i_mapping,
2609                                              page_lockstart, page_lockend)) {
2610                         if (ordered)
2611                                 btrfs_put_ordered_extent(ordered);
2612                         break;
2613                 }
2614                 if (ordered)
2615                         btrfs_put_ordered_extent(ordered);
2616                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
2617                                      lockend, cached_state);
2618                 ret = btrfs_wait_ordered_range(inode, lockstart,
2619                                                lockend - lockstart + 1);
2620                 if (ret)
2621                         return ret;
2622         }
2623         return 0;
2624 }
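
/*
 * Illustrative example, assuming 64K pages with a 4K sector size: punching
 * [4096, 131072) gives page_lockstart = 65536 and page_lockend = 131071,
 * so filemap_range_has_page() only checks the fully covered page.  The
 * partially covered first page [0, 65536) keeps data outside the punched
 * range, cannot be truncated away, and would otherwise make the loop above
 * spin forever.
 */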
2625
2626 static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
2627                                      struct btrfs_inode *inode,
2628                                      struct btrfs_path *path,
2629                                      struct btrfs_replace_extent_info *extent_info,
2630                                      const u64 replace_len,
2631                                      const u64 bytes_to_drop)
2632 {
2633         struct btrfs_fs_info *fs_info = trans->fs_info;
2634         struct btrfs_root *root = inode->root;
2635         struct btrfs_file_extent_item *extent;
2636         struct extent_buffer *leaf;
2637         struct btrfs_key key;
2638         int slot;
2639         struct btrfs_ref ref = { 0 };
2640         int ret;
2641
2642         if (replace_len == 0)
2643                 return 0;
2644
2645         if (extent_info->disk_offset == 0 &&
2646             btrfs_fs_incompat(fs_info, NO_HOLES)) {
2647                 btrfs_update_inode_bytes(inode, 0, bytes_to_drop);
2648                 return 0;
2649         }
2650
2651         key.objectid = btrfs_ino(inode);
2652         key.type = BTRFS_EXTENT_DATA_KEY;
2653         key.offset = extent_info->file_offset;
2654         ret = btrfs_insert_empty_item(trans, root, path, &key,
2655                                       sizeof(struct btrfs_file_extent_item));
2656         if (ret)
2657                 return ret;
2658         leaf = path->nodes[0];
2659         slot = path->slots[0];
2660         write_extent_buffer(leaf, extent_info->extent_buf,
2661                             btrfs_item_ptr_offset(leaf, slot),
2662                             sizeof(struct btrfs_file_extent_item));
2663         extent = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2664         ASSERT(btrfs_file_extent_type(leaf, extent) != BTRFS_FILE_EXTENT_INLINE);
2665         btrfs_set_file_extent_offset(leaf, extent, extent_info->data_offset);
2666         btrfs_set_file_extent_num_bytes(leaf, extent, replace_len);
2667         if (extent_info->is_new_extent)
2668                 btrfs_set_file_extent_generation(leaf, extent, trans->transid);
2669         btrfs_mark_buffer_dirty(leaf);
2670         btrfs_release_path(path);
2671
2672         ret = btrfs_inode_set_file_extent_range(inode, extent_info->file_offset,
2673                                                 replace_len);
2674         if (ret)
2675                 return ret;
2676
2677         /* If it's a hole, nothing more needs to be done. */
2678         if (extent_info->disk_offset == 0) {
2679                 btrfs_update_inode_bytes(inode, 0, bytes_to_drop);
2680                 return 0;
2681         }
2682
2683         btrfs_update_inode_bytes(inode, replace_len, bytes_to_drop);
2684
2685         if (extent_info->is_new_extent && extent_info->insertions == 0) {
2686                 key.objectid = extent_info->disk_offset;
2687                 key.type = BTRFS_EXTENT_ITEM_KEY;
2688                 key.offset = extent_info->disk_len;
2689                 ret = btrfs_alloc_reserved_file_extent(trans, root,
2690                                                        btrfs_ino(inode),
2691                                                        extent_info->file_offset,
2692                                                        extent_info->qgroup_reserved,
2693                                                        &key);
2694         } else {
2695                 u64 ref_offset;
2696
2697                 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
2698                                        extent_info->disk_offset,
2699                                        extent_info->disk_len, 0);
2700                 ref_offset = extent_info->file_offset - extent_info->data_offset;
2701                 btrfs_init_data_ref(&ref, root->root_key.objectid,
2702                                     btrfs_ino(inode), ref_offset, 0, false);
2703                 ret = btrfs_inc_extent_ref(trans, &ref);
2704         }
2705
2706         extent_info->insertions++;
2707
2708         return ret;
2709 }
2710
2711 /*
2712  * The respective range must have been previously locked, as well as the inode.
2713  * The end offset is inclusive (last byte of the range).
2714  * @extent_info is NULL for fallocate's hole punching and non-NULL when replacing
2715  * the file range with an extent.
2716  * When not punching a hole, we don't want to end up in a state where we dropped
2717  * extents without inserting a new one, so we must abort the transaction to avoid
2718  * corruption.
2719  */
2720 int btrfs_replace_file_extents(struct btrfs_inode *inode,
2721                                struct btrfs_path *path, const u64 start,
2722                                const u64 end,
2723                                struct btrfs_replace_extent_info *extent_info,
2724                                struct btrfs_trans_handle **trans_out)
2725 {
2726         struct btrfs_drop_extents_args drop_args = { 0 };
2727         struct btrfs_root *root = inode->root;
2728         struct btrfs_fs_info *fs_info = root->fs_info;
2729         u64 min_size = btrfs_calc_insert_metadata_size(fs_info, 1);
2730         u64 ino_size = round_up(inode->vfs_inode.i_size, fs_info->sectorsize);
2731         struct btrfs_trans_handle *trans = NULL;
2732         struct btrfs_block_rsv *rsv;
2733         unsigned int rsv_count;
2734         u64 cur_offset;
2735         u64 len = end - start;
2736         int ret = 0;
2737
2738         if (end <= start)
2739                 return -EINVAL;
2740
2741         rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
2742         if (!rsv) {
2743                 ret = -ENOMEM;
2744                 goto out;
2745         }
2746         rsv->size = btrfs_calc_insert_metadata_size(fs_info, 1);
2747         rsv->failfast = 1;
2748
2749         /*
2750          * 1 - update the inode
2751          * 1 - remove the extents in the range
2752          * 1 - add the hole extent if no_holes isn't set or if we are
2753          *     replacing the range with a new extent
2754          */
2755         if (!btrfs_fs_incompat(fs_info, NO_HOLES) || extent_info)
2756                 rsv_count = 3;
2757         else
2758                 rsv_count = 2;
2759
2760         trans = btrfs_start_transaction(root, rsv_count);
2761         if (IS_ERR(trans)) {
2762                 ret = PTR_ERR(trans);
2763                 trans = NULL;
2764                 goto out_free;
2765         }
2766
2767         ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
2768                                       min_size, false);
2769         BUG_ON(ret);    /* shouldn't happen */
2770         trans->block_rsv = rsv;
2771
2772         cur_offset = start;
2773         drop_args.path = path;
2774         drop_args.end = end + 1;
2775         drop_args.drop_cache = true;
2776         while (cur_offset < end) {
2777                 drop_args.start = cur_offset;
2778                 ret = btrfs_drop_extents(trans, root, inode, &drop_args);
2779                 /* If we are punching a hole, decrement the inode's byte count */
2780                 if (!extent_info)
2781                         btrfs_update_inode_bytes(inode, 0,
2782                                                  drop_args.bytes_found);
2783                 if (ret != -ENOSPC) {
2784                         /*
2785                          * The only time we don't want to abort is if we are
2786                          * attempting to clone a partial inline extent, in which
2787                          * case we'll get EOPNOTSUPP.  However if we aren't
2788                          * cloning we need to abort no matter what, because if we
2789                          * got EOPNOTSUPP via prealloc then we messed up and
2790                          * need to abort.
2791                          */
2792                         if (ret &&
2793                             (ret != -EOPNOTSUPP ||
2794                              (extent_info && extent_info->is_new_extent)))
2795                                 btrfs_abort_transaction(trans, ret);
2796                         break;
2797                 }
2798
2799                 trans->block_rsv = &fs_info->trans_block_rsv;
2800
2801                 if (!extent_info && cur_offset < drop_args.drop_end &&
2802                     cur_offset < ino_size) {
2803                         ret = fill_holes(trans, inode, path, cur_offset,
2804                                          drop_args.drop_end);
2805                         if (ret) {
2806                                 /*
2807                                  * If we failed then we didn't insert our hole
2808                                  * entries for the area we dropped, so the fs is
2809                                  * now corrupted and we must abort the
2810                                  * transaction.
2811                                  */
2812                                 btrfs_abort_transaction(trans, ret);
2813                                 break;
2814                         }
2815                 } else if (!extent_info && cur_offset < drop_args.drop_end) {
2816                         /*
2817                          * We are past the i_size here, but since we didn't
2818                          * insert holes we need to clear the mapped area so we
2819                          * know to not set disk_i_size in this area until a new
2820                          * file extent is inserted here.
2821                          */
2822                         ret = btrfs_inode_clear_file_extent_range(inode,
2823                                         cur_offset,
2824                                         drop_args.drop_end - cur_offset);
2825                         if (ret) {
2826                                 /*
2827                                  * We couldn't clear our area, so we could
2828                                  * presumably adjust up and corrupt the fs, and
2829                                  * thus we need to abort.
2830                                  */
2831                                 btrfs_abort_transaction(trans, ret);
2832                                 break;
2833                         }
2834                 }
2835
2836                 if (extent_info &&
2837                     drop_args.drop_end > extent_info->file_offset) {
2838                         u64 replace_len = drop_args.drop_end -
2839                                           extent_info->file_offset;
2840
2841                         ret = btrfs_insert_replace_extent(trans, inode, path,
2842                                         extent_info, replace_len,
2843                                         drop_args.bytes_found);
2844                         if (ret) {
2845                                 btrfs_abort_transaction(trans, ret);
2846                                 break;
2847                         }
2848                         extent_info->data_len -= replace_len;
2849                         extent_info->data_offset += replace_len;
2850                         extent_info->file_offset += replace_len;
2851                 }
2852
2853                 ret = btrfs_update_inode(trans, root, inode);
2854                 if (ret)
2855                         break;
2856
2857                 btrfs_end_transaction(trans);
2858                 btrfs_btree_balance_dirty(fs_info);
2859
2860                 trans = btrfs_start_transaction(root, rsv_count);
2861                 if (IS_ERR(trans)) {
2862                         ret = PTR_ERR(trans);
2863                         trans = NULL;
2864                         break;
2865                 }
2866
2867                 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
2868                                               rsv, min_size, false);
2869                 BUG_ON(ret);    /* shouldn't happen */
2870                 trans->block_rsv = rsv;
2871
2872                 cur_offset = drop_args.drop_end;
2873                 len = end - cur_offset;
2874                 if (!extent_info && len) {
2875                         ret = find_first_non_hole(inode, &cur_offset, &len);
2876                         if (unlikely(ret < 0))
2877                                 break;
2878                         if (ret && !len) {
2879                                 ret = 0;
2880                                 break;
2881                         }
2882                 }
2883         }
2884
2885         /*
2886          * If we were cloning, force the next fsync to be a full one since
2887          * we replaced (or just dropped in the case of cloning holes when
2888          * NO_HOLES is enabled) file extent items and did not set up new extent
2889          * maps for the replacement extents (or holes).
2890          */
2891         if (extent_info && !extent_info->is_new_extent)
2892                 btrfs_set_inode_full_sync(inode);
2893
2894         if (ret)
2895                 goto out_trans;
2896
2897         trans->block_rsv = &fs_info->trans_block_rsv;
2898         /*
2899          * If we are using the NO_HOLES feature we might already have had a
2900          * hole that overlaps a part of the region [lockstart, lockend] and
2901          * ends at (or beyond) lockend. Since we have no file extent items to
2902          * represent holes, drop_end can be less than lockend and so we must
2903          * make sure we have an extent map representing the existing hole (the
2904          * call to btrfs_drop_extents() might have dropped the existing extent
2905          * map representing the existing hole), otherwise the fast fsync path
2906          * will not record the existence of the hole region
2907          * [existing_hole_start, lockend].
2908          */
2909         if (drop_args.drop_end <= end)
2910                 drop_args.drop_end = end + 1;
2911         /*
2912          * Don't insert a file hole extent item if it's for a range beyond EOF
2913          * (because it's useless) or if it represents a zero-byte range (when
2914          * cur_offset == drop_args.drop_end).
2915          */
2916         if (!extent_info && cur_offset < ino_size &&
2917             cur_offset < drop_args.drop_end) {
2918                 ret = fill_holes(trans, inode, path, cur_offset,
2919                                  drop_args.drop_end);
2920                 if (ret) {
2921                         /* Same comment as above. */
2922                         btrfs_abort_transaction(trans, ret);
2923                         goto out_trans;
2924                 }
2925         } else if (!extent_info && cur_offset < drop_args.drop_end) {
2926                 /* See the comment in the loop above for the reasoning here. */
2927                 ret = btrfs_inode_clear_file_extent_range(inode, cur_offset,
2928                                         drop_args.drop_end - cur_offset);
2929                 if (ret) {
2930                         btrfs_abort_transaction(trans, ret);
2931                         goto out_trans;
2932                 }
2933
2934         }
2935         if (extent_info) {
2936                 ret = btrfs_insert_replace_extent(trans, inode, path,
2937                                 extent_info, extent_info->data_len,
2938                                 drop_args.bytes_found);
2939                 if (ret) {
2940                         btrfs_abort_transaction(trans, ret);
2941                         goto out_trans;
2942                 }
2943         }
2944
2945 out_trans:
2946         if (!trans)
2947                 goto out_free;
2948
2949         trans->block_rsv = &fs_info->trans_block_rsv;
2950         if (ret)
2951                 btrfs_end_transaction(trans);
2952         else
2953                 *trans_out = trans;
2954 out_free:
2955         btrfs_free_block_rsv(fs_info, rsv);
2956 out:
2957         return ret;
2958 }
2959
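/*
 * Punch a hole in the range [offset, offset + len) of the file. Partial
 * blocks at the edges are zeroed in place with btrfs_truncate_block(), while
 * the block aligned middle part has its file extent items dropped and,
 * unless the NO_HOLES feature is enabled, replaced with hole extents via
 * btrfs_replace_file_extents().
 */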
2960 static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
2961 {
2962         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2963         struct btrfs_root *root = BTRFS_I(inode)->root;
2964         struct extent_state *cached_state = NULL;
2965         struct btrfs_path *path;
2966         struct btrfs_trans_handle *trans = NULL;
2967         u64 lockstart;
2968         u64 lockend;
2969         u64 tail_start;
2970         u64 tail_len;
2971         u64 orig_start = offset;
2972         int ret = 0;
2973         bool same_block;
2974         u64 ino_size;
2975         bool truncated_block = false;
2976         bool updated_inode = false;
2977
2978         ret = btrfs_wait_ordered_range(inode, offset, len);
2979         if (ret)
2980                 return ret;
2981
2982         btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
2983         ino_size = round_up(inode->i_size, fs_info->sectorsize);
2984         ret = find_first_non_hole(BTRFS_I(inode), &offset, &len);
2985         if (ret < 0)
2986                 goto out_only_mutex;
2987         if (ret && !len) {
2988                 /* Already in a large hole */
2989                 ret = 0;
2990                 goto out_only_mutex;
2991         }
2992
2993         lockstart = round_up(offset, btrfs_inode_sectorsize(BTRFS_I(inode)));
2994         lockend = round_down(offset + len,
2995                              btrfs_inode_sectorsize(BTRFS_I(inode))) - 1;
2996         same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset))
2997                 == (BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1));
2998         /*
2999          * We needn't truncate any block which is beyond the end of the file
3000          * because we are sure there is no data there. Only do this if we are
3001          * in the same block and we aren't doing the entire block.
3002          */
3006         if (same_block && len < fs_info->sectorsize) {
3007                 if (offset < ino_size) {
3008                         truncated_block = true;
3009                         ret = btrfs_truncate_block(BTRFS_I(inode), offset, len,
3010                                                    0);
3011                 } else {
3012                         ret = 0;
3013                 }
3014                 goto out_only_mutex;
3015         }
3016
3017         /* Zero out the back part of the first block */
3018         if (offset < ino_size) {
3019                 truncated_block = true;
3020                 ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
3021                 if (ret) {
3022                         btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
3023                         return ret;
3024                 }
3025         }
3026
3027         /* Check the aligned pages after the first unaligned page. If
3028          * offset != orig_start, the first unaligned page and several
3029          * following pages are already in holes, so the extra check
3030          * can be skipped. */
3031         if (offset == orig_start) {
3032                 /* After truncating the block, check for holes again */
3033                 len = offset + len - lockstart;
3034                 offset = lockstart;
3035                 ret = find_first_non_hole(BTRFS_I(inode), &offset, &len);
3036                 if (ret < 0)
3037                         goto out_only_mutex;
3038                 if (ret && !len) {
3039                         ret = 0;
3040                         goto out_only_mutex;
3041                 }
3042                 lockstart = offset;
3043         }
3044
3045         /* Check whether the unaligned tail part is in a hole */
3046         tail_start = lockend + 1;
3047         tail_len = offset + len - tail_start;
3048         if (tail_len) {
3049                 ret = find_first_non_hole(BTRFS_I(inode), &tail_start, &tail_len);
3050                 if (unlikely(ret < 0))
3051                         goto out_only_mutex;
3052                 if (!ret) {
3053                         /* Zero the front part of the last block */
3054                         if (tail_start + tail_len < ino_size) {
3055                                 truncated_block = true;
3056                                 ret = btrfs_truncate_block(BTRFS_I(inode),
3057                                                         tail_start + tail_len,
3058                                                         0, 1);
3059                                 if (ret)
3060                                         goto out_only_mutex;
3061                         }
3062                 }
3063         }
3064
3065         if (lockend < lockstart) {
3066                 ret = 0;
3067                 goto out_only_mutex;
3068         }
3069
3070         ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
3071                                           &cached_state);
3072         if (ret)
3073                 goto out_only_mutex;
3074
3075         path = btrfs_alloc_path();
3076         if (!path) {
3077                 ret = -ENOMEM;
3078                 goto out;
3079         }
3080
3081         ret = btrfs_replace_file_extents(BTRFS_I(inode), path, lockstart,
3082                                          lockend, NULL, &trans);
3083         btrfs_free_path(path);
3084         if (ret)
3085                 goto out;
3086
3087         ASSERT(trans != NULL);
3088         inode_inc_iversion(inode);
3089         inode->i_mtime = inode->i_ctime = current_time(inode);
3090         ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
3091         updated_inode = true;
3092         btrfs_end_transaction(trans);
3093         btrfs_btree_balance_dirty(fs_info);
3094 out:
3095         unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
3096                              &cached_state);
3097 out_only_mutex:
3098         if (!updated_inode && truncated_block && !ret) {
3099                 /*
3100                  * If we only ended up zeroing part of a block, we still need
3101                  * to update the inode item, so that all the time fields are
3102                  * updated as well as the necessary btrfs in-memory inode fields
3103                  * used for detecting, at fsync time, whether the inode isn't yet
3104                  * in the log tree or is there but not up to date.
3105                  */
3106                 struct timespec64 now = current_time(inode);
3107
3108                 inode_inc_iversion(inode);
3109                 inode->i_mtime = now;
3110                 inode->i_ctime = now;
3111                 trans = btrfs_start_transaction(root, 1);
3112                 if (IS_ERR(trans)) {
3113                         ret = PTR_ERR(trans);
3114                 } else {
3115                         int ret2;
3116
3117                         ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
3118                         ret2 = btrfs_end_transaction(trans);
3119                         if (!ret)
3120                                 ret = ret2;
3121                 }
3122         }
3123         btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
3124         return ret;
3125 }
3126
3127 /* Helper structure to record which range is already reserved */
3128 struct falloc_range {
3129         struct list_head list;
3130         u64 start;
3131         u64 len;
3132 };
3133
3134 /*
3135  * Helper function to add falloc range
3136  *
3137  * The caller should have locked the larger extent range containing
3138  * [start, start + len).
3139  */
3140 static int add_falloc_range(struct list_head *head, u64 start, u64 len)
3141 {
3142         struct falloc_range *range = NULL;
3143
3144         if (!list_empty(head)) {
3145                 /*
3146                  * As fallocate iterates in increasing file offset order, we
3147                  * only need to check the last range.
3148                  */
3149                 range = list_last_entry(head, struct falloc_range, list);
3150                 if (range->start + range->len == start) {
3151                         range->len += len;
3152                         return 0;
3153                 }
3154         }
3155
3156         range = kmalloc(sizeof(*range), GFP_KERNEL);
3157         if (!range)
3158                 return -ENOMEM;
3159         range->start = start;
3160         range->len = len;
3161         list_add_tail(&range->list, head);
3162         return 0;
3163 }
3164
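/*
 * After a successful fallocate, update i_size and the inode item if the
 * operation extended the file and FALLOC_FL_KEEP_SIZE was not given;
 * otherwise do nothing and return 0.
 */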
3165 static int btrfs_fallocate_update_isize(struct inode *inode,
3166                                         const u64 end,
3167                                         const int mode)
3168 {
3169         struct btrfs_trans_handle *trans;
3170         struct btrfs_root *root = BTRFS_I(inode)->root;
3171         int ret;
3172         int ret2;
3173
3174         if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
3175                 return 0;
3176
3177         trans = btrfs_start_transaction(root, 1);
3178         if (IS_ERR(trans))
3179                 return PTR_ERR(trans);
3180
3181         inode->i_ctime = current_time(inode);
3182         i_size_write(inode, end);
3183         btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
3184         ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
3185         ret2 = btrfs_end_transaction(trans);
3186
3187         return ret ? ret : ret2;
3188 }
3189
3190 enum {
3191         RANGE_BOUNDARY_WRITTEN_EXTENT,
3192         RANGE_BOUNDARY_PREALLOC_EXTENT,
3193         RANGE_BOUNDARY_HOLE,
3194 };
3195
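/*
 * Check what the block containing @offset maps to, so that a zero range
 * operation can decide how to treat an unaligned boundary: extend the
 * allocation over a hole, zero a written extent in place, or leave a
 * prealloc extent alone. Returns one of the RANGE_BOUNDARY_* values or a
 * negative errno.
 */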
3196 static int btrfs_zero_range_check_range_boundary(struct btrfs_inode *inode,
3197                                                  u64 offset)
3198 {
3199         const u64 sectorsize = btrfs_inode_sectorsize(inode);
3200         struct extent_map *em;
3201         int ret;
3202
3203         offset = round_down(offset, sectorsize);
3204         em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize);
3205         if (IS_ERR(em))
3206                 return PTR_ERR(em);
3207
3208         if (em->block_start == EXTENT_MAP_HOLE)
3209                 ret = RANGE_BOUNDARY_HOLE;
3210         else if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
3211                 ret = RANGE_BOUNDARY_PREALLOC_EXTENT;
3212         else
3213                 ret = RANGE_BOUNDARY_WRITTEN_EXTENT;
3214
3215         free_extent_map(em);
3216         return ret;
3217 }
3218
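/*
 * Implement FALLOC_FL_ZERO_RANGE: ranges already covered by a prealloc
 * extent are left as is, unaligned boundaries are either zeroed in place or
 * pulled into the allocation range, and whatever remains is reserved and
 * replaced with a new prealloc extent. Finally i_size is updated if the
 * mode requires it.
 */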
3219 static int btrfs_zero_range(struct inode *inode,
3220                             loff_t offset,
3221                             loff_t len,
3222                             const int mode)
3223 {
3224         struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
3225         struct extent_map *em;
3226         struct extent_changeset *data_reserved = NULL;
3227         int ret;
3228         u64 alloc_hint = 0;
3229         const u64 sectorsize = btrfs_inode_sectorsize(BTRFS_I(inode));
3230         u64 alloc_start = round_down(offset, sectorsize);
3231         u64 alloc_end = round_up(offset + len, sectorsize);
3232         u64 bytes_to_reserve = 0;
3233         bool space_reserved = false;
3234
3235         inode_dio_wait(inode);
3236
3237         em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start,
3238                               alloc_end - alloc_start);
3239         if (IS_ERR(em)) {
3240                 ret = PTR_ERR(em);
3241                 goto out;
3242         }
3243
3244         /*
3245          * Avoid hole punching and extent allocation for some cases. More cases
3246          * could be considered, but they are unlikely to be common and we keep
3247          * things as simple as possible for now. Also, intentionally, if the target
3248          * range contains one or more prealloc extents together with regular
3249          * extents and holes, we drop all the existing extents and allocate a
3250          * new prealloc extent, so that we get a larger contiguous disk extent.
3251          */
3252         if (em->start <= alloc_start &&
3253             test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3254                 const u64 em_end = em->start + em->len;
3255
3256                 if (em_end >= offset + len) {
3257                         /*
3258                          * The whole range is already a prealloc extent,
3259                          * do nothing except updating the inode's i_size if
3260                          * needed.
3261                          */
3262                         free_extent_map(em);
3263                         ret = btrfs_fallocate_update_isize(inode, offset + len,
3264                                                            mode);
3265                         goto out;
3266                 }
3267                 /*
3268                  * Part of the range is already a prealloc extent, so operate
3269                  * only on the remaining part of the range.
3270                  */
3271                 alloc_start = em_end;
3272                 ASSERT(IS_ALIGNED(alloc_start, sectorsize));
3273                 len = offset + len - alloc_start;
3274                 offset = alloc_start;
3275                 alloc_hint = em->block_start + em->len;
3276         }
3277         free_extent_map(em);
3278
3279         if (BTRFS_BYTES_TO_BLKS(fs_info, offset) ==
3280             BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)) {
3281                 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start,
3282                                       sectorsize);
3283                 if (IS_ERR(em)) {
3284                         ret = PTR_ERR(em);
3285                         goto out;
3286                 }
3287
3288                 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3289                         free_extent_map(em);
3290                         ret = btrfs_fallocate_update_isize(inode, offset + len,
3291                                                            mode);
3292                         goto out;
3293                 }
3294                 if (len < sectorsize && em->block_start != EXTENT_MAP_HOLE) {
3295                         free_extent_map(em);
3296                         ret = btrfs_truncate_block(BTRFS_I(inode), offset, len,
3297                                                    0);
3298                         if (!ret)
3299                                 ret = btrfs_fallocate_update_isize(inode,
3300                                                                    offset + len,
3301                                                                    mode);
3302                         return ret;
3303                 }
3304                 free_extent_map(em);
3305                 alloc_start = round_down(offset, sectorsize);
3306                 alloc_end = alloc_start + sectorsize;
3307                 goto reserve_space;
3308         }
3309
3310         alloc_start = round_up(offset, sectorsize);
3311         alloc_end = round_down(offset + len, sectorsize);
3312
3313         /*
3314          * For unaligned ranges, check the pages at the boundaries: they might
3315          * map to an extent, in which case we need to partially zero them, or
3316          * they might map to a hole, in which case we need our allocation range
3317          * to cover them.
3318          */
3319         if (!IS_ALIGNED(offset, sectorsize)) {
3320                 ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode),
3321                                                             offset);
3322                 if (ret < 0)
3323                         goto out;
3324                 if (ret == RANGE_BOUNDARY_HOLE) {
3325                         alloc_start = round_down(offset, sectorsize);
3326                         ret = 0;
3327                 } else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
3328                         ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
3329                         if (ret)
3330                                 goto out;
3331                 } else {
3332                         ret = 0;
3333                 }
3334         }
3335
3336         if (!IS_ALIGNED(offset + len, sectorsize)) {
3337                 ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode),
3338                                                             offset + len);
3339                 if (ret < 0)
3340                         goto out;
3341                 if (ret == RANGE_BOUNDARY_HOLE) {
3342                         alloc_end = round_up(offset + len, sectorsize);
3343                         ret = 0;
3344                 } else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
3345                         ret = btrfs_truncate_block(BTRFS_I(inode), offset + len,
3346                                                    0, 1);
3347                         if (ret)
3348                                 goto out;
3349                 } else {
3350                         ret = 0;
3351                 }
3352         }
3353
3354 reserve_space:
3355         if (alloc_start < alloc_end) {
3356                 struct extent_state *cached_state = NULL;
3357                 const u64 lockstart = alloc_start;
3358                 const u64 lockend = alloc_end - 1;
3359
3360                 bytes_to_reserve = alloc_end - alloc_start;
3361                 ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3362                                                       bytes_to_reserve);
3363                 if (ret < 0)
3364                         goto out;
3365                 space_reserved = true;
3366                 ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
3367                                                   &cached_state);
3368                 if (ret)
3369                         goto out;
3370                 ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), &data_reserved,
3371                                                 alloc_start, bytes_to_reserve);
3372                 if (ret) {
3373                         unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
3374                                              lockend, &cached_state);
3375                         goto out;
3376                 }
3377                 ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
3378                                                 alloc_end - alloc_start,
3379                                                 i_blocksize(inode),
3380                                                 offset + len, &alloc_hint);
3381                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
3382                                      lockend, &cached_state);
3383                 /* btrfs_prealloc_file_range releases reserved space on error */
3384                 if (ret) {
3385                         space_reserved = false;
3386                         goto out;
3387                 }
3388         }
3389         ret = btrfs_fallocate_update_isize(inode, offset + len, mode);
3390  out:
3391         if (ret && space_reserved)
3392                 btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved,
3393                                                alloc_start, bytes_to_reserve);
3394         extent_changeset_free(data_reserved);
3395
3396         return ret;
3397 }
3398
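/*
 * Entry point for fallocate(). FALLOC_FL_PUNCH_HOLE and FALLOC_FL_ZERO_RANGE
 * are delegated to btrfs_punch_hole() and btrfs_zero_range(). The default
 * mode walks the extent maps of the range, reserves qgroup space for every
 * hole or beyond-EOF region found and then preallocates unwritten extents
 * for them.
 */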
3399 static long btrfs_fallocate(struct file *file, int mode,
3400                             loff_t offset, loff_t len)
3401 {
3402         struct inode *inode = file_inode(file);
3403         struct extent_state *cached_state = NULL;
3404         struct extent_changeset *data_reserved = NULL;
3405         struct falloc_range *range;
3406         struct falloc_range *tmp;
3407         struct list_head reserve_list;
3408         u64 cur_offset;
3409         u64 last_byte;
3410         u64 alloc_start;
3411         u64 alloc_end;
3412         u64 alloc_hint = 0;
3413         u64 locked_end;
3414         u64 actual_end = 0;
3415         struct extent_map *em;
3416         int blocksize = btrfs_inode_sectorsize(BTRFS_I(inode));
3417         int ret;
3418
3419         /* Do not allow fallocate in ZONED mode */
3420         if (btrfs_is_zoned(btrfs_sb(inode->i_sb)))
3421                 return -EOPNOTSUPP;
3422
3423         alloc_start = round_down(offset, blocksize);
3424         alloc_end = round_up(offset + len, blocksize);
3425         cur_offset = alloc_start;
3426
3427         /* Make sure we aren't being given some crap mode */
3428         if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
3429                      FALLOC_FL_ZERO_RANGE))
3430                 return -EOPNOTSUPP;
3431
3432         if (mode & FALLOC_FL_PUNCH_HOLE)
3433                 return btrfs_punch_hole(inode, offset, len);
3434
3435         /*
3436          * Only trigger disk allocation, don't trigger qgroup reserve yet.
3437          *
3438          * Qgroup space will be checked and reserved later.
3439          */
3440         if (!(mode & FALLOC_FL_ZERO_RANGE)) {
3441                 ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3442                                                       alloc_end - alloc_start);
3443                 if (ret < 0)
3444                         return ret;
3445         }
3446
3447         btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
3448
3449         if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
3450                 ret = inode_newsize_ok(inode, offset + len);
3451                 if (ret)
3452                         goto out;
3453         }
3454
3455         /*
3456          * TODO: Move these two operations after we have checked
3457          * accurate reserved space, or fallocate can still fail but
3458          * with the page truncated or the size expanded.
3459          *
3460          * But that's a minor problem that won't do much harm anyway.
3461          */
3462         if (alloc_start > inode->i_size) {
3463                 ret = btrfs_cont_expand(BTRFS_I(inode), i_size_read(inode),
3464                                         alloc_start);
3465                 if (ret)
3466                         goto out;
3467         } else if (offset + len > inode->i_size) {
3468                 /*
3469                  * If we are fallocating from the end of the file onward we
3470                  * need to zero out the end of the block if i_size lands in the
3471                  * middle of a block.
3472                  */
3473                 ret = btrfs_truncate_block(BTRFS_I(inode), inode->i_size, 0, 0);
3474                 if (ret)
3475                         goto out;
3476         }
3477
3478         /*
3479          * wait for ordered IO before we have any locks.  We'll loop again
3480          * below with the locks held.
3481          */
3482         ret = btrfs_wait_ordered_range(inode, alloc_start,
3483                                        alloc_end - alloc_start);
3484         if (ret)
3485                 goto out;
3486
3487         if (mode & FALLOC_FL_ZERO_RANGE) {
3488                 ret = btrfs_zero_range(inode, offset, len, mode);
3489                 btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
3490                 return ret;
3491         }
3492
3493         locked_end = alloc_end - 1;
3494         while (1) {
3495                 struct btrfs_ordered_extent *ordered;
3496
3497                 /* the extent lock is ordered inside the running
3498                  * transaction
3499                  */
3500                 lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
3501                                  locked_end, &cached_state);
3502                 ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode),
3503                                                             locked_end);
3504
3505                 if (ordered &&
3506                     ordered->file_offset + ordered->num_bytes > alloc_start &&
3507                     ordered->file_offset < alloc_end) {
3508                         btrfs_put_ordered_extent(ordered);
3509                         unlock_extent_cached(&BTRFS_I(inode)->io_tree,
3510                                              alloc_start, locked_end,
3511                                              &cached_state);
3512                         /*
3513                          * we can't wait on the range with the transaction
3514                          * running or with the extent lock held
3515                          */
3516                         ret = btrfs_wait_ordered_range(inode, alloc_start,
3517                                                        alloc_end - alloc_start);
3518                         if (ret)
3519                                 goto out;
3520                 } else {
3521                         if (ordered)
3522                                 btrfs_put_ordered_extent(ordered);
3523                         break;
3524                 }
3525         }
3526
3527         /* First, check if we exceed the qgroup limit */
3528         INIT_LIST_HEAD(&reserve_list);
3529         while (cur_offset < alloc_end) {
3530                 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
3531                                       alloc_end - cur_offset);
3532                 if (IS_ERR(em)) {
3533                         ret = PTR_ERR(em);
3534                         break;
3535                 }
3536                 last_byte = min(extent_map_end(em), alloc_end);
3537                 actual_end = min_t(u64, extent_map_end(em), offset + len);
3538                 last_byte = ALIGN(last_byte, blocksize);
3539                 if (em->block_start == EXTENT_MAP_HOLE ||
3540                     (cur_offset >= inode->i_size &&
3541                      !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
3542                         ret = add_falloc_range(&reserve_list, cur_offset,
3543                                                last_byte - cur_offset);
3544                         if (ret < 0) {
3545                                 free_extent_map(em);
3546                                 break;
3547                         }
3548                         ret = btrfs_qgroup_reserve_data(BTRFS_I(inode),
3549                                         &data_reserved, cur_offset,
3550                                         last_byte - cur_offset);
3551                         if (ret < 0) {
3552                                 cur_offset = last_byte;
3553                                 free_extent_map(em);
3554                                 break;
3555                         }
3556                 } else {
3557                         /*
3558                          * We do not need to reserve an unwritten extent for
3559                          * this range; free the reserved data space first,
3560                          * otherwise it'll result in a false ENOSPC error.
3561                          */
3562                         btrfs_free_reserved_data_space(BTRFS_I(inode),
3563                                 data_reserved, cur_offset,
3564                                 last_byte - cur_offset);
3565                 }
3566                 free_extent_map(em);
3567                 cur_offset = last_byte;
3568         }
3569
3570         /*
3571          * If ret is still 0, it means we're OK to fallocate.
3572          * Otherwise just clean up the list and exit.
3573          */
3574         list_for_each_entry_safe(range, tmp, &reserve_list, list) {
3575                 if (!ret)
3576                         ret = btrfs_prealloc_file_range(inode, mode,
3577                                         range->start,
3578                                         range->len, i_blocksize(inode),
3579                                         offset + len, &alloc_hint);
3580                 else
3581                         btrfs_free_reserved_data_space(BTRFS_I(inode),
3582                                         data_reserved, range->start,
3583                                         range->len);
3584                 list_del(&range->list);
3585                 kfree(range);
3586         }
3587         if (ret < 0)
3588                 goto out_unlock;
3589
3590         /*
3591          * We didn't need to allocate any more space, but we still extended the
3592          * size of the file, so we need to update i_size and the inode item.
3593          */
3594         ret = btrfs_fallocate_update_isize(inode, actual_end, mode);
3595 out_unlock:
3596         unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
3597                              &cached_state);
3598 out:
3599         btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
3600         /* Let go of our reservation. */
3601         if (ret != 0 && !(mode & FALLOC_FL_ZERO_RANGE))
3602                 btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved,
3603                                 cur_offset, alloc_end - cur_offset);
3604         extent_changeset_free(data_reserved);
3605         return ret;
3606 }
3607
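/*
 * Search for the first hole (SEEK_HOLE) or data region (SEEK_DATA) at or
 * after @offset by walking the extent maps with the extent range locked;
 * preallocated extents are treated as holes. Returns the resulting file
 * offset, or -ENXIO when @offset is at or past i_size or when seeking data
 * and only holes remain until i_size.
 */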
3608 static loff_t find_desired_extent(struct btrfs_inode *inode, loff_t offset,
3609                                   int whence)
3610 {
3611         struct btrfs_fs_info *fs_info = inode->root->fs_info;
3612         struct extent_map *em = NULL;
3613         struct extent_state *cached_state = NULL;
3614         loff_t i_size = inode->vfs_inode.i_size;
3615         u64 lockstart;
3616         u64 lockend;
3617         u64 start;
3618         u64 len;
3619         int ret = 0;
3620
3621         if (i_size == 0 || offset >= i_size)
3622                 return -ENXIO;
3623
3624         /*
3625          * offset can be negative; in that case we start finding DATA/HOLE from
3626          * the very start of the file.
3627          */
3628         start = max_t(loff_t, 0, offset);
3629
3630         lockstart = round_down(start, fs_info->sectorsize);
3631         lockend = round_up(i_size, fs_info->sectorsize);
3632         if (lockend <= lockstart)
3633                 lockend = lockstart + fs_info->sectorsize;
3634         lockend--;
3635         len = lockend - lockstart + 1;
3636
3637         lock_extent_bits(&inode->io_tree, lockstart, lockend, &cached_state);
3638
3639         while (start < i_size) {
3640                 em = btrfs_get_extent_fiemap(inode, start, len);
3641                 if (IS_ERR(em)) {
3642                         ret = PTR_ERR(em);
3643                         em = NULL;
3644                         break;
3645                 }
3646
3647                 if (whence == SEEK_HOLE &&
3648                     (em->block_start == EXTENT_MAP_HOLE ||
3649                      test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
3650                         break;
3651                 else if (whence == SEEK_DATA &&
3652                            (em->block_start != EXTENT_MAP_HOLE &&
3653                             !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
3654                         break;
3655
3656                 start = em->start + em->len;
3657                 free_extent_map(em);
3658                 em = NULL;
3659                 cond_resched();
3660         }
3661         free_extent_map(em);
3662         unlock_extent_cached(&inode->io_tree, lockstart, lockend,
3663                              &cached_state);
3664         if (ret) {
3665                 offset = ret;
3666         } else {
3667                 if (whence == SEEK_DATA && start >= i_size)
3668                         offset = -ENXIO;
3669                 else
3670                         offset = min_t(loff_t, start, i_size);
3671         }
3672
3673         return offset;
3674 }
3675
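/*
 * llseek hook: SEEK_DATA and SEEK_HOLE need to look at the extent maps, so
 * take the inode's shared lock and use find_desired_extent(); all other
 * whence values are handled by generic_file_llseek().
 */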
3676 static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
3677 {
3678         struct inode *inode = file->f_mapping->host;
3679
3680         switch (whence) {
3681         default:
3682                 return generic_file_llseek(file, offset, whence);
3683         case SEEK_DATA:
3684         case SEEK_HOLE:
3685                 btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
3686                 offset = find_desired_extent(BTRFS_I(inode), offset, whence);
3687                 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
3688                 break;
3689         }
3690
3691         if (offset < 0)
3692                 return offset;
3693
3694         return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
3695 }
3696
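/*
 * Open hook: advertise support for non-blocking IO and async buffered
 * reads, and give fsverity a chance to set up the inode before the generic
 * open checks run.
 */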
3697 static int btrfs_file_open(struct inode *inode, struct file *filp)
3698 {
3699         int ret;
3700
3701         filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
3702
3703         ret = fsverity_file_open(inode, filp);
3704         if (ret)
3705                 return ret;
3706         return generic_file_open(inode, filp);
3707 }
3708
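/*
 * Validate a direct IO read: besides the alignment checks done by
 * check_direct_IO(), reject iovecs in which two segments share the same
 * base address, which the btrfs direct read path does not support.
 */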
3709 static int check_direct_read(struct btrfs_fs_info *fs_info,
3710                              const struct iov_iter *iter, loff_t offset)
3711 {
3712         int ret;
3713         int i, seg;
3714
3715         ret = check_direct_IO(fs_info, iter, offset);
3716         if (ret < 0)
3717                 return ret;
3718
3719         if (!iter_is_iovec(iter))
3720                 return 0;
3721
3722         for (seg = 0; seg < iter->nr_segs; seg++)
3723                 for (i = seg + 1; i < iter->nr_segs; i++)
3724                         if (iter->iov[seg].iov_base == iter->iov[i].iov_base)
3725                                 return -EINVAL;
3726         return 0;
3727 }
3728
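/*
 * Perform a direct IO read with page faults disabled to avoid deadlocks
 * (see the comment below). On -EFAULT or a partial read, fault in the
 * remaining pages and retry; if a retry makes no progress, return the bytes
 * read so far so that the caller can fall back to a buffered read for the
 * rest.
 */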
3729 static ssize_t btrfs_direct_read(struct kiocb *iocb, struct iov_iter *to)
3730 {
3731         struct inode *inode = file_inode(iocb->ki_filp);
3732         size_t prev_left = 0;
3733         ssize_t read = 0;
3734         ssize_t ret;
3735
3736         if (fsverity_active(inode))
3737                 return 0;
3738
3739         if (check_direct_read(btrfs_sb(inode->i_sb), to, iocb->ki_pos))
3740                 return 0;
3741
3742         btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
3743 again:
3744         /*
3745          * This is similar to what we do for direct IO writes, see the comment
3746          * at btrfs_direct_write(), but we also disable page faults in addition
3747          * to disabling them only at the iov_iter level. This is because when
3748          * reading from a hole or prealloc extent, iomap calls iov_iter_zero(),
3749          * which can still trigger page fault-ins despite having set ->nofault
3750          * to true on our 'to' iov_iter.
3751          *
3752          * The difference from direct IO writes is that we deadlock when trying
3753          * to lock the extent range in the inode's tree during the page reads
3754          * triggered by the fault in (while for writes it is due to waiting for
3755          * our own ordered extent). This is because for direct IO reads,
3756          * btrfs_dio_iomap_begin() returns with the extent range locked, which
3757          * is only unlocked in the endio callback (end_bio_extent_readpage()).
3758          */
3759         pagefault_disable();
3760         to->nofault = true;
3761         ret = iomap_dio_rw(iocb, to, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
3762                            IOMAP_DIO_PARTIAL, read);
3763         to->nofault = false;
3764         pagefault_enable();
3765
3766         /* No increment (+=) because iomap returns a cumulative value. */
3767         if (ret > 0)
3768                 read = ret;
3769
3770         if (iov_iter_count(to) > 0 && (ret == -EFAULT || ret > 0)) {
3771                 const size_t left = iov_iter_count(to);
3772
3773                 if (left == prev_left) {
3774                         /*
3775                          * We didn't make any progress since the last attempt, so
3776                          * fall back to a buffered read for the remainder of the
3777                          * range. This is just to avoid any possibility of looping
3778                          * for too long.
3779                          */
3780                         ret = read;
3781                 } else {
3782                         /*
3783                          * We made some progress since the last retry or this is
3784                          * the first time we are retrying. Fault in as many pages
3785                          * as possible and retry.
3786                          */
3787                         fault_in_iov_iter_writeable(to, left);
3788                         prev_left = left;
3789                         goto again;
3790                 }
3791         }
3792         btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
3793         return ret < 0 ? ret : read;
3794 }
3795
3796 static ssize_t btrfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
3797 {
3798         ssize_t ret = 0;
3799
3800         if (iocb->ki_flags & IOCB_DIRECT) {
3801                 ret = btrfs_direct_read(iocb, to);
3802                 if (ret < 0 || !iov_iter_count(to) ||
3803                     iocb->ki_pos >= i_size_read(file_inode(iocb->ki_filp)))
3804                         return ret;
3805         }
3806
3807         return filemap_read(iocb, to, ret);
3808 }
3809
3810 const struct file_operations btrfs_file_operations = {
3811         .llseek         = btrfs_file_llseek,
3812         .read_iter      = btrfs_file_read_iter,
3813         .splice_read    = generic_file_splice_read,
3814         .write_iter     = btrfs_file_write_iter,
3815         .splice_write   = iter_file_splice_write,
3816         .mmap           = btrfs_file_mmap,
3817         .open           = btrfs_file_open,
3818         .release        = btrfs_release_file,
3819         .fsync          = btrfs_sync_file,
3820         .fallocate      = btrfs_fallocate,
3821         .unlocked_ioctl = btrfs_ioctl,
3822 #ifdef CONFIG_COMPAT
3823         .compat_ioctl   = btrfs_compat_ioctl,
3824 #endif
3825         .remap_file_range = btrfs_remap_file_range,
3826 };
3827
3828 void __cold btrfs_auto_defrag_exit(void)
3829 {
3830         kmem_cache_destroy(btrfs_inode_defrag_cachep);
3831 }
3832
3833 int __init btrfs_auto_defrag_init(void)
3834 {
3835         btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
3836                                         sizeof(struct inode_defrag), 0,
3837                                         SLAB_MEM_SPREAD,
3838                                         NULL);
3839         if (!btrfs_inode_defrag_cachep)
3840                 return -ENOMEM;
3841
3842         return 0;
3843 }
3844
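/*
 * Start writeback for the given range and, if compression queued async
 * extents, issue the writeback a second time so that no ordered extents are
 * missed. The comment below has the full story.
 */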
3845 int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
3846 {
3847         int ret;
3848
3849         /*
3850          * So with compression we will find and lock a dirty page and clear the
3851          * first one as dirty, set up an async extent, and immediately return
3852          * with the entire range locked but with nobody actually marked with
3853          * writeback.  So we can't just filemap_write_and_wait_range() and
3854          * expect it to work since it will just kick off a thread to do the
3855          * actual work.  So we need to call filemap_fdatawrite_range _again_
3856          * since it will wait on the page lock, which won't be unlocked until
3857          * after the pages have been marked as writeback and so we're good to go
3858          * from there.  We have to do this otherwise we'll miss the ordered
3859          * extents and that results in badness.  Please Josef, do not think you
3860          * know better and pull this out at some point in the future, it is
3861          * right and you are wrong.
3862          */
3863         ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
3864         if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
3865                              &BTRFS_I(inode)->runtime_flags))
3866                 ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
3867
3868         return ret;
3869 }