/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/mount.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uio.h>
#include <linux/magic.h>
#include <linux/iversion.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "volumes.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "backref.h"
#include "props.h"
#include "qgroup.h"
#include "dedupe.h"

struct btrfs_iget_args {
	struct btrfs_key *location;
	struct btrfs_root *root;
};

struct btrfs_dio_data {
	u64 reserve;
	u64 unsubmitted_oe_range_start;
	u64 unsubmitted_oe_range_end;
	int overwrite;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_dir_ro_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct address_space_operations btrfs_symlink_aops;
static const struct file_operations btrfs_dir_file_operations;
static const struct extent_io_ops btrfs_extent_io_ops;

static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;

#define S_SHIFT 12
static const unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
};
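
/*
 * Example (editor's note, a sketch of how the table above is used): the
 * dir-entry type for an inode is looked up as
 *
 *	btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT]
 *
 * S_IFMT is 0170000, so the shift by 12 turns each S_IF* value into a
 * small array index, e.g. S_IFREG (0100000) >> 12 == 8 and
 * S_IFDIR (0040000) >> 12 == 4.
 */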

static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct inode *inode, bool skip_writeback);
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, u64 delalloc_end,
				   int *page_started, unsigned long *nr_written,
				   int unlock, struct btrfs_dedupe_hash *hash);
static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len,
				       u64 orig_start, u64 block_start,
				       u64 block_len, u64 orig_block_len,
				       u64 ram_bytes, int compress_type,
				       int type);
static void __endio_write_update_ordered(struct inode *inode,
					 const u64 offset, const u64 bytes,
					 const bool uptodate);

/*
 * Cleanup all submitted ordered extents in specified range to handle errors
 * from the fill_delalloc() callback.
 *
 * NOTE: caller must ensure that when an error happens, it can not call
 * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
 * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
 * to be released, which we want to happen only when finishing the ordered
 * extent (btrfs_finish_ordered_io()). Also note that the caller of the
 * fill_delalloc() callback already does proper cleanup for the first page of
 * the range, that is, it invokes the callback writepage_end_io_hook() for the
 * range of the first page.
 */
static inline void btrfs_cleanup_ordered_extents(struct inode *inode,
						 const u64 offset,
						 const u64 bytes)
{
	unsigned long index = offset >> PAGE_SHIFT;
	unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(inode->i_mapping, index);
		index++;
		if (!page)
			continue;
		ClearPagePrivate2(page);
		put_page(page);
	}
	return __endio_write_update_ordered(inode, offset + PAGE_SIZE,
					    bytes - PAGE_SIZE, false);
}
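
/*
 * Example (editor's note, assuming 4K pages): for offset == 0 and
 * bytes == 12K, index == 0 and end_index == 2, so Private2 is cleared
 * on the three pages of the range; the ordered extent itself is then
 * finished for [4K, 12K) only, because the caller already ran the
 * end-io hook for the first page.
 */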

static int btrfs_dirty_inode(struct inode *inode);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
void btrfs_test_inode_set_ops(struct inode *inode)
{
	BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
}
#endif

static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct inode *inode, struct inode *dir,
				     const struct qstr *qstr)
{
	int err;

	err = btrfs_init_acl(trans, inode, dir);
	if (!err)
		err = btrfs_xattr_security_init(trans, inode, dir, qstr);
	return err;
}

/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_path *path, int extent_inserted,
				struct btrfs_root *root, struct inode *inode,
				u64 start, size_t size, size_t compressed_size,
				int compress_type,
				struct page **compressed_pages)
{
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int ret;
	size_t cur_size = size;
	unsigned long offset;

	if (compressed_size && compressed_pages)
		cur_size = compressed_size;

	inode_add_bytes(inode, size);

	if (!extent_inserted) {
		struct btrfs_key key;
		size_t datasize;

		key.objectid = btrfs_ino(BTRFS_I(inode));
		key.offset = start;
		key.type = BTRFS_EXTENT_DATA_KEY;

		datasize = btrfs_file_extent_calc_inline_size(cur_size);
		path->leave_spinning = 1;
		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      datasize);
		if (ret)
			goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		struct page *cpage;
		int i = 0;

		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
					 PAGE_SIZE);

			kaddr = kmap_atomic(cpage);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_atomic(kaddr);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		page = find_get_page(inode->i_mapping,
				     start >> PAGE_SHIFT);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_atomic(page);
		offset = start & (PAGE_SIZE - 1);
		write_extent_buffer(leaf, kaddr + offset, ptr, size);
		kunmap_atomic(kaddr);
		put_page(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	/*
	 * we're an inline extent, so nobody can
	 * extend the file past i_size without locking
	 * a page we already have locked.
	 *
	 * We must do any isize and inode updates
	 * before we unlock the pages.  Otherwise we
	 * could end up racing with unlink.
	 */
	BTRFS_I(inode)->disk_i_size = inode->i_size;
	ret = btrfs_update_inode(trans, root, inode);

fail:
	return ret;
}
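
/*
 * Worked example (editor's note): for a 300-byte uncompressed file at
 * offset 0, insert_inline_extent() creates one EXTENT_DATA item of
 * btrfs_file_extent_calc_inline_size(300) bytes and copies the 300
 * bytes straight out of the page cache into the leaf, so the data
 * lives in the btree instead of in a separate data extent.
 */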

/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct inode *inode, u64 start,
					  u64 end, size_t compressed_size,
					  int compress_type,
					  struct page **compressed_pages)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	u64 isize = i_size_read(inode);
	u64 actual_end = min(end + 1, isize);
	u64 inline_len = actual_end - start;
	u64 aligned_end = ALIGN(end, fs_info->sectorsize);
	u64 data_len = inline_len;
	int ret;
	struct btrfs_path *path;
	int extent_inserted = 0;
	u32 extent_item_size;

	if (compressed_size)
		data_len = compressed_size;

	if (start > 0 ||
	    actual_end > fs_info->sectorsize ||
	    data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
	    (!compressed_size &&
	    (actual_end & (fs_info->sectorsize - 1)) == 0) ||
	    end + 1 < isize ||
	    data_len > fs_info->max_inline) {
		return 1;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	trans->block_rsv = &BTRFS_I(inode)->block_rsv;

	if (compressed_size && compressed_pages)
		extent_item_size = btrfs_file_extent_calc_inline_size(
		   compressed_size);
	else
		extent_item_size = btrfs_file_extent_calc_inline_size(
		    inline_len);

	ret = __btrfs_drop_extents(trans, root, inode, path,
				   start, aligned_end, NULL,
				   1, 1, extent_item_size, &extent_inserted);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	if (isize > actual_end)
		inline_len = min_t(u64, isize, actual_end);
	ret = insert_inline_extent(trans, path, extent_inserted,
				   root, inode, start,
				   inline_len, compressed_size,
				   compress_type, compressed_pages);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
	btrfs_drop_extent_cache(BTRFS_I(inode), start, aligned_end - 1, 0);
out:
	/*
	 * Don't forget to free the reserved space, as for inlined extent
	 * it won't count as data extent, free them directly here.
	 * And at reserve time, it's always aligned to page size, so
	 * just free one page here.
	 */
	btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE);
	btrfs_free_path(path);
	btrfs_end_transaction(trans);
	return ret;
}
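
/*
 * Example (editor's note, assuming the default 4K sectorsize and
 * max_inline == 2048): a 1000-byte write at offset 0 of an empty file
 * passes every check above (start == 0, actual_end <= sectorsize,
 * data_len <= max_inline) and is stored inline; the same write at
 * offset 8K fails the start > 0 check and falls back to a regular
 * data extent.
 */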

struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	int compress_type;
	struct list_head list;
};

struct async_cow {
	struct inode *inode;
	struct btrfs_root *root;
	struct page *locked_page;
	u64 start;
	u64 end;
	unsigned int write_flags;
	struct list_head extents;
	struct btrfs_work work;
};

static noinline int add_async_extent(struct async_cow *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages,
				     int compress_type)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	BUG_ON(!async_extent); /* -ENOMEM */
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	async_extent->compress_type = compress_type;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}

static inline int inode_need_compress(struct inode *inode, u64 start, u64 end)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

	/* force compress */
	if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
		return 1;
	/* defrag ioctl */
	if (BTRFS_I(inode)->defrag_compress)
		return 1;
	/* bad compression ratios */
	if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
		return 0;
	if (btrfs_test_opt(fs_info, COMPRESS) ||
	    BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS ||
	    BTRFS_I(inode)->prop_compress)
		return btrfs_compress_heuristic(inode, start, end);
	return 0;
}
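
/*
 * Editor's note on the ordering above: FORCE_COMPRESS and a pending
 * defrag request win unconditionally, the per-inode NOCOMPRESS flag
 * (set after bad ratios were seen) then vetoes compression, and only
 * the ordinary compress mount option / inode flag / property cases
 * consult the content heuristic.
 */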

static inline void inode_should_defrag(struct btrfs_inode *inode,
		u64 start, u64 end, u64 num_bytes, u64 small_write)
{
	/* If this is a small write inside eof, kick off a defrag */
	if (num_bytes < small_write &&
	    (start > 0 || end + 1 < inode->disk_i_size))
		btrfs_add_inode_defrag(NULL, inode);
}

/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that the flusher thread sent them
 * down.
 */
static noinline void compress_file_range(struct inode *inode,
					struct page *locked_page,
					u64 start, u64 end,
					struct async_cow *async_cow,
					int *num_added)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	u64 blocksize = fs_info->sectorsize;
	u64 actual_end;
	u64 isize = i_size_read(inode);
	int ret = 0;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	int i;
	int will_compress;
	int compress_type = fs_info->compress_type;
	int redirty = 0;

	inode_should_defrag(BTRFS_I(inode), start, end, end - start + 1,
			SZ_16K);

	actual_end = min_t(u64, isize, end + 1);
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
	BUILD_BUG_ON((BTRFS_MAX_COMPRESSED % PAGE_SIZE) != 0);
	nr_pages = min_t(unsigned long, nr_pages,
			BTRFS_MAX_COMPRESSED / PAGE_SIZE);
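
	/*
	 * Editor's note: BTRFS_MAX_COMPRESSED is 128K, so with 4K pages the
	 * clamp above caps nr_pages at 32; a 1M dirty range is therefore
	 * compressed as eight separate 128K extents, each pass restarting
	 * at the "again:" label with start advanced by the amount consumed.
	 */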

	/*
	 * we don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time.  So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/*
	 * skip compression for a small file range(<=blocksize) that
	 * isn't an inline extent, since it doesn't save disk space at all.
	 */
	if (total_compressed <= blocksize &&
	   (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		goto cleanup_and_bail_uncompressed;

	total_compressed = min_t(unsigned long, total_compressed,
			BTRFS_MAX_UNCOMPRESSED);
	total_in = 0;
	ret = 0;

	/*
	 * we do compression for mount -o compress and when the
	 * inode has not been flagged as nocompress.  This flag can
	 * change at any time if we discover bad compression ratios.
	 */
	if (inode_need_compress(inode, start, end)) {
		WARN_ON(pages);
		pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
		if (!pages) {
			/* just bail out to the uncompressed code */
			goto cont;
		}

		if (BTRFS_I(inode)->defrag_compress)
			compress_type = BTRFS_I(inode)->defrag_compress;
		else if (BTRFS_I(inode)->prop_compress)
			compress_type = BTRFS_I(inode)->prop_compress;

		/*
		 * we need to call clear_page_dirty_for_io on each
		 * page in the range.  Otherwise applications with the file
		 * mmap'd can wander in and change the page contents while
		 * we are compressing them.
		 *
		 * If the compression fails for any reason, we set the pages
		 * dirty again later on.
		 *
		 * Note that the remaining part is redirtied, the start pointer
		 * has moved, the end is the original one.
		 */
		if (!redirty) {
			extent_range_clear_dirty_for_io(inode, start, end);
			redirty = 1;
		}

		/* Compression level is applied here and only here */
		ret = btrfs_compress_pages(
			compress_type | (fs_info->compress_level << 4),
					   inode->i_mapping, start,
					   pages,
					   &nr_pages,
					   &total_in,
					   &total_compressed);

		if (!ret) {
			unsigned long offset = total_compressed &
				(PAGE_SIZE - 1);
			struct page *page = pages[nr_pages - 1];
			char *kaddr;

			/* zero the tail end of the last page, we might be
			 * sending it down to disk
			 */
			if (offset) {
				kaddr = kmap_atomic(page);
				memset(kaddr + offset, 0,
				       PAGE_SIZE - offset);
				kunmap_atomic(kaddr);
			}
			will_compress = 1;
		}
	}
cont:
	if (start == 0) {
		/* lets try to make an inline extent */
		if (ret || total_in < actual_end) {
			/* we didn't compress the entire range, try
			 * to make an uncompressed inline extent.
			 */
			ret = cow_file_range_inline(inode, start, end, 0,
						    BTRFS_COMPRESS_NONE, NULL);
		} else {
			/* try making a compressed inline extent */
			ret = cow_file_range_inline(inode, start, end,
						    total_compressed,
						    compress_type, pages);
		}
		if (ret <= 0) {
			unsigned long clear_flags = EXTENT_DELALLOC |
				EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
				EXTENT_DO_ACCOUNTING;
			unsigned long page_error_op;

			page_error_op = ret < 0 ? PAGE_SET_ERROR : 0;

			/*
			 * inline extent creation worked or returned error,
			 * we don't need to create any more async work items.
			 * Unlock and free up our temp pages.
			 *
			 * We use DO_ACCOUNTING here because we need the
			 * delalloc_release_metadata to be done _after_ we drop
			 * our outstanding extent for clearing delalloc for this
			 * range.
			 */
			extent_clear_unlock_delalloc(inode, start, end, end,
						     NULL, clear_flags,
						     PAGE_UNLOCK |
						     PAGE_CLEAR_DIRTY |
						     PAGE_SET_WRITEBACK |
						     page_error_op |
						     PAGE_END_WRITEBACK);
			goto free_pages_out;
		}
	}

	if (will_compress) {
		/*
		 * we aren't doing an inline extent round the compressed size
		 * up to a block size boundary so the allocator does sane
		 * things
		 */
		total_compressed = ALIGN(total_compressed, blocksize);

		/*
		 * one last check to make sure the compression is really a
		 * win, compare the page count read with the blocks on disk,
		 * compression must free at least one sector size
		 */
		total_in = ALIGN(total_in, PAGE_SIZE);
		if (total_compressed + blocksize <= total_in) {
			*num_added += 1;

			/*
			 * The async work queues will take care of doing actual
			 * allocation on disk for these compressed pages, and
			 * will submit them to the elevator.
			 */
			add_async_extent(async_cow, start, total_in,
					total_compressed, pages, nr_pages,
					compress_type);

			if (start + total_in < end) {
				start += total_in;
				pages = NULL;
				cond_resched();
				goto again;
			}
			return;
		}
	}
	if (pages) {
		/*
		 * the compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array
		 */
		for (i = 0; i < nr_pages; i++) {
			WARN_ON(pages[i]->mapping);
			put_page(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;
		nr_pages = 0;

		/* flag the file so we don't compress in the future */
		if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) &&
		    !(BTRFS_I(inode)->prop_compress)) {
			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
		}
	}
cleanup_and_bail_uncompressed:
	/*
	 * No compression, but we still need to write the pages in the file
	 * we've been given so far.  redirty the locked page if it corresponds
	 * to our extent and set things up for the async work queue to run
	 * cow_file_range to do the normal delalloc dance.
	 */
	if (page_offset(locked_page) >= start &&
	    page_offset(locked_page) <= end)
		__set_page_dirty_nobuffers(locked_page);
		/* unlocked later on in the async handlers */

	if (redirty)
		extent_range_redirty_for_io(inode, start, end);
	add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0,
			 BTRFS_COMPRESS_NONE);
	*num_added += 1;

	return;

free_pages_out:
	for (i = 0; i < nr_pages; i++) {
		WARN_ON(pages[i]->mapping);
		put_page(pages[i]);
	}
	kfree(pages);
}

static void free_async_extent_pages(struct async_extent *async_extent)
{
	int i;

	if (!async_extent->pages)
		return;

	for (i = 0; i < async_extent->nr_pages; i++) {
		WARN_ON(async_extent->pages[i]->mapping);
		put_page(async_extent->pages[i]);
	}
	kfree(async_extent->pages);
	async_extent->nr_pages = 0;
	async_extent->pages = NULL;
}

/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
static noinline void submit_compressed_extents(struct inode *inode,
					      struct async_cow *async_cow)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct async_extent *async_extent;
	u64 alloc_hint = 0;
	struct btrfs_key ins;
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree;
	int ret = 0;

again:
	while (!list_empty(&async_cow->extents)) {
		async_extent = list_entry(async_cow->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);

		io_tree = &BTRFS_I(inode)->io_tree;

retry:
		/* did the compression code fall back to uncompressed IO? */
		if (!async_extent->pages) {
			int page_started = 0;
			unsigned long nr_written = 0;

			lock_extent(io_tree, async_extent->start,
				    async_extent->start +
				    async_extent->ram_size - 1);

			/* allocate blocks */
			ret = cow_file_range(inode, async_cow->locked_page,
					     async_extent->start,
					     async_extent->start +
					     async_extent->ram_size - 1,
					     async_extent->start +
					     async_extent->ram_size - 1,
					     &page_started, &nr_written, 0,
					     NULL);

			/*
			 * if page_started, cow_file_range inserted an
			 * inline extent and took care of all the unlocking
			 * and IO for us.  Otherwise, we need to submit
			 * all those pages down to the drive.
			 */
			if (!page_started && !ret)
				extent_write_locked_range(inode,
						  async_extent->start,
						  async_extent->start +
						  async_extent->ram_size - 1,
						  WB_SYNC_ALL);
			else if (ret)
				unlock_page(async_cow->locked_page);
			kfree(async_extent);
			cond_resched();
			continue;
		}

		lock_extent(io_tree, async_extent->start,
			    async_extent->start + async_extent->ram_size - 1);

		ret = btrfs_reserve_extent(root, async_extent->ram_size,
					   async_extent->compressed_size,
					   async_extent->compressed_size,
					   0, alloc_hint, &ins, 1, 1);
		if (ret) {
			free_async_extent_pages(async_extent);

			if (ret == -ENOSPC) {
				unlock_extent(io_tree, async_extent->start,
					      async_extent->start +
					      async_extent->ram_size - 1);

				/*
				 * we need to redirty the pages if we decide to
				 * fallback to uncompressed IO, otherwise we
				 * will not submit these pages down to lower
				 * layers.
				 */
				extent_range_redirty_for_io(inode,
						async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1);

				goto retry;
			}
			goto out_free;
		}
		/*
		 * here we're doing allocation and writeback of the
		 * compressed pages
		 */
		em = create_io_em(inode, async_extent->start,
				  async_extent->ram_size, /* len */
				  async_extent->start, /* orig_start */
				  ins.objectid, /* block_start */
				  ins.offset, /* block_len */
				  ins.offset, /* orig_block_len */
				  async_extent->ram_size, /* ram_bytes */
				  async_extent->compress_type,
				  BTRFS_ORDERED_COMPRESSED);
		if (IS_ERR(em))
			/* ret value is not necessary due to void function */
			goto out_free_reserve;
		free_extent_map(em);

		ret = btrfs_add_ordered_extent_compress(inode,
						async_extent->start,
						ins.objectid,
						async_extent->ram_size,
						ins.offset,
						BTRFS_ORDERED_COMPRESSED,
						async_extent->compress_type);
		if (ret) {
			btrfs_drop_extent_cache(BTRFS_I(inode),
						async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1, 0);
			goto out_free_reserve;
		}
		btrfs_dec_block_group_reservations(fs_info, ins.objectid);

		/*
		 * clear dirty, set writeback and unlock the pages.
		 */
		extent_clear_unlock_delalloc(inode, async_extent->start,
				async_extent->start +
				async_extent->ram_size - 1,
				async_extent->start +
				async_extent->ram_size - 1,
				NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
				PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
				PAGE_SET_WRITEBACK);
		if (btrfs_submit_compressed_write(inode,
				    async_extent->start,
				    async_extent->ram_size,
				    ins.objectid,
				    ins.offset, async_extent->pages,
				    async_extent->nr_pages,
				    async_cow->write_flags)) {
			struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
			struct page *p = async_extent->pages[0];
			const u64 start = async_extent->start;
			const u64 end = start + async_extent->ram_size - 1;

			p->mapping = inode->i_mapping;
			tree->ops->writepage_end_io_hook(p, start, end,
							 NULL, 0);
			p->mapping = NULL;
			extent_clear_unlock_delalloc(inode, start, end, end,
						     NULL, 0,
						     PAGE_END_WRITEBACK |
						     PAGE_SET_ERROR);
			free_async_extent_pages(async_extent);
		}
		alloc_hint = ins.objectid + ins.offset;
		kfree(async_extent);
		cond_resched();
	}
	return;
out_free_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_free:
	extent_clear_unlock_delalloc(inode, async_extent->start,
				     async_extent->start +
				     async_extent->ram_size - 1,
				     async_extent->start +
				     async_extent->ram_size - 1,
				     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW |
				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
				     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
				     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK |
				     PAGE_SET_ERROR);
	free_async_extent_pages(async_extent);
	kfree(async_extent);
	goto again;
}

static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
				      u64 num_bytes)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * if block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint.  If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}
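
/*
 * Editor's note: the hint simply biases the allocator toward placing
 * new blocks next to an existing mapping.  E.g. if the range being
 * written already maps to disk byte 1G, the next reservation is
 * attempted near 1G, which keeps sequentially written files mostly
 * contiguous on disk.
 */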

/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the call backs end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * the IO when we return.
 */
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, u64 delalloc_end,
				   int *page_started, unsigned long *nr_written,
				   int unlock, struct btrfs_dedupe_hash *hash)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 alloc_hint = 0;
	u64 num_bytes;
	unsigned long ram_size;
	u64 cur_alloc_size = 0;
	u64 blocksize = fs_info->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	unsigned clear_bits;
	unsigned long page_ops;
	bool extent_reserved = false;
	int ret = 0;

	if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto out_unlock;
	}

	num_bytes = ALIGN(end - start + 1, blocksize);
	num_bytes = max(blocksize, num_bytes);
	ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy));

	inode_should_defrag(BTRFS_I(inode), start, end, num_bytes, SZ_64K);

	if (start == 0) {
		/* lets try to make an inline extent */
		ret = cow_file_range_inline(inode, start, end, 0,
					    BTRFS_COMPRESS_NONE, NULL);
		if (ret == 0) {
			/*
			 * We use DO_ACCOUNTING here because we need the
			 * delalloc_release_metadata to be run _after_ we drop
			 * our outstanding extent for clearing delalloc for this
			 * range.
			 */
			extent_clear_unlock_delalloc(inode, start, end,
				     delalloc_end, NULL,
				     EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
				     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
				     PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
				     PAGE_END_WRITEBACK);
			*nr_written = *nr_written +
			     (end - start + PAGE_SIZE) / PAGE_SIZE;
			*page_started = 1;
			goto out;
		} else if (ret < 0) {
			goto out_unlock;
		}
	}

	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
	btrfs_drop_extent_cache(BTRFS_I(inode), start,
				start + num_bytes - 1, 0);

	while (num_bytes > 0) {
		cur_alloc_size = num_bytes;
		ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
					   fs_info->sectorsize, 0, alloc_hint,
					   &ins, 1, 1);
		if (ret < 0)
			goto out_unlock;
		cur_alloc_size = ins.offset;
		extent_reserved = true;

		ram_size = ins.offset;
		em = create_io_em(inode, start, ins.offset, /* len */
				  start, /* orig_start */
				  ins.objectid, /* block_start */
				  ins.offset, /* block_len */
				  ins.offset, /* orig_block_len */
				  ram_size, /* ram_bytes */
				  BTRFS_COMPRESS_NONE, /* compress_type */
				  BTRFS_ORDERED_REGULAR /* type */);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out_reserve;
		}
		free_extent_map(em);

		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
					       ram_size, cur_alloc_size, 0);
		if (ret)
			goto out_drop_extent_cache;

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, start,
						      cur_alloc_size);
			/*
			 * Only drop cache here, and process as normal.
			 *
			 * We must not allow extent_clear_unlock_delalloc()
			 * at out_unlock label to free meta of this ordered
			 * extent, as its meta should be freed by
			 * btrfs_finish_ordered_io().
			 *
			 * So we must continue until @start is increased to
			 * skip current ordered extent.
			 */
			if (ret)
				btrfs_drop_extent_cache(BTRFS_I(inode), start,
						start + ram_size - 1, 0);
		}

		btrfs_dec_block_group_reservations(fs_info, ins.objectid);

		/* we're not doing compressed IO, don't unlock the first
		 * page (which the caller expects to stay locked), don't
		 * clear any dirty bits and don't set any writeback bits
		 *
		 * Do set the Private2 bit so we know this page was properly
		 * setup for writepage
		 */
		page_ops = unlock ? PAGE_UNLOCK : 0;
		page_ops |= PAGE_SET_PRIVATE2;

		extent_clear_unlock_delalloc(inode, start,
					     start + ram_size - 1,
					     delalloc_end, locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC,
					     page_ops);
		if (num_bytes < cur_alloc_size)
			num_bytes = 0;
		else
			num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
		extent_reserved = false;

		/*
		 * btrfs_reloc_clone_csums() error, since start is increased
		 * extent_clear_unlock_delalloc() at out_unlock label won't
		 * free metadata of current ordered extent, we're OK to exit.
		 */
		if (ret)
			goto out_unlock;
	}
out:
	return ret;

out_drop_extent_cache:
	btrfs_drop_extent_cache(BTRFS_I(inode), start, start + ram_size - 1, 0);
out_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_unlock:
	clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
		EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
	page_ops = PAGE_UNLOCK | PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
		PAGE_END_WRITEBACK;
	/*
	 * If we reserved an extent for our delalloc range (or a subrange) and
	 * failed to create the respective ordered extent, then it means that
	 * when we reserved the extent we decremented the extent's size from
	 * the data space_info's bytes_may_use counter and incremented the
	 * space_info's bytes_reserved counter by the same amount. We must make
	 * sure extent_clear_unlock_delalloc() does not try to decrement again
	 * the data space_info's bytes_may_use counter, therefore we do not pass
	 * it the flag EXTENT_CLEAR_DATA_RESV.
	 */
	if (extent_reserved) {
		extent_clear_unlock_delalloc(inode, start,
					     start + cur_alloc_size,
					     start + cur_alloc_size,
					     locked_page,
					     clear_bits,
					     page_ops);
		start += cur_alloc_size;
		if (start >= end)
			goto out;
	}
	extent_clear_unlock_delalloc(inode, start, end, delalloc_end,
				     locked_page,
				     clear_bits | EXTENT_CLEAR_DATA_RESV,
				     page_ops);
	goto out;
}
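
/*
 * Editor's note, a worked pass through the allocation loop above: for
 * a 1M delalloc range the first btrfs_reserve_extent() call asks for
 * the whole 1M; if the allocator can only hand back 256K (ins.offset),
 * cur_alloc_size shrinks to 256K, one ordered extent is created for
 * it, start advances by 256K and the loop retries with the remaining
 * 768K, using the end of the previous reservation as the next hint.
 */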

/*
 * work queue call back to started compression on a file and pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	int num_added = 0;

	async_cow = container_of(work, struct async_cow, work);

	compress_file_range(async_cow->inode, async_cow->locked_page,
			    async_cow->start, async_cow->end, async_cow,
			    &num_added);
	if (num_added == 0) {
		btrfs_add_delayed_iput(async_cow->inode);
		async_cow->inode = NULL;
	}
}

/*
 * work queue call back to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info;
	struct async_cow *async_cow;
	struct btrfs_root *root;
	unsigned long nr_pages;

	async_cow = container_of(work, struct async_cow, work);

	root = async_cow->root;
	fs_info = root->fs_info;
	nr_pages = (async_cow->end - async_cow->start + PAGE_SIZE) >>
		PAGE_SHIFT;

	/*
	 * atomic_sub_return implies a barrier for waitqueue_active
	 */
	if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
	    5 * SZ_1M &&
	    waitqueue_active(&fs_info->async_submit_wait))
		wake_up(&fs_info->async_submit_wait);

	if (async_cow->inode)
		submit_compressed_extents(async_cow->inode, async_cow);
}

static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	async_cow = container_of(work, struct async_cow, work);
	if (async_cow->inode)
		btrfs_add_delayed_iput(async_cow->inode);
	kfree(async_cow);
}

static int cow_file_range_async(struct inode *inode, struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written,
				unsigned int write_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct async_cow *async_cow;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long nr_pages;
	u64 cur_end;

	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
			 1, 0, NULL);
	while (start < end) {
		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
		BUG_ON(!async_cow); /* -ENOMEM */
		async_cow->inode = igrab(inode);
		async_cow->root = root;
		async_cow->locked_page = locked_page;
		async_cow->start = start;
		async_cow->write_flags = write_flags;

		if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS &&
		    !btrfs_test_opt(fs_info, FORCE_COMPRESS))
			cur_end = end;
		else
			cur_end = min(end, start + SZ_512K - 1);

		async_cow->end = cur_end;
		INIT_LIST_HEAD(&async_cow->extents);

		btrfs_init_work(&async_cow->work,
				btrfs_delalloc_helper,
				async_cow_start, async_cow_submit,
				async_cow_free);

		nr_pages = (cur_end - start + PAGE_SIZE) >>
			PAGE_SHIFT;
		atomic_add(nr_pages, &fs_info->async_delalloc_pages);

		btrfs_queue_work(fs_info->delalloc_workers, &async_cow->work);

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return 0;
}
1242 u64 bytenr, u64 num_bytes)
1245 struct btrfs_ordered_sum *sums;
1248 ret = btrfs_lookup_csums_range(fs_info->csum_root, bytenr,
1249 bytenr + num_bytes - 1, &list, 0);
1250 if (ret == 0 && list_empty(&list))
1253 while (!list_empty(&list)) {
1254 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
1255 list_del(&sums->list);

/*
 * when nocow writeback calls back.  This checks for snapshots or COW copies
 * of the extents that exist in the file, and COWs the file as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static noinline int run_delalloc_nocow(struct inode *inode,
				       struct page *locked_page,
			      u64 start, u64 end, int *page_started, int force,
			      unsigned long *nr_written)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key found_key;
	struct extent_map *em;
	u64 cow_start;
	u64 cur_offset;
	u64 extent_end;
	u64 extent_offset;
	u64 disk_bytenr;
	u64 num_bytes;
	u64 disk_num_bytes;
	u64 ram_bytes;
	int extent_type;
	int ret, err;
	int type;
	int nocow;
	int check_prev = 1;
	bool nolock;
	u64 ino = btrfs_ino(BTRFS_I(inode));

	path = btrfs_alloc_path();
	if (!path) {
		extent_clear_unlock_delalloc(inode, start, end, end,
					     locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC |
					     EXTENT_DO_ACCOUNTING |
					     EXTENT_DEFRAG, PAGE_UNLOCK |
					     PAGE_CLEAR_DIRTY |
					     PAGE_SET_WRITEBACK |
					     PAGE_END_WRITEBACK);
		return -ENOMEM;
	}

	nolock = btrfs_is_free_space_inode(BTRFS_I(inode));

	cow_start = (u64)-1;
	cur_offset = start;
	while (1) {
		ret = btrfs_lookup_file_extent(NULL, root, path, ino,
					       cur_offset, 0);
		if (ret < 0)
			goto error;
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				if (cow_start != (u64)-1)
					cur_offset = cow_start;
				goto error;
			}
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		nocow = 0;
		disk_bytenr = 0;
		num_bytes = 0;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		if (found_key.objectid > ino)
			break;
		if (WARN_ON_ONCE(found_key.objectid < ino) ||
		    found_key.type < BTRFS_EXTENT_DATA_KEY) {
			path->slots[0]++;
			goto next_slot;
		}
		if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			extent_type = 0;
			goto out_check;
		}

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = found_key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
			disk_num_bytes =
				btrfs_file_extent_disk_num_bytes(leaf, fi);
			if (extent_end <= start) {
				path->slots[0]++;
				goto next_slot;
			}
			if (disk_bytenr == 0)
				goto out_check;
			if (btrfs_file_extent_compression(leaf, fi) ||
			    btrfs_file_extent_encryption(leaf, fi) ||
			    btrfs_file_extent_other_encoding(leaf, fi))
				goto out_check;
			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
				goto out_check;
			if (btrfs_extent_readonly(fs_info, disk_bytenr))
				goto out_check;
			ret = btrfs_cross_ref_exist(root, ino,
						    found_key.offset -
						    extent_offset, disk_bytenr);
			if (ret) {
				/*
				 * ret could be -EIO if the above fails to read
				 * metadata.
				 */
				if (ret < 0) {
					if (cow_start != (u64)-1)
						cur_offset = cow_start;
					goto error;
				}

				WARN_ON_ONCE(nolock);
				goto out_check;
			}
			disk_bytenr += extent_offset;
			disk_bytenr += cur_offset - found_key.offset;
			num_bytes = min(end + 1, extent_end) - cur_offset;
			/*
			 * if there are pending snapshots for this root,
			 * we fall into common COW way.
			 */
			if (!nolock) {
				err = btrfs_start_write_no_snapshotting(root);
				if (!err)
					goto out_check;
			}
			/*
			 * force cow if csum exists in the range.
			 * this ensure that csum for a given extent are
			 * either valid or do not exist.
			 */
			ret = csum_exist_in_range(fs_info, disk_bytenr,
						  num_bytes);
			if (ret) {
				if (!nolock)
					btrfs_end_write_no_snapshotting(root);

				/*
				 * ret could be -EIO if the above fails to read
				 * metadata.
				 */
				if (ret < 0) {
					if (cow_start != (u64)-1)
						cur_offset = cow_start;
					goto error;
				}
				WARN_ON_ONCE(nolock);
				goto out_check;
			}
			if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr)) {
				if (!nolock)
					btrfs_end_write_no_snapshotting(root);
				goto out_check;
			}
			nocow = 1;
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = found_key.offset +
				btrfs_file_extent_inline_len(leaf,
						     path->slots[0], fi);
			extent_end = ALIGN(extent_end,
					   fs_info->sectorsize);
		} else {
			BUG_ON(1);
		}
out_check:
		if (extent_end <= start) {
			path->slots[0]++;
			if (!nolock && nocow)
				btrfs_end_write_no_snapshotting(root);
			if (nocow)
				btrfs_dec_nocow_writers(fs_info, disk_bytenr);
			goto next_slot;
		}
		if (!nocow) {
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			path->slots[0]++;
			goto next_slot;
		}

		btrfs_release_path(path);
		if (cow_start != (u64)-1) {
			ret = cow_file_range(inode, locked_page,
					     cow_start, found_key.offset - 1,
					     end, page_started, nr_written, 1,
					     NULL);
			if (ret) {
				if (!nolock && nocow)
					btrfs_end_write_no_snapshotting(root);
				if (nocow)
					btrfs_dec_nocow_writers(fs_info,
								disk_bytenr);
				goto error;
			}
			cow_start = (u64)-1;
		}

		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			u64 orig_start = found_key.offset - extent_offset;

			em = create_io_em(inode, cur_offset, num_bytes,
					  orig_start,
					  disk_bytenr, /* block_start */
					  num_bytes, /* block_len */
					  disk_num_bytes, /* orig_block_len */
					  ram_bytes, BTRFS_COMPRESS_NONE,
					  BTRFS_ORDERED_PREALLOC);
			if (IS_ERR(em)) {
				if (!nolock && nocow)
					btrfs_end_write_no_snapshotting(root);
				if (nocow)
					btrfs_dec_nocow_writers(fs_info,
								disk_bytenr);
				ret = PTR_ERR(em);
				goto error;
			}
			free_extent_map(em);
		}

		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			type = BTRFS_ORDERED_PREALLOC;
		} else {
			type = BTRFS_ORDERED_NOCOW;
		}

		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
					       num_bytes, num_bytes, type);
		if (nocow)
			btrfs_dec_nocow_writers(fs_info, disk_bytenr);
		BUG_ON(ret); /* -ENOMEM */

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID)
			/*
			 * Error handled later, as we must prevent
			 * extent_clear_unlock_delalloc() in error handler
			 * from freeing metadata of created ordered extent.
			 */
			ret = btrfs_reloc_clone_csums(inode, cur_offset,
						      num_bytes);

		extent_clear_unlock_delalloc(inode, cur_offset,
					     cur_offset + num_bytes - 1, end,
					     locked_page, EXTENT_LOCKED |
					     EXTENT_DELALLOC |
					     EXTENT_CLEAR_DATA_RESV,
					     PAGE_UNLOCK | PAGE_SET_PRIVATE2);

		if (!nolock && nocow)
			btrfs_end_write_no_snapshotting(root);
		cur_offset = extent_end;

		/*
		 * btrfs_reloc_clone_csums() error, now we're OK to call error
		 * handler, as metadata for created ordered extent will only
		 * be freed by btrfs_finish_ordered_io().
		 */
		if (ret)
			goto error;
		if (cur_offset > end)
			break;
	}
	btrfs_release_path(path);

	if (cur_offset <= end && cow_start == (u64)-1) {
		cow_start = cur_offset;
		cur_offset = end;
	}

	if (cow_start != (u64)-1) {
		ret = cow_file_range(inode, locked_page, cow_start, end, end,
				     page_started, nr_written, 1, NULL);
		if (ret)
			goto error;
	}

error:
	if (ret && cur_offset < end)
		extent_clear_unlock_delalloc(inode, cur_offset, end, end,
					     locked_page, EXTENT_LOCKED |
					     EXTENT_DELALLOC | EXTENT_DEFRAG |
					     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
					     PAGE_CLEAR_DIRTY |
					     PAGE_SET_WRITEBACK |
					     PAGE_END_WRITEBACK);
	btrfs_free_path(path);
	return ret;
}

static inline int need_force_cow(struct inode *inode, u64 start, u64 end)
{

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
	    !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC))
		return 1;

	/*
	 * @defrag_bytes is a hint value, no spinlock held here,
	 * if it is not zero, it means the file is defragging.
	 * Force cow if given extent needs to be defragged.
	 */
	if (BTRFS_I(inode)->defrag_bytes &&
	    test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
			   EXTENT_DEFRAG, 0, NULL))
		return 1;

	return 0;
}

/*
 * extent_io.c call back to do delayed allocation processing
 */
static int run_delalloc_range(void *private_data, struct page *locked_page,
			      u64 start, u64 end, int *page_started,
			      unsigned long *nr_written,
			      struct writeback_control *wbc)
{
	struct inode *inode = private_data;
	int ret;
	int force_cow = need_force_cow(inode, start, end);
	unsigned int write_flags = wbc_to_write_flags(wbc);

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW && !force_cow) {
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 1, nr_written);
	} else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) {
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 0, nr_written);
	} else if (!inode_need_compress(inode, start, end)) {
		ret = cow_file_range(inode, locked_page, start, end, end,
				     page_started, nr_written, 1, NULL);
	} else {
		set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
			&BTRFS_I(inode)->runtime_flags);
		ret = cow_file_range_async(inode, locked_page, start, end,
					   page_started, nr_written,
					   write_flags);
	}
	if (ret)
		btrfs_cleanup_ordered_extents(inode, start, end - start + 1);
	return ret;
}

static void btrfs_split_extent_hook(void *private_data,
				    struct extent_state *orig, u64 split)
{
	struct inode *inode = private_data;
	u64 size;

	/* not delalloc, ignore it */
	if (!(orig->state & EXTENT_DELALLOC))
		return;

	size = orig->end - orig->start + 1;
	if (size > BTRFS_MAX_EXTENT_SIZE) {
		u32 num_extents;
		u64 new_size;

		/*
		 * See the explanation in btrfs_merge_extent_hook, the same
		 * applies here, just in reverse.
		 */
		new_size = orig->end - split + 1;
		num_extents = count_max_extents(new_size);
		new_size = split - orig->start;
		num_extents += count_max_extents(new_size);
		if (count_max_extents(size) >= num_extents)
			return;
	}

	spin_lock(&BTRFS_I(inode)->lock);
	btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
	spin_unlock(&BTRFS_I(inode)->lock);
}

/*
 * extent_io.c merge_extent_hook, used to track merged delayed allocation
 * extents so we can keep track of new extents that are just merged onto old
 * extents, such as when we are doing sequential writes, so we can properly
 * account for the metadata space we'll need.
 */
static void btrfs_merge_extent_hook(void *private_data,
				    struct extent_state *new,
				    struct extent_state *other)
{
	struct inode *inode = private_data;
	u64 new_size, old_size;
	u32 num_extents;

	/* not delalloc, ignore it */
	if (!(other->state & EXTENT_DELALLOC))
		return;

	if (new->start > other->start)
		new_size = new->end - other->start + 1;
	else
		new_size = other->end - new->start + 1;

	/* we're not bigger than the max, unreserve the space and go */
	if (new_size <= BTRFS_MAX_EXTENT_SIZE) {
		spin_lock(&BTRFS_I(inode)->lock);
		btrfs_mod_outstanding_extents(BTRFS_I(inode), -1);
		spin_unlock(&BTRFS_I(inode)->lock);
		return;
	}

	/*
	 * We have to add up either side to figure out how many extents were
	 * accounted for before we merged into one big extent.  If the number of
	 * extents we accounted for is <= the amount we need for the new range
	 * then we can return, otherwise drop.  Think of it like this
	 *
	 * [ 4k][MAX_SIZE]
	 *
	 * So we've grown the extent by a MAX_SIZE extent, this would mean we
	 * need 2 outstanding extents, on one side we have 1 and the other side
	 * we have 1 so they are == and we can return.  But in this case
	 *
	 * [MAX_SIZE+4k][MAX_SIZE+4k]
	 *
	 * Each range on their own accounts for 2 extents, but merged together
	 * they are only 3 extents worth of accounting, so we need to drop in
	 * this case.
	 */
	old_size = other->end - other->start + 1;
	num_extents = count_max_extents(old_size);
	old_size = new->end - new->start + 1;
	num_extents += count_max_extents(old_size);
	if (count_max_extents(new_size) >= num_extents)
		return;

	spin_lock(&BTRFS_I(inode)->lock);
	btrfs_mod_outstanding_extents(BTRFS_I(inode), -1);
	spin_unlock(&BTRFS_I(inode)->lock);
}
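
/*
 * Editor's note, the arithmetic above made concrete (BTRFS_MAX_EXTENT_SIZE
 * is 128M and count_max_extents() rounds up): two adjacent 128M+4K
 * delalloc ranges each count as 2 outstanding extents, 4 in total.
 * Merged, the 256M+8K range needs count_max_extents(256M+8K) == 3, so
 * one reservation is dropped; a 4K range merging into a 128M one stays
 * at 2 on both sides and nothing changes.
 */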

static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
				      struct inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

	spin_lock(&root->delalloc_lock);
	if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
		list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
			      &root->delalloc_inodes);
		set_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			&BTRFS_I(inode)->runtime_flags);
		root->nr_delalloc_inodes++;
		if (root->nr_delalloc_inodes == 1) {
			spin_lock(&fs_info->delalloc_root_lock);
			BUG_ON(!list_empty(&root->delalloc_root));
			list_add_tail(&root->delalloc_root,
				      &fs_info->delalloc_roots);
			spin_unlock(&fs_info->delalloc_root_lock);
		}
	}
	spin_unlock(&root->delalloc_lock);
}

static void btrfs_del_delalloc_inode(struct btrfs_root *root,
				     struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);

	spin_lock(&root->delalloc_lock);
	if (!list_empty(&inode->delalloc_inodes)) {
		list_del_init(&inode->delalloc_inodes);
		clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			  &inode->runtime_flags);
		root->nr_delalloc_inodes--;
		if (!root->nr_delalloc_inodes) {
			spin_lock(&fs_info->delalloc_root_lock);
			BUG_ON(list_empty(&root->delalloc_root));
			list_del_init(&root->delalloc_root);
			spin_unlock(&fs_info->delalloc_root_lock);
		}
	}
	spin_unlock(&root->delalloc_lock);
}

/*
 * extent_io.c set_bit_hook, used to track delayed allocation
 * bytes in this file, and to maintain the list of inodes that
 * have pending delalloc work to be done.
 */
static void btrfs_set_bit_hook(void *private_data,
			       struct extent_state *state, unsigned *bits)
{
	struct inode *inode = private_data;

	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

	if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC))
		WARN_ON(1);
	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		u64 len = state->end + 1 - state->start;
		u32 num_extents = count_max_extents(len);
		bool do_list = !btrfs_is_free_space_inode(BTRFS_I(inode));

		spin_lock(&BTRFS_I(inode)->lock);
		btrfs_mod_outstanding_extents(BTRFS_I(inode), num_extents);
		spin_unlock(&BTRFS_I(inode)->lock);

		/* For sanity tests */
		if (btrfs_is_testing(fs_info))
			return;

		percpu_counter_add_batch(&fs_info->delalloc_bytes, len,
					 fs_info->delalloc_batch);
		spin_lock(&BTRFS_I(inode)->lock);
		BTRFS_I(inode)->delalloc_bytes += len;
		if (*bits & EXTENT_DEFRAG)
			BTRFS_I(inode)->defrag_bytes += len;
		if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
					 &BTRFS_I(inode)->runtime_flags))
			btrfs_add_delalloc_inodes(root, inode);
		spin_unlock(&BTRFS_I(inode)->lock);
	}

	if (!(state->state & EXTENT_DELALLOC_NEW) &&
	    (*bits & EXTENT_DELALLOC_NEW)) {
		spin_lock(&BTRFS_I(inode)->lock);
		BTRFS_I(inode)->new_delalloc_bytes += state->end + 1 -
			state->start;
		spin_unlock(&BTRFS_I(inode)->lock);
	}
}

/*
 * extent_io.c clear_bit_hook, see set_bit_hook for why
 */
static void btrfs_clear_bit_hook(void *private_data,
				 struct extent_state *state,
				 unsigned *bits)
{
	struct btrfs_inode *inode = BTRFS_I((struct inode *)private_data);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	u64 len = state->end + 1 - state->start;
	u32 num_extents = count_max_extents(len);

	if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG)) {
		spin_lock(&inode->lock);
		inode->defrag_bytes -= len;
		spin_unlock(&inode->lock);
	}

	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = inode->root;
		bool do_list = !btrfs_is_free_space_inode(inode);

		spin_lock(&inode->lock);
		btrfs_mod_outstanding_extents(inode, -num_extents);
		spin_unlock(&inode->lock);

		/*
		 * We don't reserve metadata space for space cache inodes so we
		 * don't need to call delalloc_release_metadata if there is an
		 * error.
		 */
		if (*bits & EXTENT_CLEAR_META_RESV &&
		    root != fs_info->tree_root)
			btrfs_delalloc_release_metadata(inode, len);

		/* For sanity tests. */
		if (btrfs_is_testing(fs_info))
			return;

		if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID &&
		    do_list && !(state->state & EXTENT_NORESERVE) &&
		    (*bits & EXTENT_CLEAR_DATA_RESV))
			btrfs_free_reserved_data_space_noquota(
					&inode->vfs_inode,
					state->start, len);

		percpu_counter_add_batch(&fs_info->delalloc_bytes, -len,
					 fs_info->delalloc_batch);
		spin_lock(&inode->lock);
		inode->delalloc_bytes -= len;
		if (do_list && inode->delalloc_bytes == 0 &&
		    test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
					&inode->runtime_flags))
			btrfs_del_delalloc_inode(root, inode);
		spin_unlock(&inode->lock);
	}

	if ((state->state & EXTENT_DELALLOC_NEW) &&
	    (*bits & EXTENT_DELALLOC_NEW)) {
		spin_lock(&inode->lock);
		ASSERT(inode->new_delalloc_bytes >= len);
		inode->new_delalloc_bytes -= len;
		spin_unlock(&inode->lock);
	}
}

/*
 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
 * we don't create bios that span stripes or chunks
 *
 * return 1 if page cannot be merged to bio
 * return 0 if page can be merged to bio
 * return error otherwise
 */
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
			 size_t size, struct bio *bio,
			 unsigned long bio_flags)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;

	if (bio_flags & EXTENT_BIO_COMPRESSED)
		return 0;

	length = bio->bi_iter.bi_size;
	map_length = length;
	ret = btrfs_map_block(fs_info, btrfs_op(bio), logical, &map_length,
			      NULL, 0);
	if (ret < 0)
		return ret;
	if (map_length < length + size)
		return 1;
	return 0;
}
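
/*
 * Editor's note: bi_sector counts 512-byte sectors, hence the << 9 to
 * get a byte offset.  If a bio currently ends where the underlying
 * chunk stripe has only 2K left (map_length), adding a 4K page would
 * cross the stripe boundary, so the hook returns 1 and the page starts
 * a new bio instead.
 */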

/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.   All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static blk_status_t btrfs_submit_bio_start(void *private_data, struct bio *bio,
					   u64 bio_offset)
{
	struct inode *inode = private_data;
	blk_status_t ret = 0;

	ret = btrfs_csum_one_bio(inode, bio, 0, 0);
	BUG_ON(ret); /* -ENOMEM */
	return 0;
}

/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.   All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static blk_status_t btrfs_submit_bio_done(void *private_data, struct bio *bio,
					  int mirror_num)
{
	struct inode *inode = private_data;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	blk_status_t ret;

	ret = btrfs_map_bio(fs_info, bio, mirror_num, 1);
	if (ret) {
		bio->bi_status = ret;
		bio_endio(bio);
	}
	return ret;
}

/*
 * extent_io.c submission hook. This does the right thing for csum calculation
 * on write, or reading the csums from the tree before a read.
 *
 * Rules about async/sync submit,
 * a) read:				sync submit
 *
 * b) write without checksum:		sync submit
 *
 * c) write with checksum:
 *    c-1) if bio is issued by fsync:	sync submit
 *         (sync_writers != 0)
 *
 *    c-2) if root is reloc root:	sync submit
 *         (only in case of buffered IO)
 *
 *    c-3) otherwise:			async submit
 */
static blk_status_t btrfs_submit_bio_hook(void *private_data, struct bio *bio,
				 int mirror_num, unsigned long bio_flags,
				 u64 bio_offset)
{
	struct inode *inode = private_data;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA;
	blk_status_t ret = 0;
	int skip_sum;
	int async = !atomic_read(&BTRFS_I(inode)->sync_writers);

	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	if (btrfs_is_free_space_inode(BTRFS_I(inode)))
		metadata = BTRFS_WQ_ENDIO_FREE_SPACE;

	if (bio_op(bio) != REQ_OP_WRITE) {
		ret = btrfs_bio_wq_end_io(fs_info, bio, metadata);
		if (ret)
			goto out;

		if (bio_flags & EXTENT_BIO_COMPRESSED) {
			ret = btrfs_submit_compressed_read(inode, bio,
							   mirror_num,
							   bio_flags);
			goto out;
		} else if (!skip_sum) {
			ret = btrfs_lookup_bio_sums(inode, bio, NULL);
			if (ret)
				goto out;
		}
		goto mapit;
	} else if (async && !skip_sum) {
		/* csum items have already been cloned */
		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
			goto mapit;
		/* we're doing a write, do the async checksumming */
		ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, bio_flags,
					  bio_offset, inode,
					  btrfs_submit_bio_start,
					  btrfs_submit_bio_done);
		goto out;
	} else if (!skip_sum) {
		ret = btrfs_csum_one_bio(inode, bio, 0, 0);
		if (ret)
			goto out;
	}

mapit:
	ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);

out:
	if (ret) {
		bio->bi_status = ret;
		bio_endio(bio);
	}
	return ret;
}

/*
 * given a list of ordered sums record them in the inode.  This happens
 * at IO completion time based on sums calculated at bio submission time.
 */
static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
			     struct inode *inode, struct list_head *list)
{
	struct btrfs_ordered_sum *sum;
	int ret;

	list_for_each_entry(sum, list, list) {
		trans->adding_csums = true;
		ret = btrfs_csum_file_blocks(trans,
		       BTRFS_I(inode)->root->fs_info->csum_root, sum);
		trans->adding_csums = false;
		if (ret)
			return ret;
	}
	return 0;
}

int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
			      unsigned int extra_bits,
			      struct extent_state **cached_state, int dedupe)
{
	WARN_ON((end & (PAGE_SIZE - 1)) == 0);
	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
				   extra_bits, cached_state);
}

/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
	struct page *page;
	struct btrfs_work work;
};

static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct extent_changeset *data_reserved = NULL;
	struct page *page;
	struct inode *inode;
	u64 page_start;
	u64 page_end;
	int ret;

	fixup = container_of(work, struct btrfs_writepage_fixup, work);
	page = fixup->page;
again:
	lock_page(page);
	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
		ClearPageChecked(page);
		goto out_page;
	}

	inode = page->mapping->host;
	page_start = page_offset(page);
	page_end = page_offset(page) + PAGE_SIZE - 1;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
			 &cached_state);

	/* already ordered? We're done */
	if (PagePrivate2(page))
		goto out;

	ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
					PAGE_SIZE);
	if (ordered) {
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
				     page_end, &cached_state);
		unlock_page(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
					   PAGE_SIZE);
	if (ret) {
		mapping_set_error(page->mapping, ret);
		end_extent_writepage(page, ret, page_start, page_end);
		ClearPageChecked(page);
		goto out;
	}

	ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
					&cached_state, 0);
	if (ret) {
		mapping_set_error(page->mapping, ret);
		end_extent_writepage(page, ret, page_start, page_end);
		ClearPageChecked(page);
		goto out;
	}

	ClearPageChecked(page);
	set_page_dirty(page);
	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
			     &cached_state);
out_page:
	unlock_page(page);
	put_page(page);
	kfree(fixup);
	extent_changeset_free(data_reserved);
}

/*
 * There are a few paths in the higher layers of the kernel that directly
 * set the page dirty bit without asking the filesystem if it is a
 * good idea.  This causes problems because we want to make sure COW
 * properly happens and the data=ordered rules are followed.
 *
 * In our case any range that doesn't have the ORDERED bit set
 * hasn't been properly setup for IO.  We kick off an async process
 * to fix it up.  The async helper will wait for ordered extents, set
 * the delalloc bit and make it safe to write the page.
 */
static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_writepage_fixup *fixup;

	/* this page is properly in the ordered list */
	if (TestClearPagePrivate2(page))
		return 0;

	if (PageChecked(page))
		return -EAGAIN;

	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
	if (!fixup)
		return -EAGAIN;

	SetPageChecked(page);
	get_page(page);
	btrfs_init_work(&fixup->work, btrfs_fixup_helper,
			btrfs_writepage_fixup_worker, NULL, NULL);
	fixup->page = page;
	btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
	return -EBUSY;
}
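
/*
 * Insert the file extent item for an extent that has already been written.
 * Any existing file extents in the range are dropped, the new item is
 * filled in from the arguments, and a delayed ref crediting the new
 * extent is recorded via btrfs_alloc_reserved_file_extent(); the qgroup
 * reservation for the range is released as part of that handoff.
 */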
static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
				       struct inode *inode, u64 file_pos,
				       u64 disk_bytenr, u64 disk_num_bytes,
				       u64 num_bytes, u64 ram_bytes,
				       u8 compression, u8 encryption,
				       u16 other_encoding, int extent_type)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key ins;
	u64 qg_released;
	int extent_inserted = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * we may be replacing one extent in the tree with another.
	 * The new extent is pinned in the extent map, and we don't want
	 * to drop it from the cache until it is completely in the btree.
	 *
	 * So, tell btrfs_drop_extents to leave this extent in the cache.
	 * the caller is expected to unpin it and allow it to be merged
	 * with the others.
	 */
	ret = __btrfs_drop_extents(trans, root, inode, path, file_pos,
				   file_pos + num_bytes, NULL, 0,
				   1, sizeof(*fi), &extent_inserted);
	if (ret)
		goto out;

	if (!extent_inserted) {
		ins.objectid = btrfs_ino(BTRFS_I(inode));
		ins.offset = file_pos;
		ins.type = BTRFS_EXTENT_DATA_KEY;

		path->leave_spinning = 1;
		ret = btrfs_insert_empty_item(trans, root, path, &ins,
					      sizeof(*fi));
		if (ret)
			goto out;
	}
	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
	btrfs_set_file_extent_type(leaf, fi, extent_type);
	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, fi, 0);
	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
	btrfs_set_file_extent_compression(leaf, fi, compression);
	btrfs_set_file_extent_encryption(leaf, fi, encryption);
	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	inode_add_bytes(inode, num_bytes);

	ins.objectid = disk_bytenr;
	ins.offset = disk_num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	/*
	 * Release the reserved range from inode dirty range map, as it is
	 * already moved into delayed_ref_head
	 */
	ret = btrfs_qgroup_release_data(inode, file_pos, ram_bytes);
	if (ret < 0)
		goto out;
	qg_released = ret;
	ret = btrfs_alloc_reserved_file_extent(trans, root,
					       btrfs_ino(BTRFS_I(inode)),
					       file_pos, qg_released, &ins);
out:
	btrfs_free_path(path);

	return ret;
}
/* snapshot-aware defrag */
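/*
 * For one freshly written defrag extent ("new"), the structures below
 * track the extents it replaced ("old") and every file extent item in
 * other roots that still references the old bytes ("backref"), so those
 * references can later be relinked to the new extent.
 */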
struct sa_defrag_extent_backref {
	struct rb_node node;
	struct old_sa_defrag_extent *old;
	u64 root_id;
	u64 inum;
	u64 file_pos;
	u64 extent_offset;
	u64 num_bytes;
	u64 generation;
};

struct old_sa_defrag_extent {
	struct list_head list;
	struct new_sa_defrag_extent *new;
	u64 extent_offset;
	u64 bytenr;
	u64 offset;
	u64 len;
	int count;
};

struct new_sa_defrag_extent {
	struct rb_root root;
	struct list_head head;
	struct btrfs_path *path;
	struct inode *inode;
	u64 file_pos;
	u64 len;
	u64 bytenr;
	u64 disk_len;
	u8 compress_type;
};
static int backref_comp(struct sa_defrag_extent_backref *b1,
			struct sa_defrag_extent_backref *b2)
{
	if (b1->root_id < b2->root_id)
		return -1;
	else if (b1->root_id > b2->root_id)
		return 1;

	if (b1->inum < b2->inum)
		return -1;
	else if (b1->inum > b2->inum)
		return 1;

	if (b1->file_pos < b2->file_pos)
		return -1;
	else if (b1->file_pos > b2->file_pos)
		return 1;

	/*
	 * [------------------------------] ===> (a range of space)
	 *    |<--->|   |<---->| =============> (fs/file tree A)
	 * |<---------------------------->| ===> (fs/file tree B)
	 *
	 * A range of space can refer to two file extents in one tree while
	 * referring to only one file extent in another tree.
	 *
	 * So we may process a disk offset more than one time (two extents in
	 * A) that land in the same extent (one extent in B), and then insert
	 * two identical backrefs (both referring to the extent in B).
	 */
	return 0;
}
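
/*
 * Plain rbtree insertion keyed by backref_comp().  Equal keys descend to
 * the right, so duplicate backrefs (see the comment in backref_comp) are
 * kept rather than rejected.
 */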
static void backref_insert(struct rb_root *root,
			   struct sa_defrag_extent_backref *backref)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct sa_defrag_extent_backref *entry;
	int ret;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct sa_defrag_extent_backref, node);

		ret = backref_comp(backref, entry);
		if (ret < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&backref->node, parent, p);
	rb_insert_color(&backref->node, root);
}
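
/*
 * Callback for iterate_inodes_from_logical(): given one (inum, offset,
 * root) reference found by the backref walk, verify that the file extent
 * item still points into the old extent and record it for relinking.
 */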
/*
 * Note the backref might have changed, and in this case we just return 0.
 */
static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
				       void *ctx)
{
	struct btrfs_file_extent_item *extent;
	struct old_sa_defrag_extent *old = ctx;
	struct new_sa_defrag_extent *new = old->new;
	struct btrfs_path *path = new->path;
	struct btrfs_key key;
	struct btrfs_root *root;
	struct sa_defrag_extent_backref *backref;
	struct extent_buffer *leaf;
	struct inode *inode = new->inode;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int slot;
	int ret;
	u64 extent_offset;
	u64 num_bytes;

	if (BTRFS_I(inode)->root->root_key.objectid == root_id &&
	    inum == btrfs_ino(BTRFS_I(inode)))
		return 0;

	key.objectid = root_id;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(root)) {
		if (PTR_ERR(root) == -ENOENT)
			return 0;
		btrfs_debug(fs_info, "inum=%llu, offset=%llu, root_id=%llu",
			    inum, offset, root_id);
		return PTR_ERR(root);
	}

	key.objectid = inum;
	key.type = BTRFS_EXTENT_DATA_KEY;
	if (offset > (u64)-1 << 32)
		key.offset = 0;
	else
		key.offset = offset;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (WARN_ON(ret < 0))
		return ret;
	ret = 0;

	while (1) {
		cond_resched();

		leaf = path->nodes[0];
		slot = path->slots[0];

		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				ret = 0;
				goto out;
			}
			continue;
		}

		path->slots[0]++;

		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid > inum)
			goto out;

		if (key.objectid < inum || key.type != BTRFS_EXTENT_DATA_KEY)
			continue;

		extent = btrfs_item_ptr(leaf, slot,
					struct btrfs_file_extent_item);

		if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr)
			continue;

		/*
		 * 'offset' refers to the exact key.offset,
		 * NOT the 'offset' field in btrfs_extent_data_ref, ie.
		 * (key.offset - extent_offset).
		 */
		if (key.offset != offset)
			continue;

		extent_offset = btrfs_file_extent_offset(leaf, extent);
		num_bytes = btrfs_file_extent_num_bytes(leaf, extent);

		if (extent_offset >= old->extent_offset + old->offset +
		    old->len || extent_offset + num_bytes <=
		    old->extent_offset + old->offset)
			continue;
		break;
	}

	backref = kmalloc(sizeof(*backref), GFP_NOFS);
	if (!backref) {
		ret = -ENOMEM;
		goto out;
	}

	backref->root_id = root_id;
	backref->inum = inum;
	backref->file_pos = offset;
	backref->num_bytes = num_bytes;
	backref->extent_offset = extent_offset;
	backref->generation = btrfs_file_extent_generation(leaf, extent);
	backref->old = old;
	backref_insert(&new->root, backref);
	old->count++;
out:
	btrfs_release_path(path);
	WARN_ON(ret);
	return ret;
}
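
/*
 * Collect, via backref iteration, every file extent item that still
 * references one of the old extents.  Returns false when there is
 * nothing left to relink.
 */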
static noinline bool record_extent_backrefs(struct btrfs_path *path,
					    struct new_sa_defrag_extent *new)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb);
	struct old_sa_defrag_extent *old, *tmp;
	int ret;

	new->path = path;

	list_for_each_entry_safe(old, tmp, &new->head, list) {
		ret = iterate_inodes_from_logical(old->bytenr +
						  old->extent_offset, fs_info,
						  path, record_one_backref,
						  old, false);
		if (ret < 0 && ret != -ENOENT)
			return false;

		/* no backref to be processed for this extent */
		if (!old->count) {
			list_del(&old->list);
			kfree(old);
		}
	}

	if (list_empty(&new->head))
		return false;

	return true;
}
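
/*
 * Check whether an existing file extent item can simply be extended to
 * cover the relinked range instead of inserting a new item.
 */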
static int relink_is_mergable(struct extent_buffer *leaf,
			      struct btrfs_file_extent_item *fi,
			      struct new_sa_defrag_extent *new)
{
	if (btrfs_file_extent_disk_bytenr(leaf, fi) != new->bytenr)
		return 0;

	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
		return 0;

	if (btrfs_file_extent_compression(leaf, fi) != new->compress_type)
		return 0;

	if (btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	return 1;
}
/*
 * Note the backref might have changed, and in this case we just return 0.
 */
static noinline int relink_extent_backref(struct btrfs_path *path,
				 struct sa_defrag_extent_backref *prev,
				 struct sa_defrag_extent_backref *backref)
{
	struct btrfs_file_extent_item *extent;
	struct btrfs_file_extent_item *item;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct old_sa_defrag_extent *old = backref->old;
	struct new_sa_defrag_extent *new = old->new;
	struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb);
	struct inode *inode;
	struct extent_state *cached = NULL;
	int ret = 0;
	u64 start;
	u64 len;
	u64 lock_start;
	u64 lock_end;
	bool merge = false;
	int index;

	if (prev && prev->root_id == backref->root_id &&
	    prev->inum == backref->inum &&
	    prev->file_pos + prev->num_bytes == backref->file_pos)
		merge = true;

	/* step 1: get root */
	key.objectid = backref->root_id;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	index = srcu_read_lock(&fs_info->subvol_srcu);

	root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, index);
		if (PTR_ERR(root) == -ENOENT)
			return 0;
		return PTR_ERR(root);
	}

	if (btrfs_root_readonly(root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, index);
		return 0;
	}

	/* step 2: get inode */
	key.objectid = backref->inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	inode = btrfs_iget(fs_info->sb, &key, root, NULL);
	if (IS_ERR(inode)) {
		srcu_read_unlock(&fs_info->subvol_srcu, index);
		return 0;
	}

	srcu_read_unlock(&fs_info->subvol_srcu, index);

	/* step 3: relink backref */
	lock_start = backref->file_pos;
	lock_end = backref->file_pos + backref->num_bytes - 1;
	lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
			 &cached);

	ordered = btrfs_lookup_first_ordered_extent(inode, lock_end);
	if (ordered) {
		btrfs_put_ordered_extent(ordered);
		goto out_unlock;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_unlock;
	}

	key.objectid = backref->inum;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = backref->file_pos;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		goto out_free_path;
	} else if (ret > 0) {
		ret = 0;
		goto out_free_path;
	}

	extent = btrfs_item_ptr(path->nodes[0], path->slots[0],
				struct btrfs_file_extent_item);

	if (btrfs_file_extent_generation(path->nodes[0], extent) !=
	    backref->generation)
		goto out_free_path;

	btrfs_release_path(path);

	start = backref->file_pos;
	if (backref->extent_offset < old->extent_offset + old->offset)
		start += old->extent_offset + old->offset -
			 backref->extent_offset;

	len = min(backref->extent_offset + backref->num_bytes,
		  old->extent_offset + old->offset + old->len);
	len -= max(backref->extent_offset, old->extent_offset + old->offset);

	ret = btrfs_drop_extents(trans, root, inode, start,
				 start + len, 1);
	if (ret)
		goto out_free_path;
again:
	key.objectid = btrfs_ino(BTRFS_I(inode));
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = start;

	path->leave_spinning = 1;
	if (merge) {
		struct btrfs_file_extent_item *fi;
		u64 extent_len;
		struct btrfs_key found_key;

		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto out_free_path;

		path->slots[0]--;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_len = btrfs_file_extent_num_bytes(leaf, fi);

		if (extent_len + found_key.offset == start &&
		    relink_is_mergable(leaf, fi, new)) {
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_len + len);
			btrfs_mark_buffer_dirty(leaf);
			inode_add_bytes(inode, len);

			ret = 1;
			goto out_free_path;
		} else {
			merge = false;
			btrfs_release_path(path);
			goto again;
		}
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key,
					sizeof(*extent));
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	btrfs_set_file_extent_disk_bytenr(leaf, item, new->bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, item, new->disk_len);
	btrfs_set_file_extent_offset(leaf, item, start - new->file_pos);
	btrfs_set_file_extent_num_bytes(leaf, item, len);
	btrfs_set_file_extent_ram_bytes(leaf, item, new->len);
	btrfs_set_file_extent_generation(leaf, item, trans->transid);
	btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
	btrfs_set_file_extent_compression(leaf, item, new->compress_type);
	btrfs_set_file_extent_encryption(leaf, item, 0);
	btrfs_set_file_extent_other_encoding(leaf, item, 0);

	btrfs_mark_buffer_dirty(leaf);
	inode_add_bytes(inode, len);
	btrfs_release_path(path);

	ret = btrfs_inc_extent_ref(trans, root, new->bytenr,
				   new->disk_len, 0,
				   backref->root_id, backref->inum,
				   new->file_pos);	/* start - extent_offset */
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	ret = 1;
out_free_path:
	btrfs_release_path(path);
	path->leave_spinning = 0;
	btrfs_end_transaction(trans);
out_unlock:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
			     &cached);
	iput(inode);
	return ret;
}
static void free_sa_defrag_extent(struct new_sa_defrag_extent *new)
{
	struct old_sa_defrag_extent *old, *tmp;

	if (!new)
		return;

	list_for_each_entry_safe(old, tmp, &new->head, list)
		kfree(old);
	kfree(new);
}
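
/*
 * Drive the relink phase: collect the backrefs for all old extents, then
 * process them in (root_id, inum, file_pos) order so that adjacent
 * ranges in the same file can be merged by relink_extent_backref().
 */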
static void relink_file_extents(struct new_sa_defrag_extent *new)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb);
	struct btrfs_path *path;
	struct sa_defrag_extent_backref *backref;
	struct sa_defrag_extent_backref *prev = NULL;
	struct inode *inode;
	struct rb_node *node;
	int ret;

	inode = new->inode;

	path = btrfs_alloc_path();
	if (!path)
		return;

	if (!record_extent_backrefs(path, new)) {
		btrfs_free_path(path);
		goto out;
	}
	btrfs_release_path(path);

	while (1) {
		node = rb_first(&new->root);
		if (!node)
			break;
		rb_erase(node, &new->root);

		backref = rb_entry(node, struct sa_defrag_extent_backref, node);

		ret = relink_extent_backref(path, prev, backref);
		WARN_ON(ret < 0);

		kfree(prev);

		if (ret == 1)
			prev = backref;
		else
			prev = NULL;
		cond_resched();
	}
	kfree(prev);

	btrfs_free_path(path);
out:
	free_sa_defrag_extent(new);

	atomic_dec(&fs_info->defrag_running);
	wake_up(&fs_info->transaction_wait);
}
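
/*
 * Record the file extents that the just-written defrag extent is
 * replacing, so that their remaining references can be relinked once
 * the ordered extent completes.
 */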
static struct new_sa_defrag_extent *
record_old_file_extents(struct inode *inode,
			struct btrfs_ordered_extent *ordered)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct old_sa_defrag_extent *old;
	struct new_sa_defrag_extent *new;
	int ret;

	new = kmalloc(sizeof(*new), GFP_NOFS);
	if (!new)
		return NULL;

	new->inode = inode;
	new->file_pos = ordered->file_offset;
	new->len = ordered->len;
	new->bytenr = ordered->start;
	new->disk_len = ordered->disk_len;
	new->compress_type = ordered->compress_type;
	new->root = RB_ROOT;
	INIT_LIST_HEAD(&new->head);

	path = btrfs_alloc_path();
	if (!path)
		goto out_kfree;

	key.objectid = btrfs_ino(BTRFS_I(inode));
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = new->file_pos;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out_free_path;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	/* find out all the old extents for the file range */
	while (1) {
		struct btrfs_file_extent_item *extent;
		struct extent_buffer *l;
		int slot;
		u64 num_bytes;
		u64 offset;
		u64 end;
		u64 disk_bytenr;
		u64 extent_offset;

		l = path->nodes[0];
		slot = path->slots[0];

		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out_free_path;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid != btrfs_ino(BTRFS_I(inode)))
			break;
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			break;
		if (key.offset >= new->file_pos + new->len)
			break;

		extent = btrfs_item_ptr(l, slot, struct btrfs_file_extent_item);

		num_bytes = btrfs_file_extent_num_bytes(l, extent);
		if (key.offset + num_bytes < new->file_pos)
			goto next;

		disk_bytenr = btrfs_file_extent_disk_bytenr(l, extent);
		if (!disk_bytenr)
			goto next;

		extent_offset = btrfs_file_extent_offset(l, extent);

		old = kmalloc(sizeof(*old), GFP_NOFS);
		if (!old)
			goto out_free_path;

		offset = max(new->file_pos, key.offset);
		end = min(new->file_pos + new->len, key.offset + num_bytes);

		old->bytenr = disk_bytenr;
		old->extent_offset = extent_offset;
		old->offset = offset - key.offset;
		old->len = end - offset;
		old->new = new;
		old->count = 0;
		list_add_tail(&old->list, &new->head);
next:
		path->slots[0]++;
		cond_resched();
	}

	btrfs_free_path(path);
	atomic_inc(&fs_info->defrag_running);

	return new;

out_free_path:
	btrfs_free_path(path);
out_kfree:
	free_sa_defrag_extent(new);
	return NULL;
}
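
/*
 * The block group keeps a running count of bytes allocated by in-flight
 * delalloc; drop this extent's contribution now that its IO is done.
 */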
static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
					 u64 start, u64 len)
{
	struct btrfs_block_group_cache *cache;

	cache = btrfs_lookup_block_group(fs_info, start);
	ASSERT(cache);

	spin_lock(&cache->lock);
	cache->delalloc_bytes -= len;
	spin_unlock(&cache->lock);

	btrfs_put_block_group(cache);
}
/* as ordered data IO finishes, this gets called so we can finish
 * an ordered extent if the range of bytes in the file it covers are
 * fully written.
 */
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
{
	struct inode *inode = ordered_extent->inode;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans = NULL;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_state *cached_state = NULL;
	struct new_sa_defrag_extent *new = NULL;
	int compress_type = 0;
	int ret = 0;
	u64 logical_len = ordered_extent->len;
	bool nolock;
	bool truncated = false;
	bool range_locked = false;
	bool clear_new_delalloc_bytes = false;

	if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
	    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) &&
	    !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags))
		clear_new_delalloc_bytes = true;

	nolock = btrfs_is_free_space_inode(BTRFS_I(inode));

	if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
		ret = -EIO;
		goto out;
	}

	btrfs_free_io_failure_record(BTRFS_I(inode),
			ordered_extent->file_offset,
			ordered_extent->file_offset +
			ordered_extent->len - 1);

	if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
		truncated = true;
		logical_len = ordered_extent->truncated_len;
		/* Truncated the entire extent, don't bother adding */
		if (!logical_len)
			goto out;
	}
	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
		BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */

		/*
		 * For mwrite(mmap + memset to write) case, we still reserve
		 * space for NOCOW range.
		 * As NOCOW won't cause a new delayed ref, just free the space
		 */
		btrfs_qgroup_free_data(inode, NULL, ordered_extent->file_offset,
				       ordered_extent->len);
		btrfs_ordered_update_i_size(inode, 0, ordered_extent);
		if (nolock)
			trans = btrfs_join_transaction_nolock(root);
		else
			trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			goto out;
		}
		trans->block_rsv = &BTRFS_I(inode)->block_rsv;
		ret = btrfs_update_inode_fallback(trans, root, inode);
		if (ret) /* -ENOMEM or corruption */
			btrfs_abort_transaction(trans, ret);
		goto out;
	}

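	/*
	 * COW case: keep the whole ordered range locked while we check the
	 * defrag state and insert the new file extent item.
	 */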
	range_locked = true;
	lock_extent_bits(io_tree, ordered_extent->file_offset,
			 ordered_extent->file_offset + ordered_extent->len - 1,
			 &cached_state);

	ret = test_range_bit(io_tree, ordered_extent->file_offset,
			ordered_extent->file_offset + ordered_extent->len - 1,
			EXTENT_DEFRAG, 0, cached_state);
	if (ret) {
		u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
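		/*
		 * Snapshot-aware defrag is currently disabled; the "if (0 &&"
		 * below keeps the relink machinery compiled but never taken.
		 */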
		if (0 && last_snapshot >= BTRFS_I(inode)->generation)
			/* the inode is shared */
			new = record_old_file_extents(inode, ordered_extent);

		clear_extent_bit(io_tree, ordered_extent->file_offset,
			ordered_extent->file_offset + ordered_extent->len - 1,
			EXTENT_DEFRAG, 0, 0, &cached_state);
	}

	if (nolock)
		trans = btrfs_join_transaction_nolock(root);
	else
		trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	trans->block_rsv = &BTRFS_I(inode)->block_rsv;

	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
		compress_type = ordered_extent->compress_type;
	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
		BUG_ON(compress_type);
		btrfs_qgroup_free_data(inode, NULL, ordered_extent->file_offset,
				       ordered_extent->len);
		ret = btrfs_mark_extent_written(trans, BTRFS_I(inode),
						ordered_extent->file_offset,
						ordered_extent->file_offset +
						logical_len);
	} else {
		BUG_ON(root == fs_info->tree_root);
		ret = insert_reserved_file_extent(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->start,
						ordered_extent->disk_len,
						logical_len, logical_len,