Merge tag 'mmc-v4.17-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc
[sfrench/cifs-2.6.git] / fs / btrfs / inode.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5
6 #include <linux/kernel.h>
7 #include <linux/bio.h>
8 #include <linux/buffer_head.h>
9 #include <linux/file.h>
10 #include <linux/fs.h>
11 #include <linux/pagemap.h>
12 #include <linux/highmem.h>
13 #include <linux/time.h>
14 #include <linux/init.h>
15 #include <linux/string.h>
16 #include <linux/backing-dev.h>
17 #include <linux/mpage.h>
18 #include <linux/swap.h>
19 #include <linux/writeback.h>
20 #include <linux/compat.h>
21 #include <linux/bit_spinlock.h>
22 #include <linux/xattr.h>
23 #include <linux/posix_acl.h>
24 #include <linux/falloc.h>
25 #include <linux/slab.h>
26 #include <linux/ratelimit.h>
27 #include <linux/mount.h>
28 #include <linux/btrfs.h>
29 #include <linux/blkdev.h>
30 #include <linux/posix_acl_xattr.h>
31 #include <linux/uio.h>
32 #include <linux/magic.h>
33 #include <linux/iversion.h>
34 #include <asm/unaligned.h>
35 #include "ctree.h"
36 #include "disk-io.h"
37 #include "transaction.h"
38 #include "btrfs_inode.h"
39 #include "print-tree.h"
40 #include "ordered-data.h"
41 #include "xattr.h"
42 #include "tree-log.h"
43 #include "volumes.h"
44 #include "compression.h"
45 #include "locking.h"
46 #include "free-space-cache.h"
47 #include "inode-map.h"
48 #include "backref.h"
49 #include "props.h"
50 #include "qgroup.h"
51 #include "dedupe.h"
52
53 struct btrfs_iget_args {
54         struct btrfs_key *location;
55         struct btrfs_root *root;
56 };
57
58 struct btrfs_dio_data {
59         u64 reserve;
60         u64 unsubmitted_oe_range_start;
61         u64 unsubmitted_oe_range_end;
62         int overwrite;
63 };
64
65 static const struct inode_operations btrfs_dir_inode_operations;
66 static const struct inode_operations btrfs_symlink_inode_operations;
67 static const struct inode_operations btrfs_dir_ro_inode_operations;
68 static const struct inode_operations btrfs_special_inode_operations;
69 static const struct inode_operations btrfs_file_inode_operations;
70 static const struct address_space_operations btrfs_aops;
71 static const struct address_space_operations btrfs_symlink_aops;
72 static const struct file_operations btrfs_dir_file_operations;
73 static const struct extent_io_ops btrfs_extent_io_ops;
74
75 static struct kmem_cache *btrfs_inode_cachep;
76 struct kmem_cache *btrfs_trans_handle_cachep;
77 struct kmem_cache *btrfs_path_cachep;
78 struct kmem_cache *btrfs_free_space_cachep;
79
80 #define S_SHIFT 12
81 static const unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
82         [S_IFREG >> S_SHIFT]    = BTRFS_FT_REG_FILE,
83         [S_IFDIR >> S_SHIFT]    = BTRFS_FT_DIR,
84         [S_IFCHR >> S_SHIFT]    = BTRFS_FT_CHRDEV,
85         [S_IFBLK >> S_SHIFT]    = BTRFS_FT_BLKDEV,
86         [S_IFIFO >> S_SHIFT]    = BTRFS_FT_FIFO,
87         [S_IFSOCK >> S_SHIFT]   = BTRFS_FT_SOCK,
88         [S_IFLNK >> S_SHIFT]    = BTRFS_FT_SYMLINK,
89 };
90
91 static int btrfs_setsize(struct inode *inode, struct iattr *attr);
92 static int btrfs_truncate(struct inode *inode, bool skip_writeback);
93 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
94 static noinline int cow_file_range(struct inode *inode,
95                                    struct page *locked_page,
96                                    u64 start, u64 end, u64 delalloc_end,
97                                    int *page_started, unsigned long *nr_written,
98                                    int unlock, struct btrfs_dedupe_hash *hash);
99 static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len,
100                                        u64 orig_start, u64 block_start,
101                                        u64 block_len, u64 orig_block_len,
102                                        u64 ram_bytes, int compress_type,
103                                        int type);
104
105 static void __endio_write_update_ordered(struct inode *inode,
106                                          const u64 offset, const u64 bytes,
107                                          const bool uptodate);
108
109 /*
110  * Cleanup all submitted ordered extents in specified range to handle errors
111  * from the fill_dellaloc() callback.
112  *
113  * NOTE: caller must ensure that when an error happens, it can not call
114  * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
115  * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
116  * to be released, which we want to happen only when finishing the ordered
117  * extent (btrfs_finish_ordered_io()). Also note that the caller of the
118  * fill_delalloc() callback already does proper cleanup for the first page of
119  * the range, that is, it invokes the callback writepage_end_io_hook() for the
120  * range of the first page.
121  */
122 static inline void btrfs_cleanup_ordered_extents(struct inode *inode,
123                                                  const u64 offset,
124                                                  const u64 bytes)
125 {
126         unsigned long index = offset >> PAGE_SHIFT;
127         unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
128         struct page *page;
129
130         while (index <= end_index) {
131                 page = find_get_page(inode->i_mapping, index);
132                 index++;
133                 if (!page)
134                         continue;
135                 ClearPagePrivate2(page);
136                 put_page(page);
137         }
138         return __endio_write_update_ordered(inode, offset + PAGE_SIZE,
139                                             bytes - PAGE_SIZE, false);
140 }
141
142 static int btrfs_dirty_inode(struct inode *inode);
143
144 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
145 void btrfs_test_inode_set_ops(struct inode *inode)
146 {
147         BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
148 }
149 #endif
150
151 static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
152                                      struct inode *inode,  struct inode *dir,
153                                      const struct qstr *qstr)
154 {
155         int err;
156
157         err = btrfs_init_acl(trans, inode, dir);
158         if (!err)
159                 err = btrfs_xattr_security_init(trans, inode, dir, qstr);
160         return err;
161 }
162
163 /*
164  * this does all the hard work for inserting an inline extent into
165  * the btree.  The caller should have done a btrfs_drop_extents so that
166  * no overlapping inline items exist in the btree
167  */
168 static int insert_inline_extent(struct btrfs_trans_handle *trans,
169                                 struct btrfs_path *path, int extent_inserted,
170                                 struct btrfs_root *root, struct inode *inode,
171                                 u64 start, size_t size, size_t compressed_size,
172                                 int compress_type,
173                                 struct page **compressed_pages)
174 {
175         struct extent_buffer *leaf;
176         struct page *page = NULL;
177         char *kaddr;
178         unsigned long ptr;
179         struct btrfs_file_extent_item *ei;
180         int ret;
181         size_t cur_size = size;
182         unsigned long offset;
183
184         if (compressed_size && compressed_pages)
185                 cur_size = compressed_size;
186
187         inode_add_bytes(inode, size);
188
189         if (!extent_inserted) {
190                 struct btrfs_key key;
191                 size_t datasize;
192
193                 key.objectid = btrfs_ino(BTRFS_I(inode));
194                 key.offset = start;
195                 key.type = BTRFS_EXTENT_DATA_KEY;
196
197                 datasize = btrfs_file_extent_calc_inline_size(cur_size);
198                 path->leave_spinning = 1;
199                 ret = btrfs_insert_empty_item(trans, root, path, &key,
200                                               datasize);
201                 if (ret)
202                         goto fail;
203         }
204         leaf = path->nodes[0];
205         ei = btrfs_item_ptr(leaf, path->slots[0],
206                             struct btrfs_file_extent_item);
207         btrfs_set_file_extent_generation(leaf, ei, trans->transid);
208         btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
209         btrfs_set_file_extent_encryption(leaf, ei, 0);
210         btrfs_set_file_extent_other_encoding(leaf, ei, 0);
211         btrfs_set_file_extent_ram_bytes(leaf, ei, size);
212         ptr = btrfs_file_extent_inline_start(ei);
213
214         if (compress_type != BTRFS_COMPRESS_NONE) {
215                 struct page *cpage;
216                 int i = 0;
217                 while (compressed_size > 0) {
218                         cpage = compressed_pages[i];
219                         cur_size = min_t(unsigned long, compressed_size,
220                                        PAGE_SIZE);
221
222                         kaddr = kmap_atomic(cpage);
223                         write_extent_buffer(leaf, kaddr, ptr, cur_size);
224                         kunmap_atomic(kaddr);
225
226                         i++;
227                         ptr += cur_size;
228                         compressed_size -= cur_size;
229                 }
230                 btrfs_set_file_extent_compression(leaf, ei,
231                                                   compress_type);
232         } else {
233                 page = find_get_page(inode->i_mapping,
234                                      start >> PAGE_SHIFT);
235                 btrfs_set_file_extent_compression(leaf, ei, 0);
236                 kaddr = kmap_atomic(page);
237                 offset = start & (PAGE_SIZE - 1);
238                 write_extent_buffer(leaf, kaddr + offset, ptr, size);
239                 kunmap_atomic(kaddr);
240                 put_page(page);
241         }
242         btrfs_mark_buffer_dirty(leaf);
243         btrfs_release_path(path);
244
245         /*
246          * we're an inline extent, so nobody can
247          * extend the file past i_size without locking
248          * a page we already have locked.
249          *
250          * We must do any isize and inode updates
251          * before we unlock the pages.  Otherwise we
252          * could end up racing with unlink.
253          */
254         BTRFS_I(inode)->disk_i_size = inode->i_size;
255         ret = btrfs_update_inode(trans, root, inode);
256
257 fail:
258         return ret;
259 }
260
261
262 /*
263  * conditionally insert an inline extent into the file.  This
264  * does the checks required to make sure the data is small enough
265  * to fit as an inline extent.
266  */
267 static noinline int cow_file_range_inline(struct inode *inode, u64 start,
268                                           u64 end, size_t compressed_size,
269                                           int compress_type,
270                                           struct page **compressed_pages)
271 {
272         struct btrfs_root *root = BTRFS_I(inode)->root;
273         struct btrfs_fs_info *fs_info = root->fs_info;
274         struct btrfs_trans_handle *trans;
275         u64 isize = i_size_read(inode);
276         u64 actual_end = min(end + 1, isize);
277         u64 inline_len = actual_end - start;
278         u64 aligned_end = ALIGN(end, fs_info->sectorsize);
279         u64 data_len = inline_len;
280         int ret;
281         struct btrfs_path *path;
282         int extent_inserted = 0;
283         u32 extent_item_size;
284
285         if (compressed_size)
286                 data_len = compressed_size;
287
288         if (start > 0 ||
289             actual_end > fs_info->sectorsize ||
290             data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
291             (!compressed_size &&
292             (actual_end & (fs_info->sectorsize - 1)) == 0) ||
293             end + 1 < isize ||
294             data_len > fs_info->max_inline) {
295                 return 1;
296         }
297
298         path = btrfs_alloc_path();
299         if (!path)
300                 return -ENOMEM;
301
302         trans = btrfs_join_transaction(root);
303         if (IS_ERR(trans)) {
304                 btrfs_free_path(path);
305                 return PTR_ERR(trans);
306         }
307         trans->block_rsv = &BTRFS_I(inode)->block_rsv;
308
309         if (compressed_size && compressed_pages)
310                 extent_item_size = btrfs_file_extent_calc_inline_size(
311                    compressed_size);
312         else
313                 extent_item_size = btrfs_file_extent_calc_inline_size(
314                     inline_len);
315
316         ret = __btrfs_drop_extents(trans, root, inode, path,
317                                    start, aligned_end, NULL,
318                                    1, 1, extent_item_size, &extent_inserted);
319         if (ret) {
320                 btrfs_abort_transaction(trans, ret);
321                 goto out;
322         }
323
324         if (isize > actual_end)
325                 inline_len = min_t(u64, isize, actual_end);
326         ret = insert_inline_extent(trans, path, extent_inserted,
327                                    root, inode, start,
328                                    inline_len, compressed_size,
329                                    compress_type, compressed_pages);
330         if (ret && ret != -ENOSPC) {
331                 btrfs_abort_transaction(trans, ret);
332                 goto out;
333         } else if (ret == -ENOSPC) {
334                 ret = 1;
335                 goto out;
336         }
337
338         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
339         btrfs_drop_extent_cache(BTRFS_I(inode), start, aligned_end - 1, 0);
340 out:
341         /*
342          * Don't forget to free the reserved space, as for inlined extent
343          * it won't count as data extent, free them directly here.
344          * And at reserve time, it's always aligned to page size, so
345          * just free one page here.
346          */
347         btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE);
348         btrfs_free_path(path);
349         btrfs_end_transaction(trans);
350         return ret;
351 }
352
353 struct async_extent {
354         u64 start;
355         u64 ram_size;
356         u64 compressed_size;
357         struct page **pages;
358         unsigned long nr_pages;
359         int compress_type;
360         struct list_head list;
361 };
362
363 struct async_cow {
364         struct inode *inode;
365         struct btrfs_root *root;
366         struct page *locked_page;
367         u64 start;
368         u64 end;
369         unsigned int write_flags;
370         struct list_head extents;
371         struct btrfs_work work;
372 };
373
374 static noinline int add_async_extent(struct async_cow *cow,
375                                      u64 start, u64 ram_size,
376                                      u64 compressed_size,
377                                      struct page **pages,
378                                      unsigned long nr_pages,
379                                      int compress_type)
380 {
381         struct async_extent *async_extent;
382
383         async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
384         BUG_ON(!async_extent); /* -ENOMEM */
385         async_extent->start = start;
386         async_extent->ram_size = ram_size;
387         async_extent->compressed_size = compressed_size;
388         async_extent->pages = pages;
389         async_extent->nr_pages = nr_pages;
390         async_extent->compress_type = compress_type;
391         list_add_tail(&async_extent->list, &cow->extents);
392         return 0;
393 }
394
395 static inline int inode_need_compress(struct inode *inode, u64 start, u64 end)
396 {
397         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
398
399         /* force compress */
400         if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
401                 return 1;
402         /* defrag ioctl */
403         if (BTRFS_I(inode)->defrag_compress)
404                 return 1;
405         /* bad compression ratios */
406         if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
407                 return 0;
408         if (btrfs_test_opt(fs_info, COMPRESS) ||
409             BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS ||
410             BTRFS_I(inode)->prop_compress)
411                 return btrfs_compress_heuristic(inode, start, end);
412         return 0;
413 }
414
415 static inline void inode_should_defrag(struct btrfs_inode *inode,
416                 u64 start, u64 end, u64 num_bytes, u64 small_write)
417 {
418         /* If this is a small write inside eof, kick off a defrag */
419         if (num_bytes < small_write &&
420             (start > 0 || end + 1 < inode->disk_i_size))
421                 btrfs_add_inode_defrag(NULL, inode);
422 }
423
424 /*
425  * we create compressed extents in two phases.  The first
426  * phase compresses a range of pages that have already been
427  * locked (both pages and state bits are locked).
428  *
429  * This is done inside an ordered work queue, and the compression
430  * is spread across many cpus.  The actual IO submission is step
431  * two, and the ordered work queue takes care of making sure that
432  * happens in the same order things were put onto the queue by
433  * writepages and friends.
434  *
435  * If this code finds it can't get good compression, it puts an
436  * entry onto the work queue to write the uncompressed bytes.  This
437  * makes sure that both compressed inodes and uncompressed inodes
438  * are written in the same order that the flusher thread sent them
439  * down.
440  */
441 static noinline void compress_file_range(struct inode *inode,
442                                         struct page *locked_page,
443                                         u64 start, u64 end,
444                                         struct async_cow *async_cow,
445                                         int *num_added)
446 {
447         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
448         u64 blocksize = fs_info->sectorsize;
449         u64 actual_end;
450         u64 isize = i_size_read(inode);
451         int ret = 0;
452         struct page **pages = NULL;
453         unsigned long nr_pages;
454         unsigned long total_compressed = 0;
455         unsigned long total_in = 0;
456         int i;
457         int will_compress;
458         int compress_type = fs_info->compress_type;
459         int redirty = 0;
460
461         inode_should_defrag(BTRFS_I(inode), start, end, end - start + 1,
462                         SZ_16K);
463
464         actual_end = min_t(u64, isize, end + 1);
465 again:
466         will_compress = 0;
467         nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
468         BUILD_BUG_ON((BTRFS_MAX_COMPRESSED % PAGE_SIZE) != 0);
469         nr_pages = min_t(unsigned long, nr_pages,
470                         BTRFS_MAX_COMPRESSED / PAGE_SIZE);
471
472         /*
473          * we don't want to send crud past the end of i_size through
474          * compression, that's just a waste of CPU time.  So, if the
475          * end of the file is before the start of our current
476          * requested range of bytes, we bail out to the uncompressed
477          * cleanup code that can deal with all of this.
478          *
479          * It isn't really the fastest way to fix things, but this is a
480          * very uncommon corner.
481          */
482         if (actual_end <= start)
483                 goto cleanup_and_bail_uncompressed;
484
485         total_compressed = actual_end - start;
486
487         /*
488          * skip compression for a small file range(<=blocksize) that
489          * isn't an inline extent, since it doesn't save disk space at all.
490          */
491         if (total_compressed <= blocksize &&
492            (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
493                 goto cleanup_and_bail_uncompressed;
494
495         total_compressed = min_t(unsigned long, total_compressed,
496                         BTRFS_MAX_UNCOMPRESSED);
497         total_in = 0;
498         ret = 0;
499
500         /*
501          * we do compression for mount -o compress and when the
502          * inode has not been flagged as nocompress.  This flag can
503          * change at any time if we discover bad compression ratios.
504          */
505         if (inode_need_compress(inode, start, end)) {
506                 WARN_ON(pages);
507                 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
508                 if (!pages) {
509                         /* just bail out to the uncompressed code */
510                         goto cont;
511                 }
512
513                 if (BTRFS_I(inode)->defrag_compress)
514                         compress_type = BTRFS_I(inode)->defrag_compress;
515                 else if (BTRFS_I(inode)->prop_compress)
516                         compress_type = BTRFS_I(inode)->prop_compress;
517
518                 /*
519                  * we need to call clear_page_dirty_for_io on each
520                  * page in the range.  Otherwise applications with the file
521                  * mmap'd can wander in and change the page contents while
522                  * we are compressing them.
523                  *
524                  * If the compression fails for any reason, we set the pages
525                  * dirty again later on.
526                  *
527                  * Note that the remaining part is redirtied, the start pointer
528                  * has moved, the end is the original one.
529                  */
530                 if (!redirty) {
531                         extent_range_clear_dirty_for_io(inode, start, end);
532                         redirty = 1;
533                 }
534
535                 /* Compression level is applied here and only here */
536                 ret = btrfs_compress_pages(
537                         compress_type | (fs_info->compress_level << 4),
538                                            inode->i_mapping, start,
539                                            pages,
540                                            &nr_pages,
541                                            &total_in,
542                                            &total_compressed);
543
544                 if (!ret) {
545                         unsigned long offset = total_compressed &
546                                 (PAGE_SIZE - 1);
547                         struct page *page = pages[nr_pages - 1];
548                         char *kaddr;
549
550                         /* zero the tail end of the last page, we might be
551                          * sending it down to disk
552                          */
553                         if (offset) {
554                                 kaddr = kmap_atomic(page);
555                                 memset(kaddr + offset, 0,
556                                        PAGE_SIZE - offset);
557                                 kunmap_atomic(kaddr);
558                         }
559                         will_compress = 1;
560                 }
561         }
562 cont:
563         if (start == 0) {
564                 /* lets try to make an inline extent */
565                 if (ret || total_in < actual_end) {
566                         /* we didn't compress the entire range, try
567                          * to make an uncompressed inline extent.
568                          */
569                         ret = cow_file_range_inline(inode, start, end, 0,
570                                                     BTRFS_COMPRESS_NONE, NULL);
571                 } else {
572                         /* try making a compressed inline extent */
573                         ret = cow_file_range_inline(inode, start, end,
574                                                     total_compressed,
575                                                     compress_type, pages);
576                 }
577                 if (ret <= 0) {
578                         unsigned long clear_flags = EXTENT_DELALLOC |
579                                 EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
580                                 EXTENT_DO_ACCOUNTING;
581                         unsigned long page_error_op;
582
583                         page_error_op = ret < 0 ? PAGE_SET_ERROR : 0;
584
585                         /*
586                          * inline extent creation worked or returned error,
587                          * we don't need to create any more async work items.
588                          * Unlock and free up our temp pages.
589                          *
590                          * We use DO_ACCOUNTING here because we need the
591                          * delalloc_release_metadata to be done _after_ we drop
592                          * our outstanding extent for clearing delalloc for this
593                          * range.
594                          */
595                         extent_clear_unlock_delalloc(inode, start, end, end,
596                                                      NULL, clear_flags,
597                                                      PAGE_UNLOCK |
598                                                      PAGE_CLEAR_DIRTY |
599                                                      PAGE_SET_WRITEBACK |
600                                                      page_error_op |
601                                                      PAGE_END_WRITEBACK);
602                         goto free_pages_out;
603                 }
604         }
605
606         if (will_compress) {
607                 /*
608                  * we aren't doing an inline extent round the compressed size
609                  * up to a block size boundary so the allocator does sane
610                  * things
611                  */
612                 total_compressed = ALIGN(total_compressed, blocksize);
613
614                 /*
615                  * one last check to make sure the compression is really a
616                  * win, compare the page count read with the blocks on disk,
617                  * compression must free at least one sector size
618                  */
619                 total_in = ALIGN(total_in, PAGE_SIZE);
620                 if (total_compressed + blocksize <= total_in) {
621                         *num_added += 1;
622
623                         /*
624                          * The async work queues will take care of doing actual
625                          * allocation on disk for these compressed pages, and
626                          * will submit them to the elevator.
627                          */
628                         add_async_extent(async_cow, start, total_in,
629                                         total_compressed, pages, nr_pages,
630                                         compress_type);
631
632                         if (start + total_in < end) {
633                                 start += total_in;
634                                 pages = NULL;
635                                 cond_resched();
636                                 goto again;
637                         }
638                         return;
639                 }
640         }
641         if (pages) {
642                 /*
643                  * the compression code ran but failed to make things smaller,
644                  * free any pages it allocated and our page pointer array
645                  */
646                 for (i = 0; i < nr_pages; i++) {
647                         WARN_ON(pages[i]->mapping);
648                         put_page(pages[i]);
649                 }
650                 kfree(pages);
651                 pages = NULL;
652                 total_compressed = 0;
653                 nr_pages = 0;
654
655                 /* flag the file so we don't compress in the future */
656                 if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) &&
657                     !(BTRFS_I(inode)->prop_compress)) {
658                         BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
659                 }
660         }
661 cleanup_and_bail_uncompressed:
662         /*
663          * No compression, but we still need to write the pages in the file
664          * we've been given so far.  redirty the locked page if it corresponds
665          * to our extent and set things up for the async work queue to run
666          * cow_file_range to do the normal delalloc dance.
667          */
668         if (page_offset(locked_page) >= start &&
669             page_offset(locked_page) <= end)
670                 __set_page_dirty_nobuffers(locked_page);
671                 /* unlocked later on in the async handlers */
672
673         if (redirty)
674                 extent_range_redirty_for_io(inode, start, end);
675         add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0,
676                          BTRFS_COMPRESS_NONE);
677         *num_added += 1;
678
679         return;
680
681 free_pages_out:
682         for (i = 0; i < nr_pages; i++) {
683                 WARN_ON(pages[i]->mapping);
684                 put_page(pages[i]);
685         }
686         kfree(pages);
687 }
688
689 static void free_async_extent_pages(struct async_extent *async_extent)
690 {
691         int i;
692
693         if (!async_extent->pages)
694                 return;
695
696         for (i = 0; i < async_extent->nr_pages; i++) {
697                 WARN_ON(async_extent->pages[i]->mapping);
698                 put_page(async_extent->pages[i]);
699         }
700         kfree(async_extent->pages);
701         async_extent->nr_pages = 0;
702         async_extent->pages = NULL;
703 }
704
705 /*
706  * phase two of compressed writeback.  This is the ordered portion
707  * of the code, which only gets called in the order the work was
708  * queued.  We walk all the async extents created by compress_file_range
709  * and send them down to the disk.
710  */
711 static noinline void submit_compressed_extents(struct inode *inode,
712                                               struct async_cow *async_cow)
713 {
714         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
715         struct async_extent *async_extent;
716         u64 alloc_hint = 0;
717         struct btrfs_key ins;
718         struct extent_map *em;
719         struct btrfs_root *root = BTRFS_I(inode)->root;
720         struct extent_io_tree *io_tree;
721         int ret = 0;
722
723 again:
724         while (!list_empty(&async_cow->extents)) {
725                 async_extent = list_entry(async_cow->extents.next,
726                                           struct async_extent, list);
727                 list_del(&async_extent->list);
728
729                 io_tree = &BTRFS_I(inode)->io_tree;
730
731 retry:
732                 /* did the compression code fall back to uncompressed IO? */
733                 if (!async_extent->pages) {
734                         int page_started = 0;
735                         unsigned long nr_written = 0;
736
737                         lock_extent(io_tree, async_extent->start,
738                                          async_extent->start +
739                                          async_extent->ram_size - 1);
740
741                         /* allocate blocks */
742                         ret = cow_file_range(inode, async_cow->locked_page,
743                                              async_extent->start,
744                                              async_extent->start +
745                                              async_extent->ram_size - 1,
746                                              async_extent->start +
747                                              async_extent->ram_size - 1,
748                                              &page_started, &nr_written, 0,
749                                              NULL);
750
751                         /* JDM XXX */
752
753                         /*
754                          * if page_started, cow_file_range inserted an
755                          * inline extent and took care of all the unlocking
756                          * and IO for us.  Otherwise, we need to submit
757                          * all those pages down to the drive.
758                          */
759                         if (!page_started && !ret)
760                                 extent_write_locked_range(inode,
761                                                   async_extent->start,
762                                                   async_extent->start +
763                                                   async_extent->ram_size - 1,
764                                                   WB_SYNC_ALL);
765                         else if (ret)
766                                 unlock_page(async_cow->locked_page);
767                         kfree(async_extent);
768                         cond_resched();
769                         continue;
770                 }
771
772                 lock_extent(io_tree, async_extent->start,
773                             async_extent->start + async_extent->ram_size - 1);
774
775                 ret = btrfs_reserve_extent(root, async_extent->ram_size,
776                                            async_extent->compressed_size,
777                                            async_extent->compressed_size,
778                                            0, alloc_hint, &ins, 1, 1);
779                 if (ret) {
780                         free_async_extent_pages(async_extent);
781
782                         if (ret == -ENOSPC) {
783                                 unlock_extent(io_tree, async_extent->start,
784                                               async_extent->start +
785                                               async_extent->ram_size - 1);
786
787                                 /*
788                                  * we need to redirty the pages if we decide to
789                                  * fallback to uncompressed IO, otherwise we
790                                  * will not submit these pages down to lower
791                                  * layers.
792                                  */
793                                 extent_range_redirty_for_io(inode,
794                                                 async_extent->start,
795                                                 async_extent->start +
796                                                 async_extent->ram_size - 1);
797
798                                 goto retry;
799                         }
800                         goto out_free;
801                 }
802                 /*
803                  * here we're doing allocation and writeback of the
804                  * compressed pages
805                  */
806                 em = create_io_em(inode, async_extent->start,
807                                   async_extent->ram_size, /* len */
808                                   async_extent->start, /* orig_start */
809                                   ins.objectid, /* block_start */
810                                   ins.offset, /* block_len */
811                                   ins.offset, /* orig_block_len */
812                                   async_extent->ram_size, /* ram_bytes */
813                                   async_extent->compress_type,
814                                   BTRFS_ORDERED_COMPRESSED);
815                 if (IS_ERR(em))
816                         /* ret value is not necessary due to void function */
817                         goto out_free_reserve;
818                 free_extent_map(em);
819
820                 ret = btrfs_add_ordered_extent_compress(inode,
821                                                 async_extent->start,
822                                                 ins.objectid,
823                                                 async_extent->ram_size,
824                                                 ins.offset,
825                                                 BTRFS_ORDERED_COMPRESSED,
826                                                 async_extent->compress_type);
827                 if (ret) {
828                         btrfs_drop_extent_cache(BTRFS_I(inode),
829                                                 async_extent->start,
830                                                 async_extent->start +
831                                                 async_extent->ram_size - 1, 0);
832                         goto out_free_reserve;
833                 }
834                 btrfs_dec_block_group_reservations(fs_info, ins.objectid);
835
836                 /*
837                  * clear dirty, set writeback and unlock the pages.
838                  */
839                 extent_clear_unlock_delalloc(inode, async_extent->start,
840                                 async_extent->start +
841                                 async_extent->ram_size - 1,
842                                 async_extent->start +
843                                 async_extent->ram_size - 1,
844                                 NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
845                                 PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
846                                 PAGE_SET_WRITEBACK);
847                 if (btrfs_submit_compressed_write(inode,
848                                     async_extent->start,
849                                     async_extent->ram_size,
850                                     ins.objectid,
851                                     ins.offset, async_extent->pages,
852                                     async_extent->nr_pages,
853                                     async_cow->write_flags)) {
854                         struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
855                         struct page *p = async_extent->pages[0];
856                         const u64 start = async_extent->start;
857                         const u64 end = start + async_extent->ram_size - 1;
858
859                         p->mapping = inode->i_mapping;
860                         tree->ops->writepage_end_io_hook(p, start, end,
861                                                          NULL, 0);
862                         p->mapping = NULL;
863                         extent_clear_unlock_delalloc(inode, start, end, end,
864                                                      NULL, 0,
865                                                      PAGE_END_WRITEBACK |
866                                                      PAGE_SET_ERROR);
867                         free_async_extent_pages(async_extent);
868                 }
869                 alloc_hint = ins.objectid + ins.offset;
870                 kfree(async_extent);
871                 cond_resched();
872         }
873         return;
874 out_free_reserve:
875         btrfs_dec_block_group_reservations(fs_info, ins.objectid);
876         btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
877 out_free:
878         extent_clear_unlock_delalloc(inode, async_extent->start,
879                                      async_extent->start +
880                                      async_extent->ram_size - 1,
881                                      async_extent->start +
882                                      async_extent->ram_size - 1,
883                                      NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
884                                      EXTENT_DELALLOC_NEW |
885                                      EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
886                                      PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
887                                      PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK |
888                                      PAGE_SET_ERROR);
889         free_async_extent_pages(async_extent);
890         kfree(async_extent);
891         goto again;
892 }
893
894 static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
895                                       u64 num_bytes)
896 {
897         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
898         struct extent_map *em;
899         u64 alloc_hint = 0;
900
901         read_lock(&em_tree->lock);
902         em = search_extent_mapping(em_tree, start, num_bytes);
903         if (em) {
904                 /*
905                  * if block start isn't an actual block number then find the
906                  * first block in this inode and use that as a hint.  If that
907                  * block is also bogus then just don't worry about it.
908                  */
909                 if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
910                         free_extent_map(em);
911                         em = search_extent_mapping(em_tree, 0, 0);
912                         if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
913                                 alloc_hint = em->block_start;
914                         if (em)
915                                 free_extent_map(em);
916                 } else {
917                         alloc_hint = em->block_start;
918                         free_extent_map(em);
919                 }
920         }
921         read_unlock(&em_tree->lock);
922
923         return alloc_hint;
924 }
925
926 /*
927  * when extent_io.c finds a delayed allocation range in the file,
928  * the call backs end up in this code.  The basic idea is to
929  * allocate extents on disk for the range, and create ordered data structs
930  * in ram to track those extents.
931  *
932  * locked_page is the page that writepage had locked already.  We use
933  * it to make sure we don't do extra locks or unlocks.
934  *
935  * *page_started is set to one if we unlock locked_page and do everything
936  * required to start IO on it.  It may be clean and already done with
937  * IO when we return.
938  */
939 static noinline int cow_file_range(struct inode *inode,
940                                    struct page *locked_page,
941                                    u64 start, u64 end, u64 delalloc_end,
942                                    int *page_started, unsigned long *nr_written,
943                                    int unlock, struct btrfs_dedupe_hash *hash)
944 {
945         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
946         struct btrfs_root *root = BTRFS_I(inode)->root;
947         u64 alloc_hint = 0;
948         u64 num_bytes;
949         unsigned long ram_size;
950         u64 cur_alloc_size = 0;
951         u64 blocksize = fs_info->sectorsize;
952         struct btrfs_key ins;
953         struct extent_map *em;
954         unsigned clear_bits;
955         unsigned long page_ops;
956         bool extent_reserved = false;
957         int ret = 0;
958
959         if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
960                 WARN_ON_ONCE(1);
961                 ret = -EINVAL;
962                 goto out_unlock;
963         }
964
965         num_bytes = ALIGN(end - start + 1, blocksize);
966         num_bytes = max(blocksize,  num_bytes);
967         ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy));
968
969         inode_should_defrag(BTRFS_I(inode), start, end, num_bytes, SZ_64K);
970
971         if (start == 0) {
972                 /* lets try to make an inline extent */
973                 ret = cow_file_range_inline(inode, start, end, 0,
974                                             BTRFS_COMPRESS_NONE, NULL);
975                 if (ret == 0) {
976                         /*
977                          * We use DO_ACCOUNTING here because we need the
978                          * delalloc_release_metadata to be run _after_ we drop
979                          * our outstanding extent for clearing delalloc for this
980                          * range.
981                          */
982                         extent_clear_unlock_delalloc(inode, start, end,
983                                      delalloc_end, NULL,
984                                      EXTENT_LOCKED | EXTENT_DELALLOC |
985                                      EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
986                                      EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
987                                      PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
988                                      PAGE_END_WRITEBACK);
989                         *nr_written = *nr_written +
990                              (end - start + PAGE_SIZE) / PAGE_SIZE;
991                         *page_started = 1;
992                         goto out;
993                 } else if (ret < 0) {
994                         goto out_unlock;
995                 }
996         }
997
998         alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
999         btrfs_drop_extent_cache(BTRFS_I(inode), start,
1000                         start + num_bytes - 1, 0);
1001
1002         while (num_bytes > 0) {
1003                 cur_alloc_size = num_bytes;
1004                 ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
1005                                            fs_info->sectorsize, 0, alloc_hint,
1006                                            &ins, 1, 1);
1007                 if (ret < 0)
1008                         goto out_unlock;
1009                 cur_alloc_size = ins.offset;
1010                 extent_reserved = true;
1011
1012                 ram_size = ins.offset;
1013                 em = create_io_em(inode, start, ins.offset, /* len */
1014                                   start, /* orig_start */
1015                                   ins.objectid, /* block_start */
1016                                   ins.offset, /* block_len */
1017                                   ins.offset, /* orig_block_len */
1018                                   ram_size, /* ram_bytes */
1019                                   BTRFS_COMPRESS_NONE, /* compress_type */
1020                                   BTRFS_ORDERED_REGULAR /* type */);
1021                 if (IS_ERR(em))
1022                         goto out_reserve;
1023                 free_extent_map(em);
1024
1025                 ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
1026                                                ram_size, cur_alloc_size, 0);
1027                 if (ret)
1028                         goto out_drop_extent_cache;
1029
1030                 if (root->root_key.objectid ==
1031                     BTRFS_DATA_RELOC_TREE_OBJECTID) {
1032                         ret = btrfs_reloc_clone_csums(inode, start,
1033                                                       cur_alloc_size);
1034                         /*
1035                          * Only drop cache here, and process as normal.
1036                          *
1037                          * We must not allow extent_clear_unlock_delalloc()
1038                          * at out_unlock label to free meta of this ordered
1039                          * extent, as its meta should be freed by
1040                          * btrfs_finish_ordered_io().
1041                          *
1042                          * So we must continue until @start is increased to
1043                          * skip current ordered extent.
1044                          */
1045                         if (ret)
1046                                 btrfs_drop_extent_cache(BTRFS_I(inode), start,
1047                                                 start + ram_size - 1, 0);
1048                 }
1049
1050                 btrfs_dec_block_group_reservations(fs_info, ins.objectid);
1051
1052                 /* we're not doing compressed IO, don't unlock the first
1053                  * page (which the caller expects to stay locked), don't
1054                  * clear any dirty bits and don't set any writeback bits
1055                  *
1056                  * Do set the Private2 bit so we know this page was properly
1057                  * setup for writepage
1058                  */
1059                 page_ops = unlock ? PAGE_UNLOCK : 0;
1060                 page_ops |= PAGE_SET_PRIVATE2;
1061
1062                 extent_clear_unlock_delalloc(inode, start,
1063                                              start + ram_size - 1,
1064                                              delalloc_end, locked_page,
1065                                              EXTENT_LOCKED | EXTENT_DELALLOC,
1066                                              page_ops);
1067                 if (num_bytes < cur_alloc_size)
1068                         num_bytes = 0;
1069                 else
1070                         num_bytes -= cur_alloc_size;
1071                 alloc_hint = ins.objectid + ins.offset;
1072                 start += cur_alloc_size;
1073                 extent_reserved = false;
1074
1075                 /*
1076                  * btrfs_reloc_clone_csums() error, since start is increased
1077                  * extent_clear_unlock_delalloc() at out_unlock label won't
1078                  * free metadata of current ordered extent, we're OK to exit.
1079                  */
1080                 if (ret)
1081                         goto out_unlock;
1082         }
1083 out:
1084         return ret;
1085
1086 out_drop_extent_cache:
1087         btrfs_drop_extent_cache(BTRFS_I(inode), start, start + ram_size - 1, 0);
1088 out_reserve:
1089         btrfs_dec_block_group_reservations(fs_info, ins.objectid);
1090         btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
1091 out_unlock:
1092         clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
1093                 EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
1094         page_ops = PAGE_UNLOCK | PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
1095                 PAGE_END_WRITEBACK;
1096         /*
1097          * If we reserved an extent for our delalloc range (or a subrange) and
1098          * failed to create the respective ordered extent, then it means that
1099          * when we reserved the extent we decremented the extent's size from
1100          * the data space_info's bytes_may_use counter and incremented the
1101          * space_info's bytes_reserved counter by the same amount. We must make
1102          * sure extent_clear_unlock_delalloc() does not try to decrement again
1103          * the data space_info's bytes_may_use counter, therefore we do not pass
1104          * it the flag EXTENT_CLEAR_DATA_RESV.
1105          */
1106         if (extent_reserved) {
1107                 extent_clear_unlock_delalloc(inode, start,
1108                                              start + cur_alloc_size,
1109                                              start + cur_alloc_size,
1110                                              locked_page,
1111                                              clear_bits,
1112                                              page_ops);
1113                 start += cur_alloc_size;
1114                 if (start >= end)
1115                         goto out;
1116         }
1117         extent_clear_unlock_delalloc(inode, start, end, delalloc_end,
1118                                      locked_page,
1119                                      clear_bits | EXTENT_CLEAR_DATA_RESV,
1120                                      page_ops);
1121         goto out;
1122 }
1123
1124 /*
1125  * work queue call back to started compression on a file and pages
1126  */
1127 static noinline void async_cow_start(struct btrfs_work *work)
1128 {
1129         struct async_cow *async_cow;
1130         int num_added = 0;
1131         async_cow = container_of(work, struct async_cow, work);
1132
1133         compress_file_range(async_cow->inode, async_cow->locked_page,
1134                             async_cow->start, async_cow->end, async_cow,
1135                             &num_added);
1136         if (num_added == 0) {
1137                 btrfs_add_delayed_iput(async_cow->inode);
1138                 async_cow->inode = NULL;
1139         }
1140 }
1141
1142 /*
1143  * work queue call back to submit previously compressed pages
1144  */
1145 static noinline void async_cow_submit(struct btrfs_work *work)
1146 {
1147         struct btrfs_fs_info *fs_info;
1148         struct async_cow *async_cow;
1149         struct btrfs_root *root;
1150         unsigned long nr_pages;
1151
1152         async_cow = container_of(work, struct async_cow, work);
1153
1154         root = async_cow->root;
1155         fs_info = root->fs_info;
1156         nr_pages = (async_cow->end - async_cow->start + PAGE_SIZE) >>
1157                 PAGE_SHIFT;
1158
1159         /*
1160          * atomic_sub_return implies a barrier for waitqueue_active
1161          */
1162         if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
1163             5 * SZ_1M &&
1164             waitqueue_active(&fs_info->async_submit_wait))
1165                 wake_up(&fs_info->async_submit_wait);
1166
1167         if (async_cow->inode)
1168                 submit_compressed_extents(async_cow->inode, async_cow);
1169 }
1170
1171 static noinline void async_cow_free(struct btrfs_work *work)
1172 {
1173         struct async_cow *async_cow;
1174         async_cow = container_of(work, struct async_cow, work);
1175         if (async_cow->inode)
1176                 btrfs_add_delayed_iput(async_cow->inode);
1177         kfree(async_cow);
1178 }
1179
1180 static int cow_file_range_async(struct inode *inode, struct page *locked_page,
1181                                 u64 start, u64 end, int *page_started,
1182                                 unsigned long *nr_written,
1183                                 unsigned int write_flags)
1184 {
1185         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1186         struct async_cow *async_cow;
1187         struct btrfs_root *root = BTRFS_I(inode)->root;
1188         unsigned long nr_pages;
1189         u64 cur_end;
1190
1191         clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
1192                          1, 0, NULL);
1193         while (start < end) {
1194                 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
1195                 BUG_ON(!async_cow); /* -ENOMEM */
1196                 async_cow->inode = igrab(inode);
1197                 async_cow->root = root;
1198                 async_cow->locked_page = locked_page;
1199                 async_cow->start = start;
1200                 async_cow->write_flags = write_flags;
1201
1202                 if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS &&
1203                     !btrfs_test_opt(fs_info, FORCE_COMPRESS))
1204                         cur_end = end;
1205                 else
1206                         cur_end = min(end, start + SZ_512K - 1);
1207
1208                 async_cow->end = cur_end;
1209                 INIT_LIST_HEAD(&async_cow->extents);
1210
1211                 btrfs_init_work(&async_cow->work,
1212                                 btrfs_delalloc_helper,
1213                                 async_cow_start, async_cow_submit,
1214                                 async_cow_free);
1215
1216                 nr_pages = (cur_end - start + PAGE_SIZE) >>
1217                         PAGE_SHIFT;
1218                 atomic_add(nr_pages, &fs_info->async_delalloc_pages);
1219
1220                 btrfs_queue_work(fs_info->delalloc_workers, &async_cow->work);
1221
1222                 *nr_written += nr_pages;
1223                 start = cur_end + 1;
1224         }
1225         *page_started = 1;
1226         return 0;
1227 }
1228
1229 static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
1230                                         u64 bytenr, u64 num_bytes)
1231 {
1232         int ret;
1233         struct btrfs_ordered_sum *sums;
1234         LIST_HEAD(list);
1235
1236         ret = btrfs_lookup_csums_range(fs_info->csum_root, bytenr,
1237                                        bytenr + num_bytes - 1, &list, 0);
1238         if (ret == 0 && list_empty(&list))
1239                 return 0;
1240
1241         while (!list_empty(&list)) {
1242                 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
1243                 list_del(&sums->list);
1244                 kfree(sums);
1245         }
1246         if (ret < 0)
1247                 return ret;
1248         return 1;
1249 }
1250
1251 /*
1252  * when nowcow writeback call back.  This checks for snapshots or COW copies
1253  * of the extents that exist in the file, and COWs the file as required.
1254  *
1255  * If no cow copies or snapshots exist, we write directly to the existing
1256  * blocks on disk
1257  */
1258 static noinline int run_delalloc_nocow(struct inode *inode,
1259                                        struct page *locked_page,
1260                               u64 start, u64 end, int *page_started, int force,
1261                               unsigned long *nr_written)
1262 {
1263         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1264         struct btrfs_root *root = BTRFS_I(inode)->root;
1265         struct extent_buffer *leaf;
1266         struct btrfs_path *path;
1267         struct btrfs_file_extent_item *fi;
1268         struct btrfs_key found_key;
1269         struct extent_map *em;
1270         u64 cow_start;
1271         u64 cur_offset;
1272         u64 extent_end;
1273         u64 extent_offset;
1274         u64 disk_bytenr;
1275         u64 num_bytes;
1276         u64 disk_num_bytes;
1277         u64 ram_bytes;
1278         int extent_type;
1279         int ret, err;
1280         int type;
1281         int nocow;
1282         int check_prev = 1;
1283         bool nolock;
1284         u64 ino = btrfs_ino(BTRFS_I(inode));
1285
1286         path = btrfs_alloc_path();
1287         if (!path) {
1288                 extent_clear_unlock_delalloc(inode, start, end, end,
1289                                              locked_page,
1290                                              EXTENT_LOCKED | EXTENT_DELALLOC |
1291                                              EXTENT_DO_ACCOUNTING |
1292                                              EXTENT_DEFRAG, PAGE_UNLOCK |
1293                                              PAGE_CLEAR_DIRTY |
1294                                              PAGE_SET_WRITEBACK |
1295                                              PAGE_END_WRITEBACK);
1296                 return -ENOMEM;
1297         }
1298
1299         nolock = btrfs_is_free_space_inode(BTRFS_I(inode));
1300
1301         cow_start = (u64)-1;
1302         cur_offset = start;
1303         while (1) {
1304                 ret = btrfs_lookup_file_extent(NULL, root, path, ino,
1305                                                cur_offset, 0);
1306                 if (ret < 0)
1307                         goto error;
1308                 if (ret > 0 && path->slots[0] > 0 && check_prev) {
1309                         leaf = path->nodes[0];
1310                         btrfs_item_key_to_cpu(leaf, &found_key,
1311                                               path->slots[0] - 1);
1312                         if (found_key.objectid == ino &&
1313                             found_key.type == BTRFS_EXTENT_DATA_KEY)
1314                                 path->slots[0]--;
1315                 }
1316                 check_prev = 0;
1317 next_slot:
1318                 leaf = path->nodes[0];
1319                 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1320                         ret = btrfs_next_leaf(root, path);
1321                         if (ret < 0) {
1322                                 if (cow_start != (u64)-1)
1323                                         cur_offset = cow_start;
1324                                 goto error;
1325                         }
1326                         if (ret > 0)
1327                                 break;
1328                         leaf = path->nodes[0];
1329                 }
1330
1331                 nocow = 0;
1332                 disk_bytenr = 0;
1333                 num_bytes = 0;
1334                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1335
1336                 if (found_key.objectid > ino)
1337                         break;
1338                 if (WARN_ON_ONCE(found_key.objectid < ino) ||
1339                     found_key.type < BTRFS_EXTENT_DATA_KEY) {
1340                         path->slots[0]++;
1341                         goto next_slot;
1342                 }
1343                 if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
1344                     found_key.offset > end)
1345                         break;
1346
1347                 if (found_key.offset > cur_offset) {
1348                         extent_end = found_key.offset;
1349                         extent_type = 0;
1350                         goto out_check;
1351                 }
1352
1353                 fi = btrfs_item_ptr(leaf, path->slots[0],
1354                                     struct btrfs_file_extent_item);
1355                 extent_type = btrfs_file_extent_type(leaf, fi);
1356
1357                 ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
1358                 if (extent_type == BTRFS_FILE_EXTENT_REG ||
1359                     extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1360                         disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1361                         extent_offset = btrfs_file_extent_offset(leaf, fi);
1362                         extent_end = found_key.offset +
1363                                 btrfs_file_extent_num_bytes(leaf, fi);
1364                         disk_num_bytes =
1365                                 btrfs_file_extent_disk_num_bytes(leaf, fi);
1366                         if (extent_end <= start) {
1367                                 path->slots[0]++;
1368                                 goto next_slot;
1369                         }
1370                         if (disk_bytenr == 0)
1371                                 goto out_check;
1372                         if (btrfs_file_extent_compression(leaf, fi) ||
1373                             btrfs_file_extent_encryption(leaf, fi) ||
1374                             btrfs_file_extent_other_encoding(leaf, fi))
1375                                 goto out_check;
1376                         if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
1377                                 goto out_check;
1378                         if (btrfs_extent_readonly(fs_info, disk_bytenr))
1379                                 goto out_check;
1380                         ret = btrfs_cross_ref_exist(root, ino,
1381                                                     found_key.offset -
1382                                                     extent_offset, disk_bytenr);
1383                         if (ret) {
1384                                 /*
1385                                  * ret could be -EIO if the above fails to read
1386                                  * metadata.
1387                                  */
1388                                 if (ret < 0) {
1389                                         if (cow_start != (u64)-1)
1390                                                 cur_offset = cow_start;
1391                                         goto error;
1392                                 }
1393
1394                                 WARN_ON_ONCE(nolock);
1395                                 goto out_check;
1396                         }
1397                         disk_bytenr += extent_offset;
1398                         disk_bytenr += cur_offset - found_key.offset;
1399                         num_bytes = min(end + 1, extent_end) - cur_offset;
1400                         /*
1401                          * if there are pending snapshots for this root,
1402                          * we fall into common COW way.
1403                          */
1404                         if (!nolock) {
1405                                 err = btrfs_start_write_no_snapshotting(root);
1406                                 if (!err)
1407                                         goto out_check;
1408                         }
1409                         /*
1410                          * force cow if csum exists in the range.
1411                          * this ensure that csum for a given extent are
1412                          * either valid or do not exist.
1413                          */
1414                         ret = csum_exist_in_range(fs_info, disk_bytenr,
1415                                                   num_bytes);
1416                         if (ret) {
1417                                 if (!nolock)
1418                                         btrfs_end_write_no_snapshotting(root);
1419
1420                                 /*
1421                                  * ret could be -EIO if the above fails to read
1422                                  * metadata.
1423                                  */
1424                                 if (ret < 0) {
1425                                         if (cow_start != (u64)-1)
1426                                                 cur_offset = cow_start;
1427                                         goto error;
1428                                 }
1429                                 WARN_ON_ONCE(nolock);
1430                                 goto out_check;
1431                         }
1432                         if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr)) {
1433                                 if (!nolock)
1434                                         btrfs_end_write_no_snapshotting(root);
1435                                 goto out_check;
1436                         }
1437                         nocow = 1;
1438                 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1439                         extent_end = found_key.offset +
1440                                 btrfs_file_extent_inline_len(leaf,
1441                                                      path->slots[0], fi);
1442                         extent_end = ALIGN(extent_end,
1443                                            fs_info->sectorsize);
1444                 } else {
1445                         BUG_ON(1);
1446                 }
1447 out_check:
1448                 if (extent_end <= start) {
1449                         path->slots[0]++;
1450                         if (!nolock && nocow)
1451                                 btrfs_end_write_no_snapshotting(root);
1452                         if (nocow)
1453                                 btrfs_dec_nocow_writers(fs_info, disk_bytenr);
1454                         goto next_slot;
1455                 }
1456                 if (!nocow) {
1457                         if (cow_start == (u64)-1)
1458                                 cow_start = cur_offset;
1459                         cur_offset = extent_end;
1460                         if (cur_offset > end)
1461                                 break;
1462                         path->slots[0]++;
1463                         goto next_slot;
1464                 }
1465
1466                 btrfs_release_path(path);
1467                 if (cow_start != (u64)-1) {
1468                         ret = cow_file_range(inode, locked_page,
1469                                              cow_start, found_key.offset - 1,
1470                                              end, page_started, nr_written, 1,
1471                                              NULL);
1472                         if (ret) {
1473                                 if (!nolock && nocow)
1474                                         btrfs_end_write_no_snapshotting(root);
1475                                 if (nocow)
1476                                         btrfs_dec_nocow_writers(fs_info,
1477                                                                 disk_bytenr);
1478                                 goto error;
1479                         }
1480                         cow_start = (u64)-1;
1481                 }
1482
1483                 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1484                         u64 orig_start = found_key.offset - extent_offset;
1485
1486                         em = create_io_em(inode, cur_offset, num_bytes,
1487                                           orig_start,
1488                                           disk_bytenr, /* block_start */
1489                                           num_bytes, /* block_len */
1490                                           disk_num_bytes, /* orig_block_len */
1491                                           ram_bytes, BTRFS_COMPRESS_NONE,
1492                                           BTRFS_ORDERED_PREALLOC);
1493                         if (IS_ERR(em)) {
1494                                 if (!nolock && nocow)
1495                                         btrfs_end_write_no_snapshotting(root);
1496                                 if (nocow)
1497                                         btrfs_dec_nocow_writers(fs_info,
1498                                                                 disk_bytenr);
1499                                 ret = PTR_ERR(em);
1500                                 goto error;
1501                         }
1502                         free_extent_map(em);
1503                 }
1504
1505                 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1506                         type = BTRFS_ORDERED_PREALLOC;
1507                 } else {
1508                         type = BTRFS_ORDERED_NOCOW;
1509                 }
1510
1511                 ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
1512                                                num_bytes, num_bytes, type);
1513                 if (nocow)
1514                         btrfs_dec_nocow_writers(fs_info, disk_bytenr);
1515                 BUG_ON(ret); /* -ENOMEM */
1516
1517                 if (root->root_key.objectid ==
1518                     BTRFS_DATA_RELOC_TREE_OBJECTID)
1519                         /*
1520                          * Error handled later, as we must prevent
1521                          * extent_clear_unlock_delalloc() in error handler
1522                          * from freeing metadata of created ordered extent.
1523                          */
1524                         ret = btrfs_reloc_clone_csums(inode, cur_offset,
1525                                                       num_bytes);
1526
1527                 extent_clear_unlock_delalloc(inode, cur_offset,
1528                                              cur_offset + num_bytes - 1, end,
1529                                              locked_page, EXTENT_LOCKED |
1530                                              EXTENT_DELALLOC |
1531                                              EXTENT_CLEAR_DATA_RESV,
1532                                              PAGE_UNLOCK | PAGE_SET_PRIVATE2);
1533
1534                 if (!nolock && nocow)
1535                         btrfs_end_write_no_snapshotting(root);
1536                 cur_offset = extent_end;
1537
1538                 /*
1539                  * btrfs_reloc_clone_csums() error, now we're OK to call error
1540                  * handler, as metadata for created ordered extent will only
1541                  * be freed by btrfs_finish_ordered_io().
1542                  */
1543                 if (ret)
1544                         goto error;
1545                 if (cur_offset > end)
1546                         break;
1547         }
1548         btrfs_release_path(path);
1549
1550         if (cur_offset <= end && cow_start == (u64)-1) {
1551                 cow_start = cur_offset;
1552                 cur_offset = end;
1553         }
1554
1555         if (cow_start != (u64)-1) {
1556                 ret = cow_file_range(inode, locked_page, cow_start, end, end,
1557                                      page_started, nr_written, 1, NULL);
1558                 if (ret)
1559                         goto error;
1560         }
1561
1562 error:
1563         if (ret && cur_offset < end)
1564                 extent_clear_unlock_delalloc(inode, cur_offset, end, end,
1565                                              locked_page, EXTENT_LOCKED |
1566                                              EXTENT_DELALLOC | EXTENT_DEFRAG |
1567                                              EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
1568                                              PAGE_CLEAR_DIRTY |
1569                                              PAGE_SET_WRITEBACK |
1570                                              PAGE_END_WRITEBACK);
1571         btrfs_free_path(path);
1572         return ret;
1573 }
1574
1575 static inline int need_force_cow(struct inode *inode, u64 start, u64 end)
1576 {
1577
1578         if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
1579             !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC))
1580                 return 0;
1581
1582         /*
1583          * @defrag_bytes is a hint value, no spinlock held here,
1584          * if is not zero, it means the file is defragging.
1585          * Force cow if given extent needs to be defragged.
1586          */
1587         if (BTRFS_I(inode)->defrag_bytes &&
1588             test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
1589                            EXTENT_DEFRAG, 0, NULL))
1590                 return 1;
1591
1592         return 0;
1593 }
1594
1595 /*
1596  * extent_io.c call back to do delayed allocation processing
1597  */
1598 static int run_delalloc_range(void *private_data, struct page *locked_page,
1599                               u64 start, u64 end, int *page_started,
1600                               unsigned long *nr_written,
1601                               struct writeback_control *wbc)
1602 {
1603         struct inode *inode = private_data;
1604         int ret;
1605         int force_cow = need_force_cow(inode, start, end);
1606         unsigned int write_flags = wbc_to_write_flags(wbc);
1607
1608         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW && !force_cow) {
1609                 ret = run_delalloc_nocow(inode, locked_page, start, end,
1610                                          page_started, 1, nr_written);
1611         } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) {
1612                 ret = run_delalloc_nocow(inode, locked_page, start, end,
1613                                          page_started, 0, nr_written);
1614         } else if (!inode_need_compress(inode, start, end)) {
1615                 ret = cow_file_range(inode, locked_page, start, end, end,
1616                                       page_started, nr_written, 1, NULL);
1617         } else {
1618                 set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
1619                         &BTRFS_I(inode)->runtime_flags);
1620                 ret = cow_file_range_async(inode, locked_page, start, end,
1621                                            page_started, nr_written,
1622                                            write_flags);
1623         }
1624         if (ret)
1625                 btrfs_cleanup_ordered_extents(inode, start, end - start + 1);
1626         return ret;
1627 }
1628
1629 static void btrfs_split_extent_hook(void *private_data,
1630                                     struct extent_state *orig, u64 split)
1631 {
1632         struct inode *inode = private_data;
1633         u64 size;
1634
1635         /* not delalloc, ignore it */
1636         if (!(orig->state & EXTENT_DELALLOC))
1637                 return;
1638
1639         size = orig->end - orig->start + 1;
1640         if (size > BTRFS_MAX_EXTENT_SIZE) {
1641                 u32 num_extents;
1642                 u64 new_size;
1643
1644                 /*
1645                  * See the explanation in btrfs_merge_extent_hook, the same
1646                  * applies here, just in reverse.
1647                  */
1648                 new_size = orig->end - split + 1;
1649                 num_extents = count_max_extents(new_size);
1650                 new_size = split - orig->start;
1651                 num_extents += count_max_extents(new_size);
1652                 if (count_max_extents(size) >= num_extents)
1653                         return;
1654         }
1655
1656         spin_lock(&BTRFS_I(inode)->lock);
1657         btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
1658         spin_unlock(&BTRFS_I(inode)->lock);
1659 }
1660
1661 /*
1662  * extent_io.c merge_extent_hook, used to track merged delayed allocation
1663  * extents so we can keep track of new extents that are just merged onto old
1664  * extents, such as when we are doing sequential writes, so we can properly
1665  * account for the metadata space we'll need.
1666  */
1667 static void btrfs_merge_extent_hook(void *private_data,
1668                                     struct extent_state *new,
1669                                     struct extent_state *other)
1670 {
1671         struct inode *inode = private_data;
1672         u64 new_size, old_size;
1673         u32 num_extents;
1674
1675         /* not delalloc, ignore it */
1676         if (!(other->state & EXTENT_DELALLOC))
1677                 return;
1678
1679         if (new->start > other->start)
1680                 new_size = new->end - other->start + 1;
1681         else
1682                 new_size = other->end - new->start + 1;
1683
1684         /* we're not bigger than the max, unreserve the space and go */
1685         if (new_size <= BTRFS_MAX_EXTENT_SIZE) {
1686                 spin_lock(&BTRFS_I(inode)->lock);
1687                 btrfs_mod_outstanding_extents(BTRFS_I(inode), -1);
1688                 spin_unlock(&BTRFS_I(inode)->lock);
1689                 return;
1690         }
1691
1692         /*
1693          * We have to add up either side to figure out how many extents were
1694          * accounted for before we merged into one big extent.  If the number of
1695          * extents we accounted for is <= the amount we need for the new range
1696          * then we can return, otherwise drop.  Think of it like this
1697          *
1698          * [ 4k][MAX_SIZE]
1699          *
1700          * So we've grown the extent by a MAX_SIZE extent, this would mean we
1701          * need 2 outstanding extents, on one side we have 1 and the other side
1702          * we have 1 so they are == and we can return.  But in this case
1703          *
1704          * [MAX_SIZE+4k][MAX_SIZE+4k]
1705          *
1706          * Each range on their own accounts for 2 extents, but merged together
1707          * they are only 3 extents worth of accounting, so we need to drop in
1708          * this case.
1709          */
1710         old_size = other->end - other->start + 1;
1711         num_extents = count_max_extents(old_size);
1712         old_size = new->end - new->start + 1;
1713         num_extents += count_max_extents(old_size);
1714         if (count_max_extents(new_size) >= num_extents)
1715                 return;
1716
1717         spin_lock(&BTRFS_I(inode)->lock);
1718         btrfs_mod_outstanding_extents(BTRFS_I(inode), -1);
1719         spin_unlock(&BTRFS_I(inode)->lock);
1720 }
1721
1722 static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
1723                                       struct inode *inode)
1724 {
1725         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1726
1727         spin_lock(&root->delalloc_lock);
1728         if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1729                 list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
1730                               &root->delalloc_inodes);
1731                 set_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1732                         &BTRFS_I(inode)->runtime_flags);
1733                 root->nr_delalloc_inodes++;
1734                 if (root->nr_delalloc_inodes == 1) {
1735                         spin_lock(&fs_info->delalloc_root_lock);
1736                         BUG_ON(!list_empty(&root->delalloc_root));
1737                         list_add_tail(&root->delalloc_root,
1738                                       &fs_info->delalloc_roots);
1739                         spin_unlock(&fs_info->delalloc_root_lock);
1740                 }
1741         }
1742         spin_unlock(&root->delalloc_lock);
1743 }
1744
1745
1746 void __btrfs_del_delalloc_inode(struct btrfs_root *root,
1747                                 struct btrfs_inode *inode)
1748 {
1749         struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
1750
1751         if (!list_empty(&inode->delalloc_inodes)) {
1752                 list_del_init(&inode->delalloc_inodes);
1753                 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1754                           &inode->runtime_flags);
1755                 root->nr_delalloc_inodes--;
1756                 if (!root->nr_delalloc_inodes) {
1757                         spin_lock(&fs_info->delalloc_root_lock);
1758                         BUG_ON(list_empty(&root->delalloc_root));
1759                         list_del_init(&root->delalloc_root);
1760                         spin_unlock(&fs_info->delalloc_root_lock);
1761                 }
1762         }
1763 }
1764
1765 static void btrfs_del_delalloc_inode(struct btrfs_root *root,
1766                                      struct btrfs_inode *inode)
1767 {
1768         spin_lock(&root->delalloc_lock);
1769         __btrfs_del_delalloc_inode(root, inode);
1770         spin_unlock(&root->delalloc_lock);
1771 }
1772
1773 /*
1774  * extent_io.c set_bit_hook, used to track delayed allocation
1775  * bytes in this file, and to maintain the list of inodes that
1776  * have pending delalloc work to be done.
1777  */
1778 static void btrfs_set_bit_hook(void *private_data,
1779                                struct extent_state *state, unsigned *bits)
1780 {
1781         struct inode *inode = private_data;
1782
1783         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1784
1785         if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC))
1786                 WARN_ON(1);
1787         /*
1788          * set_bit and clear bit hooks normally require _irqsave/restore
1789          * but in this case, we are only testing for the DELALLOC
1790          * bit, which is only set or cleared with irqs on
1791          */
1792         if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1793                 struct btrfs_root *root = BTRFS_I(inode)->root;
1794                 u64 len = state->end + 1 - state->start;
1795                 u32 num_extents = count_max_extents(len);
1796                 bool do_list = !btrfs_is_free_space_inode(BTRFS_I(inode));
1797
1798                 spin_lock(&BTRFS_I(inode)->lock);
1799                 btrfs_mod_outstanding_extents(BTRFS_I(inode), num_extents);
1800                 spin_unlock(&BTRFS_I(inode)->lock);
1801
1802                 /* For sanity tests */
1803                 if (btrfs_is_testing(fs_info))
1804                         return;
1805
1806                 percpu_counter_add_batch(&fs_info->delalloc_bytes, len,
1807                                          fs_info->delalloc_batch);
1808                 spin_lock(&BTRFS_I(inode)->lock);
1809                 BTRFS_I(inode)->delalloc_bytes += len;
1810                 if (*bits & EXTENT_DEFRAG)
1811                         BTRFS_I(inode)->defrag_bytes += len;
1812                 if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1813                                          &BTRFS_I(inode)->runtime_flags))
1814                         btrfs_add_delalloc_inodes(root, inode);
1815                 spin_unlock(&BTRFS_I(inode)->lock);
1816         }
1817
1818         if (!(state->state & EXTENT_DELALLOC_NEW) &&
1819             (*bits & EXTENT_DELALLOC_NEW)) {
1820                 spin_lock(&BTRFS_I(inode)->lock);
1821                 BTRFS_I(inode)->new_delalloc_bytes += state->end + 1 -
1822                         state->start;
1823                 spin_unlock(&BTRFS_I(inode)->lock);
1824         }
1825 }
1826
1827 /*
1828  * extent_io.c clear_bit_hook, see set_bit_hook for why
1829  */
1830 static void btrfs_clear_bit_hook(void *private_data,
1831                                  struct extent_state *state,
1832                                  unsigned *bits)
1833 {
1834         struct btrfs_inode *inode = BTRFS_I((struct inode *)private_data);
1835         struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
1836         u64 len = state->end + 1 - state->start;
1837         u32 num_extents = count_max_extents(len);
1838
1839         if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG)) {
1840                 spin_lock(&inode->lock);
1841                 inode->defrag_bytes -= len;
1842                 spin_unlock(&inode->lock);
1843         }
1844
1845         /*
1846          * set_bit and clear bit hooks normally require _irqsave/restore
1847          * but in this case, we are only testing for the DELALLOC
1848          * bit, which is only set or cleared with irqs on
1849          */
1850         if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1851                 struct btrfs_root *root = inode->root;
1852                 bool do_list = !btrfs_is_free_space_inode(inode);
1853
1854                 spin_lock(&inode->lock);
1855                 btrfs_mod_outstanding_extents(inode, -num_extents);
1856                 spin_unlock(&inode->lock);
1857
1858                 /*
1859                  * We don't reserve metadata space for space cache inodes so we
1860                  * don't need to call dellalloc_release_metadata if there is an
1861                  * error.
1862                  */
1863                 if (*bits & EXTENT_CLEAR_META_RESV &&
1864                     root != fs_info->tree_root)
1865                         btrfs_delalloc_release_metadata(inode, len, false);
1866
1867                 /* For sanity tests. */
1868                 if (btrfs_is_testing(fs_info))
1869                         return;
1870
1871                 if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID &&
1872                     do_list && !(state->state & EXTENT_NORESERVE) &&
1873                     (*bits & EXTENT_CLEAR_DATA_RESV))
1874                         btrfs_free_reserved_data_space_noquota(
1875                                         &inode->vfs_inode,
1876                                         state->start, len);
1877
1878                 percpu_counter_add_batch(&fs_info->delalloc_bytes, -len,
1879                                          fs_info->delalloc_batch);
1880                 spin_lock(&inode->lock);
1881                 inode->delalloc_bytes -= len;
1882                 if (do_list && inode->delalloc_bytes == 0 &&
1883                     test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1884                                         &inode->runtime_flags))
1885                         btrfs_del_delalloc_inode(root, inode);
1886                 spin_unlock(&inode->lock);
1887         }
1888
1889         if ((state->state & EXTENT_DELALLOC_NEW) &&
1890             (*bits & EXTENT_DELALLOC_NEW)) {
1891                 spin_lock(&inode->lock);
1892                 ASSERT(inode->new_delalloc_bytes >= len);
1893                 inode->new_delalloc_bytes -= len;
1894                 spin_unlock(&inode->lock);
1895         }
1896 }
1897
1898 /*
1899  * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
1900  * we don't create bios that span stripes or chunks
1901  *
1902  * return 1 if page cannot be merged to bio
1903  * return 0 if page can be merged to bio
1904  * return error otherwise
1905  */
1906 int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
1907                          size_t size, struct bio *bio,
1908                          unsigned long bio_flags)
1909 {
1910         struct inode *inode = page->mapping->host;
1911         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1912         u64 logical = (u64)bio->bi_iter.bi_sector << 9;
1913         u64 length = 0;
1914         u64 map_length;
1915         int ret;
1916
1917         if (bio_flags & EXTENT_BIO_COMPRESSED)
1918                 return 0;
1919
1920         length = bio->bi_iter.bi_size;
1921         map_length = length;
1922         ret = btrfs_map_block(fs_info, btrfs_op(bio), logical, &map_length,
1923                               NULL, 0);
1924         if (ret < 0)
1925                 return ret;
1926         if (map_length < length + size)
1927                 return 1;
1928         return 0;
1929 }
1930
1931 /*
1932  * in order to insert checksums into the metadata in large chunks,
1933  * we wait until bio submission time.   All the pages in the bio are
1934  * checksummed and sums are attached onto the ordered extent record.
1935  *
1936  * At IO completion time the cums attached on the ordered extent record
1937  * are inserted into the btree
1938  */
1939 static blk_status_t btrfs_submit_bio_start(void *private_data, struct bio *bio,
1940                                     u64 bio_offset)
1941 {
1942         struct inode *inode = private_data;
1943         blk_status_t ret = 0;
1944
1945         ret = btrfs_csum_one_bio(inode, bio, 0, 0);
1946         BUG_ON(ret); /* -ENOMEM */
1947         return 0;
1948 }
1949
1950 /*
1951  * in order to insert checksums into the metadata in large chunks,
1952  * we wait until bio submission time.   All the pages in the bio are
1953  * checksummed and sums are attached onto the ordered extent record.
1954  *
1955  * At IO completion time the cums attached on the ordered extent record
1956  * are inserted into the btree
1957  */
1958 static blk_status_t btrfs_submit_bio_done(void *private_data, struct bio *bio,
1959                           int mirror_num)
1960 {
1961         struct inode *inode = private_data;
1962         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1963         blk_status_t ret;
1964
1965         ret = btrfs_map_bio(fs_info, bio, mirror_num, 1);
1966         if (ret) {
1967                 bio->bi_status = ret;
1968                 bio_endio(bio);
1969         }
1970         return ret;
1971 }
1972
1973 /*
1974  * extent_io.c submission hook. This does the right thing for csum calculation
1975  * on write, or reading the csums from the tree before a read.
1976  *
1977  * Rules about async/sync submit,
1978  * a) read:                             sync submit
1979  *
1980  * b) write without checksum:           sync submit
1981  *
1982  * c) write with checksum:
1983  *    c-1) if bio is issued by fsync:   sync submit
1984  *         (sync_writers != 0)
1985  *
1986  *    c-2) if root is reloc root:       sync submit
1987  *         (only in case of buffered IO)
1988  *
1989  *    c-3) otherwise:                   async submit
1990  */
1991 static blk_status_t btrfs_submit_bio_hook(void *private_data, struct bio *bio,
1992                                  int mirror_num, unsigned long bio_flags,
1993                                  u64 bio_offset)
1994 {
1995         struct inode *inode = private_data;
1996         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1997         struct btrfs_root *root = BTRFS_I(inode)->root;
1998         enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA;
1999         blk_status_t ret = 0;
2000         int skip_sum;
2001         int async = !atomic_read(&BTRFS_I(inode)->sync_writers);
2002
2003         skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
2004
2005         if (btrfs_is_free_space_inode(BTRFS_I(inode)))
2006                 metadata = BTRFS_WQ_ENDIO_FREE_SPACE;
2007
2008         if (bio_op(bio) != REQ_OP_WRITE) {
2009                 ret = btrfs_bio_wq_end_io(fs_info, bio, metadata);
2010                 if (ret)
2011                         goto out;
2012
2013                 if (bio_flags & EXTENT_BIO_COMPRESSED) {
2014                         ret = btrfs_submit_compressed_read(inode, bio,
2015                                                            mirror_num,
2016                                                            bio_flags);
2017                         goto out;
2018                 } else if (!skip_sum) {
2019                         ret = btrfs_lookup_bio_sums(inode, bio, NULL);
2020                         if (ret)
2021                                 goto out;
2022                 }
2023                 goto mapit;
2024         } else if (async && !skip_sum) {
2025                 /* csum items have already been cloned */
2026                 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2027                         goto mapit;
2028                 /* we're doing a write, do the async checksumming */
2029                 ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, bio_flags,
2030                                           bio_offset, inode,
2031                                           btrfs_submit_bio_start,
2032                                           btrfs_submit_bio_done);
2033                 goto out;
2034         } else if (!skip_sum) {
2035                 ret = btrfs_csum_one_bio(inode, bio, 0, 0);
2036                 if (ret)
2037                         goto out;
2038         }
2039
2040 mapit:
2041         ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
2042
2043 out:
2044         if (ret) {
2045                 bio->bi_status = ret;
2046                 bio_endio(bio);
2047         }
2048         return ret;
2049 }
2050
2051 /*
2052  * given a list of ordered sums record them in the inode.  This happens
2053  * at IO completion time based on sums calculated at bio submission time.
2054  */
2055 static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
2056                              struct inode *inode, struct list_head *list)
2057 {
2058         struct btrfs_ordered_sum *sum;
2059         int ret;
2060
2061         list_for_each_entry(sum, list, list) {
2062                 trans->adding_csums = true;
2063                 ret = btrfs_csum_file_blocks(trans,
2064                        BTRFS_I(inode)->root->fs_info->csum_root, sum);
2065                 trans->adding_csums = false;
2066                 if (ret)
2067                         return ret;
2068         }
2069         return 0;
2070 }
2071
2072 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
2073                               unsigned int extra_bits,
2074                               struct extent_state **cached_state, int dedupe)
2075 {
2076         WARN_ON((end & (PAGE_SIZE - 1)) == 0);
2077         return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
2078                                    extra_bits, cached_state);
2079 }
2080
2081 /* see btrfs_writepage_start_hook for details on why this is required */
2082 struct btrfs_writepage_fixup {
2083         struct page *page;
2084         struct btrfs_work work;
2085 };
2086
2087 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
2088 {
2089         struct btrfs_writepage_fixup *fixup;
2090         struct btrfs_ordered_extent *ordered;
2091         struct extent_state *cached_state = NULL;
2092         struct extent_changeset *data_reserved = NULL;
2093         struct page *page;
2094         struct inode *inode;
2095         u64 page_start;
2096         u64 page_end;
2097         int ret;
2098
2099         fixup = container_of(work, struct btrfs_writepage_fixup, work);
2100         page = fixup->page;
2101 again:
2102         lock_page(page);
2103         if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
2104                 ClearPageChecked(page);
2105                 goto out_page;
2106         }
2107
2108         inode = page->mapping->host;
2109         page_start = page_offset(page);
2110         page_end = page_offset(page) + PAGE_SIZE - 1;
2111
2112         lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
2113                          &cached_state);
2114
2115         /* already ordered? We're done */
2116         if (PagePrivate2(page))
2117                 goto out;
2118
2119         ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
2120                                         PAGE_SIZE);
2121         if (ordered) {
2122                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
2123                                      page_end, &cached_state);
2124                 unlock_page(page);
2125                 btrfs_start_ordered_extent(inode, ordered, 1);
2126                 btrfs_put_ordered_extent(ordered);
2127                 goto again;
2128         }
2129
2130         ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
2131                                            PAGE_SIZE);
2132         if (ret) {
2133                 mapping_set_error(page->mapping, ret);
2134                 end_extent_writepage(page, ret, page_start, page_end);
2135                 ClearPageChecked(page);
2136                 goto out;
2137          }
2138
2139         ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
2140                                         &cached_state, 0);
2141         if (ret) {
2142                 mapping_set_error(page->mapping, ret);
2143                 end_extent_writepage(page, ret, page_start, page_end);
2144                 ClearPageChecked(page);
2145                 goto out;
2146         }
2147
2148         ClearPageChecked(page);
2149         set_page_dirty(page);
2150         btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, false);
2151 out:
2152         unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
2153                              &cached_state);
2154 out_page:
2155         unlock_page(page);
2156         put_page(page);
2157         kfree(fixup);
2158         extent_changeset_free(data_reserved);
2159 }
2160
2161 /*
2162  * There are a few paths in the higher layers of the kernel that directly
2163  * set the page dirty bit without asking the filesystem if it is a
2164  * good idea.  This causes problems because we want to make sure COW
2165  * properly happens and the data=ordered rules are followed.
2166  *
2167  * In our case any range that doesn't have the ORDERED bit set
2168  * hasn't been properly setup for IO.  We kick off an async process
2169  * to fix it up.  The async helper will wait for ordered extents, set
2170  * the delalloc bit and make it safe to write the page.
2171  */
2172 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
2173 {
2174         struct inode *inode = page->mapping->host;
2175         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2176         struct btrfs_writepage_fixup *fixup;
2177
2178         /* this page is properly in the ordered list */
2179         if (TestClearPagePrivate2(page))
2180                 return 0;
2181
2182         if (PageChecked(page))
2183                 return -EAGAIN;
2184
2185         fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
2186         if (!fixup)
2187                 return -EAGAIN;
2188
2189         SetPageChecked(page);
2190         get_page(page);
2191         btrfs_init_work(&fixup->work, btrfs_fixup_helper,
2192                         btrfs_writepage_fixup_worker, NULL, NULL);
2193         fixup->page = page;
2194         btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
2195         return -EBUSY;
2196 }
2197
2198 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
2199                                        struct inode *inode, u64 file_pos,
2200                                        u64 disk_bytenr, u64 disk_num_bytes,
2201                                        u64 num_bytes, u64 ram_bytes,
2202                                        u8 compression, u8 encryption,
2203                                        u16 other_encoding, int extent_type)
2204 {
2205         struct btrfs_root *root = BTRFS_I(inode)->root;
2206         struct btrfs_file_extent_item *fi;
2207         struct btrfs_path *path;
2208         struct extent_buffer *leaf;
2209         struct btrfs_key ins;
2210         u64 qg_released;
2211         int extent_inserted = 0;
2212         int ret;
2213
2214         path = btrfs_alloc_path();
2215         if (!path)
2216                 return -ENOMEM;
2217
2218         /*
2219          * we may be replacing one extent in the tree with another.
2220          * The new extent is pinned in the extent map, and we don't want
2221          * to drop it from the cache until it is completely in the btree.
2222          *
2223          * So, tell btrfs_drop_extents to leave this extent in the cache.
2224          * the caller is expected to unpin it and allow it to be merged
2225          * with the others.
2226          */
2227         ret = __btrfs_drop_extents(trans, root, inode, path, file_pos,
2228                                    file_pos + num_bytes, NULL, 0,
2229                                    1, sizeof(*fi), &extent_inserted);
2230         if (ret)
2231                 goto out;
2232
2233         if (!extent_inserted) {
2234                 ins.objectid = btrfs_ino(BTRFS_I(inode));
2235                 ins.offset = file_pos;
2236                 ins.type = BTRFS_EXTENT_DATA_KEY;
2237
2238                 path->leave_spinning = 1;
2239                 ret = btrfs_insert_empty_item(trans, root, path, &ins,
2240                                               sizeof(*fi));
2241                 if (ret)
2242                         goto out;
2243         }
2244         leaf = path->nodes[0];
2245         fi = btrfs_item_ptr(leaf, path->slots[0],
2246                             struct btrfs_file_extent_item);
2247         btrfs_set_file_extent_generation(leaf, fi, trans->transid);
2248         btrfs_set_file_extent_type(leaf, fi, extent_type);
2249         btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
2250         btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
2251         btrfs_set_file_extent_offset(leaf, fi, 0);
2252         btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2253         btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
2254         btrfs_set_file_extent_compression(leaf, fi, compression);
2255         btrfs_set_file_extent_encryption(leaf, fi, encryption);
2256         btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
2257
2258         btrfs_mark_buffer_dirty(leaf);
2259         btrfs_release_path(path);
2260
2261         inode_add_bytes(inode, num_bytes);
2262
2263         ins.objectid = disk_bytenr;
2264         ins.offset = disk_num_bytes;
2265         ins.type = BTRFS_EXTENT_ITEM_KEY;
2266
2267         /*
2268          * Release the reserved range from inode dirty range map, as it is
2269          * already moved into delayed_ref_head
2270          */
2271         ret = btrfs_qgroup_release_data(inode, file_pos, ram_bytes);
2272         if (ret < 0)
2273                 goto out;
2274         qg_released = ret;
2275         ret = btrfs_alloc_reserved_file_extent(trans, root,
2276                                                btrfs_ino(BTRFS_I(inode)),
2277                                                file_pos, qg_released, &ins);
2278 out:
2279         btrfs_free_path(path);
2280
2281         return ret;
2282 }
2283
2284 /* snapshot-aware defrag */
2285 struct sa_defrag_extent_backref {
2286         struct rb_node node;
2287         struct old_sa_defrag_extent *old;
2288         u64 root_id;
2289         u64 inum;
2290         u64 file_pos;
2291         u64 extent_offset;
2292         u64 num_bytes;
2293         u64 generation;
2294 };
2295
2296 struct old_sa_defrag_extent {
2297         struct list_head list;
2298         struct new_sa_defrag_extent *new;
2299
2300         u64 extent_offset;
2301         u64 bytenr;
2302         u64 offset;
2303         u64 len;
2304         int count;
2305 };
2306
2307 struct new_sa_defrag_extent {
2308         struct rb_root root;
2309         struct list_head head;
2310         struct btrfs_path *path;
2311         struct inode *inode;
2312         u64 file_pos;
2313         u64 len;
2314         u64 bytenr;
2315         u64 disk_len;
2316         u8 compress_type;
2317 };
2318
2319 static int backref_comp(struct sa_defrag_extent_backref *b1,
2320                         struct sa_defrag_extent_backref *b2)
2321 {
2322         if (b1->root_id < b2->root_id)
2323                 return -1;
2324         else if (b1->root_id > b2->root_id)
2325                 return 1;
2326
2327         if (b1->inum < b2->inum)
2328                 return -1;
2329         else if (b1->inum > b2->inum)
2330                 return 1;
2331
2332         if (b1->file_pos < b2->file_pos)
2333                 return -1;
2334         else if (b1->file_pos > b2->file_pos)
2335                 return 1;
2336
2337         /*
2338          * [------------------------------] ===> (a range of space)
2339          *     |<--->|   |<---->| =============> (fs/file tree A)
2340          * |<---------------------------->| ===> (fs/file tree B)
2341          *
2342          * A range of space can refer to two file extents in one tree while
2343          * refer to only one file extent in another tree.
2344          *
2345          * So we may process a disk offset more than one time(two extents in A)
2346          * and locate at the same extent(one extent in B), then insert two same
2347          * backrefs(both refer to the extent in B).
2348          */
2349         return 0;
2350 }
2351
2352 static void backref_insert(struct rb_root *root,
2353                            struct sa_defrag_extent_backref *backref)
2354 {
2355         struct rb_node **p = &root->rb_node;
2356         struct rb_node *parent = NULL;
2357         struct sa_defrag_extent_backref *entry;
2358         int ret;
2359
2360         while (*p) {
2361                 parent = *p;
2362                 entry = rb_entry(parent, struct sa_defrag_extent_backref, node);
2363
2364                 ret = backref_comp(backref, entry);
2365                 if (ret < 0)
2366                         p = &(*p)->rb_left;
2367                 else
2368                         p = &(*p)->rb_right;
2369         }
2370
2371         rb_link_node(&backref->node, parent, p);
2372         rb_insert_color(&backref->node, root);
2373 }
2374
2375 /*
2376  * Note the backref might has changed, and in this case we just return 0.
2377  */
2378 static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
2379                                        void *ctx)
2380 {
2381         struct btrfs_file_extent_item *extent;
2382         struct old_sa_defrag_extent *old = ctx;
2383         struct new_sa_defrag_extent *new = old->new;
2384         struct btrfs_path *path = new->path;
2385         struct btrfs_key key;
2386         struct btrfs_root *root;
2387         struct sa_defrag_extent_backref *backref;
2388         struct extent_buffer *leaf;
2389         struct inode *inode = new->inode;
2390         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2391         int slot;
2392         int ret;
2393         u64 extent_offset;
2394         u64 num_bytes;
2395
2396         if (BTRFS_I(inode)->root->root_key.objectid == root_id &&
2397             inum == btrfs_ino(BTRFS_I(inode)))
2398                 return 0;
2399
2400         key.objectid = root_id;
2401         key.type = BTRFS_ROOT_ITEM_KEY;
2402         key.offset = (u64)-1;
2403
2404         root = btrfs_read_fs_root_no_name(fs_info, &key);
2405         if (IS_ERR(root)) {
2406                 if (PTR_ERR(root) == -ENOENT)
2407                         return 0;
2408                 WARN_ON(1);
2409                 btrfs_debug(fs_info, "inum=%llu, offset=%llu, root_id=%llu",
2410                          inum, offset, root_id);
2411                 return PTR_ERR(root);
2412         }
2413
2414         key.objectid = inum;
2415         key.type = BTRFS_EXTENT_DATA_KEY;
2416         if (offset > (u64)-1 << 32)
2417                 key.offset = 0;
2418         else
2419                 key.offset = offset;
2420
2421         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2422         if (WARN_ON(ret < 0))
2423                 return ret;
2424         ret = 0;
2425
2426         while (1) {
2427                 cond_resched();
2428
2429                 leaf = path->nodes[0];
2430                 slot = path->slots[0];
2431
2432                 if (slot >= btrfs_header_nritems(leaf)) {
2433                         ret = btrfs_next_leaf(root, path);
2434                         if (ret < 0) {
2435                                 goto out;
2436                         } else if (ret > 0) {
2437                                 ret = 0;
2438                                 goto out;
2439                         }
2440                         continue;
2441                 }
2442
2443                 path->slots[0]++;
2444
2445                 btrfs_item_key_to_cpu(leaf, &key, slot);
2446
2447                 if (key.objectid > inum)
2448                         goto out;
2449
2450                 if (key.objectid < inum || key.type != BTRFS_EXTENT_DATA_KEY)
2451                         continue;
2452
2453                 extent = btrfs_item_ptr(leaf, slot,
2454                                         struct btrfs_file_extent_item);
2455
2456                 if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr)
2457                         continue;
2458
2459                 /*
2460                  * 'offset' refers to the exact key.offset,
2461                  * NOT the 'offset' field in btrfs_extent_data_ref, ie.
2462                  * (key.offset - extent_offset).
2463                  */
2464                 if (key.offset != offset)
2465                         continue;
2466
2467                 extent_offset = btrfs_file_extent_offset(leaf, extent);
2468                 num_bytes = btrfs_file_extent_num_bytes(leaf, extent);
2469
2470                 if (extent_offset >= old->extent_offset + old->offset +
2471                     old->len || extent_offset + num_bytes <=
2472                     old->extent_offset + old->offset)
2473                         continue;
2474                 break;
2475         }
2476
2477         backref = kmalloc(sizeof(*backref), GFP_NOFS);
2478         if (!backref) {
2479                 ret = -ENOENT;
2480                 goto out;
2481         }
2482
2483         backref->root_id = root_id;
2484         backref->inum = inum;
2485         backref->file_pos = offset;
2486         backref->num_bytes = num_bytes;
2487         backref->extent_offset = extent_offset;
2488         backref->generation = btrfs_file_extent_generation(leaf, extent);
2489         backref->old = old;
2490         backref_insert(&new->root, backref);
2491         old->count++;
2492 out:
2493         btrfs_release_path(path);
2494         WARN_ON(ret);
2495         return ret;
2496 }
2497
2498 static noinline bool record_extent_backrefs(struct btrfs_path *path,
2499                                    struct new_sa_defrag_extent *new)
2500 {
2501         struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb);
2502         struct old_sa_defrag_extent *old, *tmp;
2503         int ret;
2504
2505         new->path = path;
2506
2507         list_for_each_entry_safe(old, tmp, &new->head, list) {
2508                 ret = iterate_inodes_from_logical(old->bytenr +
2509                                                   old->extent_offset, fs_info,
2510                                                   path, record_one_backref,
2511                                                   old, false);
2512                 if (ret < 0 && ret != -ENOENT)
2513                         return false;
2514
2515                 /* no backref to be processed for this extent */
2516                 if (!old->count) {
2517                         list_del(&old->list);
2518                         kfree(old);
2519                 }
2520         }
2521
2522         if (list_empty(&new->head))
2523                 return false;
2524
2525         return true;
2526 }
2527
2528 static int relink_is_mergable(struct extent_buffer *leaf,
2529                               struct btrfs_file_extent_item *fi,
2530                               struct new_sa_defrag_extent *new)
2531 {
2532         if (btrfs_file_extent_disk_bytenr(leaf, fi) != new->bytenr)
2533                 return 0;
2534
2535         if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2536                 return 0;
2537
2538         if (btrfs_file_extent_compression(leaf, fi) != new->compress_type)
2539                 return 0;
2540
2541         if (btrfs_file_extent_encryption(leaf, fi) ||
2542             btrfs_file_extent_other_encoding(leaf, fi))
2543                 return 0;
2544
2545         return 1;
2546 }
2547
2548 /*
2549  * Note the backref might has changed, and in this case we just return 0.
2550  */
2551 static noinline int relink_extent_backref(struct btrfs_path *path,
2552                                  struct sa_defrag_extent_backref *prev,
2553                                  struct sa_defrag_extent_backref *backref)
2554 {
2555         struct btrfs_file_extent_item *extent;
2556         struct btrfs_file_extent_item *item;
2557         struct btrfs_ordered_extent *ordered;
2558         struct btrfs_trans_handle *trans;
2559         struct btrfs_root *root;
2560         struct btrfs_key key;
2561         struct extent_buffer *leaf;
2562         struct old_sa_defrag_extent *old = backref->old;
2563         struct new_sa_defrag_extent *new = old->new;
2564         struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb);
2565         struct inode *inode;
2566         struct extent_state *cached = NULL;
2567         int ret = 0;
2568         u64 start;
2569         u64 len;
2570         u64 lock_start;
2571         u64 lock_end;
2572         bool merge = false;
2573         int index;
2574
2575         if (prev && prev->root_id == backref->root_id &&
2576             prev->inum == backref->inum &&
2577             prev->file_pos + prev->num_bytes == backref->file_pos)
2578                 merge = true;
2579
2580         /* step 1: get root */
2581         key.objectid = backref->root_id;
2582         key.type = BTRFS_ROOT_ITEM_KEY;
2583         key.offset = (u64)-1;
2584
2585         index = srcu_read_lock(&fs_info->subvol_srcu);
2586
2587         root = btrfs_read_fs_root_no_name(fs_info, &key);
2588         if (IS_ERR(root)) {
2589                 srcu_read_unlock(&fs_info->subvol_srcu, index);
2590                 if (PTR_ERR(root) == -ENOENT)
2591                         return 0;
2592                 return PTR_ERR(root);
2593         }
2594
2595         if (btrfs_root_readonly(root)) {
2596                 srcu_read_unlock(&fs_info->subvol_srcu, index);
2597                 return 0;
2598         }
2599
2600         /* step 2: get inode */
2601         key.objectid = backref->inum;
2602         key.type = BTRFS_INODE_ITEM_KEY;
2603         key.offset = 0;
2604
2605         inode = btrfs_iget(fs_info->sb, &key, root, NULL);
2606         if (IS_ERR(inode)) {
2607                 srcu_read_unlock(&fs_info->subvol_srcu, index);
2608                 return 0;
2609         }
2610
2611         srcu_read_unlock(&fs_info->subvol_srcu, index);
2612
2613         /* step 3: relink backref */
2614         lock_start = backref->file_pos;
2615         lock_end = backref->file_pos + backref->num_bytes - 1;
2616         lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2617                          &cached);
2618
2619         ordered = btrfs_lookup_first_ordered_extent(inode, lock_end);
2620         if (ordered) {
2621                 btrfs_put_ordered_extent(ordered);
2622                 goto out_unlock;
2623         }
2624
2625         trans = btrfs_join_transaction(root);
2626         if (IS_ERR(trans)) {
2627                 ret = PTR_ERR(trans);
2628                 goto out_unlock;
2629         }
2630
2631         key.objectid = backref->inum;
2632         key.type = BTRFS_EXTENT_DATA_KEY;
2633         key.offset = backref->file_pos;
2634
2635         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2636         if (ret < 0) {
2637                 goto out_free_path;
2638         } else if (ret > 0) {
2639                 ret = 0;
2640                 goto out_free_path;
2641         }
2642
2643         extent = btrfs_item_ptr(path->nodes[0], path->slots[0],
2644                                 struct btrfs_file_extent_item);
2645
2646         if (btrfs_file_extent_generation(path->nodes[0], extent) !=
2647             backref->generation)
2648                 goto out_free_path;
2649
2650         btrfs_release_path(path);
2651
2652         start = backref->file_pos;
2653         if (backref->extent_offset < old->extent_offset + old->offset)
2654                 start += old->extent_offset + old->offset -
2655                          backref->extent_offset;
2656
2657         len = min(backref->extent_offset + backref->num_bytes,
2658                   old->extent_offset + old->offset + old->len);
2659         len -= max(backref->extent_offset, old->extent_offset + old->offset);
2660
2661         ret = btrfs_drop_extents(trans, root, inode, start,
2662                                  start + len, 1);
2663         if (ret)
2664                 goto out_free_path;
2665 again:
2666         key.objectid = btrfs_ino(BTRFS_I(inode));
2667         key.type = BTRFS_EXTENT_DATA_KEY;
2668         key.offset = start;
2669
2670         path->leave_spinning = 1;
2671         if (merge) {
2672                 struct btrfs_file_extent_item *fi;
2673                 u64 extent_len;
2674                 struct btrfs_key found_key;
2675
2676                 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2677                 if (ret < 0)
2678                         goto out_free_path;
2679
2680                 path->slots[0]--;
2681                 leaf = path->nodes[0];
2682                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2683
2684                 fi = btrfs_item_ptr(leaf, path->slots[0],
2685                                     struct btrfs_file_extent_item);
2686                 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
2687
2688                 if (extent_len + found_key.offset == start &&
2689                     relink_is_mergable(leaf, fi, new)) {
2690                         btrfs_set_file_extent_num_bytes(leaf, fi,
2691                                                         extent_len + len);
2692                         btrfs_mark_buffer_dirty(leaf);
2693                         inode_add_bytes(inode, len);
2694
2695                         ret = 1;
2696                         goto out_free_path;
2697                 } else {
2698                         merge = false;
2699                         btrfs_release_path(path);
2700                         goto again;
2701                 }
2702         }
2703
2704         ret = btrfs_insert_empty_item(trans, root, path, &key,
2705                                         sizeof(*extent));
2706         if (ret) {
2707                 btrfs_abort_transaction(trans, ret);
2708                 goto out_free_path;
2709         }
2710
2711         leaf = path->nodes[0];
2712         item = btrfs_item_ptr(leaf, path->slots[0],
2713                                 struct btrfs_file_extent_item);
2714         btrfs_set_file_extent_disk_bytenr(leaf, item, new->bytenr);
2715         btrfs_set_file_extent_disk_num_bytes(leaf, item, new->disk_len);
2716         btrfs_set_file_extent_offset(leaf, item, start - new->file_pos);
2717         btrfs_set_file_extent_num_bytes(leaf, item, len);
2718         btrfs_set_file_extent_ram_bytes(leaf, item, new->len);
2719         btrfs_set_file_extent_generation(leaf, item, trans->transid);
2720         btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
2721         btrfs_set_file_extent_compression(leaf, item, new->compress_type);
2722         btrfs_set_file_extent_encryption(leaf, item, 0);
2723         btrfs_set_file_extent_other_encoding(leaf, item, 0);
2724
2725         btrfs_mark_buffer_dirty(leaf);
2726         inode_add_bytes(inode, len);
2727         btrfs_release_path(path);
2728
2729         ret = btrfs_inc_extent_ref(trans, root, new->bytenr,
2730                         new->disk_len, 0,
2731                         backref->root_id, backref->inum,
2732                         new->file_pos); /* start - extent_offset */
2733         if (ret) {
2734                 btrfs_abort_transaction(trans, ret);
2735                 goto out_free_path;
2736         }
2737
2738         ret = 1;
2739 out_free_path:
2740         btrfs_release_path(path);
2741         path->leave_spinning = 0;
2742         btrfs_end_transaction(trans);
2743 out_unlock:
2744         unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2745                              &cached);
2746         iput(inode);
2747         return ret;
2748 }
2749
2750 static void free_sa_defrag_extent(struct new_sa_defrag_extent *new)
2751 {
2752         struct old_sa_defrag_extent *old, *tmp;
2753
2754         if (!new)
2755                 return;
2756
2757         list_for_each_entry_safe(old, tmp, &new->head, list) {
2758                 kfree(old);
2759         }
2760         kfree(new);
2761 }
2762
2763 static void relink_file_extents(struct new_sa_defrag_extent *new)
2764 {
2765         struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb);
2766         struct btrfs_path *path;
2767         struct sa_defrag_extent_backref *backref;
2768         struct sa_defrag_extent_backref *prev = NULL;
2769         struct inode *inode;
2770         struct rb_node *node;
2771         int ret;
2772
2773         inode = new->inode;
2774
2775         path = btrfs_alloc_path();
2776         if (!path)
2777                 return;
2778
2779         if (!record_extent_backrefs(path, new)) {
2780                 btrfs_free_path(path);
2781                 goto out;
2782         }
2783         btrfs_release_path(path);
2784
2785         while (1) {
2786                 node = rb_first(&new->root);
2787                 if (!node)
2788                         break;
2789                 rb_erase(node, &new->root);
2790
2791                 backref = rb_entry(node, struct sa_defrag_extent_backref, node);
2792
2793                 ret = relink_extent_backref(path, prev, backref);
2794                 WARN_ON(ret < 0);
2795
2796                 kfree(prev);
2797
2798                 if (ret == 1)
2799                         prev = backref;
2800                 else
2801                         prev = NULL;
2802                 cond_resched();
2803         }
2804         kfree(prev);
2805
2806         btrfs_free_path(path);
2807 out:
2808         free_sa_defrag_extent(new);
2809
2810         atomic_dec(&fs_info->defrag_running);
2811         wake_up(&fs_info->transaction_wait);
2812 }
2813
2814 static struct new_sa_defrag_extent *
2815 record_old_file_extents(struct inode *inode,
2816                         struct btrfs_ordered_extent *ordered)
2817 {
2818         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2819         struct btrfs_root *root = BTRFS_I(inode)->root;
2820         struct btrfs_path *path;
2821         struct btrfs_key key;
2822         struct old_sa_defrag_extent *old;
2823         struct new_sa_defrag_extent *new;
2824         int ret;
2825
2826         new = kmalloc(sizeof(*new), GFP_NOFS);
2827         if (!new)
2828                 return NULL;
2829
2830         new->inode = inode;
2831         new->file_pos = ordered->file_offset;
2832         new->len = ordered->len;
2833         new->bytenr = ordered->start;
2834         new->disk_len = ordered->disk_len;
2835         new->compress_type = ordered->compress_type;
2836         new->root = RB_ROOT;
2837         INIT_LIST_HEAD(&new->head);
2838
2839         path = btrfs_alloc_path();
2840         if (!path)
2841                 goto out_kfree;
2842
2843         key.objectid = btrfs_ino(BTRFS_I(inode));
2844         key.type = BTRFS_EXTENT_DATA_KEY;
2845         key.offset = new->file_pos;
2846
2847         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2848         if (ret < 0)
2849                 goto out_free_path;
2850         if (ret > 0 && path->slots[0] > 0)
2851                 path->slots[0]--;
2852
2853         /* find out all the old extents for the file range */
2854         while (1) {
2855                 struct btrfs_file_extent_item *extent;
2856                 struct extent_buffer *l;
2857                 int slot;
2858                 u64 num_bytes;
2859                 u64 offset;
2860                 u64 end;
2861                 u64 disk_bytenr;
2862                 u64 extent_offset;
2863
2864                 l = path->nodes[0];
2865                 slot = path->slots[0];
2866
2867                 if (slot >= btrfs_header_nritems(l)) {
2868                         ret = btrfs_next_leaf(root, path);
2869                         if (ret < 0)
2870                                 goto out_free_path;
2871                         else if (ret > 0)
2872                                 break;
2873                         continue;
2874                 }
2875
2876                 btrfs_item_key_to_cpu(l, &key, slot);
2877
2878                 if (key.objectid != btrfs_ino(BTRFS_I(inode)))
2879                         break;
2880                 if (key.type != BTRFS_EXTENT_DATA_KEY)
2881                         break;
2882                 if (key.offset >= new->file_pos + new->len)
2883                         break;
2884
2885                 extent = btrfs_item_ptr(l, slot, struct btrfs_file_extent_item);
2886
2887                 num_bytes = btrfs_file_extent_num_bytes(l, extent);
2888                 if (key.offset + num_bytes < new->file_pos)
2889                         goto next;
2890
2891                 disk_bytenr = btrfs_file_extent_disk_bytenr(l, extent);
2892                 if (!disk_bytenr)
2893                         goto next;
2894
2895                 extent_offset = btrfs_file_extent_offset(l, extent);
2896
2897                 old = kmalloc(sizeof(*old), GFP_NOFS);
2898                 if (!old)
2899                         goto out_free_path;
2900
2901                 offset = max(new->file_pos, key.offset);
2902                 end = min(new->file_pos + new->len, key.offset + num_bytes);
2903
2904                 old->bytenr = disk_bytenr;
2905                 old->extent_offset = extent_offset;
2906                 old->offset = offset - key.offset;
2907                 old->len = end - offset;
2908                 old->new = new;
2909                 old->count = 0;
2910                 list_add_tail(&old->list, &new->head);
2911 next:
2912                 path->slots[0]++;
2913                 cond_resched();
2914         }
2915
2916         btrfs_free_path(path);
2917         atomic_inc(&fs_info->defrag_running);
2918
2919         return new;
2920
2921 out_free_path:
2922         btrfs_free_path(path);
2923 out_kfree:
2924         free_sa_defrag_extent(new);
2925         return NULL;
2926 }
2927
2928 static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
2929                                          u64 start, u64 len)
2930 {
2931         struct btrfs_block_group_cache *cache;
2932
2933         cache = btrfs_lookup_block_group(fs_info, start);
2934         ASSERT(cache);
2935
2936         spin_lock(&cache->lock);
2937         cache->delalloc_bytes -= len;
2938         spin_unlock(&cache->lock);
2939
2940         btrfs_put_block_group(cache);
2941 }
2942
2943 /* as ordered data IO finishes, this gets called so we can finish
2944  * an ordered extent if the range of bytes in the file it covers are
2945  * fully written.
2946  */
2947 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
2948 {
2949         struct inode *inode = ordered_extent->inode;
2950         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2951         struct btrfs_root *root = BTRFS_I(inode)->root;
2952         struct btrfs_trans_handle *trans = NULL;
2953         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2954         struct extent_state *cached_state = NULL;
2955         struct new_sa_defrag_extent *new = NULL;
2956         int compress_type = 0;
2957         int ret = 0;
2958         u64 logical_len = ordered_extent->len;
2959         bool nolock;
2960         bool truncated = false;
2961         bool range_locked = false;
2962         bool clear_new_delalloc_bytes = false;
2963
2964         if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
2965             !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) &&
2966             !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags))
2967                 clear_new_delalloc_bytes = true;
2968
2969         nolock = btrfs_is_free_space_inode(BTRFS_I(inode));
2970
2971         if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
2972                 ret = -EIO;
2973                 goto out;
2974         }
2975
2976         btrfs_free_io_failure_record(BTRFS_I(inode),
2977                         ordered_extent->file_offset,
2978                         ordered_extent->file_offset +
2979                         ordered_extent->len - 1);
2980
2981         if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
2982                 truncated = true;
2983                 logical_len = ordered_extent->truncated_len;
2984                 /* Truncated the entire extent, don't bother adding */
2985                 if (!logical_len)
2986                         goto out;
2987         }
2988
2989         if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
2990                 BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
2991
2992                 /*
2993                  * For mwrite(mmap + memset to write) case, we still reserve
2994                  * space for NOCOW range.
2995                  * As NOCOW won't cause a new delayed ref, just free the space
2996                  */
2997                 btrfs_qgroup_free_data(inode, NULL, ordered_extent->file_offset,
2998                                        ordered_extent->len);
2999                 btrfs_ordered_update_i_size(inode, 0, ordered_extent);
3000                 if (nolock)
3001                         trans = btrfs_join_transaction_nolock(root);
3002                 else
3003                         trans = btrfs_join_transaction(root);
3004                 if (IS_ERR(trans)) {
3005                         ret = PTR_ERR(trans);
3006                         trans = NULL;
3007                         goto out;
3008                 }
3009                 trans->block_rsv = &BTRFS_I(inode)->block_rsv;
3010                 ret = btrfs_update_inode_fallback(trans, root, inode);
3011                 if (ret) /* -ENOMEM or corruption */
3012                         btrfs_abort_transaction(trans, ret);
3013                 goto out;
3014         }
3015
3016         range_locked = true;
3017         lock_extent_bits(io_tree, ordered_extent->file_offset,
3018                          ordered_extent->file_offset + ordered_extent->len - 1,
3019                          &cached_state);
3020
3021         ret = test_range_bit(io_tree, ordered_extent->file_offset,
3022                         ordered_extent->file_offset + ordered_extent->len - 1,
3023                         EXTENT_DEFRAG, 0, cached_state);
3024         if (ret) {
3025                 u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
3026                 if (0 && last_snapshot >= BTRFS_I(inode)->generation)
3027                         /* the inode is shared */
3028                         new = record_old_file_extents(inode, ordered_extent);
3029
3030                 clear_extent_bit(io_tree, ordered_extent->file_offset,
3031                         ordered_extent->file_offset + ordered_extent->len - 1,
3032                         EXTENT_DEFRAG, 0, 0, &cached_state);
3033         }
3034
3035         if (nolock)
3036                 trans = btrfs_join_transaction_nolock(root);
3037         else
3038                 trans = btrfs_join_transaction(root);
3039         if (IS_ERR(trans)) {
3040                 ret = PTR_ERR(trans);
3041                 trans = NULL;
3042                 goto out;
3043         }
3044
3045         trans->block_rsv = &BTRFS_I(inode)->block_rsv;
3046
3047         if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
3048                 compress_type = ordered_extent->compress_type;
3049         if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
3050                 BUG_ON(compress_type);
3051                 btrfs_qgroup_free_data(inode, NULL, ordered_extent->file_offset,
3052                                        ordered_extent->len);
3053                 ret = btrfs_mark_extent_written(trans, BTRFS_I(inode),
3054                                                 ordered_extent->file_offset,
3055                                                 ordered_extent->file_offset +
3056                                                 logical_len);
3057         } else {
3058                 BUG_ON(root == fs_info->tree_root);
3059                 ret = insert_reserved_file_extent(trans, inode,
3060                                                 ordered_extent->file_offset,
3061                                                 ordered_extent->start,
3062                                                 ordered_extent->disk_len,
3063                                                 logical_len, logical_len,
3064                                                 compress_type, 0, 0,
3065                                                 BTRFS_FILE_EXTENT_REG);
3066                 if (!ret)
3067                         btrfs_release_delalloc_bytes(fs_info,
3068                                                      ordered_extent->start,
3069                                                      ordered_extent->disk_len);
3070         }
3071         unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
3072                            ordered_extent->file_offset, ordered_extent->len,
3073                            trans->transid);
3074         if (ret < 0) {
3075                 btrfs_abort_transaction(trans, ret);
3076                 goto out;
3077         }
3078
3079         ret = add_pending_csums(trans, inode, &ordered_extent->list);
3080         if (ret) {
3081                 btrfs_abort_transaction(trans, ret);
3082                 goto out;
3083         }
3084
3085         btrfs_ordered_update_i_size(inode, 0, ordered_extent);
3086         ret = btrfs_update_inode_fallback(trans, root, inode);
3087         if (ret) { /* -ENOMEM or corruption */
3088                 btrfs_abort_transaction(trans, ret);
3089                 goto out;
3090         }
3091         ret = 0;
3092 out:
3093         if (range_locked || clear_new_delalloc_bytes) {
3094                 unsigned int clear_bits = 0;
3095
3096                 if (range_locked)
3097                         clear_bits |= EXTENT_LOCKED;
3098                 if (clear_new_delalloc_bytes)
3099                         clear_bits |= EXTENT_DELALLOC_NEW;
3100                 clear_extent_bit(&BTRFS_I(inode)->io_tree,
3101                                  ordered_extent->file_offset,
3102                                  ordered_extent->file_offset +
3103                                  ordered_extent->len - 1,
3104                                  clear_bits,
3105                                  (clear_bits & EXTENT_LOCKED) ? 1 : 0,
3106                                  0, &cached_state);
3107         }
3108
3109         if (trans)
3110                 btrfs_end_transaction(trans);
3111
3112         if (ret || truncated) {
3113                 u64 start, end;
3114
3115                 if (truncated)
3116                         start = ordered_extent->file_offset + logical_len;
3117                 else
3118                         start = ordered_extent->file_offset;
3119                 end = ordered_extent->file_offset + ordered_extent->len - 1;
3120                 clear_extent_uptodate(io_tree, start, end, NULL);
3121
3122                 /* Drop the cache for the part of the extent we didn't write. */
3123                 btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0);
3124
3125                 /*
3126                  * If the ordered extent had an IOERR or something else went
3127                  * wrong we need to return the space for this ordered extent
3128                  * back to the allocator.  We only free the extent in the
3129                  * truncated case if we didn't write out the extent at all.
3130                  */
3131                 if ((ret || !logical_len) &&
3132                     !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
3133                     !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
3134                         btrfs_free_reserved_extent(fs_info,
3135                                                    ordered_extent->start,
3136                                                    ordered_extent->disk_len, 1);
3137         }
3138
3139
3140         /*
3141          * This needs to be done to make sure anybody waiting knows we are done
3142          * updating everything for this ordered extent.
3143          */
3144         btrfs_remove_ordered_extent(inode, ordered_extent);
3145
3146         /* for snapshot-aware defrag */
3147         if (new) {
3148                 if (ret) {
3149                         free_sa_defrag_extent(new);
3150                         atomic_dec(&fs_info->defrag_running);
3151                 } else {
3152                         relink_file_extents(new);
3153                 }
3154         }
3155
3156         /* once for us */
3157         btrfs_put_ordered_extent(ordered_extent);
3158         /* once for the tree */
3159         btrfs_put_ordered_extent(ordered_extent);
3160
3161         return ret;
3162 }
3163
3164 static void finish_ordered_fn(struct btrfs_work *work)
3165 {
3166         struct btrfs_ordered_extent *ordered_extent;
3167         ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
3168         btrfs_finish_ordered_io(ordered_extent);
3169 }
3170
3171 static void btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
3172                                 struct extent_state *state, int uptodate)
3173 {
3174         struct inode *inode = page->mapping->host;
3175         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3176         struct btrfs_ordered_extent *ordered_extent = NULL;
3177         struct btrfs_workqueue *wq;
3178         btrfs_work_func_t func;
3179
3180         trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
3181
3182         ClearPagePrivate2(page);
3183         if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
3184                                             end - start + 1, uptodate))
3185                 return;
3186
3187         if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
3188                 wq = fs_info->endio_freespace_worker;
3189                 func = btrfs_freespace_write_helper;
3190         } else {
3191                 wq = fs_info->endio_write_workers;
3192                 func = btrfs_endio_write_helper;
3193         }
3194
3195         btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL,
3196                         NULL);
3197         btrfs_queue_work(wq, &ordered_extent->work);
3198 }
3199
3200 static int __readpage_endio_check(struct inode *inode,
3201                                   struct btrfs_io_bio *io_bio,
3202                                   int icsum, struct page *page,
3203                                   int pgoff, u64 start, size_t len)
3204 {
3205         char *kaddr;
3206         u32 csum_expected;
3207         u32 csum = ~(u32)0;
3208
3209         csum_expected = *(((u32 *)io_bio->csum) + icsum);
3210
3211         kaddr = kmap_atomic(page);
3212         csum = btrfs_csum_data(kaddr + pgoff, csum,  len);
3213         btrfs_csum_final(csum, (u8 *)&csum);
3214         if (csum != csum_expected)
3215                 goto zeroit;
3216
3217         kunmap_atomic(kaddr);
3218         return 0;
3219 zeroit:
3220         btrfs_print_data_csum_error(BTRFS_I(inode), start, csum, csum_expected,
3221                                     io_bio->mirror_num);
3222         memset(kaddr + pgoff, 1, len);
3223         flush_dcache_page(page);
3224         kunmap_atomic(kaddr);
3225         return -EIO;
3226 }
3227
3228 /*
3229  * when reads are done, we need to check csums to verify the data is correct
3230  * if there's a match, we allow the bio to finish.  If not, the code in
3231  * extent_io.c will try to find good copies for us.
3232  */
3233 static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
3234                                       u64 phy_offset, struct page *page,
3235                                       u64 start, u64 end, int mirror)
3236 {
3237         size_t offset = start - page_offset(page);
3238         struct inode *inode = page->mapping->host;
3239         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3240         struct btrfs_root *root = BTRFS_I(inode)->root;
3241
3242         if (PageChecked(page)) {
3243                 ClearPageChecked(page);
3244                 return 0;
3245         }
3246
3247         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
3248                 return 0;
3249
3250         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
3251             test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
3252                 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM);
3253                 return 0;
3254         }
3255
3256         phy_offset >>= inode->i_sb->s_blocksize_bits;
3257         return __readpage_endio_check(inode, io_bio, phy_offset, page, offset,
3258                                       start, (size_t)(end - start + 1));
3259 }
3260
3261 /*
3262  * btrfs_add_delayed_iput - perform a delayed iput on @inode
3263  *
3264  * @inode: The inode we want to perform iput on
3265  *
3266  * This function uses the generic vfs_inode::i_count to track whether we should
3267  * just decrement it (in case it's > 1) or if this is the last iput then link
3268  * the inode to the delayed iput machinery. Delayed iputs are processed at
3269  * transaction commit time/superblock commit/cleaner kthread.
3270  */
3271 void btrfs_add_delayed_iput(struct inode *inode)
3272 {
3273         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3274         struct btrfs_inode *binode = BTRFS_I(inode);
3275
3276         if (atomic_add_unless(&inode->i_count, -1, 1))
3277                 return;
3278
3279         spin_lock(&fs_info->delayed_iput_lock);
3280         ASSERT(list_empty(&binode->delayed_iput));
3281         list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs);
3282         spin_unlock(&fs_info->delayed_iput_lock);
3283 }
3284
3285 void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
3286 {
3287
3288         spin_lock(&fs_info->delayed_iput_lock);
3289         while (!list_empty(&fs_info->delayed_iputs)) {
3290                 struct btrfs_inode *inode;
3291
3292                 inode = list_first_entry(&fs_info->delayed_iputs,
3293                                 struct btrfs_inode, delayed_iput);
3294                 list_del_init(&inode->delayed_iput);
3295                 spin_unlock(&fs_info->delayed_iput_lock);
3296                 iput(&inode->vfs_inode);
3297                 spin_lock(&fs_info->delayed_iput_lock);
3298         }
3299         spin_unlock(&fs_info->delayed_iput_lock);
3300 }
3301
3302 /*
3303  * This is called in transaction commit time. If there are no orphan
3304  * files in the subvolume, it removes orphan item and frees block_rsv
3305  * structure.
3306  */
3307 void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
3308                               struct btrfs_root *root)
3309 {
3310         struct btrfs_fs_info *fs_info = root->fs_info;
3311         struct btrfs_block_rsv *block_rsv;
3312         int ret;
3313
3314         if (atomic_read(&root->orphan_inodes) ||
3315             root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
3316                 return;
3317
3318         spin_lock(&root->orphan_lock);
3319         if (atomic_read(&root->orphan_inodes)) {
3320                 spin_unlock(&root->orphan_lock);
3321                 return;
3322         }
3323
3324         if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) {
3325                 spin_unlock(&root->orphan_lock);
3326                 return;
3327         }
3328
3329         block_rsv = root->orphan_block_rsv;
3330         root->orphan_block_rsv = NULL;
3331         spin_unlock(&root->orphan_lock);
3332
3333         if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state) &&
3334             btrfs_root_refs(&root->root_item) > 0) {
3335                 ret = btrfs_del_orphan_item(trans, fs_info->tree_root,
3336                                             root->root_key.objectid);
3337                 if (ret)
3338                         btrfs_abort_transaction(trans, ret);
3339                 else
3340                         clear_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
3341                                   &root->state);
3342         }
3343
3344         if (block_rsv) {
3345                 WARN_ON(block_rsv->size > 0);
3346                 btrfs_free_block_rsv(fs_info, block_rsv);
3347         }
3348 }
3349
3350 /*
3351  * This creates an orphan entry for the given inode in case something goes
3352  * wrong in the middle of an unlink/truncate.
3353  *
3354  * NOTE: caller of this function should reserve 5 units of metadata for
3355  *       this function.
3356  */
3357 int btrfs_orphan_add(struct btrfs_trans_handle *trans,
3358                 struct btrfs_inode *inode)
3359 {
3360         struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
3361         struct btrfs_root *root = inode->root;
3362         struct btrfs_block_rsv *block_rsv = NULL;
3363         int reserve = 0;
3364         bool insert = false;
3365         int ret;
3366
3367         if (!root->orphan_block_rsv) {
3368                 block_rsv = btrfs_alloc_block_rsv(fs_info,
3369                                                   BTRFS_BLOCK_RSV_TEMP);
3370                 if (!block_rsv)
3371                         return -ENOMEM;
3372         }
3373
3374         if (!test_and_set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3375                               &inode->runtime_flags))
3376                 insert = true;
3377
3378         if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3379                               &inode->runtime_flags))
3380                 reserve = 1;
3381
3382         spin_lock(&root->orphan_lock);
3383         /* If someone has created ->orphan_block_rsv, be happy to use it. */
3384         if (!root->orphan_block_rsv) {
3385                 root->orphan_block_rsv = block_rsv;
3386         } else if (block_rsv) {
3387                 btrfs_free_block_rsv(fs_info, block_rsv);
3388                 block_rsv = NULL;
3389         }
3390
3391         if (insert)
3392                 atomic_inc(&root->orphan_inodes);
3393         spin_unlock(&root->orphan_lock);
3394
3395         /* grab metadata reservation from transaction handle */
3396         if (reserve) {
3397                 ret = btrfs_orphan_reserve_metadata(trans, inode);
3398                 ASSERT(!ret);
3399                 if (ret) {
3400                         /*
3401                          * dec doesn't need spin_lock as ->orphan_block_rsv
3402                          * would be released only if ->orphan_inodes is
3403                          * zero.
3404                          */
3405                         atomic_dec(&root->orphan_inodes);
3406                         clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3407                                   &inode->runtime_flags);
3408                         if (insert)
3409                                 clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3410                                           &inode->runtime_flags);
3411                         return ret;
3412                 }
3413         }
3414
3415         /* insert an orphan item to track this unlinked/truncated file */
3416         if (insert) {
3417                 ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
3418                 if (ret) {
3419                         if (reserve) {
3420                                 clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3421                                           &inode->runtime_flags);
3422                                 btrfs_orphan_release_metadata(inode);
3423                         }
3424                         /*
3425                          * btrfs_orphan_commit_root may race with us and set
3426                          * ->orphan_block_rsv to zero, in order to avoid that,
3427                          * decrease ->orphan_inodes after everything is done.
3428                          */
3429                         atomic_dec(&root->orphan_inodes);
3430                         if (ret != -EEXIST) {
3431                                 clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3432                                           &inode->runtime_flags);
3433                                 btrfs_abort_transaction(trans, ret);
3434                                 return ret;
3435                         }
3436                 }
3437                 ret = 0;
3438         }
3439
3440         return 0;
3441 }
3442
3443 /*
3444  * We have done the truncate/delete so we can go ahead and remove the orphan
3445  * item for this particular inode.
3446  */
3447 static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
3448                             struct btrfs_inode *inode)
3449 {
3450         struct btrfs_root *root = inode->root;
3451         int delete_item = 0;
3452         int ret = 0;
3453
3454         if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3455                                &inode->runtime_flags))
3456                 delete_item = 1;
3457
3458         if (delete_item && trans)
3459                 ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
3460
3461         if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3462                                &inode->runtime_flags))
3463                 btrfs_orphan_release_metadata(inode);
3464
3465         /*
3466          * btrfs_orphan_commit_root may race with us and set ->orphan_block_rsv
3467          * to zero, in order to avoid that, decrease ->orphan_inodes after
3468          * everything is done.
3469          */
3470         if (delete_item)
3471                 atomic_dec(&root->orphan_inodes);
3472
3473         return ret;
3474 }
3475
3476 /*
3477  * this cleans up any orphans that may be left on the list from the last use
3478  * of this root.
3479  */
3480 int btrfs_orphan_cleanup(struct btrfs_root *root)
3481 {
3482         struct btrfs_fs_info *fs_info = root->fs_info;
3483         struct btrfs_path *path;
3484         struct extent_buffer *leaf;
3485         struct btrfs_key key, found_key;
3486         struct btrfs_trans_handle *trans;
3487         struct inode *inode;
3488         u64 last_objectid = 0;
3489         int ret = 0, nr_unlink = 0, nr_truncate = 0;
3490
3491         if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
3492                 return 0;
3493
3494         path = btrfs_alloc_path();
3495         if (!path) {
3496                 ret = -ENOMEM;
3497                 goto out;
3498         }
3499         path->reada = READA_BACK;
3500
3501         key.objectid = BTRFS_ORPHAN_OBJECTID;
3502         key.type = BTRFS_ORPHAN_ITEM_KEY;
3503         key.offset = (u64)-1;
3504
3505         while (1) {
3506                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3507                 if (ret < 0)
3508                         goto out;
3509
3510                 /*
3511                  * if ret == 0 means we found what we were searching for, which
3512                  * is weird, but possible, so only screw with path if we didn't
3513                  * find the key and see if we have stuff that matches
3514                  */
3515                 if (ret > 0) {
3516                         ret = 0;
3517                         if (path->slots[0] == 0)
3518                                 break;
3519                         path->slots[0]--;
3520                 }
3521
3522                 /* pull out the item */
3523                 leaf = path->nodes[0];
3524                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3525
3526                 /* make sure the item matches what we want */
3527                 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
3528                         break;
3529                 if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
3530                         break;
3531
3532                 /* release the path since we're done with it */
3533                 btrfs_release_path(path);
3534
3535                 /*
3536                  * this is where we are basically btrfs_lookup, without the
3537                  * crossing root thing.  we store the inode number in the
3538                  * offset of the orphan item.
3539                  */
3540
3541                 if (found_key.offset == last_objectid) {
3542                         btrfs_err(fs_info,
3543                                   "Error removing orphan entry, stopping orphan cleanup");
3544                         ret = -EINVAL;
3545                         goto out;
3546                 }
3547
3548                 last_objectid = found_key.offset;
3549
3550                 found_key.objectid = found_key.offset;
3551                 found_key.type = BTRFS_INODE_ITEM_KEY;
3552                 found_key.offset = 0;
3553                 inode = btrfs_iget(fs_info->sb, &found_key, root, NULL);
3554                 ret = PTR_ERR_OR_ZERO(inode);
3555                 if (ret && ret != -ENOENT)
3556                         goto out;
3557
3558                 if (ret == -ENOENT && root == fs_info->tree_root) {
3559                         struct btrfs_root *dead_root;
3560                         struct btrfs_fs_info *fs_info = root->fs_info;
3561                         int is_dead_root = 0;
3562
3563                         /*
3564                          * this is an orphan in the tree root. Currently these
3565                          * could come from 2 sources:
3566                          *  a) a snapshot deletion in progress
3567                          *  b) a free space cache inode
3568                          * We need to distinguish those two, as the snapshot
3569                          * orphan must not get deleted.
3570                          * find_dead_roots already ran before us, so if this
3571                          * is a snapshot deletion, we should find the root
3572                          * in the dead_roots list
3573                          */
3574                         spin_lock(&fs_info->trans_lock);
3575                         list_for_each_entry(dead_root, &fs_info->dead_roots,
3576                                             root_list) {
3577                                 if (dead_root->root_key.objectid ==
3578                                     found_key.objectid) {
3579                                         is_dead_root = 1;
3580                                         break;
3581                                 }
3582                         }
3583                         spin_unlock(&fs_info->trans_lock);
3584                         if (is_dead_root) {
3585                                 /* prevent this orphan from being found again */
3586                                 key.offset = found_key.objectid - 1;
3587                                 continue;
3588                         }
3589                 }
3590                 /*
3591                  * Inode is already gone but the orphan item is still there,
3592                  * kill the orphan item.
3593                  */
3594                 if (ret == -ENOENT) {
3595                         trans = btrfs_start_transaction(root, 1);
3596                         if (IS_ERR(trans)) {
3597                                 ret = PTR_ERR(trans);
3598                                 goto out;
3599                         }
3600                         btrfs_debug(fs_info, "auto deleting %Lu",
3601                                     found_key.objectid);
3602                         ret = btrfs_del_orphan_item(trans, root,
3603                                                     found_key.objectid);
3604                         btrfs_end_transaction(trans);
3605                         if (ret)
3606                                 goto out;
3607                         continue;
3608                 }
3609
3610                 /*
3611                  * add this inode to the orphan list so btrfs_orphan_del does
3612                  * the proper thing when we hit it
3613                  */
3614                 set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3615                         &BTRFS_I(inode)->runtime_flags);
3616                 atomic_inc(&root->orphan_inodes);
3617
3618                 /* if we have links, this was a truncate, lets do that */
3619                 if (inode->i_nlink) {
3620                         if (WARN_ON(!S_ISREG(inode->i_mode))) {
3621                                 iput(inode);
3622                                 continue;
3623                         }
3624                         nr_truncate++;
3625
3626                         /* 1 for the orphan item deletion. */
3627                         trans = btrfs_start_transaction(root, 1);
3628                         if (IS_ERR(trans)) {
3629                                 iput(inode);
3630                                 ret = PTR_ERR(trans);
3631                                 goto out;
3632                         }
3633                         ret = btrfs_orphan_add(trans, BTRFS_I(inode));
3634                         btrfs_end_transaction(trans);
3635                         if (ret) {
3636                                 iput(inode);
3637                                 goto out;
3638                         }
3639
3640                         ret = btrfs_truncate(inode, false);
3641                         if (ret)
3642                                 btrfs_orphan_del(NULL, BTRFS_I(inode));
3643                 } else {
3644                         nr_unlink++;
3645                 }
3646
3647                 /* this will do delete_inode and everything for us */
3648                 iput(inode);
3649                 if (ret)
3650                         goto out;
3651         }
3652         /* release the path since we're done with it */
3653         btrfs_release_path(path);
3654
3655         root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
3656
3657         if (root->orphan_block_rsv)
3658                 btrfs_block_rsv_release(fs_info, root->orphan_block_rsv,
3659                                         (u64)-1);
3660
3661         if (root->orphan_block_rsv ||
3662             test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
3663                 trans = btrfs_join_transaction(root);
3664                 if (!IS_ERR(trans))
3665                         btrfs_end_transaction(trans);
3666         }
3667
3668         if (nr_unlink)
3669                 btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink);
3670         if (nr_truncate)
3671                 btrfs_debug(fs_info, "truncated %d orphans", nr_truncate);
3672
3673 out:
3674         if (ret)
3675                 btrfs_err(fs_info, "could not do orphan cleanup %d", ret);
3676         btrfs_free_path(path);
3677         return ret;
3678 }
3679
3680 /*
3681  * very simple check to peek ahead in the leaf looking for xattrs.  If we
3682  * don't find any xattrs, we know there can't be any acls.
3683  *
3684  * slot is the slot the inode is in, objectid is the objectid of the inode
3685  */
3686 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
3687                                           int slot, u64 objectid,
3688                                           int *first_xattr_slot)
3689 {
3690         u32 nritems = btrfs_header_nritems(leaf);
3691         struct btrfs_key found_key;
3692         static u64 xattr_access = 0;
3693         static u64 xattr_default = 0;
3694         int scanned = 0;
3695
3696         if (!xattr_access) {
3697                 xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS,
3698                                         strlen(XATTR_NAME_POSIX_ACL_ACCESS));
3699                 xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT,
3700                                         strlen(XATTR_NAME_POSIX_ACL_DEFAULT));
3701         }
3702
3703         slot++;
3704         *first_xattr_slot = -1;
3705         while (slot < nritems) {
3706                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3707
3708                 /* we found a different objectid, there must not be acls */
3709                 if (found_key.objectid != objectid)
3710                         return 0;
3711
3712                 /* we found an xattr, assume we've got an acl */
3713                 if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
3714                         if (*first_xattr_slot == -1)
3715                                 *first_xattr_slot = slot;
3716                         if (found_key.offset == xattr_access ||
3717                             found_key.offset == xattr_default)
3718                                 return 1;
3719                 }
3720
3721                 /*
3722                  * we found a key greater than an xattr key, there can't
3723                  * be any acls later on
3724                  */
3725                 if (found_key.type > BTRFS_XATTR_ITEM_KEY)
3726                         return 0;
3727
3728                 slot++;
3729                 scanned++;
3730
3731                 /*
3732                  * it goes inode, inode backrefs, xattrs, extents,
3733                  * so if there are a ton of hard links to an inode there can
3734                  * be a lot of backrefs.  Don't waste time searching too hard,
3735                  * this is just an optimization
3736                  */
3737                 if (scanned >= 8)
3738                         break;
3739         }
3740         /* we hit the end of the leaf before we found an xattr or
3741          * something larger than an xattr.  We have to assume the inode
3742          * has acls
3743          */
3744         if (*first_xattr_slot == -1)
3745                 *first_xattr_slot = slot;
3746         return 1;
3747 }
3748
3749 /*
3750  * read an inode from the btree into the in-memory inode
3751  */
3752 static int btrfs_read_locked_inode(struct inode *inode)
3753 {
3754         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3755         struct btrfs_path *path;
3756         struct extent_buffer *leaf;
3757         struct btrfs_inode_item *inode_item;
3758         struct btrfs_root *root = BTRFS_I(inode)->root;
3759         struct btrfs_key location;
3760         unsigned long ptr;
3761         int maybe_acls;
3762         u32 rdev;
3763         int ret;
3764         bool filled = false;
3765         int first_xattr_slot;
3766
3767         ret = btrfs_fill_inode(inode, &rdev);
3768         if (!ret)
3769                 filled = true;
3770
3771         path = btrfs_alloc_path();
3772         if (!path) {
3773                 ret = -ENOMEM;
3774                 goto make_bad;
3775         }
3776
3777         memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
3778
3779         ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
3780         if (ret) {
3781                 if (ret > 0)
3782                         ret = -ENOENT;
3783                 goto make_bad;
3784         }
3785
3786         leaf = path->nodes[0];
3787
3788         if (filled)
3789                 goto cache_index;
3790
3791         inode_item = btrfs_item_ptr(leaf, path->slots[0],
3792                                     struct btrfs_inode_item);
3793         inode->i_mode = btrfs_inode_mode(leaf, inode_item);
3794         set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
3795         i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
3796         i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
3797         btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item));
3798
3799         inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime);
3800         inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime);
3801
3802         inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime);
3803         inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime);
3804
3805         inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime);
3806         inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime);
3807
3808         BTRFS_I(inode)->i_otime.tv_sec =
3809                 btrfs_timespec_sec(leaf, &inode_item->otime);
3810         BTRFS_I(inode)->i_otime.tv_nsec =
3811                 btrfs_timespec_nsec(leaf, &inode_item->otime);
3812
3813         inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
3814         BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
3815         BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
3816
3817         inode_set_iversion_queried(inode,
3818                                    btrfs_inode_sequence(leaf, inode_item));
3819         inode->i_generation = BTRFS_I(inode)->generation;
3820         inode->i_rdev = 0;
3821         rdev = btrfs_inode_rdev(leaf, inode_item);
3822
3823         BTRFS_I(inode)->index_cnt = (u64)-1;
3824         BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
3825
3826 cache_index:
3827         /*
3828          * If we were modified in the current generation and evicted from memory
3829          * and then re-read we need to do a full sync since we don't have any
3830          * idea about which extents were modified before we were evicted from
3831          * cache.
3832          *
3833          * This is required for both inode re-read from disk and delayed inode
3834          * in delayed_nodes_tree.
3835          */
3836         if (BTRFS_I(inode)->last_trans == fs_info->generation)
3837                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3838                         &BTRFS_I(inode)->runtime_flags);
3839
3840         /*
3841          * We don't persist the id of the transaction where an unlink operation
3842          * against the inode was last made. So here we assume the inode might
3843          * have been evicted, and therefore the exact value of last_unlink_trans
3844          * lost, and set it to last_trans to avoid metadata inconsistencies
3845          * between the inode and its parent if the inode is fsync'ed and the log
3846          * replayed. For example, in the scenario:
3847          *
3848          * touch mydir/foo
3849          * ln mydir/foo mydir/bar
3850          * sync
3851          * unlink mydir/bar
3852          * echo 2 > /proc/sys/vm/drop_caches   # evicts inode
3853          * xfs_io -c fsync mydir/foo
3854          * <power failure>
3855          * mount fs, triggers fsync log replay
3856          *
3857          * We must make sure that when we fsync our inode foo we also log its
3858          * parent inode, otherwise after log replay the parent still has the
3859          * dentry with the "bar" name but our inode foo has a link count of 1
3860          * and doesn't have an inode ref with the name "bar" anymore.
3861          *
3862          * Setting last_unlink_trans to last_trans is a pessimistic approach,
3863          * but it guarantees correctness at the expense of occasional full
3864          * transaction commits on fsync if our inode is a directory, or if our
3865          * inode is not a directory, logging its parent unnecessarily.
3866          */
3867         BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;
3868
3869         path->slots[0]++;
3870         if (inode->i_nlink != 1 ||
3871             path->slots[0] >= btrfs_header_nritems(leaf))
3872                 goto cache_acl;
3873
3874         btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
3875         if (location.objectid != btrfs_ino(BTRFS_I(inode)))
3876                 goto cache_acl;
3877
3878         ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3879         if (location.type == BTRFS_INODE_REF_KEY) {
3880                 struct btrfs_inode_ref *ref;
3881
3882                 ref = (struct btrfs_inode_ref *)ptr;
3883                 BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref);
3884         } else if (location.type == BTRFS_INODE_EXTREF_KEY) {
3885                 struct btrfs_inode_extref *extref;
3886
3887                 extref = (struct btrfs_inode_extref *)ptr;
3888                 BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf,
3889                                                                      extref);
3890         }
3891 cache_acl:
3892         /*
3893          * try to precache a NULL acl entry for files that don't have
3894          * any xattrs or acls
3895          */
3896         maybe_acls = acls_after_inode_item(leaf, path->slots[0],
3897                         btrfs_ino(BTRFS_I(inode)), &first_xattr_slot);
3898         if (first_xattr_slot != -1) {
3899                 path->slots[0] = first_xattr_slot;
3900                 ret = btrfs_load_inode_props(inode, path);
3901                 if (ret)
3902                         btrfs_err(fs_info,
3903                                   "error loading props for ino %llu (root %llu): %d",
3904                                   btrfs_ino(BTRFS_I(inode)),
3905                                   root->root_key.objectid, ret);
3906         }
3907         btrfs_free_path(path);
3908
3909         if (!maybe_acls)
3910                 cache_no_acl(inode);
3911
3912         switch (inode->i_mode & S_IFMT) {
3913         case S_IFREG:
3914                 inode->i_mapping->a_ops = &btrfs_aops;
3915                 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3916                 inode->i_fop = &btrfs_file_operations;
3917                 inode->i_op = &btrfs_file_inode_operations;
3918                 break;
3919         case S_IFDIR:
3920                 inode->i_fop = &btrfs_dir_file_operations;
3921                 inode->i_op = &btrfs_dir_inode_operations;
3922                 break;
3923         case S_IFLNK:
3924                 inode->i_op = &btrfs_symlink_inode_operations;
3925                 inode_nohighmem(inode);
3926                 inode->i_mapping->a_ops = &btrfs_symlink_aops;
3927                 break;
3928         default:
3929                 inode->i_op = &btrfs_special_inode_operations;
3930                 init_special_inode(inode, inode->i_mode, rdev);
3931                 break;
3932         }
3933
3934         btrfs_update_iflags(inode);
3935         return 0;
3936
3937 make_bad:
3938         btrfs_free_path(path);
3939         make_bad_inode(inode);
3940         return ret;
3941 }
3942
3943 /*
3944  * given a leaf and an inode, copy the inode fields into the leaf
3945  */
3946 static void fill_inode_item(struct btrfs_trans_handle *trans,
3947                             struct extent_buffer *leaf,
3948                             struct btrfs_inode_item *item,
3949                             struct inode *inode)
3950 {
3951         struct btrfs_map_token token;
3952
3953         btrfs_init_map_token(&token);
3954
3955         btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3956         btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3957         btrfs_set_token_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size,
3958                                    &token);
3959         btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3960         btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3961
3962         btrfs_set_token_timespec_sec(leaf, &item->atime,
3963                                      inode->i_atime.tv_sec, &token);
3964         btrfs_set_token_timespec_nsec(leaf, &item->atime,
3965                                       inode->i_atime.tv_nsec, &token);
3966
3967         btrfs_set_token_timespec_sec(leaf, &item->mtime,
3968                                      inode->i_mtime.tv_sec, &token);
3969         btrfs_set_token_timespec_nsec(leaf, &item->mtime,
3970                                       inode->i_mtime.tv_nsec, &token);
3971
3972         btrfs_set_token_timespec_sec(leaf, &item->ctime,
3973                                      inode->i_ctime.tv_sec, &token);
3974         btrfs_set_token_timespec_nsec(leaf, &item->ctime,
3975                                       inode->i_ctime.tv_nsec, &token);
3976
3977         btrfs_set_token_timespec_sec(leaf, &item->otime,
3978                                      BTRFS_I(inode)->i_otime.tv_sec, &token);
3979         btrfs_set_token_timespec_nsec(leaf, &item->otime,
3980                                       BTRFS_I(inode)->i_otime.tv_nsec, &token);
3981
3982         btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3983                                      &token);
3984         btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation,
3985                                          &token);
3986         btrfs_set_token_inode_sequence(leaf, item, inode_peek_iversion(inode),
3987                                        &token);
3988         btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3989         btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3990         btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3991         btrfs_set_token_inode_block_group(leaf, item, 0, &token);
3992 }
3993
3994 /*
3995  * copy everything in the in-memory inode into the btree.
3996  */
3997 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
3998                                 struct btrfs_root *root, struct inode *inode)
3999 {
4000         struct btrfs_inode_item *inode_item;
4001         struct btrfs_path *path;
4002         struct extent_buffer *leaf;
4003         int ret;
4004
4005         path = btrfs_alloc_path();
4006         if (!path)
4007                 return -ENOMEM;
4008
4009         path->leave_spinning = 1;
4010         ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
4011                                  1);
4012         if (ret) {
4013                 if (ret > 0)
4014                         ret = -ENOENT;
4015                 goto failed;
4016         }
4017
4018         leaf = path->nodes[0];
4019         inode_item = btrfs_item_ptr(leaf, path->slots[0],
4020                                     struct btrfs_inode_item);
4021
4022         fill_inode_item(trans, leaf, inode_item, inode);
4023         btrfs_mark_buffer_dirty(leaf);
4024         btrfs_set_inode_last_trans(trans, inode);
4025         ret = 0;
4026 failed:
4027         btrfs_free_path(path);
4028         return ret;
4029 }
4030
4031 /*
4032  * copy everything in the in-memory inode into the btree.
4033  */
4034 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
4035                                 struct btrfs_root *root, struct inode *inode)
4036 {
4037         struct btrfs_fs_info *fs_info = root->fs_info;
4038         int ret;
4039
4040         /*
4041          * If the inode is a free space inode, we can deadlock during commit
4042          * if we put it into the delayed code.
4043          *
4044          * The data relocation inode should also be directly updated
4045          * without delay
4046          */
4047         if (!btrfs_is_free_space_inode(BTRFS_I(inode))
4048             && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
4049             && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
4050                 btrfs_update_root_times(trans, root);
4051
4052                 ret = btrfs_delayed_update_inode(trans, root, inode);
4053                 if (!ret)
4054                         btrfs_set_inode_last_trans(trans, inode);
4055                 return ret;
4056         }
4057
4058         return btrfs_update_inode_item(trans, root, inode);
4059 }
4060
4061 noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
4062                                          struct btrfs_root *root,
4063                                          struct inode *inode)
4064 {
4065         int ret;
4066
4067         ret = btrfs_update_inode(trans, root, inode);
4068         if (ret == -ENOSPC)
4069                 return btrfs_update_inode_item(trans, root, inode);
4070         return ret;
4071 }
4072
4073 /*
4074  * unlink helper that gets used here in inode.c and in the tree logging
4075  * recovery code.  It remove a link in a directory with a given name, and
4076  * also drops the back refs in the inode to the directory
4077  */
4078 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
4079                                 struct btrfs_root *root,
4080                                 struct btrfs_inode *dir,
4081                                 struct btrfs_inode *inode,
4082                                 const char *name, int name_len)
4083 {
4084         struct btrfs_fs_info *fs_info = root->fs_info;
4085         struct btrfs_path *path;
4086         int ret = 0;
4087         struct extent_buffer *leaf;
4088         struct btrfs_dir_item *di;
4089         struct btrfs_key key;
4090         u64 index;
4091         u64 ino = btrfs_ino(inode);
4092         u64 dir_ino = btrfs_ino(dir);
4093
4094         path = btrfs_alloc_path();
4095         if (!path) {
4096                 ret = -ENOMEM;
4097                 goto out;
4098         }
4099
4100         path->leave_spinning = 1;
4101         di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
4102                                     name, name_len, -1);
4103         if (IS_ERR(di)) {
4104                 ret = PTR_ERR(di);
4105                 goto err;
4106         }
4107         if (!di) {
4108                 ret = -ENOENT;
4109                 goto err;
4110         }
4111         leaf = path->nodes[0];
4112         btrfs_dir_item_key_to_cpu(leaf, di, &key);
4113         ret = btrfs_delete_one_dir_name(trans, root, path, di);
4114         if (ret)
4115                 goto err;
4116         btrfs_release_path(path);
4117
4118         /*
4119          * If we don't have dir index, we have to get it by looking up
4120          * the inode ref, since we get the inode ref, remove it directly,
4121          * it is unnecessary to do delayed deletion.
4122          *
4123          * But if we have dir index, needn't search inode ref to get it.
4124          * Since the inode ref is close to the inode item, it is better
4125          * that we delay to delete it, and just do this deletion when
4126          * we update the inode item.
4127          */
4128         if (inode->dir_index) {
4129                 ret = btrfs_delayed_delete_inode_ref(inode);
4130                 if (!ret) {
4131                         index = inode->dir_index;
4132                         goto skip_backref;
4133                 }
4134         }
4135
4136         ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
4137                                   dir_ino, &index);
4138         if (ret) {
4139                 btrfs_info(fs_info,
4140                         "failed to delete reference to %.*s, inode %llu parent %llu",
4141                         name_len, name, ino, dir_ino);
4142                 btrfs_abort_transaction(trans, ret);
4143                 goto err;
4144         }
4145 skip_backref:
4146         ret = btrfs_delete_delayed_dir_index(trans, fs_info, dir, index);
4147         if (ret) {
4148                 btrfs_abort_transaction(trans, ret);
4149                 goto err;
4150         }
4151
4152         ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, inode,
4153                         dir_ino);
4154         if (ret != 0 && ret != -ENOENT) {
4155                 btrfs_abort_transaction(trans, ret);
4156                 goto err;
4157         }
4158
4159         ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len, dir,
4160                         index);
4161         if (ret == -ENOENT)
4162                 ret = 0;
4163         else if (ret)
4164                 btrfs_abort_transaction(trans, ret);
4165 err:
4166         btrfs_free_path(path);
4167         if (ret)
4168                 goto out;
4169
4170         btrfs_i_size_write(dir, dir->vfs_inode.i_size - name_len * 2);
4171         inode_inc_iversion(&inode->vfs_inode);
4172         inode_inc_iversion(&dir->vfs_inode);
4173         inode->vfs_inode.i_ctime = dir->vfs_inode.i_mtime =
4174                 dir->vfs_inode.i_ctime = current_time(&inode->vfs_inode);
4175         ret = btrfs_update_inode(trans, root, &dir->vfs_inode);
4176 out:
4177         return ret;
4178 }
4179
4180 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
4181                        struct btrfs_root *root,
4182                        struct btrfs_inode *dir, struct btrfs_inode *inode,
4183                        const char *name, int name_len)
4184 {
4185         int ret;
4186         ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
4187         if (!ret) {
4188                 drop_nlink(&inode->vfs_inode);
4189                 ret = btrfs_update_inode(trans, root, &inode->vfs_inode);
4190         }
4191         return ret;
4192 }
4193
4194 /*
4195  * helper to start transaction for unlink and rmdir.
4196  *
4197  * unlink and rmdir are special in btrfs, they do not always free space, so
4198  * if we cannot make our reservations the normal way try and see if there is
4199  * plenty of slack room in the global reserve to migrate, otherwise we cannot
4200  * allow the unlink to occur.
4201  */
4202 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir)
4203 {
4204         struct btrfs_root *root = BTRFS_I(dir)->root;
4205
4206         /*
4207          * 1 for the possible orphan item
4208          * 1 for the dir item
4209          * 1 for the dir index
4210          * 1 for the inode ref
4211          * 1 for the inode
4212          */
4213         return btrfs_start_transaction_fallback_global_rsv(root, 5, 5);
4214 }
4215
4216 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
4217 {
4218         struct btrfs_root *root = BTRFS_I(dir)->root;
4219         struct btrfs_trans_handle *trans;
4220         struct inode *inode = d_inode(dentry);
4221         int ret;
4222
4223         trans = __unlink_start_trans(dir);
4224         if (IS_ERR(trans))
4225                 return PTR_ERR(trans);
4226
4227         btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
4228                         0);
4229
4230         ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
4231                         BTRFS_I(d_inode(dentry)), dentry->d_name.name,
4232                         dentry->d_name.len);
4233         if (ret)
4234                 goto out;
4235
4236         if (inode->i_nlink == 0) {
4237                 ret = btrfs_orphan_add(trans, BTRFS_I(inode));
4238                 if (ret)
4239                         goto out;
4240         }
4241
4242 out:
4243         btrfs_end_transaction(trans);
4244         btrfs_btree_balance_dirty(root->fs_info);
4245         return ret;
4246 }
4247
4248 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
4249                         struct btrfs_root *root,
4250                         struct inode *dir, u64 objectid,
4251                         const char *name, int name_len)
4252 {
4253         struct btrfs_fs_info *fs_info = root->fs_info;
4254         struct btrfs_path *path;
4255         struct extent_buffer *leaf;
4256         struct btrfs_dir_item *di;
4257         struct btrfs_key key;
4258         u64 index;
4259         int ret;
4260         u64 dir_ino = btrfs_ino(BTRFS_I(dir));
4261
4262         path = btrfs_alloc_path();
4263         if (!path)
4264                 return -ENOMEM;
4265
4266         di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
4267                                    name, name_len, -1);
4268         if (IS_ERR_OR_NULL(di)) {
4269                 if (!di)
4270                         ret = -ENOENT;
4271                 else
4272                         ret = PTR_ERR(di);
4273                 goto out;
4274         }
4275
4276         leaf = path->nodes[0];
4277         btrfs_dir_item_key_to_cpu(leaf, di, &key);
4278         WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
4279         ret = btrfs_delete_one_dir_name(trans, root, path, di);
4280         if (ret) {
4281                 btrfs_abort_transaction(trans, ret);
4282                 goto out;
4283         }
4284         btrfs_release_path(path);
4285
4286         ret = btrfs_del_root_ref(trans, fs_info, objectid,
4287                                  root->root_key.objectid, dir_ino,
4288                                  &index, name, name_len);
4289         if (ret < 0) {
4290                 if (ret != -ENOENT) {
4291                         btrfs_abort_transaction(trans, ret);
4292                         goto out;
4293                 }
4294                 di = btrfs_search_dir_index_item(root, path, dir_ino,
4295                                                  name, name_len);
4296                 if (IS_ERR_OR_NULL(di)) {
4297                         if (!di)
4298                                 ret = -ENOENT;
4299                         else
4300                                 ret = PTR_ERR(di);
4301                         btrfs_abort_transaction(trans, ret);
4302                         goto out;
4303                 }
4304
4305                 leaf = path->nodes[0];
4306                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4307                 btrfs_release_path(path);
4308                 index = key.offset;
4309         }
4310         btrfs_release_path(path);
4311
4312         ret = btrfs_delete_delayed_dir_index(trans, fs_info, BTRFS_I(dir), index);
4313         if (ret) {
4314                 btrfs_abort_transaction(trans, ret);
4315                 goto out;
4316         }
4317
4318         btrfs_i_size_write(BTRFS_I(dir), dir->i_size - name_len * 2);
4319         inode_inc_iversion(dir);
4320         dir->i_mtime = dir->i_ctime = current_time(dir);
4321         ret = btrfs_update_inode_fallback(trans, root, dir);
4322         if (ret)
4323                 btrfs_abort_transaction(trans, ret);
4324 out:
4325         btrfs_free_path(path);
4326         return ret;
4327 }
4328
4329 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
4330 {
4331         struct inode *inode = d_inode(dentry);
4332         int err = 0;
4333         struct btrfs_root *root = BTRFS_I(dir)->root;
4334         struct btrfs_trans_handle *trans;
4335         u64 last_unlink_trans;
4336
4337         if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
4338                 return -ENOTEMPTY;
4339         if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID)
4340                 return -EPERM;
4341
4342         trans = __unlink_start_trans(dir);
4343         if (IS_ERR(trans))
4344                 return PTR_ERR(trans);
4345
4346         if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
4347                 err = btrfs_unlink_subvol(trans, root, dir,
4348                                           BTRFS_I(inode)->location.objectid,
4349                                           dentry->d_name.name,
4350                                           dentry->d_name.len);
4351                 goto out;
4352         }
4353
4354         err = btrfs_orphan_add(trans, BTRFS_I(inode));
4355         if (err)
4356                 goto out;
4357
4358         last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;
4359
4360         /* now the directory is empty */
4361         err = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
4362                         BTRFS_I(d_inode(dentry)), dentry->d_name.name,
4363                         dentry->d_name.len);
4364         if (!err) {
4365                 btrfs_i_size_write(BTRFS_I(inode), 0);
4366                 /*
4367                  * Propagate the last_unlink_trans value of the deleted dir to
4368                  * its parent directory. This is to prevent an unrecoverable
4369                  * log tree in the case we do something like this:
4370                  * 1) create dir foo
4371                  * 2) create snapshot under dir foo
4372                  * 3) delete the snapshot
4373                  * 4) rmdir foo
4374                  * 5) mkdir foo
4375                  * 6) fsync foo or some file inside foo
4376                  */
4377                 if (last_unlink_trans >= trans->transid)
4378                         BTRFS_I(dir)->last_unlink_trans = last_unlink_trans;
4379         }
4380 out:
4381         btrfs_end_transaction(trans);
4382         btrfs_btree_balance_dirty(root->fs_info);
4383
4384         return err;
4385 }
4386
4387 static int truncate_space_check(struct btrfs_trans_handle *trans,
4388                                 struct btrfs_root *root,
4389                                 u64 bytes_deleted)
4390 {
4391         struct btrfs_fs_info *fs_info = root->fs_info;
4392         int ret;
4393
4394         /*
4395          * This is only used to apply pressure to the enospc system, we don't
4396          * intend to use this reservation at all.
4397          */
4398         bytes_deleted = btrfs_csum_bytes_to_leaves(fs_info, bytes_deleted);
4399         bytes_deleted *= fs_info->nodesize;
4400         ret = btrfs_block_rsv_add(root, &fs_info->trans_block_rsv,
4401                                   bytes_deleted, BTRFS_RESERVE_NO_FLUSH);
4402         if (!ret) {
4403                 trace_btrfs_space_reservation(fs_info, "transaction",
4404                                               trans->transid,
4405                                               bytes_deleted, 1);
4406                 trans->bytes_reserved += bytes_deleted;
4407         }
4408         return ret;
4409
4410 }
4411
4412 /*
4413  * Return this if we need to call truncate_block for the last bit of the
4414  * truncate.
4415  */
4416 #define NEED_TRUNCATE_BLOCK 1
4417
4418 /*
4419  * this can truncate away extent items, csum items and directory items.
4420  * It starts at a high offset and removes keys until it can't find
4421  * any higher than new_size
4422  *
4423  * csum items that cross the new i_size are truncated to the new size
4424  * as well.
4425  *
4426  * min_type is the minimum key type to truncate down to.  If set to 0, this
4427  * will kill all the items on this inode, including the INODE_ITEM_KEY.
4428  */
4429 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
4430                                struct btrfs_root *root,
4431                                struct inode *inode,
4432                                u64 new_size, u32 min_type)
4433 {
4434         struct btrfs_fs_info *fs_info = root->fs_info;
4435         struct btrfs_path *path;
4436         struct extent_buffer *leaf;
4437         struct btrfs_file_extent_item *fi;
4438         struct btrfs_key key;
4439         struct btrfs_key found_key;
4440         u64 extent_start = 0;
4441         u64 extent_num_bytes = 0;
4442         u64 extent_offset = 0;
4443         u64 item_end = 0;
4444         u64 last_size = new_size;
4445         u32 found_type = (u8)-1;
4446         int found_extent;
4447         int del_item;
4448         int pending_del_nr = 0;
4449         int pending_del_slot = 0;
4450         int extent_type = -1;
4451         int ret;
4452         int err = 0;
4453         u64 ino = btrfs_ino(BTRFS_I(inode));
4454         u64 bytes_deleted = 0;
4455         bool be_nice = false;
4456         bool should_throttle = false;
4457         bool should_end = false;
4458
4459         BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
4460
4461         /*
4462          * for non-free space inodes and ref cows, we want to back off from
4463          * time to time
4464          */
4465         if (!btrfs_is_free_space_inode(BTRFS_I(inode)) &&
4466             test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4467                 be_nice = true;
4468
4469         path = btrfs_alloc_path();
4470         if (!path)
4471                 return -ENOMEM;
4472         path->reada = READA_BACK;
4473
4474         /*
4475          * We want to drop from the next block forward in case this new size is
4476          * not block aligned since we will be keeping the last block of the
4477          * extent just the way it is.
4478          */
4479         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
4480             root == fs_info->tree_root)
4481                 btrfs_drop_extent_cache(BTRFS_I(inode), ALIGN(new_size,
4482                                         fs_info->sectorsize),
4483                                         (u64)-1, 0);
4484
4485         /*
4486          * This function is also used to drop the items in the log tree before
4487          * we relog the inode, so if root != BTRFS_I(inode)->root, it means
4488          * it is used to drop the loged items. So we shouldn't kill the delayed
4489          * items.
4490          */
4491         if (min_type == 0 && root == BTRFS_I(inode)->root)
4492                 btrfs_kill_delayed_inode_items(BTRFS_I(inode));
4493
4494         key.objectid = ino;
4495         key.offset = (u64)-1;
4496         key.type = (u8)-1;
4497
4498 search_again:
4499         /*
4500          * with a 16K leaf size and 128MB extents, you can actually queue
4501          * up a huge file in a single leaf.  Most of the time that
4502          * bytes_deleted is > 0, it will be huge by the time we get here
4503          */
4504         if (be_nice && bytes_deleted > SZ_32M) {
4505                 if (btrfs_should_end_transaction(trans)) {
4506                         err = -EAGAIN;
4507                         goto error;
4508                 }
4509         }
4510
4511
4512         path->leave_spinning = 1;
4513         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
4514         if (ret < 0) {
4515                 err = ret;
4516                 goto out;
4517         }
4518
4519         if (ret > 0) {
4520                 /* there are no items in the tree for us to truncate, we're
4521                  * done
4522                  */
4523                 if (path->slots[0] == 0)
4524                         goto out;
4525                 path->slots[0]--;
4526         }
4527
4528         while (1) {
4529                 fi = NULL;
4530                 leaf = path->nodes[0];
4531                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4532                 found_type = found_key.type;
4533
4534                 if (found_key.objectid != ino)
4535                         break;
4536
4537                 if (found_type < min_type)
4538                         break;
4539
4540                 item_end = found_key.offset;
4541                 if (found_type == BTRFS_EXTENT_DATA_KEY) {
4542                         fi = btrfs_item_ptr(leaf, path->slots[0],
4543                                             struct btrfs_file_extent_item);
4544                         extent_type = btrfs_file_extent_type(leaf, fi);
4545                         if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4546                                 item_end +=
4547                                     btrfs_file_extent_num_bytes(leaf, fi);
4548
4549                                 trace_btrfs_truncate_show_fi_regular(
4550                                         BTRFS_I(inode), leaf, fi,
4551                                         found_key.offset);
4552                         } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
4553                                 item_end += btrfs_file_extent_inline_len(leaf,
4554                                                          path->slots[0], fi);
4555
4556                                 trace_btrfs_truncate_show_fi_inline(
4557                                         BTRFS_I(inode), leaf, fi, path->slots[0],
4558                                         found_key.offset);
4559                         }
4560                         item_end--;
4561                 }
4562                 if (found_type > min_type) {
4563                         del_item = 1;
4564                 } else {
4565                         if (item_end < new_size)
4566                                 break;
4567                         if (found_key.offset >= new_size)
4568                                 del_item = 1;
4569                         else
4570                                 del_item = 0;
4571                 }
4572                 found_extent = 0;
4573                 /* FIXME, shrink the extent if the ref count is only 1 */
4574                 if (found_type != BTRFS_EXTENT_DATA_KEY)
4575                         goto delete;
4576
4577                 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4578                         u64 num_dec;
4579                         extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
4580                         if (!del_item) {
4581                                 u64 orig_num_bytes =
4582                                         btrfs_file_extent_num_bytes(leaf, fi);
4583                                 extent_num_bytes = ALIGN(new_size -
4584                                                 found_key.offset,
4585                                                 fs_info->sectorsize);
4586                                 btrfs_set_file_extent_num_bytes(leaf, fi,
4587                                                          extent_num_bytes);
4588                                 num_dec = (orig_num_bytes -
4589                                            extent_num_bytes);
4590                                 if (test_bit(BTRFS_ROOT_REF_COWS,
4591                                              &root->state) &&
4592                                     extent_start != 0)
4593                                         inode_sub_bytes(inode, num_dec);
4594                                 btrfs_mark_buffer_dirty(leaf);
4595                         } else {
4596                                 extent_num_bytes =
4597                                         btrfs_file_extent_disk_num_bytes(leaf,
4598                                                                          fi);
4599                                 extent_offset = found_key.offset -
4600                                         btrfs_file_extent_offset(leaf, fi);
4601
4602                                 /* FIXME blocksize != 4096 */
4603                                 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
4604                                 if (extent_start != 0) {
4605                                         found_extent = 1;
4606                                         if (test_bit(BTRFS_ROOT_REF_COWS,
4607                                                      &root->state))
4608                                                 inode_sub_bytes(inode, num_dec);
4609                                 }
4610                         }
4611                 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
4612                         /*
4613                          * we can't truncate inline items that have had
4614                          * special encodings
4615                          */
4616                         if (!del_item &&
4617                             btrfs_file_extent_encryption(leaf, fi) == 0 &&
4618                             btrfs_file_extent_other_encoding(leaf, fi) == 0 &&
4619                             btrfs_file_extent_compression(leaf, fi) == 0) {
4620                                 u32 size = (u32)(new_size - found_key.offset);
4621
4622                                 btrfs_set_file_extent_ram_bytes(leaf, fi, size);
4623                                 size = btrfs_file_extent_calc_inline_size(size);
4624                                 btrfs_truncate_item(root->fs_info, path, size, 1);
4625                         } else if (!del_item) {
4626                                 /*
4627                                  * We have to bail so the last_size is set to
4628                                  * just before this extent.
4629                                  */
4630                                 err = NEED_TRUNCATE_BLOCK;
4631                                 break;
4632                         }
4633
4634                         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4635                                 inode_sub_bytes(inode, item_end + 1 - new_size);
4636                 }
4637 delete:
4638                 if (del_item)
4639                         last_size = found_key.offset;
4640                 else
4641                         last_size = new_size;
4642                 if (del_item) {
4643                         if (!pending_del_nr) {
4644                                 /* no pending yet, add ourselves */
4645                                 pending_del_slot = path->slots[0];
4646                                 pending_del_nr = 1;
4647                         } else if (pending_del_nr &&
4648                                    path->slots[0] + 1 == pending_del_slot) {
4649                                 /* hop on the pending chunk */
4650                                 pending_del_nr++;
4651                                 pending_del_slot = path->slots[0];
4652                         } else {
4653                                 BUG();
4654                         }
4655                 } else {
4656                         break;
4657                 }
4658                 should_throttle = false;
4659
4660                 if (found_extent &&
4661                     (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
4662                      root == fs_info->tree_root)) {
4663                         btrfs_set_path_blocking(path);
4664                         bytes_deleted += extent_num_bytes;
4665                         ret = btrfs_free_extent(trans, root, extent_start,
4666                                                 extent_num_bytes, 0,
4667                                                 btrfs_header_owner(leaf),
4668                                                 ino, extent_offset);
4669                         BUG_ON(ret);
4670                         if (btrfs_should_throttle_delayed_refs(trans, fs_info))
4671                                 btrfs_async_run_delayed_refs(fs_info,
4672                                         trans->delayed_ref_updates * 2,
4673                                         trans->transid, 0);
4674                         if (be_nice) {
4675                                 if (truncate_space_check(trans, root,
4676                                                          extent_num_bytes)) {
4677                                         should_end = true;
4678                                 }
4679                                 if (btrfs_should_throttle_delayed_refs(trans,
4680                                                                        fs_info))
4681                                         should_throttle = true;
4682                         }
4683                 }
4684
4685                 if (found_type == BTRFS_INODE_ITEM_KEY)
4686                         break;
4687
4688                 if (path->slots[0] == 0 ||
4689                     path->slots[0] != pending_del_slot ||
4690                     should_throttle || should_end) {
4691                         if (pending_del_nr) {
4692                                 ret = btrfs_del_items(trans, root, path,
4693                                                 pending_del_slot,
4694                                                 pending_del_nr);
4695                                 if (ret) {
4696                                         btrfs_abort_transaction(trans, ret);
4697                                         goto error;
4698                                 }
4699                                 pending_del_nr = 0;
4700                         }
4701                         btrfs_release_path(path);
4702                         if (should_throttle) {
4703                                 unsigned long updates = trans->delayed_ref_updates;
4704                                 if (updates) {
4705                                         trans->delayed_ref_updates = 0;
4706                                         ret = btrfs_run_delayed_refs(trans,
4707                                                                    updates * 2);
4708                                         if (ret && !err)
4709                                                 err = ret;
4710                                 }
4711                         }
4712                         /*
4713                          * if we failed to refill our space rsv, bail out
4714                          * and let the transaction restart
4715                          */
4716                         if (should_end) {
4717                                 err = -EAGAIN;
4718                                 goto error;
4719                         }
4720                         goto search_again;
4721                 } else {
4722                         path->slots[0]--;
4723                 }
4724         }
4725 out:
4726         if (pending_del_nr) {
4727                 ret = btrfs_del_items(trans, root, path, pending_del_slot,
4728                                       pending_del_nr);
4729                 if (ret)
4730                         btrfs_abort_transaction(trans, ret);
4731         }
4732 error:
4733         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
4734                 ASSERT(last_size >= new_size);
4735                 if (!err && last_size > new_size)
4736                         last_size = new_size;
4737                 btrfs_ordered_update_i_size(inode, last_size, NULL);
4738         }
4739
4740         btrfs_free_path(path);
4741
4742         if (be_nice && bytes_deleted > SZ_32M) {
4743                 unsigned long updates = trans->delayed_ref_updates;
4744                 if (updates) {
4745                         trans->delayed_ref_updates = 0;
4746                         ret = btrfs_run_delayed_refs(trans, updates * 2);
4747                         if (ret && !err)
4748                                 err = ret;
4749                 }
4750         }
4751         return err;
4752 }
4753
4754 /*
4755  * btrfs_truncate_block - read, zero a chunk and write a block
4756  * @inode - inode that we're zeroing
4757  * @from - the offset to start zeroing
4758  * @len - the length to zero, 0 to zero the entire range respective to the
4759  *      offset
4760  * @front - zero up to the offset instead of from the offset on
4761  *
4762  * This will find the block for the "from" offset and cow the block and zero the
4763  * part we want to zero.  This is used with truncate and hole punching.
4764  */
4765 int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
4766                         int front)
4767 {
4768         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4769         struct address_space *mapping = inode->i_mapping;
4770         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4771         struct btrfs_ordered_extent *ordered;
4772         struct extent_state *cached_state = NULL;
4773         struct extent_changeset *data_reserved = NULL;
4774         char *kaddr;
4775         u32 blocksize = fs_info->sectorsize;
4776         pgoff_t index = from >> PAGE_SHIFT;
4777         unsigned offset = from & (blocksize - 1);
4778         struct page *page;
4779         gfp_t mask = btrfs_alloc_write_mask(mapping);
4780         int ret = 0;
4781         u64 block_start;
4782         u64 block_end;
4783
4784         if (IS_ALIGNED(offset, blocksize) &&
4785             (!len || IS_ALIGNED(len, blocksize)))
4786                 goto out;
4787
4788         block_start = round_down(from, blocksize);
4789         block_end = block_start + blocksize - 1;
4790
4791         ret = btrfs_delalloc_reserve_space(inode, &data_reserved,
4792                                            block_start, blocksize);
4793         if (ret)
4794                 goto out;
4795
4796 again:
4797         page = find_or_create_page(mapping, index, mask);
4798         if (!page) {
4799                 btrfs_delalloc_release_space(inode, data_reserved,
4800                                              block_start, blocksize, true);
4801                 btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize, true);
4802                 ret = -ENOMEM;
4803                 goto out;
4804         }
4805
4806         if (!PageUptodate(page)) {
4807                 ret = btrfs_readpage(NULL, page);
4808                 lock_page(page);
4809                 if (page->mapping != mapping) {
4810                         unlock_page(page);
4811                         put_page(page);
4812                         goto again;
4813                 }
4814                 if (!PageUptodate(page)) {
4815                         ret = -EIO;
4816                         goto out_unlock;
4817                 }
4818         }
4819         wait_on_page_writeback(page);
4820
4821         lock_extent_bits(io_tree, block_start, block_end, &cached_state);
4822         set_page_extent_mapped(page);
4823
4824         ordered = btrfs_lookup_ordered_extent(inode, block_start);
4825         if (ordered) {
4826                 unlock_extent_cached(io_tree, block_start, block_end,
4827                                      &cached_state);
4828                 unlock_page(page);
4829                 put_page(page);
4830                 btrfs_start_ordered_extent(inode, ordered, 1);
4831                 btrfs_put_ordered_extent(ordered);
4832                 goto again;
4833         }
4834
4835         clear_extent_bit(&BTRFS_I(inode)->io_tree, block_start, block_end,
4836                           EXTENT_DIRTY | EXTENT_DELALLOC |
4837                           EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
4838                           0, 0, &cached_state);
4839
4840         ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
4841                                         &cached_state, 0);
4842         if (ret) {
4843                 unlock_extent_cached(io_tree, block_start, block_end,
4844                                      &cached_state);
4845                 goto out_unlock;
4846         }
4847
4848         if (offset != blocksize) {
4849                 if (!len)
4850                         len = blocksize - offset;
4851                 kaddr = kmap(page);
4852                 if (front)
4853                         memset(kaddr + (block_start - page_offset(page)),
4854                                 0, offset);
4855                 else
4856                         memset(kaddr + (block_start - page_offset(page)) +  offset,
4857                                 0, len);
4858                 flush_dcache_page(page);
4859                 kunmap(page);
4860         }
4861         ClearPageChecked(page);
4862         set_page_dirty(page);
4863         unlock_extent_cached(io_tree, block_start, block_end, &cached_state);
4864
4865 out_unlock:
4866         if (ret)
4867                 btrfs_delalloc_release_space(inode, data_reserved, block_start,
4868                                              blocksize, true);
4869         btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize, (ret != 0));
4870         unlock_page(page);
4871         put_page(page);
4872 out:
4873         extent_changeset_free(data_reserved);
4874         return ret;
4875 }
4876
4877 static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
4878                              u64 offset, u64 len)
4879 {
4880         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4881         struct btrfs_trans_handle *trans;
4882         int ret;
4883
4884         /*
4885          * Still need to make sure the inode looks like it's been updated so
4886          * that any holes get logged if we fsync.
4887          */
4888         if (btrfs_fs_incompat(fs_info, NO_HOLES)) {
4889                 BTRFS_I(inode)->last_trans = fs_info->generation;
4890                 BTRFS_I(inode)->last_sub_trans = root->log_transid;
4891                 BTRFS_I(inode)->last_log_commit = root->last_log_commit;
4892                 return 0;
4893         }
4894
4895         /*
4896          * 1 - for the one we're dropping
4897          * 1 - for the one we're adding
4898          * 1 - for updating the inode.
4899          */
4900         trans = btrfs_start_transaction(root, 3);
4901         if (IS_ERR(trans))
4902                 return PTR_ERR(trans);
4903
4904         ret = btrfs_drop_extents(trans, root, inode, offset, offset + len, 1);
4905         if (ret) {
4906                 btrfs_abort_transaction(trans, ret);
4907                 btrfs_end_transaction(trans);
4908                 return ret;
4909         }
4910
4911         ret = btrfs_insert_file_extent(trans, root, btrfs_ino(BTRFS_I(inode)),
4912                         offset, 0, 0, len, 0, len, 0, 0, 0);
4913         if (ret)
4914                 btrfs_abort_transaction(trans, ret);
4915         else
4916                 btrfs_update_inode(trans, root, inode);
4917         btrfs_end_transaction(trans);
4918         return ret;
4919 }
4920
4921 /*
4922  * This function puts in dummy file extents for the area we're creating a hole
4923  * for.  So if we are truncating this file to a larger size we need to insert
4924  * these file extents so that btrfs_get_extent will return a EXTENT_MAP_HOLE for
4925  * the range between oldsize and size
4926  */
4927 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
4928 {
4929         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4930         struct btrfs_root *root = BTRFS_I(inode)->root;
4931         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4932         struct extent_map *em = NULL;
4933         struct extent_state *cached_state = NULL;
4934         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
4935         u64 hole_start = ALIGN(oldsize, fs_info->sectorsize);
4936         u64 block_end = ALIGN(size, fs_info->sectorsize);
4937         u64 last_byte;
4938         u64 cur_offset;
4939         u64 hole_size;
4940         int err = 0;
4941
4942         /*
4943          * If our size started in the middle of a block we need to zero out the
4944          * rest of the block before we expand the i_size, otherwise we could
4945          * expose stale data.
4946          */
4947         err = btrfs_truncate_block(inode, oldsize, 0, 0);
4948         if (err)
4949                 return err;
4950
4951         if (size <= hole_start)
4952                 return 0;
4953
4954         while (1) {
4955                 struct btrfs_ordered_extent *ordered;
4956
4957                 lock_extent_bits(io_tree, hole_start, block_end - 1,
4958                                  &cached_state);
4959                 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), hole_start,
4960                                                      block_end - hole_start);
4961                 if (!ordered)
4962                         break;
4963                 unlock_extent_cached(io_tree, hole_start, block_end - 1,
4964                                      &cached_state);
4965                 btrfs_start_ordered_extent(inode, ordered, 1);
4966                 btrfs_put_ordered_extent(ordered);
4967         }
4968
4969         cur_offset = hole_start;
4970         while (1) {
4971                 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
4972                                 block_end - cur_offset, 0);
4973                 if (IS_ERR(em)) {
4974                         err = PTR_ERR(em);
4975                         em = NULL;
4976                         break;
4977                 }
4978                 last_byte = min(extent_map_end(em), block_end);
4979                 last_byte = ALIGN(last_byte, fs_info->sectorsize);
4980                 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
4981                         struct extent_map *hole_em;
4982                         hole_size = last_byte - cur_offset;
4983
4984                         err = maybe_insert_hole(root, inode, cur_offset,
4985                                                 hole_size);
4986                         if (err)
4987                                 break;
4988                         btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
4989                                                 cur_offset + hole_size - 1, 0);
4990                         hole_em = alloc_extent_map();
4991                         if (!hole_em) {
4992                                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4993                                         &BTRFS_I(inode)->runtime_flags);
4994                                 goto next;
4995                         }
4996                         hole_em->start = cur_offset;
4997                         hole_em->len = hole_size;
4998                         hole_em->orig_start = cur_offset;
4999
5000                         hole_em->block_start = EXTENT_MAP_HOLE;
5001                         hole_em->block_len = 0;
5002                         hole_em->orig_block_len = 0;
5003                         hole_em->ram_bytes = hole_size;
5004                         hole_em->bdev = fs_info->fs_devices->latest_bdev;
5005                         hole_em->compress_type = BTRFS_COMPRESS_NONE;
5006                         hole_em->generation = fs_info->generation;
5007
5008                         while (1) {
5009                                 write_lock(&em_tree->lock);
5010                                 err = add_extent_mapping(em_tree, hole_em, 1);
5011                                 write_unlock(&em_tree->lock);
5012                                 if (err != -EEXIST)
5013                                         break;
5014                                 btrfs_drop_extent_cache(BTRFS_I(inode),
5015                                                         cur_offset,
5016                                                         cur_offset +
5017                                                         hole_size - 1, 0);
5018                         }
5019                         free_extent_map(hole_em);
5020                 }
5021 next:
5022                 free_extent_map(em);
5023                 em = NULL;
5024                 cur_offset = last_byte;
5025                 if (cur_offset >= block_end)
5026                         break;
5027         }
5028         free_extent_map(em);
5029         unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state);
5030         return err;
5031 }
5032
5033 static int btrfs_setsize(struct inode *inode, struct iattr *attr)
5034 {
5035         struct btrfs_root *root = BTRFS_I(inode)->root;
5036         struct btrfs_trans_handle *trans;
5037         loff_t oldsize = i_size_read(inode);
5038         loff_t newsize = attr->ia_size;
5039         int mask = attr->ia_valid;
5040         int ret;
5041
5042         /*
5043          * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
5044          * special case where we need to update the times despite not having
5045          * these flags set.  For all other operations the VFS set these flags
5046          * explicitly if it wants a timestamp update.
5047          */
5048         if (newsize != oldsize) {
5049                 inode_inc_iversion(inode);
5050                 if (!(mask & (ATTR_CTIME | ATTR_MTIME)))
5051                         inode->i_ctime = inode->i_mtime =
5052                                 current_time(inode);
5053         }
5054
5055         if (newsize > oldsize) {
5056                 /*
5057                  * Don't do an expanding truncate while snapshotting is ongoing.
5058                  * This is to ensure the snapshot captures a fully consistent
5059                  * state of this file - if the snapshot captures this expanding
5060                  * truncation, it must capture all writes that happened before
5061                  * this truncation.
5062                  */
5063                 btrfs_wait_for_snapshot_creation(root);
5064                 ret = btrfs_cont_expand(inode, oldsize, newsize);
5065                 if (ret) {
5066                         btrfs_end_write_no_snapshotting(root);
5067                         return ret;
5068                 }
5069
5070                 trans = btrfs_start_transaction(root, 1);
5071                 if (IS_ERR(trans)) {
5072                         btrfs_end_write_no_snapshotting(root);
5073                         return PTR_ERR(trans);
5074                 }
5075
5076                 i_size_write(inode, newsize);
5077                 btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
5078                 pagecache_isize_extended(inode, oldsize, newsize);
5079                 ret = btrfs_update_inode(trans, root, inode);
5080                 btrfs_end_write_no_snapshotting(root);
5081                 btrfs_end_transaction(trans);
5082         } else {
5083
5084                 /*
5085                  * We're truncating a file that used to have good data down to
5086                  * zero. Make sure it gets into the ordered flush list so that
5087                  * any new writes get down to disk quickly.
5088                  */
5089                 if (newsize == 0)
5090                         set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
5091                                 &BTRFS_I(inode)->runtime_flags);
5092
5093                 /*
5094                  * 1 for the orphan item we're going to add
5095                  * 1 for the orphan item deletion.
5096                  */
5097                 trans = btrfs_start_transaction(root, 2);
5098                 if (IS_ERR(trans))
5099                         return PTR_ERR(trans);
5100
5101                 /*
5102                  * We need to do this in case we fail at _any_ point during the
5103                  * actual truncate.  Once we do the truncate_setsize we could
5104                  * invalidate pages which forces any outstanding ordered io to
5105                  * be instantly completed which will give us extents that need
5106                  * to be truncated.  If we fail to get an orphan inode down we
5107                  * could have left over extents that were never meant to live,
5108                  * so we need to guarantee from this point on that everything
5109                  * will be consistent.
5110                  */
5111                 ret = btrfs_orphan_add(trans, BTRFS_I(inode));
5112                 btrfs_end_transaction(trans);
5113                 if (ret)
5114                         return ret;
5115
5116                 /* we don't support swapfiles, so vmtruncate shouldn't fail */
5117                 truncate_setsize(inode, newsize);
5118
5119                 /* Disable nonlocked read DIO to avoid the end less truncate */
5120                 btrfs_inode_block_unlocked_dio(BTRFS_I(inode));
5121                 inode_dio_wait(inode);
5122                 btrfs_inode_resume_unlocked_dio(BTRFS_I(inode));
5123
5124                 ret = btrfs_truncate(inode, newsize == oldsize);
5125                 if (ret && inode->i_nlink) {
5126                         int err;
5127
5128                         /* To get a stable disk_i_size */
5129                         err = btrfs_wait_ordered_range(inode, 0, (u64)-1);
5130                         if (err) {
5131                                 btrfs_orphan_del(NULL, BTRFS_I(inode));
5132                                 return err;
5133                         }
5134
5135                         /*
5136                          * failed to truncate, disk_i_size is only adjusted down
5137                          * as we remove extents, so it should represent the true
5138                          * size of the inode, so reset the in memory size and
5139                          * delete our orphan entry.
5140                          */
5141                         trans = btrfs_join_transaction(root);
5142                         if (IS_ERR(trans)) {
5143                                 btrfs_orphan_del(NULL, BTRFS_I(inode));
5144                                 return ret;
5145                         }
5146                         i_size_write(inode, BTRFS_I(inode)->disk_i_size);
5147                         err = btrfs_orphan_del(trans, BTRFS_I(inode));
5148                         if (err)
5149                                 btrfs_abort_transaction(trans, err);
5150                         btrfs_end_transaction(trans);
5151                 }
5152         }
5153
5154         return ret;
5155 }
5156
5157 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
5158 {
5159         struct inode *inode = d_inode(dentry);
5160         struct btrfs_root *root = BTRFS_I(inode)->root;
5161         int err;
5162
5163         if (btrfs_root_readonly(root))
5164                 return -EROFS;
5165
5166         err = setattr_prepare(dentry, attr);
5167         if (err)
5168                 return err;
5169
5170         if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
5171                 err = btrfs_setsize(inode, attr);
5172                 if (err)
5173                         return err;
5174         }
5175
5176         if (attr->ia_valid) {
5177                 setattr_copy(inode, attr);
5178                 inode_inc_iversion(inode);
5179                 err = btrfs_dirty_inode(inode);
5180
5181                 if (!err && attr->ia_valid & ATTR_MODE)
5182                         err = posix_acl_chmod(inode, inode->i_mode);
5183         }
5184
5185         return err;
5186 }
5187
5188 /*
5189  * While truncating the inode pages during eviction, we get the VFS calling
5190  * btrfs_invalidatepage() against each page of the inode. This is slow because
5191  * the calls to btrfs_invalidatepage() result in a huge amount of calls to
5192  * lock_extent_bits() and clear_extent_bit(), which keep merging and splitting
5193  * extent_state structures over and over, wasting lots of time.
5194  *
5195  * Therefore if the inode is being evicted, let btrfs_invalidatepage() skip all
5196  * those expensive operations on a per page basis and do only the ordered io
5197  * finishing, while we release here the extent_map and extent_state structures,
5198  * without the excessive merging and splitting.
5199  */
5200 static void evict_inode_truncate_pages(struct inode *inode)
5201 {
5202         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5203         struct extent_map_tree *map_tree = &BTRFS_I(inode)->extent_tree;
5204         struct rb_node *node;
5205
5206         ASSERT(inode->i_state & I_FREEING);
5207         truncate_inode_pages_final(&inode->i_data);
5208
5209         write_lock(&map_tree->lock);
5210         while (!RB_EMPTY_ROOT(&map_tree->map)) {
5211                 struct extent_map *em;
5212
5213                 node = rb_first(&map_tree->map);
5214                 em = rb_entry(node, struct extent_map, rb_node);
5215                 clear_bit(EXTENT_FLAG_PINNED, &em->flags);
5216                 clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
5217                 remove_extent_mapping(map_tree, em);
5218                 free_extent_map(em);
5219                 if (need_resched()) {
5220                         write_unlock(&map_tree->lock);
5221                         cond_resched();
5222                         write_lock(&map_tree->lock);
5223                 }
5224         }
5225         write_unlock(&map_tree->lock);
5226
5227         /*
5228          * Keep looping until we have no more ranges in the io tree.
5229          * We can have ongoing bios started by readpages (called from readahead)
5230          * that have their endio callback (extent_io.c:end_bio_extent_readpage)
5231          * still in progress (unlocked the pages in the bio but did not yet
5232          * unlocked the ranges in the io tree). Therefore this means some
5233          * ranges can still be locked and eviction started because before
5234          * submitting those bios, which are executed by a separate task (work
5235          * queue kthread), inode references (inode->i_count) were not taken
5236          * (which would be dropped in the end io callback of each bio).
5237          * Therefore here we effectively end up waiting for those bios and
5238          * anyone else holding locked ranges without having bumped the inode's
5239          * reference count - if we don't do it, when they access the inode's
5240          * io_tree to unlock a range it may be too late, leading to an
5241          * use-after-free issue.
5242          */
5243         spin_lock(&io_tree->lock);
5244         while (!RB_EMPTY_ROOT(&io_tree->state)) {
5245                 struct extent_state *state;
5246                 struct extent_state *cached_state = NULL;
5247                 u64 start;
5248                 u64 end;
5249
5250                 node = rb_first(&io_tree->state);
5251                 state = rb_entry(node, struct extent_state, rb_node);
5252                 start = state->start;
5253                 end = state->end;
5254                 spin_unlock(&io_tree->lock);
5255
5256                 lock_extent_bits(io_tree, start, end, &cached_state);
5257
5258                 /*
5259                  * If still has DELALLOC flag, the extent didn't reach disk,
5260                  * and its reserved space won't be freed by delayed_ref.
5261                  * So we need to free its reserved space here.
5262                  * (Refer to comment in btrfs_invalidatepage, case 2)
5263                  *
5264                  * Note, end is the bytenr of last byte, so we need + 1 here.
5265                  */
5266                 if (state->state & EXTENT_DELALLOC)
5267                         btrfs_qgroup_free_data(inode, NULL, start, end - start + 1);
5268
5269                 clear_extent_bit(io_tree, start, end,
5270                                  EXTENT_LOCKED | EXTENT_DIRTY |
5271                                  EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
5272                                  EXTENT_DEFRAG, 1, 1, &cached_state);
5273
5274                 cond_resched();
5275                 spin_lock(&io_tree->lock);
5276         }
5277         spin_unlock(&io_tree->lock);
5278 }
5279
5280 void btrfs_evict_inode(struct inode *inode)
5281 {
5282         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5283         struct btrfs_trans_handle *trans;
5284         struct btrfs_root *root = BTRFS_I(inode)->root;
5285         struct btrfs_block_rsv *rsv, *global_rsv;
5286         int steal_from_global = 0;
5287         u64 min_size;
5288         int ret;
5289
5290         trace_btrfs_inode_evict(inode);
5291
5292         if (!root) {
5293                 clear_inode(inode);
5294                 return;
5295         }
5296
5297         min_size = btrfs_calc_trunc_metadata_size(fs_info, 1);
5298
5299         evict_inode_truncate_pages(inode);
5300
5301         if (inode->i_nlink &&
5302             ((btrfs_root_refs(&root->root_item) != 0 &&
5303               root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) ||
5304              btrfs_is_free_space_inode(BTRFS_I(inode))))
5305                 goto no_delete;
5306
5307         if (is_bad_inode(inode)) {
5308                 btrfs_orphan_del(NULL, BTRFS_I(inode));
5309                 goto no_delete;
5310         }
5311         /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
5312         if (!special_file(inode->i_mode))
5313                 btrfs_wait_ordered_range(inode, 0, (u64)-1);
5314
5315         btrfs_free_io_failure_record(BTRFS_I(inode), 0, (u64)-1);
5316
5317         if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
5318                 BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
5319                                  &BTRFS_I(inode)->runtime_flags));
5320                 goto no_delete;
5321         }
5322
5323         if (inode->i_nlink > 0) {
5324                 BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
5325                        root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID);
5326                 goto no_delete;
5327         }
5328
5329         ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode));
5330         if (ret) {
5331                 btrfs_orphan_del(NULL, BTRFS_I(inode));
5332                 goto no_delete;
5333         }
5334
5335         rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
5336         if (!rsv) {
5337                 btrfs_orphan_del(NULL, BTRFS_I(inode));
5338                 goto no_delete;
5339         }
5340         rsv->size = min_size;
5341         rsv->failfast = 1;
5342         global_rsv = &fs_info->global_block_rsv;
5343
5344         btrfs_i_size_write(BTRFS_I(inode), 0);
5345
5346         /*
5347          * This is a bit simpler than btrfs_truncate since we've already
5348          * reserved our space for our orphan item in the unlink, so we just
5349          * need to reserve some slack space in case we add bytes and update
5350          * inode item when doing the truncate.
5351          */
5352         while (1) {
5353                 ret = btrfs_block_rsv_refill(root, rsv, min_size,
5354                                              BTRFS_RESERVE_FLUSH_LIMIT);
5355
5356                 /*
5357                  * Try and steal from the global reserve since we will
5358                  * likely not use this space anyway, we want to try as
5359                  * hard as possible to get this to work.
5360                  */
5361                 if (ret)
5362                         steal_from_global++;
5363                 else
5364                         steal_from_global = 0;
5365                 ret = 0;
5366
5367                 /*
5368                  * steal_from_global == 0: we reserved stuff, hooray!
5369                  * steal_from_global == 1: we didn't reserve stuff, boo!
5370                  * steal_from_global == 2: we've committed, still not a lot of
5371                  * room but maybe we'll have room in the global reserve this
5372                  * time.
5373                  * steal_from_global == 3: abandon all hope!
5374                  */
5375                 if (steal_from_global > 2) {
5376                         btrfs_warn(fs_info,
5377                                    "Could not get space for a delete, will truncate on mount %d",
5378                                    ret);
5379                         btrfs_orphan_del(NULL, BTRFS_I(inode));
5380                         btrfs_free_block_rsv(fs_info, rsv);
5381                         goto no_delete;
5382                 }
5383
5384                 trans = btrfs_join_transaction(root);
5385                 if (IS_ERR(trans)) {
5386                         btrfs_orphan_del(NULL, BTRFS_I(inode));
5387                         btrfs_free_block_rsv(fs_info, rsv);
5388                         goto no_delete;
5389                 }
5390
5391                 /*
5392                  * We can't just steal from the global reserve, we need to make
5393                  * sure there is room to do it, if not we need to commit and try
5394                  * again.
5395                  */
5396                 if (steal_from_global) {
5397                         if (!btrfs_check_space_for_delayed_refs(trans, fs_info))
5398                                 ret = btrfs_block_rsv_migrate(global_rsv, rsv,
5399                                                               min_size, 0);
5400                         else
5401                                 ret = -ENOSPC;
5402                 }
5403
5404                 /*
5405                  * Couldn't steal from the global reserve, we have too much
5406                  * pending stuff built up, commit the transaction and try it
5407                  * again.
5408                  */
5409                 if (ret) {
5410                         ret = btrfs_commit_transaction(trans);
5411                         if (ret) {
5412                                 btrfs_orphan_del(NULL, BTRFS_I(inode));
5413                                 btrfs_free_block_rsv(fs_info, rsv);
5414                                 goto no_delete;
5415                         }
5416                         continue;
5417                 } else {
5418                         steal_from_global = 0;
5419                 }
5420
5421                 trans->block_rsv = rsv;
5422
5423                 ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
5424                 if (ret != -ENOSPC && ret != -EAGAIN)
5425                         break;
5426
5427                 trans->block_rsv = &fs_info->trans_block_rsv;
5428                 btrfs_end_transaction(trans);
5429                 trans = NULL;
5430                 btrfs_btree_balance_dirty(fs_info);
5431         }
5432
5433         btrfs_free_block_rsv(fs_info, rsv);
5434
5435         /*
5436          * Errors here aren't a big deal, it just means we leave orphan items
5437          * in the tree.  They will be cleaned up on the next mount.
5438          */
5439         if (ret == 0) {
5440                 trans->block_rsv = root->orphan_block_rsv;
5441                 btrfs_orphan_del(trans, BTRFS_I(inode));
5442         } else {
5443                 btrfs_orphan_del(NULL, BTRFS_I(inode));
5444         }
5445
5446         trans->block_rsv = &fs_info->trans_block_rsv;
5447         if (!(root == fs_info->tree_root ||
5448               root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
5449                 btrfs_return_ino(root, btrfs_ino(BTRFS_I(inode)));
5450
5451         btrfs_end_transaction(trans);
5452         btrfs_btree_balance_dirty(fs_info);
5453 no_delete:
5454         btrfs_remove_delayed_node(BTRFS_I(inode));
5455         clear_inode(inode);
5456 }
5457
5458 /*
5459  * this returns the key found in the dir entry in the location pointer.
5460  * If no dir entries were found, returns -ENOENT.
5461  * If found a corrupted location in dir entry, returns -EUCLEAN.
5462  */
5463 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
5464                                struct btrfs_key *location)
5465 {
5466         const char *name = dentry->d_name.name;
5467         int namelen = dentry->d_name.len;
5468         struct btrfs_dir_item *di;
5469         struct btrfs_path *path;
5470         struct btrfs_root *root = BTRFS_I(dir)->root;
5471         int ret = 0;
5472
5473         path = btrfs_alloc_path();
5474         if (!path)
5475                 return -ENOMEM;
5476
5477         di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(BTRFS_I(dir)),
5478                         name, namelen, 0);
5479         if (!di) {
5480                 ret = -ENOENT;
5481                 goto out;
5482         }
5483         if (IS_ERR(di)) {
5484                 ret = PTR_ERR(di);
5485                 goto out;
5486         }
5487
5488         btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
5489         if (location->type != BTRFS_INODE_ITEM_KEY &&
5490             location->type != BTRFS_ROOT_ITEM_KEY) {
5491                 ret = -EUCLEAN;
5492                 btrfs_warn(root->fs_info,
5493 "%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))",
5494                            __func__, name, btrfs_ino(BTRFS_I(dir)),
5495                            location->objectid, location->type, location->offset);
5496         }
5497 out:
5498         btrfs_free_path(path);
5499         return ret;
5500 }
5501
5502 /*
5503  * when we hit a tree root in a directory, the btrfs part of the inode
5504  * needs to be changed to reflect the root directory of the tree root.  This
5505  * is kind of like crossing a mount point.
5506  */
5507 static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
5508                                     struct inode *dir,
5509                                     struct dentry *dentry,
5510                                     struct btrfs_key *location,
5511                                     struct btrfs_root **sub_root)
5512 {
5513         struct btrfs_path *path;
5514         struct btrfs_root *new_root;
5515         struct btrfs_root_ref *ref;
5516         struct extent_buffer *leaf;
5517         struct btrfs_key key;
5518         int ret;
5519         int err = 0;
5520
5521         path = btrfs_alloc_path();
5522         if (!path) {
5523                 err = -ENOMEM;
5524                 goto out;
5525         }
5526
5527         err = -ENOENT;
5528         key.objectid = BTRFS_I(dir)->root->root_key.objectid;
5529         key.type = BTRFS_ROOT_REF_KEY;
5530         key.offset = location->objectid;
5531
5532         ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
5533         if (ret) {
5534                 if (ret < 0)
5535                         err = ret;
5536                 goto out;
5537         }
5538
5539         leaf = path->nodes[0];
5540         ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
5541         if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(BTRFS_I(dir)) ||
5542             btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
5543                 goto out;
5544
5545         ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
5546                                    (unsigned long)(ref + 1),
5547                                    dentry->d_name.len);
5548         if (ret)
5549                 goto out;
5550
5551         btrfs_release_path(path);
5552
5553         new_root = btrfs_read_fs_root_no_name(fs_info, location);
5554         if (IS_ERR(new_root)) {
5555                 err = PTR_ERR(new_root);
5556                 goto out;
5557         }
5558
5559         *sub_root = new_root;
5560         location->objectid = btrfs_root_dirid(&new_root->root_item);
5561         location->type = BTRFS_INODE_ITEM_KEY;
5562         location->offset = 0;
5563         err = 0;
5564 out:
5565         btrfs_free_path(path);
5566         return err;
5567 }
5568
5569 static void inode_tree_add(struct inode *inode)
5570 {
5571         struct btrfs_root *root = BTRFS_I(inode)->root;
5572         struct btrfs_inode *entry;
5573         struct rb_node **p;
5574         struct rb_node *parent;
5575         struct rb_node *new = &BTRFS_I(inode)->rb_node;
5576         u64 ino = btrfs_ino(BTRFS_I(inode));
5577
5578         if (inode_unhashed(inode))
5579                 return;
5580         parent = NULL;
5581         spin_lock(&root->inode_lock);
5582         p = &root->inode_tree.rb_node;
5583         while (*p) {
5584                 parent = *p;
5585                 entry = rb_entry(parent, struct btrfs_inode, rb_node);
5586
5587                 if (ino < btrfs_ino(BTRFS_I(&entry->vfs_inode)))
5588                         p = &parent->rb_left;
5589                 else if (ino > btrfs_ino(BTRFS_I(&entry->vfs_inode)))
5590                         p = &parent->rb_right;
5591                 else {
5592                         WARN_ON(!(entry->vfs_inode.i_state &
5593                                   (I_WILL_FREE | I_FREEING)));
5594                         rb_replace_node(parent, new, &root->inode_tree);
5595                         RB_CLEAR_NODE(parent);
5596                         spin_unlock(&root->inode_lock);
5597                         return;
5598                 }
5599         }
5600         rb_link_node(new, parent, p);
5601         rb_insert_color(new, &root->inode_tree);
5602         spin_unlock(&root->inode_lock);
5603 }
5604
5605 static void inode_tree_del(struct inode *inode)
5606 {
5607         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5608         struct btrfs_root *root = BTRFS_I(inode)->root;
5609         int empty = 0;
5610
5611         spin_lock(&root->inode_lock);
5612         if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
5613                 rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
5614                 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
5615                 empty = RB_EMPTY_ROOT(&root->inode_tree);
5616         }
5617         spin_unlock(&root->inode_lock);
5618
5619         if (empty && btrfs_root_refs(&root->root_item) == 0) {
5620                 synchronize_srcu(&fs_info->subvol_srcu);
5621                 spin_lock(&root->inode_lock);
5622                 empty = RB_EMPTY_ROOT(&root->inode_tree);
5623                 spin_unlock(&root->inode_lock);
5624                 if (empty)
5625                         btrfs_add_dead_root(root);
5626         }
5627 }
5628
5629 void btrfs_invalidate_inodes(struct btrfs_root *root)
5630 {
5631         struct btrfs_fs_info *fs_info = root->fs_info;
5632         struct rb_node *node;
5633         struct rb_node *prev;
5634         struct btrfs_inode *entry;
5635         struct inode *inode;
5636         u64 objectid = 0;
5637
5638         if (!test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
5639                 WARN_ON(btrfs_root_refs(&root->root_item) != 0);
5640
5641         spin_lock(&root->inode_lock);
5642 again:
5643         node = root->inode_tree.rb_node;
5644         prev = NULL;
5645         while (node) {
5646                 prev = node;
5647                 entry = rb_entry(node, struct btrfs_inode, rb_node);
5648
5649                 if (objectid < btrfs_ino(BTRFS_I(&entry->vfs_inode)))
5650                         node = node->rb_left;
5651                 else if (objectid > btrfs_ino(BTRFS_I(&entry->vfs_inode)))
5652                         node = node->rb_right;
5653                 else
5654                         break;
5655         }
5656         if (!node) {
5657                 while (prev) {
5658                         entry = rb_entry(prev, struct btrfs_inode, rb_node);
5659                         if (objectid <= btrfs_ino(BTRFS_I(&entry->vfs_inode))) {
5660                                 node = prev;
5661                                 break;
5662                         }
5663                         prev = rb_next(prev);
5664                 }
5665         }
5666         while (node) {
5667                 entry = rb_entry(node, struct btrfs_inode, rb_node);
5668                 objectid = btrfs_ino(BTRFS_I(&entry->vfs_inode)) + 1;
5669                 inode = igrab(&entry->vfs_inode);
5670                 if (inode) {
5671                         spin_unlock(&root->inode_lock);
5672                         if (atomic_read(&inode->i_count) > 1)
5673                                 d_prune_aliases(inode);
5674                         /*
5675                          * btrfs_drop_inode will have it removed from
5676                          * the inode cache when its usage count
5677                          * hits zero.
5678                          */
5679                         iput(inode);
5680                         cond_resched();
5681                         spin_lock(&root->inode_lock);
5682                         goto again;
5683                 }
5684
5685                 if (cond_resched_lock(&root->inode_lock))
5686                         goto again;
5687
5688                 node = rb_next(node);
5689         }
5690         spin_unlock(&root->inode_lock);
5691 }
5692
5693 static int btrfs_init_locked_inode(struct inode *inode, void *p)
5694 {
5695         struct btrfs_iget_args *args = p;
5696         inode->i_ino = args->location->objectid;
5697         memcpy(&BTRFS_I(inode)->location, args->location,
5698                sizeof(*args->location));
5699         BTRFS_I(inode)->root = args->root;
5700         return 0;
5701 }
5702
5703 static int btrfs_find_actor(struct inode *inode, void *opaque)
5704 {
5705         struct btrfs_iget_args *args = opaque;
5706         return args->location->objectid == BTRFS_I(inode)->location.objectid &&
5707                 args->root == BTRFS_I(inode)->root;
5708 }
5709
5710 static struct inode *btrfs_iget_locked(struct super_block *s,
5711                                        struct btrfs_key *location,
5712                                        struct btrfs_root *root)
5713 {
5714         struct inode *inode;
5715         struct btrfs_iget_args args;
5716         unsigned long hashval = btrfs_inode_hash(location->objectid, root);
5717
5718         args.location = location;
5719         args.root = root;
5720
5721         inode = iget5_locked(s, hashval, btrfs_find_actor,
5722                              btrfs_init_locked_inode,
5723                              (void *)&args);
5724         return inode;
5725 }
5726
5727 /* Get an inode object given its location and corresponding root.
5728  * Returns in *is_new if the inode was read from disk
5729  */
5730 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
5731                          struct btrfs_root *root, int *new)
5732 {
5733         struct inode *inode;
5734
5735         inode = btrfs_iget_locked(s, location, root);
5736         if (!inode)
5737                 return ERR_PTR(-ENOMEM);
5738
5739         if (inode->i_state & I_NEW) {
5740                 int ret;
5741
5742                 ret = btrfs_read_locked_inode(inode);
5743                 if (!is_bad_inode(inode)) {
5744                         inode_tree_add(inode);
5745                         unlock_new_inode(inode);
5746                         if (new)
5747                                 *new = 1;
5748                 } else {
5749                         unlock_new_inode(inode);
5750                         iput(inode);
5751                         ASSERT(ret < 0);
5752                         inode = ERR_PTR(ret < 0 ? ret : -ESTALE);
5753                 }
5754         }
5755
5756         return inode;
5757 }
5758
5759 static struct inode *new_simple_dir(struct super_block *s,
5760                                     struct btrfs_key *key,
5761                                     struct btrfs_root *root)
5762 {
5763         struct inode *inode = new_inode(s);
5764
5765         if (!inode)
5766                 return ERR_PTR(-ENOMEM);
5767
5768         BTRFS_I(inode)->root = root;
5769         memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
5770         set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
5771
5772         inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
5773         inode->i_op = &btrfs_dir_ro_inode_operations;
5774         inode->i_opflags &= ~IOP_XATTR;
5775         inode->i_fop = &simple_dir_operations;
5776         inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
5777         inode->i_mtime = current_time(inode);
5778         inode->i_atime = inode->i_mtime;
5779         inode->i_ctime = inode->i_mtime;
5780         BTRFS_I(inode)->i_otime = inode->i_mtime;
5781
5782         return inode;
5783 }
5784
5785 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
5786 {
5787         struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
5788         struct inode *inode;
5789         struct btrfs_root *root = BTRFS_I(dir)->root;
5790         struct btrfs_root *sub_root = root;
5791         struct btrfs_key location;
5792         int index;
5793         int ret = 0;
5794
5795         if (dentry->d_name.len > BTRFS_NAME_LEN)
5796                 return ERR_PTR(-ENAMETOOLONG);
5797
5798         ret = btrfs_inode_by_name(dir, dentry, &location);
5799         if (ret < 0)
5800                 return ERR_PTR(ret);
5801
5802         if (location.type == BTRFS_INODE_ITEM_KEY) {
5803                 inode = btrfs_iget(dir->i_sb, &location, root, NULL);
5804                 return inode;
5805         }
5806
5807         index = srcu_read_lock(&fs_info->subvol_srcu);
5808         ret = fixup_tree_root_location(fs_info, dir, dentry,
5809                                        &location, &sub_root);
5810         if (ret < 0) {
5811                 if (ret != -ENOENT)
5812                         inode = ERR_PTR(ret);
5813                 else
5814                         inode = new_simple_dir(dir->i_sb, &location, sub_root);
5815         } else {
5816                 inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
5817         }
5818         srcu_read_unlock(&fs_info->subvol_srcu, index);
5819
5820         if (!IS_ERR(inode) && root != sub_root) {
5821                 down_read(&fs_info->cleanup_work_sem);
5822                 if (!sb_rdonly(inode->i_sb))
5823                         ret = btrfs_orphan_cleanup(sub_root);
5824                 up_read(&fs_info->cleanup_work_sem);
5825                 if (ret) {
5826                         iput(inode);
5827                         inode = ERR_PTR(ret);
5828                 }
5829         }
5830
5831         return inode;
5832 }
5833
5834 static int btrfs_dentry_delete(const struct dentry *dentry)
5835 {
5836         struct btrfs_root *root;
5837         struct inode *inode = d_inode(dentry);
5838
5839         if (!inode && !IS_ROOT(dentry))
5840                 inode = d_inode(dentry->d_parent);
5841
5842         if (inode) {
5843                 root = BTRFS_I(inode)->root;
5844                 if (btrfs_root_refs(&root->root_item) == 0)
5845                         return 1;
5846
5847                 if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
5848                         return 1;
5849         }
5850         return 0;
5851 }
5852
5853 static void btrfs_dentry_release(struct dentry *dentry)
5854 {
5855         kfree(dentry->d_fsdata);
5856 }
5857
5858 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
5859                                    unsigned int flags)
5860 {
5861         struct inode *inode;
5862
5863         inode = btrfs_lookup_dentry(dir, dentry);
5864         if (IS_ERR(inode)) {
5865                 if (PTR_ERR(inode) == -ENOENT)
5866                         inode = NULL;
5867                 else
5868                         return ERR_CAST(inode);
5869         }
5870
5871         return d_splice_alias(inode, dentry);
5872 }
5873
5874 unsigned char btrfs_filetype_table[] = {
5875         DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
5876 };
5877
5878 /*
5879  * All this infrastructure exists because dir_emit can fault, and we are holding
5880  * the tree lock when doing readdir.  For now just allocate a buffer and copy
5881  * our information into that, and then dir_emit from the buffer.  This is
5882  * similar to what NFS does, only we don't keep the buffer around in pagecache
5883  * because I'm afraid I'll mess that up.  Long term we need to make filldir do
5884  * copy_to_user_inatomic so we don't have to worry about page faulting under the
5885  * tree lock.
5886  */
5887 static int btrfs_opendir(struct inode *inode, struct file *file)
5888 {
5889         struct btrfs_file_private *private;
5890
5891         private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL);
5892         if (!private)
5893                 return -ENOMEM;
5894         private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
5895         if (!private->filldir_buf) {
5896                 kfree(private);
5897                 return -ENOMEM;
5898         }
5899         file->private_data = private;
5900         return 0;
5901 }
5902
5903 struct dir_entry {
5904         u64 ino;
5905         u64 offset;
5906         unsigned type;
5907         int name_len;
5908 };
5909
5910 static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx)
5911 {
5912         while (entries--) {
5913                 struct dir_entry *entry = addr;
5914                 char *name = (char *)(entry + 1);
5915
5916                 ctx->pos = get_unaligned(&entry->offset);
5917                 if (!dir_emit(ctx, name, get_unaligned(&entry->name_len),
5918                                          get_unaligned(&entry->ino),
5919                                          get_unaligned(&entry->type)))
5920                         return 1;
5921                 addr += sizeof(struct dir_entry) +
5922                         get_unaligned(&entry->name_len);
5923                 ctx->pos++;
5924         }
5925         return 0;
5926 }
5927
5928 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
5929 {
5930         struct inode *inode = file_inode(file);
5931         struct btrfs_root *root = BTRFS_I(inode)->root;
5932         struct btrfs_file_private *private = file->private_data;
5933         struct btrfs_dir_item *di;
5934         struct btrfs_key key;
5935         struct btrfs_key found_key;
5936         struct btrfs_path *path;
5937         void *addr;
5938         struct list_head ins_list;
5939         struct list_head del_list;
5940         int ret;
5941         struct extent_buffer *leaf;
5942         int slot;
5943         char *name_ptr;
5944         int name_len;
5945         int entries = 0;
5946         int total_len = 0;
5947         bool put = false;
5948         struct btrfs_key location;
5949
5950         if (!dir_emit_dots(file, ctx))
5951                 return 0;
5952
5953         path = btrfs_alloc_path();
5954         if (!path)
5955                 return -ENOMEM;
5956
5957         addr = private->filldir_buf;
5958         path->reada = READA_FORWARD;
5959
5960         INIT_LIST_HEAD(&ins_list);
5961         INIT_LIST_HEAD(&del_list);
5962         put = btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list);
5963
5964 again:
5965         key.type = BTRFS_DIR_INDEX_KEY;
5966         key.offset = ctx->pos;
5967         key.objectid = btrfs_ino(BTRFS_I(inode));
5968
5969         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5970         if (ret < 0)
5971                 goto err;
5972
5973         while (1) {
5974                 struct dir_entry *entry;
5975
5976                 leaf = path->nodes[0];
5977                 slot = path->slots[0];
5978                 if (slot >= btrfs_header_nritems(leaf)) {
5979                         ret = btrfs_next_leaf(root, path);
5980                         if (ret < 0)
5981                                 goto err;
5982                         else if (ret > 0)
5983                                 break;
5984                         continue;
5985                 }
5986
5987                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
5988
5989                 if (found_key.objectid != key.objectid)
5990                         break;
5991                 if (found_key.type != BTRFS_DIR_INDEX_KEY)
5992                         break;
5993                 if (found_key.offset < ctx->pos)
5994                         goto next;
5995                 if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
5996                         goto next;
5997                 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
5998                 name_len = btrfs_dir_name_len(leaf, di);
5999                 if ((total_len + sizeof(struct dir_entry) + name_len) >=
6000                     PAGE_SIZE) {
6001                         btrfs_release_path(path);
6002                         ret = btrfs_filldir(private->filldir_buf, entries, ctx);
6003                         if (ret)
6004                                 goto nopos;
6005                         addr = private->filldir_buf;
6006                         entries = 0;
6007                         total_len = 0;
6008                         goto again;
6009                 }
6010
6011                 entry = addr;
6012                 put_unaligned(name_len, &entry->name_len);
6013                 name_ptr = (char *)(entry + 1);
6014                 read_extent_buffer(leaf, name_ptr, (unsigned long)(di + 1),
6015                                    name_len);
6016                 put_unaligned(btrfs_filetype_table[btrfs_dir_type(leaf, di)],
6017                                 &entry->type);
6018                 btrfs_dir_item_key_to_cpu(leaf, di, &location);
6019                 put_unaligned(location.objectid, &entry->ino);
6020                 put_unaligned(found_key.offset, &entry->offset);
6021                 entries++;
6022                 addr += sizeof(struct dir_entry) + name_len;
6023                 total_len += sizeof(struct dir_entry) + name_len;
6024 next:
6025                 path->slots[0]++;
6026         }
6027         btrfs_release_path(path);
6028
6029         ret = btrfs_filldir(private->filldir_buf, entries, ctx);
6030         if (ret)
6031                 goto nopos;
6032
6033         ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
6034         if (ret)
6035                 goto nopos;
6036
6037         /*
6038          * Stop new entries from being returned after we return the last
6039          * entry.
6040          *
6041          * New directory entries are assigned a strictly increasing
6042          * offset.  This means that new entries created during readdir
6043          * are *guaranteed* to be seen in the future by that readdir.
6044          * This has broken buggy programs which operate on names as
6045          * they're returned by readdir.  Until we re-use freed offsets
6046          * we have this hack to stop new entries from being returned
6047          * under the assumption that they'll never reach this huge
6048          * offset.
6049          *
6050          * This is being careful not to overflow 32bit loff_t unless the
6051          * last entry requires it because doing so has broken 32bit apps
6052          * in the past.
6053          */
6054         if (ctx->pos >= INT_MAX)
6055                 ctx->pos = LLONG_MAX;
6056         else
6057                 ctx->pos = INT_MAX;
6058 nopos:
6059         ret = 0;
6060 err:
6061         if (put)
6062                 btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
6063         btrfs_free_path(path);
6064         return ret;
6065 }
6066
6067 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
6068 {
6069         struct btrfs_root *root = BTRFS_I(inode)->root;
6070         struct btrfs_trans_handle *trans;
6071         int ret = 0;
6072         bool nolock = false;
6073
6074         if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
6075                 return 0;
6076
6077         if (btrfs_fs_closing(root->fs_info) &&
6078                         btrfs_is_free_space_inode(BTRFS_I(inode)))
6079                 nolock = true;
6080
6081         if (wbc->sync_mode == WB_SYNC_ALL) {
6082                 if (nolock)
6083                         trans = btrfs_join_transaction_nolock(root);
6084                 else
6085                         trans = btrfs_join_transaction(root);
6086                 if (IS_ERR(trans))
6087                         return PTR_ERR(trans);
6088                 ret = btrfs_commit_transaction(trans);
6089         }
6090         return ret;
6091 }
6092
6093 /*
6094  * This is somewhat expensive, updating the tree every time the
6095  * inode changes.  But, it is most likely to find the inode in cache.
6096  * FIXME, needs more benchmarking...there are no reasons other than performance
6097  * to keep or drop this code.
6098  */
6099 static int btrfs_dirty_inode(struct inode *inode)
6100 {
6101         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
6102         struct btrfs_root *root = BTRFS_I(inode)->root;
6103         struct btrfs_trans_handle *trans;
6104         int ret;
6105
6106         if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
6107                 return 0;
6108
6109         trans = btrfs_join_transaction(root);
6110         if (IS_ERR(trans))
6111                 return PTR_ERR(trans);
6112
6113         ret = btrfs_update_inode(trans, root, inode);
6114         if (ret && ret == -ENOSPC) {
6115                 /* whoops, lets try again with the full transaction */
6116                 btrfs_end_transaction(trans);
6117                 trans = btrfs_start_transaction(root, 1);
6118                 if (IS_ERR(trans))
6119                         return PTR_ERR(trans);
6120
6121                 ret = btrfs_update_inode(trans, root, inode);
6122         }
6123         btrfs_end_transaction(trans);
6124         if (BTRFS_I(inode)->delayed_node)
6125                 btrfs_balance_delayed_items(fs_info);
6126
6127         return ret;
6128 }
6129
6130 /*
6131  * This is a copy of file_update_time.  We need this so we can return error on
6132  * ENOSPC for updating the inode in the case of file write and mmap writes.
6133  */
6134 static int btrfs_update_time(struct inode *inode, struct timespec *now,
6135                              int flags)
6136 {
6137         struct btrfs_root *root = BTRFS_I(inode)->root;
6138         bool dirty = flags & ~S_VERSION;
6139
6140         if (btrfs_root_readonly(root))
6141                 return -EROFS;
6142
6143         if (flags & S_VERSION)
6144                 dirty |= inode_maybe_inc_iversion(inode, dirty);
6145         if (flags & S_CTIME)
6146                 inode->i_ctime = *now;
6147         if (flags & S_MTIME)
6148                 inode->i_mtime = *now;
6149         if (flags & S_ATIME)
6150                 inode->i_atime = *now;
6151         return dirty ? btrfs_dirty_inode(inode) : 0;
6152 }
6153
6154 /*
6155  * find the highest existing sequence number in a directory
6156  * and then set the in-memory index_cnt variable to reflect
6157  * free sequence numbers
6158  */
6159 static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
6160 {
6161         struct btrfs_root *root = inode->root;
6162         struct btrfs_key key, found_key;
6163         struct btrfs_path *path;
6164         struct extent_buffer *leaf;
6165         int ret;
6166
6167         key.objectid = btrfs_ino(inode);
6168         key.type = BTRFS_DIR_INDEX_KEY;
6169         key.offset = (u64)-1;
6170
6171         path = btrfs_alloc_path();
6172         if (!path)
6173                 return -ENOMEM;
6174
6175         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6176         if (ret < 0)
6177                 goto out;
6178         /* FIXME: we should be able to handle this */
6179         if (ret == 0)
6180                 goto out;
6181         ret = 0;
6182
6183         /*
6184          * MAGIC NUMBER EXPLANATION:
6185          * since we search a directory based on f_pos we have to start at 2
6186          * since '.' and '..' have f_pos of 0 and 1 respectively, so everybody
6187          * else has to start at 2
6188          */
6189         if (path->slots[0] == 0) {
6190                 inode->index_cnt = 2;
6191                 goto out;
6192         }
6193
6194         path->slots[0]--;
6195
6196         leaf = path->nodes[0];
6197         btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6198
6199         if (found_key.objectid != btrfs_ino(inode) ||
6200             found_key.type != BTRFS_DIR_INDEX_KEY) {
6201                 inode->index_cnt = 2;
6202                 goto out;
6203         }
6204
6205         inode->index_cnt = found_key.offset + 1;
6206 out:
6207         btrfs_free_path(path);
6208         return ret;
6209 }
6210
6211 /*
6212  * helper to find a free sequence number in a given directory.  This current
6213  * code is very simple, later versions will do smarter things in the btree
6214  */
6215 int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index)
6216 {
6217         int ret = 0;
6218
6219         if (dir->index_cnt == (u64)-1) {
6220                 ret = btrfs_inode_delayed_dir_index_count(dir);
6221                 if (ret) {
6222                         ret = btrfs_set_inode_index_count(dir);
6223                         if (ret)
6224                                 return ret;
6225                 }
6226         }
6227
6228         *index = dir->index_cnt;
6229         dir->index_cnt++;
6230
6231         return ret;
6232 }
6233
6234 static int btrfs_insert_inode_locked(struct inode *inode)
6235 {
6236         struct btrfs_iget_args args;
6237         args.location = &BTRFS_I(inode)->location;
6238         args.root = BTRFS_I(inode)->root;
6239
6240         return insert_inode_locked4(inode,
6241                    btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
6242                    btrfs_find_actor, &args);
6243 }
6244
6245 /*
6246  * Inherit flags from the parent inode.
6247  *
6248  * Currently only the compression flags and the cow flags are inherited.
6249  */
6250 static void btrfs_inherit_iflags(struct inode *inode, struct inode *dir)
6251 {
6252         unsigned int flags;
6253
6254         if (!dir)
6255                 return;
6256
6257         flags = BTRFS_I(dir)->flags;
6258
6259         if (flags & BTRFS_INODE_NOCOMPRESS) {
6260                 BTRFS_I(inode)->flags &= ~BTRFS_INODE_COMPRESS;
6261                 BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
6262         } else if (flags & BTRFS_INODE_COMPRESS) {
6263                 BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS;
6264                 BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS;
6265         }
6266
6267         if (flags & BTRFS_INODE_NODATACOW) {
6268                 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
6269                 if (S_ISREG(inode->i_mode))
6270                         BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
6271         }
6272
6273         btrfs_update_iflags(inode);
6274 }
6275
6276 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
6277                                      struct btrfs_root *root,
6278                                      struct inode *dir,
6279                                      const char *name, int name_len,
6280                                      u64 ref_objectid, u64 objectid,
6281                                      umode_t mode, u64 *index)
6282 {
6283         struct btrfs_fs_info *fs_info = root->fs_info;
6284         struct inode *inode;
6285         struct btrfs_inode_item *inode_item;
6286         struct btrfs_key *location;
6287         struct btrfs_path *path;
6288         struct btrfs_inode_ref *ref;
6289         struct btrfs_key key[2];
6290         u32 sizes[2];
6291         int nitems = name ? 2 : 1;
6292         unsigned long ptr;
6293         int ret;
6294
6295         path = btrfs_alloc_path();
6296         if (!path)
6297                 return ERR_PTR(-ENOMEM);
6298
6299         inode = new_inode(fs_info->sb);
6300         if (!inode) {
6301                 btrfs_free_path(path);
6302                 return ERR_PTR(-ENOMEM);
6303         }
6304
6305         /*
6306          * O_TMPFILE, set link count to 0, so that after this point,
6307          * we fill in an inode item with the correct link count.
6308          */
6309         if (!name)
6310                 set_nlink(inode, 0);
6311
6312         /*
6313          * we have to initialize this early, so we can reclaim the inode
6314          * number if we fail afterwards in this function.
6315          */
6316         inode->i_ino = objectid;
6317
6318         if (dir && name) {
6319                 trace_btrfs_inode_request(dir);
6320
6321                 ret = btrfs_set_inode_index(BTRFS_I(dir), index);
6322                 if (ret) {
6323                         btrfs_free_path(path);
6324                         iput(inode);
6325                         return ERR_PTR(ret);
6326                 }
6327         } else if (dir) {
6328                 *index = 0;
6329         }
6330         /*
6331          * index_cnt is ignored for everything but a dir,
6332          * btrfs_set_inode_index_count has an explanation for the magic
6333          * number
6334          */
6335         BTRFS_I(inode)->index_cnt = 2;
6336         BTRFS_I(inode)->dir_index = *index;
6337         BTRFS_I(inode)->root = root;
6338         BTRFS_I(inode)->generation = trans->transid;
6339         inode->i_generation = BTRFS_I(inode)->generation;
6340
6341         /*
6342          * We could have gotten an inode number from somebody who was fsynced
6343          * and then removed in this same transaction, so let's just set full
6344          * sync since it will be a full sync anyway and this will blow away the
6345          * old info in the log.
6346          */
6347         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
6348
6349         key[0].objectid = objectid;
6350         key[0].type = BTRFS_INODE_ITEM_KEY;
6351         key[0].offset = 0;
6352
6353         sizes[0] = sizeof(struct btrfs_inode_item);
6354
6355         if (name) {
6356                 /*
6357                  * Start new inodes with an inode_ref. This is slightly more
6358                  * efficient for small numbers of hard links since they will
6359                  * be packed into one item. Extended refs will kick in if we
6360                  * add more hard links than can fit in the ref item.
6361                  */
6362                 key[1].objectid = objectid;
6363                 key[1].type = BTRFS_INODE_REF_KEY;
6364                 key[1].offset = ref_objectid;
6365
6366                 sizes[1] = name_len + sizeof(*ref);
6367         }
6368
6369         location = &BTRFS_I(inode)->location;
6370         location->objectid = objectid;
6371         location->offset = 0;
6372         location->type = BTRFS_INODE_ITEM_KEY;
6373
6374         ret = btrfs_insert_inode_locked(inode);
6375         if (ret < 0)
6376                 goto fail;
6377
6378         path->leave_spinning = 1;
6379         ret = btrfs_insert_empty_items(trans, root, path, key, sizes, nitems);
6380         if (ret != 0)
6381                 goto fail_unlock;
6382
6383         inode_init_owner(inode, dir, mode);
6384         inode_set_bytes(inode, 0);
6385
6386         inode->i_mtime = current_time(inode);
6387         inode->i_atime = inode->i_mtime;
6388         inode->i_ctime = inode->i_mtime;
6389         BTRFS_I(inode)->i_otime = inode->i_mtime;
6390
6391         inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
6392                                   struct btrfs_inode_item);
6393         memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item,
6394                              sizeof(*inode_item));
6395         fill_inode_item(trans, path->nodes[0], inode_item, inode);
6396
6397         if (name) {
6398                 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
6399                                      struct btrfs_inode_ref);
6400                 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
6401                 btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
6402                 ptr = (unsigned long)(ref + 1);
6403                 write_extent_buffer(path->nodes[0], name, ptr, name_len);
6404         }
6405
6406         btrfs_mark_buffer_dirty(path->nodes[0]);
6407         btrfs_free_path(path);
6408
6409         btrfs_inherit_iflags(inode, dir);
6410
6411         if (S_ISREG(mode)) {
6412                 if (btrfs_test_opt(fs_info, NODATASUM))
6413                         BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
6414                 if (btrfs_test_opt(fs_info, NODATACOW))
6415                         BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
6416                                 BTRFS_INODE_NODATASUM;
6417         }
6418
6419         inode_tree_add(inode);
6420
6421         trace_btrfs_inode_new(inode);
6422         btrfs_set_inode_last_trans(trans, inode);
6423
6424         btrfs_update_root_times(trans, root);
6425
6426         ret = btrfs_inode_inherit_props(trans, inode, dir);
6427         if (ret)
6428                 btrfs_err(fs_info,
6429                           "error inheriting props for ino %llu (root %llu): %d",
6430                         btrfs_ino(BTRFS_I(inode)), root->root_key.objectid, ret);
6431
6432         return inode;
6433
6434 fail_unlock:
6435         unlock_new_inode(inode);
6436 fail:
6437         if (dir && name)
6438                 BTRFS_I(dir)->index_cnt--;
6439         btrfs_free_path(path);
6440         iput(inode);
6441         return ERR_PTR(ret);
6442 }
6443
6444 static inline u8 btrfs_inode_type(struct inode *inode)
6445 {
6446         return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
6447 }
6448
6449 /*
6450  * utility function to add 'inode' into 'parent_inode' with
6451  * a give name and a given sequence number.
6452  * if 'add_backref' is true, also insert a backref from the
6453  * inode to the parent directory.
6454  */
6455 int btrfs_add_link(struct btrfs_trans_handle *trans,
6456                    struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
6457                    const char *name, int name_len, int add_backref, u64 index)
6458 {
6459         struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
6460         int ret = 0;
6461         struct btrfs_key key;
6462         struct btrfs_root *root = parent_inode->root;
6463         u64 ino = btrfs_ino(inode);
6464         u64 parent_ino = btrfs_ino(parent_inode);
6465
6466         if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6467                 memcpy(&key, &inode->root->root_key, sizeof(key));
6468         } else {
6469                 key.objectid = ino;
6470                 key.type = BTRFS_INODE_ITEM_KEY;
6471                 key.offset = 0;
6472         }
6473
6474         if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6475                 ret = btrfs_add_root_ref(trans, fs_info, key.objectid,
6476                                          root->root_key.objectid, parent_ino,
6477                                          index, name, name_len);
6478         } else if (add_backref) {
6479                 ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
6480                                              parent_ino, index);
6481         }
6482
6483         /* Nothing to clean up yet */
6484         if (ret)
6485                 return ret;
6486
6487         ret = btrfs_insert_dir_item(trans, root, name, name_len,
6488                                     parent_inode, &key,
6489                                     btrfs_inode_type(&inode->vfs_inode), index);
6490         if (ret == -EEXIST || ret == -EOVERFLOW)
6491                 goto fail_dir_item;
6492         else if (ret) {
6493                 btrfs_abort_transaction(trans, ret);
6494                 return ret;
6495         }
6496
6497         btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +
6498                            name_len * 2);
6499         inode_inc_iversion(&parent_inode->vfs_inode);
6500         parent_inode->vfs_inode.i_mtime = parent_inode->vfs_inode.i_ctime =
6501                 current_time(&parent_inode->vfs_inode);
6502         ret = btrfs_update_inode(trans, root, &parent_inode->vfs_inode);
6503         if (ret)
6504                 btrfs_abort_transaction(trans, ret);
6505         return ret;
6506
6507 fail_dir_item:
6508         if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6509                 u64 local_index;
6510                 int err;
6511                 err = btrfs_del_root_ref(trans, fs_info, key.objectid,
6512                                          root->root_key.objectid, parent_ino,
6513                                          &local_index, name, name_len);
6514
6515         } else if (add_backref) {
6516                 u64 local_index;
6517                 int err;
6518
6519                 err = btrfs_del_inode_ref(trans, root, name, name_len,
6520                                           ino, parent_ino, &local_index);
6521         }
6522         return ret;
6523 }
6524
6525 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
6526                             struct btrfs_inode *dir, struct dentry *dentry,
6527                             struct btrfs_inode *inode, int backref, u64 index)
6528 {
6529         int err = btrfs_add_link(trans, dir, inode,
6530                                  dentry->d_name.name, dentry->d_name.len,
6531                                  backref, index);
6532         if (err > 0)
6533                 err = -EEXIST;
6534         return err;
6535 }
6536
6537 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
6538                         umode_t mode, dev_t rdev)
6539 {
6540         struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
6541         struct btrfs_trans_handle *trans;
6542         struct btrfs_root *root = BTRFS_I(dir)->root;
6543         struct inode *inode = NULL;
6544         int err;
6545         int drop_inode = 0;
6546         u64 objectid;
6547         u64 index = 0;
6548
6549         /*
6550          * 2 for inode item and ref
6551          * 2 for dir items
6552          * 1 for xattr if selinux is on
6553          */
6554         trans = btrfs_start_transaction(root, 5);
6555         if (IS_ERR(trans))
6556                 return PTR_ERR(trans);
6557
6558         err = btrfs_find_free_ino(root, &objectid);
6559         if (err)
6560                 goto out_unlock;
6561
6562         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6563                         dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid,
6564                         mode, &index);
6565         if (IS_ERR(inode)) {
6566                 err = PTR_ERR(inode);
6567                 goto out_unlock;
6568         }
6569
6570         /*
6571         * If the active LSM wants to access the inode during
6572         * d_instantiate it needs these. Smack checks to see
6573         * if the filesystem supports xattrs by looking at the
6574         * ops vector.
6575         */
6576         inode->i_op = &btrfs_special_inode_operations;
6577         init_special_inode(inode, inode->i_mode, rdev);
6578
6579         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6580         if (err)
6581                 goto out_unlock_inode;
6582
6583         err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
6584                         0, index);
6585         if (err) {
6586                 goto out_unlock_inode;
6587         } else {
6588                 btrfs_update_inode(trans, root, inode);
6589                 d_instantiate_new(dentry, inode);
6590         }
6591
6592 out_unlock:
6593         btrfs_end_transaction(trans);
6594         btrfs_btree_balance_dirty(fs_info);
6595         if (drop_inode) {
6596                 inode_dec_link_count(inode);
6597                 iput(inode);
6598         }
6599         return err;
6600
6601 out_unlock_inode:
6602         drop_inode = 1;
6603         unlock_new_inode(inode);
6604         goto out_unlock;
6605
6606 }
6607
6608 static int btrfs_create(struct inode *dir, struct dentry *dentry,
6609                         umode_t mode, bool excl)
6610 {
6611         struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
6612         struct btrfs_trans_handle *trans;
6613         struct btrfs_root *root = BTRFS_I(dir)->root;
6614         struct inode *inode = NULL;
6615         int drop_inode_on_err = 0;
6616         int err;
6617         u64 objectid;
6618         u64 index = 0;
6619
6620         /*
6621          * 2 for inode item and ref
6622          * 2 for dir items
6623          * 1 for xattr if selinux is on
6624          */
6625         trans = btrfs_start_transaction(root, 5);
6626         if (IS_ERR(trans))
6627                 return PTR_ERR(trans);
6628
6629         err = btrfs_find_free_ino(root, &objectid);
6630         if (err)
6631                 goto out_unlock;
6632
6633         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6634                         dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid,
6635                         mode, &index);
6636         if (IS_ERR(inode)) {
6637                 err = PTR_ERR(inode);
6638                 goto out_unlock;
6639         }
6640         drop_inode_on_err = 1;
6641         /*
6642         * If the active LSM wants to access the inode during
6643         * d_instantiate it needs these. Smack checks to see
6644         * if the filesystem supports xattrs by looking at the
6645         * ops vector.
6646         */
6647         inode->i_fop = &btrfs_file_operations;
6648         inode->i_op = &btrfs_file_inode_operations;
6649         inode->i_mapping->a_ops = &btrfs_aops;
6650
6651         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6652         if (err)
6653                 goto out_unlock_inode;
6654
6655         err = btrfs_update_inode(trans, root, inode);
6656         if (err)
6657                 goto out_unlock_inode;
6658
6659         err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
6660                         0, index);
6661         if (err)
6662                 goto out_unlock_inode;
6663
6664         BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
6665         d_instantiate_new(dentry, inode);
6666
6667 out_unlock:
6668         btrfs_end_transaction(trans);
6669         if (err && drop_inode_on_err) {
6670                 inode_dec_link_count(inode);
6671                 iput(inode);
6672         }
6673         btrfs_btree_balance_dirty(fs_info);
6674         return err;
6675
6676 out_unlock_inode:
6677         unlock_new_inode(inode);
6678         goto out_unlock;
6679
6680 }
6681
6682 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
6683                       struct dentry *dentry)
6684 {
6685         struct btrfs_trans_handle *trans = NULL;
6686         struct btrfs_root *root = BTRFS_I(dir)->root;
6687         struct inode *inode = d_inode(old_dentry);
6688         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
6689         u64 index;
6690         int err;
6691         int drop_inode = 0;
6692
6693         /* do not allow sys_link's with other subvols of the same device */
6694         if (root->objectid != BTRFS_I(inode)->root->objectid)
6695                 return -EXDEV;
6696
6697         if (inode->i_nlink >= BTRFS_LINK_MAX)
6698                 return -EMLINK;
6699
6700         err = btrfs_set_inode_index(BTRFS_I(dir), &index);
6701         if (err)
6702                 goto fail;
6703
6704         /*
6705          * 2 items for inode and inode ref
6706          * 2 items for dir items
6707          * 1 item for parent inode
6708          */
6709         trans = btrfs_start_transaction(root, 5);
6710         if (IS_ERR(trans)) {
6711                 err = PTR_ERR(trans);
6712                 trans = NULL;
6713                 goto fail;
6714         }
6715
6716         /* There are several dir indexes for this inode, clear the cache. */
6717         BTRFS_I(inode)->dir_index = 0ULL;
6718         inc_nlink(inode);
6719         inode_inc_iversion(inode);
6720         inode->i_ctime = current_time(inode);
6721         ihold(inode);
6722         set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
6723
6724         err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
6725                         1, index);
6726
6727         if (err) {
6728                 drop_inode = 1;
6729         } else {
6730                 struct dentry *parent = dentry->d_parent;
6731                 err = btrfs_update_inode(trans, root, inode);
6732                 if (err)
6733                         goto fail;
6734                 if (inode->i_nlink == 1) {
6735                         /*
6736                          * If new hard link count is 1, it's a file created
6737                          * with open(2) O_TMPFILE flag.
6738                          */
6739                         err = btrfs_orphan_del(trans, BTRFS_I(inode));
6740                         if (err)
6741                                 goto fail;
6742                 }
6743                 d_instantiate(dentry, inode);
6744                 btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent);
6745         }
6746
6747 fail:
6748         if (trans)
6749                 btrfs_end_transaction(trans);
6750         if (drop_inode) {
6751                 inode_dec_link_count(inode);
6752                 iput(inode);
6753         }
6754         btrfs_btree_balance_dirty(fs_info);
6755         return err;
6756 }
6757
6758 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
6759 {
6760         struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
6761         struct inode *inode = NULL;
6762         struct btrfs_trans_handle *trans;
6763         struct btrfs_root *root = BTRFS_I(dir)->root;
6764         int err = 0;
6765         int drop_on_err = 0;
6766         u64 objectid = 0;
6767         u64 index = 0;
6768
6769         /*
6770          * 2 items for inode and ref
6771          * 2 items for dir items
6772          * 1 for xattr if selinux is on
6773          */
6774         trans = btrfs_start_transaction(root, 5);
6775         if (IS_ERR(trans))
6776                 return PTR_ERR(trans);
6777
6778         err = btrfs_find_free_ino(root, &objectid);
6779         if (err)
6780                 goto out_fail;
6781
6782         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6783                         dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid,
6784                         S_IFDIR | mode, &index);
6785         if (IS_ERR(inode)) {
6786                 err = PTR_ERR(inode);
6787                 goto out_fail;
6788         }
6789
6790         drop_on_err = 1;
6791         /* these must be set before we unlock the inode */
6792         inode->i_op = &btrfs_dir_inode_operations;
6793         inode->i_fop = &btrfs_dir_file_operations;
6794
6795         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6796         if (err)
6797                 goto out_fail_inode;
6798
6799         btrfs_i_size_write(BTRFS_I(inode), 0);
6800         err = btrfs_update_inode(trans, root, inode);
6801         if (err)
6802                 goto out_fail_inode;
6803
6804         err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
6805                         dentry->d_name.name,
6806                         dentry->d_name.len, 0, index);
6807         if (err)
6808                 goto out_fail_inode;
6809
6810         d_instantiate_new(dentry, inode);
6811         drop_on_err = 0;
6812
6813 out_fail:
6814         btrfs_end_transaction(trans);
6815         if (drop_on_err) {
6816                 inode_dec_link_count(inode);
6817                 iput(inode);
6818         }
6819         btrfs_btree_balance_dirty(fs_info);
6820         return err;
6821
6822 out_fail_inode:
6823         unlock_new_inode(inode);
6824         goto out_fail;
6825 }
6826
6827 static noinline int uncompress_inline(struct btrfs_path *path,
6828                                       struct page *page,
6829                                       size_t pg_offset, u64 extent_offset,
6830                                       struct btrfs_file_extent_item *item)
6831 {
6832         int ret;
6833         struct extent_buffer *leaf = path->nodes[0];
6834         char *tmp;
6835         size_t max_size;
6836         unsigned long inline_size;
6837         unsigned long ptr;
6838         int compress_type;
6839
6840         WARN_ON(pg_offset != 0);
6841         compress_type = btrfs_file_extent_compression(leaf, item);
6842         max_size = btrfs_file_extent_ram_bytes(leaf, item);
6843         inline_size = btrfs_file_extent_inline_item_len(leaf,
6844                                         btrfs_item_nr(path->slots[0]));
6845         tmp = kmalloc(inline_size, GFP_NOFS);
6846         if (!tmp)
6847                 return -ENOMEM;
6848         ptr = btrfs_file_extent_inline_start(item);
6849
6850         read_extent_buffer(leaf, tmp, ptr, inline_size);
6851
6852         max_size = min_t(unsigned long, PAGE_SIZE, max_size);
6853         ret = btrfs_decompress(compress_type, tmp, page,
6854                                extent_offset, inline_size, max_size);
6855
6856         /*
6857          * decompression code contains a memset to fill in any space between the end
6858          * of the uncompressed data and the end of max_size in case the decompressed
6859          * data ends up shorter than ram_bytes.  That doesn't cover the hole between
6860          * the end of an inline extent and the beginning of the next block, so we
6861          * cover that region here.
6862          */
6863
6864         if (max_size + pg_offset < PAGE_SIZE) {
6865                 char *map = kmap(page);
6866                 memset(map + pg_offset + max_size, 0, PAGE_SIZE - max_size - pg_offset);
6867                 kunmap(page);
6868         }
6869         kfree(tmp);
6870         return ret;
6871 }
6872
6873 /*
6874  * a bit scary, this does extent mapping from logical file offset to the disk.
6875  * the ugly parts come from merging extents from the disk with the in-ram
6876  * representation.  This gets more complex because of the data=ordered code,
6877  * where the in-ram extents might be locked pending data=ordered completion.
6878  *
6879  * This also copies inline extents directly into the page.
6880  */
6881 struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
6882                 struct page *page,
6883             size_t pg_offset, u64 start, u64 len,
6884                 int create)
6885 {
6886         struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
6887         int ret;
6888         int err = 0;
6889         u64 extent_start = 0;
6890         u64 extent_end = 0;
6891         u64 objectid = btrfs_ino(inode);
6892         u32 found_type;
6893         struct btrfs_path *path = NULL;
6894         struct btrfs_root *root = inode->root;
6895         struct btrfs_file_extent_item *item;
6896         struct extent_buffer *leaf;
6897         struct btrfs_key found_key;
6898         struct extent_map *em = NULL;
6899         struct extent_map_tree *em_tree = &inode->extent_tree;
6900         struct extent_io_tree *io_tree = &inode->io_tree;
6901         const bool new_inline = !page || create;
6902
6903         read_lock(&em_tree->lock);
6904         em = lookup_extent_mapping(em_tree, start, len);
6905         if (em)
6906                 em->bdev = fs_info->fs_devices->latest_bdev;
6907         read_unlock(&em_tree->lock);
6908
6909         if (em) {
6910                 if (em->start > start || em->start + em->len <= start)
6911                         free_extent_map(em);
6912                 else if (em->block_start == EXTENT_MAP_INLINE && page)
6913                         free_extent_map(em);
6914                 else
6915                         goto out;
6916         }
6917         em = alloc_extent_map();
6918         if (!em) {
6919                 err = -ENOMEM;
6920                 goto out;
6921         }
6922         em->bdev = fs_info->fs_devices->latest_bdev;
6923         em->start = EXTENT_MAP_HOLE;
6924         em->orig_start = EXTENT_MAP_HOLE;
6925         em->len = (u64)-1;
6926         em->block_len = (u64)-1;
6927
6928         if (!path) {
6929                 path = btrfs_alloc_path();
6930                 if (!path) {
6931                         err = -ENOMEM;
6932                         goto out;
6933                 }
6934                 /*
6935                  * Chances are we'll be called again, so go ahead and do
6936                  * readahead
6937                  */
6938                 path->reada = READA_FORWARD;
6939         }
6940
6941         ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0);
6942         if (ret < 0) {
6943                 err = ret;
6944                 goto out;
6945         }
6946
6947         if (ret != 0) {
6948                 if (path->slots[0] == 0)
6949                         goto not_found;
6950                 path->slots[0]--;
6951         }
6952
6953         leaf = path->nodes[0];
6954         item = btrfs_item_ptr(leaf, path->slots[0],
6955                               struct btrfs_file_extent_item);
6956         /* are we inside the extent that was found? */
6957         btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6958         found_type = found_key.type;
6959         if (found_key.objectid != objectid ||
6960             found_type != BTRFS_EXTENT_DATA_KEY) {
6961                 /*
6962                  * If we backup past the first extent we want to move forward
6963                  * and see if there is an extent in front of us, otherwise we'll
6964                  * say there is a hole for our whole search range which can
6965                  * cause problems.
6966                  */
6967                 extent_end = start;
6968                 goto next;
6969         }
6970
6971         found_type = btrfs_file_extent_type(leaf, item);
6972         extent_start = found_key.offset;
6973         if (found_type == BTRFS_FILE_EXTENT_REG ||
6974             found_type == BTRFS_FILE_EXTENT_PREALLOC) {
6975                 extent_end = extent_start +
6976                        btrfs_file_extent_num_bytes(leaf, item);
6977
6978                 trace_btrfs_get_extent_show_fi_regular(inode, leaf, item,
6979                                                        extent_start);
6980         } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
6981                 size_t size;
6982                 size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
6983                 extent_end = ALIGN(extent_start + size,
6984                                    fs_info->sectorsize);
6985
6986                 trace_btrfs_get_extent_show_fi_inline(inode, leaf, item,
6987                                                       path->slots[0],
6988                                                       extent_start);
6989         }
6990 next:
6991         if (start >= extent_end) {
6992                 path->slots[0]++;
6993                 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
6994                         ret = btrfs_next_leaf(root, path);
6995                         if (ret < 0) {
6996                                 err = ret;
6997                                 goto out;
6998                         }
6999                         if (ret > 0)
7000                                 goto not_found;
7001                         leaf = path->nodes[0];
7002                 }
7003                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
7004                 if (found_key.objectid != objectid ||
7005                     found_key.type != BTRFS_EXTENT_DATA_KEY)
7006                         goto not_found;
7007                 if (start + len <= found_key.offset)
7008                         goto not_found;
7009                 if (start > found_key.offset)
7010                         goto next;
7011                 em->start = start;
7012                 em->orig_start = start;
7013                 em->len = found_key.offset - start;
7014                 goto not_found_em;
7015         }
7016
7017         btrfs_extent_item_to_extent_map(inode, path, item,
7018                         new_inline, em);
7019
7020         if (found_type == BTRFS_FILE_EXTENT_REG ||
7021             found_type == BTRFS_FILE_EXTENT_PREALLOC) {
7022                 goto insert;
7023         } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
7024                 unsigned long ptr;
7025                 char *map;
7026                 size_t size;
7027                 size_t extent_offset;
7028                 size_t copy_size;
7029
7030                 if (new_inline)
7031                         goto out;
7032
7033                 size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
7034                 extent_offset = page_offset(page) + pg_offset - extent_start;
7035                 copy_size = min_t(u64, PAGE_SIZE - pg_offset,
7036                                   size - extent_offset);
7037                 em->start = extent_start + extent_offset;
7038                 em->len = ALIGN(copy_size, fs_info->sectorsize);
7039                 em->orig_block_len = em->len;
7040                 em->orig_start = em->start;
7041                 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
7042                 if (!PageUptodate(page)) {
7043                         if (btrfs_file_extent_compression(leaf, item) !=
7044                             BTRFS_COMPRESS_NONE) {
7045                                 ret = uncompress_inline(path, page, pg_offset,
7046                                                         extent_offset, item);
7047                                 if (ret) {
7048                                         err = ret;
7049                                         goto out;
7050                                 }
7051                         } else {
7052                                 map = kmap(page);
7053                                 read_extent_buffer(leaf, map + pg_offset, ptr,
7054                                                    copy_size);
7055                                 if (pg_offset + copy_size < PAGE_SIZE) {
7056                                         memset(map + pg_offset + copy_size, 0,
7057                                                PAGE_SIZE - pg_offset -
7058                                                copy_size);
7059                                 }
7060                                 kunmap(page);
7061                         }
7062                         flush_dcache_page(page);
7063                 }
7064                 set_extent_uptodate(io_tree, em->start,
7065                                     extent_map_end(em) - 1, NULL, GFP_NOFS);
7066                 goto insert;
7067         }
7068 not_found:
7069         em->start = start;
7070         em->orig_start = start;
7071         em->len = len;
7072 not_found_em:
7073         em->block_start = EXTENT_MAP_HOLE;
7074 insert:
7075         btrfs_release_path(path);
7076         if (em->start > start || extent_map_end(em) <= start) {
7077                 btrfs_err(fs_info,
7078                           "bad extent! em: [%llu %llu] passed [%llu %llu]",
7079                           em->start, em->len, start, len);
7080                 err = -EIO;
7081                 goto out;
7082         }
7083
7084         err = 0;
7085         write_lock(&em_tree->lock);
7086         err = btrfs_add_extent_mapping(em_tree, &em, start, len);
7087         write_unlock(&em_tree->lock);
7088 out:
7089
7090         trace_btrfs_get_extent(root, inode, em);
7091
7092         btrfs_free_path(path);
7093         if (err) {
7094                 free_extent_map(em);
7095                 return ERR_PTR(err);
7096         }
7097         BUG_ON(!em); /* Error is always set */
7098         return em;
7099 }
7100
7101 struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode,
7102                 struct page *page,
7103                 size_t pg_offset, u64 start, u64 len,
7104                 int create)
7105 {
7106         struct extent_map *em;
7107         struct extent_map *hole_em = NULL;
7108         u64 range_start = start;
7109         u64 end;
7110         u64 found;
7111         u64 found_end;
7112         int err = 0;
7113
7114         em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
7115         if (IS_ERR(em))
7116                 return em;
7117         /*
7118          * If our em maps to:
7119          * - a hole or
7120          * - a pre-alloc extent,
7121          * there might actually be delalloc bytes behind it.
7122          */
7123         if (em->block_start != EXTENT_MAP_HOLE &&
7124             !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7125                 return em;
7126         else
7127                 hole_em = em;
7128
7129         /* check to see if we've wrapped (len == -1 or similar) */
7130         end = start + len;
7131         if (end < start)
7132                 end = (u64)-1;
7133         else
7134                 end -= 1;
7135
7136         em = NULL;
7137
7138         /* ok, we didn't find anything, lets look for delalloc */
7139         found = count_range_bits(&inode->io_tree, &range_start,
7140                                  end, len, EXTENT_DELALLOC, 1);
7141         found_end = range_start + found;
7142         if (found_end < range_start)
7143                 found_end = (u64)-1;
7144
7145         /*
7146          * we didn't find anything useful, return
7147          * the original results from get_extent()
7148          */
7149         if (range_start > end || found_end <= start) {
7150                 em = hole_em;
7151                 hole_em = NULL;
7152                 goto out;
7153         }
7154
7155         /* adjust the range_start to make sure it doesn't
7156          * go backwards from the start they passed in
7157          */
7158         range_start = max(start, range_start);
7159         found = found_end - range_start;
7160
7161         if (found > 0) {
7162                 u64 hole_start = start;
7163                 u64 hole_len = len;
7164
7165                 em = alloc_extent_map();
7166                 if (!em) {
7167                         err = -ENOMEM;
7168                         goto out;
7169                 }
7170                 /*
7171                  * when btrfs_get_extent can't find anything it
7172                  * returns one huge hole
7173                  *
7174                  * make sure what it found really fits our range, and
7175                  * adjust to make sure it is based on the start from
7176                  * the caller
7177                  */
7178                 if (hole_em) {
7179                         u64 calc_end = extent_map_end(hole_em);
7180
7181                         if (calc_end <= start || (hole_em->start > end)) {
7182                                 free_extent_map(hole_em);
7183                                 hole_em = NULL;
7184                         } else {
7185                                 hole_start = max(hole_em->start, start);
7186                                 hole_len = calc_end - hole_start;
7187                         }
7188                 }
7189                 em->bdev = NULL;
7190                 if (hole_em && range_start > hole_start) {
7191                         /* our hole starts before our delalloc, so we
7192                          * have to return just the parts of the hole
7193                          * that go until  the delalloc starts
7194                          */
7195                         em->len = min(hole_len,
7196                                       range_start - hole_start);
7197                         em->start = hole_start;
7198                         em->orig_start = hole_start;
7199                         /*
7200                          * don't adjust block start at all,
7201                          * it is fixed at EXTENT_MAP_HOLE
7202                          */
7203                         em->block_start = hole_em->block_start;
7204                         em->block_len = hole_len;
7205                         if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags))
7206                                 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
7207                 } else {
7208                         em->start = range_start;
7209                         em->len = found;
7210                         em->orig_start = range_start;
7211                         em->block_start = EXTENT_MAP_DELALLOC;
7212                         em->block_len = found;
7213                 }
7214         } else {
7215                 return hole_em;
7216         }
7217 out:
7218
7219         free_extent_map(hole_em);
7220         if (err) {
7221                 free_extent_map(em);
7222                 return ERR_PTR(err);
7223         }
7224         return em;
7225 }
7226
7227 static struct extent_map *btrfs_create_dio_extent(struct inode *inode,
7228                                                   const u64 start,
7229                                                   const u64 len,
7230                                                   const u64 orig_start,
7231                                                   const u64 block_start,
7232                                                   const u64 block_len,
7233                                                   const u64 orig_block_len,
7234                                                   const u64 ram_bytes,
7235                                                   const int type)
7236 {
7237         struct extent_map *em = NULL;
7238         int ret;
7239
7240         if (type != BTRFS_ORDERED_NOCOW) {
7241                 em = create_io_em(inode, start, len, orig_start,
7242                                   block_start, block_len, orig_block_len,
7243                                   ram_bytes,
7244                                   BTRFS_COMPRESS_NONE, /* compress_type */
7245                                   type);
7246                 if (IS_ERR(em))
7247                         goto out;
7248         }
7249         ret = btrfs_add_ordered_extent_dio(inode, start, block_start,
7250                                            len, block_len, type);
7251         if (ret) {
7252                 if (em) {
7253                         free_extent_map(em);
7254                         btrfs_drop_extent_cache(BTRFS_I(inode), start,
7255                                                 start + len - 1, 0);
7256                 }
7257                 em = ERR_PTR(ret);
7258         }
7259  out:
7260
7261         return em;
7262 }
7263
7264 static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
7265                                                   u64 start, u64 len)
7266 {
7267         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7268         struct btrfs_root *root = BTRFS_I(inode)->root;
7269         struct extent_map *em;
7270         struct btrfs_key ins;
7271         u64 alloc_hint;
7272         int ret;
7273
7274         alloc_hint = get_extent_allocation_hint(inode, start, len);
7275         ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize,
7276                                    0, alloc_hint, &ins, 1, 1);
7277         if (ret)
7278                 return ERR_PTR(ret);
7279
7280         em = btrfs_create_dio_extent(inode, start, ins.offset, start,
7281                                      ins.objectid, ins.offset, ins.offset,
7282                                      ins.offset, BTRFS_ORDERED_REGULAR);
7283         btrfs_dec_block_group_reservations(fs_info, ins.objectid);
7284         if (IS_ERR(em))
7285                 btrfs_free_reserved_extent(fs_info, ins.objectid,
7286                                            ins.offset, 1);
7287
7288         return em;
7289 }
7290
7291 /*
7292  * returns 1 when the nocow is safe, < 1 on error, 0 if the
7293  * block must be cow'd
7294  */
7295 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
7296                               u64 *orig_start, u64 *orig_block_len,
7297                               u64 *ram_bytes)
7298 {
7299         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7300         struct btrfs_path *path;
7301         int ret;
7302         struct extent_buffer *leaf;
7303         struct btrfs_root *root = BTRFS_I(inode)->root;
7304         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
7305         struct btrfs_file_extent_item *fi;
7306         struct btrfs_key key;
7307         u64 disk_bytenr;
7308         u64 backref_offset;
7309         u64 extent_end;
7310         u64 num_bytes;
7311         int slot;
7312         int found_type;
7313         bool nocow = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW);
7314
7315         path = btrfs_alloc_path();
7316         if (!path)
7317                 return -ENOMEM;
7318
7319         ret = btrfs_lookup_file_extent(NULL, root, path,
7320                         btrfs_ino(BTRFS_I(inode)), offset, 0);
7321         if (ret < 0)
7322                 goto out;
7323
7324         slot = path->slots[0];
7325         if (ret == 1) {
7326                 if (slot == 0) {
7327                         /* can't find the item, must cow */
7328                         ret = 0;
7329                         goto out;
7330                 }
7331                 slot--;
7332         }
7333         ret = 0;
7334         leaf = path->nodes[0];
7335         btrfs_item_key_to_cpu(leaf, &key, slot);
7336         if (key.objectid != btrfs_ino(BTRFS_I(inode)) ||
7337             key.type != BTRFS_EXTENT_DATA_KEY) {
7338                 /* not our file or wrong item type, must cow */
7339                 goto out;
7340         }
7341
7342         if (key.offset > offset) {
7343                 /* Wrong offset, must cow */
7344                 goto out;
7345         }
7346
7347         fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
7348         found_type = btrfs_file_extent_type(leaf, fi);
7349         if (found_type != BTRFS_FILE_EXTENT_REG &&
7350             found_type != BTRFS_FILE_EXTENT_PREALLOC) {
7351                 /* not a regular extent, must cow */
7352                 goto out;
7353         }
7354
7355         if (!nocow && found_type == BTRFS_FILE_EXTENT_REG)
7356                 goto out;
7357
7358         extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
7359         if (extent_end <= offset)
7360                 goto out;
7361
7362         disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
7363         if (disk_bytenr == 0)
7364                 goto out;
7365
7366         if (btrfs_file_extent_compression(leaf, fi) ||
7367             btrfs_file_extent_encryption(leaf, fi) ||
7368             btrfs_file_extent_other_encoding(leaf, fi))
7369                 goto out;
7370
7371         backref_offset = btrfs_file_extent_offset(leaf, fi);
7372
7373         if (orig_start) {
7374                 *orig_start = key.offset - backref_offset;
7375                 *orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
7376                 *ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
7377         }
7378
7379         if (btrfs_extent_readonly(fs_info, disk_bytenr))
7380                 goto out;
7381
7382         num_bytes = min(offset + *len, extent_end) - offset;
7383         if (!nocow && found_type == BTRFS_FILE_EXTENT_PREALLOC) {
7384                 u64 range_end;
7385
7386                 range_end = round_up(offset + num_bytes,
7387                                      root->fs_info->sectorsize) - 1;
7388                 ret = test_range_bit(io_tree, offset, range_end,
7389                                      EXTENT_DELALLOC, 0, NULL);
7390                 if (ret) {
7391                         ret = -EAGAIN;
7392                         goto out;
7393                 }
7394         }
7395
7396         btrfs_release_path(path);
7397
7398         /*
7399          * look for other files referencing this extent, if we
7400          * find any we must cow
7401          */
7402
7403         ret = btrfs_cross_ref_exist(root, btrfs_ino(BTRFS_I(inode)),
7404                                     key.offset - backref_offset, disk_bytenr);
7405         if (ret) {
7406                 ret = 0;
7407                 goto out;
7408         }
7409
7410         /*
7411          * adjust disk_bytenr and num_bytes to cover just the bytes
7412          * in this extent we are about to write.  If there
7413          * are any csums in that range we have to cow in order
7414          * to keep the csums correct
7415          */
7416         disk_bytenr += backref_offset;
7417         disk_bytenr += offset - key.offset;
7418         if (csum_exist_in_range(fs_info, disk_bytenr, num_bytes))
7419                 goto out;
7420         /*
7421          * all of the above have passed, it is safe to overwrite this extent
7422          * without cow
7423          */
7424         *len = num_bytes;
7425         ret = 1;
7426 out:
7427         btrfs_free_path(path);
7428         return ret;
7429 }
7430
7431 static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
7432                               struct extent_state **cached_state, int writing)
7433 {
7434         struct btrfs_ordered_extent *ordered;
7435         int ret = 0;
7436
7437         while (1) {
7438                 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7439                                  cached_state);
7440                 /*
7441                  * We're concerned with the entire range that we're going to be
7442                  * doing DIO to, so we need to make sure there's no ordered
7443                  * extents in this range.
7444                  */
7445                 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart,
7446                                                      lockend - lockstart + 1);
7447
7448                 /*
7449                  * We need to make sure there are no buffered pages in this
7450                  * range either, we could have raced between the invalidate in
7451                  * generic_file_direct_write and locking the extent.  The
7452                  * invalidate needs to happen so that reads after a write do not
7453                  * get stale data.
7454                  */
7455                 if (!ordered &&
7456                     (!writing || !filemap_range_has_page(inode->i_mapping,
7457                                                          lockstart, lockend)))
7458                         break;
7459
7460                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7461                                      cached_state);
7462
7463                 if (ordered) {
7464                         /*
7465                          * If we are doing a DIO read and the ordered extent we
7466                          * found is for a buffered write, we can not wait for it
7467                          * to complete and retry, because if we do so we can
7468                          * deadlock with concurrent buffered writes on page
7469                          * locks. This happens only if our DIO read covers more
7470                          * than one extent map, if at this point has already
7471                          * created an ordered extent for a previous extent map
7472                          * and locked its range in the inode's io tree, and a
7473                          * concurrent write against that previous extent map's
7474                          * range and this range started (we unlock the ranges
7475                          * in the io tree only when the bios complete and
7476                          * buffered writes always lock pages before attempting
7477                          * to lock range in the io tree).
7478                          */
7479                         if (writing ||
7480                             test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags))
7481                                 btrfs_start_ordered_extent(inode, ordered, 1);
7482                         else
7483                                 ret = -ENOTBLK;
7484                         btrfs_put_ordered_extent(ordered);
7485                 } else {
7486                         /*
7487                          * We could trigger writeback for this range (and wait
7488                          * for it to complete) and then invalidate the pages for
7489                          * this range (through invalidate_inode_pages2_range()),
7490                          * but that can lead us to a deadlock with a concurrent
7491                          * call to readpages() (a buffered read or a defrag call
7492                          * triggered a readahead) on a page lock due to an
7493                          * ordered dio extent we created before but did not have
7494                          * yet a corresponding bio submitted (whence it can not
7495                          * complete), which makes readpages() wait for that
7496                          * ordered extent to complete while holding a lock on
7497                          * that page.
7498                          */
7499                         ret = -ENOTBLK;
7500                 }
7501
7502                 if (ret)
7503                         break;
7504
7505                 cond_resched();
7506         }
7507
7508         return ret;
7509 }
7510
7511 /* The callers of this must take lock_extent() */
7512 static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len,
7513                                        u64 orig_start, u64 block_start,
7514                                        u64 block_len, u64 orig_block_len,
7515                                        u64 ram_bytes, int compress_type,
7516                                        int type)
7517 {
7518         struct extent_map_tree *em_tree;
7519         struct extent_map *em;
7520         struct btrfs_root *root = BTRFS_I(inode)->root;
7521         int ret;
7522
7523         ASSERT(type == BTRFS_ORDERED_PREALLOC ||
7524                type == BTRFS_ORDERED_COMPRESSED ||
7525                type == BTRFS_ORDERED_NOCOW ||
7526                type == BTRFS_ORDERED_REGULAR);
7527
7528         em_tree = &BTRFS_I(inode)->extent_tree;
7529         em = alloc_extent_map();
7530         if (!em)
7531                 return ERR_PTR(-ENOMEM);
7532
7533         em->start = start;
7534         em->orig_start = orig_start;
7535         em->len = len;
7536         em->block_len = block_len;
7537         em->block_start = block_start;
7538         em->bdev = root->fs_info->fs_devices->latest_bdev;
7539         em->orig_block_len = orig_block_len;
7540         em->ram_bytes = ram_bytes;
7541         em->generation = -1;
7542         set_bit(EXTENT_FLAG_PINNED, &em->flags);
7543         if (type == BTRFS_ORDERED_PREALLOC) {
7544                 set_bit(EXTENT_FLAG_FILLING, &em->flags);
7545         } else if (type == BTRFS_ORDERED_COMPRESSED) {
7546                 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
7547                 em->compress_type = compress_type;
7548         }
7549
7550         do {
7551                 btrfs_drop_extent_cache(BTRFS_I(inode), em->start,
7552                                 em->start + em->len - 1, 0);
7553                 write_lock(&em_tree->lock);
7554                 ret = add_extent_mapping(em_tree, em, 1);
7555                 write_unlock(&em_tree->lock);
7556                 /*
7557                  * The caller has taken lock_extent(), who could race with us
7558                  * to add em?
7559                  */
7560         } while (ret == -EEXIST);
7561
7562         if (ret) {
7563                 free_extent_map(em);
7564                 return ERR_PTR(ret);
7565         }
7566
7567         /* em got 2 refs now, callers needs to do free_extent_map once. */
7568         return em;
7569 }
7570
7571 static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
7572                                    struct buffer_head *bh_result, int create)
7573 {
7574         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7575         struct extent_map *em;
7576         struct extent_state *cached_state = NULL;
7577         struct btrfs_dio_data *dio_data = NULL;
7578         u64 start = iblock << inode->i_blkbits;
7579         u64 lockstart, lockend;
7580         u64 len = bh_result->b_size;
7581         int unlock_bits = EXTENT_LOCKED;
7582         int ret = 0;
7583
7584         if (create)
7585                 unlock_bits |= EXTENT_DIRTY;
7586         else
7587                 len = min_t(u64, len, fs_info->sectorsize);
7588
7589         lockstart = start;
7590         lockend = start + len - 1;
7591
7592         if (current->journal_info) {
7593                 /*
7594                  * Need to pull our outstanding extents and set journal_info to NULL so
7595                  * that anything that needs to check if there's a transaction doesn't get
7596                  * confused.
7597                  */
7598                 dio_data = current->journal_info;
7599                 current->journal_info = NULL;
7600         }
7601
7602         /*
7603          * If this errors out it's because we couldn't invalidate pagecache for
7604          * this range and we need to fallback to buffered.
7605          */
7606         if (lock_extent_direct(inode, lockstart, lockend, &cached_state,
7607                                create)) {
7608                 ret = -ENOTBLK;
7609                 goto err;
7610         }
7611
7612         em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0);
7613         if (IS_ERR(em)) {
7614                 ret = PTR_ERR(em);
7615                 goto unlock_err;
7616         }
7617
7618         /*
7619          * Ok for INLINE and COMPRESSED extents we need to fallback on buffered
7620          * io.  INLINE is special, and we could probably kludge it in here, but
7621          * it's still buffered so for safety lets just fall back to the generic
7622          * buffered path.
7623          *
7624          * For COMPRESSED we _have_ to read the entire extent in so we can
7625          * decompress it, so there will be buffering required no matter what we
7626          * do, so go ahead and fallback to buffered.
7627          *
7628          * We return -ENOTBLK because that's what makes DIO go ahead and go back
7629          * to buffered IO.  Don't blame me, this is the price we pay for using
7630          * the generic code.
7631          */
7632         if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
7633             em->block_start == EXTENT_MAP_INLINE) {
7634                 free_extent_map(em);
7635                 ret = -ENOTBLK;
7636                 goto unlock_err;
7637         }
7638
7639         /* Just a good old fashioned hole, return */
7640         if (!create && (em->block_start == EXTENT_MAP_HOLE ||
7641                         test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
7642                 free_extent_map(em);
7643                 goto unlock_err;
7644         }
7645
7646         /*
7647          * We don't allocate a new extent in the following cases
7648          *
7649          * 1) The inode is marked as NODATACOW.  In this case we'll just use the
7650          * existing extent.
7651          * 2) The extent is marked as PREALLOC.  We're good to go here and can
7652          * just use the extent.
7653          *
7654          */
7655         if (!create) {
7656                 len = min(len, em->len - (start - em->start));
7657                 lockstart = start + len;
7658                 goto unlock;
7659         }
7660
7661         if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
7662             ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
7663              em->block_start != EXTENT_MAP_HOLE)) {
7664                 int type;
7665                 u64 block_start, orig_start, orig_block_len, ram_bytes;
7666
7667                 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7668                         type = BTRFS_ORDERED_PREALLOC;
7669                 else
7670                         type = BTRFS_ORDERED_NOCOW;
7671                 len = min(len, em->len - (start - em->start));
7672                 block_start = em->block_start + (start - em->start);
7673
7674                 if (can_nocow_extent(inode, start, &len, &orig_start,
7675                                      &orig_block_len, &ram_bytes) == 1 &&
7676                     btrfs_inc_nocow_writers(fs_info, block_start)) {
7677                         struct extent_map *em2;
7678
7679                         em2 = btrfs_create_dio_extent(inode, start, len,
7680                                                       orig_start, block_start,
7681                                                       len, orig_block_len,
7682                                                       ram_bytes, type);
7683                         btrfs_dec_nocow_writers(fs_info, block_start);
7684                         if (type == BTRFS_ORDERED_PREALLOC) {
7685                                 free_extent_map(em);
7686                                 em = em2;
7687                         }
7688                         if (em2 && IS_ERR(em2)) {
7689                                 ret = PTR_ERR(em2);
7690                                 goto unlock_err;
7691                         }
7692                         /*
7693                          * For inode marked NODATACOW or extent marked PREALLOC,
7694                          * use the existing or preallocated extent, so does not
7695                          * need to adjust btrfs_space_info's bytes_may_use.
7696                          */
7697                         btrfs_free_reserved_data_space_noquota(inode,
7698                                         start, len);
7699                         goto unlock;
7700                 }
7701         }
7702
7703         /*
7704          * this will cow the extent, reset the len in case we changed
7705          * it above
7706          */
7707         len = bh_result->b_size;
7708         free_extent_map(em);
7709         em = btrfs_new_extent_direct(inode, start, len);
7710         if (IS_ERR(em)) {
7711                 ret = PTR_ERR(em);
7712                 goto unlock_err;
7713         }
7714         len = min(len, em->len - (start - em->start));
7715 unlock:
7716         bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
7717                 inode->i_blkbits;
7718         bh_result->b_size = len;
7719         bh_result->b_bdev = em->bdev;
7720         set_buffer_mapped(bh_result);
7721         if (create) {
7722                 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7723                         set_buffer_new(bh_result);
7724
7725                 /*
7726                  * Need to update the i_size under the extent lock so buffered
7727                  * readers will get the updated i_size when we unlock.
7728                  */
7729                 if (!dio_data->overwrite && start + len > i_size_read(inode))
7730                         i_size_write(inode, start + len);
7731
7732                 WARN_ON(dio_data->reserve < len);
7733                 dio_data->reserve -= len;
7734                 dio_data->unsubmitted_oe_range_end = start + len;
7735                 current->journal_info = dio_data;
7736         }
7737
7738         /*
7739          * In the case of write we need to clear and unlock the entire range,
7740          * in the case of read we need to unlock only the end area that we
7741          * aren't using if there is any left over space.
7742          */
7743         if (lockstart < lockend) {
7744                 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
7745                                  lockend, unlock_bits, 1, 0,
7746                                  &cached_state);
7747         } else {
7748                 free_extent_state(cached_state);
7749         }
7750
7751         free_extent_map(em);
7752
7753         return 0;
7754
7755 unlock_err:
7756         clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7757                          unlock_bits, 1, 0, &cached_state);
7758 err:
7759         if (dio_data)
7760                 current->journal_info = dio_data;
7761         return ret;
7762 }
7763
7764 static inline blk_status_t submit_dio_repair_bio(struct inode *inode,
7765                                                  struct bio *bio,
7766                                                  int mirror_num)
7767 {
7768         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7769         blk_status_t ret;
7770
7771         BUG_ON(bio_op(bio) == REQ_OP_WRITE);
7772
7773         ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DIO_REPAIR);
7774         if (ret)
7775                 return ret;
7776
7777         ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
7778
7779         return ret;
7780 }
7781
7782 static int btrfs_check_dio_repairable(struct inode *inode,
7783                                       struct bio *failed_bio,
7784                                       struct io_failure_record *failrec,
7785                                       int failed_mirror)
7786 {
7787         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7788         int num_copies;
7789
7790         num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len);
7791         if (num_copies == 1) {
7792                 /*
7793                  * we only have a single copy of the data, so don't bother with
7794                  * all the retry and error correction code that follows. no
7795                  * matter what the error is, it is very likely to persist.
7796                  */
7797                 btrfs_debug(fs_info,
7798                         "Check DIO Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
7799                         num_copies, failrec->this_mirror, failed_mirror);
7800                 return 0;
7801         }
7802
7803         failrec->failed_mirror = failed_mirror;
7804         failrec->this_mirror++;
7805         if (failrec->this_mirror == failed_mirror)
7806                 failrec->this_mirror++;
7807
7808         if (failrec->this_mirror > num_copies) {
7809                 btrfs_debug(fs_info,
7810                         "Check DIO Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
7811                         num_copies, failrec->this_mirror, failed_mirror);
7812                 return 0;
7813         }
7814
7815         return 1;
7816 }
7817
7818 static blk_status_t dio_read_error(struct inode *inode, struct bio *failed_bio,
7819                                    struct page *page, unsigned int pgoff,
7820                                    u64 start, u64 end, int failed_mirror,
7821                                    bio_end_io_t *repair_endio, void *repair_arg)
7822 {
7823         struct io_failure_record *failrec;
7824         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
7825         struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
7826         struct bio *bio;
7827         int isector;
7828         unsigned int read_mode = 0;
7829         int segs;
7830         int ret;
7831         blk_status_t status;
7832         struct bio_vec bvec;
7833
7834         BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
7835
7836         ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
7837         if (ret)
7838                 return errno_to_blk_status(ret);
7839
7840         ret = btrfs_check_dio_repairable(inode, failed_bio, failrec,
7841                                          failed_mirror);
7842         if (!ret) {
7843                 free_io_failure(failure_tree, io_tree, failrec);
7844                 return BLK_STS_IOERR;
7845         }
7846
7847         segs = bio_segments(failed_bio);
7848         bio_get_first_bvec(failed_bio, &bvec);
7849         if (segs > 1 ||
7850             (bvec.bv_len > btrfs_inode_sectorsize(inode)))
7851                 read_mode |= REQ_FAILFAST_DEV;
7852
7853         isector = start - btrfs_io_bio(failed_bio)->logical;
7854         isector >>= inode->i_sb->s_blocksize_bits;
7855         bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
7856                                 pgoff, isector, repair_endio, repair_arg);
7857         bio_set_op_attrs(bio, REQ_OP_READ, read_mode);
7858
7859         btrfs_debug(BTRFS_I(inode)->root->fs_info,
7860                     "repair DIO read error: submitting new dio read[%#x] to this_mirror=%d, in_validation=%d",
7861                     read_mode, failrec->this_mirror, failrec->in_validation);
7862
7863         status = submit_dio_repair_bio(inode, bio, failrec->this_mirror);
7864         if (status) {
7865                 free_io_failure(failure_tree, io_tree, failrec);
7866                 bio_put(bio);
7867         }
7868
7869         return status;
7870 }
7871
7872 struct btrfs_retry_complete {
7873         struct completion done;
7874         struct inode *inode;
7875         u64 start;
7876         int uptodate;
7877 };
7878
7879 static void btrfs_retry_endio_nocsum(struct bio *bio)
7880 {
7881         struct btrfs_retry_complete *done = bio->bi_private;
7882         struct inode *inode = done->inode;
7883         struct bio_vec *bvec;
7884         struct extent_io_tree *io_tree, *failure_tree;
7885         int i;
7886
7887         if (bio->bi_status)
7888                 goto end;
7889
7890         ASSERT(bio->bi_vcnt == 1);
7891         io_tree = &BTRFS_I(inode)->io_tree;
7892         failure_tree = &BTRFS_I(inode)->io_failure_tree;
7893         ASSERT(bio_first_bvec_all(bio)->bv_len == btrfs_inode_sectorsize(inode));
7894
7895         done->uptodate = 1;
7896         ASSERT(!bio_flagged(bio, BIO_CLONED));
7897         bio_for_each_segment_all(bvec, bio, i)
7898                 clean_io_failure(BTRFS_I(inode)->root->fs_info, failure_tree,
7899                                  io_tree, done->start, bvec->bv_page,
7900                                  btrfs_ino(BTRFS_I(inode)), 0);
7901 end:
7902         complete(&done->done);
7903         bio_put(bio);
7904 }
7905
7906 static blk_status_t __btrfs_correct_data_nocsum(struct inode *inode,
7907                                                 struct btrfs_io_bio *io_bio)
7908 {
7909         struct btrfs_fs_info *fs_info;
7910         struct bio_vec bvec;
7911         struct bvec_iter iter;
7912         struct btrfs_retry_complete done;
7913         u64 start;
7914         unsigned int pgoff;
7915         u32 sectorsize;
7916         int nr_sectors;
7917         blk_status_t ret;
7918         blk_status_t err = BLK_STS_OK;
7919
7920         fs_info = BTRFS_I(inode)->root->fs_info;
7921         sectorsize = fs_info->sectorsize;
7922
7923         start = io_bio->logical;
7924         done.inode = inode;
7925         io_bio->bio.bi_iter = io_bio->iter;
7926
7927         bio_for_each_segment(bvec, &io_bio->bio, iter) {
7928                 nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec.bv_len);
7929                 pgoff = bvec.bv_offset;
7930
7931 next_block_or_try_again:
7932                 done.uptodate = 0;
7933                 done.start = start;
7934                 init_completion(&done.done);
7935
7936                 ret = dio_read_error(inode, &io_bio->bio, bvec.bv_page,
7937                                 pgoff, start, start + sectorsize - 1,
7938                                 io_bio->mirror_num,
7939                                 btrfs_retry_endio_nocsum, &done);
7940                 if (ret) {
7941                         err = ret;
7942                         goto next;
7943                 }
7944
7945                 wait_for_completion_io(&done.done);
7946
7947                 if (!done.uptodate) {
7948                         /* We might have another mirror, so try again */
7949                         goto next_block_or_try_again;
7950                 }
7951
7952 next:
7953                 start += sectorsize;
7954
7955                 nr_sectors--;
7956                 if (nr_sectors) {
7957                         pgoff += sectorsize;
7958                         ASSERT(pgoff < PAGE_SIZE);
7959                         goto next_block_or_try_again;
7960                 }
7961         }
7962
7963         return err;
7964 }
7965
7966 static void btrfs_retry_endio(struct bio *bio)
7967 {
7968         struct btrfs_retry_complete *done = bio->bi_private;
7969         struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
7970         struct extent_io_tree *io_tree, *failure_tree;
7971         struct inode *inode = done->inode;
7972         struct bio_vec *bvec;
7973         int uptodate;
7974         int ret;
7975         int i;
7976
7977         if (bio->bi_status)
7978                 goto end;
7979
7980         uptodate = 1;
7981
7982         ASSERT(bio->bi_vcnt == 1);
7983         ASSERT(bio_first_bvec_all(bio)->bv_len == btrfs_inode_sectorsize(done->inode));
7984
7985         io_tree = &BTRFS_I(inode)->io_tree;
7986         failure_tree = &BTRFS_I(inode)->io_failure_tree;
7987
7988         ASSERT(!bio_flagged(bio, BIO_CLONED));
7989         bio_for_each_segment_all(bvec, bio, i) {
7990                 ret = __readpage_endio_check(inode, io_bio, i, bvec->bv_page,
7991                                              bvec->bv_offset, done->start,
7992                                              bvec->bv_len);
7993                 if (!ret)
7994                         clean_io_failure(BTRFS_I(inode)->root->fs_info,
7995                                          failure_tree, io_tree, done->start,
7996                                          bvec->bv_page,
7997                                          btrfs_ino(BTRFS_I(inode)),
7998                                          bvec->bv_offset);
7999                 else
8000                         uptodate = 0;
8001         }
8002
8003         done->uptodate = uptodate;
8004 end:
8005         complete(&done->done);
8006         bio_put(bio);
8007 }
8008
8009 static blk_status_t __btrfs_subio_endio_read(struct inode *inode,
8010                 struct btrfs_io_bio *io_bio, blk_status_t err)
8011 {
8012         struct btrfs_fs_info *fs_info;
8013         struct bio_vec bvec;
8014         struct bvec_iter iter;
8015         struct btrfs_retry_complete done;
8016         u64 start;
8017         u64 offset = 0;
8018         u32 sectorsize;
8019         int nr_sectors;
8020         unsigned int pgoff;
8021         int csum_pos;
8022         bool uptodate = (err == 0);
8023         int ret;
8024         blk_status_t status;
8025
8026         fs_info = BTRFS_I(inode)->root->fs_info;
8027         sectorsize = fs_info->sectorsize;
8028
8029         err = BLK_STS_OK;
8030         start = io_bio->logical;
8031         done.inode = inode;
8032         io_bio->bio.bi_iter = io_bio->iter;
8033
8034         bio_for_each_segment(bvec, &io_bio->bio, iter) {
8035                 nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec.bv_len);
8036
8037                 pgoff = bvec.bv_offset;
8038 next_block:
8039                 if (uptodate) {
8040                         csum_pos = BTRFS_BYTES_TO_BLKS(fs_info, offset);
8041                         ret = __readpage_endio_check(inode, io_bio, csum_pos,
8042                                         bvec.bv_page, pgoff, start, sectorsize);
8043                         if (likely(!ret))
8044                                 goto next;
8045                 }
8046 try_again:
8047                 done.uptodate = 0;
8048                 done.start = start;
8049                 init_completion(&done.done);
8050
8051                 status = dio_read_error(inode, &io_bio->bio, bvec.bv_page,
8052                                         pgoff, start, start + sectorsize - 1,
8053                                         io_bio->mirror_num, btrfs_retry_endio,
8054                                         &done);
8055                 if (status) {
8056                         err = status;
8057                         goto next;
8058                 }
8059
8060                 wait_for_completion_io(&done.done);
8061
8062                 if (!done.uptodate) {
8063                         /* We might have another mirror, so try again */
8064                         goto try_again;
8065                 }
8066 next:
8067                 offset += sectorsize;
8068                 start += sectorsize;
8069
8070                 ASSERT(nr_sectors);
8071
8072                 nr_sectors--;
8073                 if (nr_sectors) {
8074                         pgoff += sectorsize;
8075                         ASSERT(pgoff < PAGE_SIZE);
8076                         goto next_block;
8077                 }
8078         }
8079
8080         return err;
8081 }
8082
8083 static blk_status_t btrfs_subio_endio_read(struct inode *inode,
8084                 struct btrfs_io_bio *io_bio, blk_status_t err)
8085 {
8086         bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
8087
8088         if (skip_csum) {
8089                 if (unlikely(err))
8090                         return __btrfs_correct_data_nocsum(inode, io_bio);
8091                 else
8092                         return BLK_STS_OK;
8093         } else {
8094                 return __btrfs_subio_endio_read(inode, io_bio, err);
8095         }
8096 }
8097
8098 static void btrfs_endio_direct_read(struct bio *bio)
8099 {
8100         struct btrfs_dio_private *dip = bio->bi_private;
8101         struct inode *inode = dip->inode;
8102         struct bio *dio_bio;
8103         struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
8104         blk_status_t err = bio->bi_status;
8105
8106         if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED)
8107                 err = btrfs_subio_endio_read(inode, io_bio, err);
8108
8109         unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
8110                       dip->logical_offset + dip->bytes - 1);
8111         dio_bio = dip->dio_bio;
8112
8113         kfree(dip);
8114
8115         dio_bio->bi_status = err;
8116         dio_end_io(dio_bio);
8117
8118         if (io_bio->end_io)
8119                 io_bio->end_io(io_bio, blk_status_to_errno(err));
8120         bio_put(bio);
8121 }
8122
8123 static void __endio_write_update_ordered(struct inode *inode,
8124                                          const u64 offset, const u64 bytes,
8125                                          const bool uptodate)
8126 {
8127         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8128         struct btrfs_ordered_extent *ordered = NULL;
8129         struct btrfs_workqueue *wq;
8130         btrfs_work_func_t func;
8131         u64 ordered_offset = offset;
8132         u64 ordered_bytes = bytes;
8133         u64 last_offset;
8134         int ret;
8135
8136         if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
8137                 wq = fs_info->endio_freespace_worker;
8138                 func = btrfs_freespace_write_helper;
8139         } else {
8140                 wq = fs_info->endio_write_workers;
8141                 func = btrfs_endio_write_helper;
8142         }
8143
8144 again:
8145         last_offset = ordered_offset;
8146         ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
8147                                                    &ordered_offset,
8148                                                    ordered_bytes,
8149                                                    uptodate);
8150         if (!ret)
8151                 goto out_test;
8152
8153         btrfs_init_work(&ordered->work, func, finish_ordered_fn, NULL, NULL);
8154         btrfs_queue_work(wq, &ordered->work);
8155 out_test:
8156         /*
8157          * If btrfs_dec_test_ordered_pending does not find any ordered extent
8158          * in the range, we can exit.
8159          */
8160         if (ordered_offset == last_offset)
8161                 return;
8162         /*
8163          * our bio might span multiple ordered extents.  If we haven't
8164          * completed the accounting for the whole dio, go back and try again
8165          */
8166         if (ordered_offset < offset + bytes) {
8167                 ordered_bytes = offset + bytes - ordered_offset;
8168                 ordered = NULL;
8169                 goto again;
8170         }
8171 }
8172
8173 static void btrfs_endio_direct_write(struct bio *bio)
8174 {
8175         struct btrfs_dio_private *dip = bio->bi_private;
8176         struct bio *dio_bio = dip->dio_bio;
8177
8178         __endio_write_update_ordered(dip->inode, dip->logical_offset,
8179                                      dip->bytes, !bio->bi_status);
8180
8181         kfree(dip);
8182
8183         dio_bio->bi_status = bio->bi_status;
8184         dio_end_io(dio_bio);
8185         bio_put(bio);
8186 }
8187
8188 static blk_status_t btrfs_submit_bio_start_direct_io(void *private_data,
8189                                     struct bio *bio, u64 offset)
8190 {
8191         struct inode *inode = private_data;
8192         blk_status_t ret;
8193         ret = btrfs_csum_one_bio(inode, bio, offset, 1);
8194         BUG_ON(ret); /* -ENOMEM */
8195         return 0;
8196 }
8197
8198 static void btrfs_end_dio_bio(struct bio *bio)
8199 {
8200         struct btrfs_dio_private *dip = bio->bi_private;
8201         blk_status_t err = bio->bi_status;
8202
8203         if (err)
8204                 btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
8205                            "direct IO failed ino %llu rw %d,%u sector %#Lx len %u err no %d",
8206                            btrfs_ino(BTRFS_I(dip->inode)), bio_op(bio),
8207                            bio->bi_opf,
8208                            (unsigned long long)bio->bi_iter.bi_sector,
8209                            bio->bi_iter.bi_size, err);
8210
8211         if (dip->subio_endio)
8212                 err = dip->subio_endio(dip->inode, btrfs_io_bio(bio), err);
8213
8214         if (err) {
8215                 /*
8216                  * We want to perceive the errors flag being set before
8217                  * decrementing the reference count. We don't need a barrier
8218                  * since atomic operations with a return value are fully
8219                  * ordered as per atomic_t.txt
8220                  */
8221                 dip->errors = 1;
8222         }
8223
8224         /* if there are more bios still pending for this dio, just exit */
8225         if (!atomic_dec_and_test(&dip->pending_bios))
8226                 goto out;
8227
8228         if (dip->errors) {
8229                 bio_io_error(dip->orig_bio);
8230         } else {
8231                 dip->dio_bio->bi_status = BLK_STS_OK;
8232                 bio_endio(dip->orig_bio);
8233         }
8234 out:
8235         bio_put(bio);
8236 }
8237
8238 static inline blk_status_t btrfs_lookup_and_bind_dio_csum(struct inode *inode,
8239                                                  struct btrfs_dio_private *dip,
8240                                                  struct bio *bio,
8241                                                  u64 file_offset)
8242 {
8243         struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
8244         struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio);
8245         blk_status_t ret;
8246
8247         /*
8248          * We load all the csum data we need when we submit
8249          * the first bio to reduce the csum tree search and
8250          * contention.
8251          */
8252         if (dip->logical_offset == file_offset) {
8253                 ret = btrfs_lookup_bio_sums_dio(inode, dip->orig_bio,
8254                                                 file_offset);
8255                 if (ret)
8256                         return ret;
8257         }
8258
8259         if (bio == dip->orig_bio)
8260                 return 0;
8261
8262         file_offset -= dip->logical_offset;
8263         file_offset >>= inode->i_sb->s_blocksize_bits;
8264         io_bio->csum = (u8 *)(((u32 *)orig_io_bio->csum) + file_offset);
8265
8266         return 0;
8267 }
8268
8269 static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio,
8270                 struct inode *inode, u64 file_offset, int async_submit)
8271 {
8272         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8273         struct btrfs_dio_private *dip = bio->bi_private;
8274         bool write = bio_op(bio) == REQ_OP_WRITE;
8275         blk_status_t ret;
8276
8277         /* Check btrfs_submit_bio_hook() for rules about async submit. */
8278         if (async_submit)
8279                 async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers);
8280
8281         if (!write) {
8282                 ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
8283                 if (ret)
8284                         goto err;
8285         }
8286
8287         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
8288                 goto map;
8289
8290         if (write && async_submit) {
8291                 ret = btrfs_wq_submit_bio(fs_info, bio, 0, 0,
8292                                           file_offset, inode,
8293                                           btrfs_submit_bio_start_direct_io,
8294                                           btrfs_submit_bio_done);
8295                 goto err;
8296         } else if (write) {
8297                 /*
8298                  * If we aren't doing async submit, calculate the csum of the
8299                  * bio now.
8300                  */
8301                 ret = btrfs_csum_one_bio(inode, bio, file_offset, 1);
8302                 if (ret)
8303                         goto err;
8304         } else {
8305                 ret = btrfs_lookup_and_bind_dio_csum(inode, dip, bio,
8306                                                      file_offset);
8307                 if (ret)
8308                         goto err;
8309         }
8310 map:
8311         ret = btrfs_map_bio(fs_info, bio, 0, 0);
8312 err:
8313         return ret;
8314 }
8315
8316 static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip)
8317 {
8318         struct inode *inode = dip->inode;
8319         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8320         struct bio *bio;
8321         struct bio *orig_bio = dip->orig_bio;
8322         u64 start_sector = orig_bio->bi_iter.bi_sector;
8323         u64 file_offset = dip->logical_offset;
8324         u64 map_length;
8325         int async_submit = 0;
8326         u64 submit_len;
8327         int clone_offset = 0;
8328         int clone_len;
8329         int ret;
8330         blk_status_t status;
8331
8332         map_length = orig_bio->bi_iter.bi_size;
8333         submit_len = map_length;
8334         ret = btrfs_map_block(fs_info, btrfs_op(orig_bio), start_sector << 9,
8335                               &map_length, NULL, 0);
8336         if (ret)
8337                 return -EIO;
8338
8339         if (map_length >= submit_len) {
8340                 bio = orig_bio;
8341                 dip->flags |= BTRFS_DIO_ORIG_BIO_SUBMITTED;
8342                 goto submit;
8343         }
8344
8345         /* async crcs make it difficult to collect full stripe writes. */
8346         if (btrfs_data_alloc_profile(fs_info) & BTRFS_BLOCK_GROUP_RAID56_MASK)
8347                 async_submit = 0;
8348         else
8349                 async_submit = 1;
8350
8351         /* bio split */
8352         ASSERT(map_length <= INT_MAX);
8353         atomic_inc(&dip->pending_bios);
8354         do {
8355                 clone_len = min_t(int, submit_len, map_length);
8356
8357                 /*
8358                  * This will never fail as it's passing GPF_NOFS and
8359                  * the allocation is backed by btrfs_bioset.
8360                  */
8361                 bio = btrfs_bio_clone_partial(orig_bio, clone_offset,
8362                                               clone_len);
8363                 bio->bi_private = dip;
8364                 bio->bi_end_io = btrfs_end_dio_bio;
8365                 btrfs_io_bio(bio)->logical = file_offset;
8366
8367                 ASSERT(submit_len >= clone_len);
8368                 submit_len -= clone_len;
8369                 if (submit_len == 0)
8370                         break;
8371
8372                 /*
8373                  * Increase the count before we submit the bio so we know
8374                  * the end IO handler won't happen before we increase the
8375                  * count. Otherwise, the dip might get freed before we're
8376                  * done setting it up.
8377                  */
8378                 atomic_inc(&dip->pending_bios);
8379
8380                 status = btrfs_submit_dio_bio(bio, inode, file_offset,
8381                                                 async_submit);
8382                 if (status) {
8383                         bio_put(bio);
8384                         atomic_dec(&dip->pending_bios);
8385                         goto out_err;
8386                 }
8387
8388                 clone_offset += clone_len;
8389                 start_sector += clone_len >> 9;
8390                 file_offset += clone_len;
8391
8392                 map_length = submit_len;
8393                 ret = btrfs_map_block(fs_info, btrfs_op(orig_bio),
8394                                       start_sector << 9, &map_length, NULL, 0);
8395                 if (ret)
8396                         goto out_err;
8397         } while (submit_len > 0);
8398
8399 submit:
8400         status = btrfs_submit_dio_bio(bio, inode, file_offset, async_submit);
8401         if (!status)
8402                 return 0;
8403
8404         bio_put(bio);
8405 out_err:
8406         dip->errors = 1;
8407         /*
8408          * Before atomic variable goto zero, we must  make sure dip->errors is
8409          * perceived to be set. This ordering is ensured by the fact that an
8410          * atomic operations with a return value are fully ordered as per
8411          * atomic_t.txt
8412          */
8413         if (atomic_dec_and_test(&dip->pending_bios))
8414                 bio_io_error(dip->orig_bio);
8415
8416         /* bio_end_io() will handle error, so we needn't return it */
8417         return 0;
8418 }
8419
8420 static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode,
8421                                 loff_t file_offset)
8422 {
8423         struct btrfs_dio_private *dip = NULL;
8424         struct bio *bio = NULL;
8425         struct btrfs_io_bio *io_bio;
8426         bool write = (bio_op(dio_bio) == REQ_OP_WRITE);
8427         int ret = 0;
8428
8429         bio = btrfs_bio_clone(dio_bio);
8430
8431         dip = kzalloc(sizeof(*dip), GFP_NOFS);
8432         if (!dip) {
8433                 ret = -ENOMEM;
8434                 goto free_ordered;
8435         }
8436
8437         dip->private = dio_bio->bi_private;
8438         dip->inode = inode;
8439         dip->logical_offset = file_offset;
8440         dip->bytes = dio_bio->bi_iter.bi_size;
8441         dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9;
8442         bio->bi_private = dip;
8443         dip->orig_bio = bio;
8444         dip->dio_bio = dio_bio;
8445         atomic_set(&dip->pending_bios, 0);
8446         io_bio = btrfs_io_bio(bio);
8447         io_bio->logical = file_offset;
8448
8449         if (write) {
8450                 bio->bi_end_io = btrfs_endio_direct_write;
8451         } else {
8452                 bio->bi_end_io = btrfs_endio_direct_read;
8453                 dip->subio_endio = btrfs_subio_endio_read;
8454         }
8455
8456         /*
8457          * Reset the range for unsubmitted ordered extents (to a 0 length range)
8458          * even if we fail to submit a bio, because in such case we do the
8459          * corresponding error handling below and it must not be done a second
8460          * time by btrfs_direct_IO().
8461          */
8462         if (write) {
8463                 struct btrfs_dio_data *dio_data = current->journal_info;
8464
8465                 dio_data->unsubmitted_oe_range_end = dip->logical_offset +
8466                         dip->bytes;
8467                 dio_data->unsubmitted_oe_range_start =
8468                         dio_data->unsubmitted_oe_range_end;
8469         }
8470
8471         ret = btrfs_submit_direct_hook(dip);
8472         if (!ret)
8473                 return;
8474
8475         if (io_bio->end_io)
8476                 io_bio->end_io(io_bio, ret);
8477
8478 free_ordered:
8479         /*
8480          * If we arrived here it means either we failed to submit the dip
8481          * or we either failed to clone the dio_bio or failed to allocate the
8482          * dip. If we cloned the dio_bio and allocated the dip, we can just
8483          * call bio_endio against our io_bio so that we get proper resource
8484          * cleanup if we fail to submit the dip, otherwise, we must do the
8485          * same as btrfs_endio_direct_[write|read] because we can't call these
8486          * callbacks - they require an allocated dip and a clone of dio_bio.
8487          */
8488         if (bio && dip) {
8489                 bio_io_error(bio);
8490                 /*
8491                  * The end io callbacks free our dip, do the final put on bio
8492                  * and all the cleanup and final put for dio_bio (through
8493                  * dio_end_io()).
8494                  */
8495                 dip = NULL;
8496                 bio = NULL;
8497         } else {
8498                 if (write)
8499                         __endio_write_update_ordered(inode,
8500                                                 file_offset,
8501                                                 dio_bio->bi_iter.bi_size,
8502                                                 false);
8503                 else
8504                         unlock_extent(&BTRFS_I(inode)->io_tree, file_offset,
8505                               file_offset + dio_bio->bi_iter.bi_size - 1);
8506
8507                 dio_bio->bi_status = BLK_STS_IOERR;
8508                 /*
8509                  * Releases and cleans up our dio_bio, no need to bio_put()
8510                  * nor bio_endio()/bio_io_error() against dio_bio.
8511                  */
8512                 dio_end_io(dio_bio);
8513         }
8514         if (bio)
8515                 bio_put(bio);
8516         kfree(dip);
8517 }
8518
8519 static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
8520                                const struct iov_iter *iter, loff_t offset)
8521 {
8522         int seg;
8523         int i;
8524         unsigned int blocksize_mask = fs_info->sectorsize - 1;
8525         ssize_t retval = -EINVAL;
8526
8527         if (offset & blocksize_mask)
8528                 goto out;
8529
8530         if (iov_iter_alignment(iter) & blocksize_mask)
8531                 goto out;
8532
8533         /* If this is a write we don't need to check anymore */
8534         if (iov_iter_rw(iter) != READ || !iter_is_iovec(iter))
8535                 return 0;
8536         /*
8537          * Check to make sure we don't have duplicate iov_base's in this
8538          * iovec, if so return EINVAL, otherwise we'll get csum errors
8539          * when reading back.
8540          */
8541         for (seg = 0; seg < iter->nr_segs; seg++) {
8542                 for (i = seg + 1; i < iter->nr_segs; i++) {
8543                         if (iter->iov[seg].iov_base == iter->iov[i].iov_base)
8544                                 goto out;
8545                 }
8546         }
8547         retval = 0;
8548 out:
8549         return retval;
8550 }
8551
8552 static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
8553 {
8554         struct file *file = iocb->ki_filp;
8555         struct inode *inode = file->f_mapping->host;
8556         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8557         struct btrfs_dio_data dio_data = { 0 };
8558         struct extent_changeset *data_reserved = NULL;
8559         loff_t offset = iocb->ki_pos;
8560         size_t count = 0;
8561         int flags = 0;
8562         bool wakeup = true;
8563         bool relock = false;
8564         ssize_t ret;
8565
8566         if (check_direct_IO(fs_info, iter, offset))
8567                 return 0;
8568
8569         inode_dio_begin(inode);
8570
8571         /*
8572          * The generic stuff only does filemap_write_and_wait_range, which
8573          * isn't enough if we've written compressed pages to this area, so
8574          * we need to flush the dirty pages again to make absolutely sure
8575          * that any outstanding dirty pages are on disk.
8576          */
8577         count = iov_iter_count(iter);
8578         if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
8579                      &BTRFS_I(inode)->runtime_flags))
8580                 filemap_fdatawrite_range(inode->i_mapping, offset,
8581                                          offset + count - 1);
8582
8583         if (iov_iter_rw(iter) == WRITE) {
8584                 /*
8585                  * If the write DIO is beyond the EOF, we need update
8586                  * the isize, but it is protected by i_mutex. So we can
8587                  * not unlock the i_mutex at this case.
8588                  */
8589                 if (offset + count <= inode->i_size) {
8590                         dio_data.overwrite = 1;
8591                         inode_unlock(inode);
8592                         relock = true;
8593                 } else if (iocb->ki_flags & IOCB_NOWAIT) {
8594                         ret = -EAGAIN;
8595                         goto out;
8596                 }
8597                 ret = btrfs_delalloc_reserve_space(inode, &data_reserved,
8598                                                    offset, count);
8599                 if (ret)
8600                         goto out;
8601
8602                 /*
8603                  * We need to know how many extents we reserved so that we can
8604                  * do the accounting properly if we go over the number we
8605                  * originally calculated.  Abuse current->journal_info for this.
8606                  */
8607                 dio_data.reserve = round_up(count,
8608                                             fs_info->sectorsize);
8609                 dio_data.unsubmitted_oe_range_start = (u64)offset;
8610                 dio_data.unsubmitted_oe_range_end = (u64)offset;
8611                 current->journal_info = &dio_data;
8612                 down_read(&BTRFS_I(inode)->dio_sem);
8613         } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
8614                                      &BTRFS_I(inode)->runtime_flags)) {
8615                 inode_dio_end(inode);
8616                 flags = DIO_LOCKING | DIO_SKIP_HOLES;
8617                 wakeup = false;
8618         }
8619
8620         ret = __blockdev_direct_IO(iocb, inode,
8621                                    fs_info->fs_devices->latest_bdev,
8622                                    iter, btrfs_get_blocks_direct, NULL,
8623                                    btrfs_submit_direct, flags);
8624         if (iov_iter_rw(iter) == WRITE) {
8625                 up_read(&BTRFS_I(inode)->dio_sem);
8626                 current->journal_info = NULL;
8627                 if (ret < 0 && ret != -EIOCBQUEUED) {
8628                         if (dio_data.reserve)
8629                                 btrfs_delalloc_release_space(inode, data_reserved,
8630                                         offset, dio_data.reserve, true);
8631                         /*
8632                          * On error we might have left some ordered extents
8633                          * without submitting corresponding bios for them, so
8634                          * cleanup them up to avoid other tasks getting them
8635                          * and waiting for them to complete forever.
8636                          */
8637                         if (dio_data.unsubmitted_oe_range_start <
8638                             dio_data.unsubmitted_oe_range_end)
8639                                 __endio_write_update_ordered(inode,
8640                                         dio_data.unsubmitted_oe_range_start,
8641                                         dio_data.unsubmitted_oe_range_end -
8642                                         dio_data.unsubmitted_oe_range_start,
8643                                         false);
8644                 } else if (ret >= 0 && (size_t)ret < count)
8645                         btrfs_delalloc_release_space(inode, data_reserved,
8646                                         offset, count - (size_t)ret, true);
8647                 btrfs_delalloc_release_extents(BTRFS_I(inode), count, false);
8648         }
8649 out:
8650         if (wakeup)
8651                 inode_dio_end(inode);
8652         if (relock)
8653                 inode_lock(inode);
8654
8655         extent_changeset_free(data_reserved);
8656         return ret;
8657 }
8658
8659 #define BTRFS_FIEMAP_FLAGS      (FIEMAP_FLAG_SYNC)
8660
8661 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
8662                 __u64 start, __u64 len)
8663 {
8664         int     ret;
8665
8666         ret = fiemap_check_flags(fieinfo, BTRFS_FIEMAP_FLAGS);
8667         if (ret)
8668                 return ret;
8669
8670         return extent_fiemap(inode, fieinfo, start, len);
8671 }
8672
8673 int btrfs_readpage(struct file *file, struct page *page)
8674 {
8675         struct extent_io_tree *tree;
8676         tree = &BTRFS_I(page->mapping->host)->io_tree;
8677         return extent_read_full_page(tree, page, btrfs_get_extent, 0);
8678 }
8679
8680 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
8681 {
8682         struct inode *inode = page->mapping->host;
8683         int ret;
8684
8685         if (current->flags & PF_MEMALLOC) {
8686                 redirty_page_for_writepage(wbc, page);
8687                 unlock_page(page);
8688                 return 0;
8689         }
8690
8691         /*
8692          * If we are under memory pressure we will call this directly from the
8693          * VM, we need to make sure we have the inode referenced for the ordered
8694          * extent.  If not just return like we didn't do anything.
8695          */
8696         if (!igrab(inode)) {
8697                 redirty_page_for_writepage(wbc, page);
8698                 return AOP_WRITEPAGE_ACTIVATE;
8699         }
8700         ret = extent_write_full_page(page, wbc);
8701         btrfs_add_delayed_iput(inode);
8702         return ret;
8703 }
8704
8705 static int btrfs_writepages(struct address_space *mapping,
8706                             struct writeback_control *wbc)
8707 {
8708         struct extent_io_tree *tree;
8709
8710         tree = &BTRFS_I(mapping->host)->io_tree;
8711         return extent_writepages(tree, mapping, wbc);
8712 }
8713
8714 static int
8715 btrfs_readpages(struct file *file, struct address_space *mapping,
8716                 struct list_head *pages, unsigned nr_pages)
8717 {
8718         struct extent_io_tree *tree;
8719         tree = &BTRFS_I(mapping->host)->io_tree;
8720         return extent_readpages(tree, mapping, pages, nr_pages);
8721 }
8722 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
8723 {
8724         struct extent_io_tree *tree;
8725         struct extent_map_tree *map;
8726         int ret;
8727
8728         tree = &BTRFS_I(page->mapping->host)->io_tree;
8729         map = &BTRFS_I(page->mapping->host)->extent_tree;
8730         ret = try_release_extent_mapping(map, tree, page, gfp_flags);
8731         if (ret == 1) {
8732                 ClearPagePrivate(page);
8733                 set_page_private(page, 0);
8734                 put_page(page);
8735         }
8736         return ret;
8737 }
8738
8739 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
8740 {
8741         if (PageWriteback(page) || PageDirty(page))
8742                 return 0;
8743         return __btrfs_releasepage(page, gfp_flags);
8744 }
8745
8746 static void btrfs_invalidatepage(struct page *page, unsigned int offset,
8747                                  unsigned int length)
8748 {
8749         struct inode *inode = page->mapping->host;
8750         struct extent_io_tree *tree;
8751         struct btrfs_ordered_extent *ordered;
8752         struct extent_state *cached_state = NULL;
8753         u64 page_start = page_offset(page);
8754         u64 page_end = page_start + PAGE_SIZE - 1;
8755         u64 start;
8756         u64 end;
8757         int inode_evicting = inode->i_state & I_FREEING;
8758
8759         /*
8760          * we have the page locked, so new writeback can't start,
8761          * and the dirty bit won't be cleared while we are here.
8762          *
8763          * Wait for IO on this page so that we can safely clear
8764          * the PagePrivate2 bit and do ordered accounting
8765          */
8766         wait_on_page_writeback(page);
8767
8768         tree = &BTRFS_I(inode)->io_tree;
8769         if (offset) {
8770                 btrfs_releasepage(page, GFP_NOFS);
8771                 return;
8772         }
8773
8774         if (!inode_evicting)
8775                 lock_extent_bits(tree, page_start, page_end, &cached_state);
8776 again:
8777         start = page_start;
8778         ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), start,
8779                                         page_end - start + 1);
8780         if (ordered) {
8781                 end = min(page_end, ordered->file_offset + ordered->len - 1);
8782                 /*
8783                  * IO on this page will never be started, so we need
8784                  * to account for any ordered extents now
8785                  */
8786                 if (!inode_evicting)
8787                         clear_extent_bit(tree, start, end,
8788                                          EXTENT_DIRTY | EXTENT_DELALLOC |
8789                                          EXTENT_DELALLOC_NEW |
8790                                          EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
8791                                          EXTENT_DEFRAG, 1, 0, &cached_state);
8792                 /*
8793                  * whoever cleared the private bit is responsible
8794                  * for the finish_ordered_io
8795                  */
8796                 if (TestClearPagePrivate2(page)) {
8797                         struct btrfs_ordered_inode_tree *tree;
8798                         u64 new_len;
8799
8800                         tree = &BTRFS_I(inode)->ordered_tree;
8801
8802                         spin_lock_irq(&tree->lock);
8803                         set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
8804                         new_len = start - ordered->file_offset;
8805                         if (new_len < ordered->truncated_len)
8806                                 ordered->truncated_len = new_len;
8807                         spin_unlock_irq(&tree->lock);
8808
8809                         if (btrfs_dec_test_ordered_pending(inode, &ordered,
8810                                                            start,
8811                                                            end - start + 1, 1))
8812                                 btrfs_finish_ordered_io(ordered);
8813                 }
8814                 btrfs_put_ordered_extent(ordered);
8815                 if (!inode_evicting) {
8816                         cached_state = NULL;
8817                         lock_extent_bits(tree, start, end,
8818                                          &cached_state);
8819                 }
8820
8821                 start = end + 1;
8822                 if (start < page_end)
8823                         goto again;
8824         }
8825
8826         /*
8827          * Qgroup reserved space handler
8828          * Page here will be either
8829          * 1) Already written to disk
8830          *    In this case, its reserved space is released from data rsv map
8831          *    and will be freed by delayed_ref handler finally.
8832          *    So even we call qgroup_free_data(), it won't decrease reserved
8833          *    space.
8834          * 2) Not written to disk
8835          *    This means the reserved space should be freed here. However,
8836          *    if a truncate invalidates the page (by clearing PageDirty)
8837          *    and the page is accounted for while allocating extent
8838          *    in btrfs_check_data_free_space() we let delayed_ref to
8839          *    free the entire extent.
8840          */
8841         if (PageDirty(page))
8842                 btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE);
8843         if (!inode_evicting) {
8844                 clear_extent_bit(tree, page_start, page_end,
8845                                  EXTENT_LOCKED | EXTENT_DIRTY |
8846                                  EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
8847                                  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1,
8848                                  &cached_state);
8849
8850                 __btrfs_releasepage(page, GFP_NOFS);
8851         }
8852
8853         ClearPageChecked(page);
8854         if (PagePrivate(page)) {
8855                 ClearPagePrivate(page);
8856                 set_page_private(page, 0);
8857                 put_page(page);
8858         }
8859 }
8860
8861 /*
8862  * btrfs_page_mkwrite() is not allowed to change the file size as it gets
8863  * called from a page fault handler when a page is first dirtied. Hence we must
8864  * be careful to check for EOF conditions here. We set the page up correctly
8865  * for a written page which means we get ENOSPC checking when writing into
8866  * holes and correct delalloc and unwritten extent mapping on filesystems that
8867  * support these features.
8868  *
8869  * We are not allowed to take the i_mutex here so we have to play games to
8870  * protect against truncate races as the page could now be beyond EOF.  Because
8871  * vmtruncate() writes the inode size before removing pages, once we have the
8872  * page lock we can determine safely if the page is beyond EOF. If it is not
8873  * beyond EOF, then the page is guaranteed safe against truncation until we
8874  * unlock the page.
8875  */
8876 int btrfs_page_mkwrite(struct vm_fault *vmf)
8877 {
8878         struct page *page = vmf->page;
8879         struct inode *inode = file_inode(vmf->vma->vm_file);
8880         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8881         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
8882         struct btrfs_ordered_extent *ordered;
8883         struct extent_state *cached_state = NULL;
8884         struct extent_changeset *data_reserved = NULL;
8885         char *kaddr;
8886         unsigned long zero_start;
8887         loff_t size;
8888         int ret;
8889         int reserved = 0;
8890         u64 reserved_space;
8891         u64 page_start;
8892         u64 page_end;
8893         u64 end;
8894
8895         reserved_space = PAGE_SIZE;
8896
8897         sb_start_pagefault(inode->i_sb);
8898         page_start = page_offset(page);
8899         page_end = page_start + PAGE_SIZE - 1;
8900         end = page_end;
8901
8902         /*
8903          * Reserving delalloc space after obtaining the page lock can lead to
8904          * deadlock. For example, if a dirty page is locked by this function
8905          * and the call to btrfs_delalloc_reserve_space() ends up triggering
8906          * dirty page write out, then the btrfs_writepage() function could
8907          * end up waiting indefinitely to get a lock on the page currently
8908          * being processed by btrfs_page_mkwrite() function.
8909          */
8910         ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
8911                                            reserved_space);
8912         if (!ret) {
8913                 ret = file_update_time(vmf->vma->vm_file);
8914                 reserved = 1;
8915         }
8916         if (ret) {
8917                 if (ret == -ENOMEM)
8918                         ret = VM_FAULT_OOM;
8919                 else /* -ENOSPC, -EIO, etc */
8920                         ret = VM_FAULT_SIGBUS;
8921                 if (reserved)
8922                         goto out;
8923                 goto out_noreserve;
8924         }
8925
8926         ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
8927 again:
8928         lock_page(page);
8929         size = i_size_read(inode);
8930
8931         if ((page->mapping != inode->i_mapping) ||
8932             (page_start >= size)) {
8933                 /* page got truncated out from underneath us */
8934                 goto out_unlock;
8935         }
8936         wait_on_page_writeback(page);
8937
8938         lock_extent_bits(io_tree, page_start, page_end, &cached_state);
8939         set_page_extent_mapped(page);
8940
8941         /*
8942          * we can't set the delalloc bits if there are pending ordered
8943          * extents.  Drop our locks and wait for them to finish
8944          */
8945         ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
8946                         PAGE_SIZE);
8947         if (ordered) {
8948                 unlock_extent_cached(io_tree, page_start, page_end,
8949                                      &cached_state);
8950                 unlock_page(page);
8951                 btrfs_start_ordered_extent(inode, ordered, 1);
8952                 btrfs_put_ordered_extent(ordered);
8953                 goto again;
8954         }
8955
8956         if (page->index == ((size - 1) >> PAGE_SHIFT)) {
8957                 reserved_space = round_up(size - page_start,
8958                                           fs_info->sectorsize);
8959                 if (reserved_space < PAGE_SIZE) {
8960                         end = page_start + reserved_space - 1;
8961                         btrfs_delalloc_release_space(inode, data_reserved,
8962                                         page_start, PAGE_SIZE - reserved_space,
8963                                         true);
8964                 }
8965         }
8966
8967         /*
8968          * page_mkwrite gets called when the page is firstly dirtied after it's
8969          * faulted in, but write(2) could also dirty a page and set delalloc
8970          * bits, thus in this case for space account reason, we still need to
8971          * clear any delalloc bits within this page range since we have to
8972          * reserve data&meta space before lock_page() (see above comments).
8973          */
8974         clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
8975                           EXTENT_DIRTY | EXTENT_DELALLOC |
8976                           EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
8977                           0, 0, &cached_state);
8978
8979         ret = btrfs_set_extent_delalloc(inode, page_start, end, 0,
8980                                         &cached_state, 0);
8981         if (ret) {
8982                 unlock_extent_cached(io_tree, page_start, page_end,
8983                                      &cached_state);
8984                 ret = VM_FAULT_SIGBUS;
8985                 goto out_unlock;
8986         }
8987         ret = 0;
8988
8989         /* page is wholly or partially inside EOF */
8990         if (page_start + PAGE_SIZE > size)
8991                 zero_start = size & ~PAGE_MASK;
8992         else
8993                 zero_start = PAGE_SIZE;
8994
8995         if (zero_start != PAGE_SIZE) {
8996                 kaddr = kmap(page);
8997                 memset(kaddr + zero_start, 0, PAGE_SIZE - zero_start);
8998                 flush_dcache_page(page);
8999                 kunmap(page);
9000         }
9001         ClearPageChecked(page);
9002         set_page_dirty(page);
9003         SetPageUptodate(page);
9004
9005         BTRFS_I(inode)->last_trans = fs_info->generation;
9006         BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
9007         BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
9008
9009         unlock_extent_cached(io_tree, page_start, page_end, &cached_state);
9010
9011 out_unlock:
9012         if (!ret) {
9013                 btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, true);
9014                 sb_end_pagefault(inode->i_sb);
9015                 extent_changeset_free(data_reserved);
9016                 return VM_FAULT_LOCKED;
9017         }
9018         unlock_page(page);
9019 out:
9020         btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, (ret != 0));
9021         btrfs_delalloc_release_space(inode, data_reserved, page_start,
9022                                      reserved_space, (ret != 0));
9023 out_noreserve:
9024         sb_end_pagefault(inode->i_sb);
9025         extent_changeset_free(data_reserved);
9026         return ret;
9027 }
9028
9029 static int btrfs_truncate(struct inode *inode, bool skip_writeback)
9030 {
9031         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
9032         struct btrfs_root *root = BTRFS_I(inode)->root;
9033         struct btrfs_block_rsv *rsv;
9034         int ret = 0;
9035         int err = 0;
9036         struct btrfs_trans_handle *trans;
9037         u64 mask = fs_info->sectorsize - 1;
9038         u64 min_size = btrfs_calc_trunc_metadata_size(fs_info, 1);
9039
9040         if (!skip_writeback) {
9041                 ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask),
9042                                                (u64)-1);
9043                 if (ret)
9044                         return ret;
9045         }
9046
9047         /*
9048          * Yes ladies and gentlemen, this is indeed ugly.  The fact is we have
9049          * 3 things going on here
9050          *
9051          * 1) We need to reserve space for our orphan item and the space to
9052          * delete our orphan item.  Lord knows we don't want to have a dangling
9053          * orphan item because we didn't reserve space to remove it.
9054          *
9055          * 2) We need to reserve space to update our inode.
9056          *
9057          * 3) We need to have something to cache all the space that is going to
9058          * be free'd up by the truncate operation, but also have some slack
9059          * space reserved in case it uses space during the truncate (thank you
9060          * very much snapshotting).
9061          *
9062          * And we need these to all be separate.  The fact is we can use a lot of
9063          * space doing the truncate, and we have no earthly idea how much space
9064          * we will use, so we need the truncate reservation to be separate so it
9065          * doesn't end up using space reserved for updating the inode or
9066          * removing the orphan item.  We also need to be able to stop the
9067          * transaction and start a new one, which means we need to be able to
9068          * update the inode several times, and we have no idea of knowing how
9069          * many times that will be, so we can't just reserve 1 item for the
9070          * entirety of the operation, so that has to be done separately as well.
9071          * Then there is the orphan item, which does indeed need to be held on
9072          * to for the whole operation, and we need nobody to touch this reserved
9073          * space except the orphan code.
9074          *
9075          * So that leaves us with
9076          *
9077          * 1) root->orphan_block_rsv - for the orphan deletion.
9078          * 2) rsv - for the truncate reservation, which we will steal from the
9079          * transaction reservation.
9080          * 3) fs_info->trans_block_rsv - this will have 1 items worth left for
9081          * updating the inode.
9082          */
9083         rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
9084         if (!rsv)
9085                 return -ENOMEM;
9086         rsv->size = min_size;
9087         rsv->failfast = 1;
9088
9089         /*
9090          * 1 for the truncate slack space
9091          * 1 for updating the inode.
9092          */
9093         trans = btrfs_start_transaction(root, 2);
9094         if (IS_ERR(trans)) {
9095                 err = PTR_ERR(trans);
9096                 goto out;
9097         }
9098
9099         /* Migrate the slack space for the truncate to our reserve */
9100         ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
9101                                       min_size, 0);
9102         BUG_ON(ret);
9103
9104         /*
9105          * So if we truncate and then write and fsync we normally would just
9106          * write the extents that changed, which is a problem if we need to
9107          * first truncate that entire inode.  So set this flag so we write out
9108          * all of the extents in the inode to the sync log so we're completely
9109          * safe.
9110          */
9111         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
9112         trans->block_rsv = rsv;
9113
9114         while (1) {
9115                 ret = btrfs_truncate_inode_items(trans, root, inode,
9116                                                  inode->i_size,
9117                                                  BTRFS_EXTENT_DATA_KEY);
9118                 trans->block_rsv = &fs_info->trans_block_rsv;
9119                 if (ret != -ENOSPC && ret != -EAGAIN) {
9120                         if (ret < 0)
9121                                 err = ret;
9122                         break;
9123                 }
9124
9125                 ret = btrfs_update_inode(trans, root, inode);
9126                 if (ret) {
9127                         err = ret;
9128                         break;
9129                 }
9130
9131                 btrfs_end_transaction(trans);
9132                 btrfs_btree_balance_dirty(fs_info);
9133
9134                 trans = btrfs_start_transaction(root, 2);
9135                 if (IS_ERR(trans)) {
9136                         ret = err = PTR_ERR(trans);
9137                         trans = NULL;
9138                         break;
9139                 }
9140
9141                 btrfs_block_rsv_release(fs_info, rsv, -1);
9142                 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
9143                                               rsv, min_size, 0);
9144                 BUG_ON(ret);    /* shouldn't happen */
9145                 trans->block_rsv = rsv;
9146         }
9147
9148         /*
9149          * We can't call btrfs_truncate_block inside a trans handle as we could
9150          * deadlock with freeze, if we got NEED_TRUNCATE_BLOCK then we know
9151          * we've truncated everything except the last little bit, and can do
9152          * btrfs_truncate_block and then update the disk_i_size.
9153          */
9154         if (ret == NEED_TRUNCATE_BLOCK) {
9155                 btrfs_end_transaction(trans);
9156                 btrfs_btree_balance_dirty(fs_info);
9157
9158                 ret = btrfs_truncate_block(inode, inode->i_size, 0, 0);
9159                 if (ret)
9160                         goto out;
9161                 trans = btrfs_start_transaction(root, 1);
9162                 if (IS_ERR(trans)) {
9163                         ret = PTR_ERR(trans);
9164                         goto out;
9165                 }
9166                 btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
9167         }
9168
9169         if (ret == 0 && inode->i_nlink > 0) {
9170                 trans->block_rsv = root->orphan_block_rsv;
9171                 ret = btrfs_orphan_del(trans, BTRFS_I(inode));
9172                 if (ret)
9173                         err = ret;
9174         }
9175
9176         if (trans) {
9177                 trans->block_rsv = &fs_info->trans_block_rsv;
9178                 ret = btrfs_update_inode(trans, root, inode);
9179                 if (ret && !err)
9180                         err = ret;
9181
9182                 ret = btrfs_end_transaction(trans);
9183                 btrfs_btree_balance_dirty(fs_info);
9184         }
9185 out:
9186         btrfs_free_block_rsv(fs_info, rsv);
9187
9188         if (ret && !err)
9189                 err = ret;
9190
9191         return err;
9192 }
9193
9194 /*
9195  * create a new subvolume directory/inode (helper for the ioctl).
9196  */
9197 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
9198                              struct btrfs_root *new_root,
9199                              struct btrfs_root *parent_root,
9200                              u64 new_dirid)
9201 {
9202         struct inode *inode;
9203         int err;
9204         u64 index = 0;
9205
9206         inode = btrfs_new_inode(trans, new_root, NULL, "..", 2,
9207                                 new_dirid, new_dirid,
9208                                 S_IFDIR | (~current_umask() & S_IRWXUGO),
9209                                 &index);
9210         if (IS_ERR(inode))
9211                 return PTR_ERR(inode);
9212         inode->i_op = &btrfs_dir_inode_operations;
9213         inode->i_fop = &btrfs_dir_file_operations;
9214
9215         set_nlink(inode, 1);
9216         btrfs_i_size_write(BTRFS_I(inode), 0);
9217         unlock_new_inode(inode);
9218
9219         err = btrfs_subvol_inherit_props(trans, new_root, parent_root);
9220         if (err)
9221                 btrfs_err(new_root->fs_info,
9222                           "error inheriting subvolume %llu properties: %d",
9223                           new_root->root_key.objectid, err);
9224
9225         err = btrfs_update_inode(trans, new_root, inode);
9226
9227         iput(inode);
9228         return err;
9229 }
9230
9231 struct inode *btrfs_alloc_inode(struct super_block *sb)
9232 {
9233         struct btrfs_fs_info *fs_info = btrfs_sb(sb);
9234         struct btrfs_inode *ei;
9235         struct inode *inode;
9236
9237         ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_KERNEL);
9238         if (!ei)
9239                 return NULL;
9240
9241         ei->root = NULL;
9242         ei->generation = 0;
9243         ei->last_trans = 0;
9244         ei->last_sub_trans = 0;
9245         ei->logged_trans = 0;
9246         ei->delalloc_bytes = 0;
9247         ei->new_delalloc_bytes = 0;
9248         ei->defrag_bytes = 0;
9249         ei->disk_i_size = 0;
9250         ei->flags = 0;
9251         ei->csum_bytes = 0;
9252         ei->index_cnt = (u64)-1;
9253         ei->dir_index = 0;
9254         ei->last_unlink_trans = 0;
9255         ei->last_log_commit = 0;
9256
9257         spin_lock_init(&ei->lock);
9258         ei->outstanding_extents = 0;
9259         if (sb->s_magic != BTRFS_TEST_MAGIC)
9260                 btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv,
9261                                               BTRFS_BLOCK_RSV_DELALLOC);
9262         ei->runtime_flags = 0;
9263         ei->prop_compress = BTRFS_COMPRESS_NONE;
9264         ei->defrag_compress = BTRFS_COMPRESS_NONE;
9265
9266         ei->delayed_node = NULL;
9267
9268         ei->i_otime.tv_sec = 0;
9269         ei->i_otime.tv_nsec = 0;
9270
9271         inode = &ei->vfs_inode;
9272         extent_map_tree_init(&ei->extent_tree);
9273         extent_io_tree_init(&ei->io_tree, inode);
9274         extent_io_tree_init(&ei->io_failure_tree, inode);
9275         ei->io_tree.track_uptodate = 1;
9276         ei->io_failure_tree.track_uptodate = 1;
9277         atomic_set(&ei->sync_writers, 0);
9278         mutex_init(&ei->log_mutex);
9279         mutex_init(&ei->delalloc_mutex);
9280         btrfs_ordered_inode_tree_init(&ei->ordered_tree);
9281         INIT_LIST_HEAD(&ei->delalloc_inodes);
9282         INIT_LIST_HEAD(&ei->delayed_iput);
9283         RB_CLEAR_NODE(&ei->rb_node);
9284         init_rwsem(&ei->dio_sem);
9285
9286         return inode;
9287 }
9288
9289 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
9290 void btrfs_test_destroy_inode(struct inode *inode)
9291 {
9292         btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0);
9293         kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
9294 }
9295 #endif
9296
9297 static void btrfs_i_callback(struct rcu_head *head)
9298 {
9299         struct inode *inode = container_of(head, struct inode, i_rcu);
9300         kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
9301 }
9302
9303 void btrfs_destroy_inode(struct inode *inode)
9304 {
9305         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
9306         struct btrfs_ordered_extent *ordered;
9307         struct btrfs_root *root = BTRFS_I(inode)->root;
9308
9309         WARN_ON(!hlist_empty(&inode->i_dentry));
9310         WARN_ON(inode->i_data.nrpages);
9311         WARN_ON(BTRFS_I(inode)->block_rsv.reserved);
9312         WARN_ON(BTRFS_I(inode)->block_rsv.size);
9313         WARN_ON(BTRFS_I(inode)->outstanding_extents);
9314         WARN_ON(BTRFS_I(inode)->delalloc_bytes);
9315         WARN_ON(BTRFS_I(inode)->new_delalloc_bytes);
9316         WARN_ON(BTRFS_I(inode)->csum_bytes);
9317         WARN_ON(BTRFS_I(inode)->defrag_bytes);
9318
9319         /*
9320          * This can happen where we create an inode, but somebody else also
9321          * created the same inode and we need to destroy the one we already
9322          * created.
9323          */
9324         if (!root)
9325                 goto free;
9326
9327         if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
9328                      &BTRFS_I(inode)->runtime_flags)) {
9329                 btrfs_info(fs_info, "inode %llu still on the orphan list",
9330                            btrfs_ino(BTRFS_I(inode)));
9331                 atomic_dec(&root->orphan_inodes);
9332         }
9333
9334         while (1) {
9335                 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
9336                 if (!ordered)
9337                         break;
9338                 else {
9339                         btrfs_err(fs_info,
9340                                   "found ordered extent %llu %llu on inode cleanup",
9341                                   ordered->file_offset, ordered->len);
9342                         btrfs_remove_ordered_extent(inode, ordered);
9343                         btrfs_put_ordered_extent(ordered);
9344                         btrfs_put_ordered_extent(ordered);
9345                 }
9346         }
9347         btrfs_qgroup_check_reserved_leak(inode);
9348         inode_tree_del(inode);
9349         btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0);
9350 free:
9351         call_rcu(&inode->i_rcu, btrfs_i_callback);
9352 }
9353
9354 int btrfs_drop_inode(struct inode *inode)
9355 {
9356         struct btrfs_root *root = BTRFS_I(inode)->root;
9357
9358         if (root == NULL)
9359                 return 1;
9360
9361         /* the snap/subvol tree is on deleting */
9362         if (btrfs_root_refs(&root->root_item) == 0)
9363                 return 1;
9364         else
9365                 return generic_drop_inode(inode);
9366 }
9367
9368 static void init_once(void *foo)
9369 {
9370         struct btrfs_inode *ei = (struct btrfs_inode *) foo;
9371
9372         inode_init_once(&ei->vfs_inode);
9373 }
9374
9375 void __cold btrfs_destroy_cachep(void)
9376 {
9377         /*
9378          * Make sure all delayed rcu free inodes are flushed before we
9379          * destroy cache.
9380          */
9381         rcu_barrier();
9382         kmem_cache_destroy(btrfs_inode_cachep);
9383         kmem_cache_destroy(btrfs_trans_handle_cachep);
9384         kmem_cache_destroy(btrfs_path_cachep);
9385         kmem_cache_destroy(btrfs_free_space_cachep);
9386 }
9387
9388 int __init btrfs_init_cachep(void)
9389 {
9390         btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
9391                         sizeof(struct btrfs_inode), 0,
9392                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT,
9393                         init_once);
9394         if (!btrfs_inode_cachep)
9395                 goto fail;
9396
9397         btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle",
9398                         sizeof(struct btrfs_trans_handle), 0,
9399                         SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL);
9400         if (!btrfs_trans_handle_cachep)
9401                 goto fail;
9402
9403         btrfs_path_cachep = kmem_cache_create("btrfs_path",
9404                         sizeof(struct btrfs_path), 0,
9405                         SLAB_MEM_SPREAD, NULL);
9406         if (!btrfs_path_cachep)
9407                 goto fail;
9408
9409         btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space",
9410                         sizeof(struct btrfs_free_space), 0,
9411                         SLAB_MEM_SPREAD, NULL);
9412         if (!btrfs_free_space_cachep)
9413                 goto fail;
9414
9415         return 0;
9416 fail:
9417         btrfs_destroy_cachep();
9418         return -ENOMEM;
9419 }
9420
9421 static int btrfs_getattr(const struct path *path, struct kstat *stat,
9422                          u32 request_mask, unsigned int flags)
9423 {
9424         u64 delalloc_bytes;
9425         struct inode *inode = d_inode(path->dentry);
9426         u32 blocksize = inode->i_sb->s_blocksize;
9427         u32 bi_flags = BTRFS_I(inode)->flags;
9428
9429         stat->result_mask |= STATX_BTIME;
9430         stat->btime.tv_sec = BTRFS_I(inode)->i_otime.tv_sec;
9431         stat->btime.tv_nsec = BTRFS_I(inode)->i_otime.tv_nsec;
9432         if (bi_flags & BTRFS_INODE_APPEND)
9433                 stat->attributes |= STATX_ATTR_APPEND;
9434         if (bi_flags & BTRFS_INODE_COMPRESS)
9435                 stat->attributes |= STATX_ATTR_COMPRESSED;
9436         if (bi_flags & BTRFS_INODE_IMMUTABLE)
9437                 stat->attributes |= STATX_ATTR_IMMUTABLE;
9438         if (bi_flags & BTRFS_INODE_NODUMP)
9439                 stat->attributes |= STATX_ATTR_NODUMP;
9440
9441         stat->attributes_mask |= (STATX_ATTR_APPEND |
9442                                   STATX_ATTR_COMPRESSED |
9443                                   STATX_ATTR_IMMUTABLE |
9444                                   STATX_ATTR_NODUMP);
9445
9446         generic_fillattr(inode, stat);
9447         stat->dev = BTRFS_I(inode)->root->anon_dev;
9448
9449         spin_lock(&BTRFS_I(inode)->lock);
9450         delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes;
9451         spin_unlock(&BTRFS_I(inode)->lock);
9452         stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
9453                         ALIGN(delalloc_bytes, blocksize)) >> 9;
9454         return 0;
9455 }
9456
9457 static int btrfs_rename_exchange(struct inode *old_dir,
9458                               struct dentry *old_dentry,
9459                               struct inode *new_dir,
9460                               struct dentry *new_dentry)
9461 {
9462         struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
9463         struct btrfs_trans_handle *trans;
9464         struct btrfs_root *root = BTRFS_I(old_dir)->root;
9465         struct btrfs_root *dest = BTRFS_I(new_dir)->root;
9466         struct inode *new_inode = new_dentry->d_inode;
9467         struct inode *old_inode = old_dentry->d_inode;
9468         struct timespec ctime = current_time(old_inode);
9469         struct dentry *parent;
9470         u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
9471         u64 new_ino = btrfs_ino(BTRFS_I(new_inode));
9472         u64 old_idx = 0;
9473         u64 new_idx = 0;
9474         u64 root_objectid;
9475         int ret;
9476         bool root_log_pinned = false;
9477         bool dest_log_pinned = false;
9478
9479         /* we only allow rename subvolume link between subvolumes */
9480         if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
9481                 return -EXDEV;
9482
9483         /* close the race window with snapshot create/destroy ioctl */
9484         if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9485                 down_read(&fs_info->subvol_sem);
9486         if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
9487                 down_read(&fs_info->subvol_sem);
9488
9489         /*
9490          * We want to reserve the absolute worst case amount of items.  So if
9491          * both inodes are subvols and we need to unlink them then that would
9492          * require 4 item modifications, but if they are both normal inodes it
9493          * would require 5 item modifications, so we'll assume their normal
9494          * inodes.  So 5 * 2 is 10, plus 2 for the new links, so 12 total items
9495          * should cover the worst case number of items we'll modify.
9496          */
9497         trans = btrfs_start_transaction(root, 12);
9498         if (IS_ERR(trans)) {
9499                 ret = PTR_ERR(trans);
9500                 goto out_notrans;
9501         }
9502
9503         /*
9504          * We need to find a free sequence number both in the source and
9505          * in the destination directory for the exchange.
9506          */
9507         ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx);
9508         if (ret)
9509                 goto out_fail;
9510         ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx);
9511         if (ret)
9512                 goto out_fail;
9513
9514         BTRFS_I(old_inode)->dir_index = 0ULL;
9515         BTRFS_I(new_inode)->dir_index = 0ULL;
9516
9517         /* Reference for the source. */
9518         if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
9519                 /* force full log commit if subvolume involved. */
9520                 btrfs_set_log_full_commit(fs_info, trans);
9521         } else {
9522                 btrfs_pin_log_trans(root);
9523                 root_log_pinned = true;
9524                 ret = btrfs_insert_inode_ref(trans, dest,
9525                                              new_dentry->d_name.name,
9526                                              new_dentry->d_name.len,
9527                                              old_ino,
9528                                              btrfs_ino(BTRFS_I(new_dir)),
9529                                              old_idx);
9530                 if (ret)
9531                         goto out_fail;
9532         }
9533
9534         /* And now for the dest. */
9535         if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
9536                 /* force full log commit if subvolume involved. */
9537                 btrfs_set_log_full_commit(fs_info, trans);
9538         } else {
9539                 btrfs_pin_log_trans(dest);
9540                 dest_log_pinned = true;
9541                 ret = btrfs_insert_inode_ref(trans, root,
9542                                              old_dentry->d_name.name,
9543                                              old_dentry->d_name.len,
9544                                              new_ino,
9545                                              btrfs_ino(BTRFS_I(old_dir)),
9546                                              new_idx);
9547                 if (ret)
9548                         goto out_fail;
9549         }
9550
9551         /* Update inode version and ctime/mtime. */
9552         inode_inc_iversion(old_dir);
9553         inode_inc_iversion(new_dir);
9554         inode_inc_iversion(old_inode);
9555         inode_inc_iversion(new_inode);
9556         old_dir->i_ctime = old_dir->i_mtime = ctime;
9557         new_dir->i_ctime = new_dir->i_mtime = ctime;
9558         old_inode->i_ctime = ctime;
9559         new_inode->i_ctime = ctime;
9560
9561         if (old_dentry->d_parent != new_dentry->d_parent) {
9562                 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
9563                                 BTRFS_I(old_inode), 1);
9564                 btrfs_record_unlink_dir(trans, BTRFS_I(new_dir),
9565                                 BTRFS_I(new_inode), 1);
9566         }
9567
9568         /* src is a subvolume */
9569         if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
9570                 root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
9571                 ret = btrfs_unlink_subvol(trans, root, old_dir,
9572                                           root_objectid,
9573                                           old_dentry->d_name.name,
9574                                           old_dentry->d_name.len);
9575         } else { /* src is an inode */
9576                 ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir),
9577                                            BTRFS_I(old_dentry->d_inode),
9578                                            old_dentry->d_name.name,
9579                                            old_dentry->d_name.len);
9580                 if (!ret)
9581                         ret = btrfs_update_inode(trans, root, old_inode);
9582         }
9583         if (ret) {
9584                 btrfs_abort_transaction(trans, ret);
9585                 goto out_fail;
9586         }
9587
9588         /* dest is a subvolume */
9589         if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
9590                 root_objectid = BTRFS_I(new_inode)->root->root_key.objectid;
9591                 ret = btrfs_unlink_subvol(trans, dest, new_dir,
9592                                           root_objectid,
9593                                           new_dentry->d_name.name,
9594                                           new_dentry->d_name.len);
9595         } else { /* dest is an inode */
9596                 ret = __btrfs_unlink_inode(trans, dest, BTRFS_I(new_dir),
9597                                            BTRFS_I(new_dentry->d_inode),
9598                                            new_dentry->d_name.name,
9599                                            new_dentry->d_name.len);
9600                 if (!ret)
9601                         ret = btrfs_update_inode(trans, dest, new_inode);
9602         }
9603         if (ret) {
9604                 btrfs_abort_transaction(trans, ret);
9605                 goto out_fail;
9606         }
9607
9608         ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
9609                              new_dentry->d_name.name,
9610                              new_dentry->d_name.len, 0, old_idx);
9611         if (ret) {
9612                 btrfs_abort_transaction(trans, ret);
9613                 goto out_fail;
9614         }
9615
9616         ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode),
9617                              old_dentry->d_name.name,
9618                              old_dentry->d_name.len, 0, new_idx);
9619         if (ret) {
9620                 btrfs_abort_transaction(trans, ret);
9621                 goto out_fail;
9622         }
9623
9624         if (old_inode->i_nlink == 1)
9625                 BTRFS_I(old_inode)->dir_index = old_idx;
9626         if (new_inode->i_nlink == 1)
9627                 BTRFS_I(new_inode)->dir_index = new_idx;
9628
9629         if (root_log_pinned) {
9630                 parent = new_dentry->d_parent;
9631                 btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir),
9632                                 parent);
9633                 btrfs_end_log_trans(root);
9634                 root_log_pinned = false;
9635         }
9636         if (dest_log_pinned) {
9637                 parent = old_dentry->d_parent;
9638                 btrfs_log_new_name(trans, BTRFS_I(new_inode), BTRFS_I(new_dir),
9639                                 parent);
9640                 btrfs_end_log_trans(dest);
9641                 dest_log_pinned = false;
9642         }
9643 out_fail:
9644         /*
9645          * If we have pinned a log and an error happened, we unpin tasks
9646          * trying to sync the log and force them to fallback to a transaction
9647          * commit if the log currently contains any of the inodes involved in
9648          * this rename operation (to ensure we do not persist a log with an
9649          * inconsistent state for any of these inodes or leading to any
9650          * inconsistencies when replayed). If the transaction was aborted, the
9651          * abortion reason is propagated to userspace when attempting to commit
9652          * the transaction. If the log does not contain any of these inodes, we
9653          * allow the tasks to sync it.
9654          */
9655         if (ret && (root_log_pinned || dest_log_pinned)) {
9656                 if (btrfs_inode_in_log(BTRFS_I(old_dir), fs_info->generation) ||
9657                     btrfs_inode_in_log(BTRFS_I(new_dir), fs_info->generation) ||
9658                     btrfs_inode_in_log(BTRFS_I(old_inode), fs_info->generation) ||
9659                     (new_inode &&
9660                      btrfs_inode_in_log(BTRFS_I(new_inode), fs_info->generation)))
9661                         btrfs_set_log_full_commit(fs_info, trans);
9662
9663                 if (root_log_pinned) {
9664                         btrfs_end_log_trans(root);
9665                         root_log_pinned = false;
9666                 }
9667                 if (dest_log_pinned) {
9668                         btrfs_end_log_trans(dest);
9669                         dest_log_pinned = false;
9670                 }
9671         }
9672         ret = btrfs_end_transaction(trans);
9673 out_notrans:
9674         if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
9675                 up_read(&fs_info->subvol_sem);
9676         if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9677                 up_read(&fs_info->subvol_sem);
9678
9679         return ret;
9680 }
9681
9682 static int btrfs_whiteout_for_rename(struct btrfs_trans_handle *trans,
9683                                      struct btrfs_root *root,
9684                                      struct inode *dir,
9685                                      struct dentry *dentry)
9686 {
9687         int ret;
9688         struct inode *inode;
9689         u64 objectid;
9690         u64 index;
9691
9692         ret = btrfs_find_free_ino(root, &objectid);
9693         if (ret)
9694                 return ret;
9695
9696         inode = btrfs_new_inode(trans, root, dir,
9697                                 dentry->d_name.name,
9698                                 dentry->d_name.len,
9699                                 btrfs_ino(BTRFS_I(dir)),
9700                                 objectid,
9701                                 S_IFCHR | WHITEOUT_MODE,
9702                                 &index);
9703
9704         if (IS_ERR(inode)) {
9705                 ret = PTR_ERR(inode);
9706                 return ret;
9707         }
9708
9709         inode->i_op = &btrfs_special_inode_operations;
9710         init_special_inode(inode, inode->i_mode,
9711                 WHITEOUT_DEV);
9712
9713         ret = btrfs_init_inode_security(trans, inode, dir,
9714                                 &dentry->d_name);
9715         if (ret)
9716                 goto out;
9717
9718         ret = btrfs_add_nondir(trans, BTRFS_I(dir), dentry,
9719                                 BTRFS_I(inode), 0, index);
9720         if (ret)
9721                 goto out;
9722
9723         ret = btrfs_update_inode(trans, root, inode);
9724 out:
9725         unlock_new_inode(inode);
9726         if (ret)
9727                 inode_dec_link_count(inode);
9728         iput(inode);
9729
9730         return ret;
9731 }
9732
9733 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
9734                            struct inode *new_dir, struct dentry *new_dentry,
9735                            unsigned int flags)
9736 {
9737         struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
9738         struct btrfs_trans_handle *trans;
9739         unsigned int trans_num_items;
9740         struct btrfs_root *root = BTRFS_I(old_dir)->root;
9741         struct btrfs_root *dest = BTRFS_I(new_dir)->root;
9742         struct inode *new_inode = d_inode(new_dentry);
9743         struct inode *old_inode = d_inode(old_dentry);
9744         u64 index = 0;
9745         u64 root_objectid;
9746         int ret;
9747         u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
9748         bool log_pinned = false;
9749
9750         if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
9751                 return -EPERM;
9752
9753         /* we only allow rename subvolume link between subvolumes */
9754         if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
9755                 return -EXDEV;
9756
9757         if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
9758             (new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID))
9759                 return -ENOTEMPTY;
9760
9761         if (S_ISDIR(old_inode->i_mode) && new_inode &&
9762             new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
9763                 return -ENOTEMPTY;
9764
9765
9766         /* check for collisions, even if the  name isn't there */
9767         ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino,
9768                              new_dentry->d_name.name,
9769                              new_dentry->d_name.len);
9770
9771         if (ret) {
9772                 if (ret == -EEXIST) {
9773                         /* we shouldn't get
9774                          * eexist without a new_inode */
9775                         if (WARN_ON(!new_inode)) {
9776                                 return ret;
9777                         }
9778                 } else {
9779                         /* maybe -EOVERFLOW */
9780                         return ret;
9781                 }
9782         }
9783         ret = 0;
9784
9785         /*
9786          * we're using rename to replace one file with another.  Start IO on it
9787          * now so  we don't add too much work to the end of the transaction
9788          */
9789         if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
9790                 filemap_flush(old_inode->i_mapping);
9791
9792         /* close the racy window with snapshot create/destroy ioctl */
9793         if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9794                 down_read(&fs_info->subvol_sem);
9795         /*
9796          * We want to reserve the absolute worst case amount of items.  So if
9797          * both inodes are subvols and we need to unlink them then that would
9798          * require 4 item modifications, but if they are both normal inodes it
9799          * would require 5 item modifications, so we'll assume they are normal
9800          * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
9801          * should cover the worst case number of items we'll modify.
9802          * If our rename has the whiteout flag, we need more 5 units for the
9803          * new inode (1 inode item, 1 inode ref, 2 dir items and 1 xattr item
9804          * when selinux is enabled).
9805          */
9806         trans_num_items = 11;
9807         if (flags & RENAME_WHITEOUT)
9808                 trans_num_items += 5;
9809         trans = btrfs_start_transaction(root, trans_num_items);
9810         if (IS_ERR(trans)) {
9811                 ret = PTR_ERR(trans);
9812                 goto out_notrans;
9813         }
9814
9815         if (dest != root)
9816                 btrfs_record_root_in_trans(trans, dest);
9817
9818         ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index);
9819         if (ret)
9820                 goto out_fail;
9821
9822         BTRFS_I(old_inode)->dir_index = 0ULL;
9823         if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
9824                 /* force full log commit if subvolume involved. */
9825                 btrfs_set_log_full_commit(fs_info, trans);
9826         } else {
9827                 btrfs_pin_log_trans(root);
9828                 log_pinned = true;
9829                 ret = btrfs_insert_inode_ref(trans, dest,
9830                                              new_dentry->d_name.name,
9831                                              new_dentry->d_name.len,
9832                                              old_ino,
9833                                              btrfs_ino(BTRFS_I(new_dir)), index);
9834                 if (ret)
9835                         goto out_fail;
9836         }
9837
9838         inode_inc_iversion(old_dir);
9839         inode_inc_iversion(new_dir);
9840         inode_inc_iversion(old_inode);
9841         old_dir->i_ctime = old_dir->i_mtime =
9842         new_dir->i_ctime = new_dir->i_mtime =
9843         old_inode->i_ctime = current_time(old_dir);
9844
9845         if (old_dentry->d_parent != new_dentry->d_parent)
9846                 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
9847                                 BTRFS_I(old_inode), 1);
9848
9849         if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
9850                 root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
9851                 ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
9852                                         old_dentry->d_name.name,
9853                                         old_dentry->d_name.len);
9854         } else {
9855                 ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir),
9856                                         BTRFS_I(d_inode(old_dentry)),
9857                                         old_dentry->d_name.name,
9858                                         old_dentry->d_name.len);
9859                 if (!ret)
9860                         ret = btrfs_update_inode(trans, root, old_inode);
9861         }
9862         if (ret) {
9863                 btrfs_abort_transaction(trans, ret);
9864                 goto out_fail;
9865         }
9866
9867         if (new_inode) {
9868                 inode_inc_iversion(new_inode);
9869                 new_inode->i_ctime = current_time(new_inode);
9870                 if (unlikely(btrfs_ino(BTRFS_I(new_inode)) ==
9871                              BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
9872                         root_objectid = BTRFS_I(new_inode)->location.objectid;
9873                         ret = btrfs_unlink_subvol(trans, dest, new_dir,
9874                                                 root_objectid,
9875                                                 new_dentry->d_name.name,
9876                                                 new_dentry->d_name.len);
9877                         BUG_ON(new_inode->i_nlink == 0);
9878                 } else {
9879                         ret = btrfs_unlink_inode(trans, dest, BTRFS_I(new_dir),
9880                                                  BTRFS_I(d_inode(new_dentry)),
9881                                                  new_dentry->d_name.name,
9882                                                  new_dentry->d_name.len);
9883                 }
9884                 if (!ret && new_inode->i_nlink == 0)
9885                         ret = btrfs_orphan_add(trans,
9886                                         BTRFS_I(d_inode(new_dentry)));
9887                 if (ret) {
9888                         btrfs_abort_transaction(trans, ret);
9889                         goto out_fail;
9890                 }
9891         }
9892
9893         ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
9894                              new_dentry->d_name.name,
9895                              new_dentry->d_name.len, 0, index);
9896         if (ret) {
9897                 btrfs_abort_transaction(trans, ret);
9898                 goto out_fail;
9899         }
9900
9901         if (old_inode->i_nlink == 1)
9902                 BTRFS_I(old_inode)->dir_index = index;
9903
9904         if (log_pinned) {
9905                 struct dentry *parent = new_dentry->d_parent;
9906
9907                 btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir),
9908                                 parent);
9909                 btrfs_end_log_trans(root);
9910                 log_pinned = false;
9911         }
9912
9913         if (flags & RENAME_WHITEOUT) {
9914                 ret = btrfs_whiteout_for_rename(trans, root, old_dir,
9915                                                 old_dentry);
9916
9917                 if (ret) {
9918                         btrfs_abort_transaction(trans, ret);
9919                         goto out_fail;
9920                 }
9921         }
9922 out_fail:
9923         /*
9924          * If we have pinned the log and an error happened, we unpin tasks
9925          * trying to sync the log and force them to fallback to a transaction
9926          * commit if the log currently contains any of the inodes involved in
9927          * this rename operation (to ensure we do not persist a log with an
9928          * inconsistent state for any of these inodes or leading to any
9929          * inconsistencies when replayed). If the transaction was aborted, the
9930          * abortion reason is propagated to userspace when attempting to commit
9931          * the transaction. If the log does not contain any of these inodes, we
9932          * allow the tasks to sync it.
9933          */
9934         if (ret && log_pinned) {
9935                 if (btrfs_inode_in_log(BTRFS_I(old_dir), fs_info->generation) ||
9936                     btrfs_inode_in_log(BTRFS_I(new_dir), fs_info->generation) ||
9937                     btrfs_inode_in_log(BTRFS_I(old_inode), fs_info->generation) ||
9938                     (new_inode &&
9939                      btrfs_inode_in_log(BTRFS_I(new_inode), fs_info->generation)))
9940                         btrfs_set_log_full_commit(fs_info, trans);
9941
9942                 btrfs_end_log_trans(root);
9943                 log_pinned = false;
9944         }
9945         btrfs_end_transaction(trans);
9946 out_notrans:
9947         if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9948                 up_read(&fs_info->subvol_sem);
9949
9950         return ret;
9951 }
9952
9953 static int btrfs_rename2(struct inode *old_dir, struct dentry *old_dentry,
9954                          struct inode *new_dir, struct dentry *new_dentry,
9955                          unsigned int flags)
9956 {
9957         if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
9958                 return -EINVAL;
9959
9960         if (flags & RENAME_EXCHANGE)
9961                 return btrfs_rename_exchange(old_dir, old_dentry, new_dir,
9962                                           new_dentry);
9963
9964         return btrfs_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
9965 }
9966
9967 static void btrfs_run_delalloc_work(struct btrfs_work *work)
9968 {
9969         struct btrfs_delalloc_work *delalloc_work;
9970         struct inode *inode;
9971
9972         delalloc_work = container_of(work, struct btrfs_delalloc_work,
9973                                      work);
9974         inode = delalloc_work->inode;
9975         filemap_flush(inode->i_mapping);
9976         if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
9977                                 &BTRFS_I(inode)->runtime_flags))
9978                 filemap_flush(inode->i_mapping);
9979
9980         if (delalloc_work->delay_iput)
9981                 btrfs_add_delayed_iput(inode);
9982         else
9983                 iput(inode);
9984         complete(&delalloc_work->completion);
9985 }
9986
9987 struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode,
9988                                                     int delay_iput)
9989 {
9990         struct btrfs_delalloc_work *work;
9991
9992         work = kmalloc(sizeof(*work), GFP_NOFS);
9993         if (!work)
9994                 return NULL;
9995
9996         init_completion(&work->completion);
9997         INIT_LIST_HEAD(&work->list);
9998         work->inode = inode;
9999         work->delay_iput = delay_iput;
10000         WARN_ON_ONCE(!inode);
10001         btrfs_init_work(&work->work, btrfs_flush_delalloc_helper,
10002                         btrfs_run_delalloc_work, NULL, NULL);
10003
10004         return work;
10005 }
10006
10007 void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work)
10008 {
10009         wait_for_completion(&work->completion);
10010         kfree(work);
10011 }
10012
10013 /*
10014  * some fairly slow code that needs optimization. This walks the list
10015  * of all the inodes with pending delalloc and forces them to disk.
10016  */
10017 static int __start_delalloc_inodes(struct btrfs_root *root, int delay_iput,
10018                                    int nr)
10019 {
10020         struct btrfs_inode *binode;
10021         struct inode *inode;
10022         struct btrfs_delalloc_work *work, *next;
10023         struct list_head works;
10024         struct list_head splice;
10025         int ret = 0;
10026
10027         INIT_LIST_HEAD(&works);
10028         INIT_LIST_HEAD(&splice);
10029
10030         mutex_lock(&root->delalloc_mutex);
10031         spin_lock(&root->delalloc_lock);
10032         list_splice_init(&root->delalloc_inodes, &splice);
10033         while (!list_empty(&splice)) {
10034                 binode = list_entry(splice.next, struct btrfs_inode,
10035                                     delalloc_inodes);
10036
10037                 list_move_tail(&binode->delalloc_inodes,
10038                                &root->delalloc_inodes);
10039                 inode = igrab(&binode->vfs_inode);
10040                 if (!inode) {
10041                         cond_resched_lock(&root->delalloc_lock);
10042                         continue;
10043                 }
10044                 spin_unlock(&root->delalloc_lock);
10045
10046                 work = btrfs_alloc_delalloc_work(inode, delay_iput);
10047                 if (!work) {
10048                         if (delay_iput)
10049                                 btrfs_add_delayed_iput(inode);
10050                         else
10051                                 iput(inode);
10052                         ret = -ENOMEM;
10053                         goto out;
10054                 }
10055                 list_add_tail(&work->list, &works);
10056                 btrfs_queue_work(root->fs_info->flush_workers,
10057                                  &work->work);
10058                 ret++;
10059                 if (nr != -1 && ret >= nr)
10060                         goto out;
10061                 cond_resched();
10062                 spin_lock(&root->delalloc_lock);
10063         }
10064         spin_unlock(&root->delalloc_lock);
10065
10066 out:
10067         list_for_each_entry_safe(work, next, &works, list) {
10068                 list_del_init(&work->list);
10069                 btrfs_wait_and_free_delalloc_work(work);
10070         }
10071
10072         if (!list_empty_careful(&splice)) {
10073                 spin_lock(&root->delalloc_lock);
10074                 list_splice_tail(&splice, &root->delalloc_inodes);
10075                 spin_unlock(&root->delalloc_lock);
10076         }
10077         mutex_unlock(&root->delalloc_mutex);
10078         return ret;
10079 }
10080
10081 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
10082 {
10083         struct btrfs_fs_info *fs_info = root->fs_info;
10084         int ret;
10085
10086         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
10087                 return -EROFS;
10088
10089         ret = __start_delalloc_inodes(root, delay_iput, -1);
10090         if (ret > 0)
10091                 ret = 0;
10092         return ret;
10093 }
10094
10095 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput,
10096                                int nr)
10097 {
10098         struct btrfs_root *root;
10099         struct list_head splice;
10100         int ret;
10101
10102         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
10103                 return -EROFS;
10104
10105         INIT_LIST_HEAD(&splice);
10106
10107         mutex_lock(&fs_info->delalloc_root_mutex);
10108         spin_lock(&fs_info->delalloc_root_lock);
10109         list_splice_init(&fs_info->delalloc_roots, &splice);
10110         while (!list_empty(&splice) && nr) {
10111                 root = list_first_entry(&splice, struct btrfs_root,
10112                                         delalloc_root);
10113                 root = btrfs_grab_fs_root(root);
10114                 BUG_ON(!root);
10115                 list_move_tail(&root->delalloc_root,
10116                                &fs_info->delalloc_roots);
10117                 spin_unlock(&fs_info->delalloc_root_lock);
10118
10119                 ret = __start_delalloc_inodes(root, delay_iput, nr);
10120                 btrfs_put_fs_root(root);
10121                 if (ret < 0)
10122                         goto out;
10123
10124                 if (nr != -1) {
10125                         nr -= ret;
10126                         WARN_ON(nr < 0);
10127                 }
10128                 spin_lock(&fs_info->delalloc_root_lock);
10129         }
10130         spin_unlock(&fs_info->delalloc_root_lock);
10131
10132         ret = 0;
10133 out:
10134         if (!list_empty_careful(&splice)) {
10135                 spin_lock(&fs_info->delalloc_root_lock);
10136                 list_splice_tail(&splice, &fs_info->delalloc_roots);
10137                 spin_unlock(&fs_info->delalloc_root_lock);
10138         }
10139         mutex_unlock(&fs_info->delalloc_root_mutex);
10140         return ret;
10141 }
10142
10143 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
10144                          const char *symname)
10145 {
10146         struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
10147         struct btrfs_trans_handle *trans;
10148         struct btrfs_root *root = BTRFS_I(dir)->root;
10149         struct btrfs_path *path;
10150         struct btrfs_key key;
10151         struct inode *inode = NULL;
10152         int err;
10153         int drop_inode = 0;
10154         u64 objectid;
10155         u64 index = 0;
10156         int name_len;
10157         int datasize;
10158         unsigned long ptr;
10159         struct btrfs_file_extent_item *ei;
10160         struct extent_buffer *leaf;
10161
10162         name_len = strlen(symname);
10163         if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
10164                 return -ENAMETOOLONG;
10165
10166         /*
10167          * 2 items for inode item and ref
10168          * 2 items for dir items
10169          * 1 item for updating parent inode item
10170          * 1 item for the inline extent item
10171          * 1 item for xattr if selinux is on
10172          */
10173         trans = btrfs_start_transaction(root, 7);
10174         if (IS_ERR(trans))
10175                 return PTR_ERR(trans);
10176
10177         err = btrfs_find_free_ino(root, &objectid);
10178         if (err)
10179                 goto out_unlock;
10180
10181         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
10182                                 dentry->d_name.len, btrfs_ino(BTRFS_I(dir)),
10183                                 objectid, S_IFLNK|S_IRWXUGO, &index);
10184         if (IS_ERR(inode)) {
10185                 err = PTR_ERR(inode);
10186                 goto out_unlock;
10187         }
10188
10189         /*
10190         * If the active LSM wants to access the inode during
10191         * d_instantiate it needs these. Smack checks to see
10192         * if the filesystem supports xattrs by looking at the
10193         * ops vector.
10194         */
10195         inode->i_fop = &btrfs_file_operations;
10196         inode->i_op = &btrfs_file_inode_operations;
10197         inode->i_mapping->a_ops = &btrfs_aops;
10198         BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
10199
10200         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
10201         if (err)
10202                 goto out_unlock_inode;
10203
10204         path = btrfs_alloc_path();
10205         if (!path) {
10206                 err = -ENOMEM;
10207                 goto out_unlock_inode;
10208         }
10209         key.objectid = btrfs_ino(BTRFS_I(inode));
10210         key.offset = 0;
10211         key.type = BTRFS_EXTENT_DATA_KEY;
10212         datasize = btrfs_file_extent_calc_inline_size(name_len);
10213         err = btrfs_insert_empty_item(trans, root, path, &key,
10214                                       datasize);
10215         if (err) {
10216                 btrfs_free_path(path);
10217                 goto out_unlock_inode;
10218         }
10219         leaf = path->nodes[0];
10220         ei = btrfs_item_ptr(leaf, path->slots[0],
10221                             struct btrfs_file_extent_item);
10222         btrfs_set_file_extent_generation(leaf, ei, trans->transid);
10223         btrfs_set_file_extent_type(leaf, ei,
10224                                    BTRFS_FILE_EXTENT_INLINE);
10225         btrfs_set_file_extent_encryption(leaf, ei, 0);
10226         btrfs_set_file_extent_compression(leaf, ei, 0);
10227         btrfs_set_file_extent_other_encoding(leaf, ei, 0);
10228         btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
10229
10230         ptr = btrfs_file_extent_inline_start(ei);
10231         write_extent_buffer(leaf, symname, ptr, name_len);
10232         btrfs_mark_buffer_dirty(leaf);
10233         btrfs_free_path(path);
10234
10235         inode->i_op = &btrfs_symlink_inode_operations;
10236         inode_nohighmem(inode);
10237         inode->i_mapping->a_ops = &btrfs_symlink_aops;
10238         inode_set_bytes(inode, name_len);
10239         btrfs_i_size_write(BTRFS_I(inode), name_len);
10240         err = btrfs_update_inode(trans, root, inode);
10241         /*
10242          * Last step, add directory indexes for our symlink inode. This is the
10243          * last step to avoid extra cleanup of these indexes if an error happens
10244          * elsewhere above.
10245          */
10246         if (!err)
10247                 err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry,
10248                                 BTRFS_I(inode), 0, index);
10249         if (err) {
10250                 drop_inode = 1;
10251                 goto out_unlock_inode;
10252         }
10253
10254         d_instantiate_new(dentry, inode);
10255
10256 out_unlock:
10257         btrfs_end_transaction(trans);
10258         if (drop_inode) {
10259                 inode_dec_link_count(inode);
10260                 iput(inode);
10261         }
10262         btrfs_btree_balance_dirty(fs_info);
10263         return err;
10264
10265 out_unlock_inode:
10266         drop_inode = 1;
10267         unlock_new_inode(inode);
10268         goto out_unlock;
10269 }
10270
10271 static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
10272                                        u64 start, u64 num_bytes, u64 min_size,
10273                                        loff_t actual_len, u64 *alloc_hint,
10274                                        struct btrfs_trans_handle *trans)
10275 {
10276         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
10277         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
10278         struct extent_map *em;
10279         struct btrfs_root *root = BTRFS_I(inode)->root;
10280         struct btrfs_key ins;
10281         u64 cur_offset = start;
10282         u64 i_size;
10283         u64 cur_bytes;
10284         u64 last_alloc = (u64)-1;
10285         int ret = 0;
10286         bool own_trans = true;
10287         u64 end = start + num_bytes - 1;
10288
10289         if (trans)
10290                 own_trans = false;
10291         while (num_bytes > 0) {
10292                 if (own_trans) {
10293                         trans = btrfs_start_transaction(root, 3);
10294                         if (IS_ERR(trans)) {
10295                                 ret = PTR_ERR(trans);
10296                                 break;
10297                         }
10298                 }
10299
10300                 cur_bytes = min_t(u64, num_bytes, SZ_256M);
10301                 cur_bytes = max(cur_bytes, min_size);
10302                 /*
10303                  * If we are severely fragmented we could end up with really
10304                  * small allocations, so if the allocator is returning small
10305                  * chunks lets make its job easier by only searching for those
10306                  * sized chunks.
10307                  */
10308                 cur_bytes = min(cur_bytes, last_alloc);
10309                 ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes,
10310                                 min_size, 0, *alloc_hint, &ins, 1, 0);
10311                 if (ret) {
10312                         if (own_trans)
10313                                 btrfs_end_transaction(trans);
10314                         break;
10315                 }
10316                 btrfs_dec_block_group_reservations(fs_info, ins.objectid);
10317
10318                 last_alloc = ins.offset;
10319                 ret = insert_reserved_file_extent(trans, inode,
10320                                                   cur_offset, ins.objectid,
10321                                                   ins.offset, ins.offset,
10322                                                   ins.offset, 0, 0, 0,
10323                                                   BTRFS_FILE_EXTENT_PREALLOC);
10324                 if (ret) {
10325                         btrfs_free_reserved_extent(fs_info, ins.objectid,
10326                                                    ins.offset, 0);
10327                         btrfs_abort_transaction(trans, ret);
10328                         if (own_trans)
10329                                 btrfs_end_transaction(trans);
10330                         break;
10331                 }
10332
10333                 btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
10334                                         cur_offset + ins.offset -1, 0);
10335
10336                 em = alloc_extent_map();
10337                 if (!em) {
10338                         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
10339                                 &BTRFS_I(inode)->runtime_flags);
10340                         goto next;
10341                 }
10342
10343                 em->start = cur_offset;
10344                 em->orig_start = cur_offset;
10345                 em->len = ins.offset;
10346                 em->block_start = ins.objectid;
10347                 em->block_len = ins.offset;
10348                 em->orig_block_len = ins.offset;
10349                 em->ram_bytes = ins.offset;
10350                 em->bdev = fs_info->fs_devices->latest_bdev;
10351                 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
10352                 em->generation = trans->transid;
10353
10354                 while (1) {
10355                         write_lock(&em_tree->lock);
10356                         ret = add_extent_mapping(em_tree, em, 1);
10357                         write_unlock(&em_tree->lock);
10358                         if (ret != -EEXIST)
10359                                 break;
10360                         btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
10361                                                 cur_offset + ins.offset - 1,
10362                                                 0);
10363                 }
10364                 free_extent_map(em);
10365 next:
10366                 num_bytes -= ins.offset;
10367                 cur_offset += ins.offset;
10368                 *alloc_hint = ins.objectid + ins.offset;
10369
10370                 inode_inc_iversion(inode);
10371                 inode->i_ctime = current_time(inode);
10372                 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
10373                 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
10374                     (actual_len > inode->i_size) &&
10375                     (cur_offset > inode->i_size)) {
10376                         if (cur_offset > actual_len)
10377                                 i_size = actual_len;
10378                         else
10379                                 i_size = cur_offset;
10380                         i_size_write(inode, i_size);
10381                         btrfs_ordered_update_i_size(inode, i_size, NULL);
10382                 }
10383
10384                 ret = btrfs_update_inode(trans, root, inode);
10385
10386                 if (ret) {
10387                         btrfs_abort_transaction(trans, ret);
10388                         if (own_trans)
10389                                 btrfs_end_transaction(trans);
10390                         break;
10391                 }
10392
10393                 if (own_trans)
10394                         btrfs_end_transaction(trans);
10395         }
10396         if (cur_offset < end)
10397                 btrfs_free_reserved_data_space(inode, NULL, cur_offset,
10398                         end - cur_offset + 1);
10399         return ret;
10400 }
10401
10402 int btrfs_prealloc_file_range(struct inode *inode, int mode,
10403                               u64 start, u64 num_bytes, u64 min_size,
10404                               loff_t actual_len, u64 *alloc_hint)
10405 {
10406         return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
10407                                            min_size, actual_len, alloc_hint,
10408                                            NULL);
10409 }
10410
10411 int btrfs_prealloc_file_range_trans(struct inode *inode,
10412                                     struct btrfs_trans_handle *trans, int mode,
10413                                     u64 start, u64 num_bytes, u64 min_size,
10414                                     loff_t actual_len, u64 *alloc_hint)
10415 {
10416         return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
10417                                            min_size, actual_len, alloc_hint, trans);
10418 }
10419
10420 static int btrfs_set_page_dirty(struct page *page)
10421 {
10422         return __set_page_dirty_nobuffers(page);
10423 }
10424
10425 static int btrfs_permission(struct inode *inode, int mask)
10426 {
10427         struct btrfs_root *root = BTRFS_I(inode)->root;
10428         umode_t mode = inode->i_mode;
10429
10430         if (mask & MAY_WRITE &&
10431             (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
10432                 if (btrfs_root_readonly(root))
10433                         return -EROFS;
10434                 if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
10435                         return -EACCES;
10436         }
10437         return generic_permission(inode, mask);
10438 }
10439
10440 static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
10441 {
10442         struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
10443         struct btrfs_trans_handle *trans;
10444         struct btrfs_root *root = BTRFS_I(dir)->root;
10445         struct inode *inode = NULL;
10446         u64 objectid;
10447         u64 index;
10448         int ret = 0;
10449
10450         /*
10451          * 5 units required for adding orphan entry
10452          */
10453         trans = btrfs_start_transaction(root, 5);
10454         if (IS_ERR(trans))
10455                 return PTR_ERR(trans);
10456
10457         ret = btrfs_find_free_ino(root, &objectid);
10458         if (ret)
10459                 goto out;
10460
10461         inode = btrfs_new_inode(trans, root, dir, NULL, 0,
10462                         btrfs_ino(BTRFS_I(dir)), objectid, mode, &index);
10463         if (IS_ERR(inode)) {
10464                 ret = PTR_ERR(inode);
10465                 inode = NULL;
10466                 goto out;
10467         }
10468
10469         inode->i_fop = &btrfs_file_operations;
10470         inode->i_op = &btrfs_file_inode_operations;
10471
10472         inode->i_mapping->a_ops = &btrfs_aops;
10473         BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
10474
10475         ret = btrfs_init_inode_security(trans, inode, dir, NULL);
10476         if (ret)
10477                 goto out_inode;
10478
10479         ret = btrfs_update_inode(trans, root, inode);
10480         if (ret)
10481                 goto out_inode;
10482         ret = btrfs_orphan_add(trans, BTRFS_I(inode));
10483         if (ret)
10484                 goto out_inode;
10485
10486         /*
10487          * We set number of links to 0 in btrfs_new_inode(), and here we set
10488          * it to 1 because d_tmpfile() will issue a warning if the count is 0,
10489          * through:
10490          *
10491          *    d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
10492          */
10493         set_nlink(inode, 1);
10494         unlock_new_inode(inode);
10495         d_tmpfile(dentry, inode);
10496         mark_inode_dirty(inode);
10497
10498 out:
10499         btrfs_end_transaction(trans);
10500         if (ret)
10501                 iput(inode);
10502         btrfs_btree_balance_dirty(fs_info);
10503         return ret;
10504
10505 out_inode:
10506         unlock_new_inode(inode);
10507         goto out;
10508
10509 }
10510
10511 __attribute__((const))
10512 static int btrfs_readpage_io_failed_hook(struct page *page, int failed_mirror)
10513 {
10514         return -EAGAIN;
10515 }
10516
10517 static struct btrfs_fs_info *iotree_fs_info(void *private_data)
10518 {
10519         struct inode *inode = private_data;
10520         return btrfs_sb(inode->i_sb);
10521 }
10522
10523 static void btrfs_check_extent_io_range(void *private_data, const char *caller,
10524                                         u64 start, u64 end)
10525 {
10526         struct inode *inode = private_data;
10527         u64 isize;
10528
10529         isize = i_size_read(inode);
10530         if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
10531                 btrfs_debug_rl(BTRFS_I(inode)->root->fs_info,
10532                     "%s: ino %llu isize %llu odd range [%llu,%llu]",
10533                         caller, btrfs_ino(BTRFS_I(inode)), isize, start, end);
10534         }
10535 }
10536
10537 void btrfs_set_range_writeback(void *private_data, u64 start, u64 end)
10538 {
10539         struct inode *inode = private_data;
10540         unsigned long index = start >> PAGE_SHIFT;
10541         unsigned long end_index = end >> PAGE_SHIFT;
10542         struct page *page;
10543
10544         while (index <= end_index) {
10545                 page = find_get_page(inode->i_mapping, index);
10546                 ASSERT(page); /* Pages should be in the extent_io_tree */
10547                 set_page_writeback(page);
10548                 put_page(page);
10549                 index++;
10550         }
10551 }
10552
10553 static const struct inode_operations btrfs_dir_inode_operations = {
10554         .getattr        = btrfs_getattr,
10555         .lookup         = btrfs_lookup,
10556         .create         = btrfs_create,
10557         .unlink         = btrfs_unlink,
10558         .link           = btrfs_link,
10559         .mkdir          = btrfs_mkdir,
10560         .rmdir          = btrfs_rmdir,
10561         .rename         = btrfs_rename2,
10562         .symlink        = btrfs_symlink,
10563         .setattr        = btrfs_setattr,
10564         .mknod          = btrfs_mknod,
10565         .listxattr      = btrfs_listxattr,
10566         .permission     = btrfs_permission,
10567         .get_acl        = btrfs_get_acl,
10568         .set_acl        = btrfs_set_acl,
10569         .update_time    = btrfs_update_time,
10570         .tmpfile        = btrfs_tmpfile,
10571 };
10572 static const struct inode_operations btrfs_dir_ro_inode_operations = {
10573         .lookup         = btrfs_lookup,
10574         .permission     = btrfs_permission,
10575         .update_time    = btrfs_update_time,
10576 };
10577
10578 static const struct file_operations btrfs_dir_file_operations = {
10579         .llseek         = generic_file_llseek,
10580         .read           = generic_read_dir,
10581         .iterate_shared = btrfs_real_readdir,
10582         .open           = btrfs_opendir,
10583         .unlocked_ioctl = btrfs_ioctl,
10584 #ifdef CONFIG_COMPAT
10585         .compat_ioctl   = btrfs_compat_ioctl,
10586 #endif
10587         .release        = btrfs_release_file,
10588         .fsync          = btrfs_sync_file,
10589 };
10590
10591 static const struct extent_io_ops btrfs_extent_io_ops = {
10592         /* mandatory callbacks */
10593         .submit_bio_hook = btrfs_submit_bio_hook,
10594         .readpage_end_io_hook = btrfs_readpage_end_io_hook,
10595         .merge_bio_hook = btrfs_merge_bio_hook,
10596         .readpage_io_failed_hook = btrfs_readpage_io_failed_hook,
10597         .tree_fs_info = iotree_fs_info,
10598         .set_range_writeback = btrfs_set_range_writeback,
10599
10600         /* optional callbacks */
10601         .fill_delalloc = run_delalloc_range,
10602         .writepage_end_io_hook = btrfs_writepage_end_io_hook,
10603         .writepage_start_hook = btrfs_writepage_start_hook,
10604         .set_bit_hook = btrfs_set_bit_hook,
10605         .clear_bit_hook = btrfs_clear_bit_hook,
10606         .merge_extent_hook = btrfs_merge_extent_hook,
10607         .split_extent_hook = btrfs_split_extent_hook,
10608         .check_extent_io_range = btrfs_check_extent_io_range,
10609 };
10610
10611 /*
10612  * btrfs doesn't support the bmap operation because swapfiles
10613  * use bmap to make a mapping of extents in the file.  They assume
10614  * these extents won't change over the life of the file and they
10615  * use the bmap result to do IO directly to the drive.
10616  *
10617  * the btrfs bmap call would return logical addresses that aren't
10618  * suitable for IO and they also will change frequently as COW
10619  * operations happen.  So, swapfile + btrfs == corruption.
10620  *
10621  * For now we're avoiding this by dropping bmap.
10622  */
10623 static const struct address_space_operations btrfs_aops = {
10624         .readpage       = btrfs_readpage,
10625         .writepage      = btrfs_writepage,
10626         .writepages     = btrfs_writepages,
10627         .readpages      = btrfs_readpages,
10628         .direct_IO      = btrfs_direct_IO,
10629         .invalidatepage = btrfs_invalidatepage,
10630         .releasepage    = btrfs_releasepage,
10631         .set_page_dirty = btrfs_set_page_dirty,
10632         .error_remove_page = generic_error_remove_page,
10633 };
10634
10635 static const struct address_space_operations btrfs_symlink_aops = {
10636         .readpage       = btrfs_readpage,
10637         .writepage      = btrfs_writepage,
10638         .invalidatepage = btrfs_invalidatepage,
10639         .releasepage    = btrfs_releasepage,
10640 };
10641
10642 static const struct inode_operations btrfs_file_inode_operations = {
10643         .getattr        = btrfs_getattr,
10644         .setattr        = btrfs_setattr,
10645         .listxattr      = btrfs_listxattr,
10646         .permission     = btrfs_permission,
10647         .fiemap         = btrfs_fiemap,
10648         .get_acl        = btrfs_get_acl,
10649         .set_acl        = btrfs_set_acl,
10650         .update_time    = btrfs_update_time,
10651 };
10652 static const struct inode_operations btrfs_special_inode_operations = {
10653         .getattr        = btrfs_getattr,
10654         .setattr        = btrfs_setattr,
10655         .permission     = btrfs_permission,
10656         .listxattr      = btrfs_listxattr,
10657         .get_acl        = btrfs_get_acl,
10658         .set_acl        = btrfs_set_acl,
10659         .update_time    = btrfs_update_time,
10660 };
10661 static const struct inode_operations btrfs_symlink_inode_operations = {
10662         .get_link       = page_get_link,
10663         .getattr        = btrfs_getattr,
10664         .setattr        = btrfs_setattr,
10665         .permission     = btrfs_permission,
10666         .listxattr      = btrfs_listxattr,
10667         .update_time    = btrfs_update_time,
10668 };
10669
10670 const struct dentry_operations btrfs_dentry_operations = {
10671         .d_delete       = btrfs_dentry_delete,
10672         .d_release      = btrfs_dentry_release,
10673 };