/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "volumes.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
#include "inode-map.h"

struct btrfs_iget_args {
        u64 ino;
        struct btrfs_root *root;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_dir_ro_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct address_space_operations btrfs_symlink_aops;
static const struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;

static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;

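/*
 * map the S_IFMT bits of an inode's mode to the file type value
 * (BTRFS_FT_*) stored in btrfs directory items
 */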
#define S_SHIFT 12
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
        [S_IFREG >> S_SHIFT]    = BTRFS_FT_REG_FILE,
        [S_IFDIR >> S_SHIFT]    = BTRFS_FT_DIR,
        [S_IFCHR >> S_SHIFT]    = BTRFS_FT_CHRDEV,
        [S_IFBLK >> S_SHIFT]    = BTRFS_FT_BLKDEV,
        [S_IFIFO >> S_SHIFT]    = BTRFS_FT_FIFO,
        [S_IFSOCK >> S_SHIFT]   = BTRFS_FT_SOCK,
        [S_IFLNK >> S_SHIFT]    = BTRFS_FT_SYMLINK,
};

static int btrfs_setsize(struct inode *inode, loff_t newsize);
static int btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
static noinline int cow_file_range(struct inode *inode,
                                   struct page *locked_page,
                                   u64 start, u64 end, int *page_started,
                                   unsigned long *nr_written, int unlock);

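/*
 * initialize the security attributes of a new inode: set up the ACLs
 * inherited from the parent directory, then the security xattrs
 */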
static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
                                     struct inode *inode, struct inode *dir,
                                     const struct qstr *qstr)
{
        int err;

        err = btrfs_init_acl(trans, inode, dir);
        if (!err)
                err = btrfs_xattr_security_init(trans, inode, dir, qstr);
        return err;
}

/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root, struct inode *inode,
                                u64 start, size_t size, size_t compressed_size,
                                int compress_type,
                                struct page **compressed_pages)
{
        struct btrfs_key key;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct page *page = NULL;
        char *kaddr;
        unsigned long ptr;
        struct btrfs_file_extent_item *ei;
        int err = 0;
        int ret;
        size_t cur_size = size;
        size_t datasize;
        unsigned long offset;

        if (compressed_size && compressed_pages)
                cur_size = compressed_size;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        path->leave_spinning = 1;

        key.objectid = btrfs_ino(inode);
        key.offset = start;
        btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
        datasize = btrfs_file_extent_calc_inline_size(cur_size);

        inode_add_bytes(inode, size);
        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                      datasize);
        if (ret) {
                err = ret;
                goto fail;
        }
        leaf = path->nodes[0];
        ei = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);
        btrfs_set_file_extent_generation(leaf, ei, trans->transid);
        btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
        btrfs_set_file_extent_encryption(leaf, ei, 0);
        btrfs_set_file_extent_other_encoding(leaf, ei, 0);
        btrfs_set_file_extent_ram_bytes(leaf, ei, size);
        ptr = btrfs_file_extent_inline_start(ei);

        if (compress_type != BTRFS_COMPRESS_NONE) {
                struct page *cpage;
                int i = 0;
                while (compressed_size > 0) {
                        cpage = compressed_pages[i];
                        cur_size = min_t(unsigned long, compressed_size,
                                         PAGE_CACHE_SIZE);

                        kaddr = kmap_atomic(cpage, KM_USER0);
                        write_extent_buffer(leaf, kaddr, ptr, cur_size);
                        kunmap_atomic(kaddr, KM_USER0);

                        i++;
                        ptr += cur_size;
                        compressed_size -= cur_size;
                }
                btrfs_set_file_extent_compression(leaf, ei,
                                                  compress_type);
        } else {
                page = find_get_page(inode->i_mapping,
                                     start >> PAGE_CACHE_SHIFT);
                btrfs_set_file_extent_compression(leaf, ei, 0);
                kaddr = kmap_atomic(page, KM_USER0);
                offset = start & (PAGE_CACHE_SIZE - 1);
                write_extent_buffer(leaf, kaddr + offset, ptr, size);
                kunmap_atomic(kaddr, KM_USER0);
                page_cache_release(page);
        }
        btrfs_mark_buffer_dirty(leaf);
        btrfs_free_path(path);

        /*
         * we're an inline extent, so nobody can
         * extend the file past i_size without locking
         * a page we already have locked.
         *
         * We must do any isize and inode updates
         * before we unlock the pages.  Otherwise we
         * could end up racing with unlink.
         */
        BTRFS_I(inode)->disk_i_size = inode->i_size;
        btrfs_update_inode(trans, root, inode);

        return 0;
fail:
        btrfs_free_path(path);
        return err;
}


/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct inode *inode, u64 start, u64 end,
                                 size_t compressed_size, int compress_type,
                                 struct page **compressed_pages)
{
        u64 isize = i_size_read(inode);
        u64 actual_end = min(end + 1, isize);
        u64 inline_len = actual_end - start;
        u64 aligned_end = (end + root->sectorsize - 1) &
                        ~((u64)root->sectorsize - 1);
        u64 hint_byte;
        u64 data_len = inline_len;
        int ret;

        if (compressed_size)
                data_len = compressed_size;

        if (start > 0 ||
            actual_end >= PAGE_CACHE_SIZE ||
            data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
            (!compressed_size &&
            (actual_end & (root->sectorsize - 1)) == 0) ||
            end + 1 < isize ||
            data_len > root->fs_info->max_inline) {
                return 1;
        }

        ret = btrfs_drop_extents(trans, inode, start, aligned_end,
                                 &hint_byte, 1);
        BUG_ON(ret);

        if (isize > actual_end)
                inline_len = min_t(u64, isize, actual_end);
        ret = insert_inline_extent(trans, root, inode, start,
                                   inline_len, compressed_size,
                                   compress_type, compressed_pages);
        BUG_ON(ret);
        btrfs_delalloc_release_metadata(inode, end + 1 - start);
        btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
        return 0;
}

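/*
 * one async_extent records a contiguous range handed to the compression
 * workers.  pages holds the compressed result until phase two submits
 * it; a NULL pages array means compression fell back to plain IO.
 */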
struct async_extent {
        u64 start;
        u64 ram_size;
        u64 compressed_size;
        struct page **pages;
        unsigned long nr_pages;
        int compress_type;
        struct list_head list;
};

struct async_cow {
        struct inode *inode;
        struct btrfs_root *root;
        struct page *locked_page;
        u64 start;
        u64 end;
        struct list_head extents;
        struct btrfs_work work;
};

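/*
 * queue a range (compressed or not) on the async_cow list so that
 * phase two can allocate space and submit the IO
 */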
static noinline int add_async_extent(struct async_cow *cow,
                                     u64 start, u64 ram_size,
                                     u64 compressed_size,
                                     struct page **pages,
                                     unsigned long nr_pages,
                                     int compress_type)
{
        struct async_extent *async_extent;

        async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
        BUG_ON(!async_extent);
        async_extent->start = start;
        async_extent->ram_size = ram_size;
        async_extent->compressed_size = compressed_size;
        async_extent->pages = pages;
        async_extent->nr_pages = nr_pages;
        async_extent->compress_type = compress_type;
        list_add_tail(&async_extent->list, &cow->extents);
        return 0;
}

/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that pdflush sent them down.
 */
static noinline int compress_file_range(struct inode *inode,
                                        struct page *locked_page,
                                        u64 start, u64 end,
                                        struct async_cow *async_cow,
                                        int *num_added)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
        u64 num_bytes;
        u64 blocksize = root->sectorsize;
        u64 actual_end;
        u64 isize = i_size_read(inode);
        int ret = 0;
        struct page **pages = NULL;
        unsigned long nr_pages;
        unsigned long nr_pages_ret = 0;
        unsigned long total_compressed = 0;
        unsigned long total_in = 0;
        unsigned long max_compressed = 128 * 1024;
        unsigned long max_uncompressed = 128 * 1024;
        int i;
        int will_compress;
        int compress_type = root->fs_info->compress_type;

        /* if this is a small write inside eof, kick off a defrag */
        if (end <= BTRFS_I(inode)->disk_i_size && (end - start + 1) < 16 * 1024)
                btrfs_add_inode_defrag(NULL, inode);

        actual_end = min_t(u64, isize, end + 1);
again:
        will_compress = 0;
        nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
        nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);

        /*
         * we don't want to send crud past the end of i_size through
         * compression, that's just a waste of CPU time.  So, if the
         * end of the file is before the start of our current
         * requested range of bytes, we bail out to the uncompressed
         * cleanup code that can deal with all of this.
         *
         * It isn't really the fastest way to fix things, but this is a
         * very uncommon corner.
         */
        if (actual_end <= start)
                goto cleanup_and_bail_uncompressed;

        total_compressed = actual_end - start;

        /* we want to make sure the amount of ram required to uncompress
         * an extent is reasonable, so we limit the total size in ram
         * of a compressed extent to 128k.  This is a crucial number
         * because it also controls how easily we can spread reads across
         * cpus for decompression.
         *
         * We also want to make sure the amount of IO required to do
         * a random read is reasonably small, so we limit the size of
         * a compressed extent to 128k.
         */
        total_compressed = min(total_compressed, max_uncompressed);
        num_bytes = (end - start + blocksize) & ~(blocksize - 1);
        num_bytes = max(blocksize, num_bytes);
        total_in = 0;
        ret = 0;

        /*
         * we do compression for mount -o compress and when the
         * inode has not been flagged as nocompress.  This flag can
         * change at any time if we discover bad compression ratios.
         */
        if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
            (btrfs_test_opt(root, COMPRESS) ||
             (BTRFS_I(inode)->force_compress) ||
             (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))) {
                WARN_ON(pages);
                pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
                BUG_ON(!pages);

                if (BTRFS_I(inode)->force_compress)
                        compress_type = BTRFS_I(inode)->force_compress;

                ret = btrfs_compress_pages(compress_type,
                                           inode->i_mapping, start,
                                           total_compressed, pages,
                                           nr_pages, &nr_pages_ret,
                                           &total_in,
                                           &total_compressed,
                                           max_compressed);

                if (!ret) {
                        unsigned long offset = total_compressed &
                                (PAGE_CACHE_SIZE - 1);
                        struct page *page = pages[nr_pages_ret - 1];
                        char *kaddr;

                        /* zero the tail end of the last page, we might be
                         * sending it down to disk
                         */
                        if (offset) {
                                kaddr = kmap_atomic(page, KM_USER0);
                                memset(kaddr + offset, 0,
                                       PAGE_CACHE_SIZE - offset);
                                kunmap_atomic(kaddr, KM_USER0);
                        }
                        will_compress = 1;
                }
        }
        if (start == 0) {
                trans = btrfs_join_transaction(root);
                BUG_ON(IS_ERR(trans));
                trans->block_rsv = &root->fs_info->delalloc_block_rsv;

                /* lets try to make an inline extent */
                if (ret || total_in < (actual_end - start)) {
                        /* we didn't compress the entire range, try
                         * to make an uncompressed inline extent.
                         */
                        ret = cow_file_range_inline(trans, root, inode,
                                                    start, end, 0, 0, NULL);
                } else {
                        /* try making a compressed inline extent */
                        ret = cow_file_range_inline(trans, root, inode,
                                                    start, end,
                                                    total_compressed,
                                                    compress_type, pages);
                }
                if (ret == 0) {
                        /*
                         * inline extent creation worked, we don't need
                         * to create any more async work items.  Unlock
                         * and free up our temp pages.
                         */
                        extent_clear_unlock_delalloc(inode,
                             &BTRFS_I(inode)->io_tree,
                             start, end, NULL,
                             EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
                             EXTENT_CLEAR_DELALLOC |
                             EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);

                        btrfs_end_transaction(trans, root);
                        goto free_pages_out;
                }
                btrfs_end_transaction(trans, root);
        }

        if (will_compress) {
                /*
                 * we aren't doing an inline extent, so round the
                 * compressed size up to a block size boundary so the
                 * allocator does sane things
                 */
                total_compressed = (total_compressed + blocksize - 1) &
                        ~(blocksize - 1);

                /*
                 * one last check to make sure the compression is really a
                 * win, compare the page count read with the blocks on disk
                 */
                total_in = (total_in + PAGE_CACHE_SIZE - 1) &
                        ~(PAGE_CACHE_SIZE - 1);
                if (total_compressed >= total_in) {
                        will_compress = 0;
                } else {
                        num_bytes = total_in;
                }
        }
        if (!will_compress && pages) {
                /*
                 * the compression code ran but failed to make things smaller,
                 * free any pages it allocated and our page pointer array
                 */
                for (i = 0; i < nr_pages_ret; i++) {
                        WARN_ON(pages[i]->mapping);
                        page_cache_release(pages[i]);
                }
                kfree(pages);
                pages = NULL;
                total_compressed = 0;
                nr_pages_ret = 0;

                /* flag the file so we don't compress in the future */
                if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
                    !(BTRFS_I(inode)->force_compress)) {
                        BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
                }
        }
        if (will_compress) {
                *num_added += 1;

                /* the async work queues will take care of doing actual
                 * allocation on disk for these compressed pages,
                 * and will submit them to the elevator.
                 */
                add_async_extent(async_cow, start, num_bytes,
                                 total_compressed, pages, nr_pages_ret,
                                 compress_type);

                if (start + num_bytes < end) {
                        start += num_bytes;
                        pages = NULL;
                        cond_resched();
                        goto again;
                }
        } else {
cleanup_and_bail_uncompressed:
                /*
                 * No compression, but we still need to write the pages in
                 * the file we've been given so far.  redirty the locked
                 * page if it corresponds to our extent and set things up
                 * for the async work queue to run cow_file_range to do
                 * the normal delalloc dance
                 */
                if (page_offset(locked_page) >= start &&
                    page_offset(locked_page) <= end) {
                        __set_page_dirty_nobuffers(locked_page);
                        /* unlocked later on in the async handlers */
                }
                add_async_extent(async_cow, start, end - start + 1,
                                 0, NULL, 0, BTRFS_COMPRESS_NONE);
                *num_added += 1;
        }

out:
        return 0;

free_pages_out:
        for (i = 0; i < nr_pages_ret; i++) {
                WARN_ON(pages[i]->mapping);
                page_cache_release(pages[i]);
        }
        kfree(pages);

        goto out;
}

/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
static noinline int submit_compressed_extents(struct inode *inode,
                                              struct async_cow *async_cow)
{
        struct async_extent *async_extent;
        u64 alloc_hint = 0;
        struct btrfs_trans_handle *trans;
        struct btrfs_key ins;
        struct extent_map *em;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct extent_io_tree *io_tree;
        int ret = 0;

        if (list_empty(&async_cow->extents))
                return 0;

        while (!list_empty(&async_cow->extents)) {
                async_extent = list_entry(async_cow->extents.next,
                                          struct async_extent, list);
                list_del(&async_extent->list);

                io_tree = &BTRFS_I(inode)->io_tree;

retry:
                /* did the compression code fall back to uncompressed IO? */
                if (!async_extent->pages) {
                        int page_started = 0;
                        unsigned long nr_written = 0;

                        lock_extent(io_tree, async_extent->start,
                                    async_extent->start +
                                    async_extent->ram_size - 1, GFP_NOFS);

                        /* allocate blocks */
                        ret = cow_file_range(inode, async_cow->locked_page,
                                             async_extent->start,
                                             async_extent->start +
                                             async_extent->ram_size - 1,
                                             &page_started, &nr_written, 0);

                        /*
                         * if page_started, cow_file_range inserted an
                         * inline extent and took care of all the unlocking
                         * and IO for us.  Otherwise, we need to submit
                         * all those pages down to the drive.
                         */
                        if (!page_started && !ret)
                                extent_write_locked_range(io_tree,
                                                  inode, async_extent->start,
                                                  async_extent->start +
                                                  async_extent->ram_size - 1,
                                                  btrfs_get_extent,
                                                  WB_SYNC_ALL);
                        kfree(async_extent);
                        cond_resched();
                        continue;
                }

                lock_extent(io_tree, async_extent->start,
                            async_extent->start + async_extent->ram_size - 1,
                            GFP_NOFS);

                trans = btrfs_join_transaction(root);
                BUG_ON(IS_ERR(trans));
                trans->block_rsv = &root->fs_info->delalloc_block_rsv;
                ret = btrfs_reserve_extent(trans, root,
                                           async_extent->compressed_size,
                                           async_extent->compressed_size,
                                           0, alloc_hint,
                                           (u64)-1, &ins, 1);
                btrfs_end_transaction(trans, root);

                if (ret) {
                        int i;
                        for (i = 0; i < async_extent->nr_pages; i++) {
                                WARN_ON(async_extent->pages[i]->mapping);
                                page_cache_release(async_extent->pages[i]);
                        }
                        kfree(async_extent->pages);
                        async_extent->nr_pages = 0;
                        async_extent->pages = NULL;
                        unlock_extent(io_tree, async_extent->start,
                                      async_extent->start +
                                      async_extent->ram_size - 1, GFP_NOFS);
                        goto retry;
                }

                /*
                 * here we're doing allocation and writeback of the
                 * compressed pages
                 */
                btrfs_drop_extent_cache(inode, async_extent->start,
                                        async_extent->start +
                                        async_extent->ram_size - 1, 0);

                em = alloc_extent_map();
                BUG_ON(!em);
                em->start = async_extent->start;
                em->len = async_extent->ram_size;
                em->orig_start = em->start;

                em->block_start = ins.objectid;
                em->block_len = ins.offset;
                em->bdev = root->fs_info->fs_devices->latest_bdev;
                em->compress_type = async_extent->compress_type;
                set_bit(EXTENT_FLAG_PINNED, &em->flags);
                set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);

                while (1) {
                        write_lock(&em_tree->lock);
                        ret = add_extent_mapping(em_tree, em);
                        write_unlock(&em_tree->lock);
                        if (ret != -EEXIST) {
                                free_extent_map(em);
                                break;
                        }
                        btrfs_drop_extent_cache(inode, async_extent->start,
                                                async_extent->start +
                                                async_extent->ram_size - 1, 0);
                }

                ret = btrfs_add_ordered_extent_compress(inode,
                                                async_extent->start,
                                                ins.objectid,
                                                async_extent->ram_size,
                                                ins.offset,
                                                BTRFS_ORDERED_COMPRESSED,
                                                async_extent->compress_type);
                BUG_ON(ret);

                /*
                 * clear dirty, set writeback and unlock the pages.
                 */
                extent_clear_unlock_delalloc(inode,
                                &BTRFS_I(inode)->io_tree,
                                async_extent->start,
                                async_extent->start +
                                async_extent->ram_size - 1,
                                NULL, EXTENT_CLEAR_UNLOCK_PAGE |
                                EXTENT_CLEAR_UNLOCK |
                                EXTENT_CLEAR_DELALLOC |
                                EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK);

                ret = btrfs_submit_compressed_write(inode,
                                    async_extent->start,
                                    async_extent->ram_size,
                                    ins.objectid,
                                    ins.offset, async_extent->pages,
                                    async_extent->nr_pages);

                BUG_ON(ret);
                alloc_hint = ins.objectid + ins.offset;
                kfree(async_extent);
                cond_resched();
        }

        return 0;
}

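/*
 * search the extent map tree near [start, start + num_bytes) for an
 * existing on-disk block number to use as an allocation hint
 */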
static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
                                      u64 num_bytes)
{
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct extent_map *em;
        u64 alloc_hint = 0;

        read_lock(&em_tree->lock);
        em = search_extent_mapping(em_tree, start, num_bytes);
        if (em) {
                /*
                 * if block start isn't an actual block number then find the
                 * first block in this inode and use that as a hint.  If that
                 * block is also bogus then just don't worry about it.
                 */
                if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
                        free_extent_map(em);
                        em = search_extent_mapping(em_tree, 0, 0);
                        if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
                                alloc_hint = em->block_start;
                        if (em)
                                free_extent_map(em);
                } else {
                        alloc_hint = em->block_start;
                        free_extent_map(em);
                }
        }
        read_unlock(&em_tree->lock);

        return alloc_hint;
}

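/*
 * returns true for the free space cache inodes (which live in the tree
 * root) and the free ino cache inode, both of which are written with
 * nolock transactions
 */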
static inline bool is_free_space_inode(struct btrfs_root *root,
                                       struct inode *inode)
{
        if (root == root->fs_info->tree_root ||
            BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID)
                return true;
        return false;
}

/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the callbacks end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int cow_file_range(struct inode *inode,
                                   struct page *locked_page,
                                   u64 start, u64 end, int *page_started,
                                   unsigned long *nr_written,
                                   int unlock)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
        u64 alloc_hint = 0;
        u64 num_bytes;
        unsigned long ram_size;
        u64 disk_num_bytes;
        u64 cur_alloc_size;
        u64 blocksize = root->sectorsize;
        struct btrfs_key ins;
        struct extent_map *em;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        int ret = 0;

        BUG_ON(is_free_space_inode(root, inode));
        trans = btrfs_join_transaction(root);
        BUG_ON(IS_ERR(trans));
        trans->block_rsv = &root->fs_info->delalloc_block_rsv;

        num_bytes = (end - start + blocksize) & ~(blocksize - 1);
        num_bytes = max(blocksize, num_bytes);
        disk_num_bytes = num_bytes;
        ret = 0;

        /* if this is a small write inside eof, kick off defrag */
        if (end <= BTRFS_I(inode)->disk_i_size && num_bytes < 64 * 1024)
                btrfs_add_inode_defrag(trans, inode);

        if (start == 0) {
                /* lets try to make an inline extent */
                ret = cow_file_range_inline(trans, root, inode,
                                            start, end, 0, 0, NULL);
                if (ret == 0) {
                        extent_clear_unlock_delalloc(inode,
                                     &BTRFS_I(inode)->io_tree,
                                     start, end, NULL,
                                     EXTENT_CLEAR_UNLOCK_PAGE |
                                     EXTENT_CLEAR_UNLOCK |
                                     EXTENT_CLEAR_DELALLOC |
                                     EXTENT_CLEAR_DIRTY |
                                     EXTENT_SET_WRITEBACK |
                                     EXTENT_END_WRITEBACK);

                        *nr_written = *nr_written +
                             (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
                        *page_started = 1;
                        ret = 0;
                        goto out;
                }
        }

        BUG_ON(disk_num_bytes >
               btrfs_super_total_bytes(&root->fs_info->super_copy));

        alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
        btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

        while (disk_num_bytes > 0) {
                unsigned long op;

                cur_alloc_size = disk_num_bytes;
                ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
                                           root->sectorsize, 0, alloc_hint,
                                           (u64)-1, &ins, 1);
                BUG_ON(ret);

                em = alloc_extent_map();
                BUG_ON(!em);
                em->start = start;
                em->orig_start = em->start;
                ram_size = ins.offset;
                em->len = ins.offset;

                em->block_start = ins.objectid;
                em->block_len = ins.offset;
                em->bdev = root->fs_info->fs_devices->latest_bdev;
                set_bit(EXTENT_FLAG_PINNED, &em->flags);

                while (1) {
                        write_lock(&em_tree->lock);
                        ret = add_extent_mapping(em_tree, em);
                        write_unlock(&em_tree->lock);
                        if (ret != -EEXIST) {
                                free_extent_map(em);
                                break;
                        }
                        btrfs_drop_extent_cache(inode, start,
                                                start + ram_size - 1, 0);
                }

                cur_alloc_size = ins.offset;
                ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
                                               ram_size, cur_alloc_size, 0);
                BUG_ON(ret);

                if (root->root_key.objectid ==
                    BTRFS_DATA_RELOC_TREE_OBJECTID) {
                        ret = btrfs_reloc_clone_csums(inode, start,
                                                      cur_alloc_size);
                        BUG_ON(ret);
                }

                if (disk_num_bytes < cur_alloc_size)
                        break;

                /* we're not doing compressed IO, don't unlock the first
                 * page (which the caller expects to stay locked), don't
                 * clear any dirty bits and don't set any writeback bits
                 *
                 * Do set the Private2 bit so we know this page was properly
                 * setup for writepage
                 */
                op = unlock ? EXTENT_CLEAR_UNLOCK_PAGE : 0;
                op |= EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
                        EXTENT_SET_PRIVATE2;

                extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
                                             start, start + ram_size - 1,
                                             locked_page, op);
                disk_num_bytes -= cur_alloc_size;
                num_bytes -= cur_alloc_size;
                alloc_hint = ins.objectid + ins.offset;
                start += cur_alloc_size;
        }
out:
        ret = 0;
        btrfs_end_transaction(trans, root);

        return ret;
}

/*
 * work queue callback to start compression on a file and pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
        struct async_cow *async_cow;
        int num_added = 0;
        async_cow = container_of(work, struct async_cow, work);

        compress_file_range(async_cow->inode, async_cow->locked_page,
                            async_cow->start, async_cow->end, async_cow,
                            &num_added);
        if (num_added == 0)
                async_cow->inode = NULL;
}

/*
 * work queue callback to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
        struct async_cow *async_cow;
        struct btrfs_root *root;
        unsigned long nr_pages;

        async_cow = container_of(work, struct async_cow, work);

        root = async_cow->root;
        nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
                PAGE_CACHE_SHIFT;

        atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);

        if (atomic_read(&root->fs_info->async_delalloc_pages) <
            5 * 1024 * 1024 &&
            waitqueue_active(&root->fs_info->async_submit_wait))
                wake_up(&root->fs_info->async_submit_wait);

        if (async_cow->inode)
                submit_compressed_extents(async_cow->inode, async_cow);
}

static noinline void async_cow_free(struct btrfs_work *work)
{
        struct async_cow *async_cow;
        async_cow = container_of(work, struct async_cow, work);
        kfree(async_cow);
}

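/*
 * split [start, end] into 512k chunks, queue each chunk on the delalloc
 * workers for compression, and throttle once too many async delalloc
 * pages are in flight
 */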
static int cow_file_range_async(struct inode *inode, struct page *locked_page,
                                u64 start, u64 end, int *page_started,
                                unsigned long *nr_written)
{
        struct async_cow *async_cow;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        unsigned long nr_pages;
        u64 cur_end;
        int limit = 10 * 1024 * 1024;

        clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
                         1, 0, NULL, GFP_NOFS);
        while (start < end) {
                async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
                BUG_ON(!async_cow);
                async_cow->inode = inode;
                async_cow->root = root;
                async_cow->locked_page = locked_page;
                async_cow->start = start;

                if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
                        cur_end = end;
                else
                        cur_end = min(end, start + 512 * 1024 - 1);

                async_cow->end = cur_end;
                INIT_LIST_HEAD(&async_cow->extents);

                async_cow->work.func = async_cow_start;
                async_cow->work.ordered_func = async_cow_submit;
                async_cow->work.ordered_free = async_cow_free;
                async_cow->work.flags = 0;

                nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
                        PAGE_CACHE_SHIFT;
                atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);

                btrfs_queue_worker(&root->fs_info->delalloc_workers,
                                   &async_cow->work);

                if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
                        wait_event(root->fs_info->async_submit_wait,
                           (atomic_read(&root->fs_info->async_delalloc_pages) <
                            limit));
                }

                while (atomic_read(&root->fs_info->async_submit_draining) &&
                      atomic_read(&root->fs_info->async_delalloc_pages)) {
                        wait_event(root->fs_info->async_submit_wait,
                          (atomic_read(&root->fs_info->async_delalloc_pages) ==
                           0));
                }

                *nr_written += nr_pages;
                start = cur_end + 1;
        }
        *page_started = 1;
        return 0;
}

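/*
 * returns 1 if any checksums exist for the given byte range, 0 if none
 * do.  Used to force cow when csums are present.
 */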
static noinline int csum_exist_in_range(struct btrfs_root *root,
                                        u64 bytenr, u64 num_bytes)
{
        int ret;
        struct btrfs_ordered_sum *sums;
        LIST_HEAD(list);

        ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
                                       bytenr + num_bytes - 1, &list, 0);
        if (ret == 0 && list_empty(&list))
                return 0;

        while (!list_empty(&list)) {
                sums = list_entry(list.next, struct btrfs_ordered_sum, list);
                list_del(&sums->list);
                kfree(sums);
        }
        return 1;
}

/*
 * called for the nocow writeback path.  This checks for snapshots or
 * COW copies of the extents that exist in the file, and COWs the file
 * as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static noinline int run_delalloc_nocow(struct inode *inode,
                                       struct page *locked_page,
                              u64 start, u64 end, int *page_started, int force,
                              unsigned long *nr_written)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
        struct extent_buffer *leaf;
        struct btrfs_path *path;
        struct btrfs_file_extent_item *fi;
        struct btrfs_key found_key;
        u64 cow_start;
        u64 cur_offset;
        u64 extent_end;
        u64 extent_offset;
        u64 disk_bytenr;
        u64 num_bytes;
        int extent_type;
        int ret;
        int type;
        int nocow;
        int check_prev = 1;
        bool nolock;
        u64 ino = btrfs_ino(inode);

        path = btrfs_alloc_path();
        BUG_ON(!path);

        nolock = is_free_space_inode(root, inode);

        if (nolock)
                trans = btrfs_join_transaction_nolock(root);
        else
                trans = btrfs_join_transaction(root);

        BUG_ON(IS_ERR(trans));
        trans->block_rsv = &root->fs_info->delalloc_block_rsv;

        cow_start = (u64)-1;
        cur_offset = start;
        while (1) {
                ret = btrfs_lookup_file_extent(trans, root, path, ino,
                                               cur_offset, 0);
                BUG_ON(ret < 0);
                if (ret > 0 && path->slots[0] > 0 && check_prev) {
                        leaf = path->nodes[0];
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0] - 1);
                        if (found_key.objectid == ino &&
                            found_key.type == BTRFS_EXTENT_DATA_KEY)
                                path->slots[0]--;
                }
                check_prev = 0;
next_slot:
                leaf = path->nodes[0];
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                BUG_ON(1);
                        if (ret > 0)
                                break;
                        leaf = path->nodes[0];
                }

                nocow = 0;
                disk_bytenr = 0;
                num_bytes = 0;
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

                if (found_key.objectid > ino ||
                    found_key.type > BTRFS_EXTENT_DATA_KEY ||
                    found_key.offset > end)
                        break;

                if (found_key.offset > cur_offset) {
                        extent_end = found_key.offset;
                        extent_type = 0;
                        goto out_check;
                }

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);
                extent_type = btrfs_file_extent_type(leaf, fi);

                if (extent_type == BTRFS_FILE_EXTENT_REG ||
                    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
                        disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
                        extent_offset = btrfs_file_extent_offset(leaf, fi);
                        extent_end = found_key.offset +
                                btrfs_file_extent_num_bytes(leaf, fi);
                        if (extent_end <= start) {
                                path->slots[0]++;
                                goto next_slot;
                        }
                        if (disk_bytenr == 0)
                                goto out_check;
                        if (btrfs_file_extent_compression(leaf, fi) ||
                            btrfs_file_extent_encryption(leaf, fi) ||
                            btrfs_file_extent_other_encoding(leaf, fi))
                                goto out_check;
                        if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
                                goto out_check;
                        if (btrfs_extent_readonly(root, disk_bytenr))
                                goto out_check;
                        if (btrfs_cross_ref_exist(trans, root, ino,
                                                  found_key.offset -
                                                  extent_offset, disk_bytenr))
                                goto out_check;
                        disk_bytenr += extent_offset;
                        disk_bytenr += cur_offset - found_key.offset;
                        num_bytes = min(end + 1, extent_end) - cur_offset;
                        /*
                         * force cow if a csum exists in the range.
                         * this ensures that the csums for a given extent
                         * are either valid or do not exist.
                         */
                        if (csum_exist_in_range(root, disk_bytenr, num_bytes))
                                goto out_check;
                        nocow = 1;
                } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                        extent_end = found_key.offset +
                                btrfs_file_extent_inline_len(leaf, fi);
                        extent_end = ALIGN(extent_end, root->sectorsize);
                } else {
                        BUG_ON(1);
                }
out_check:
                if (extent_end <= start) {
                        path->slots[0]++;
                        goto next_slot;
                }
                if (!nocow) {
                        if (cow_start == (u64)-1)
                                cow_start = cur_offset;
                        cur_offset = extent_end;
                        if (cur_offset > end)
                                break;
                        path->slots[0]++;
                        goto next_slot;
                }

                btrfs_release_path(path);
                if (cow_start != (u64)-1) {
                        ret = cow_file_range(inode, locked_page, cow_start,
                                        found_key.offset - 1, page_started,
                                        nr_written, 1);
                        BUG_ON(ret);
                        cow_start = (u64)-1;
                }

                if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
                        struct extent_map *em;
                        struct extent_map_tree *em_tree;
                        em_tree = &BTRFS_I(inode)->extent_tree;
                        em = alloc_extent_map();
                        BUG_ON(!em);
                        em->start = cur_offset;
                        em->orig_start = em->start;
                        em->len = num_bytes;
                        em->block_len = num_bytes;
                        em->block_start = disk_bytenr;
                        em->bdev = root->fs_info->fs_devices->latest_bdev;
                        set_bit(EXTENT_FLAG_PINNED, &em->flags);
                        while (1) {
                                write_lock(&em_tree->lock);
                                ret = add_extent_mapping(em_tree, em);
                                write_unlock(&em_tree->lock);
                                if (ret != -EEXIST) {
                                        free_extent_map(em);
                                        break;
                                }
                                btrfs_drop_extent_cache(inode, em->start,
                                                em->start + em->len - 1, 0);
                        }
                        type = BTRFS_ORDERED_PREALLOC;
                } else {
                        type = BTRFS_ORDERED_NOCOW;
                }

                ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
                                               num_bytes, num_bytes, type);
                BUG_ON(ret);

                if (root->root_key.objectid ==
                    BTRFS_DATA_RELOC_TREE_OBJECTID) {
                        ret = btrfs_reloc_clone_csums(inode, cur_offset,
                                                      num_bytes);
                        BUG_ON(ret);
                }

                extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
                                cur_offset, cur_offset + num_bytes - 1,
                                locked_page, EXTENT_CLEAR_UNLOCK_PAGE |
                                EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
                                EXTENT_SET_PRIVATE2);
                cur_offset = extent_end;
                if (cur_offset > end)
                        break;
        }
        btrfs_release_path(path);

        if (cur_offset <= end && cow_start == (u64)-1)
                cow_start = cur_offset;
        if (cow_start != (u64)-1) {
                ret = cow_file_range(inode, locked_page, cow_start, end,
                                     page_started, nr_written, 1);
                BUG_ON(ret);
        }

        if (nolock) {
                ret = btrfs_end_transaction_nolock(trans, root);
                BUG_ON(ret);
        } else {
                ret = btrfs_end_transaction(trans, root);
                BUG_ON(ret);
        }
        btrfs_free_path(path);
        return 0;
}

/*
 * extent_io.c callback to do delayed allocation processing
 */
static int run_delalloc_range(struct inode *inode, struct page *locked_page,
                              u64 start, u64 end, int *page_started,
                              unsigned long *nr_written)
{
        int ret;
        struct btrfs_root *root = BTRFS_I(inode)->root;

        if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)
                ret = run_delalloc_nocow(inode, locked_page, start, end,
                                         page_started, 1, nr_written);
        else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)
                ret = run_delalloc_nocow(inode, locked_page, start, end,
                                         page_started, 0, nr_written);
        else if (!btrfs_test_opt(root, COMPRESS) &&
                 !(BTRFS_I(inode)->force_compress) &&
                 !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))
                ret = cow_file_range(inode, locked_page, start, end,
                                     page_started, nr_written, 1);
        else
                ret = cow_file_range_async(inode, locked_page, start, end,
                                           page_started, nr_written);
        return ret;
}

1294 static int btrfs_split_extent_hook(struct inode *inode,
1295                                    struct extent_state *orig, u64 split)
1296 {
1297         /* not delalloc, ignore it */
1298         if (!(orig->state & EXTENT_DELALLOC))
1299                 return 0;
1300
1301         atomic_inc(&BTRFS_I(inode)->outstanding_extents);
1302         return 0;
1303 }
1304
1305 /*
1306  * extent_io.c merge_extent_hook, used to track merged delayed allocation
1307  * extents so we can keep track of new extents that are just merged onto old
1308  * extents, such as when we are doing sequential writes, so we can properly
1309  * account for the metadata space we'll need.
1310  */
1311 static int btrfs_merge_extent_hook(struct inode *inode,
1312                                    struct extent_state *new,
1313                                    struct extent_state *other)
1314 {
1315         /* not delalloc, ignore it */
1316         if (!(other->state & EXTENT_DELALLOC))
1317                 return 0;
1318
1319         atomic_dec(&BTRFS_I(inode)->outstanding_extents);
1320         return 0;
1321 }
1322
1323 /*
1324  * extent_io.c set_bit_hook, used to track delayed allocation
1325  * bytes in this file, and to maintain the list of inodes that
1326  * have pending delalloc work to be done.
1327  */
1328 static int btrfs_set_bit_hook(struct inode *inode,
1329                               struct extent_state *state, int *bits)
1330 {
1331
1332         /*
1333          * set_bit and clear_bit hooks normally require _irqsave/restore
1334          * but in this case, we are only testing for the DELALLOC
1335          * bit, which is only set or cleared with irqs on
1336          */
1337         if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1338                 struct btrfs_root *root = BTRFS_I(inode)->root;
1339                 u64 len = state->end + 1 - state->start;
1340                 bool do_list = !is_free_space_inode(root, inode);
1341
1342                 if (*bits & EXTENT_FIRST_DELALLOC)
1343                         *bits &= ~EXTENT_FIRST_DELALLOC;
1344                 else
1345                         atomic_inc(&BTRFS_I(inode)->outstanding_extents);
1346
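                /*
                 * account the new delalloc bytes against both this inode
                 * and the whole fs, and put the inode on the per-fs
                 * delalloc list if it isn't already there
                 */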
1347                 spin_lock(&root->fs_info->delalloc_lock);
1348                 BTRFS_I(inode)->delalloc_bytes += len;
1349                 root->fs_info->delalloc_bytes += len;
1350                 if (do_list && list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1351                         list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
1352                                       &root->fs_info->delalloc_inodes);
1353                 }
1354                 spin_unlock(&root->fs_info->delalloc_lock);
1355         }
1356         return 0;
1357 }
1358
1359 /*
1360  * extent_io.c clear_bit_hook, see set_bit_hook for why
1361  */
1362 static int btrfs_clear_bit_hook(struct inode *inode,
1363                                 struct extent_state *state, int *bits)
1364 {
1365         /*
1366          * set_bit and clear_bit hooks normally require _irqsave/restore
1367          * but in this case, we are only testing for the DELALLOC
1368          * bit, which is only set or cleared with irqs on
1369          */
1370         if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1371                 struct btrfs_root *root = BTRFS_I(inode)->root;
1372                 u64 len = state->end + 1 - state->start;
1373                 bool do_list = !is_free_space_inode(root, inode);
1374
1375                 if (*bits & EXTENT_FIRST_DELALLOC)
1376                         *bits &= ~EXTENT_FIRST_DELALLOC;
1377                 else if (!(*bits & EXTENT_DO_ACCOUNTING))
1378                         atomic_dec(&BTRFS_I(inode)->outstanding_extents);
1379
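                /*
                 * EXTENT_DO_ACCOUNTING means this range is being dropped
                 * without ever being written, so hand back its metadata
                 * reservation instead of waiting for IO completion
                 */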
1380                 if (*bits & EXTENT_DO_ACCOUNTING)
1381                         btrfs_delalloc_release_metadata(inode, len);
1382
1383                 if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
1384                     && do_list)
1385                         btrfs_free_reserved_data_space(inode, len);
1386
1387                 spin_lock(&root->fs_info->delalloc_lock);
1388                 root->fs_info->delalloc_bytes -= len;
1389                 BTRFS_I(inode)->delalloc_bytes -= len;
1390
1391                 if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
1392                     !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1393                         list_del_init(&BTRFS_I(inode)->delalloc_inodes);
1394                 }
1395                 spin_unlock(&root->fs_info->delalloc_lock);
1396         }
1397         return 0;
1398 }
1399
1400 /*
1401  * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
1402  * we don't create bios that span stripes or chunks
1403  */
1404 int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
1405                          size_t size, struct bio *bio,
1406                          unsigned long bio_flags)
1407 {
1408         struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
1409         struct btrfs_mapping_tree *map_tree;
1410         u64 logical = (u64)bio->bi_sector << 9;
1411         u64 length = 0;
1412         u64 map_length;
1413         int ret;
1414
1415         if (bio_flags & EXTENT_BIO_COMPRESSED)
1416                 return 0;
1417
1418         length = bio->bi_size;
1419         map_tree = &root->fs_info->mapping_tree;
1420         map_length = length;
1421         ret = btrfs_map_block(map_tree, READ, logical,
1422                               &map_length, NULL, 0);
1423
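        /*
         * if adding this page would make the bio cross a stripe or chunk
         * boundary, tell the caller to start a new bio instead
         */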
1424         if (map_length < length + size)
1425                 return 1;
1426         return ret;
1427 }
1428
1429 /*
1430  * in order to insert checksums into the metadata in large chunks,
1431  * we wait until bio submission time.  All the pages in the bio are
1432  * checksummed and sums are attached onto the ordered extent record.
1433  *
1434  * At IO completion time the csums attached to the ordered extent
1435  * record are inserted into the btree
1436  */
1437 static int __btrfs_submit_bio_start(struct inode *inode, int rw,
1438                                     struct bio *bio, int mirror_num,
1439                                     unsigned long bio_flags,
1440                                     u64 bio_offset)
1441 {
1442         struct btrfs_root *root = BTRFS_I(inode)->root;
1443         int ret = 0;
1444
1445         ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1446         BUG_ON(ret);
1447         return 0;
1448 }
1449
1450 /*
1451  * second half of the async checksumming from above: this runs after
1452  * __btrfs_submit_bio_start has finished, so the csums have already
1453  * been computed and attached to the ordered extent record.
1454  *
1455  * All that is left to do here is map the bio and send it down to
1456  * the device
1457  */
1458 static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
1459                           int mirror_num, unsigned long bio_flags,
1460                           u64 bio_offset)
1461 {
1462         struct btrfs_root *root = BTRFS_I(inode)->root;
1463         return btrfs_map_bio(root, rw, bio, mirror_num, 1);
1464 }
1465
1466 /*
1467  * extent_io.c submission hook. This does the right thing for csum calculation
1468  * on write, or reading the csums from the tree before a read
1469  */
1470 static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1471                           int mirror_num, unsigned long bio_flags,
1472                           u64 bio_offset)
1473 {
1474         struct btrfs_root *root = BTRFS_I(inode)->root;
1475         int ret = 0;
1476         int skip_sum;
1477
1478         skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
1479
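        /*
         * free space cache inodes get the dedicated end_io workqueue
         * (metadata == 2) so their completion work doesn't queue behind
         * regular data end_io processing
         */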
1480         if (is_free_space_inode(root, inode))
1481                 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 2);
1482         else
1483                 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
1484         BUG_ON(ret);
1485
1486         if (!(rw & REQ_WRITE)) {
1487                 if (bio_flags & EXTENT_BIO_COMPRESSED) {
1488                         return btrfs_submit_compressed_read(inode, bio,
1489                                                     mirror_num, bio_flags);
1490                 } else if (!skip_sum) {
1491                         ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
1492                         if (ret)
1493                                 return ret;
1494                 }
1495                 goto mapit;
1496         } else if (!skip_sum) {
1497                 /* csum items have already been cloned */
1498                 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
1499                         goto mapit;
1500                 /* we're doing a write, do the async checksumming */
1501                 return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
1502                                    inode, rw, bio, mirror_num,
1503                                    bio_flags, bio_offset,
1504                                    __btrfs_submit_bio_start,
1505                                    __btrfs_submit_bio_done);
1506         }
1507
1508 mapit:
1509         return btrfs_map_bio(root, rw, bio, mirror_num, 0);
1510 }
1511
1512 /*
1513  * given a list of ordered sums, record them in the inode.  This happens
1514  * at IO completion time based on sums calculated at bio submission time.
1515  */
1516 static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1517                              struct inode *inode, u64 file_offset,
1518                              struct list_head *list)
1519 {
1520         struct btrfs_ordered_sum *sum;
1521
1522         list_for_each_entry(sum, list, list) {
1523                 btrfs_csum_file_blocks(trans,
1524                        BTRFS_I(inode)->root->fs_info->csum_root, sum);
1525         }
1526         return 0;
1527 }
1528
1529 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
1530                               struct extent_state **cached_state)
1531 {
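        /* end is inclusive, so it should never land on a page boundary */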
1532         if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
1533                 WARN_ON(1);
1534         return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1535                                    cached_state, GFP_NOFS);
1536 }
1537
1538 /* see btrfs_writepage_start_hook for details on why this is required */
1539 struct btrfs_writepage_fixup {
1540         struct page *page;
1541         struct btrfs_work work;
1542 };
1543
1544 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
1545 {
1546         struct btrfs_writepage_fixup *fixup;
1547         struct btrfs_ordered_extent *ordered;
1548         struct extent_state *cached_state = NULL;
1549         struct page *page;
1550         struct inode *inode;
1551         u64 page_start;
1552         u64 page_end;
1553
1554         fixup = container_of(work, struct btrfs_writepage_fixup, work);
1555         page = fixup->page;
1556 again:
1557         lock_page(page);
1558         if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
1559                 ClearPageChecked(page);
1560                 goto out_page;
1561         }
1562
1563         inode = page->mapping->host;
1564         page_start = page_offset(page);
1565         page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
1566
1567         lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
1568                          &cached_state, GFP_NOFS);
1569
1570         /* already ordered? We're done */
1571         if (PagePrivate2(page))
1572                 goto out;
1573
1574         ordered = btrfs_lookup_ordered_extent(inode, page_start);
1575         if (ordered) {
1576                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
1577                                      page_end, &cached_state, GFP_NOFS);
1578                 unlock_page(page);
1579                 btrfs_start_ordered_extent(inode, ordered, 1);
1580                 goto again;
1581         }
1582
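        /*
         * no ordered extent and the page isn't ordered either: the page
         * was dirtied behind our back without the delalloc machinery
         * being set up, which should be impossible
         */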
1583         BUG();
1584         btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
1585         ClearPageChecked(page);
1586 out:
1587         unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
1588                              &cached_state, GFP_NOFS);
1589 out_page:
1590         unlock_page(page);
1591         page_cache_release(page);
1592         kfree(fixup);
1593 }
1594
1595 /*
1596  * There are a few paths in the higher layers of the kernel that directly
1597  * set the page dirty bit without asking the filesystem if it is a
1598  * good idea.  This causes problems because we want to make sure COW
1599  * properly happens and the data=ordered rules are followed.
1600  *
1601  * In our case any range that doesn't have the ORDERED bit set
1602  * hasn't been properly set up for IO.  We kick off an async process
1603  * to fix it up.  The async helper will wait for ordered extents, set
1604  * the delalloc bit and make it safe to write the page.
1605  */
1606 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
1607 {
1608         struct inode *inode = page->mapping->host;
1609         struct btrfs_writepage_fixup *fixup;
1610         struct btrfs_root *root = BTRFS_I(inode)->root;
1611
1612         /* this page is properly in the ordered list */
1613         if (TestClearPagePrivate2(page))
1614                 return 0;
1615
1616         if (PageChecked(page))
1617                 return -EAGAIN;
1618
1619         fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
1620         if (!fixup)
1621                 return -EAGAIN;
1622
1623         SetPageChecked(page);
1624         page_cache_get(page);
1625         fixup->work.func = btrfs_writepage_fixup_worker;
1626         fixup->page = page;
1627         btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
1628         return -EAGAIN;
1629 }
1630
1631 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1632                                        struct inode *inode, u64 file_pos,
1633                                        u64 disk_bytenr, u64 disk_num_bytes,
1634                                        u64 num_bytes, u64 ram_bytes,
1635                                        u8 compression, u8 encryption,
1636                                        u16 other_encoding, int extent_type)
1637 {
1638         struct btrfs_root *root = BTRFS_I(inode)->root;
1639         struct btrfs_file_extent_item *fi;
1640         struct btrfs_path *path;
1641         struct extent_buffer *leaf;
1642         struct btrfs_key ins;
1643         u64 hint;
1644         int ret;
1645
1646         path = btrfs_alloc_path();
1647         BUG_ON(!path);
1648
1649         path->leave_spinning = 1;
1650
1651         /*
1652          * we may be replacing one extent in the tree with another.
1653          * The new extent is pinned in the extent map, and we don't want
1654          * to drop it from the cache until it is completely in the btree.
1655          *
1656          * So, tell btrfs_drop_extents to leave this extent in the cache.
1657          * The caller is expected to unpin it and allow it to be merged
1658          * with the others.
1659          */
1660         ret = btrfs_drop_extents(trans, inode, file_pos, file_pos + num_bytes,
1661                                  &hint, 0);
1662         BUG_ON(ret);
1663
1664         ins.objectid = btrfs_ino(inode);
1665         ins.offset = file_pos;
1666         ins.type = BTRFS_EXTENT_DATA_KEY;
1667         ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
1668         BUG_ON(ret);
1669         leaf = path->nodes[0];
1670         fi = btrfs_item_ptr(leaf, path->slots[0],
1671                             struct btrfs_file_extent_item);
1672         btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1673         btrfs_set_file_extent_type(leaf, fi, extent_type);
1674         btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
1675         btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
1676         btrfs_set_file_extent_offset(leaf, fi, 0);
1677         btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1678         btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
1679         btrfs_set_file_extent_compression(leaf, fi, compression);
1680         btrfs_set_file_extent_encryption(leaf, fi, encryption);
1681         btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
1682
1683         btrfs_unlock_up_safe(path, 1);
1684         btrfs_set_lock_blocking(leaf);
1685
1686         btrfs_mark_buffer_dirty(leaf);
1687
1688         inode_add_bytes(inode, num_bytes);
1689
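        /*
         * reuse the key to describe the on-disk extent itself so the
         * reserved allocation can be recorded in the extent tree
         */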
1690         ins.objectid = disk_bytenr;
1691         ins.offset = disk_num_bytes;
1692         ins.type = BTRFS_EXTENT_ITEM_KEY;
1693         ret = btrfs_alloc_reserved_file_extent(trans, root,
1694                                         root->root_key.objectid,
1695                                         btrfs_ino(inode), file_pos, &ins);
1696         BUG_ON(ret);
1697         btrfs_free_path(path);
1698
1699         return 0;
1700 }
1701
1702 /*
1703  * as ordered data IO finishes, this gets called so we can finish
1704  * an ordered extent if the range of bytes in the file it covers is
1705  * fully written.
1706  */
1712 static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1713 {
1714         struct btrfs_root *root = BTRFS_I(inode)->root;
1715         struct btrfs_trans_handle *trans = NULL;
1716         struct btrfs_ordered_extent *ordered_extent = NULL;
1717         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1718         struct extent_state *cached_state = NULL;
1719         int compress_type = 0;
1720         int ret;
1721         bool nolock;
1722
1723         ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
1724                                              end - start + 1);
1725         if (!ret)
1726                 return 0;
1727         BUG_ON(!ordered_extent);
1728
1729         nolock = is_free_space_inode(root, inode);
1730
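        /*
         * nocow writes landed in existing extents, so there is no new
         * file extent to insert and no csums to add; updating i_size and
         * the inode item is all that is left
         */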
1731         if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
1732                 BUG_ON(!list_empty(&ordered_extent->list));
1733                 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1734                 if (!ret) {
1735                         if (nolock)
1736                                 trans = btrfs_join_transaction_nolock(root);
1737                         else
1738                                 trans = btrfs_join_transaction(root);
1739                         BUG_ON(IS_ERR(trans));
1740                         trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1741                         ret = btrfs_update_inode(trans, root, inode);
1742                         BUG_ON(ret);
1743                 }
1744                 goto out;
1745         }
1746
1747         lock_extent_bits(io_tree, ordered_extent->file_offset,
1748                          ordered_extent->file_offset + ordered_extent->len - 1,
1749                          0, &cached_state, GFP_NOFS);
1750
1751         if (nolock)
1752                 trans = btrfs_join_transaction_nolock(root);
1753         else
1754                 trans = btrfs_join_transaction(root);
1755         BUG_ON(IS_ERR(trans));
1756         trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1757
1758         if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
1759                 compress_type = ordered_extent->compress_type;
1760         if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
1761                 BUG_ON(compress_type);
1762                 ret = btrfs_mark_extent_written(trans, inode,
1763                                                 ordered_extent->file_offset,
1764                                                 ordered_extent->file_offset +
1765                                                 ordered_extent->len);
1766                 BUG_ON(ret);
1767         } else {
1768                 BUG_ON(root == root->fs_info->tree_root);
1769                 ret = insert_reserved_file_extent(trans, inode,
1770                                                 ordered_extent->file_offset,
1771                                                 ordered_extent->start,
1772                                                 ordered_extent->disk_len,
1773                                                 ordered_extent->len,
1774                                                 ordered_extent->len,
1775                                                 compress_type, 0, 0,
1776                                                 BTRFS_FILE_EXTENT_REG);
1777                 unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
1778                                    ordered_extent->file_offset,
1779                                    ordered_extent->len);
1780                 BUG_ON(ret);
1781         }
1782         unlock_extent_cached(io_tree, ordered_extent->file_offset,
1783                              ordered_extent->file_offset +
1784                              ordered_extent->len - 1, &cached_state, GFP_NOFS);
1785
1786         add_pending_csums(trans, inode, ordered_extent->file_offset,
1787                           &ordered_extent->list);
1788
1789         ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1790         if (!ret) {
1791                 ret = btrfs_update_inode(trans, root, inode);
1792                 BUG_ON(ret);
1793         }
1794         ret = 0;
1795 out:
1796         if (nolock) {
1797                 if (trans)
1798                         btrfs_end_transaction_nolock(trans, root);
1799         } else {
1800                 btrfs_delalloc_release_metadata(inode, ordered_extent->len);
1801                 if (trans)
1802                         btrfs_end_transaction(trans, root);
1803         }
1804
1805         /* once for us */
1806         btrfs_put_ordered_extent(ordered_extent);
1807         /* once for the tree */
1808         btrfs_put_ordered_extent(ordered_extent);
1809
1810         return 0;
1811 }
1812
1813 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
1814                                 struct extent_state *state, int uptodate)
1815 {
1816         trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
1817
1818         ClearPagePrivate2(page);
1819         return btrfs_finish_ordered_io(page->mapping->host, start, end);
1820 }
1821
1822 /*
1823  * When IO fails, either with EIO or csum verification fails, we
1824  * try other mirrors that might have a good copy of the data.  This
1825  * io_failure_record is used to record state as we go through all the
1826  * mirrors.  If another mirror has good data, the page is set up to date
1827  * and things continue.  If a good mirror can't be found, the original
1828  * bio end_io callback is called to indicate things have failed.
1829  */
1830 struct io_failure_record {
1831         struct page *page;
1832         u64 start;
1833         u64 len;
1834         u64 logical;
1835         unsigned long bio_flags;
1836         int last_mirror;
1837 };
1838
1839 static int btrfs_io_failed_hook(struct bio *failed_bio,
1840                          struct page *page, u64 start, u64 end,
1841                          struct extent_state *state)
1842 {
1843         struct io_failure_record *failrec = NULL;
1844         u64 private;
1845         struct extent_map *em;
1846         struct inode *inode = page->mapping->host;
1847         struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1848         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
1849         struct bio *bio;
1850         int num_copies;
1851         int ret;
1852         int rw;
1853         u64 logical;
1854
1855         ret = get_state_private(failure_tree, start, &private);
1856         if (ret) {
1857                 failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
1858                 if (!failrec)
1859                         return -ENOMEM;
1860                 failrec->start = start;
1861                 failrec->len = end - start + 1;
1862                 failrec->last_mirror = 0;
1863                 failrec->bio_flags = 0;
1864
1865                 read_lock(&em_tree->lock);
1866                 em = lookup_extent_mapping(em_tree, start, failrec->len);
1867                 if (em && (em->start > start || em->start + em->len < start)) {
1868                         free_extent_map(em);
1869                         em = NULL;
1870                 }
1871                 read_unlock(&em_tree->lock);
1872
1873                 if (IS_ERR_OR_NULL(em)) {
1874                         kfree(failrec);
1875                         return -EIO;
1876                 }
1877                 logical = start - em->start;
1878                 logical = em->block_start + logical;
1879                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
1880                         logical = em->block_start;
1881                         failrec->bio_flags = EXTENT_BIO_COMPRESSED;
1882                         extent_set_compress_type(&failrec->bio_flags,
1883                                                  em->compress_type);
1884                 }
1885                 failrec->logical = logical;
1886                 free_extent_map(em);
1887                 set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
1888                                 EXTENT_DIRTY, GFP_NOFS);
1889                 set_state_private(failure_tree, start,
1890                                  (u64)(unsigned long)failrec);
1891         } else {
1892                 failrec = (struct io_failure_record *)(unsigned long)private;
1893         }
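        /*
         * every mirror that holds a copy gets one shot; once last_mirror
         * goes past num_copies below, we give up and fail the read
         */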
1894         num_copies = btrfs_num_copies(
1895                               &BTRFS_I(inode)->root->fs_info->mapping_tree,
1896                               failrec->logical, failrec->len);
1897         failrec->last_mirror++;
1898         if (!state) {
1899                 spin_lock(&BTRFS_I(inode)->io_tree.lock);
1900                 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
1901                                                     failrec->start,
1902                                                     EXTENT_LOCKED);
1903                 if (state && state->start != failrec->start)
1904                         state = NULL;
1905                 spin_unlock(&BTRFS_I(inode)->io_tree.lock);
1906         }
1907         if (!state || failrec->last_mirror > num_copies) {
1908                 set_state_private(failure_tree, failrec->start, 0);
1909                 clear_extent_bits(failure_tree, failrec->start,
1910                                   failrec->start + failrec->len - 1,
1911                                   EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1912                 kfree(failrec);
1913                 return -EIO;
1914         }
1915         bio = bio_alloc(GFP_NOFS, 1);
1916         bio->bi_private = state;
1917         bio->bi_end_io = failed_bio->bi_end_io;
1918         bio->bi_sector = failrec->logical >> 9;
1919         bio->bi_bdev = failed_bio->bi_bdev;
1920         bio->bi_size = 0;
1921
1922         bio_add_page(bio, page, failrec->len, start - page_offset(page));
1923         if (failed_bio->bi_rw & REQ_WRITE)
1924                 rw = WRITE;
1925         else
1926                 rw = READ;
1927
1928         ret = BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
1929                                                       failrec->last_mirror,
1930                                                       failrec->bio_flags, 0);
1931         return ret;
1932 }
1933
1934 /*
1935  * each time an IO finishes, we do a fast check in the IO failure tree
1936  * to see if we need to process or clean up an io_failure_record
1937  */
1938 static int btrfs_clean_io_failures(struct inode *inode, u64 start)
1939 {
1940         u64 private;
1941         u64 private_failure;
1942         struct io_failure_record *failure;
1943         int ret;
1944
1945         private = 0;
1946         if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
1947                              (u64)-1, 1, EXTENT_DIRTY, 0)) {
1948                 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
1949                                         start, &private_failure);
1950                 if (ret == 0) {
1951                         failure = (struct io_failure_record *)(unsigned long)
1952                                    private_failure;
1953                         set_state_private(&BTRFS_I(inode)->io_failure_tree,
1954                                           failure->start, 0);
1955                         clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
1956                                           failure->start,
1957                                           failure->start + failure->len - 1,
1958                                           EXTENT_DIRTY | EXTENT_LOCKED,
1959                                           GFP_NOFS);
1960                         kfree(failure);
1961                 }
1962         }
1963         return 0;
1964 }
1965
1966 /*
1967  * when reads are done, we need to check csums to verify the data is correct.
1968  * If there's a match, we allow the bio to finish.  If not, we go through
1969  * the io_failure_record routines to find good copies
1970  */
1971 static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
1972                                struct extent_state *state)
1973 {
1974         size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
1975         struct inode *inode = page->mapping->host;
1976         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1977         char *kaddr;
1978         u64 private = ~(u32)0;
1979         int ret;
1980         struct btrfs_root *root = BTRFS_I(inode)->root;
1981         u32 csum = ~(u32)0;
1982
1983         if (PageChecked(page)) {
1984                 ClearPageChecked(page);
1985                 goto good;
1986         }
1987
1988         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
1989                 goto good;
1990
1991         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
1992             test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
1993                 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
1994                                   GFP_NOFS);
1995                 return 0;
1996         }
1997
1998         if (state && state->start == start) {
1999                 private = state->private;
2000                 ret = 0;
2001         } else {
2002                 ret = get_state_private(io_tree, start, &private);
2003         }
2004         kaddr = kmap_atomic(page, KM_USER0);
2005         if (ret)
2006                 goto zeroit;
2007
2008         csum = btrfs_csum_data(root, kaddr + offset, csum,  end - start + 1);
2009         btrfs_csum_final(csum, (char *)&csum);
2010         if (csum != private)
2011                 goto zeroit;
2012
2013         kunmap_atomic(kaddr, KM_USER0);
2014 good:
2015         /* if the io failure tree for this inode is non-empty,
2016          * check to see if we've recovered from a failed IO
2017          */
2018         btrfs_clean_io_failures(inode, start);
2019         return 0;
2020
2021 zeroit:
2022         printk_ratelimited(KERN_INFO "btrfs csum failed ino %llu off %llu csum %u "
2023                        "private %llu\n",
2024                        (unsigned long long)btrfs_ino(page->mapping->host),
2025                        (unsigned long long)start, csum,
2026                        (unsigned long long)private);
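        /* poison the range so stale disk contents never reach userspace */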
2027         memset(kaddr + offset, 1, end - start + 1);
2028         flush_dcache_page(page);
2029         kunmap_atomic(kaddr, KM_USER0);
2030         if (private == 0)
2031                 return 0;
2032         return -EIO;
2033 }
2034
2035 struct delayed_iput {
2036         struct list_head list;
2037         struct inode *inode;
2038 };
2039
2040 void btrfs_add_delayed_iput(struct inode *inode)
2041 {
2042         struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2043         struct delayed_iput *delayed;
2044
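        /*
         * drop our reference right away unless it is the last one; final
         * iputs are deferred to btrfs_run_delayed_iputs() where eviction
         * is safe to run
         */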
2045         if (atomic_add_unless(&inode->i_count, -1, 1))
2046                 return;
2047
2048         delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
2049         delayed->inode = inode;
2050
2051         spin_lock(&fs_info->delayed_iput_lock);
2052         list_add_tail(&delayed->list, &fs_info->delayed_iputs);
2053         spin_unlock(&fs_info->delayed_iput_lock);
2054 }
2055
2056 void btrfs_run_delayed_iputs(struct btrfs_root *root)
2057 {
2058         LIST_HEAD(list);
2059         struct btrfs_fs_info *fs_info = root->fs_info;
2060         struct delayed_iput *delayed;
2061         int empty;
2062
2063         spin_lock(&fs_info->delayed_iput_lock);
2064         empty = list_empty(&fs_info->delayed_iputs);
2065         spin_unlock(&fs_info->delayed_iput_lock);
2066         if (empty)
2067                 return;
2068
2069         down_read(&root->fs_info->cleanup_work_sem);
2070         spin_lock(&fs_info->delayed_iput_lock);
2071         list_splice_init(&fs_info->delayed_iputs, &list);
2072         spin_unlock(&fs_info->delayed_iput_lock);
2073
2074         while (!list_empty(&list)) {
2075                 delayed = list_entry(list.next, struct delayed_iput, list);
2076                 list_del(&delayed->list);
2077                 iput(delayed->inode);
2078                 kfree(delayed);
2079         }
2080         up_read(&root->fs_info->cleanup_work_sem);
2081 }
2082
2083 /*
2084  * calculate the extra metadata reservation needed when snapshotting a
2085  * subvolume that contains orphan files.
2086  */
2087 void btrfs_orphan_pre_snapshot(struct btrfs_trans_handle *trans,
2088                                 struct btrfs_pending_snapshot *pending,
2089                                 u64 *bytes_to_reserve)
2090 {
2091         struct btrfs_root *root;
2092         struct btrfs_block_rsv *block_rsv;
2093         u64 num_bytes;
2094         int index;
2095
2096         root = pending->root;
2097         if (!root->orphan_block_rsv || list_empty(&root->orphan_list))
2098                 return;
2099
2100         block_rsv = root->orphan_block_rsv;
2101
2102         /* orphan block reservation for the snapshot */
2103         num_bytes = block_rsv->size;
2104
2105         /*
2106          * after the snapshot is created, COWing tree blocks may use more
2107          * space than they free.  So we should make sure there is enough
2108          * reserved space.
2109          */
2110         index = trans->transid & 0x1;
2111         if (block_rsv->reserved + block_rsv->freed[index] < block_rsv->size) {
2112                 num_bytes += block_rsv->size -
2113                              (block_rsv->reserved + block_rsv->freed[index]);
2114         }
2115
2116         *bytes_to_reserve += num_bytes;
2117 }
2118
2119 void btrfs_orphan_post_snapshot(struct btrfs_trans_handle *trans,
2120                                 struct btrfs_pending_snapshot *pending)
2121 {
2122         struct btrfs_root *root = pending->root;
2123         struct btrfs_root *snap = pending->snap;
2124         struct btrfs_block_rsv *block_rsv;
2125         u64 num_bytes;
2126         int index;
2127         int ret;
2128
2129         if (!root->orphan_block_rsv || list_empty(&root->orphan_list))
2130                 return;
2131
2132         /* refill source subvolume's orphan block reservation */
2133         block_rsv = root->orphan_block_rsv;
2134         index = trans->transid & 0x1;
2135         if (block_rsv->reserved + block_rsv->freed[index] < block_rsv->size) {
2136                 num_bytes = block_rsv->size -
2137                             (block_rsv->reserved + block_rsv->freed[index]);
2138                 ret = btrfs_block_rsv_migrate(&pending->block_rsv,
2139                                               root->orphan_block_rsv,
2140                                               num_bytes);
2141                 BUG_ON(ret);
2142         }
2143
2144         /* setup orphan block reservation for the snapshot */
2145         block_rsv = btrfs_alloc_block_rsv(snap);
2146         BUG_ON(!block_rsv);
2147
2148         btrfs_add_durable_block_rsv(root->fs_info, block_rsv);
2149         snap->orphan_block_rsv = block_rsv;
2150
2151         num_bytes = root->orphan_block_rsv->size;
2152         ret = btrfs_block_rsv_migrate(&pending->block_rsv,
2153                                       block_rsv, num_bytes);
2154         BUG_ON(ret);
2155
2156 #if 0
2157         /* insert orphan item for the snapshot */
2158         WARN_ON(!root->orphan_item_inserted);
2159         ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
2160                                        snap->root_key.objectid);
2161         BUG_ON(ret);
2162         snap->orphan_item_inserted = 1;
2163 #endif
2164 }
2165
2166 enum btrfs_orphan_cleanup_state {
2167         ORPHAN_CLEANUP_STARTED  = 1,
2168         ORPHAN_CLEANUP_DONE     = 2,
2169 };
2170
2171 /*
2172  * This is called at transaction commit time.  If there are no orphan
2173  * files left in the subvolume, it removes the orphan item and frees
2174  * the block_rsv structure.
2175  */
2176 void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
2177                               struct btrfs_root *root)
2178 {
2179         int ret;
2180
2181         if (!list_empty(&root->orphan_list) ||
2182             root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
2183                 return;
2184
2185         if (root->orphan_item_inserted &&
2186             btrfs_root_refs(&root->root_item) > 0) {
2187                 ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
2188                                             root->root_key.objectid);
2189                 BUG_ON(ret);
2190                 root->orphan_item_inserted = 0;
2191         }
2192
2193         if (root->orphan_block_rsv) {
2194                 WARN_ON(root->orphan_block_rsv->size > 0);
2195                 btrfs_free_block_rsv(root, root->orphan_block_rsv);
2196                 root->orphan_block_rsv = NULL;
2197         }
2198 }
2199
2200 /*
2201  * This creates an orphan entry for the given inode in case something goes
2202  * wrong in the middle of an unlink/truncate.
2203  *
2204  * NOTE: the caller of this function should reserve 5 units of metadata
2205  *       for it.
2206  */
2207 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
2208 {
2209         struct btrfs_root *root = BTRFS_I(inode)->root;
2210         struct btrfs_block_rsv *block_rsv = NULL;
2211         int reserve = 0;
2212         int insert = 0;
2213         int ret;
2214
2215         if (!root->orphan_block_rsv) {
2216                 block_rsv = btrfs_alloc_block_rsv(root);
2217                 BUG_ON(!block_rsv);
2218         }
2219
2220         spin_lock(&root->orphan_lock);
2221         if (!root->orphan_block_rsv) {
2222                 root->orphan_block_rsv = block_rsv;
2223         } else if (block_rsv) {
2224                 btrfs_free_block_rsv(root, block_rsv);
2225                 block_rsv = NULL;
2226         }
2227
2228         if (list_empty(&BTRFS_I(inode)->i_orphan)) {
2229                 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
2230 #if 0
2231                 /*
2232                  * For proper ENOSPC handling, we should do orphan
2233                  * cleanup when mounting. But this introduces backward
2234                  * compatibility issue.
2235                  */
2236                 if (!xchg(&root->orphan_item_inserted, 1))
2237                         insert = 2;
2238                 else
2239                         insert = 1;
2240 #endif
2241                 insert = 1;
2242         }
2243
2244         if (!BTRFS_I(inode)->orphan_meta_reserved) {
2245                 BTRFS_I(inode)->orphan_meta_reserved = 1;
2246                 reserve = 1;
2247         }
2248         spin_unlock(&root->orphan_lock);
2249
2250         if (block_rsv)
2251                 btrfs_add_durable_block_rsv(root->fs_info, block_rsv);
2252
2253         /* grab metadata reservation from transaction handle */
2254         if (reserve) {
2255                 ret = btrfs_orphan_reserve_metadata(trans, inode);
2256                 BUG_ON(ret);
2257         }
2258
2259         /* insert an orphan item to track this unlinked/truncated file */
2260         if (insert >= 1) {
2261                 ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
2262                 BUG_ON(ret);
2263         }
2264
2265         /* record that the subvolume itself contains orphan files */
2266         if (insert >= 2) {
2267                 ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
2268                                                root->root_key.objectid);
2269                 BUG_ON(ret);
2270         }
2271         return 0;
2272 }
2273
2274 /*
2275  * We have done the truncate/delete so we can go ahead and remove the orphan
2276  * item for this particular inode.
2277  */
2278 int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
2279 {
2280         struct btrfs_root *root = BTRFS_I(inode)->root;
2281         int delete_item = 0;
2282         int release_rsv = 0;
2283         int ret = 0;
2284
2285         spin_lock(&root->orphan_lock);
2286         if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
2287                 list_del_init(&BTRFS_I(inode)->i_orphan);
2288                 delete_item = 1;
2289         }
2290
2291         if (BTRFS_I(inode)->orphan_meta_reserved) {
2292                 BTRFS_I(inode)->orphan_meta_reserved = 0;
2293                 release_rsv = 1;
2294         }
2295         spin_unlock(&root->orphan_lock);
2296
2297         if (trans && delete_item) {
2298                 ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
2299                 BUG_ON(ret);
2300         }
2301
2302         if (release_rsv)
2303                 btrfs_orphan_release_metadata(inode);
2304
2305         return 0;
2306 }
2307
2308 /*
2309  * this cleans up any orphans that may be left on the list from the last use
2310  * of this root.
2311  */
2312 int btrfs_orphan_cleanup(struct btrfs_root *root)
2313 {
2314         struct btrfs_path *path;
2315         struct extent_buffer *leaf;
2316         struct btrfs_key key, found_key;
2317         struct btrfs_trans_handle *trans;
2318         struct inode *inode;
2319         int ret = 0, nr_unlink = 0, nr_truncate = 0;
2320
2321         if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
2322                 return 0;
2323
2324         path = btrfs_alloc_path();
2325         if (!path) {
2326                 ret = -ENOMEM;
2327                 goto out;
2328         }
2329         path->reada = -1;
2330
2331         key.objectid = BTRFS_ORPHAN_OBJECTID;
2332         btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
2333         key.offset = (u64)-1;
2334
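        /*
         * orphan items store the inode number in the key offset, so start
         * at the largest possible key and walk backwards through every
         * item with the orphan objectid
         */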
2335         while (1) {
2336                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2337                 if (ret < 0)
2338                         goto out;
2339
2340                 /*
2341                  * ret == 0 means we found exactly what we were searching
2342                  * for, which is weird but possible; only back the path up
2343                  * if we didn't find the key, then see if what we landed on matches
2344                  */
2345                 if (ret > 0) {
2346                         ret = 0;
2347                         if (path->slots[0] == 0)
2348                                 break;
2349                         path->slots[0]--;
2350                 }
2351
2352                 /* pull out the item */
2353                 leaf = path->nodes[0];
2354                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2355
2356                 /* make sure the item matches what we want */
2357                 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
2358                         break;
2359                 if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
2360                         break;
2361
2362                 /* release the path since we're done with it */
2363                 btrfs_release_path(path);
2364
2365                 /*
2366                  * this is basically btrfs_lookup, without the cross-root
2367                  * handling.  the inode number is stored in the offset of
2368                  * the orphan item.
2369                  */
2370                 found_key.objectid = found_key.offset;
2371                 found_key.type = BTRFS_INODE_ITEM_KEY;
2372                 found_key.offset = 0;
2373                 inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
2374                 if (IS_ERR(inode)) {
2375                         ret = PTR_ERR(inode);
2376                         goto out;
2377                 }
2378
2379                 /*
2380                  * add this inode to the orphan list so btrfs_orphan_del does
2381                  * the proper thing when we hit it
2382                  */
2383                 spin_lock(&root->orphan_lock);
2384                 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
2385                 spin_unlock(&root->orphan_lock);
2386
2387                 /*
2388                  * if this is a bad inode, it means we actually succeeded in
2389                  * removing the inode, but not the orphan record, which means
2390                  * we need to manually delete the orphan since iput will just
2391                  * do a destroy_inode
2392                  */
2393                 if (is_bad_inode(inode)) {
2394                         trans = btrfs_start_transaction(root, 0);
2395                         if (IS_ERR(trans)) {
2396                                 ret = PTR_ERR(trans);
2397                                 goto out;
2398                         }
2399                         btrfs_orphan_del(trans, inode);
2400                         btrfs_end_transaction(trans, root);
2401                         iput(inode);
2402                         continue;
2403                 }
2404
2405                 /* if we have links, this was a truncate, let's do that */
2406                 if (inode->i_nlink) {
2407                         if (!S_ISREG(inode->i_mode)) {
2408                                 WARN_ON(1);
2409                                 iput(inode);
2410                                 continue;
2411                         }
2412                         nr_truncate++;
2413                         ret = btrfs_truncate(inode);
2414                 } else {
2415                         nr_unlink++;
2416                 }
2417
2418                 /* this will do delete_inode and everything for us */
2419                 iput(inode);
2420                 if (ret)
2421                         goto out;
2422         }
2423         root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
2424
2425         if (root->orphan_block_rsv)
2426                 btrfs_block_rsv_release(root, root->orphan_block_rsv,
2427                                         (u64)-1);
2428
2429         if (root->orphan_block_rsv || root->orphan_item_inserted) {
2430                 trans = btrfs_join_transaction(root);
2431                 if (!IS_ERR(trans))
2432                         btrfs_end_transaction(trans, root);
2433         }
2434
2435         if (nr_unlink)
2436                 printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
2437         if (nr_truncate)
2438                 printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
2439
2440 out:
2441         if (ret)
2442                 printk(KERN_CRIT "btrfs: could not do orphan cleanup %d\n", ret);
2443         btrfs_free_path(path);
2444         return ret;
2445 }
2446
2447 /*
2448  * very simple check to peek ahead in the leaf looking for xattrs.  If we
2449  * don't find any xattrs, we know there can't be any acls.
2450  *
2451  * slot is the slot the inode is in, objectid is the objectid of the inode
2452  */
2453 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
2454                                           int slot, u64 objectid)
2455 {
2456         u32 nritems = btrfs_header_nritems(leaf);
2457         struct btrfs_key found_key;
2458         int scanned = 0;
2459
2460         slot++;
2461         while (slot < nritems) {
2462                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2463
2464                 /* we found a different objectid, there must not be acls */
2465                 if (found_key.objectid != objectid)
2466                         return 0;
2467
2468                 /* we found an xattr, assume we've got an acl */
2469                 if (found_key.type == BTRFS_XATTR_ITEM_KEY)
2470                         return 1;
2471
2472                 /*
2473                  * we found a key greater than an xattr key, there can't
2474                  * be any acls later on
2475                  */
2476                 if (found_key.type > BTRFS_XATTR_ITEM_KEY)
2477                         return 0;
2478
2479                 slot++;
2480                 scanned++;
2481
2482                 /*
2483                  * it goes inode, inode backrefs, xattrs, extents,
2484                  * so if there are a ton of hard links to an inode there can
2485                  * be a lot of backrefs.  Don't waste time searching too hard,
2486                  * this is just an optimization
2487                  */
2488                 if (scanned >= 8)
2489                         break;
2490         }
2491         /* we hit the end of the leaf before we found an xattr or
2492          * something larger than an xattr.  We have to assume the inode
2493          * has acls
2494          */
2495         return 1;
2496 }
2497
2498 /*
2499  * read an inode from the btree into the in-memory inode
2500  */
2501 static void btrfs_read_locked_inode(struct inode *inode)
2502 {
2503         struct btrfs_path *path;
2504         struct extent_buffer *leaf;
2505         struct btrfs_inode_item *inode_item;
2506         struct btrfs_timespec *tspec;
2507         struct btrfs_root *root = BTRFS_I(inode)->root;
2508         struct btrfs_key location;
2509         int maybe_acls;
2510         u32 rdev;
2511         int ret;
2512         bool filled = false;
2513
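        /*
         * the delayed-inode code may already hold everything we need in
         * memory; if so, skip reading the inode item out of the leaf
         */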
2514         ret = btrfs_fill_inode(inode, &rdev);
2515         if (!ret)
2516                 filled = true;
2517
2518         path = btrfs_alloc_path();
2519         BUG_ON(!path);
2520         path->leave_spinning = 1;
2521         memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
2522
2523         ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
2524         if (ret)
2525                 goto make_bad;
2526
2527         leaf = path->nodes[0];
2528
2529         if (filled)
2530                 goto cache_acl;
2531
2532         inode_item = btrfs_item_ptr(leaf, path->slots[0],
2533                                     struct btrfs_inode_item);
2534         if (!leaf->map_token)
2535                 map_private_extent_buffer(leaf, (unsigned long)inode_item,
2536                                           sizeof(struct btrfs_inode_item),
2537                                           &leaf->map_token, &leaf->kaddr,
2538                                           &leaf->map_start, &leaf->map_len,
2539                                           KM_USER1);
2540
2541         inode->i_mode = btrfs_inode_mode(leaf, inode_item);
2542         inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
2543         inode->i_uid = btrfs_inode_uid(leaf, inode_item);
2544         inode->i_gid = btrfs_inode_gid(leaf, inode_item);
2545         btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
2546
2547         tspec = btrfs_inode_atime(inode_item);
2548         inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2549         inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2550
2551         tspec = btrfs_inode_mtime(inode_item);
2552         inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2553         inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2554
2555         tspec = btrfs_inode_ctime(inode_item);
2556         inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2557         inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2558
2559         inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
2560         BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
2561         BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
2562         inode->i_generation = BTRFS_I(inode)->generation;
2563         inode->i_rdev = 0;
2564         rdev = btrfs_inode_rdev(leaf, inode_item);
2565
2566         BTRFS_I(inode)->index_cnt = (u64)-1;
2567         BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
2568 cache_acl:
2569         /*
2570          * try to precache a NULL acl entry for files that don't have
2571          * any xattrs or acls
2572          */
2573         maybe_acls = acls_after_inode_item(leaf, path->slots[0],
2574                                            btrfs_ino(inode));
2575         if (!maybe_acls)
2576                 cache_no_acl(inode);
2577
2578         if (leaf->map_token) {
2579                 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
2580                 leaf->map_token = NULL;
2581         }
2582
2583         btrfs_free_path(path);
2584
2585         switch (inode->i_mode & S_IFMT) {
2586         case S_IFREG:
2587                 inode->i_mapping->a_ops = &btrfs_aops;
2588                 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2589                 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
2590                 inode->i_fop = &btrfs_file_operations;
2591                 inode->i_op = &btrfs_file_inode_operations;
2592                 break;
2593         case S_IFDIR:
2594                 inode->i_fop = &btrfs_dir_file_operations;
2595                 if (root == root->fs_info->tree_root)
2596                         inode->i_op = &btrfs_dir_ro_inode_operations;
2597                 else
2598                         inode->i_op = &btrfs_dir_inode_operations;
2599                 break;
2600         case S_IFLNK:
2601                 inode->i_op = &btrfs_symlink_inode_operations;
2602                 inode->i_mapping->a_ops = &btrfs_symlink_aops;
2603                 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2604                 break;
2605         default:
2606                 inode->i_op = &btrfs_special_inode_operations;
2607                 init_special_inode(inode, inode->i_mode, rdev);
2608                 break;
2609         }
2610
2611         btrfs_update_iflags(inode);
2612         return;
2613
2614 make_bad:
2615         btrfs_free_path(path);
2616         make_bad_inode(inode);
2617 }
2618
2619 /*
2620  * given a leaf and an inode, copy the inode fields into the leaf
2621  */
2622 static void fill_inode_item(struct btrfs_trans_handle *trans,
2623                             struct extent_buffer *leaf,
2624                             struct btrfs_inode_item *item,
2625                             struct inode *inode)
2626 {
2627         if (!leaf->map_token)
2628                 map_private_extent_buffer(leaf, (unsigned long)item,
2629                                           sizeof(struct btrfs_inode_item),
2630                                           &leaf->map_token, &leaf->kaddr,
2631                                           &leaf->map_start, &leaf->map_len,
2632                                           KM_USER1);
2633
2634         btrfs_set_inode_uid(leaf, item, inode->i_uid);
2635         btrfs_set_inode_gid(leaf, item, inode->i_gid);
2636         btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
2637         btrfs_set_inode_mode(leaf, item, inode->i_mode);
2638         btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
2639
2640         btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
2641                                inode->i_atime.tv_sec);
2642         btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
2643                                 inode->i_atime.tv_nsec);
2644
2645         btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
2646                                inode->i_mtime.tv_sec);
2647         btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
2648                                 inode->i_mtime.tv_nsec);
2649
2650         btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
2651                                inode->i_ctime.tv_sec);
2652         btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
2653                                 inode->i_ctime.tv_nsec);
2654
2655         btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
2656         btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
2657         btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
2658         btrfs_set_inode_transid(leaf, item, trans->transid);
2659         btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
2660         btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
2661         btrfs_set_inode_block_group(leaf, item, 0);
2662
2663         if (leaf->map_token) {
2664                 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
2665                 leaf->map_token = NULL;
2666         }
2667 }
2668
2669 /*
2670  * copy everything in the in-memory inode into the btree.
2671  */
2672 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2673                                 struct btrfs_root *root, struct inode *inode)
2674 {
2675         struct btrfs_inode_item *inode_item;
2676         struct btrfs_path *path;
2677         struct extent_buffer *leaf;
2678         int ret;
2679
2680         /*
2681          * If root is the tree root, this inode is used to store free
2682          * space information.  These inodes are updated when the
2683          * transaction commits, so their updates must not be delayed,
2684          * or a deadlock will occur.
2685          */
2686         if (!is_free_space_inode(root, inode)) {
2687                 ret = btrfs_delayed_update_inode(trans, root, inode);
2688                 if (!ret)
2689                         btrfs_set_inode_last_trans(trans, inode);
2690                 return ret;
2691         }
2692
2693         path = btrfs_alloc_path();
2694         if (!path)
2695                 return -ENOMEM;
2696
2697         path->leave_spinning = 1;
2698         ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
2699                                  1);
2700         if (ret) {
2701                 if (ret > 0)
2702                         ret = -ENOENT;
2703                 goto failed;
2704         }
2705
2706         btrfs_unlock_up_safe(path, 1);
2707         leaf = path->nodes[0];
2708         inode_item = btrfs_item_ptr(leaf, path->slots[0],
2709                                     struct btrfs_inode_item);
2710
2711         fill_inode_item(trans, leaf, inode_item, inode);
2712         btrfs_mark_buffer_dirty(leaf);
2713         btrfs_set_inode_last_trans(trans, inode);
2714         ret = 0;
2715 failed:
2716         btrfs_free_path(path);
2717         return ret;
2718 }
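
/*
 * Hedged usage sketch (an assumption, not code from this file): a
 * typical btrfs_update_inode() caller mutates the in-memory inode first
 * and then pushes the change into the btree under an open transaction.
 * The helper name below is hypothetical.
 */
static inline int btrfs_example_touch_ctime(struct btrfs_root *root,
                                            struct inode *inode)
{
        struct btrfs_trans_handle *trans;
        int ret;

        trans = btrfs_start_transaction(root, 1);
        if (IS_ERR(trans))
                return PTR_ERR(trans);

        inode->i_ctime = CURRENT_TIME;
        ret = btrfs_update_inode(trans, root, inode);
        btrfs_end_transaction(trans, root);
        return ret;
}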
2719
2720 /*
2721  * unlink helper that gets used here in inode.c and in the tree logging
2722  * recovery code.  It removes a link in a directory with a given name, and
2723  * also drops the back refs from the inode to the directory.
2724  */
2725 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2726                                 struct btrfs_root *root,
2727                                 struct inode *dir, struct inode *inode,
2728                                 const char *name, int name_len)
2729 {
2730         struct btrfs_path *path;
2731         int ret = 0;
2732         struct extent_buffer *leaf;
2733         struct btrfs_dir_item *di;
2734         struct btrfs_key key;
2735         u64 index;
2736         u64 ino = btrfs_ino(inode);
2737         u64 dir_ino = btrfs_ino(dir);
2738
2739         path = btrfs_alloc_path();
2740         if (!path) {
2741                 ret = -ENOMEM;
2742                 goto out;
2743         }
2744
2745         path->leave_spinning = 1;
2746         di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
2747                                     name, name_len, -1);
2748         if (IS_ERR(di)) {
2749                 ret = PTR_ERR(di);
2750                 goto err;
2751         }
2752         if (!di) {
2753                 ret = -ENOENT;
2754                 goto err;
2755         }
2756         leaf = path->nodes[0];
2757         btrfs_dir_item_key_to_cpu(leaf, di, &key);
2758         ret = btrfs_delete_one_dir_name(trans, root, path, di);
2759         if (ret)
2760                 goto err;
2761         btrfs_release_path(path);
2762
2763         ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
2764                                   dir_ino, &index);
2765         if (ret) {
2766                 printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
2767                        "inode %llu parent %llu\n", name_len, name,
2768                        (unsigned long long)ino, (unsigned long long)dir_ino);
2769                 goto err;
2770         }
2771
2772         ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
2773         if (ret)
2774                 goto err;
2775
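        /*
         * -ENOENT from the log updates just means this name was never
         * logged (or the log tree has already gone away); only other
         * errors are fatal here.
         */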
2776         ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
2777                                          inode, dir_ino);
2778         BUG_ON(ret != 0 && ret != -ENOENT);
2779
2780         ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
2781                                            dir, index);
2782         if (ret == -ENOENT)
2783                 ret = 0;
2784 err:
2785         btrfs_free_path(path);
2786         if (ret)
2787                 goto out;
2788
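        /*
         * btrfs counts each name in a directory's i_size twice: once for
         * the dir item and once for the dir index item, hence the
         * name_len * 2 here.
         */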
2789         btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2790         inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2791         btrfs_update_inode(trans, root, dir);
2792 out:
2793         return ret;
2794 }
2795
2796 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2797                        struct btrfs_root *root,
2798                        struct inode *dir, struct inode *inode,
2799                        const char *name, int name_len)
2800 {
2801         int ret;
2802         ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
2803         if (!ret) {
2804                 btrfs_drop_nlink(inode);
2805                 ret = btrfs_update_inode(trans, root, inode);
2806         }
2807         return ret;
2808 }
2809
2810
2811 /* helper to check if there is any shared block in the path */
2812 static int check_path_shared(struct btrfs_root *root,
2813                              struct btrfs_path *path)
2814 {
2815         struct extent_buffer *eb;
2816         int level;
2817         u64 refs = 1;
2818
2819         for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
2820                 int ret;
2821
2822                 if (!path->nodes[level])
2823                         break;
2824                 eb = path->nodes[level];
2825                 if (!btrfs_block_can_be_shared(root, eb))
2826                         continue;
2827                 ret = btrfs_lookup_extent_info(NULL, root, eb->start, eb->len,
2828                                                &refs, NULL);
2829                 if (refs > 1)
2830                         return 1;
2831         }
2832         return 0;
2833 }
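
/*
 * Hedged sketch (an assumption, not from this file): a minimal
 * check_path_shared() caller.  Any search that populates @path, such as
 * the hypothetical commit-root lookup below, can then ask whether the
 * blocks on that path are shared (e.g. with a snapshot).
 */
static inline int btrfs_example_key_is_shared(struct btrfs_root *root,
                                              struct btrfs_path *path,
                                              struct btrfs_key *key)
{
        int ret;

        ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
        if (ret < 0)
                return ret;
        ret = check_path_shared(root, path);
        btrfs_release_path(path);
        return ret;
}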
2834
2835 /*
2836  * helper to start transaction for unlink and rmdir.
2837  *
2838  * unlink and rmdir are special in btrfs: they do not always free space,
2839  * so in the enospc case we must make sure they will free space before
2840  * allowing them to use the global metadata reservation.
2841  */
2842 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
2843                                                        struct dentry *dentry)
2844 {
2845         struct btrfs_trans_handle *trans;
2846         struct btrfs_root *root = BTRFS_I(dir)->root;
2847         struct btrfs_path *path;
2848         struct btrfs_inode_ref *ref;
2849         struct btrfs_dir_item *di;
2850         struct inode *inode = dentry->d_inode;
2851         u64 index;
2852         int check_link = 1;
2853         int err = -ENOSPC;
2854         int ret;
2855         u64 ino = btrfs_ino(inode);
2856         u64 dir_ino = btrfs_ino(dir);
2857
2858         trans = btrfs_start_transaction(root, 10);
2859         if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
2860                 return trans;
2861
2862         if (ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
2863                 return ERR_PTR(-ENOSPC);
2864
2865         /* check if someone else holds a reference */
2866         if (S_ISDIR(inode->i_mode) && atomic_read(&inode->i_count) > 1)
2867                 return ERR_PTR(-ENOSPC);
2868
2869         if (atomic_read(&inode->i_count) > 2)
2870                 return ERR_PTR(-ENOSPC);
2871
2872         if (xchg(&root->fs_info->enospc_unlink, 1))
2873                 return ERR_PTR(-ENOSPC);
2874
2875         path = btrfs_alloc_path();
2876         if (!path) {
2877                 root->fs_info->enospc_unlink = 0;
2878                 return ERR_PTR(-ENOMEM);
2879         }
2880
2881         trans = btrfs_start_transaction(root, 0);
2882         if (IS_ERR(trans)) {
2883                 btrfs_free_path(path);
2884                 root->fs_info->enospc_unlink = 0;
2885                 return trans;
2886         }
2887
2888         path->skip_locking = 1;
2889         path->search_commit_root = 1;
2890
2891         ret = btrfs_lookup_inode(trans, root, path,
2892                                 &BTRFS_I(dir)->location, 0);
2893         if (ret < 0) {
2894                 err = ret;
2895                 goto out;
2896         }
2897         if (ret == 0) {
2898                 if (check_path_shared(root, path))
2899                         goto out;
2900         } else {
2901                 check_link = 0;
2902         }
2903         btrfs_release_path(path);
2904
2905         ret = btrfs_lookup_inode(trans, root, path,
2906                                 &BTRFS_I(inode)->location, 0);
2907         if (ret < 0) {
2908                 err = ret;
2909                 goto out;
2910         }
2911         if (ret == 0) {
2912                 if (check_path_shared(root, path))
2913                         goto out;
2914         } else {
2915                 check_link = 0;
2916         }
2917         btrfs_release_path(path);
2918
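        /*
         * If the inode item was in the commit root and this is a regular
         * file, position the path past its last file extent (offset
         * (u64)-1 never matches exactly, hence the BUG_ON below) and
         * check those blocks for sharing too.
         */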
2919         if (ret == 0 && S_ISREG(inode->i_mode)) {
2920                 ret = btrfs_lookup_file_extent(trans, root, path,
2921                                                ino, (u64)-1, 0);
2922                 if (ret < 0) {
2923                         err = ret;
2924                         goto out;
2925                 }
2926                 BUG_ON(ret == 0);
2927                 if (check_path_shared(root, path))
2928                         goto out;
2929                 btrfs_release_path(path);
2930         }
2931
2932         if (!check_link) {
2933                 err = 0;
2934                 goto out;
2935         }
2936
2937         di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
2938                                 dentry->d_name.name, dentry->d_name.len, 0);
2939         if (IS_ERR(di)) {
2940                 err = PTR_ERR(di);
2941                 goto out;
2942         }
2943         if (di) {
2944                 if (check_path_shared(root, path))
2945                         goto out;
2946         } else {
2947                 err = 0;
2948                 goto out;
2949         }
2950         btrfs_release_path(path);
2951
2952         ref = btrfs_lookup_inode_ref(trans, root, path,
2953                                 dentry->d_name.name, dentry->d_name.len,
2954                                 ino, dir_ino, 0);
2955         if (IS_ERR(ref)) {
2956                 err = PTR_ERR(ref);
2957                 goto out;
2958         }
2959         BUG_ON(!ref);
2960         if (check_path_shared(root, path))
2961                 goto out;
2962         index = btrfs_inode_ref_index(path->nodes[0], ref);
2963         btrfs_release_path(path);
2964
2965         /*
2966          * This is a commit root search: if we can look up the inode item
2967          * and the other related items in the commit root, the transaction
2968          * that created the dir/file has been committed, and the dir index
2969          * item whose insertion we delayed has also made it into the commit
2970          * root, so we needn't worry about the delayed insertion of the dir
2971          * index item here.
2972          */
2973         di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino, index,
2974                                 dentry->d_name.name, dentry->d_name.len, 0);
2975         if (IS_ERR(di)) {
2976                 err = PTR_ERR(di);
2977                 goto out;
2978         }
2979         BUG_ON(!di);
2980         if (check_path_shared(root, path))
2981                 goto out;
2982
2983         err = 0;
2984 out:
2985         btrfs_free_path(path);
2986         if (err) {
2987                 btrfs_end_transaction(trans, root);
2988                 root->fs_info->enospc_unlink = 0;
2989                 return ERR_PTR(err);
2990         }
2991
2992         trans->block_rsv = &root->fs_info->global_block_rsv;
2993         return trans;
2994 }
2995
2996 static void __unlink_end_trans(struct btrfs_trans_handle *trans,
2997                                struct btrfs_root *root)
2998 {
2999         if (trans->block_rsv == &root->fs_info->global_block_rsv) {
3000                 BUG_ON(!root->fs_info->enospc_unlink);
3001                 root->fs_info->enospc_unlink = 0;
3002         }
3003         btrfs_end_transaction_throttle(trans, root);
3004 }
3005
3006 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
3007 {
3008         struct btrfs_root *root = BTRFS_I(dir)->root;
3009         struct btrfs_trans_handle *trans;
3010         struct inode *inode = dentry->d_inode;
3011         int ret;
3012         unsigned long nr = 0;
3013
3014         trans = __unlink_start_trans(dir, dentry);
3015         if (IS_ERR(trans))
3016                 return PTR_ERR(trans);
3017
3018         btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
3019
3020         ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
3021                                  dentry->d_name.name, dentry->d_name.len);
3022         BUG_ON(ret);
3023
3024         if (inode->i_nlink == 0) {
3025                 ret = btrfs_orphan_add(trans, inode);
3026                 BUG_ON(ret);
3027         }
3028
3029         nr = trans->blocks_used;
3030         __unlink_end_trans(trans, root);
3031         btrfs_btree_balance_dirty(root, nr);
3032         return ret;
3033 }
3034
3035 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
3036                         struct btrfs_root *root,
3037                         struct inode *dir, u64 objectid,
3038                         const char *name, int name_len)
3039 {
3040         struct btrfs_path *path;
3041         struct extent_buffer *leaf;
3042         struct btrfs_dir_item *di;
3043         struct btrfs_key key;
3044         u64 index;
3045         int ret;
3046         u64 dir_ino = btrfs_ino(dir);
3047
3048         path = btrfs_alloc_path();
3049         if (!path)
3050                 return -ENOMEM;
3051
3052         di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
3053                                    name, name_len, -1);
3054         BUG_ON(IS_ERR_OR_NULL(di));
3055
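        /*
         * A subvolume's directory entry refers to a root item in the tree
         * of tree roots instead of a plain inode, so the key type here is
         * expected to be BTRFS_ROOT_ITEM_KEY.
         */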
3056         leaf = path->nodes[0];
3057         btrfs_dir_item_key_to_cpu(leaf, di, &key);
3058         WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
3059         ret = btrfs_delete_one_dir_name(trans, root, path, di);
3060         BUG_ON(ret);
3061         btrfs_release_path(path);
3062
3063         ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
3064                                  objectid, root->root_key.objectid,
3065                                  dir_ino, &index, name, name_len);
3066         if (ret < 0) {
3067                 BUG_ON(ret != -ENOENT);
3068                 di = btrfs_search_dir_index_item(root, path, dir_ino,
3069                                                  name, name_len);
3070                 BUG_ON(IS_ERR_OR_NULL(di));
3071
3072                 leaf = path->nodes[0];
3073                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3074                 btrfs_release_path(path);
3075                 index = key.offset;
3076         }
3077         btrfs_release_path(path);
3078
3079         ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
3080         BUG_ON(ret);
3081
3082         btrfs_i_size_write(dir, dir->i_size - name_len * 2);
3083         dir->i_mtime = dir->i_ctime = CURRENT_TIME;
3084         ret = btrfs_update_inode(trans, root, dir);
3085         BUG_ON(ret);
3086
3087         btrfs_free_path(path);
3088         return 0;
3089 }
3090
3091 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
3092 {
3093         struct inode *inode = dentry->d_inode;
3094         int err = 0;
3095         struct btrfs_root *root = BTRFS_I(dir)->root;
3096         struct btrfs_trans_handle *trans;
3097         unsigned long nr = 0;
3098
3099         if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
3100             btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
3101                 return -ENOTEMPTY;
3102
3103         trans = __unlink_start_trans(dir, dentry);
3104         if (IS_ERR(trans))
3105                 return PTR_ERR(trans);
3106
3107         if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
3108                 err = btrfs_unlink_subvol(trans, root, dir,
3109                                           BTRFS_I(inode)->location.objectid,
3110                                           dentry->d_name.name,
3111                                           dentry->d_name.len);
3112                 goto out;
3113         }
3114
3115         err = btrfs_orphan_add(trans, inode);
3116         if (err)
3117                 goto out;
3118
3119         /* now the directory is empty */
3120         err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
3121                                  dentry->d_name.name, dentry->d_name.len);
3122         if (!err)
3123                 btrfs_i_size_write(inode, 0);
3124 out:
3125         nr = trans->blocks_used;
3126         __unlink_end_trans(trans, root);
3127         btrfs_btree_balance_dirty(root, nr);
3128
3129         return err;
3130 }
3131
3132 /*
3133  * this can truncate away extent items, csum items and directory items.
3134  * It starts at a high offset and removes keys until it can't find
3135  * any higher than new_size
3136  *
3137  * csum items that cross the new i_size are truncated to the new size
3138  * as well.
3139  *
3140  * min_type is the minimum key type to truncate down to.  If set to 0, this
3141  * will kill all the items on this inode, including the INODE_ITEM_KEY.
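 *
 * For example (an illustration, not taken from a specific caller): passing
 * BTRFS_EXTENT_DATA_KEY as min_type drops the file extent and csum items
 * but keeps the inode item, inode refs and xattrs.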
3142  */
3143 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
3144                                struct btrfs_root *root,