/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "volumes.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "compression.h"
#include "locking.h"

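/*
 * args for inode lookups: the inode number and the root the inode
 * lives in
 */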
struct btrfs_iget_args {
        u64 ino;
        struct btrfs_root *root;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_dir_ro_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct address_space_operations btrfs_symlink_aops;
static struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;

static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;

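/*
 * map the S_IFMT bits of an inode mode to the BTRFS_FT_* file type
 * stored in directory items
 */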
#define S_SHIFT 12
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
        [S_IFREG >> S_SHIFT]    = BTRFS_FT_REG_FILE,
        [S_IFDIR >> S_SHIFT]    = BTRFS_FT_DIR,
        [S_IFCHR >> S_SHIFT]    = BTRFS_FT_CHRDEV,
        [S_IFBLK >> S_SHIFT]    = BTRFS_FT_BLKDEV,
        [S_IFIFO >> S_SHIFT]    = BTRFS_FT_FIFO,
        [S_IFSOCK >> S_SHIFT]   = BTRFS_FT_SOCK,
        [S_IFLNK >> S_SHIFT]    = BTRFS_FT_SYMLINK,
};

static void btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
static noinline int cow_file_range(struct inode *inode,
                                   struct page *locked_page,
                                   u64 start, u64 end, int *page_started,
                                   unsigned long *nr_written, int unlock);

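/*
 * initialize the ACLs and the security xattr for a newly created
 * inode, based on the parent directory
 */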
static int btrfs_init_inode_security(struct inode *inode, struct inode *dir)
{
        int err;

        err = btrfs_init_acl(inode, dir);
        if (!err)
                err = btrfs_xattr_security_init(inode, dir);
        return err;
}

/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root, struct inode *inode,
                                u64 start, size_t size, size_t compressed_size,
                                struct page **compressed_pages)
{
        struct btrfs_key key;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct page *page = NULL;
        char *kaddr;
        unsigned long ptr;
        struct btrfs_file_extent_item *ei;
        int err = 0;
        int ret;
        size_t cur_size = size;
        size_t datasize;
        unsigned long offset;
        int use_compress = 0;

        if (compressed_size && compressed_pages) {
                use_compress = 1;
                cur_size = compressed_size;
        }

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        path->leave_spinning = 1;
        btrfs_set_trans_block_group(trans, inode);

        key.objectid = inode->i_ino;
        key.offset = start;
        btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
        datasize = btrfs_file_extent_calc_inline_size(cur_size);

        inode_add_bytes(inode, size);
        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                      datasize);
        if (ret) {
                err = ret;
                goto fail;
        }
        leaf = path->nodes[0];
        ei = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);
        btrfs_set_file_extent_generation(leaf, ei, trans->transid);
        btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
        btrfs_set_file_extent_encryption(leaf, ei, 0);
        btrfs_set_file_extent_other_encoding(leaf, ei, 0);
        btrfs_set_file_extent_ram_bytes(leaf, ei, size);
        ptr = btrfs_file_extent_inline_start(ei);

        if (use_compress) {
                struct page *cpage;
                int i = 0;
                while (compressed_size > 0) {
                        cpage = compressed_pages[i];
                        cur_size = min_t(unsigned long, compressed_size,
                                       PAGE_CACHE_SIZE);

                        kaddr = kmap_atomic(cpage, KM_USER0);
                        write_extent_buffer(leaf, kaddr, ptr, cur_size);
                        kunmap_atomic(kaddr, KM_USER0);

                        i++;
                        ptr += cur_size;
                        compressed_size -= cur_size;
                }
                btrfs_set_file_extent_compression(leaf, ei,
                                                  BTRFS_COMPRESS_ZLIB);
        } else {
                page = find_get_page(inode->i_mapping,
                                     start >> PAGE_CACHE_SHIFT);
                btrfs_set_file_extent_compression(leaf, ei, 0);
                kaddr = kmap_atomic(page, KM_USER0);
                offset = start & (PAGE_CACHE_SIZE - 1);
                write_extent_buffer(leaf, kaddr + offset, ptr, size);
                kunmap_atomic(kaddr, KM_USER0);
                page_cache_release(page);
        }
        btrfs_mark_buffer_dirty(leaf);
        btrfs_free_path(path);

        BTRFS_I(inode)->disk_i_size = inode->i_size;
        btrfs_update_inode(trans, root, inode);
        return 0;
fail:
        btrfs_free_path(path);
        return err;
}

/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct inode *inode, u64 start, u64 end,
                                 size_t compressed_size,
                                 struct page **compressed_pages)
{
        u64 isize = i_size_read(inode);
        u64 actual_end = min(end + 1, isize);
        u64 inline_len = actual_end - start;
        u64 aligned_end = (end + root->sectorsize - 1) &
                        ~((u64)root->sectorsize - 1);
        u64 hint_byte;
        u64 data_len = inline_len;
        int ret;

        if (compressed_size)
                data_len = compressed_size;

        if (start > 0 ||
            actual_end >= PAGE_CACHE_SIZE ||
            data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
            (!compressed_size &&
            (actual_end & (root->sectorsize - 1)) == 0) ||
            end + 1 < isize ||
            data_len > root->fs_info->max_inline) {
                return 1;
        }

        ret = btrfs_drop_extents(trans, root, inode, start,
                                 aligned_end, aligned_end, start,
                                 &hint_byte, 1);
        BUG_ON(ret);

        if (isize > actual_end)
                inline_len = min_t(u64, isize, actual_end);
        ret = insert_inline_extent(trans, root, inode, start,
                                   inline_len, compressed_size,
                                   compressed_pages);
        BUG_ON(ret);
        btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
        return 0;
}

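/*
 * a single extent produced by phase one of compression, waiting for
 * phase two to allocate disk space and submit the IO.  pages is NULL
 * when the compression code fell back to uncompressed IO.
 */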
struct async_extent {
        u64 start;
        u64 ram_size;
        u64 compressed_size;
        struct page **pages;
        unsigned long nr_pages;
        struct list_head list;
};

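/*
 * a range of a file handed to the async compression threads.  The
 * work item runs compress_file_range first and then, in queue order,
 * submit_compressed_extents.
 */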
struct async_cow {
        struct inode *inode;
        struct btrfs_root *root;
        struct page *locked_page;
        u64 start;
        u64 end;
        struct list_head extents;
        struct btrfs_work work;
};

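/*
 * queue an extent on the async_cow list for phase two.  pages == NULL
 * and compressed_size == 0 mark the uncompressed fallback case.  Note
 * that the allocation below is assumed to succeed; there is no NULL
 * check.
 */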
static noinline int add_async_extent(struct async_cow *cow,
                                     u64 start, u64 ram_size,
                                     u64 compressed_size,
                                     struct page **pages,
                                     unsigned long nr_pages)
{
        struct async_extent *async_extent;

        async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
        async_extent->start = start;
        async_extent->ram_size = ram_size;
        async_extent->compressed_size = compressed_size;
        async_extent->pages = pages;
        async_extent->nr_pages = nr_pages;
        list_add_tail(&async_extent->list, &cow->extents);
        return 0;
}

/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that pdflush sent them down.
 */
static noinline int compress_file_range(struct inode *inode,
                                        struct page *locked_page,
                                        u64 start, u64 end,
                                        struct async_cow *async_cow,
                                        int *num_added)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
        u64 num_bytes;
        u64 orig_start;
        u64 disk_num_bytes;
        u64 blocksize = root->sectorsize;
        u64 actual_end;
        u64 isize = i_size_read(inode);
        int ret = 0;
        struct page **pages = NULL;
        unsigned long nr_pages;
        unsigned long nr_pages_ret = 0;
        unsigned long total_compressed = 0;
        unsigned long total_in = 0;
        unsigned long max_compressed = 128 * 1024;
        unsigned long max_uncompressed = 128 * 1024;
        int i;
        int will_compress;

        orig_start = start;

        actual_end = min_t(u64, isize, end + 1);
again:
        will_compress = 0;
        nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
        nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);

        /*
         * we don't want to send crud past the end of i_size through
         * compression, that's just a waste of CPU time.  So, if the
         * end of the file is before the start of our current
         * requested range of bytes, we bail out to the uncompressed
         * cleanup code that can deal with all of this.
         *
         * It isn't really the fastest way to fix things, but this is a
         * very uncommon corner.
         */
        if (actual_end <= start)
                goto cleanup_and_bail_uncompressed;

        total_compressed = actual_end - start;

        /* we want to make sure that amount of ram required to uncompress
         * an extent is reasonable, so we limit the total size in ram
         * of a compressed extent to 128k.  This is a crucial number
         * because it also controls how easily we can spread reads across
         * cpus for decompression.
         *
         * We also want to make sure the amount of IO required to do
         * a random read is reasonably small, so we limit the size of
         * a compressed extent to 128k.
         */
        total_compressed = min(total_compressed, max_uncompressed);
        num_bytes = (end - start + blocksize) & ~(blocksize - 1);
        num_bytes = max(blocksize, num_bytes);
        disk_num_bytes = num_bytes;
        total_in = 0;
        ret = 0;

        /*
         * we do compression for mount -o compress and when the
         * inode has not been flagged as nocompress.  This flag can
         * change at any time if we discover bad compression ratios.
         */
        if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
            btrfs_test_opt(root, COMPRESS)) {
                WARN_ON(pages);
                pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);

                ret = btrfs_zlib_compress_pages(inode->i_mapping, start,
                                                total_compressed, pages,
                                                nr_pages, &nr_pages_ret,
                                                &total_in,
                                                &total_compressed,
                                                max_compressed);

                if (!ret) {
                        unsigned long offset = total_compressed &
                                (PAGE_CACHE_SIZE - 1);
                        struct page *page = pages[nr_pages_ret - 1];
                        char *kaddr;

                        /* zero the tail end of the last page, we might be
                         * sending it down to disk
                         */
                        if (offset) {
                                kaddr = kmap_atomic(page, KM_USER0);
                                memset(kaddr + offset, 0,
                                       PAGE_CACHE_SIZE - offset);
                                kunmap_atomic(kaddr, KM_USER0);
                        }
                        will_compress = 1;
                }
        }
        if (start == 0) {
                trans = btrfs_join_transaction(root, 1);
                BUG_ON(!trans);
                btrfs_set_trans_block_group(trans, inode);

                /* let's try to make an inline extent */
                if (ret || total_in < (actual_end - start)) {
                        /* we didn't compress the entire range, try
                         * to make an uncompressed inline extent.
                         */
                        ret = cow_file_range_inline(trans, root, inode,
                                                    start, end, 0, NULL);
                } else {
                        /* try making a compressed inline extent */
                        ret = cow_file_range_inline(trans, root, inode,
                                                    start, end,
                                                    total_compressed, pages);
                }
                btrfs_end_transaction(trans, root);
                if (ret == 0) {
                        /*
                         * inline extent creation worked, we don't need
                         * to create any more async work items.  Unlock
                         * and free up our temp pages.
                         */
                        extent_clear_unlock_delalloc(inode,
                                                     &BTRFS_I(inode)->io_tree,
                                                     start, end, NULL, 1, 0,
                                                     0, 1, 1, 1, 0);
                        ret = 0;
                        goto free_pages_out;
                }
        }

        if (will_compress) {
                /*
                 * we aren't doing an inline extent; round the compressed
                 * size up to a block size boundary so the allocator does
                 * sane things
                 */
                total_compressed = (total_compressed + blocksize - 1) &
                        ~(blocksize - 1);

                /*
                 * one last check to make sure the compression is really a
                 * win, compare the page count read with the blocks on disk
                 */
                total_in = (total_in + PAGE_CACHE_SIZE - 1) &
                        ~(PAGE_CACHE_SIZE - 1);
                if (total_compressed >= total_in) {
                        will_compress = 0;
                } else {
                        disk_num_bytes = total_compressed;
                        num_bytes = total_in;
                }
        }
        if (!will_compress && pages) {
                /*
                 * the compression code ran but failed to make things smaller,
                 * free any pages it allocated and our page pointer array
                 */
                for (i = 0; i < nr_pages_ret; i++) {
                        WARN_ON(pages[i]->mapping);
                        page_cache_release(pages[i]);
                }
                kfree(pages);
                pages = NULL;
                total_compressed = 0;
                nr_pages_ret = 0;

                /* flag the file so we don't compress in the future */
                BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
        }
        if (will_compress) {
                *num_added += 1;

                /* the async work queues will take care of doing actual
                 * allocation on disk for these compressed pages,
                 * and will submit them to the elevator.
                 */
                add_async_extent(async_cow, start, num_bytes,
                                 total_compressed, pages, nr_pages_ret);

                if (start + num_bytes < end && start + num_bytes < actual_end) {
                        start += num_bytes;
                        pages = NULL;
                        cond_resched();
                        goto again;
                }
        } else {
cleanup_and_bail_uncompressed:
                /*
                 * No compression, but we still need to write the pages in
                 * the file we've been given so far.  redirty the locked
                 * page if it corresponds to our extent and set things up
                 * for the async work queue to run cow_file_range to do
                 * the normal delalloc dance
                 */
                if (page_offset(locked_page) >= start &&
                    page_offset(locked_page) <= end) {
                        __set_page_dirty_nobuffers(locked_page);
                        /* unlocked later on in the async handlers */
                }
                add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0);
                *num_added += 1;
        }

out:
        return 0;

free_pages_out:
        for (i = 0; i < nr_pages_ret; i++) {
                WARN_ON(pages[i]->mapping);
                page_cache_release(pages[i]);
        }
        kfree(pages);

        goto out;
}

/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
static noinline int submit_compressed_extents(struct inode *inode,
                                              struct async_cow *async_cow)
{
        struct async_extent *async_extent;
        u64 alloc_hint = 0;
        struct btrfs_trans_handle *trans;
        struct btrfs_key ins;
        struct extent_map *em;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct extent_io_tree *io_tree;
        int ret;

        if (list_empty(&async_cow->extents))
                return 0;

        trans = btrfs_join_transaction(root, 1);

        while (!list_empty(&async_cow->extents)) {
                async_extent = list_entry(async_cow->extents.next,
                                          struct async_extent, list);
                list_del(&async_extent->list);

                io_tree = &BTRFS_I(inode)->io_tree;

                /* did the compression code fall back to uncompressed IO? */
                if (!async_extent->pages) {
                        int page_started = 0;
                        unsigned long nr_written = 0;

                        lock_extent(io_tree, async_extent->start,
                                    async_extent->start +
                                    async_extent->ram_size - 1, GFP_NOFS);

                        /* allocate blocks */
                        cow_file_range(inode, async_cow->locked_page,
                                       async_extent->start,
                                       async_extent->start +
                                       async_extent->ram_size - 1,
                                       &page_started, &nr_written, 0);

                        /*
                         * if page_started, cow_file_range inserted an
                         * inline extent and took care of all the unlocking
                         * and IO for us.  Otherwise, we need to submit
                         * all those pages down to the drive.
                         */
                        if (!page_started)
                                extent_write_locked_range(io_tree,
                                                  inode, async_extent->start,
                                                  async_extent->start +
                                                  async_extent->ram_size - 1,
                                                  btrfs_get_extent,
                                                  WB_SYNC_ALL);
                        kfree(async_extent);
                        cond_resched();
                        continue;
                }

                lock_extent(io_tree, async_extent->start,
                            async_extent->start + async_extent->ram_size - 1,
                            GFP_NOFS);
                /*
                 * here we're doing allocation and writeback of the
                 * compressed pages
                 */
                btrfs_drop_extent_cache(inode, async_extent->start,
                                        async_extent->start +
                                        async_extent->ram_size - 1, 0);

                ret = btrfs_reserve_extent(trans, root,
                                           async_extent->compressed_size,
                                           async_extent->compressed_size,
                                           0, alloc_hint,
                                           (u64)-1, &ins, 1);
                BUG_ON(ret);
                em = alloc_extent_map(GFP_NOFS);
                em->start = async_extent->start;
                em->len = async_extent->ram_size;
                em->orig_start = em->start;

                em->block_start = ins.objectid;
                em->block_len = ins.offset;
                em->bdev = root->fs_info->fs_devices->latest_bdev;
                set_bit(EXTENT_FLAG_PINNED, &em->flags);
                set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);

                while (1) {
                        write_lock(&em_tree->lock);
                        ret = add_extent_mapping(em_tree, em);
                        write_unlock(&em_tree->lock);
                        if (ret != -EEXIST) {
                                free_extent_map(em);
                                break;
                        }
                        btrfs_drop_extent_cache(inode, async_extent->start,
                                                async_extent->start +
                                                async_extent->ram_size - 1, 0);
                }

                ret = btrfs_add_ordered_extent(inode, async_extent->start,
                                               ins.objectid,
                                               async_extent->ram_size,
                                               ins.offset,
                                               BTRFS_ORDERED_COMPRESSED);
                BUG_ON(ret);

                btrfs_end_transaction(trans, root);

                /*
                 * clear dirty, set writeback and unlock the pages.
                 */
                extent_clear_unlock_delalloc(inode,
                                             &BTRFS_I(inode)->io_tree,
                                             async_extent->start,
                                             async_extent->start +
                                             async_extent->ram_size - 1,
                                             NULL, 1, 1, 0, 1, 1, 0, 0);

                ret = btrfs_submit_compressed_write(inode,
                                    async_extent->start,
                                    async_extent->ram_size,
                                    ins.objectid,
                                    ins.offset, async_extent->pages,
                                    async_extent->nr_pages);

                BUG_ON(ret);
                trans = btrfs_join_transaction(root, 1);
                alloc_hint = ins.objectid + ins.offset;
                kfree(async_extent);
                cond_resched();
        }

        btrfs_end_transaction(trans, root);
        return 0;
}

/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the callbacks end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int cow_file_range(struct inode *inode,
                                   struct page *locked_page,
                                   u64 start, u64 end, int *page_started,
                                   unsigned long *nr_written,
                                   int unlock)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
        u64 alloc_hint = 0;
        u64 num_bytes;
        unsigned long ram_size;
        u64 disk_num_bytes;
        u64 cur_alloc_size;
        u64 blocksize = root->sectorsize;
        u64 actual_end;
        u64 isize = i_size_read(inode);
        struct btrfs_key ins;
        struct extent_map *em;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        int ret = 0;

        trans = btrfs_join_transaction(root, 1);
        BUG_ON(!trans);
        btrfs_set_trans_block_group(trans, inode);

        actual_end = min_t(u64, isize, end + 1);

        num_bytes = (end - start + blocksize) & ~(blocksize - 1);
        num_bytes = max(blocksize, num_bytes);
        disk_num_bytes = num_bytes;
        ret = 0;

        if (start == 0) {
                /* let's try to make an inline extent */
                ret = cow_file_range_inline(trans, root, inode,
                                            start, end, 0, NULL);
                if (ret == 0) {
                        extent_clear_unlock_delalloc(inode,
                                                     &BTRFS_I(inode)->io_tree,
                                                     start, end, NULL, 1, 1,
                                                     1, 1, 1, 1, 0);
                        *nr_written = *nr_written +
                             (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
                        *page_started = 1;
                        ret = 0;
                        goto out;
                }
        }

        BUG_ON(disk_num_bytes >
               btrfs_super_total_bytes(&root->fs_info->super_copy));

        read_lock(&BTRFS_I(inode)->extent_tree.lock);
        em = search_extent_mapping(&BTRFS_I(inode)->extent_tree,
                                   start, num_bytes);
        if (em) {
                alloc_hint = em->block_start;
                free_extent_map(em);
        }
        read_unlock(&BTRFS_I(inode)->extent_tree.lock);
        btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

        while (disk_num_bytes > 0) {
                cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent);
                ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
                                           root->sectorsize, 0, alloc_hint,
                                           (u64)-1, &ins, 1);
                BUG_ON(ret);

                em = alloc_extent_map(GFP_NOFS);
                em->start = start;
                em->orig_start = em->start;
                ram_size = ins.offset;
                em->len = ins.offset;

                em->block_start = ins.objectid;
                em->block_len = ins.offset;
                em->bdev = root->fs_info->fs_devices->latest_bdev;
                set_bit(EXTENT_FLAG_PINNED, &em->flags);

                while (1) {
                        write_lock(&em_tree->lock);
                        ret = add_extent_mapping(em_tree, em);
                        write_unlock(&em_tree->lock);
                        if (ret != -EEXIST) {
                                free_extent_map(em);
                                break;
                        }
                        btrfs_drop_extent_cache(inode, start,
                                                start + ram_size - 1, 0);
                }

                cur_alloc_size = ins.offset;
                ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
                                               ram_size, cur_alloc_size, 0);
                BUG_ON(ret);

                if (root->root_key.objectid ==
                    BTRFS_DATA_RELOC_TREE_OBJECTID) {
                        ret = btrfs_reloc_clone_csums(inode, start,
                                                      cur_alloc_size);
                        BUG_ON(ret);
                }

                if (disk_num_bytes < cur_alloc_size)
                        break;

                /* we're not doing compressed IO, don't unlock the first
                 * page (which the caller expects to stay locked), don't
                 * clear any dirty bits and don't set any writeback bits
                 *
                 * Do set the Private2 bit so we know this page was properly
                 * setup for writepage
                 */
                extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
                                             start, start + ram_size - 1,
                                             locked_page, unlock, 1,
                                             1, 0, 0, 0, 1);
                disk_num_bytes -= cur_alloc_size;
                num_bytes -= cur_alloc_size;
                alloc_hint = ins.objectid + ins.offset;
                start += cur_alloc_size;
        }
out:
        ret = 0;
        btrfs_end_transaction(trans, root);

        return ret;
}

/*
 * work queue callback to start compression on a file's pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
        struct async_cow *async_cow;
        int num_added = 0;
        async_cow = container_of(work, struct async_cow, work);

        compress_file_range(async_cow->inode, async_cow->locked_page,
                            async_cow->start, async_cow->end, async_cow,
                            &num_added);
        if (num_added == 0)
                async_cow->inode = NULL;
}

/*
 * work queue callback to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
        struct async_cow *async_cow;
        struct btrfs_root *root;
        unsigned long nr_pages;

        async_cow = container_of(work, struct async_cow, work);

        root = async_cow->root;
        nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
                PAGE_CACHE_SHIFT;

        atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);

        if (atomic_read(&root->fs_info->async_delalloc_pages) <
            5 * 1024 * 1024 &&
            waitqueue_active(&root->fs_info->async_submit_wait))
                wake_up(&root->fs_info->async_submit_wait);

        if (async_cow->inode)
                submit_compressed_extents(async_cow->inode, async_cow);
}

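/*
 * work queue callback to free the async_cow record once both phases
 * have run
 */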
static noinline void async_cow_free(struct btrfs_work *work)
{
        struct async_cow *async_cow;
        async_cow = container_of(work, struct async_cow, work);
        kfree(async_cow);
}

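/*
 * split the range into chunks (at most 512k each unless compression is
 * disabled for the inode), wrap each chunk in an async_cow work item
 * and queue it on the delalloc workers.  Throttles once too many async
 * delalloc pages are in flight.
 */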
static int cow_file_range_async(struct inode *inode, struct page *locked_page,
                                u64 start, u64 end, int *page_started,
                                unsigned long *nr_written)
{
        struct async_cow *async_cow;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        unsigned long nr_pages;
        u64 cur_end;
        int limit = 10 * 1024 * 1024;

        clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED |
                         EXTENT_DELALLOC, 1, 0, NULL, GFP_NOFS);
        while (start < end) {
                async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
                async_cow->inode = inode;
                async_cow->root = root;
                async_cow->locked_page = locked_page;
                async_cow->start = start;

                if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
                        cur_end = end;
                else
                        cur_end = min(end, start + 512 * 1024 - 1);

                async_cow->end = cur_end;
                INIT_LIST_HEAD(&async_cow->extents);

                async_cow->work.func = async_cow_start;
                async_cow->work.ordered_func = async_cow_submit;
                async_cow->work.ordered_free = async_cow_free;
                async_cow->work.flags = 0;

                nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
                        PAGE_CACHE_SHIFT;
                atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);

                btrfs_queue_worker(&root->fs_info->delalloc_workers,
                                   &async_cow->work);

                if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
                        wait_event(root->fs_info->async_submit_wait,
                           (atomic_read(&root->fs_info->async_delalloc_pages) <
                            limit));
                }

                while (atomic_read(&root->fs_info->async_submit_draining) &&
                      atomic_read(&root->fs_info->async_delalloc_pages)) {
                        wait_event(root->fs_info->async_submit_wait,
                          (atomic_read(&root->fs_info->async_delalloc_pages) ==
                           0));
                }

                *nr_written += nr_pages;
                start = cur_end + 1;
        }
        *page_started = 1;
        return 0;
}

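/*
 * return 1 if any checksums exist for the range [bytenr, bytenr +
 * num_bytes), 0 if none do.  Used below to force COW when overwriting
 * in place would leave the range with a mix of valid and missing csums.
 */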
static noinline int csum_exist_in_range(struct btrfs_root *root,
                                        u64 bytenr, u64 num_bytes)
{
        int ret;
        struct btrfs_ordered_sum *sums;
        LIST_HEAD(list);

        ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
                                       bytenr + num_bytes - 1, &list);
        if (ret == 0 && list_empty(&list))
                return 0;

        while (!list_empty(&list)) {
                sums = list_entry(list.next, struct btrfs_ordered_sum, list);
                list_del(&sums->list);
                kfree(sums);
        }
        return 1;
}

/*
 * callback for nocow writeback.  This checks for snapshots or COW copies
 * of the extents that exist in the file, and COWs the file as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static noinline int run_delalloc_nocow(struct inode *inode,
                                       struct page *locked_page,
                              u64 start, u64 end, int *page_started, int force,
                              unsigned long *nr_written)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
        struct extent_buffer *leaf;
        struct btrfs_path *path;
        struct btrfs_file_extent_item *fi;
        struct btrfs_key found_key;
        u64 cow_start;
        u64 cur_offset;
        u64 extent_end;
        u64 extent_offset;
        u64 disk_bytenr;
        u64 num_bytes;
        int extent_type;
        int ret;
        int type;
        int nocow;
        int check_prev = 1;

        path = btrfs_alloc_path();
        BUG_ON(!path);
        trans = btrfs_join_transaction(root, 1);
        BUG_ON(!trans);

        cow_start = (u64)-1;
        cur_offset = start;
        while (1) {
                ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
                                               cur_offset, 0);
                BUG_ON(ret < 0);
                if (ret > 0 && path->slots[0] > 0 && check_prev) {
                        leaf = path->nodes[0];
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0] - 1);
                        if (found_key.objectid == inode->i_ino &&
                            found_key.type == BTRFS_EXTENT_DATA_KEY)
                                path->slots[0]--;
                }
                check_prev = 0;
next_slot:
                leaf = path->nodes[0];
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                BUG_ON(1);
                        if (ret > 0)
                                break;
                        leaf = path->nodes[0];
                }

                nocow = 0;
                disk_bytenr = 0;
                num_bytes = 0;
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

                if (found_key.objectid > inode->i_ino ||
                    found_key.type > BTRFS_EXTENT_DATA_KEY ||
                    found_key.offset > end)
                        break;

                if (found_key.offset > cur_offset) {
                        extent_end = found_key.offset;
                        goto out_check;
                }

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);
                extent_type = btrfs_file_extent_type(leaf, fi);

                if (extent_type == BTRFS_FILE_EXTENT_REG ||
                    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
                        disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
                        extent_offset = btrfs_file_extent_offset(leaf, fi);
                        extent_end = found_key.offset +
                                btrfs_file_extent_num_bytes(leaf, fi);
                        if (extent_end <= start) {
                                path->slots[0]++;
                                goto next_slot;
                        }
                        if (disk_bytenr == 0)
                                goto out_check;
                        if (btrfs_file_extent_compression(leaf, fi) ||
                            btrfs_file_extent_encryption(leaf, fi) ||
                            btrfs_file_extent_other_encoding(leaf, fi))
                                goto out_check;
                        if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
                                goto out_check;
                        if (btrfs_extent_readonly(root, disk_bytenr))
                                goto out_check;
                        if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
                                                  found_key.offset -
                                                  extent_offset, disk_bytenr))
                                goto out_check;
                        disk_bytenr += extent_offset;
                        disk_bytenr += cur_offset - found_key.offset;
                        num_bytes = min(end + 1, extent_end) - cur_offset;
                        /*
                         * force cow if csum exists in the range.
                         * this ensures that csums for a given extent are
                         * either valid or do not exist.
                         */
                        if (csum_exist_in_range(root, disk_bytenr, num_bytes))
                                goto out_check;
                        nocow = 1;
                } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                        extent_end = found_key.offset +
                                btrfs_file_extent_inline_len(leaf, fi);
                        extent_end = ALIGN(extent_end, root->sectorsize);
                } else {
                        BUG_ON(1);
                }
out_check:
                if (extent_end <= start) {
                        path->slots[0]++;
                        goto next_slot;
                }
                if (!nocow) {
                        if (cow_start == (u64)-1)
                                cow_start = cur_offset;
                        cur_offset = extent_end;
                        if (cur_offset > end)
                                break;
                        path->slots[0]++;
                        goto next_slot;
                }

                btrfs_release_path(root, path);
                if (cow_start != (u64)-1) {
                        ret = cow_file_range(inode, locked_page, cow_start,
                                        found_key.offset - 1, page_started,
                                        nr_written, 1);
                        BUG_ON(ret);
                        cow_start = (u64)-1;
                }

                if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
                        struct extent_map *em;
                        struct extent_map_tree *em_tree;
                        em_tree = &BTRFS_I(inode)->extent_tree;
                        em = alloc_extent_map(GFP_NOFS);
                        em->start = cur_offset;
                        em->orig_start = em->start;
                        em->len = num_bytes;
                        em->block_len = num_bytes;
                        em->block_start = disk_bytenr;
                        em->bdev = root->fs_info->fs_devices->latest_bdev;
                        set_bit(EXTENT_FLAG_PINNED, &em->flags);
                        while (1) {
                                write_lock(&em_tree->lock);
                                ret = add_extent_mapping(em_tree, em);
                                write_unlock(&em_tree->lock);
                                if (ret != -EEXIST) {
                                        free_extent_map(em);
                                        break;
                                }
                                btrfs_drop_extent_cache(inode, em->start,
                                                em->start + em->len - 1, 0);
                        }
                        type = BTRFS_ORDERED_PREALLOC;
                } else {
                        type = BTRFS_ORDERED_NOCOW;
                }

                ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
                                               num_bytes, num_bytes, type);
                BUG_ON(ret);

                extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
                                        cur_offset, cur_offset + num_bytes - 1,
                                        locked_page, 1, 1, 1, 0, 0, 0, 1);
                cur_offset = extent_end;
                if (cur_offset > end)
                        break;
        }
        btrfs_release_path(root, path);

        if (cur_offset <= end && cow_start == (u64)-1)
                cow_start = cur_offset;
        if (cow_start != (u64)-1) {
                ret = cow_file_range(inode, locked_page, cow_start, end,
                                     page_started, nr_written, 1);
                BUG_ON(ret);
        }

        ret = btrfs_end_transaction(trans, root);
        BUG_ON(ret);
        btrfs_free_path(path);
        return 0;
}

/*
 * extent_io.c callback to do delayed allocation processing
 */
static int run_delalloc_range(struct inode *inode, struct page *locked_page,
                              u64 start, u64 end, int *page_started,
                              unsigned long *nr_written)
{
        int ret;
        struct btrfs_root *root = BTRFS_I(inode)->root;

        if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)
                ret = run_delalloc_nocow(inode, locked_page, start, end,
                                         page_started, 1, nr_written);
        else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)
                ret = run_delalloc_nocow(inode, locked_page, start, end,
                                         page_started, 0, nr_written);
        else if (!btrfs_test_opt(root, COMPRESS))
                ret = cow_file_range(inode, locked_page, start, end,
                                      page_started, nr_written, 1);
        else
                ret = cow_file_range_async(inode, locked_page, start, end,
                                           page_started, nr_written);
        return ret;
}

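/*
 * extent_io.c split_extent_hook: when a delalloc extent tracked in the
 * io tree is split, bump the outstanding extent count for the inode,
 * unless the pieces are still covered by the accounting already done
 * for the original large extent.
 */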
static int btrfs_split_extent_hook(struct inode *inode,
                                    struct extent_state *orig, u64 split)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        u64 size;

        if (!(orig->state & EXTENT_DELALLOC))
                return 0;

        size = orig->end - orig->start + 1;
        if (size > root->fs_info->max_extent) {
                u64 num_extents;
                u64 new_size;

                new_size = orig->end - split + 1;
                num_extents = div64_u64(size + root->fs_info->max_extent - 1,
                                        root->fs_info->max_extent);

                /*
                 * if we break a large extent up then leave delalloc_extents be,
                 * since we've already accounted for the large extent.
                 */
                if (div64_u64(new_size + root->fs_info->max_extent - 1,
                              root->fs_info->max_extent) < num_extents)
                        return 0;
        }

        BTRFS_I(inode)->delalloc_extents++;

        return 0;
}

/*
 * extent_io.c merge_extent_hook, used to track merged delayed allocation
 * extents so we can keep track of new extents that are just merged onto old
 * extents, such as when we are doing sequential writes, so we can properly
 * account for the metadata space we'll need.
 */
static int btrfs_merge_extent_hook(struct inode *inode,
                                   struct extent_state *new,
                                   struct extent_state *other)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        u64 new_size, old_size;
        u64 num_extents;

        /* not delalloc, ignore it */
        if (!(other->state & EXTENT_DELALLOC))
                return 0;

        old_size = other->end - other->start + 1;
        if (new->start < other->start)
                new_size = other->end - new->start + 1;
        else
                new_size = new->end - other->start + 1;

        /* we're not bigger than the max, unreserve the space and go */
        if (new_size <= root->fs_info->max_extent) {
                BTRFS_I(inode)->delalloc_extents--;
                return 0;
        }

        /*
         * If we grew by another max_extent, just return, we want to keep that
         * reserved amount.
         */
        num_extents = div64_u64(old_size + root->fs_info->max_extent - 1,
                                root->fs_info->max_extent);
        if (div64_u64(new_size + root->fs_info->max_extent - 1,
                      root->fs_info->max_extent) > num_extents)
                return 0;

        BTRFS_I(inode)->delalloc_extents--;

        return 0;
}

/*
 * extent_io.c set_bit_hook, used to track delayed allocation
 * bytes in this file, and to maintain the list of inodes that
 * have pending delalloc work to be done.
 */
static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
                       unsigned long old, unsigned long bits)
{

        /*
         * set_bit and clear_bit hooks normally require _irqsave/restore
         * but in this case, we are only testing for the DELALLOC
         * bit, which is only set or cleared with irqs on
         */
        if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
                struct btrfs_root *root = BTRFS_I(inode)->root;

                BTRFS_I(inode)->delalloc_extents++;
                btrfs_delalloc_reserve_space(root, inode, end - start + 1);
                spin_lock(&root->fs_info->delalloc_lock);
                BTRFS_I(inode)->delalloc_bytes += end - start + 1;
                root->fs_info->delalloc_bytes += end - start + 1;
                if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
                        list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
                                      &root->fs_info->delalloc_inodes);
                }
                spin_unlock(&root->fs_info->delalloc_lock);
        }
        return 0;
}

/*
 * extent_io.c clear_bit_hook, see set_bit_hook for why
 */
static int btrfs_clear_bit_hook(struct inode *inode,
                                struct extent_state *state, unsigned long bits)
{
        /*
         * set_bit and clear_bit hooks normally require _irqsave/restore
         * but in this case, we are only testing for the DELALLOC
         * bit, which is only set or cleared with irqs on
         */
        if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
                struct btrfs_root *root = BTRFS_I(inode)->root;

                BTRFS_I(inode)->delalloc_extents--;
                btrfs_unreserve_metadata_for_delalloc(root, inode, 1);

                spin_lock(&root->fs_info->delalloc_lock);
                if (state->end - state->start + 1 >
                    root->fs_info->delalloc_bytes) {
                        printk(KERN_INFO "btrfs warning: delalloc account "
                               "%llu %llu\n",
                               (unsigned long long)
                               state->end - state->start + 1,
                               (unsigned long long)
                               root->fs_info->delalloc_bytes);
                        btrfs_delalloc_free_space(root, inode, (u64)-1);
                        root->fs_info->delalloc_bytes = 0;
                        BTRFS_I(inode)->delalloc_bytes = 0;
                } else {
                        btrfs_delalloc_free_space(root, inode,
                                                  state->end -
                                                  state->start + 1);
                        root->fs_info->delalloc_bytes -= state->end -
                                state->start + 1;
                        BTRFS_I(inode)->delalloc_bytes -= state->end -
                                state->start + 1;
                }
                if (BTRFS_I(inode)->delalloc_bytes == 0 &&
                    !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
                        list_del_init(&BTRFS_I(inode)->delalloc_inodes);
                }
                spin_unlock(&root->fs_info->delalloc_lock);
        }
        return 0;
}
1316
1317 /*
1318  * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
1319  * we don't create bios that span stripes or chunks
1320  */
1321 int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
1322                          size_t size, struct bio *bio,
1323                          unsigned long bio_flags)
1324 {
1325         struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
1326         struct btrfs_mapping_tree *map_tree;
1327         u64 logical = (u64)bio->bi_sector << 9;
1328         u64 length = 0;
1329         u64 map_length;
1330         int ret;
1331
1332         if (bio_flags & EXTENT_BIO_COMPRESSED)
1333                 return 0;
1334
1335         length = bio->bi_size;
1336         map_tree = &root->fs_info->mapping_tree;
1337         map_length = length;
1338         ret = btrfs_map_block(map_tree, READ, logical,
1339                               &map_length, NULL, 0);
1340
1341         if (map_length < length + size)
1342                 return 1;
1343         return 0;
1344 }
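
/*
 * Sketch of the span check above (illustrative only): btrfs_map_block()
 * trims map_length down to the bytes that stay physically contiguous on
 * one stripe starting at 'logical'.  The bio already covers 'length'
 * bytes and the candidate page would add 'size' more:
 *
 *	stripe:	|<------------- map_length ------------->|
 *	merge:	|<--- length --->|<- size ->|                 fits, return 0
 *	split:	|<---------- length ---------->|<- size ->|   spans, return 1
 *
 * A non-zero return makes extent_io.c submit the current bio and start
 * a fresh one for this page instead of merging.
 */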
1345
1346 /*
1347  * in order to insert checksums into the metadata in large chunks,
1348  * we wait until bio submission time.  All the pages in the bio are
1349  * checksummed and sums are attached onto the ordered extent record.
1350  *
1351  * At IO completion time the sums attached on the ordered extent record
1352  * are inserted into the btree
1353  */
1354 static int __btrfs_submit_bio_start(struct inode *inode, int rw,
1355                                     struct bio *bio, int mirror_num,
1356                                     unsigned long bio_flags)
1357 {
1358         struct btrfs_root *root = BTRFS_I(inode)->root;
1359         int ret = 0;
1360
1361         ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1362         BUG_ON(ret);
1363         return 0;
1364 }
1365
1366 /*
1367  * in order to insert checksums into the metadata in large chunks,
1368  * we wait until bio submission time.  All the pages in the bio are
1369  * checksummed and sums are attached onto the ordered extent record.
1370  *
1371  * At IO completion time the sums attached on the ordered extent record
1372  * are inserted into the btree
1373  */
1374 static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
1375                           int mirror_num, unsigned long bio_flags)
1376 {
1377         struct btrfs_root *root = BTRFS_I(inode)->root;
1378         return btrfs_map_bio(root, rw, bio, mirror_num, 1);
1379 }
1380
1381 /*
1382  * extent_io.c submission hook. This does the right thing for csum calculation
1383  * on write, or reading the csums from the tree before a read
1384  */
1385 static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1386                           int mirror_num, unsigned long bio_flags)
1387 {
1388         struct btrfs_root *root = BTRFS_I(inode)->root;
1389         int ret = 0;
1390         int skip_sum;
1391
1392         skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
1393
1394         ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
1395         BUG_ON(ret);
1396
1397         if (!(rw & (1 << BIO_RW))) {
1398                 if (bio_flags & EXTENT_BIO_COMPRESSED) {
1399                         return btrfs_submit_compressed_read(inode, bio,
1400                                                     mirror_num, bio_flags);
1401                 } else if (!skip_sum)
1402                         btrfs_lookup_bio_sums(root, inode, bio, NULL);
1403                 goto mapit;
1404         } else if (!skip_sum) {
1405                 /* csum items have already been cloned */
1406                 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
1407                         goto mapit;
1408                 /* we're doing a write, do the async checksumming */
1409                 return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
1410                                    inode, rw, bio, mirror_num,
1411                                    bio_flags, __btrfs_submit_bio_start,
1412                                    __btrfs_submit_bio_done);
1413         }
1414
1415 mapit:
1416         return btrfs_map_bio(root, rw, bio, mirror_num, 0);
1417 }
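
/*
 * Recap of the dispatch above (comment added for clarity): compressed
 * reads go through the compressed read path, ordinary checksummed reads
 * look up their csums and are then mapped, checksummed writes are handed
 * to the async helpers (__btrfs_submit_bio_start csums the pages,
 * __btrfs_submit_bio_done maps the bio), and nodatasum or relocation
 * writes fall through to btrfs_map_bio() directly.
 */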
1418
1419 /*
1420  * given a list of ordered sums record them in the inode.  This happens
1421  * at IO completion time based on sums calculated at bio submission time.
1422  */
1423 static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1424                              struct inode *inode, u64 file_offset,
1425                              struct list_head *list)
1426 {
1427         struct btrfs_ordered_sum *sum;
1428
1429         btrfs_set_trans_block_group(trans, inode);
1430
1431         list_for_each_entry(sum, list, list) {
1432                 btrfs_csum_file_blocks(trans,
1433                        BTRFS_I(inode)->root->fs_info->csum_root, sum);
1434         }
1435         return 0;
1436 }
1437
1438 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end)
1439 {
1440         if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
1441                 WARN_ON(1);
1442         return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1443                                    GFP_NOFS);
1444 }
1445
1446 /* see btrfs_writepage_start_hook for details on why this is required */
1447 struct btrfs_writepage_fixup {
1448         struct page *page;
1449         struct btrfs_work work;
1450 };
1451
1452 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
1453 {
1454         struct btrfs_writepage_fixup *fixup;
1455         struct btrfs_ordered_extent *ordered;
1456         struct page *page;
1457         struct inode *inode;
1458         u64 page_start;
1459         u64 page_end;
1460
1461         fixup = container_of(work, struct btrfs_writepage_fixup, work);
1462         page = fixup->page;
1463 again:
1464         lock_page(page);
1465         if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
1466                 ClearPageChecked(page);
1467                 goto out_page;
1468         }
1469
1470         inode = page->mapping->host;
1471         page_start = page_offset(page);
1472         page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
1473
1474         lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
1475
1476         /* already ordered? We're done */
1477         if (PagePrivate2(page))
1478                 goto out;
1479
1480         ordered = btrfs_lookup_ordered_extent(inode, page_start);
1481         if (ordered) {
1482                 unlock_extent(&BTRFS_I(inode)->io_tree, page_start,
1483                               page_end, GFP_NOFS);
1484                 unlock_page(page);
1485                 btrfs_start_ordered_extent(inode, ordered, 1);
1486                 goto again;
1487         }
1488
1489         btrfs_set_extent_delalloc(inode, page_start, page_end);
1490         ClearPageChecked(page);
1491 out:
1492         unlock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
1493 out_page:
1494         unlock_page(page);
1495         page_cache_release(page);
1496 }
1497
1498 /*
1499  * There are a few paths in the higher layers of the kernel that directly
1500  * set the page dirty bit without asking the filesystem if it is a
1501  * good idea.  This causes problems because we want to make sure COW
1502  * properly happens and the data=ordered rules are followed.
1503  *
1504  * In our case any range that doesn't have the ORDERED bit set
1505  * hasn't been properly set up for IO.  We kick off an async process
1506  * to fix it up.  The async helper will wait for ordered extents, set
1507  * the delalloc bit and make it safe to write the page.
1508  */
1509 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
1510 {
1511         struct inode *inode = page->mapping->host;
1512         struct btrfs_writepage_fixup *fixup;
1513         struct btrfs_root *root = BTRFS_I(inode)->root;
1514
1515         /* this page is properly in the ordered list */
1516         if (TestClearPagePrivate2(page))
1517                 return 0;
1518
1519         if (PageChecked(page))
1520                 return -EAGAIN;
1521
1522         fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
1523         if (!fixup)
1524                 return -EAGAIN;
1525
1526         SetPageChecked(page);
1527         page_cache_get(page);
1528         fixup->work.func = btrfs_writepage_fixup_worker;
1529         fixup->page = page;
1530         btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
1531         return -EAGAIN;
1532 }
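
/*
 * Note on the return values above (added for clarity): returning 0
 * tells the extent_io.c writepage path the range is safe to write now,
 * while -EAGAIN is expected to make it leave the page alone for this
 * pass; the fixup worker then waits for any ordered extent and sets the
 * delalloc bit, leaving the page dirty so the data goes out on a later
 * writeback pass.
 */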
1533
1534 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1535                                        struct inode *inode, u64 file_pos,
1536                                        u64 disk_bytenr, u64 disk_num_bytes,
1537                                        u64 num_bytes, u64 ram_bytes,
1538                                        u64 locked_end,
1539                                        u8 compression, u8 encryption,
1540                                        u16 other_encoding, int extent_type)
1541 {
1542         struct btrfs_root *root = BTRFS_I(inode)->root;
1543         struct btrfs_file_extent_item *fi;
1544         struct btrfs_path *path;
1545         struct extent_buffer *leaf;
1546         struct btrfs_key ins;
1547         u64 hint;
1548         int ret;
1549
1550         path = btrfs_alloc_path();
1551         BUG_ON(!path);
1552
1553         path->leave_spinning = 1;
1554
1555         /*
1556          * we may be replacing one extent in the tree with another.
1557          * The new extent is pinned in the extent map, and we don't want
1558          * to drop it from the cache until it is completely in the btree.
1559          *
1560          * So, tell btrfs_drop_extents to leave this extent in the cache.
1561          * The caller is expected to unpin it and allow it to be merged
1562          * with the others.
1563          */
1564         ret = btrfs_drop_extents(trans, root, inode, file_pos,
1565                                  file_pos + num_bytes, locked_end,
1566                                  file_pos, &hint, 0);
1567         BUG_ON(ret);
1568
1569         ins.objectid = inode->i_ino;
1570         ins.offset = file_pos;
1571         ins.type = BTRFS_EXTENT_DATA_KEY;
1572         ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
1573         BUG_ON(ret);
1574         leaf = path->nodes[0];
1575         fi = btrfs_item_ptr(leaf, path->slots[0],
1576                             struct btrfs_file_extent_item);
1577         btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1578         btrfs_set_file_extent_type(leaf, fi, extent_type);
1579         btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
1580         btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
1581         btrfs_set_file_extent_offset(leaf, fi, 0);
1582         btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1583         btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
1584         btrfs_set_file_extent_compression(leaf, fi, compression);
1585         btrfs_set_file_extent_encryption(leaf, fi, encryption);
1586         btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
1587
1588         btrfs_unlock_up_safe(path, 1);
1589         btrfs_set_lock_blocking(leaf);
1590
1591         btrfs_mark_buffer_dirty(leaf);
1592
1593         inode_add_bytes(inode, num_bytes);
1594
1595         ins.objectid = disk_bytenr;
1596         ins.offset = disk_num_bytes;
1597         ins.type = BTRFS_EXTENT_ITEM_KEY;
1598         ret = btrfs_alloc_reserved_file_extent(trans, root,
1599                                         root->root_key.objectid,
1600                                         inode->i_ino, file_pos, &ins);
1601         BUG_ON(ret);
1602         btrfs_free_path(path);
1603
1604         return 0;
1605 }
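
/*
 * Key layout used above (illustrative recap): the file extent item goes
 * in at (inode->i_ino, BTRFS_EXTENT_DATA_KEY, file_pos), i.e. keyed by
 * logical file offset, while the allocation record is keyed by physical
 * location as (disk_bytenr, BTRFS_EXTENT_ITEM_KEY, disk_num_bytes); the
 * same 'ins' key variable is simply reused for both insertions.
 */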
1606
1607 /*
1608  * helper function for btrfs_finish_ordered_io, this
1609  * just reads in some of the csum leaves to prime them into ram
1610  * before we start the transaction.  It limits the amount of btree
1611  * reads required while inside the transaction.
1612  */
1613 static noinline void reada_csum(struct btrfs_root *root,
1614                                 struct btrfs_path *path,
1615                                 struct btrfs_ordered_extent *ordered_extent)
1616 {
1617         struct btrfs_ordered_sum *sum;
1618         u64 bytenr;
1619
1620         sum = list_entry(ordered_extent->list.next, struct btrfs_ordered_sum,
1621                          list);
1622         bytenr = sum->sums[0].bytenr;
1623
1624         /*
1625          * we don't care about the results, the point of this search is
1626          * just to get the btree leaves into ram
1627          */
1628         btrfs_lookup_csum(NULL, root->fs_info->csum_root, path, bytenr, 0);
1629 }
1630
1631 /* as ordered data IO finishes, this gets called so we can finish
1632  * an ordered extent if the range of bytes in the file it covers is
1633  * fully written.
1634  */
1635 static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1636 {
1637         struct btrfs_root *root = BTRFS_I(inode)->root;
1638         struct btrfs_trans_handle *trans;
1639         struct btrfs_ordered_extent *ordered_extent = NULL;
1640         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1641         struct btrfs_path *path;
1642         int compressed = 0;
1643         int ret;
1644
1645         ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1);
1646         if (!ret)
1647                 return 0;
1648
1649         /*
1650          * before we join the transaction, try to do some of our IO.
1651          * This will limit the amount of IO that we have to do with
1652          * the transaction running.  We're unlikely to need to do any
1653          * IO if the file extents are new; the disk_i_size check
1654          * covers the most common case.
1655          */
1656         if (start < BTRFS_I(inode)->disk_i_size) {
1657                 path = btrfs_alloc_path();
1658                 if (path) {
1659                         ret = btrfs_lookup_file_extent(NULL, root, path,
1660                                                        inode->i_ino,
1661                                                        start, 0);
1662                         ordered_extent = btrfs_lookup_ordered_extent(inode,
1663                                                                      start);
1664                         if (!list_empty(&ordered_extent->list)) {
1665                                 btrfs_release_path(root, path);
1666                                 reada_csum(root, path, ordered_extent);
1667                         }
1668                         btrfs_free_path(path);
1669                 }
1670         }
1671
1672         trans = btrfs_join_transaction(root, 1);
1673
1674         if (!ordered_extent)
1675                 ordered_extent = btrfs_lookup_ordered_extent(inode, start);
1676         BUG_ON(!ordered_extent);
1677         if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags))
1678                 goto nocow;
1679
1680         lock_extent(io_tree, ordered_extent->file_offset,
1681                     ordered_extent->file_offset + ordered_extent->len - 1,
1682                     GFP_NOFS);
1683
1684         if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
1685                 compressed = 1;
1686         if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
1687                 BUG_ON(compressed);
1688                 ret = btrfs_mark_extent_written(trans, root, inode,
1689                                                 ordered_extent->file_offset,
1690                                                 ordered_extent->file_offset +
1691                                                 ordered_extent->len);
1692                 BUG_ON(ret);
1693         } else {
1694                 ret = insert_reserved_file_extent(trans, inode,
1695                                                 ordered_extent->file_offset,
1696                                                 ordered_extent->start,
1697                                                 ordered_extent->disk_len,
1698                                                 ordered_extent->len,
1699                                                 ordered_extent->len,
1700                                                 ordered_extent->file_offset +
1701                                                 ordered_extent->len,
1702                                                 compressed, 0, 0,
1703                                                 BTRFS_FILE_EXTENT_REG);
1704                 unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
1705                                    ordered_extent->file_offset,
1706                                    ordered_extent->len);
1707                 BUG_ON(ret);
1708         }
1709         unlock_extent(io_tree, ordered_extent->file_offset,
1710                     ordered_extent->file_offset + ordered_extent->len - 1,
1711                     GFP_NOFS);
1712 nocow:
1713         add_pending_csums(trans, inode, ordered_extent->file_offset,
1714                           &ordered_extent->list);
1715
1716         mutex_lock(&BTRFS_I(inode)->extent_mutex);
1717         btrfs_ordered_update_i_size(inode, ordered_extent);
1718         btrfs_update_inode(trans, root, inode);
1719         btrfs_remove_ordered_extent(inode, ordered_extent);
1720         mutex_unlock(&BTRFS_I(inode)->extent_mutex);
1721
1722         /* once for us */
1723         btrfs_put_ordered_extent(ordered_extent);
1724         /* once for the tree */
1725         btrfs_put_ordered_extent(ordered_extent);
1726
1727         btrfs_end_transaction(trans, root);
1728         return 0;
1729 }
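
/*
 * Recap of the completion path above (comment only): when the last byte
 * of the range finishes we 1) optionally prime the csum leaves into ram,
 * 2) join the running transaction, 3) either mark a preallocated extent
 * as written or insert the reserved file extent, 4) record the pending
 * csums, and 5) update disk_i_size and drop the ordered extent.
 */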
1730
1731 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
1732                                 struct extent_state *state, int uptodate)
1733 {
1734         ClearPagePrivate2(page);
1735         return btrfs_finish_ordered_io(page->mapping->host, start, end);
1736 }
1737
1738 /*
1739  * When IO fails, either with EIO or a csum verification failure, we
1740  * try other mirrors that might have a good copy of the data.  This
1741  * io_failure_record is used to record state as we go through all the
1742  * mirrors.  If another mirror has good data, the page is set up to date
1743  * and things continue.  If a good mirror can't be found, the original
1744  * bio end_io callback is called to indicate things have failed.
1745  */
1746 struct io_failure_record {
1747         struct page *page;
1748         u64 start;
1749         u64 len;
1750         u64 logical;
1751         unsigned long bio_flags;
1752         int last_mirror;
1753 };
1754
1755 static int btrfs_io_failed_hook(struct bio *failed_bio,
1756                          struct page *page, u64 start, u64 end,
1757                          struct extent_state *state)
1758 {
1759         struct io_failure_record *failrec = NULL;
1760         u64 private;
1761         struct extent_map *em;
1762         struct inode *inode = page->mapping->host;
1763         struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1764         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
1765         struct bio *bio;
1766         int num_copies;
1767         int ret;
1768         int rw;
1769         u64 logical;
1770
1771         ret = get_state_private(failure_tree, start, &private);
1772         if (ret) {
1773                 failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
1774                 if (!failrec)
1775                         return -ENOMEM;
1776                 failrec->start = start;
1777                 failrec->len = end - start + 1;
1778                 failrec->last_mirror = 0;
1779                 failrec->bio_flags = 0;
1780
1781                 read_lock(&em_tree->lock);
1782                 em = lookup_extent_mapping(em_tree, start, failrec->len);
1783                 if (em && (em->start > start || em->start + em->len < start)) {
1784                         free_extent_map(em);
1785                         em = NULL;
1786                 }
1787                 read_unlock(&em_tree->lock);
1788
1789                 if (!em || IS_ERR(em)) {
1790                         kfree(failrec);
1791                         return -EIO;
1792                 }
1793                 logical = start - em->start;
1794                 logical = em->block_start + logical;
1795                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
1796                         logical = em->block_start;
1797                         failrec->bio_flags = EXTENT_BIO_COMPRESSED;
1798                 }
1799                 failrec->logical = logical;
1800                 free_extent_map(em);
1801                 set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
1802                                 EXTENT_DIRTY, GFP_NOFS);
1803                 set_state_private(failure_tree, start,
1804                                  (u64)(unsigned long)failrec);
1805         } else {
1806                 failrec = (struct io_failure_record *)(unsigned long)private;
1807         }
1808         num_copies = btrfs_num_copies(
1809                               &BTRFS_I(inode)->root->fs_info->mapping_tree,
1810                               failrec->logical, failrec->len);
1811         failrec->last_mirror++;
1812         if (!state) {
1813                 spin_lock(&BTRFS_I(inode)->io_tree.lock);
1814                 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
1815                                                     failrec->start,
1816                                                     EXTENT_LOCKED);
1817                 if (state && state->start != failrec->start)
1818                         state = NULL;
1819                 spin_unlock(&BTRFS_I(inode)->io_tree.lock);
1820         }
1821         if (!state || failrec->last_mirror > num_copies) {
1822                 set_state_private(failure_tree, failrec->start, 0);
1823                 clear_extent_bits(failure_tree, failrec->start,
1824                                   failrec->start + failrec->len - 1,
1825                                   EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1826                 kfree(failrec);
1827                 return -EIO;
1828         }
1829         bio = bio_alloc(GFP_NOFS, 1);
1830         bio->bi_private = state;
1831         bio->bi_end_io = failed_bio->bi_end_io;
1832         bio->bi_sector = failrec->logical >> 9;
1833         bio->bi_bdev = failed_bio->bi_bdev;
1834         bio->bi_size = 0;
1835
1836         bio_add_page(bio, page, failrec->len, start - page_offset(page));
1837         if (failed_bio->bi_rw & (1 << BIO_RW))
1838                 rw = WRITE;
1839         else
1840                 rw = READ;
1841
1842         BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
1843                                                       failrec->last_mirror,
1844                                                       failrec->bio_flags);
1845         return 0;
1846 }
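
/*
 * Worked example of the retry logic above (illustrative): with two
 * mirrors num_copies == 2.  The first failure allocates the failrec and
 * bumps last_mirror to 1, so the bio is resubmitted against mirror 1;
 * a second failure bumps it to 2 and tries the other copy; a third
 * failure makes last_mirror (3) exceed num_copies, so the record is
 * cleaned up and -EIO ends the original bio.
 */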
1847
1848 /*
1849  * each time an IO finishes, we do a fast check in the IO failure tree
1850  * to see if we need to process or clean up an io_failure_record
1851  */
1852 static int btrfs_clean_io_failures(struct inode *inode, u64 start)
1853 {
1854         u64 private;
1855         u64 private_failure;
1856         struct io_failure_record *failure;
1857         int ret;
1858
1859         private = 0;
1860         if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
1861                              (u64)-1, 1, EXTENT_DIRTY)) {
1862                 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
1863                                         start, &private_failure);
1864                 if (ret == 0) {
1865                         failure = (struct io_failure_record *)(unsigned long)
1866                                    private_failure;
1867                         set_state_private(&BTRFS_I(inode)->io_failure_tree,
1868                                           failure->start, 0);
1869                         clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
1870                                           failure->start,
1871                                           failure->start + failure->len - 1,
1872                                           EXTENT_DIRTY | EXTENT_LOCKED,
1873                                           GFP_NOFS);
1874                         kfree(failure);
1875                 }
1876         }
1877         return 0;
1878 }
1879
1880 /*
1881  * when reads are done, we need to check csums to verify the data is correct.
1882  * If there's a match, we allow the bio to finish.  If not, we go through
1883  * the io_failure_record routines to find good copies
1884  */
1885 static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
1886                                struct extent_state *state)
1887 {
1888         size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
1889         struct inode *inode = page->mapping->host;
1890         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1891         char *kaddr;
1892         u64 private = ~(u32)0;
1893         int ret;
1894         struct btrfs_root *root = BTRFS_I(inode)->root;
1895         u32 csum = ~(u32)0;
1896
1897         if (PageChecked(page)) {
1898                 ClearPageChecked(page);
1899                 goto good;
1900         }
1901
1902         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
1903                 return 0;
1904
1905         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
1906             test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
1907                 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
1908                                   GFP_NOFS);
1909                 return 0;
1910         }
1911
1912         if (state && state->start == start) {
1913                 private = state->private;
1914                 ret = 0;
1915         } else {
1916                 ret = get_state_private(io_tree, start, &private);
1917         }
1918         kaddr = kmap_atomic(page, KM_USER0);
1919         if (ret)
1920                 goto zeroit;
1921
1922         csum = btrfs_csum_data(root, kaddr + offset, csum,  end - start + 1);
1923         btrfs_csum_final(csum, (char *)&csum);
1924         if (csum != private)
1925                 goto zeroit;
1926
1927         kunmap_atomic(kaddr, KM_USER0);
1928 good:
1929         /* if the io failure tree for this inode is non-empty,
1930          * check to see if we've recovered from a failed IO
1931          */
1932         btrfs_clean_io_failures(inode, start);
1933         return 0;
1934
1935 zeroit:
1936         if (printk_ratelimit()) {
1937                 printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u "
1938                        "private %llu\n", page->mapping->host->i_ino,
1939                        (unsigned long long)start, csum,
1940                        (unsigned long long)private);
1941         }
1942         memset(kaddr + offset, 1, end - start + 1);
1943         flush_dcache_page(page);
1944         kunmap_atomic(kaddr, KM_USER0);
1945         if (private == 0)
1946                 return 0;
1947         return -EIO;
1948 }
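
/*
 * Minimal sketch of the verification sequence above (illustrative and
 * unused, the helper name is made up): seed the checksum with ~0, run
 * it over the page range, finalize it, and compare with the value that
 * was stashed in the io tree private field at submit time.
 */
static inline int verify_range_csum_sketch(struct btrfs_root *root,
					   char *kaddr, size_t offset,
					   u64 start, u64 end, u32 expected)
{
	u32 csum = ~(u32)0;

	csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
	btrfs_csum_final(csum, (char *)&csum);
	return csum == expected ? 0 : -EIO;
}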
1949
1950 /*
1951  * This creates an orphan entry for the given inode in case something goes
1952  * wrong in the middle of an unlink/truncate.
1953  */
1954 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
1955 {
1956         struct btrfs_root *root = BTRFS_I(inode)->root;
1957         int ret = 0;
1958
1959         spin_lock(&root->list_lock);
1960
1961         /* already on the orphan list, we're good */
1962         if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
1963                 spin_unlock(&root->list_lock);
1964                 return 0;
1965         }
1966
1967         list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
1968
1969         spin_unlock(&root->list_lock);
1970
1971         /*
1972          * insert an orphan item to track this unlinked/truncated file
1973          */
1974         ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);
1975
1976         return ret;
1977 }
1978
1979 /*
1980  * We have done the truncate/delete so we can go ahead and remove the orphan
1981  * item for this particular inode.
1982  */
1983 int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
1984 {
1985         struct btrfs_root *root = BTRFS_I(inode)->root;
1986         int ret = 0;
1987
1988         spin_lock(&root->list_lock);
1989
1990         if (list_empty(&BTRFS_I(inode)->i_orphan)) {
1991                 spin_unlock(&root->list_lock);
1992                 return 0;
1993         }
1994
1995         list_del_init(&BTRFS_I(inode)->i_orphan);
1996         if (!trans) {
1997                 spin_unlock(&root->list_lock);
1998                 return 0;
1999         }
2000
2001         spin_unlock(&root->list_lock);
2002
2003         ret = btrfs_del_orphan_item(trans, root, inode->i_ino);
2004
2005         return ret;
2006 }
2007
2008 /*
2009  * this cleans up any orphans that may be left on the list from the last use
2010  * of this root.
2011  */
2012 void btrfs_orphan_cleanup(struct btrfs_root *root)
2013 {
2014         struct btrfs_path *path;
2015         struct extent_buffer *leaf;
2016         struct btrfs_item *item;
2017         struct btrfs_key key, found_key;
2018         struct btrfs_trans_handle *trans;
2019         struct inode *inode;
2020         int ret = 0, nr_unlink = 0, nr_truncate = 0;
2021
2022         path = btrfs_alloc_path();
2023         if (!path)
2024                 return;
2025         path->reada = -1;
2026
2027         key.objectid = BTRFS_ORPHAN_OBJECTID;
2028         btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
2029         key.offset = (u64)-1;
2030
2031
2032         while (1) {
2033                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2034                 if (ret < 0) {
2035                         printk(KERN_ERR "Error searching slot for orphan: %d"
2036                                "\n", ret);
2037                         break;
2038                 }
2039
2040                 /*
2041                  * ret == 0 means we found what we were searching for, which
2042                  * is weird, but possible, so only adjust the path if we didn't
2043                  * find the key, and see if we have stuff that matches
2044                  */
2045                 if (ret > 0) {
2046                         if (path->slots[0] == 0)
2047                                 break;
2048                         path->slots[0]--;
2049                 }
2050
2051                 /* pull out the item */
2052                 leaf = path->nodes[0];
2053                 item = btrfs_item_nr(leaf, path->slots[0]);
2054                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2055
2056                 /* make sure the item matches what we want */
2057                 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
2058                         break;
2059                 if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
2060                         break;
2061
2062                 /* release the path since we're done with it */
2063                 btrfs_release_path(root, path);
2064
2065                 /*
2066                  * this is where we are basically btrfs_lookup, without the
2067                  * crossing root thing.  We store the inode number in the
2068                  * offset of the orphan item.
2069                  */
2070                 found_key.objectid = found_key.offset;
2071                 found_key.type = BTRFS_INODE_ITEM_KEY;
2072                 found_key.offset = 0;
2073                 inode = btrfs_iget(root->fs_info->sb, &found_key, root);
2074                 if (IS_ERR(inode))
2075                         break;
2076
2077                 /*
2078                  * add this inode to the orphan list so btrfs_orphan_del does
2079                  * the proper thing when we hit it
2080                  */
2081                 spin_lock(&root->list_lock);
2082                 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
2083                 spin_unlock(&root->list_lock);
2084
2085                 /*
2086                  * if this is a bad inode, it means we actually succeeded in
2087                  * removing the inode, but not the orphan record, which means
2088                  * we need to manually delete the orphan since iput will just
2089                  * do a destroy_inode
2090                  */
2091                 if (is_bad_inode(inode)) {
2092                         trans = btrfs_start_transaction(root, 1);
2093                         btrfs_orphan_del(trans, inode);
2094                         btrfs_end_transaction(trans, root);
2095                         iput(inode);
2096                         continue;
2097                 }
2098
2099                 /* if we have links, this was a truncate, let's do that */
2100                 if (inode->i_nlink) {
2101                         nr_truncate++;
2102                         btrfs_truncate(inode);
2103                 } else {
2104                         nr_unlink++;
2105                 }
2106
2107                 /* this will do delete_inode and everything for us */
2108                 iput(inode);
2109         }
2110
2111         if (nr_unlink)
2112                 printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
2113         if (nr_truncate)
2114                 printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
2115
2116         btrfs_free_path(path);
2117 }
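
/*
 * Note on the search pattern above (added for clarity): searching for
 * (BTRFS_ORPHAN_OBJECTID, BTRFS_ORPHAN_ITEM_KEY, (u64)-1) lands the
 * path just past the last possible orphan item, so on ret > 0 we step
 * back one slot and check whether the item there really is an orphan
 * item.  Each pass disposes of the inode it found, so the "find the
 * last one" loop makes forward progress until the list is empty.
 */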
2118
2119 /*
2120  * very simple check to peek ahead in the leaf looking for xattrs.  If we
2121  * don't find any xattrs, we know there can't be any acls.
2122  *
2123  * slot is the slot the inode is in, objectid is the objectid of the inode
2124  */
2125 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
2126                                           int slot, u64 objectid)
2127 {
2128         u32 nritems = btrfs_header_nritems(leaf);
2129         struct btrfs_key found_key;
2130         int scanned = 0;
2131
2132         slot++;
2133         while (slot < nritems) {
2134                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2135
2136                 /* we found a different objectid, there must not be acls */
2137                 if (found_key.objectid != objectid)
2138                         return 0;
2139
2140                 /* we found an xattr, assume we've got an acl */
2141                 if (found_key.type == BTRFS_XATTR_ITEM_KEY)
2142                         return 1;
2143
2144                 /*
2145                  * we found a key greater than an xattr key, there can't
2146                  * be any acls later on
2147                  */
2148                 if (found_key.type > BTRFS_XATTR_ITEM_KEY)
2149                         return 0;
2150
2151                 slot++;
2152                 scanned++;
2153
2154                 /*
2155                  * it goes inode, inode backrefs, xattrs, extents,
2156                  * so if there are a ton of hard links to an inode there can
2157                  * be a lot of backrefs.  Don't waste time searching too hard,
2158                  * this is just an optimization
2159                  */
2160                 if (scanned >= 8)
2161                         break;
2162         }
2163         /* we hit the end of the leaf before we found an xattr or
2164          * something larger than an xattr.  We have to assume the inode
2165          * has acls
2166          */
2167         return 1;
2168 }
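
/*
 * Item ordering this scan relies on (illustrative): within a single
 * objectid, keys sort by type, roughly
 *
 *	BTRFS_INODE_ITEM_KEY < BTRFS_INODE_REF_KEY <
 *	BTRFS_XATTR_ITEM_KEY < BTRFS_EXTENT_DATA_KEY < ...
 *
 * so a short forward scan from the inode item either finds an xattr,
 * finds a key type past the xattrs (no acls possible), or bails out
 * after 8 slots and conservatively assumes acls exist.
 */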
2169
2170 /*
2171  * read an inode from the btree into the in-memory inode
2172  */
2173 static void btrfs_read_locked_inode(struct inode *inode)
2174 {
2175         struct btrfs_path *path;
2176         struct extent_buffer *leaf;
2177         struct btrfs_inode_item *inode_item;
2178         struct btrfs_timespec *tspec;
2179         struct btrfs_root *root = BTRFS_I(inode)->root;
2180         struct btrfs_key location;
2181         int maybe_acls;
2182         u64 alloc_group_block;
2183         u32 rdev;
2184         int ret;
2185
2186         path = btrfs_alloc_path();
2187         BUG_ON(!path);
2188         memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
2189
2190         ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
2191         if (ret)
2192                 goto make_bad;
2193
2194         leaf = path->nodes[0];
2195         inode_item = btrfs_item_ptr(leaf, path->slots[0],
2196                                     struct btrfs_inode_item);
2197
2198         inode->i_mode = btrfs_inode_mode(leaf, inode_item);
2199         inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
2200         inode->i_uid = btrfs_inode_uid(leaf, inode_item);
2201         inode->i_gid = btrfs_inode_gid(leaf, inode_item);
2202         btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
2203
2204         tspec = btrfs_inode_atime(inode_item);
2205         inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2206         inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2207
2208         tspec = btrfs_inode_mtime(inode_item);
2209         inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2210         inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2211
2212         tspec = btrfs_inode_ctime(inode_item);
2213         inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2214         inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2215
2216         inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
2217         BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
2218         BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
2219         inode->i_generation = BTRFS_I(inode)->generation;
2220         inode->i_rdev = 0;
2221         rdev = btrfs_inode_rdev(leaf, inode_item);
2222
2223         BTRFS_I(inode)->index_cnt = (u64)-1;
2224         BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
2225
2226         alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
2227
2228         /*
2229          * try to precache a NULL acl entry for files that don't have
2230          * any xattrs or acls
2231          */
2232         maybe_acls = acls_after_inode_item(leaf, path->slots[0], inode->i_ino);
2233         if (!maybe_acls)
2234                 cache_no_acl(inode);
2235
2236         BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
2237                                                 alloc_group_block, 0);
2238         btrfs_free_path(path);
2239         inode_item = NULL;
2240
2241         switch (inode->i_mode & S_IFMT) {
2242         case S_IFREG:
2243                 inode->i_mapping->a_ops = &btrfs_aops;
2244                 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2245                 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
2246                 inode->i_fop = &btrfs_file_operations;
2247                 inode->i_op = &btrfs_file_inode_operations;
2248                 break;
2249         case S_IFDIR:
2250                 inode->i_fop = &btrfs_dir_file_operations;
2251                 if (root == root->fs_info->tree_root)
2252                         inode->i_op = &btrfs_dir_ro_inode_operations;
2253                 else
2254                         inode->i_op = &btrfs_dir_inode_operations;
2255                 break;
2256         case S_IFLNK:
2257                 inode->i_op = &btrfs_symlink_inode_operations;
2258                 inode->i_mapping->a_ops = &btrfs_symlink_aops;
2259                 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2260                 break;
2261         default:
2262                 inode->i_op = &btrfs_special_inode_operations;
2263                 init_special_inode(inode, inode->i_mode, rdev);
2264                 break;
2265         }
2266
2267         btrfs_update_iflags(inode);
2268         return;
2269
2270 make_bad:
2271         btrfs_free_path(path);
2272         make_bad_inode(inode);
2273 }
2274
2275 /*
2276  * given a leaf and an inode, copy the inode fields into the leaf
2277  */
2278 static void fill_inode_item(struct btrfs_trans_handle *trans,
2279                             struct extent_buffer *leaf,
2280                             struct btrfs_inode_item *item,
2281                             struct inode *inode)
2282 {
2283         btrfs_set_inode_uid(leaf, item, inode->i_uid);
2284         btrfs_set_inode_gid(leaf, item, inode->i_gid);
2285         btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
2286         btrfs_set_inode_mode(leaf, item, inode->i_mode);
2287         btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
2288
2289         btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
2290                                inode->i_atime.tv_sec);
2291         btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
2292                                 inode->i_atime.tv_nsec);
2293
2294         btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
2295                                inode->i_mtime.tv_sec);
2296         btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
2297                                 inode->i_mtime.tv_nsec);
2298
2299         btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
2300                                inode->i_ctime.tv_sec);
2301         btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
2302                                 inode->i_ctime.tv_nsec);
2303
2304         btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
2305         btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
2306         btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
2307         btrfs_set_inode_transid(leaf, item, trans->transid);
2308         btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
2309         btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
2310         btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group);
2311 }
2312
2313 /*
2314  * copy everything in the in-memory inode into the btree.
2315  */
2316 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2317                                 struct btrfs_root *root, struct inode *inode)
2318 {
2319         struct btrfs_inode_item *inode_item;
2320         struct btrfs_path *path;
2321         struct extent_buffer *leaf;
2322         int ret;
2323
2324         path = btrfs_alloc_path();
2325         BUG_ON(!path);
2326         path->leave_spinning = 1;
2327         ret = btrfs_lookup_inode(trans, root, path,
2328                                  &BTRFS_I(inode)->location, 1);
2329         if (ret) {
2330                 if (ret > 0)
2331                         ret = -ENOENT;
2332                 goto failed;
2333         }
2334
2335         btrfs_unlock_up_safe(path, 1);
2336         leaf = path->nodes[0];
2337         inode_item = btrfs_item_ptr(leaf, path->slots[0],
2338                                   struct btrfs_inode_item);
2339
2340         fill_inode_item(trans, leaf, inode_item, inode);
2341         btrfs_mark_buffer_dirty(leaf);
2342         btrfs_set_inode_last_trans(trans, inode);
2343         ret = 0;
2344 failed:
2345         btrfs_free_path(path);
2346         return ret;
2347 }
2348
2349
2350 /*
2351  * unlink helper that gets used here in inode.c and in the tree logging
2352  * recovery code.  It removes a link in a directory with a given name, and
2353  * also drops the back refs in the inode to the directory
2354  */
2355 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2356                        struct btrfs_root *root,
2357                        struct inode *dir, struct inode *inode,
2358                        const char *name, int name_len)
2359 {
2360         struct btrfs_path *path;
2361         int ret = 0;
2362         struct extent_buffer *leaf;
2363         struct btrfs_dir_item *di;
2364         struct btrfs_key key;
2365         u64 index;
2366
2367         path = btrfs_alloc_path();
2368         if (!path) {
2369                 ret = -ENOMEM;
2370                 goto err;
2371         }
2372
2373         path->leave_spinning = 1;
2374         di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
2375                                     name, name_len, -1);
2376         if (IS_ERR(di)) {
2377                 ret = PTR_ERR(di);
2378                 goto err;
2379         }
2380         if (!di) {
2381                 ret = -ENOENT;
2382                 goto err;
2383         }
2384         leaf = path->nodes[0];
2385         btrfs_dir_item_key_to_cpu(leaf, di, &key);
2386         ret = btrfs_delete_one_dir_name(trans, root, path, di);
2387         if (ret)
2388                 goto err;
2389         btrfs_release_path(root, path);
2390
2391         ret = btrfs_del_inode_ref(trans, root, name, name_len,
2392                                   inode->i_ino,
2393                                   dir->i_ino, &index);
2394         if (ret) {
2395                 printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
2396                        "inode %lu parent %lu\n", name_len, name,
2397                        inode->i_ino, dir->i_ino);
2398                 goto err;
2399         }
2400
2401         di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
2402                                          index, name, name_len, -1);
2403         if (IS_ERR(di)) {
2404                 ret = PTR_ERR(di);
2405                 goto err;
2406         }
2407         if (!di) {
2408                 ret = -ENOENT;
2409                 goto err;
2410         }
2411         ret = btrfs_delete_one_dir_name(trans, root, path, di);
2412         btrfs_release_path(root, path);
2413
2414         ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
2415                                          inode, dir->i_ino);
2416         BUG_ON(ret != 0 && ret != -ENOENT);
2417
2418         ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
2419                                            dir, index);
2420         BUG_ON(ret);
2421 err:
2422         btrfs_free_path(path);
2423         if (ret)
2424                 goto out;
2425
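        /*
         * each name is recorded in the directory twice, once in the dir
         * item and once in the dir index item, so directory i_size is
         * accounted as name_len * 2 per entry
         */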
2426         btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2427         inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2428         btrfs_update_inode(trans, root, dir);
2429         btrfs_drop_nlink(inode);
2430         ret = btrfs_update_inode(trans, root, inode);
2431 out:
2432         return ret;
2433 }
2434
2435 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
2436 {
2437         struct btrfs_root *root;
2438         struct btrfs_trans_handle *trans;
2439         struct inode *inode = dentry->d_inode;
2440         int ret;
2441         unsigned long nr = 0;
2442
2443         root = BTRFS_I(dir)->root;
2444
2445         trans = btrfs_start_transaction(root, 1);
2446
2447         btrfs_set_trans_block_group(trans, dir);
2448
2449         btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
2450
2451         ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2452                                  dentry->d_name.name, dentry->d_name.len);
2453
2454         if (inode->i_nlink == 0)
2455                 ret = btrfs_orphan_add(trans, inode);
2456
2457         nr = trans->blocks_used;
2458
2459         btrfs_end_transaction_throttle(trans, root);
2460         btrfs_btree_balance_dirty(root, nr);
2461         return ret;
2462 }
2463
2464 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
2465                         struct btrfs_root *root,
2466                         struct inode *dir, u64 objectid,
2467                         const char *name, int name_len)
2468 {
2469         struct btrfs_path *path;
2470         struct extent_buffer *leaf;
2471         struct btrfs_dir_item *di;
2472         struct btrfs_key key;
2473         u64 index;
2474         int ret;
2475
2476         path = btrfs_alloc_path();
2477         if (!path)
2478                 return -ENOMEM;
2479
2480         di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
2481                                    name, name_len, -1);
2482         BUG_ON(!di || IS_ERR(di));
2483
2484         leaf = path->nodes[0];
2485         btrfs_dir_item_key_to_cpu(leaf, di, &key);
2486         WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
2487         ret = btrfs_delete_one_dir_name(trans, root, path, di);
2488         BUG_ON(ret);
2489         btrfs_release_path(root, path);
2490
2491         ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
2492                                  objectid, root->root_key.objectid,
2493                                  dir->i_ino, &index, name, name_len);
2494         if (ret < 0) {
2495                 BUG_ON(ret != -ENOENT);
2496                 di = btrfs_search_dir_index_item(root, path, dir->i_ino,
2497                                                  name, name_len);
2498                 BUG_ON(!di || IS_ERR(di));
2499
2500                 leaf = path->nodes[0];
2501                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2502                 btrfs_release_path(root, path);
2503                 index = key.offset;
2504         }
2505
2506         di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
2507                                          index, name, name_len, -1);
2508         BUG_ON(!di || IS_ERR(di));
2509
2510         leaf = path->nodes[0];
2511         btrfs_dir_item_key_to_cpu(leaf, di, &key);
2512         WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
2513         ret = btrfs_delete_one_dir_name(trans, root, path, di);
2514         BUG_ON(ret);
2515         btrfs_release_path(root, path);
2516
2517         btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2518         dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2519         ret = btrfs_update_inode(trans, root, dir);
2520         BUG_ON(ret);
2521         dir->i_sb->s_dirt = 1;
2522
2523         btrfs_free_path(path);
2524         return 0;
2525 }
2526
2527 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
2528 {
2529         struct inode *inode = dentry->d_inode;
2530         int err = 0;
2531         int ret;
2532         struct btrfs_root *root = BTRFS_I(dir)->root;
2533         struct btrfs_trans_handle *trans;
2534         unsigned long nr = 0;
2535
2536         if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
2537             inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
2538                 return -ENOTEMPTY;
2539
2540         trans = btrfs_start_transaction(root, 1);
2541         btrfs_set_trans_block_group(trans, dir);
2542
2543         if (unlikely(inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
2544                 err = btrfs_unlink_subvol(trans, root, dir,
2545                                           BTRFS_I(inode)->location.objectid,
2546                                           dentry->d_name.name,
2547                                           dentry->d_name.len);
2548                 goto out;
2549         }
2550
2551         err = btrfs_orphan_add(trans, inode);
2552         if (err)
2553                 goto out;
2554
2555         /* now the directory is empty */
2556         err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2557                                  dentry->d_name.name, dentry->d_name.len);
2558         if (!err)
2559                 btrfs_i_size_write(inode, 0);
2560 out:
2561         nr = trans->blocks_used;
2562         ret = btrfs_end_transaction_throttle(trans, root);
2563         btrfs_btree_balance_dirty(root, nr);
2564
2565         if (ret && !err)
2566                 err = ret;
2567         return err;
2568 }
2569
2570 #if 0
2571 /*
2572  * when truncating bytes in a file, it is possible to avoid reading
2573  * the leaves that contain only checksum items.  This can be the
2574  * majority of the IO required to delete a large file, but it must
2575  * be done carefully.
2576  *
2577  * The keys in the level just above the leaves are checked to make sure
2578  * the lowest key in a given leaf is a csum key, and starts at an offset
2579  * after the new size.
2580  *
2581  * Then the key for the next leaf is checked to make sure it also has
2582  * a checksum item for the same file.  If it does, we know our target leaf
2583  * contains only checksum items, and it can be safely freed without reading
2584  * it.
2585  *
2586  * This is just an optimization targeted at large files.  It may do
2587  * nothing.  It will return 0 unless things went badly.
2588  */
2589 static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
2590                                      struct btrfs_root *root,
2591                                      struct btrfs_path *path,
2592                                      struct inode *inode, u64 new_size)
2593 {
2594         struct btrfs_key key;
2595         int ret;
2596         int nritems;
2597         struct btrfs_key found_key;
2598         struct btrfs_key other_key;
2599         struct btrfs_leaf_ref *ref;
2600         u64 leaf_gen;
2601         u64 leaf_start;
2602
2603         path->lowest_level = 1;
2604         key.objectid = inode->i_ino;
2605         key.type = BTRFS_CSUM_ITEM_KEY;
2606         key.offset = new_size;
2607 again:
2608         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2609         if (ret < 0)
2610                 goto out;
2611
2612         if (path->nodes[1] == NULL) {
2613                 ret = 0;
2614                 goto out;
2615         }
2616         ret = 0;
2617         btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]);
2618         nritems = btrfs_header_nritems(path->nodes[1]);
2619
2620         if (!nritems)
2621                 goto out;
2622
2623         if (path->slots[1] >= nritems)
2624                 goto next_node;
2625
2626         /* did we find a key greater than anything we want to delete? */
2627         if (found_key.objectid > inode->i_ino ||
2628            (found_key.objectid == inode->i_ino && found_key.type > key.type))
2629                 goto out;
2630
2631         /* we check the next key in the node to make sure the leaf contains
2632          * only checksum items.  This comparison doesn't work if our
2633          * leaf is the last one in the node
2634          */
2635         if (path->slots[1] + 1 >= nritems) {
2636 next_node:
2637                 /* search forward from the last key in the node, this
2638                  * will bring us into the next node in the tree
2639                  */
2640                 btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1);
2641
2642                 /* unlikely, but we inc below, so check to be safe */
2643                 if (found_key.offset == (u64)-1)
2644                         goto out;
2645
2646                 /* search_forward needs a path with locks held, do the
2647                  * search again for the original key.  It is possible
2648                  * this will race with a balance and return a path that
2649                  * we could modify, but this drop is just an optimization
2650                  * and is allowed to miss some leaves.
2651                  */
2652                 btrfs_release_path(root, path);
2653                 found_key.offset++;
2654
2655                 /* setup a max key for search_forward */
2656                 other_key.offset = (u64)-1;
2657                 other_key.type = key.type;
2658                 other_key.objectid = key.objectid;
2659
2660                 path->keep_locks = 1;
2661                 ret = btrfs_search_forward(root, &found_key, &other_key,
2662                                            path, 0, 0);
2663                 path->keep_locks = 0;
2664                 if (ret || found_key.objectid != key.objectid ||
2665                     found_key.type != key.type) {
2666                         ret = 0;
2667                         goto out;
2668                 }
2669
2670                 key.offset = found_key.offset;
2671                 btrfs_release_path(root, path);
2672                 cond_resched();
2673                 goto again;
2674         }

        /* we know there's one more slot after us in the tree,
         * read that key so we can verify it is also a checksum item
         */
        btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1);

        if (found_key.objectid < inode->i_ino)
                goto next_key;

        if (found_key.type != key.type || found_key.offset < new_size)
                goto next_key;

        /*
         * if the key for the next leaf isn't a csum key from this objectid,
         * we can't be sure there aren't good items inside this leaf.
         * Bail out
         */
        if (other_key.objectid != inode->i_ino || other_key.type != key.type)
                goto out;

        leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]);
        leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]);
        /*
         * it is safe to delete this leaf, it contains only
         * csum items from this inode at an offset >= new_size
         */
        ret = btrfs_del_leaf(trans, root, path, leaf_start);
        BUG_ON(ret);

        if (root->ref_cows && leaf_gen < trans->transid) {
                ref = btrfs_alloc_leaf_ref(root, 0);
                if (ref) {
                        ref->root_gen = root->root_key.offset;
                        ref->bytenr = leaf_start;
                        ref->owner = 0;
                        ref->generation = leaf_gen;
                        ref->nritems = 0;

                        btrfs_sort_leaf_ref(ref);

                        ret = btrfs_add_leaf_ref(root, ref, 0);
                        WARN_ON(ret);
                        btrfs_free_leaf_ref(root, ref);
                } else {
                        WARN_ON(1);
                }
        }
next_key:
        btrfs_release_path(root, path);

        if (other_key.objectid == inode->i_ino &&
            other_key.type == key.type && other_key.offset > key.offset) {
                key.offset = other_key.offset;
                cond_resched();
                goto again;
        }
        ret = 0;
out:
        /* fixup any changes we've made to the path */
        path->lowest_level = 0;
        path->keep_locks = 0;
        btrfs_release_path(root, path);
        return ret;
}

#endif


/*
 * this can truncate away extent items, csum items and directory items.
 * It starts at a high offset and removes keys until it can't find
 * any higher than new_size
 *
 * csum items that cross the new i_size are truncated to the new size
 * as well.
 *
 * min_type is the minimum key type to truncate down to.  If set to 0, this
 * will kill all the items on this inode, including the INODE_ITEM_KEY.
 */
noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root,
                                        struct inode *inode,
                                        u64 new_size, u32 min_type)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 found_type = (u8)-1;
        struct extent_buffer *leaf;
        struct btrfs_file_extent_item *fi;
        u64 extent_start = 0;
        u64 extent_num_bytes = 0;
        u64 extent_offset = 0;
        u64 item_end = 0;
        int found_extent;
        int del_item;
        int pending_del_nr = 0;
        int pending_del_slot = 0;
        int extent_type = -1;
        int encoding;
        u64 mask = root->sectorsize - 1;

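        /*
         * For COW-able roots, cached extent mappings past the new size
         * are about to go stale; the call below drops everything from
         * the sector-aligned new size onward before we walk the tree.
         */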
        if (root->ref_cows)
                btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
        path = btrfs_alloc_path();
        BUG_ON(!path);
        path->reada = -1;

        /* FIXME, add redo link to tree so we don't leak on crash */
        key.objectid = inode->i_ino;
        key.offset = (u64)-1;
        key.type = (u8)-1;
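        /*
         * Start the search at the largest possible key for this inode:
         * type (u8)-1 at offset (u64)-1 sorts after every real item, so
         * the loop below walks the inode's items from highest key to
         * lowest, deleting as it goes.
         */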

search_again:
        path->leave_spinning = 1;
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0)
                goto error;

        if (ret > 0) {
                /* there are no items in the tree for us to truncate;
                 * we're done
                 */
                if (path->slots[0] == 0) {
                        ret = 0;
                        goto error;
                }
                path->slots[0]--;
        }

        while (1) {
                fi = NULL;
                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
                found_type = btrfs_key_type(&found_key);
                encoding = 0;

                if (found_key.objectid != inode->i_ino)
                        break;

                if (found_type < min_type)
                        break;

                item_end = found_key.offset;
                if (found_type == BTRFS_EXTENT_DATA_KEY) {
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        extent_type = btrfs_file_extent_type(leaf, fi);
                        encoding = btrfs_file_extent_compression(leaf, fi);
                        encoding |= btrfs_file_extent_encryption(leaf, fi);
                        encoding |= btrfs_file_extent_other_encoding(leaf, fi);

                        if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
                                item_end +=
                                    btrfs_file_extent_num_bytes(leaf, fi);
                        } else {
                                item_end += btrfs_file_extent_inline_len(leaf,
                                                                         fi);
                        }
                        item_end--;
                }
                if (item_end < new_size) {
                        if (found_type == BTRFS_DIR_ITEM_KEY)
                                found_type = BTRFS_INODE_ITEM_KEY;
                        else if (found_type == BTRFS_EXTENT_ITEM_KEY)
                                found_type = BTRFS_EXTENT_DATA_KEY;
                        else if (found_type == BTRFS_EXTENT_DATA_KEY)
                                found_type = BTRFS_XATTR_ITEM_KEY;
                        else if (found_type == BTRFS_XATTR_ITEM_KEY)
                                found_type = BTRFS_INODE_REF_KEY;
                        else if (found_type)
                                found_type--;
                        else
                                break;
                        btrfs_set_key_type(&key, found_type);
                        goto next;
                }
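                /*
                 * The stepping above skips whole key-type ranges instead
                 * of decrementing one type at a time.  Rough example
                 * (numeric type values assumed from this kernel's
                 * ctree.h, e.g. BTRFS_XATTR_ITEM_KEY == 24 and
                 * BTRFS_INODE_REF_KEY == 12): once the xattr items are
                 * behind us we jump straight to the inode refs rather
                 * than probing types 23..13, which can't exist here.
                 */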
                if (found_key.offset >= new_size)
                        del_item = 1;
                else
                        del_item = 0;
                found_extent = 0;

                /* FIXME, shrink the extent if the ref count is only 1 */
                if (found_type != BTRFS_EXTENT_DATA_KEY)
                        goto delete;

                if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
                        u64 num_dec;
                        extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
                        if (!del_item && !encoding) {
                                u64 orig_num_bytes =
                                        btrfs_file_extent_num_bytes(leaf, fi);
                                extent_num_bytes = new_size -
                                        found_key.offset + root->sectorsize - 1;
                                extent_num_bytes = extent_num_bytes &
                                        ~((u64)root->sectorsize - 1);
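                                /*
                                 * Round the surviving length up to a
                                 * sector boundary.  Worked example with
                                 * an assumed 4K sectorsize: offset 0 and
                                 * new_size 6000 gives
                                 * (6000 + 4095) & ~4095 == 8192, so 8192
                                 * bytes of this extent stay allocated.
                                 */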
                                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                         extent_num_bytes);
                                num_dec = (orig_num_bytes -
                                           extent_num_bytes);
                                if (root->ref_cows && extent_start != 0)
                                        inode_sub_bytes(inode, num_dec);
                                btrfs_mark_buffer_dirty(leaf);
                        } else {
                                extent_num_bytes =
                                        btrfs_file_extent_disk_num_bytes(leaf,
                                                                         fi);
                                extent_offset = found_key.offset -
                                        btrfs_file_extent_offset(leaf, fi);

                                /* FIXME blocksize != 4096 */
                                num_dec = btrfs_file_extent_num_bytes(leaf, fi);
                                if (extent_start != 0) {
                                        found_extent = 1;
                                        if (root->ref_cows)
                                                inode_sub_bytes(inode, num_dec);
                                }
                        }
                } else {
                        /*
                         * we can't truncate inline items that have had
                         * special encodings
                         */
                        if (!del_item &&
                            btrfs_file_extent_compression(leaf, fi) == 0 &&
                            btrfs_file_extent_encryption(leaf, fi) == 0 &&
                            btrfs_file_extent_other_encoding(leaf, fi) == 0) {
                                u32 size = new_size - found_key.offset;

                                if (root->ref_cows) {
                                        inode_sub_bytes(inode, item_end + 1 -
                                                        new_size);
                                }
                                size =
                                    btrfs_file_extent_calc_inline_size(size);
                                ret = btrfs_truncate_item(trans, root, path,
                                                          size, 1);
                                BUG_ON(ret);
                        } else if (root->ref_cows) {
                                inode_sub_bytes(inode, item_end + 1 -
                                                found_key.offset);
                        }
                }
delete:
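                /*
                 * Doomed items are batched up: because path->slots[0]
                 * walks downward, consecutive doomed items form one
                 * contiguous run in the leaf, and a single
                 * btrfs_del_items() call (at del_pending below) removes
                 * the whole run at once.
                 */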
                if (del_item) {
                        if (!pending_del_nr) {
                                /* no pending yet, add ourselves */
                                pending_del_slot = path->slots[0];
                                pending_del_nr = 1;
                        } else if (pending_del_nr &&
                                   path->slots[0] + 1 == pending_del_slot) {
                                /* hop on the pending chunk */
                                pending_del_nr++;
                                pending_del_slot = path->slots[0];
                        } else {
                                BUG();
                        }
                } else {
                        break;
                }
                if (found_extent && root->ref_cows) {
                        btrfs_set_path_blocking(path);
                        ret = btrfs_free_extent(trans, root, extent_start,
                                                extent_num_bytes, 0,
                                                btrfs_header_owner(leaf),
                                                inode->i_ino, extent_offset);
                        BUG_ON(ret);
                }
next:
                if (path->slots[0] == 0) {
                        if (pending_del_nr)
                                goto del_pending;
                        btrfs_release_path(root, path);
                        if (found_type == BTRFS_INODE_ITEM_KEY)
                                break;
                        goto search_again;
                }

                path->slots[0]--;
                if (pending_del_nr &&
                    path->slots[0] + 1 != pending_del_slot) {
                        struct btrfs_key debug;
del_pending:
                        btrfs_item_key_to_cpu(path->nodes[0], &debug,
                                              pending_del_slot);
                        ret = btrfs_del_items(trans, root, path,
                                              pending_del_slot,
                                              pending_del_nr);
                        BUG_ON(ret);
                        pending_del_nr = 0;
                        btrfs_release_path(root, path);
                        if (found_type == BTRFS_INODE_ITEM_KEY)
                                break;
                        goto search_again;
                }
        }
        ret = 0;
error:
        if (pending_del_nr) {
                ret = btrfs_del_items(trans, root, path, pending_del_slot,
                                      pending_del_nr);
        }
        btrfs_free_path(path);
        return ret;
}
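
#if 0
/*
 * Sketch of a typical caller, mirroring the regular truncate path (not
 * compiled; transaction sizing, locking and error handling omitted).
 * With min_type set to BTRFS_EXTENT_DATA_KEY and new_size set to
 * i_size, file extents and csums past i_size are removed while lower
 * key types (xattrs, inode refs, the inode item) are left alone.
 */
static void example_truncate_to_isize(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      struct inode *inode)
{
        int ret;

        /* remove everything from the file extent items upward */
        ret = btrfs_truncate_inode_items(trans, root, inode,
                                         inode->i_size,
                                         BTRFS_EXTENT_DATA_KEY);
        WARN_ON(ret);
}
#endif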

/*
 * taken from block_truncate_page, but does COW as it zeros out
 * any bytes left in the last page in the file.
 */
static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
{
        struct inode *inode = mapping->host;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        struct btrfs_ordered_extent *ordered;
        char *kaddr;
        u32 blocksize = root->sectorsize;
        pgoff_t index = from >> PAGE_CACHE_SHIFT;
        unsigned offset = from & (PAGE_CACHE_SIZE - 1);
        struct page *page;
        int ret = 0;
        u64 page_start;
        u64 page_end;

        if ((offset & (blocksize - 1)) == 0)
                goto out;

        ret = -ENOMEM;
again:
        page = grab_cache_page(mapping, index);
        if (!page)
                goto out;

        page_start = page_offset(page);
        page_end = page_start + PAGE_CACHE_SIZE - 1;

        if (!PageUptodate(page)) {
                ret = btrfs_readpage(NULL, page);
                lock_page(page);
                if (page->mapping != mapping) {
                        unlock_page(page);
                        page_cache_release(page);
                        goto again;
                }
                if (!PageUptodate(page)) {
                        ret = -EIO;
                        goto out_unlock;
                }
        }
        wait_on_page_writeback(page);

        lock_extent(io_tree, page_start, page_end, GFP_NOFS);
        set_page_extent_mapped(page);

        ordered = btrfs_lookup_ordered_extent(inode, page_start);
        if (ordered) {
                unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
                unlock_page(page);
                page_cache_release(page);
                btrfs_start_ordered_extent(inode, ordered, 1);
                btrfs_put_ordered_extent(ordered);
                goto again;
        }
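
        /*
         * No ordered extent covers the page and the extent range is
         * locked: any earlier IO on it has completed, so it is safe to
         * flag the block delalloc and zero the tail below.
         */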
        ret = btrfs_set_extent_delalloc(inode, page_start, page_end);
        if (ret) {
                unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
                goto out_unlock;
        }

        ret = 0;
        if (offset != PAGE_CACHE_SIZE) {
                kaddr = kmap(page);
                memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
                flush_dcache_page(page);
                kunmap(page);
        }
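        /*
         * Worked example for the zeroing above (assuming 4K pages):
         * from == 6000 lands in page index 1 with offset == 1904, so
         * bytes 1904..4095 of that page are zeroed while the head of
         * the page keeps its data.
         */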
        ClearPageChecked(page);
        set_page_dirty(page);
        unlock_extent(io_tree, page_start, page_end, GFP_NOFS);

out_unlock:
        unlock_page(page);
        page_cache_release(page);
out:
        return ret;
}

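/*
 * Expand a file out to 'size' without writing any new data: the tail of
 * the old last page is zeroed via btrfs_truncate_page(), and the
 * sector-aligned gap between the old and new i_size is filled with hole
 * file extents under a single transaction.
 */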
int btrfs_cont_expand(struct inode *inode, loff_t size)
{
        struct btrfs_trans_handle *trans;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        struct extent_map *em;
        u64 mask = root->sectorsize - 1;
        u64 hole_start = (inode->i_size + mask) & ~mask;
        u64 block_end = (size + mask) & ~mask;
        u64 last_byte;
        u64 cur_offset;
        u64 hole_size;
        int err = 0;

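        /*
         * hole_start and block_end round the old and new i_size up to
         * sector boundaries.  Example with an assumed 4K sectorsize:
         * i_size == 10000 gives hole_start == 12288 and size == 20000
         * gives block_end == 20480; only that aligned gap needs holes.
         */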
        if (size <= hole_start)
                return 0;

        btrfs_truncate_page(inode->i_mapping, inode->i_size);

        while (1) {
                struct btrfs_ordered_extent *ordered;
                btrfs_wait_ordered_range(inode, hole_start,
                                         block_end - hole_start);
                lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
                ordered = btrfs_lookup_ordered_extent(inode, hole_start);
                if (!ordered)
                        break;
                unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
                btrfs_put_ordered_extent(ordered);
        }

        trans = btrfs_start_transaction(root, 1);
        btrfs_set_trans_block_group(trans, inode);

        cur_offset = hole_start;
        while (1) {
                em = btrfs_get_extent(inode, NULL, 0, cur_offset,
                                block_end - cur_offset, 0);
                BUG_ON(IS_ERR(em) || !em);
                last_byte = min(extent_map_end(em), block_end);
                last_byte = (last_byte + mask) & ~mask;
                if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
                        u64 hint_byte = 0;