1 /*
2  *  linux/fs/ext4/inode.c
3  *
4  * Copyright (C) 1992, 1993, 1994, 1995
5  * Remy Card (card@masi.ibp.fr)
6  * Laboratoire MASI - Institut Blaise Pascal
7  * Universite Pierre et Marie Curie (Paris VI)
8  *
9  *  from
10  *
11  *  linux/fs/minix/inode.c
12  *
13  *  Copyright (C) 1991, 1992  Linus Torvalds
14  *
15  *  Goal-directed block allocation by Stephen Tweedie
16  *      (sct@redhat.com), 1993, 1998
17  *  Big-endian to little-endian byte-swapping/bitmaps by
18  *        David S. Miller (davem@caip.rutgers.edu), 1995
19  *  64-bit file support on 64-bit platforms by Jakub Jelinek
20  *      (jj@sunsite.ms.mff.cuni.cz)
21  *
22  *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
23  */
24
25 #include <linux/module.h>
26 #include <linux/fs.h>
27 #include <linux/time.h>
28 #include <linux/ext4_jbd2.h>
29 #include <linux/jbd2.h>
30 #include <linux/highuid.h>
31 #include <linux/pagemap.h>
32 #include <linux/quotaops.h>
33 #include <linux/string.h>
34 #include <linux/buffer_head.h>
35 #include <linux/writeback.h>
36 #include <linux/mpage.h>
37 #include <linux/uio.h>
38 #include <linux/bio.h>
39 #include "xattr.h"
40 #include "acl.h"
41
42 /*
43  * Test whether an inode is a fast symlink.
44  */
45 static int ext4_inode_is_fast_symlink(struct inode *inode)
46 {
47         int ea_blocks = EXT4_I(inode)->i_file_acl ?
48                 (inode->i_sb->s_blocksize >> 9) : 0;
49
50         return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
51 }
52
53 /*
54  * The ext4 forget function must perform a revoke if we are freeing data
55  * which has been journaled.  Metadata (e.g. indirect blocks) must be
56  * revoked in all cases.
57  *
58  * "bh" may be NULL: a metadata block may have been freed from memory
59  * but there may still be a record of it in the journal, and that record
60  * still needs to be revoked.
61  */
62 int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
63                         struct buffer_head *bh, ext4_fsblk_t blocknr)
64 {
65         int err;
66
67         might_sleep();
68
69         BUFFER_TRACE(bh, "enter");
70
71         jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
72                   "data mode %lx\n",
73                   bh, is_metadata, inode->i_mode,
74                   test_opt(inode->i_sb, DATA_FLAGS));
75
76         /* Never use the revoke function if we are doing full data
77          * journaling: there is no need to, and a V1 superblock won't
78          * support it.  Otherwise, only skip the revoke on un-journaled
79          * data blocks. */
80
81         if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
82             (!is_metadata && !ext4_should_journal_data(inode))) {
83                 if (bh) {
84                         BUFFER_TRACE(bh, "call jbd2_journal_forget");
85                         return ext4_journal_forget(handle, bh);
86                 }
87                 return 0;
88         }
89
90         /*
91          * data!=journal && (is_metadata || should_journal_data(inode))
92          */
93         BUFFER_TRACE(bh, "call ext4_journal_revoke");
94         err = ext4_journal_revoke(handle, blocknr, bh);
95         if (err)
96                 ext4_abort(inode->i_sb, __FUNCTION__,
97                            "error %d when attempting revoke", err);
98         BUFFER_TRACE(bh, "exit");
99         return err;
100 }
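/*
 * Illustrative summary of the decision above (a sketch; first matching
 * rule wins, and the function body is authoritative):
 *
 *	1) data=journal mount:                      forget, never revoke
 *	2) data block, inode not journalling data:  forget
 *	3) otherwise (metadata, or journaled data): revoke
 */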
101
102 /*
103  * Work out how many blocks we need to proceed with the next chunk of a
104  * truncate transaction.
105  */
106 static unsigned long blocks_for_truncate(struct inode *inode)
107 {
108         unsigned long needed;
109
110         needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
111
112         /* Give ourselves just enough room to cope with inodes in which
113          * i_blocks is corrupt: we've seen disk corruptions in the past
114          * which resulted in random data in an inode which looked enough
115          * like a regular file for ext4 to try to delete it.  Things
116          * will go a bit crazy if that happens, but at least we should
117          * try not to panic the whole kernel. */
118         if (needed < 2)
119                 needed = 2;
120
121         /* But we need to bound the transaction so we don't overflow the
122          * journal. */
123         if (needed > EXT4_MAX_TRANS_DATA)
124                 needed = EXT4_MAX_TRANS_DATA;
125
126         return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
127 }
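/*
 * Worked example (a sketch; the exact constants depend on the
 * filesystem): with 4KB blocks s_blocksize_bits is 12, so an inode with
 * i_blocks == 800 (512-byte sectors) gives needed = 800 >> 3 = 100, and
 * we ask for EXT4_DATA_TRANS_BLOCKS(sb) + 100 credits -- unless 100 had
 * exceeded EXT4_MAX_TRANS_DATA, in which case it would be clamped first.
 */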
128
129 /*
130  * Truncate transactions can be complex and absolutely huge.  So we need to
131  * be able to restart the transaction at a convenient checkpoint to make
132  * sure we don't overflow the journal.
133  *
134  * start_transaction gets us a new handle for a truncate transaction,
135  * and extend_transaction tries to extend the existing one a bit.  If
136  * extend fails, we need to propagate the failure up and restart the
137  * transaction in the top-level truncate loop. --sct
138  */
139 static handle_t *start_transaction(struct inode *inode)
140 {
141         handle_t *result;
142
143         result = ext4_journal_start(inode, blocks_for_truncate(inode));
144         if (!IS_ERR(result))
145                 return result;
146
147         ext4_std_error(inode->i_sb, PTR_ERR(result));
148         return result;
149 }
150
151 /*
152  * Try to extend this transaction for the purposes of truncation.
153  *
154  * Returns 0 if we managed to create more room.  If we can't create more
155  * room and the transaction must be restarted, we return 1.
156  */
157 static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
158 {
159         if (handle->h_buffer_credits > EXT4_RESERVE_TRANS_BLOCKS)
160                 return 0;
161         if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
162                 return 0;
163         return 1;
164 }
165
166 /*
167  * Restart the transaction associated with *handle.  This does a commit,
168  * so before we call here everything must be consistently dirtied against
169  * this transaction.
170  */
171 static int ext4_journal_test_restart(handle_t *handle, struct inode *inode)
172 {
173         jbd_debug(2, "restarting handle %p\n", handle);
174         return ext4_journal_restart(handle, blocks_for_truncate(inode));
175 }
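/*
 * Sketch of the intended caller pattern for the two helpers above (the
 * real loop lives in the truncate code later in this file):
 *
 *	if (try_to_extend_transaction(handle, inode)) {
 *		... dirty everything consistently, then ...
 *		ext4_journal_test_restart(handle, inode);
 *	}
 */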
176
177 /*
178  * Called at the last iput() if i_nlink is zero.
179  */
180 void ext4_delete_inode(struct inode *inode)
181 {
182         handle_t *handle;
183
184         truncate_inode_pages(&inode->i_data, 0);
185
186         if (is_bad_inode(inode))
187                 goto no_delete;
188
189         handle = start_transaction(inode);
190         if (IS_ERR(handle)) {
191                 /*
192                  * If we're going to skip the normal cleanup, we still need to
193                  * make sure that the in-core orphan linked list is properly
194                  * cleaned up.
195                  */
196                 ext4_orphan_del(NULL, inode);
197                 goto no_delete;
198         }
199
200         if (IS_SYNC(inode))
201                 handle->h_sync = 1;
202         inode->i_size = 0;
203         if (inode->i_blocks)
204                 ext4_truncate(inode);
205         /*
206          * Kill off the orphan record which ext4_truncate created.
207          * AKPM: I think this can be inside the above `if'.
208          * Note that ext4_orphan_del() has to be able to cope with the
209          * deletion of a non-existent orphan - this is because we don't
210          * know if ext4_truncate() actually created an orphan record.
211          * (Well, we could do this if we need to, but heck - it works)
212          */
213         ext4_orphan_del(handle, inode);
214         EXT4_I(inode)->i_dtime  = get_seconds();
215
216         /*
217          * One subtle ordering requirement: if anything has gone wrong
218          * (transaction abort, IO errors, whatever), then we can still
219          * do these next steps (the fs will already have been marked as
220          * having errors), but we can't free the inode if the mark_dirty
221          * fails.
222          */
223         if (ext4_mark_inode_dirty(handle, inode))
224                 /* If that failed, just do the required in-core inode clear. */
225                 clear_inode(inode);
226         else
227                 ext4_free_inode(handle, inode);
228         ext4_journal_stop(handle);
229         return;
230 no_delete:
231         clear_inode(inode);     /* We must guarantee clearing of inode... */
232 }
233
234 typedef struct {
235         __le32  *p;
236         __le32  key;
237         struct buffer_head *bh;
238 } Indirect;
239
240 static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
241 {
242         p->key = *(p->p = v);
243         p->bh = bh;
244 }
245
246 static int verify_chain(Indirect *from, Indirect *to)
247 {
248         while (from <= to && from->key == *from->p)
249                 from++;
250         return (from > to);
251 }
252
253 /**
254  *      ext4_block_to_path - parse the block number into array of offsets
255  *      @inode: inode in question (we are only interested in its superblock)
256  *      @i_block: block number to be parsed
257  *      @offsets: array to store the offsets in
258  *      @boundary: set this non-zero if the referred-to block is likely to be
259  *             followed (on disk) by an indirect block.
260  *
261  *      To store the locations of a file's data, ext4 uses a data structure
262  *      common for UNIX filesystems - a tree of pointers anchored in the inode,
263  *      with data blocks at the leaves and indirect blocks in intermediate nodes.
264  *      This function translates the block number into a path in that tree -
265  *      the return value is the path length and @offsets[n] is the offset of
266  *      the pointer to the (n+1)th node in the nth one.  If @i_block is out of
267  *      range (negative or too large), a warning is printed and zero is returned.
268  *
269  *      Note: function doesn't find node addresses, so no IO is needed. All
270  *      we need to know is the capacity of indirect blocks (taken from the
271  *      inode->i_sb).
272  */
273
274 /*
275  * Portability note: the last comparison (check that we fit into triple
276  * indirect block) is spelled differently, because otherwise on an
277  * architecture with 32-bit longs and 8Kb pages we might get into trouble
278  * if our filesystem had 8Kb blocks. We might use long long, but that would
279  * kill us on x86. Oh, well, at least the sign propagation does not matter -
280  * i_block would have to be negative in the very beginning, so we would not
281  * get there at all.
282  */
283
284 static int ext4_block_to_path(struct inode *inode,
285                         long i_block, int offsets[4], int *boundary)
286 {
287         int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
288         int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
289         const long direct_blocks = EXT4_NDIR_BLOCKS,
290                 indirect_blocks = ptrs,
291                 double_blocks = (1 << (ptrs_bits * 2));
292         int n = 0;
293         int final = 0;
294
295         if (i_block < 0) {
296                 ext4_warning(inode->i_sb, "ext4_block_to_path", "block < 0");
297         } else if (i_block < direct_blocks) {
298                 offsets[n++] = i_block;
299                 final = direct_blocks;
300         } else if ((i_block -= direct_blocks) < indirect_blocks) {
301                 offsets[n++] = EXT4_IND_BLOCK;
302                 offsets[n++] = i_block;
303                 final = ptrs;
304         } else if ((i_block -= indirect_blocks) < double_blocks) {
305                 offsets[n++] = EXT4_DIND_BLOCK;
306                 offsets[n++] = i_block >> ptrs_bits;
307                 offsets[n++] = i_block & (ptrs - 1);
308                 final = ptrs;
309         } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
310                 offsets[n++] = EXT4_TIND_BLOCK;
311                 offsets[n++] = i_block >> (ptrs_bits * 2);
312                 offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
313                 offsets[n++] = i_block & (ptrs - 1);
314                 final = ptrs;
315         } else {
316                 ext4_warning(inode->i_sb, "ext4_block_to_path", "block > big");
317         }
318         if (boundary)
319                 *boundary = final - 1 - (i_block & (ptrs - 1));
320         return n;
321 }
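/*
 * Worked example (a sketch, assuming 4KB blocks so ptrs == 1024): for
 * i_block == 5000 we get 5000 - 12 = 4988 >= 1024, then
 * 4988 - 1024 = 3964 < 1024 * 1024, so the double-indirect branch is
 * taken and n == 3 is returned with
 *
 *	offsets[0] = EXT4_DIND_BLOCK
 *	offsets[1] = 3964 >> 10   = 3
 *	offsets[2] = 3964 & 1023  = 892
 *
 * and *boundary = 1024 - 1 - 892 = 131 blocks left before the next
 * indirect boundary.
 */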
322
323 /**
324  *      ext4_get_branch - read the chain of indirect blocks leading to data
325  *      @inode: inode in question
326  *      @depth: depth of the chain (1 - direct pointer, etc.)
327  *      @offsets: offsets of pointers in inode/indirect blocks
328  *      @chain: place to store the result
329  *      @err: here we store the error value
330  *
331  *      Function fills the array of triples <key, p, bh> and returns %NULL
332  *      if everything went OK or the pointer to the last filled triple
333  *      (incomplete one) otherwise. Upon the return chain[i].key contains
334  *      the number of the (i+1)-th block in the chain (as it is stored in memory,
335  *      i.e. little-endian 32-bit), chain[i].p contains the address of that
336  *      number (it points into struct inode for i==0 and into the bh->b_data
337  *      for i>0) and chain[i].bh points to the buffer_head of i-th indirect
338  *      block for i>0 and NULL for i==0. In other words, it holds the block
339  *      numbers of the chain, addresses they were taken from (and where we can
340  *      verify that chain did not change) and buffer_heads hosting these
341  *      numbers.
342  *
343  *      Function stops when it stumbles upon a zero pointer (absent block)
344  *              (pointer to last triple returned, *@err == 0)
345  *      or when it gets an IO error reading an indirect block
346  *              (ditto, *@err == -EIO)
347  *      or when it notices that the chain had been changed while it was reading
348  *              (ditto, *@err == -EAGAIN)
349  *      or when it reads all @depth-1 indirect blocks successfully and finds
350  *      the whole chain, all the way to the data (returns %NULL, *err == 0).
351  */
352 static Indirect *ext4_get_branch(struct inode *inode, int depth, int *offsets,
353                                  Indirect chain[4], int *err)
354 {
355         struct super_block *sb = inode->i_sb;
356         Indirect *p = chain;
357         struct buffer_head *bh;
358
359         *err = 0;
360         /* i_data is not going away, no lock needed */
361         add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
362         if (!p->key)
363                 goto no_block;
364         while (--depth) {
365                 bh = sb_bread(sb, le32_to_cpu(p->key));
366                 if (!bh)
367                         goto failure;
368                 /* Reader: pointers */
369                 if (!verify_chain(chain, p))
370                         goto changed;
371                 add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
372                 /* Reader: end */
373                 if (!p->key)
374                         goto no_block;
375         }
376         return NULL;
377
378 changed:
379         brelse(bh);
380         *err = -EAGAIN;
381         goto no_block;
382 failure:
383         *err = -EIO;
384 no_block:
385         return p;
386 }
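/*
 * For the double-indirect example above (depth == 3) a fully mapped
 * chain looks like this (a sketch):
 *
 *	chain[0]: .p into EXT4_I(inode)->i_data, .bh == NULL
 *	chain[1]: .p into the DIND block's data, .bh == DIND buffer
 *	chain[2]: .p into the IND block's data,  .bh == IND buffer
 *
 * with each .key holding the little-endian block number that *.p
 * contained when it was read.
 */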
387
388 /**
389  *      ext4_find_near - find a place for allocation with sufficient locality
390  *      @inode: owner
391  *      @ind: descriptor of indirect block.
392  *
393  *      This function returns the preferred place for block allocation.
394  *      It is used when the heuristic for sequential allocation fails.
395  *      Rules are:
396  *        + if there is a block to the left of our position - allocate near it.
397  *        + if pointer will live in indirect block - allocate near that block.
398  *        + if pointer will live in inode - allocate in the same
399  *          cylinder group.
400  *
401  * In the latter case we colour the starting block by the caller's PID to
402  * prevent it from clashing with concurrent allocations for a different inode
403  * in the same block group.   The PID is used here so that functionally related
404  * files will be close-by on-disk.
405  *
406  *      Caller must make sure that @ind is valid and will stay that way.
407  */
408 static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
409 {
410         struct ext4_inode_info *ei = EXT4_I(inode);
411         __le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data;
412         __le32 *p;
413         ext4_fsblk_t bg_start;
414         ext4_grpblk_t colour;
415
416         /* Try to find previous block */
417         for (p = ind->p - 1; p >= start; p--) {
418                 if (*p)
419                         return le32_to_cpu(*p);
420         }
421
422         /* No such thing, so let's try location of indirect block */
423         if (ind->bh)
424                 return ind->bh->b_blocknr;
425
426         /*
427          * Is it going to be referred to from the inode itself? OK, just put it
428          * into the same cylinder group then.
429          */
430         bg_start = ext4_group_first_block_no(inode->i_sb, ei->i_block_group);
431         colour = (current->pid % 16) *
432                         (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
433         return bg_start + colour;
434 }
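/*
 * Example of the colouring above (a sketch, assuming 32768 blocks per
 * group): each PID class owns a 2048-block slice, so a process with
 * pid % 16 == 5 starts its search at bg_start + 5 * 2048.
 */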
435
436 /**
437  *      ext4_find_goal - find a preferred place for allocation.
438  *      @inode: owner
439  *      @block:  block we want
440  *      @chain:  chain of indirect blocks
441  *      @partial: pointer to the last triple within a chain
442  *
443  *      Returns the preferred place for block allocation: the block following
444  *      the last allocation if we are allocating sequentially, otherwise a
445  *      block near the existing data (see ext4_find_near()).
446  */
447
448 static ext4_fsblk_t ext4_find_goal(struct inode *inode, long block,
449                 Indirect chain[4], Indirect *partial)
450 {
451         struct ext4_block_alloc_info *block_i;
452
453         block_i =  EXT4_I(inode)->i_block_alloc_info;
454
455         /*
456          * try the heuristic for sequential allocation,
457          * failing that at least try to get decent locality.
458          */
459         if (block_i && (block == block_i->last_alloc_logical_block + 1)
460                 && (block_i->last_alloc_physical_block != 0)) {
461                 return block_i->last_alloc_physical_block + 1;
462         }
463
464         return ext4_find_near(inode, partial);
465 }
466
467 /**
468  *      ext4_blks_to_allocate: Look up the block map and count the number
469  *      of direct blocks that need to be allocated for the given branch.
470  *
471  *      @branch: chain of indirect blocks
472  *      @k: number of blocks needed for indirect blocks
473  *      @blks: number of data blocks to be mapped.
474  *      @blocks_to_boundary:  the offset in the indirect block
475  *
476  *      Returns the total number of blocks to be allocated, including the
477  *      direct and indirect blocks.
478  */
479 static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
480                 int blocks_to_boundary)
481 {
482         unsigned long count = 0;
483
484         /*
485          * Simple case: the [t,d]indirect block(s) have not been allocated
486          * yet, so clearly the blocks on that path have not been allocated either
487          */
488         if (k > 0) {
489                 /* right now we don't handle cross boundary allocation */
490                 if (blks < blocks_to_boundary + 1)
491                         count += blks;
492                 else
493                         count += blocks_to_boundary + 1;
494                 return count;
495         }
496
497         count++;
498         while (count < blks && count <= blocks_to_boundary &&
499                 le32_to_cpu(*(branch[0].p + count)) == 0) {
500                 count++;
501         }
502         return count;
503 }
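/*
 * Worked example (a sketch): with k > 0 (an indirect block on the path
 * is missing), blks == 8 and blocks_to_boundary == 5, at most
 * blocks_to_boundary + 1 == 6 direct blocks fit before the boundary,
 * so 6 is returned; with blks == 4 all four fit and 4 is returned.
 */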
504
505 /**
506  *      ext4_alloc_blocks: multiple allocate blocks needed for a branch
507  *      @indirect_blks: the number of blocks to allocate for indirect
508  *                      blocks
509  *
510  *      @new_blocks: on return it will store the new block numbers for
511  *      the indirect blocks(if needed) and the first direct block,
512  *      @blks:  on return it will store the total number of allocated
513  *              direct blocks
514  */
515 static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
516                         ext4_fsblk_t goal, int indirect_blks, int blks,
517                         ext4_fsblk_t new_blocks[4], int *err)
518 {
519         int target, i;
520         unsigned long count = 0;
521         int index = 0;
522         ext4_fsblk_t current_block = 0;
523         int ret = 0;
524
525         /*
526          * Here we try to allocate the requested multiple blocks at once,
527          * on a best-effort basis.
528          * To build a branch, we should allocate blocks for
529          * the indirect blocks (if not allocated yet), and at least
530          * the first direct block of this branch.  That's the
531          * minimum number of blocks we need to allocate (required).
532          */
533         target = blks + indirect_blks;
534
535         while (1) {
536                 count = target;
537                 /* allocating blocks for indirect blocks and direct blocks */
538                 current_block = ext4_new_blocks(handle, inode, goal, &count, err);
539                 if (*err)
540                         goto failed_out;
541
542                 target -= count;
543                 /* allocate blocks for indirect blocks */
544                 while (index < indirect_blks && count) {
545                         new_blocks[index++] = current_block++;
546                         count--;
547                 }
548
549                 if (count > 0)
550                         break;
551         }
552
553         /* save the new block number for the first direct block */
554         new_blocks[index] = current_block;
555
556         /* total number of blocks allocated for direct blocks */
557         ret = count;
558         *err = 0;
559         return ret;
560 failed_out:
561         for (i = 0; i < index; i++)
562                 ext4_free_blocks(handle, inode, new_blocks[i], 1);
563         return ret;
564 }
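/*
 * Worked example of the loop above (a sketch): with indirect_blks == 2
 * and blks == 4, target starts at 6.  If the allocator first returns
 * only 2 blocks, both are consumed as indirect blocks and we loop with
 * target == 4; if the next call returns 3 blocks, no indirect blocks
 * remain, count stays 3 and we break: new_blocks[2] records the first
 * direct block and 3 is returned as the number of direct blocks.
 */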
565
566 /**
567  *      ext4_alloc_branch - allocate and set up a chain of blocks.
568  *      @inode: owner
569  *      @indirect_blks: number of allocated indirect blocks
570  *      @blks: number of allocated direct blocks
571  *      @offsets: offsets (in the blocks) to store the pointers to next.
572  *      @branch: place to store the chain in.
573  *
574  *      This function allocates blocks, zeroes out all but the last one,
575  *      links them into a chain and (if we are synchronous) writes them to disk.
576  *      In other words, it prepares a branch that can be spliced onto the
577  *      inode. It stores the information about that chain in the branch[], in
578  *      the same format as ext4_get_branch() would do. We are calling it after
579  *      we had read the existing part of chain and partial points to the last
580  *      triple of that (one with zero ->key). Upon the exit we have the same
581  *      picture as after the successful ext4_get_block(), except that in one
582  *      place chain is disconnected - *branch->p is still zero (we did not
583  *      set the last link), but branch->key contains the number that should
584  *      be placed into *branch->p to fill that gap.
585  *
586  *      If allocation fails we free all blocks we've allocated (and forget
587  *      their buffer_heads) and return the error value from the failed
588  *      ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
589  *      as described above and return 0.
590  */
591 static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
592                         int indirect_blks, int *blks, ext4_fsblk_t goal,
593                         int *offsets, Indirect *branch)
594 {
595         int blocksize = inode->i_sb->s_blocksize;
596         int i, n = 0;
597         int err = 0;
598         struct buffer_head *bh;
599         int num;
600         ext4_fsblk_t new_blocks[4];
601         ext4_fsblk_t current_block;
602
603         num = ext4_alloc_blocks(handle, inode, goal, indirect_blks,
604                                 *blks, new_blocks, &err);
605         if (err)
606                 return err;
607
608         branch[0].key = cpu_to_le32(new_blocks[0]);
609         /*
610          * metadata blocks and data blocks are allocated.
611          */
612         for (n = 1; n <= indirect_blks;  n++) {
613                 /*
614                  * Get buffer_head for parent block, zero it out
615                  * and set the pointer to new one, then send
616                  * parent to disk.
617                  */
618                 bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
619                 branch[n].bh = bh;
620                 lock_buffer(bh);
621                 BUFFER_TRACE(bh, "call get_create_access");
622                 err = ext4_journal_get_create_access(handle, bh);
623                 if (err) {
624                         unlock_buffer(bh);
625                         brelse(bh);
626                         goto failed;
627                 }
628
629                 memset(bh->b_data, 0, blocksize);
630                 branch[n].p = (__le32 *) bh->b_data + offsets[n];
631                 branch[n].key = cpu_to_le32(new_blocks[n]);
632                 *branch[n].p = branch[n].key;
633                 if (n == indirect_blks) {
634                         current_block = new_blocks[n];
635                         /*
636                          * End of chain, update the last new metablock of
637                          * the chain to point to the newly allocated
638                          * data block numbers
639                          */
640                         for (i = 1; i < num; i++)
641                                 *(branch[n].p + i) = cpu_to_le32(++current_block);
642                 }
643                 BUFFER_TRACE(bh, "marking uptodate");
644                 set_buffer_uptodate(bh);
645                 unlock_buffer(bh);
646
647                 BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
648                 err = ext4_journal_dirty_metadata(handle, bh);
649                 if (err)
650                         goto failed;
651         }
652         *blks = num;
653         return err;
654 failed:
655         /* Allocation failed, free what we already allocated */
656         for (i = 1; i <= n ; i++) {
657                 BUFFER_TRACE(branch[i].bh, "call jbd2_journal_forget");
658                 ext4_journal_forget(handle, branch[i].bh);
659         }
660         for (i = 0; i < indirect_blks; i++)
661                 ext4_free_blocks(handle, inode, new_blocks[i], 1);
662
663         ext4_free_blocks(handle, inode, new_blocks[i], num);
664
665         return err;
666 }
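/*
 * State after a successful ext4_alloc_branch() (a sketch): for the
 * double-indirect example, branch[0].key holds the new DIND block
 * number but *branch[0].p is still zero - the new DIND and IND blocks
 * are fully initialized, and only the final splice of branch[0].key
 * into *branch[0].p (done in ext4_splice_branch()) makes the branch
 * visible.
 */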
667
668 /**
669  * ext4_splice_branch - splice the allocated branch onto inode.
670  * @inode: owner
671  * @block: (logical) number of block we are adding
672  * @chain: chain of indirect blocks (with a missing link - see
673  *      ext4_alloc_branch)
674  * @where: location of missing link
675  * @num:   number of indirect blocks we are adding
676  * @blks:  number of direct blocks we are adding
677  *
678  * This function fills the missing link and does all housekeeping needed in
679  * inode (->i_blocks, etc.). In case of success we end up with the full
680  * chain to new block and return 0.
681  */
682 static int ext4_splice_branch(handle_t *handle, struct inode *inode,
683                         long block, Indirect *where, int num, int blks)
684 {
685         int i;
686         int err = 0;
687         struct ext4_block_alloc_info *block_i;
688         ext4_fsblk_t current_block;
689
690         block_i = EXT4_I(inode)->i_block_alloc_info;
691         /*
692          * If we're splicing into a [td]indirect block (as opposed to the
693          * inode) then we need to get write access to the [td]indirect block
694          * before the splice.
695          */
696         if (where->bh) {
697                 BUFFER_TRACE(where->bh, "get_write_access");
698                 err = ext4_journal_get_write_access(handle, where->bh);
699                 if (err)
700                         goto err_out;
701         }
702         /* That's it */
703
704         *where->p = where->key;
705
706         /*
707          * Update the host buffer_head or inode to point to the just-allocated
708          * direct blocks
709          */
710         if (num == 0 && blks > 1) {
711                 current_block = le32_to_cpu(where->key) + 1;
712                 for (i = 1; i < blks; i++)
713                         *(where->p + i) = cpu_to_le32(current_block++);
714         }
715
716         /*
717          * update the most recently allocated logical & physical block
718          * in i_block_alloc_info, to help find the proper goal block for the next
719          * allocation
720          */
721         if (block_i) {
722                 block_i->last_alloc_logical_block = block + blks - 1;
723                 block_i->last_alloc_physical_block =
724                                 le32_to_cpu(where[num].key) + blks - 1;
725         }
726
727         /* We are done with atomic stuff, now do the rest of housekeeping */
728
729         inode->i_ctime = CURRENT_TIME_SEC;
730         ext4_mark_inode_dirty(handle, inode);
731
732         /* had we spliced it onto indirect block? */
733         if (where->bh) {
734                 /*
735                  * If we spliced it onto an indirect block, we haven't
736                  * altered the inode.  Note however that if it is being spliced
737                  * onto an indirect block at the very end of the file (the
738                  * file is growing) then we *will* alter the inode to reflect
739                  * the new i_size.  But that is not done here - it is done in
740                  * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
741                  */
742                 jbd_debug(5, "splicing indirect only\n");
743                 BUFFER_TRACE(where->bh, "call ext4_journal_dirty_metadata");
744                 err = ext4_journal_dirty_metadata(handle, where->bh);
745                 if (err)
746                         goto err_out;
747         } else {
748                 /*
749                  * OK, we spliced it into the inode itself on a direct block.
750                  * Inode was dirtied above.
751                  */
752                 jbd_debug(5, "splicing direct\n");
753         }
754         return err;
755
756 err_out:
757         for (i = 1; i <= num; i++) {
758                 BUFFER_TRACE(where[i].bh, "call jbd2_journal_forget");
759                 ext4_journal_forget(handle, where[i].bh);
760                 ext4_free_blocks(handle, inode, le32_to_cpu(where[i-1].key), 1);
761         }
762         ext4_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks);
763
764         return err;
765 }
766
767 /*
768  * Allocation strategy is simple: if we have to allocate something, we will
769  * have to go the whole way to leaf. So let's do it before attaching anything
770  * to tree, set linkage between the newborn blocks, write them if sync is
771  * required, recheck the path, free and repeat if check fails, otherwise
772  * set the last missing link (that will protect us from any truncate-generated
773  * removals - all blocks on the path are immune now) and possibly force the
774  * write on the parent block.
775  * That has a nice additional property: no special recovery from the failed
776  * allocations is needed - we simply release blocks and do not touch anything
777  * reachable from inode.
778  *
779  * `handle' can be NULL if create == 0.
780  *
781  * The BKL may not be held on entry here.  Be sure to take it early.
782  * return > 0, # of blocks mapped or allocated.
783  * return = 0, if plain lookup failed.
784  * return < 0, error case.
785  */
786 int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
787                 sector_t iblock, unsigned long maxblocks,
788                 struct buffer_head *bh_result,
789                 int create, int extend_disksize)
790 {
791         int err = -EIO;
792         int offsets[4];
793         Indirect chain[4];
794         Indirect *partial;
795         ext4_fsblk_t goal;
796         int indirect_blks;
797         int blocks_to_boundary = 0;
798         int depth;
799         struct ext4_inode_info *ei = EXT4_I(inode);
800         int count = 0;
801         ext4_fsblk_t first_block = 0;
802
803
804         J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
805         J_ASSERT(handle != NULL || create == 0);
806         depth = ext4_block_to_path(inode, iblock, offsets, &blocks_to_boundary);
807
808         if (depth == 0)
809                 goto out;
810
811         partial = ext4_get_branch(inode, depth, offsets, chain, &err);
812
813         /* Simplest case - block found, no allocation needed */
814         if (!partial) {
815                 first_block = le32_to_cpu(chain[depth - 1].key);
816                 clear_buffer_new(bh_result);
817                 count++;
818                 /* map more blocks */
819                 while (count < maxblocks && count <= blocks_to_boundary) {
820                         ext4_fsblk_t blk;
821
822                         if (!verify_chain(chain, partial)) {
823                                 /*
824                                  * Indirect block might be removed by
825                                  * truncate while we were reading it.
826                                  * Handling of that case: forget what we've
827                                  * got now. Flag the err as EAGAIN, so it
828                                  * will reread.
829                                  */
830                                 err = -EAGAIN;
831                                 count = 0;
832                                 break;
833                         }
834                         blk = le32_to_cpu(*(chain[depth-1].p + count));
835
836                         if (blk == first_block + count)
837                                 count++;
838                         else
839                                 break;
840                 }
841                 if (err != -EAGAIN)
842                         goto got_it;
843         }
844
845         /* Next simple case - plain lookup or failed read of indirect block */
846         if (!create || err == -EIO)
847                 goto cleanup;
848
849         mutex_lock(&ei->truncate_mutex);
850
851         /*
852          * If the indirect block is missing while we are reading
853          * the chain (ext4_get_branch() returns the -EAGAIN err), or
854          * if the chain has been changed after we grab truncate_mutex
855          * (either because another process truncated this branch, or
856          * another get_block allocated this branch), re-grab the chain to see
857          * if the requested block has been allocated or not.
858          *
859          * Since we already block the truncate/other get_block
860          * at this point, we will have the current copy of the chain when we
861          * splice the branch into the tree.
862          */
863         if (err == -EAGAIN || !verify_chain(chain, partial)) {
864                 while (partial > chain) {
865                         brelse(partial->bh);
866                         partial--;
867                 }
868                 partial = ext4_get_branch(inode, depth, offsets, chain, &err);
869                 if (!partial) {
870                         count++;
871                         mutex_unlock(&ei->truncate_mutex);
872                         if (err)
873                                 goto cleanup;
874                         clear_buffer_new(bh_result);
875                         goto got_it;
876                 }
877         }
878
879         /*
880          * Okay, we need to do block allocation.  Lazily initialize the block
881          * allocation info here if necessary.
882          */
883         if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
884                 ext4_init_block_alloc_info(inode);
885
886         goal = ext4_find_goal(inode, iblock, chain, partial);
887
888         /* the number of blocks needed for the [d,t]indirect blocks */
889         indirect_blks = (chain + depth) - partial - 1;
890
891         /*
892          * Next look up the indirect map to count the total number of
893          * direct blocks to allocate for this branch.
894          */
895         count = ext4_blks_to_allocate(partial, indirect_blks,
896                                         maxblocks, blocks_to_boundary);
897         /*
898          * Block out ext4_truncate while we alter the tree
899          */
900         err = ext4_alloc_branch(handle, inode, indirect_blks, &count, goal,
901                                 offsets + (partial - chain), partial);
902
903         /*
904          * The ext4_splice_branch call will free and forget any buffers
905          * on the new chain if there is a failure, but that risks using
906          * up transaction credits, especially for bitmaps where the
907          * credits cannot be returned.  Can we handle this somehow?  We
908          * may need to return -EAGAIN upwards in the worst case.  --sct
909          */
910         if (!err)
911                 err = ext4_splice_branch(handle, inode, iblock,
912                                         partial, indirect_blks, count);
913         /*
914          * i_disksize growing is protected by truncate_mutex.  Don't forget to
915          * protect it if you're about to implement concurrent
916          * ext4_get_block() -bzzz
917          */
918         if (!err && extend_disksize && inode->i_size > ei->i_disksize)
919                 ei->i_disksize = inode->i_size;
920         mutex_unlock(&ei->truncate_mutex);
921         if (err)
922                 goto cleanup;
923
924         set_buffer_new(bh_result);
925 got_it:
926         map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
927         if (count > blocks_to_boundary)
928                 set_buffer_boundary(bh_result);
929         err = count;
930         /* Clean up and exit */
931         partial = chain + depth - 1;    /* the whole chain */
932 cleanup:
933         while (partial > chain) {
934                 BUFFER_TRACE(partial->bh, "call brelse");
935                 brelse(partial->bh);
936                 partial--;
937         }
938         BUFFER_TRACE(bh_result, "returned");
939 out:
940         return err;
941 }
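/*
 * Example of the return convention above (a sketch): a plain lookup of
 * a hole - create == 0 with a zero pointer somewhere on the path -
 * makes ext4_get_branch() return the last (partial) triple with
 * *err == 0, so we fall through to cleanup and return 0 with bh_result
 * left unmapped; callers treat that as "reads as zeroes".
 */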
942
943 #define DIO_CREDITS (EXT4_RESERVE_TRANS_BLOCKS + 32)
944
945 static int ext4_get_block(struct inode *inode, sector_t iblock,
946                         struct buffer_head *bh_result, int create)
947 {
948         handle_t *handle = ext4_journal_current_handle();
949         int ret = 0;
950         unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
951
952         if (!create)
953                 goto get_block;         /* A read */
954
955         if (max_blocks == 1)
956                 goto get_block;         /* A single block get */
957
958         if (handle->h_transaction->t_state == T_LOCKED) {
959                 /*
960                  * Huge direct-io writes can hold off commits for long
961                  * periods of time.  Let this commit run.
962                  */
963                 ext4_journal_stop(handle);
964                 handle = ext4_journal_start(inode, DIO_CREDITS);
965                 if (IS_ERR(handle))
966                         ret = PTR_ERR(handle);
967                 goto get_block;
968         }
969
970         if (handle->h_buffer_credits <= EXT4_RESERVE_TRANS_BLOCKS) {
971                 /*
972                  * Getting low on buffer credits...
973                  */
974                 ret = ext4_journal_extend(handle, DIO_CREDITS);
975                 if (ret > 0) {
976                         /*
977                          * Couldn't extend the transaction.  Start a new one.
978                          */
979                         ret = ext4_journal_restart(handle, DIO_CREDITS);
980                 }
981         }
982
983 get_block:
984         if (ret == 0) {
985                 ret = ext4_get_blocks_wrap(handle, inode, iblock,
986                                         max_blocks, bh_result, create, 0);
987                 if (ret > 0) {
988                         bh_result->b_size = (ret << inode->i_blkbits);
989                         ret = 0;
990                 }
991         }
992         return ret;
993 }
994
995 /*
996  * `handle' can be NULL if create is zero
997  */
998 struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
999                                 long block, int create, int *errp)
1000 {
1001         struct buffer_head dummy;
1002         int fatal = 0, err;
1003
1004         J_ASSERT(handle != NULL || create == 0);
1005
1006         dummy.b_state = 0;
1007         dummy.b_blocknr = -1000;
1008         buffer_trace_init(&dummy.b_history);
1009         err = ext4_get_blocks_wrap(handle, inode, block, 1,
1010                                         &dummy, create, 1);
1011         /*
1012          * ext4_get_blocks_handle() returns the number of blocks
1013          * mapped, or 0 in the case of a hole.
1014          */
1015         if (err > 0) {
1016                 if (err > 1)
1017                         WARN_ON(1);
1018                 err = 0;
1019         }
1020         *errp = err;
1021         if (!err && buffer_mapped(&dummy)) {
1022                 struct buffer_head *bh;
1023                 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
1024                 if (!bh) {
1025                         *errp = -EIO;
1026                         goto err;
1027                 }
1028                 if (buffer_new(&dummy)) {
1029                         J_ASSERT(create != 0);
1030                         J_ASSERT(handle != 0);
1031
1032                         /*
1033                          * Now that we do not always journal data, we should
1034                          * keep in mind whether this should always journal the
1035                          * new buffer as metadata.  For now, regular file
1036                          * writes use ext4_get_block instead, so it's not a
1037                          * problem.
1038                          */
1039                         lock_buffer(bh);
1040                         BUFFER_TRACE(bh, "call get_create_access");
1041                         fatal = ext4_journal_get_create_access(handle, bh);
1042                         if (!fatal && !buffer_uptodate(bh)) {
1043                                 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1044                                 set_buffer_uptodate(bh);
1045                         }
1046                         unlock_buffer(bh);
1047                         BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
1048                         err = ext4_journal_dirty_metadata(handle, bh);
1049                         if (!fatal)
1050                                 fatal = err;
1051                 } else {
1052                         BUFFER_TRACE(bh, "not a new buffer");
1053                 }
1054                 if (fatal) {
1055                         *errp = fatal;
1056                         brelse(bh);
1057                         bh = NULL;
1058                 }
1059                 return bh;
1060         }
1061 err:
1062         return NULL;
1063 }
1064
1065 struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
1066                                int block, int create, int *err)
1067 {
1068         struct buffer_head * bh;
1069
1070         bh = ext4_getblk(handle, inode, block, create, err);
1071         if (!bh)
1072                 return bh;
1073         if (buffer_uptodate(bh))
1074                 return bh;
1075         ll_rw_block(READ_META, 1, &bh);
1076         wait_on_buffer(bh);
1077         if (buffer_uptodate(bh))
1078                 return bh;
1079         put_bh(bh);
1080         *err = -EIO;
1081         return NULL;
1082 }
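/*
 * Typical use (a sketch of the pattern the directory code follows):
 *
 *	bh = ext4_bread(handle, dir, 0, 0, &err);
 *	if (!bh)
 *		return err;
 *	... parse the ext4_dir_entry_2 records in bh->b_data ...
 *	brelse(bh);
 */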
1083
1084 static int walk_page_buffers(   handle_t *handle,
1085                                 struct buffer_head *head,
1086                                 unsigned from,
1087                                 unsigned to,
1088                                 int *partial,
1089                                 int (*fn)(      handle_t *handle,
1090                                                 struct buffer_head *bh))
1091 {
1092         struct buffer_head *bh;
1093         unsigned block_start, block_end;
1094         unsigned blocksize = head->b_size;
1095         int err, ret = 0;
1096         struct buffer_head *next;
1097
1098         for (   bh = head, block_start = 0;
1099                 ret == 0 && (bh != head || !block_start);
1100                 block_start = block_end, bh = next)
1101         {
1102                 next = bh->b_this_page;
1103                 block_end = block_start + blocksize;
1104                 if (block_end <= from || block_start >= to) {
1105                         if (partial && !buffer_uptodate(bh))
1106                                 *partial = 1;
1107                         continue;
1108                 }
1109                 err = (*fn)(handle, bh);
1110                 if (!ret)
1111                         ret = err;
1112         }
1113         return ret;
1114 }
1115
1116 /*
1117  * To preserve ordering, it is essential that the hole instantiation and
1118  * the data write be encapsulated in a single transaction.  We cannot
1119  * close off a transaction and start a new one between the ext4_get_block()
1120  * and the commit_write().  So doing the jbd2_journal_start at the start of
1121  * prepare_write() is the right place.
1122  *
1123  * Also, this function can nest inside ext4_writepage() ->
1124  * block_write_full_page(). In that case, we *know* that ext4_writepage()
1125  * has generated enough buffer credits to do the whole page.  So we won't
1126  * block on the journal in that case, which is good, because the caller may
1127  * be PF_MEMALLOC.
1128  *
1129  * By accident, ext4 can be reentered when a transaction is open via
1130  * quota file writes.  If we were to commit the transaction while thus
1131  * reentered, there can be a deadlock - we would be holding a quota
1132  * lock, and the commit would never complete if another thread had a
1133  * transaction open and was blocking on the quota lock - a ranking
1134  * violation.
1135  *
1136  * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
1137  * will _not_ run commit under these circumstances because handle->h_ref
1138  * is elevated.  We'll still have enough credits for the tiny quotafile
1139  * write.
1140  */
1141 static int do_journal_get_write_access(handle_t *handle,
1142                                         struct buffer_head *bh)
1143 {
1144         if (!buffer_mapped(bh) || buffer_freed(bh))
1145                 return 0;
1146         return ext4_journal_get_write_access(handle, bh);
1147 }
1148
1149 static int ext4_prepare_write(struct file *file, struct page *page,
1150                               unsigned from, unsigned to)
1151 {
1152         struct inode *inode = page->mapping->host;
1153         int ret, needed_blocks = ext4_writepage_trans_blocks(inode);
1154         handle_t *handle;
1155         int retries = 0;
1156
1157 retry:
1158         handle = ext4_journal_start(inode, needed_blocks);
1159         if (IS_ERR(handle)) {
1160                 ret = PTR_ERR(handle);
1161                 goto out;
1162         }
1163         if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
1164                 ret = nobh_prepare_write(page, from, to, ext4_get_block);
1165         else
1166                 ret = block_prepare_write(page, from, to, ext4_get_block);
1167         if (ret)
1168                 goto prepare_write_failed;
1169
1170         if (ext4_should_journal_data(inode)) {
1171                 ret = walk_page_buffers(handle, page_buffers(page),
1172                                 from, to, NULL, do_journal_get_write_access);
1173         }
1174 prepare_write_failed:
1175         if (ret)
1176                 ext4_journal_stop(handle);
1177         if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
1178                 goto retry;
1179 out:
1180         return ret;
1181 }
1182
1183 int ext4_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
1184 {
1185         int err = jbd2_journal_dirty_data(handle, bh);
1186         if (err)
1187                 ext4_journal_abort_handle(__FUNCTION__, __FUNCTION__,
1188                                                 bh, handle, err);
1189         return err;
1190 }
1191
1192 /* For commit_write() in data=journal mode */
1193 static int commit_write_fn(handle_t *handle, struct buffer_head *bh)
1194 {
1195         if (!buffer_mapped(bh) || buffer_freed(bh))
1196                 return 0;
1197         set_buffer_uptodate(bh);
1198         return ext4_journal_dirty_metadata(handle, bh);
1199 }
1200
1201 /*
1202  * We need to pick up the new inode size which generic_commit_write gave us.
1203  * `file' can be NULL - e.g., when called from page_symlink().
1204  *
1205  * ext4 never places buffers on inode->i_mapping->private_list.  Metadata
1206  * buffers are managed internally.
1207  */
1208 static int ext4_ordered_commit_write(struct file *file, struct page *page,
1209                              unsigned from, unsigned to)
1210 {
1211         handle_t *handle = ext4_journal_current_handle();
1212         struct inode *inode = page->mapping->host;
1213         int ret = 0, ret2;
1214
1215         ret = walk_page_buffers(handle, page_buffers(page),
1216                 from, to, NULL, ext4_journal_dirty_data);
1217
1218         if (ret == 0) {
1219                 /*
1220                  * generic_commit_write() will run mark_inode_dirty() if i_size
1221                  * changes.  So let's piggyback the i_disksize mark_inode_dirty
1222                  * into that.
1223                  */
1224                 loff_t new_i_size;
1225
1226                 new_i_size = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1227                 if (new_i_size > EXT4_I(inode)->i_disksize)
1228                         EXT4_I(inode)->i_disksize = new_i_size;
1229                 ret = generic_commit_write(file, page, from, to);
1230         }
1231         ret2 = ext4_journal_stop(handle);
1232         if (!ret)
1233                 ret = ret2;
1234         return ret;
1235 }
1236
1237 static int ext4_writeback_commit_write(struct file *file, struct page *page,
1238                              unsigned from, unsigned to)
1239 {
1240         handle_t *handle = ext4_journal_current_handle();
1241         struct inode *inode = page->mapping->host;
1242         int ret = 0, ret2;
1243         loff_t new_i_size;
1244
1245         new_i_size = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1246         if (new_i_size > EXT4_I(inode)->i_disksize)
1247                 EXT4_I(inode)->i_disksize = new_i_size;
1248
1249         if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
1250                 ret = nobh_commit_write(file, page, from, to);
1251         else
1252                 ret = generic_commit_write(file, page, from, to);
1253
1254         ret2 = ext4_journal_stop(handle);
1255         if (!ret)
1256                 ret = ret2;
1257         return ret;
1258 }
1259
1260 static int ext4_journalled_commit_write(struct file *file,
1261                         struct page *page, unsigned from, unsigned to)
1262 {
1263         handle_t *handle = ext4_journal_current_handle();
1264         struct inode *inode = page->mapping->host;
1265         int ret = 0, ret2;
1266         int partial = 0;
1267         loff_t pos;
1268
1269         /*
1270          * Here we duplicate the generic_commit_write() functionality
1271          */
1272         pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1273
1274         ret = walk_page_buffers(handle, page_buffers(page), from,
1275                                 to, &partial, commit_write_fn);
1276         if (!partial)
1277                 SetPageUptodate(page);
1278         if (pos > inode->i_size)
1279                 i_size_write(inode, pos);
1280         EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
1281         if (inode->i_size > EXT4_I(inode)->i_disksize) {
1282                 EXT4_I(inode)->i_disksize = inode->i_size;
1283                 ret2 = ext4_mark_inode_dirty(handle, inode);
1284                 if (!ret)
1285                         ret = ret2;
1286         }
1287         ret2 = ext4_journal_stop(handle);
1288         if (!ret)
1289                 ret = ret2;
1290         return ret;
1291 }
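/*
 * To summarize the three commit_write flavours above (a sketch):
 *
 *	ordered:    file data goes through ext4_journal_dirty_data() so it
 *	            is written out before the transaction commits;
 *	writeback:  data is left to the VM - only i_disksize is kept in
 *	            step with the new i_size;
 *	journalled: each buffer becomes journal metadata via
 *	            commit_write_fn() and EXT4_STATE_JDATA is set.
 */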
1292
1293 /*
1294  * bmap() is special.  It gets used by applications such as lilo and by
1295  * the swapper to find the on-disk block of a specific piece of data.
1296  *
1297  * Naturally, this is dangerous if the block concerned is still in the
1298  * journal.  If somebody makes a swapfile on an ext4 data-journaling
1299  * filesystem and enables swap, then they may get a nasty shock when the
1300  * data getting swapped to that swapfile suddenly gets overwritten by
1301  * the original zeros written out previously to the journal and
1302  * awaiting writeback in the kernel's buffer cache.
1303  *
1304  * So, if we see any bmap calls here on a modified, data-journaled file,
1305  * take extra steps to flush any blocks which might be in the cache.
1306  */
1307 static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
1308 {
1309         struct inode *inode = mapping->host;
1310         journal_t *journal;
1311         int err;
1312
1313         if (EXT4_I(inode)->i_state & EXT4_STATE_JDATA) {
1314                 /*
1315                  * This is a REALLY heavyweight approach, but the use of
1316                  * bmap on dirty files is expected to be extremely rare:
1317                  * only if we run lilo or swapon on a freshly made file
1318                  * do we expect this to happen.
1319                  *
1320                  * (bmap requires CAP_SYS_RAWIO so this does not
1321                  * represent an unprivileged user DOS attack --- we'd be
1322                  * in trouble if mortal users could trigger this path at
1323                  * will.)
1324                  *
1325                  * NB. EXT4_STATE_JDATA is not set on files other than
1326                  * regular files.  If somebody wants to bmap a directory
1327                  * or symlink and gets confused because the buffer
1328                  * hasn't yet been flushed to disk, they deserve
1329                  * everything they get.
1330                  */
1331
1332                 EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA;
1333                 journal = EXT4_JOURNAL(inode);
1334                 jbd2_journal_lock_updates(journal);
1335                 err = jbd2_journal_flush(journal);
1336                 jbd2_journal_unlock_updates(journal);
1337
1338                 if (err)
1339                         return 0;
1340         }
1341
1342         return generic_block_bmap(mapping, block, ext4_get_block);
1343 }
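/*
 * Example (a sketch): the FIBMAP ioctl reaches this function via the
 * ->bmap address_space operation, so
 *
 *	ioctl(fd, FIBMAP, &blk);
 *
 * on a dirty data=journal file is exactly what triggers the journal
 * flush above.
 */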
1344
1345 static int bget_one(handle_t *handle, struct buffer_head *bh)
1346 {
1347         get_bh(bh);
1348         return 0;
1349 }
1350
1351 static int bput_one(handle_t *handle, struct buffer_head *bh)
1352 {
1353         put_bh(bh);
1354         return 0;
1355 }
1356
1357 static int jbd2_journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
1358 {
1359         if (buffer_mapped(bh))
1360                 return ext4_journal_dirty_data(handle, bh);
1361         return 0;
1362 }
1363
1364 /*
1365  * Note that we always start a transaction even if we're not journalling
1366  * data.  This is to preserve ordering: any hole instantiation within
1367  * __block_write_full_page -> ext4_get_block() should be journalled
1368  * along with the data so we don't crash and then get metadata which
1369  * refers to old data.
1370  *
1371  * In all journalling modes block_write_full_page() will start the I/O.
1372  *
1373  * Problem:
1374  *
1375  *      ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
1376  *              ext4_writepage()
1377  *
1378  * Similar for:
1379  *
1380  *      ext4_file_write() -> generic_file_write() -> __alloc_pages() -> ...
1381  *
1382  * Same applies to ext4_get_block().  We will deadlock on various things like
1383  * lock_journal and i_truncate_mutex.
1384  *
1385  * Setting PF_MEMALLOC here doesn't work - too many internal memory
1386  * allocations fail.
1387  *
1388  * 16May01: If we're reentered then journal_current_handle() will be
1389  *          non-zero. We simply *return*.
1390  *
1391  * 1 July 2001: @@@ FIXME:
1392  *   In journalled data mode, a data buffer may be metadata against the
1393  *   current transaction.  But the same file is part of a shared mapping
1394  *   and someone does a writepage() on it.
1395  *
1396  *   We will move the buffer onto the async_data list, but *after* it has
1397  *   been dirtied. So there's a small window where we have dirty data on
1398  *   BJ_Metadata.
1399  *
1400  *   Note that this only applies to the last partial page in the file.  The
1401  *   bit which block_write_full_page() uses prepare/commit for.  (That's
1402  *   broken code anyway: it's wrong for msync()).
1403  *
1404  *   It's a rare case: affects the final partial page, for journalled data
1405  *   where the file is subject to both write() and writepage() in the same
1406  *   transaction.  To fix it we'll need a custom block_write_full_page().
1407  *   We'll probably need that anyway for journalling writepage() output.
1408  *
1409  * We don't honour synchronous mounts for writepage().  That would be
1410  * disastrous.  Any write() or metadata operation will sync the fs for
1411  * us.
1412  *
1413  * AKPM2: if all the page's buffers are mapped to disk and !data=journal,
1414  * we don't need to open a transaction here.
1415  */
1416 static int ext4_ordered_writepage(struct page *page,
1417                                 struct writeback_control *wbc)
1418 {
1419         struct inode *inode = page->mapping->host;
1420         struct buffer_head *page_bufs;
1421         handle_t *handle = NULL;
1422         int ret = 0;
1423         int err;
1424
1425         J_ASSERT(PageLocked(page));
1426
1427         /*
1428          * We give up here if we're reentered, because it might be for a
1429          * different filesystem.
1430          */
1431         if (ext4_journal_current_handle())
1432                 goto out_fail;
1433
1434         handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
1435
1436         if (IS_ERR(handle)) {
1437                 ret = PTR_ERR(handle);
1438                 goto out_fail;
1439         }
1440
1441         if (!page_has_buffers(page)) {
1442                 create_empty_buffers(page, inode->i_sb->s_blocksize,
1443                                 (1 << BH_Dirty)|(1 << BH_Uptodate));
1444         }
1445         page_bufs = page_buffers(page);
1446         walk_page_buffers(handle, page_bufs, 0,
1447                         PAGE_CACHE_SIZE, NULL, bget_one);
1448
1449         ret = block_write_full_page(page, ext4_get_block, wbc);
1450
1451         /*
1452          * The page can become unlocked at any point now, and
1453          * truncate can then come in and change things.  So we
1454          * can't touch *page from now on.  But *page_bufs is
1455          * safe due to elevated refcount.
1456          */
1457
1458         /*
1459          * And attach them to the current transaction.  But only if
1460          * block_write_full_page() succeeded.  Otherwise they are unmapped,
1461          * and generally junk.
1462          */
1463         if (ret == 0) {
1464                 err = walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE,
1465                                         NULL, jbd2_journal_dirty_data_fn);
1466                 ret = err;
1468         }
1469         walk_page_buffers(handle, page_bufs, 0,
1470                         PAGE_CACHE_SIZE, NULL, bput_one);
1471         err = ext4_journal_stop(handle);
1472         if (!ret)
1473                 ret = err;
1474         return ret;
1475
1476 out_fail:
1477         redirty_page_for_writepage(wbc, page);
1478         unlock_page(page);
1479         return ret;
1480 }
1481
1482 static int ext4_writeback_writepage(struct page *page,
1483                                 struct writeback_control *wbc)
1484 {
1485         struct inode *inode = page->mapping->host;
1486         handle_t *handle = NULL;
1487         int ret = 0;
1488         int err;
1489
1490         if (ext4_journal_current_handle())
1491                 goto out_fail;
1492
1493         handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
1494         if (IS_ERR(handle)) {
1495                 ret = PTR_ERR(handle);
1496                 goto out_fail;
1497         }
1498
1499         if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
1500                 ret = nobh_writepage(page, ext4_get_block, wbc);
1501         else
1502                 ret = block_write_full_page(page, ext4_get_block, wbc);
1503
1504         err = ext4_journal_stop(handle);
1505         if (!ret)
1506                 ret = err;
1507         return ret;
1508
1509 out_fail:
1510         redirty_page_for_writepage(wbc, page);
1511         unlock_page(page);
1512         return ret;
1513 }
1514
1515 static int ext4_journalled_writepage(struct page *page,
1516                                 struct writeback_control *wbc)
1517 {
1518         struct inode *inode = page->mapping->host;
1519         handle_t *handle = NULL;
1520         int ret = 0;
1521         int err;
1522
1523         if (ext4_journal_current_handle())
1524                 goto no_write;
1525
1526         handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
1527         if (IS_ERR(handle)) {
1528                 ret = PTR_ERR(handle);
1529                 goto no_write;
1530         }
1531
1532         if (!page_has_buffers(page) || PageChecked(page)) {
1533                 /*
1534                  * It's mmapped pagecache.  Add buffers and journal it.  There
1535                  * doesn't seem much point in redirtying the page here.
1536                  */
1537                 ClearPageChecked(page);
1538                 ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
1539                                         ext4_get_block);
1540                 if (ret != 0) {
1541                         ext4_journal_stop(handle);
1542                         goto out_unlock;
1543                 }
1544                 ret = walk_page_buffers(handle, page_buffers(page), 0,
1545                         PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);
1546
1547                 err = walk_page_buffers(handle, page_buffers(page), 0,
1548                                 PAGE_CACHE_SIZE, NULL, commit_write_fn);
1549                 if (ret == 0)
1550                         ret = err;
1551                 EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
1552                 unlock_page(page);
1553         } else {
1554                 /*
1555                  * It may be a page full of checkpoint-mode buffers.  We don't
1556                  * really know unless we go poke around in the buffer_heads.
1557                  * But block_write_full_page will do the right thing.
1558                  */
1559                 ret = block_write_full_page(page, ext4_get_block, wbc);
1560         }
1561         err = ext4_journal_stop(handle);
1562         if (!ret)
1563                 ret = err;
1564 out:
1565         return ret;
1566
1567 no_write:
1568         redirty_page_for_writepage(wbc, page);
1569 out_unlock:
1570         unlock_page(page);
1571         goto out;
1572 }
1573
1574 static int ext4_readpage(struct file *file, struct page *page)
1575 {
1576         return mpage_readpage(page, ext4_get_block);
1577 }
1578
1579 static int
1580 ext4_readpages(struct file *file, struct address_space *mapping,
1581                 struct list_head *pages, unsigned nr_pages)
1582 {
1583         return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
1584 }
1585
1586 static void ext4_invalidatepage(struct page *page, unsigned long offset)
1587 {
1588         journal_t *journal = EXT4_JOURNAL(page->mapping->host);
1589
1590         /*
1591          * If it's a full truncate, we just forget about the pending dirtying.
1592          */
1593         if (offset == 0)
1594                 ClearPageChecked(page);
1595
1596         jbd2_journal_invalidatepage(journal, page, offset);
1597 }
1598
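/*
 * A page with PageChecked set still has journalled dirtying pending,
 * so it should never reach here; hence the WARN_ON() below.
 */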
1599 static int ext4_releasepage(struct page *page, gfp_t wait)
1600 {
1601         journal_t *journal = EXT4_JOURNAL(page->mapping->host);
1602
1603         WARN_ON(PageChecked(page));
1604         if (!page_has_buffers(page))
1605                 return 0;
1606         return jbd2_journal_try_to_free_buffers(journal, page, wait);
1607 }
1608
1609 /*
1610  * If the O_DIRECT write will extend the file then add this inode to the
1611  * orphan list.  So recovery will truncate it back to the original size
1612  * if the machine crashes during the write.
1613  *
1614  * If the O_DIRECT write is instantiating holes inside i_size and the machine
1615  * crashes then stale disk data _may_ be exposed inside the file.
1616  */
1617 static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
1618                         const struct iovec *iov, loff_t offset,
1619                         unsigned long nr_segs)
1620 {
1621         struct file *file = iocb->ki_filp;
1622         struct inode *inode = file->f_mapping->host;
1623         struct ext4_inode_info *ei = EXT4_I(inode);
1624         handle_t *handle = NULL;
1625         ssize_t ret;
1626         int orphan = 0;
1627         size_t count = iov_length(iov, nr_segs);
1628
1629         if (rw == WRITE) {
1630                 loff_t final_size = offset + count;
1631
1632                 handle = ext4_journal_start(inode, DIO_CREDITS);
1633                 if (IS_ERR(handle)) {
1634                         ret = PTR_ERR(handle);
1635                         goto out;
1636                 }
1637                 if (final_size > inode->i_size) {
1638                         ret = ext4_orphan_add(handle, inode);
1639                         if (ret)
1640                                 goto out_stop;
1641                         orphan = 1;
1642                         ei->i_disksize = inode->i_size;
1643                 }
1644         }
1645
1646         ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
1647                                  offset, nr_segs,
1648                                  ext4_get_block, NULL);
1649
1650         /*
1651          * Reacquire the handle: ext4_get_block() can restart the transaction
1652          */
1653         handle = ext4_journal_current_handle();
1654
1655 out_stop:
1656         if (handle) {
1657                 int err;
1658
1659                 if (orphan && inode->i_nlink)
1660                         ext4_orphan_del(handle, inode);
1661                 if (orphan && ret > 0) {
1662                         loff_t end = offset + ret;
1663                         if (end > inode->i_size) {
1664                                 ei->i_disksize = end;
1665                                 i_size_write(inode, end);
1666                                 /*
1667                                  * We're going to return a positive `ret'
1668                                  * here due to non-zero-length I/O, so there's
1669                                  * no way of reporting error returns from
1670                                  * ext4_mark_inode_dirty() to userspace.  So
1671                                  * ignore it.
1672                                  */
1673                                 ext4_mark_inode_dirty(handle, inode);
1674                         }
1675                 }
1676                 err = ext4_journal_stop(handle);
1677                 if (ret == 0)
1678                         ret = err;
1679         }
1680 out:
1681         return ret;
1682 }
1683
1684 /*
1685  * Pages can be marked dirty completely asynchronously from ext4's journalling
1686  * activity, by filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
1687  * much here because ->set_page_dirty is called under VFS locks.  The page is
1688  * not necessarily locked.
1689  *
1690  * We cannot just dirty the page and leave attached buffers clean, because the
1691  * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
1692  * or jbddirty because all the journalling code will explode.
1693  *
1694  * So what we do is to mark the page "pending dirty" and next time writepage
1695  * is called, propagate that into the buffers appropriately.
1696  */
1697 static int ext4_journalled_set_page_dirty(struct page *page)
1698 {
1699         SetPageChecked(page);
1700         return __set_page_dirty_nobuffers(page);
1701 }
1702
1703 static const struct address_space_operations ext4_ordered_aops = {
1704         .readpage       = ext4_readpage,
1705         .readpages      = ext4_readpages,
1706         .writepage      = ext4_ordered_writepage,
1707         .sync_page      = block_sync_page,
1708         .prepare_write  = ext4_prepare_write,
1709         .commit_write   = ext4_ordered_commit_write,
1710         .bmap           = ext4_bmap,
1711         .invalidatepage = ext4_invalidatepage,
1712         .releasepage    = ext4_releasepage,
1713         .direct_IO      = ext4_direct_IO,
1714         .migratepage    = buffer_migrate_page,
1715 };
1716
1717 static const struct address_space_operations ext4_writeback_aops = {
1718         .readpage       = ext4_readpage,
1719         .readpages      = ext4_readpages,
1720         .writepage      = ext4_writeback_writepage,
1721         .sync_page      = block_sync_page,
1722         .prepare_write  = ext4_prepare_write,
1723         .commit_write   = ext4_writeback_commit_write,
1724         .bmap           = ext4_bmap,
1725         .invalidatepage = ext4_invalidatepage,
1726         .releasepage    = ext4_releasepage,
1727         .direct_IO      = ext4_direct_IO,
1728         .migratepage    = buffer_migrate_page,
1729 };
1730
1731 static const struct address_space_operations ext4_journalled_aops = {
1732         .readpage       = ext4_readpage,
1733         .readpages      = ext4_readpages,
1734         .writepage      = ext4_journalled_writepage,
1735         .sync_page      = block_sync_page,
1736         .prepare_write  = ext4_prepare_write,
1737         .commit_write   = ext4_journalled_commit_write,
1738         .set_page_dirty = ext4_journalled_set_page_dirty,
1739         .bmap           = ext4_bmap,
1740         .invalidatepage = ext4_invalidatepage,
1741         .releasepage    = ext4_releasepage,
1742 };
1743
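/*
 * Pick the address_space_operations matching the inode's journalling
 * mode: data=ordered, data=writeback or data=journal.
 */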
1744 void ext4_set_aops(struct inode *inode)
1745 {
1746         if (ext4_should_order_data(inode))
1747                 inode->i_mapping->a_ops = &ext4_ordered_aops;
1748         else if (ext4_should_writeback_data(inode))
1749                 inode->i_mapping->a_ops = &ext4_writeback_aops;
1750         else
1751                 inode->i_mapping->a_ops = &ext4_journalled_aops;
1752 }
1753
1754 /*
1755  * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
1756  * up to the end of the block which corresponds to `from'.
1757  * This is required during truncate. We need to physically zero the tail end
1758  * of that block so it doesn't yield old data if the file is later grown.
1759  */
1760 int ext4_block_truncate_page(handle_t *handle, struct page *page,
1761                 struct address_space *mapping, loff_t from)
1762 {
1763         ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
1764         unsigned offset = from & (PAGE_CACHE_SIZE-1);
1765         unsigned blocksize, iblock, length, pos;
1766         struct inode *inode = mapping->host;
1767         struct buffer_head *bh;
1768         int err = 0;
1769         void *kaddr;
1770
1771         blocksize = inode->i_sb->s_blocksize;
1772         length = blocksize - (offset & (blocksize - 1));
1773         iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
1774
1775         /*
1776          * For "nobh" option,  we can only work if we don't need to
1777          * read-in the page - otherwise we create buffers to do the IO.
1778          */
1779         if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
1780              ext4_should_writeback_data(inode) && PageUptodate(page)) {
1781                 kaddr = kmap_atomic(page, KM_USER0);
1782                 memset(kaddr + offset, 0, length);
1783                 flush_dcache_page(page);
1784                 kunmap_atomic(kaddr, KM_USER0);
1785                 set_page_dirty(page);
1786                 goto unlock;
1787         }
1788
1789         if (!page_has_buffers(page))
1790                 create_empty_buffers(page, blocksize, 0);
1791
1792         /* Find the buffer that contains "offset" */
1793         bh = page_buffers(page);
1794         pos = blocksize;
1795         while (offset >= pos) {
1796                 bh = bh->b_this_page;
1797                 iblock++;
1798                 pos += blocksize;
1799         }
1800
1801         err = 0;
1802         if (buffer_freed(bh)) {
1803                 BUFFER_TRACE(bh, "freed: skip");
1804                 goto unlock;
1805         }
1806
1807         if (!buffer_mapped(bh)) {
1808                 BUFFER_TRACE(bh, "unmapped");
1809                 ext4_get_block(inode, iblock, bh, 0);
1810                 /* unmapped? It's a hole - nothing to do */
1811                 if (!buffer_mapped(bh)) {
1812                         BUFFER_TRACE(bh, "still unmapped");
1813                         goto unlock;
1814                 }
1815         }
1816
1817         /* Ok, it's mapped. Make sure it's up-to-date */
1818         if (PageUptodate(page))
1819                 set_buffer_uptodate(bh);
1820
1821         if (!buffer_uptodate(bh)) {
1822                 err = -EIO;
1823                 ll_rw_block(READ, 1, &bh);
1824                 wait_on_buffer(bh);
1825                 /* Uhhuh. Read error. Complain and punt. */
1826                 if (!buffer_uptodate(bh))
1827                         goto unlock;
1828         }
1829
1830         if (ext4_should_journal_data(inode)) {
1831                 BUFFER_TRACE(bh, "get write access");
1832                 err = ext4_journal_get_write_access(handle, bh);
1833                 if (err)
1834                         goto unlock;
1835         }
1836
1837         kaddr = kmap_atomic(page, KM_USER0);
1838         memset(kaddr + offset, 0, length);
1839         flush_dcache_page(page);
1840         kunmap_atomic(kaddr, KM_USER0);
1841
1842         BUFFER_TRACE(bh, "zeroed end of block");
1843
1844         err = 0;
1845         if (ext4_should_journal_data(inode)) {
1846                 err = ext4_journal_dirty_metadata(handle, bh);
1847         } else {
1848                 if (ext4_should_order_data(inode))
1849                         err = ext4_journal_dirty_data(handle, bh);
1850                 mark_buffer_dirty(bh);
1851         }
1852
1853 unlock:
1854         unlock_page(page);
1855         page_cache_release(page);
1856         return err;
1857 }
1858
1859 /*
1860  * Probably it should be a library function... search for first non-zero word
1861  * or memcmp with zero_page, whatever is better for particular architecture.
1862  * Linus?
1863  */
1864 static inline int all_zeroes(__le32 *p, __le32 *q)
1865 {
1866         while (p < q)
1867                 if (*p++)
1868                         return 0;
1869         return 1;
1870 }
1871
1872 /**
1873  *      ext4_find_shared - find the indirect blocks for partial truncation.
1874  *      @inode:   inode in question
1875  *      @depth:   depth of the affected branch
1876  *      @offsets: offsets of pointers in that branch (see ext4_block_to_path)
1877  *      @chain:   place to store the pointers to partial indirect blocks
1878  *      @top:     place to store the (detached) top of branch
1879  *
1880  *      This is a helper function used by ext4_truncate().
1881  *
1882  *      When we do truncate() we may have to clean the ends of several
1883  *      indirect blocks but leave the blocks themselves alive. A block is
1884  *      partially truncated if some data below the new i_size is referred
1885  *      to from it (and it is on the path to the first completely truncated
1886  *      data block, indeed).  We have to free the top of that path along
1887  *      with everything to the right of the path. Since no allocation
1888  *      past the truncation point is possible until ext4_truncate()
1889  *      finishes, we may safely do the latter, but top of branch may
1890  *      require special attention - pageout below the truncation point
1891  *      might try to populate it.
1892  *
1893  *      We atomically detach the top of branch from the tree, store the
1894  *      block number of its root in *@top, pointers to buffer_heads of
1895  *      partially truncated blocks - in @chain[].bh and pointers to
1896  *      their last elements that should not be removed - in
1897  *      @chain[].p. Return value is the pointer to last filled element
1898  *      of @chain.
1899  *
1900  *      The work left to the caller is the actual freeing of subtrees:
1901  *              a) free the subtree starting from *@top
1902  *              b) free the subtrees whose roots are stored in
1903  *                      (@chain[i].p+1 .. end of @chain[i].bh->b_data)
1904  *              c) free the subtrees growing from the inode past the @chain[0].
1905  *                      (no partially truncated stuff there).  */
1906
1907 static Indirect *ext4_find_shared(struct inode *inode, int depth,
1908                         int offsets[4], Indirect chain[4], __le32 *top)
1909 {
1910         Indirect *partial, *p;
1911         int k, err;
1912
1913         *top = 0;
1914         /* Make k index the deepest non-null offset + 1 */
1915         for (k = depth; k > 1 && !offsets[k-1]; k--)
1916                 ;
1917         partial = ext4_get_branch(inode, k, offsets, chain, &err);
1918         /* Writer: pointers */
1919         if (!partial)
1920                 partial = chain + k-1;
1921         /*
1922          * If the branch has acquired a continuation since we last looked at it,
1923          * fine, it should all survive and (new) top doesn't belong to us.
1924          */
1925         if (!partial->key && *partial->p)
1926                 /* Writer: end */
1927                 goto no_top;
1928         for (p = partial; p > chain && all_zeroes((__le32 *)p->bh->b_data, p->p); p--)
1929                 ;
1930         /*
1931          * OK, we've found the last block that must survive. The rest of our
1932          * branch should be detached before unlocking. However, if that rest
1933          * of branch is all ours and does not grow immediately from the inode
1934          * it's easier to cheat and just decrement partial->p.
1935          */
1936         if (p == chain + k - 1 && p > chain) {
1937                 p->p--;
1938         } else {
1939                 *top = *p->p;
1940                 /* Nope, don't do this in ext4.  Must leave the tree intact */
1941 #if 0
1942                 *p->p = 0;
1943 #endif
1944         }
1945         /* Writer: end */
1946
1947         while (partial > p) {
1948                 brelse(partial->bh);
1949                 partial--;
1950         }
1951 no_top:
1952         return partial;
1953 }
1954
1955 /*
1956  * Zero a number of block pointers in either an inode or an indirect block.
1957  * If we restart the transaction we must again get write access to the
1958  * indirect block for further modification.
1959  *
1960  * We release `count' blocks on disk, but (last - first) may be greater
1961  * than `count' because there can be holes in there.
1962  */
1963 static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
1964                 struct buffer_head *bh, ext4_fsblk_t block_to_free,
1965                 unsigned long count, __le32 *first, __le32 *last)
1966 {
1967         __le32 *p;
1968         if (try_to_extend_transaction(handle, inode)) {
1969                 if (bh) {
1970                         BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
1971                         ext4_journal_dirty_metadata(handle, bh);
1972                 }
1973                 ext4_mark_inode_dirty(handle, inode);
1974                 ext4_journal_test_restart(handle, inode);
1975                 if (bh) {
1976                         BUFFER_TRACE(bh, "retaking write access");
1977                         ext4_journal_get_write_access(handle, bh);
1978                 }
1979         }
1980
1981         /*
1982          * Any buffers which are on the journal will be in memory. We find
1983          * them on the hash table so jbd2_journal_revoke() will run
1984          * jbd2_journal_forget() on them.  We've already detached each block
1985          * from the file, so bforget() in jbd2_journal_forget() should be safe.
1986          *
1987          * AKPM: turn on bforget in jbd2_journal_forget()!!!
1988          */
1989         for (p = first; p < last; p++) {
1990                 u32 nr = le32_to_cpu(*p);
1991                 if (nr) {
1992                         struct buffer_head *bh;
1993
1994                         *p = 0;
1995                         bh = sb_find_get_block(inode->i_sb, nr);
1996                         ext4_forget(handle, 0, inode, bh, nr);
1997                 }
1998         }
1999
2000         ext4_free_blocks(handle, inode, block_to_free, count);
2001 }
2002
2003 /**
2004  * ext4_free_data - free a list of data blocks
2005  * @handle:     handle for this transaction
2006  * @inode:      inode we are dealing with
2007  * @this_bh:    indirect buffer_head which contains *@first and *@last
2008  * @first:      array of block numbers
2009  * @last:       points immediately past the end of array
2010  *
2011  * We are freeing all blocks referred to from that array (numbers are stored as
2012  * little-endian 32-bit) and updating @inode->i_blocks appropriately.
2013  *
2014  * We accumulate contiguous runs of blocks to free.  Conveniently, if these
2015  * blocks are contiguous then releasing them at one time will only affect one
2016  * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
2017  * actually use a lot of journal space.
2018  *
2019  * @this_bh will be %NULL if @first and @last point into the inode's direct
2020  * block pointers.
2021  */
2022 static void ext4_free_data(handle_t *handle, struct inode *inode,
2023                            struct buffer_head *this_bh,
2024                            __le32 *first, __le32 *last)
2025 {
2026         ext4_fsblk_t block_to_free = 0;    /* Starting block # of a run */
2027         unsigned long count = 0;            /* Number of blocks in the run */
2028         __le32 *block_to_free_p = NULL;     /* Pointer into inode/ind
2029                                                corresponding to
2030                                                block_to_free */
2031         ext4_fsblk_t nr;                    /* Current block # */
2032         __le32 *p;                          /* Pointer into inode/ind
2033                                                for current block */
2034         int err;
2035
2036         if (this_bh) {                          /* For indirect block */
2037                 BUFFER_TRACE(this_bh, "get_write_access");
2038                 err = ext4_journal_get_write_access(handle, this_bh);
2039                 /* Important: if we can't update the indirect pointers
2040                  * to the blocks, we can't free them. */
2041                 if (err)
2042                         return;
2043         }
2044
2045         for (p = first; p < last; p++) {
2046                 nr = le32_to_cpu(*p);
2047                 if (nr) {
2048                         /* accumulate blocks to free if they're contiguous */
2049                         if (count == 0) {
2050                                 block_to_free = nr;
2051                                 block_to_free_p = p;
2052                                 count = 1;
2053                         } else if (nr == block_to_free + count) {
2054                                 count++;
2055                         } else {
2056                                 ext4_clear_blocks(handle, inode, this_bh,
2057                                                   block_to_free,
2058                                                   count, block_to_free_p, p);
2059                                 block_to_free = nr;
2060                                 block_to_free_p = p;
2061                                 count = 1;
2062                         }
2063                 }
2064         }
2065
2066         if (count > 0)
2067                 ext4_clear_blocks(handle, inode, this_bh, block_to_free,
2068                                   count, block_to_free_p, p);
2069
2070         if (this_bh) {
2071                 BUFFER_TRACE(this_bh, "call ext4_journal_dirty_metadata");
2072                 ext4_journal_dirty_metadata(handle, this_bh);
2073         }
2074 }
2075
2076 /**
2077  *      ext4_free_branches - free an array of branches
2078  *      @handle: JBD handle for this transaction
2079  *      @inode: inode we are dealing with
2080  *      @parent_bh: the buffer_head which contains *@first and *@last
2081  *      @first: array of block numbers
2082  *      @last:  pointer immediately past the end of array
2083  *      @depth: depth of the branches to free
2084  *
2085  *      We are freeing all blocks referred to from these branches (numbers are
2086  *      stored as little-endian 32-bit) and updating @inode->i_blocks
2087  *      appropriately.
2088  */
2089 static void ext4_free_branches(handle_t *handle, struct inode *inode,
2090                                struct buffer_head *parent_bh,
2091                                __le32 *first, __le32 *last, int depth)
2092 {
2093         ext4_fsblk_t nr;
2094         __le32 *p;
2095
2096         if (is_handle_aborted(handle))
2097                 return;
2098
2099         if (depth--) {
2100                 struct buffer_head *bh;
2101                 int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
2102                 p = last;
2103                 while (--p >= first) {
2104                         nr = le32_to_cpu(*p);
2105                         if (!nr)
2106                                 continue;               /* A hole */
2107
2108                         /* Go read the buffer for the next level down */
2109                         bh = sb_bread(inode->i_sb, nr);
2110
2111                         /*
2112                          * A read failure?  Report the error and skip
2113                          * this branch (should be rare).
2114                          */
2115                         if (!bh) {
2116                                 ext4_error(inode->i_sb, "ext4_free_branches",
2117                                            "Read failure, inode=%lu, block=%llu",
2118                                            inode->i_ino, nr);
2119                                 continue;
2120                         }
2121
2122                         /* This zaps the entire block.  Bottom up. */
2123                         BUFFER_TRACE(bh, "free child branches");
2124                         ext4_free_branches(handle, inode, bh,
2125                                            (__le32*)bh->b_data,
2126                                            (__le32*)bh->b_data + addr_per_block,
2127                                            depth);
2128
2129                         /*
2130                          * We've probably journalled the indirect block several
2131                          * times during the truncate.  But it's no longer
2132                          * needed and we now drop it from the transaction via
2133                          * jbd2_journal_revoke().
2134                          *
2135                          * That's easy if it's exclusively part of this
2136                          * transaction.  But if it's part of the committing
2137                          * transaction then jbd2_journal_forget() will simply
2138                          * brelse() it.  That means that if the underlying
2139                          * block is reallocated in ext4_get_block(),
2140                          * unmap_underlying_metadata() will find this block
2141                          * and will try to get rid of it.  damn, damn.
2142                          *
2143                          * If this block has already been committed to the
2144                          * journal, a revoke record will be written.  And
2145                          * revoke records must be emitted *before* clearing
2146                          * this block's bit in the bitmaps.
2147                          */
2148                         ext4_forget(handle, 1, inode, bh, bh->b_blocknr);
2149
2150                         /*
2151                          * Everything below this pointer has been
2152                          * released.  Now let this top-of-subtree go.
2153                          *
2154                          * We want the freeing of this indirect block to be
2155                          * atomic in the journal with the updating of the
2156                          * bitmap block which owns it.  So make some room in
2157                          * the journal.
2158                          *
2159                          * We zero the parent pointer *after* freeing its
2160                          * pointee in the bitmaps, so if extend_transaction()
2161                          * for some reason fails to put the bitmap changes and
2162                          * the release into the same transaction, recovery
2163                          * will merely complain about releasing a free block,
2164                          * rather than leaking blocks.
2165                          */
2166                         if (is_handle_aborted(handle))
2167                                 return;
2168                         if (try_to_extend_transaction(handle, inode)) {
2169                                 ext4_mark_inode_dirty(handle, inode);
2170                                 ext4_journal_test_restart(handle, inode);
2171                         }
2172
2173                         ext4_free_blocks(handle, inode, nr, 1);
2174
2175                         if (parent_bh) {
2176                                 /*
2177                                  * The block which we have just freed is
2178                                  * pointed to by an indirect block: journal it
2179                                  */
2180                                 BUFFER_TRACE(parent_bh, "get_write_access");
2181                                 if (!ext4_journal_get_write_access(handle,
2182                                                                    parent_bh)){
2183                                         *p = 0;
2184                                         BUFFER_TRACE(parent_bh,
2185                                         "call ext4_journal_dirty_metadata");
2186                                         ext4_journal_dirty_metadata(handle,
2187                                                                     parent_bh);
2188                                 }
2189                         }
2190                 }
2191         } else {
2192                 /* We have reached the bottom of the tree. */
2193                 BUFFER_TRACE(parent_bh, "free data blocks");
2194                 ext4_free_data(handle, inode, parent_bh, first, last);
2195         }
2196 }
2197
2198 /*
2199  * ext4_truncate()
2200  *
2201  * We block out ext4_get_block() block instantiations across the entire
2202  * transaction, and VFS/VM ensures that ext4_truncate() cannot run
2203  * simultaneously on behalf of the same inode.
2204  *
2205  * As we work through the truncate and commit bits of it to the journal there
2206  * is one core, guiding principle: the file's tree must always be consistent on
2207  * disk.  We must be able to restart the truncate after a crash.
2208  *
2209  * The file's tree may be transiently inconsistent in memory (although it
2210  * probably isn't), but whenever we close off and commit a journal transaction,
2211  * the contents of (the filesystem + the journal) must be consistent and
2212  * restartable.  It's pretty simple, really: bottom up, right to left (although
2213  * left-to-right works OK too).
2214  *
2215  * Note that at recovery time, journal replay occurs *before* the restart of
2216  * truncate against the orphan inode list.
2217  *
2218  * The committed inode has the new, desired i_size (which is the same as
2219  * i_disksize in this case).  After a crash, ext4_orphan_cleanup() will see
2220  * that this inode's truncate did not complete and it will again call
2221  * ext4_truncate() to have another go.  So there will be instantiated blocks
2222  * to the right of the truncation point in a crashed ext4 filesystem.  But
2223  * that's fine - as long as they are linked from the inode, the post-crash
2224  * ext4_truncate() run will find them and release them.
2225  */
2226 void ext4_truncate(struct inode *inode)
2227 {
2228         handle_t *handle;
2229         struct ext4_inode_info *ei = EXT4_I(inode);
2230         __le32 *i_data = ei->i_data;
2231         int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
2232         struct address_space *mapping = inode->i_mapping;
2233         int offsets[4];
2234         Indirect chain[4];
2235         Indirect *partial;
2236         __le32 nr = 0;
2237         int n;
2238         long last_block;
2239         unsigned blocksize = inode->i_sb->s_blocksize;
2240         struct page *page;
2241
2242         if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
2243             S_ISLNK(inode->i_mode)))
2244                 return;
2245         if (ext4_inode_is_fast_symlink(inode))
2246                 return;
2247         if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
2248                 return;
2249
2250         /*
2251          * We have to lock the EOF page here, because lock_page() nests
2252          * outside jbd2_journal_start().
2253          */
2254         if ((inode->i_size & (blocksize - 1)) == 0) {
2255                 /* Block boundary? Nothing to do */
2256                 page = NULL;
2257         } else {
2258                 page = grab_cache_page(mapping,
2259                                 inode->i_size >> PAGE_CACHE_SHIFT);
2260                 if (!page)
2261                         return;
2262         }
2263
2264         if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
2265                 return ext4_ext_truncate(inode, page);
2266
2267         handle = start_transaction(inode);
2268         if (IS_ERR(handle)) {
2269                 if (page) {
2270                         clear_highpage(page);
2271                         flush_dcache_page(page);
2272                         unlock_page(page);
2273                         page_cache_release(page);
2274                 }
2275                 return;         /* AKPM: return what? */
2276         }
2277
2278         last_block = (inode->i_size + blocksize - 1)
2279                                         >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
2280
2281         if (page)
2282                 ext4_block_truncate_page(handle, page, mapping, inode->i_size);
2283
2284         n = ext4_block_to_path(inode, last_block, offsets, NULL);
2285         if (n == 0)
2286                 goto out_stop;  /* error */
2287
2288         /*
2289          * OK.  This truncate is going to happen.  We add the inode to the
2290          * orphan list, so that if this truncate spans multiple transactions,
2291          * and we crash, we will resume the truncate when the filesystem
2292          * recovers.  It also marks the inode dirty, to catch the new size.
2293          *
2294          * Implication: the file must always be in a sane, consistent
2295          * truncatable state while each transaction commits.
2296          */
2297         if (ext4_orphan_add(handle, inode))
2298                 goto out_stop;
2299
2300         /*
2301          * The orphan list entry will now protect us from any crash which
2302          * occurs before the truncate completes, so it is now safe to propagate
2303          * the new, shorter inode size (held for now in i_size) into the
2304          * on-disk inode. We do this via i_disksize, which is the value which
2305          * ext4 *really* writes onto the disk inode.
2306          */
2307         ei->i_disksize = inode->i_size;
2308
2309         /*
2310          * From here we block out all ext4_get_block() callers who want to
2311          * modify the block allocation tree.
2312          */
2313         mutex_lock(&ei->truncate_mutex);
2314
2315         if (n == 1) {           /* direct blocks */
2316                 ext4_free_data(handle, inode, NULL, i_data+offsets[0],
2317                                i_data + EXT4_NDIR_BLOCKS);
2318                 goto do_indirects;
2319         }
2320
2321         partial = ext4_find_shared(inode, n, offsets, chain, &nr);
2322         /* Kill the top of shared branch (not detached) */
2323         if (nr) {
2324                 if (partial == chain) {
2325                         /* Shared branch grows from the inode */
2326                         ext4_free_branches(handle, inode, NULL,
2327                                            &nr, &nr+1, (chain+n-1) - partial);
2328                         *partial->p = 0;
2329                         /*
2330                          * We mark the inode dirty prior to restart,
2331                          * and prior to stop.  No need for it here.
2332                          */
2333                 } else {
2334                         /* Shared branch grows from an indirect block */
2335                         BUFFER_TRACE(partial->bh, "get_write_access");
2336                         ext4_free_branches(handle, inode, partial->bh,
2337                                         partial->p,
2338                                         partial->p+1, (chain+n-1) - partial);
2339                 }
2340         }
2341         /* Clear the ends of indirect blocks on the shared branch */
2342         while (partial > chain) {
2343                 ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
2344                                    (__le32*)partial->bh->b_data+addr_per_block,
2345                                    (chain+n-1) - partial);
2346                 BUFFER_TRACE(partial->bh, "call brelse");
2347                 brelse(partial->bh);
2348                 partial--;
2349         }
2350 do_indirects:
2351         /* Kill the remaining (whole) subtrees */
2352         switch (offsets[0]) {
2353         default:
2354                 nr = i_data[EXT4_IND_BLOCK];
2355                 if (nr) {
2356                         ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
2357                         i_data[EXT4_IND_BLOCK] = 0;
2358                 }
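                /* fall through */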
2359         case EXT4_IND_BLOCK:
2360                 nr = i_data[EXT4_DIND_BLOCK];
2361                 if (nr) {
2362                         ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
2363                         i_data[EXT4_DIND_BLOCK] = 0;
2364                 }
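                /* fall through */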
2365         case EXT4_DIND_BLOCK:
2366                 nr = i_data[EXT4_TIND_BLOCK];
2367                 if (nr) {
2368                         ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
2369                         i_data[EXT4_TIND_BLOCK] = 0;
2370                 }
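                /* fall through */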
2371         case EXT4_TIND_BLOCK:
2372                 ;
2373         }
2374
2375         ext4_discard_reservation(inode);
2376
2377         mutex_unlock(&ei->truncate_mutex);
2378         inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
2379         ext4_mark_inode_dirty(handle, inode);
2380
2381         /*
2382          * In a multi-transaction truncate, we only make the final transaction
2383          * synchronous
2384          */
2385         if (IS_SYNC(inode))
2386                 handle->h_sync = 1;
2387 out_stop:
2388         /*
2389          * If this was a simple ftruncate(), and the file will remain alive
2390          * then we need to clear up the orphan record which we created above.
2391          * However, if this was a real unlink then we were called by
2392          * ext4_delete_inode(), and we allow that function to clean up the
2393          * orphan info for us.
2394          */
2395         if (inode->i_nlink)
2396                 ext4_orphan_del(handle, inode);
2397
2398         ext4_journal_stop(handle);
2399 }
2400
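/*
 * Map an inode number to the filesystem block containing its on-disk
 * inode, and record the block group and offset in @iloc.
 */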
2401 static ext4_fsblk_t ext4_get_inode_block(struct super_block *sb,
2402                 unsigned long ino, struct ext4_iloc *iloc)
2403 {
2404         unsigned long desc, group_desc, block_group;
2405         unsigned long offset;
2406         ext4_fsblk_t block;
2407         struct buffer_head *bh;
2408         struct ext4_group_desc *gdp;
2409
2410         if (!ext4_valid_inum(sb, ino)) {
2411                 /*
2412                  * This error is already checked for in namei.c unless we are
2413                  * looking at an NFS filehandle, in which case no error
2414                  * report is needed
2415                  */
2416                 return 0;
2417         }
2418
2419         block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
2420         if (block_group >= EXT4_SB(sb)->s_groups_count) {
2421                 ext4_error(sb, "ext4_get_inode_block", "group >= groups count");
2422                 return 0;
2423         }
2424         smp_rmb();
2425         group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
2426         desc = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
2427         bh = EXT4_SB(sb)->s_group_desc[group_desc];
2428         if (!bh) {
2429                 ext4_error(sb, "ext4_get_inode_block",
2430                             "Descriptor not loaded");
2431                 return 0;
2432         }
2433
2434         gdp = (struct ext4_group_desc *)((__u8 *)bh->b_data +
2435                 desc * EXT4_DESC_SIZE(sb));
2436         /*
2437          * Figure out the offset within the block group inode table
2438          */
2439         offset = ((ino - 1) % EXT4_INODES_PER_GROUP(sb)) *
2440                 EXT4_INODE_SIZE(sb);
2441         block = ext4_inode_table(sb, gdp) +
2442                 (offset >> EXT4_BLOCK_SIZE_BITS(sb));
2443
2444         iloc->block_group = block_group;
2445         iloc->offset = offset & (EXT4_BLOCK_SIZE(sb) - 1);
2446         return block;
2447 }
2448
2449 /*
2450  * ext4_get_inode_loc returns with an extra refcount against the inode's
2451  * underlying buffer_head on success. If 'in_mem' is true, we have all
2452  * data in memory that is needed to recreate the on-disk version of this
2453  * inode.
2454  */
2455 static int __ext4_get_inode_loc(struct inode *inode,
2456                                 struct ext4_iloc *iloc, int in_mem)
2457 {
2458         ext4_fsblk_t block;
2459         struct buffer_head *bh;
2460
2461         block = ext4_get_inode_block(inode->i_sb, inode->i_ino, iloc);
2462         if (!block)
2463                 return -EIO;
2464
2465         bh = sb_getblk(inode->i_sb, block);
2466         if (!bh) {
2467                 ext4_error(inode->i_sb, "ext4_get_inode_loc",
2468                                 "unable to read inode block - "
2469                                 "inode=%lu, block=%llu",
2470                                  inode->i_ino, block);
2471                 return -EIO;
2472         }
2473         if (!buffer_uptodate(bh)) {
2474                 lock_buffer(bh);
2475                 if (buffer_uptodate(bh)) {
2476                         /* someone brought it uptodate while we waited */
2477                         unlock_buffer(bh);
2478                         goto has_buffer;
2479                 }
2480
2481                 /*
2482                  * If we have all information of the inode in memory and this
2483                  * is the only valid inode in the block, we need not read the
2484                  * block.
2485                  */
2486                 if (in_mem) {
2487                         struct buffer_head *bitmap_bh;
2488                         struct ext4_group_desc *desc;
2489                         int inodes_per_buffer;
2490                         int inode_offset, i;
2491                         int block_group;
2492                         int start;
2493
2494                         block_group = (inode->i_ino - 1) /
2495                                         EXT4_INODES_PER_GROUP(inode->i_sb);
2496                         inodes_per_buffer = bh->b_size /
2497                                 EXT4_INODE_SIZE(inode->i_sb);
2498                         inode_offset = ((inode->i_ino - 1) %
2499                                         EXT4_INODES_PER_GROUP(inode->i_sb));
2500                         start = inode_offset & ~(inodes_per_buffer - 1);
2501
2502                         /* Is the inode bitmap in cache? */
2503                         desc = ext4_get_group_desc(inode->i_sb,
2504                                                 block_group, NULL);
2505                         if (!desc)
2506                                 goto make_io;
2507
2508                         bitmap_bh = sb_getblk(inode->i_sb,
2509                                 ext4_inode_bitmap(inode->i_sb, desc));
2510                         if (!bitmap_bh)
2511                                 goto make_io;
2512
2513                         /*
2514                          * If the inode bitmap isn't in cache then the
2515                          * optimisation may end up performing two reads instead
2516                          * of one, so skip it.
2517                          */
2518                         if (!buffer_uptodate(bitmap_bh)) {
2519                                 brelse(bitmap_bh);
2520                                 goto make_io;
2521                         }
2522                         for (i = start; i < start + inodes_per_buffer; i++) {
2523                                 if (i == inode_offset)
2524                                         continue;
2525                                 if (ext4_test_bit(i, bitmap_bh->b_data))
2526                                         break;
2527                         }
2528                         brelse(bitmap_bh);
2529                         if (i == start + inodes_per_buffer) {
2530                                 /* all other inodes are free, so skip I/O */
2531                                 memset(bh->b_data, 0, bh->b_size);
2532                                 set_buffer_uptodate(bh);
2533                                 unlock_buffer(bh);
2534                                 goto has_buffer;
2535                         }
2536                 }
2537
2538 make_io:
2539                 /*
2540                  * There are other valid inodes in the buffer, this inode
2541                  * has in-inode xattrs, or we don't have this inode in memory.
2542                  * Read the block from disk.
2543                  */
2544                 get_bh(bh);
2545                 bh->b_end_io = end_buffer_read_sync;
2546                 submit_bh(READ_META, bh);
2547                 wait_on_buffer(bh);
2548                 if (!buffer_uptodate(bh)) {
2549                         ext4_error(inode->i_sb, "ext4_get_inode_loc",
2550                                         "unable to read inode block - "
2551                                         "inode=%lu, block=%llu",
2552                                         inode->i_ino, block);
2553                         brelse(bh);
2554                         return -EIO;
2555                 }
2556         }
2557 has_buffer:
2558         iloc->bh = bh;
2559         return 0;
2560 }
2561
2562 int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
2563 {
2564         /* We have all inode data except xattrs in memory here. */
2565         return __ext4_get_inode_loc(inode, iloc,
2566                 !(EXT4_I(inode)->i_state & EXT4_STATE_XATTR));
2567 }
2568
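/*
 * Propagate the inode's ext4 flags (sync, append-only, immutable,
 * noatime, dirsync) into the generic VFS i_flags.
 */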
2569 void ext4_set_inode_flags(struct inode *inode)
2570 {
2571         unsigned int flags = EXT4_I(inode)->i_flags;
2572
2573         inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
2574         if (flags & EXT4_SYNC_FL)
2575                 inode->i_flags |= S_SYNC;
2576         if (flags & EXT4_APPEND_FL)
2577                 inode->i_flags |= S_APPEND;
2578         if (flags & EXT4_IMMUTABLE_FL)
2579                 inode->i_flags |= S_IMMUTABLE;
2580         if (flags & EXT4_NOATIME_FL)
2581                 inode->i_flags |= S_NOATIME;
2582         if (flags & EXT4_DIRSYNC_FL)
2583                 inode->i_flags |= S_DIRSYNC;
2584 }
2585
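/*
 * Read the inode's on-disk image and populate the in-core inode,
 * selecting i_op/i_fop/a_ops according to the file type.
 */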
2586 void ext4_read_inode(struct inode *inode)
2587 {
2588         struct ext4_iloc iloc;
2589         struct ext4_inode *raw_inode;
2590         struct ext4_inode_info *ei = EXT4_I(inode);
2591         struct buffer_head *bh;
2592         int block;
2593
2594 #ifdef CONFIG_EXT4DEV_FS_POSIX_ACL
2595         ei->i_acl = EXT4_ACL_NOT_CACHED;
2596         ei->i_default_acl = EXT4_ACL_NOT_CACHED;
2597 #endif
2598         ei->i_block_alloc_info = NULL;
2599
2600         if (__ext4_get_inode_loc(inode, &iloc, 0))
2601                 goto bad_inode;
2602         bh = iloc.bh;
2603         raw_inode = ext4_raw_inode(&iloc);
2604         inode->i_mode = le16_to_cpu(raw_inode->i_mode);
2605         inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
2606         inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
2607         if (!(test_opt(inode->i_sb, NO_UID32))) {
2608                 inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
2609                 inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
2610         }
2611         inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
2612         inode->i_size = le32_to_cpu(raw_inode->i_size);
2613         inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
2614         inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
2615         inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
2616         inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0;
2617
2618         ei->i_state = 0;
2619         ei->i_dir_start_lookup = 0;
2620         ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
2621         /* We now have enough fields to check if the inode was active or not.
2622          * This is needed because nfsd might try to access dead inodes;
2623          * the test is the same one that e2fsck uses.
2624          * NeilBrown 1999oct15
2625          */
2626         if (inode->i_nlink == 0) {
2627                 if (inode->i_mode == 0 ||
2628                     !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
2629                         /* this inode is deleted */
2630                         brelse(bh);
2631                         goto bad_inode;
2632                 }
2633                 /* The only unlinked inodes we let through here have
2634                  * valid i_mode and are being read by the orphan
2635                  * recovery code: that's fine, we're about to complete
2636                  * the process of deleting those. */
2637         }
2638         inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
2639         ei->i_flags = le32_to_cpu(raw_inode->i_flags);
2640 #ifdef EXT4_FRAGMENTS
2641         ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
2642         ei->i_frag_no = raw_inode->i_frag;
2643         ei->i_frag_size = raw_inode->i_fsize;
2644 #endif
2645         ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
2646         if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
2647             cpu_to_le32(EXT4_OS_HURD))
2648                 ei->i_file_acl |=
2649                         ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
2650         if (!S_ISREG(inode->i_mode)) {
2651                 ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
2652         } else {
2653                 inode->i_size |=
2654                         ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
2655         }
2656         ei->i_disksize = inode->i_size;
2657         inode->i_generation = le32_to_cpu(raw_inode->i_generation);
2658         ei->i_block_group = iloc.block_group;
2659         /*
2660          * NOTE! The in-memory inode i_data array is in little-endian order
2661          * even on big-endian machines: we do NOT byteswap the block numbers!
2662          */
2663         for (block = 0; block < EXT4_N_BLOCKS; block++)
2664                 ei->i_data[block] = raw_inode->i_block[block];
2665         INIT_LIST_HEAD(&ei->i_orphan);
2666
2667         if (inode->i_ino >= EXT4_FIRST_INO(inode->i_sb) + 1 &&
2668             EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
2669                 /*
2670                  * When mke2fs creates big inodes it does not zero out
2671                  * the unused bytes above EXT4_GOOD_OLD_INODE_SIZE,
2672                  * so ignore those first few inodes.
2673                  */
2674                 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
2675                 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
2676                     EXT4_INODE_SIZE(inode->i_sb))
2677                         goto bad_inode;
2678                 if (ei->i_extra_isize == 0) {
2679                         /* The extra space is currently unused. Use it. */
2680                         ei->i_extra_isize = sizeof(struct ext4_inode) -
2681                                             EXT4_GOOD_OLD_INODE_SIZE;
2682                 } else {
2683                         __le32 *magic = (void *)raw_inode +
2684                                         EXT4_GOOD_OLD_INODE_SIZE +
2685                                         ei->i_extra_isize;
2686                         if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
2687                                  ei->i_state |= EXT4_STATE_XATTR;
2688                 }
2689         } else
2690                 ei->i_extra_isize = 0;
2691
2692         if (S_ISREG(inode->i_mode)) {
2693                 inode->i_op = &ext4_file_inode_operations;
2694                 inode->i_fop = &ext4_file_operations;
2695                 ext4_set_aops(inode);
2696         } else if (S_ISDIR(inode->i_mode)) {
2697                 inode->i_op = &ext4_dir_inode_operations;
2698                 inode->i_fop = &ext4_dir_operations;
2699         } else if (S_ISLNK(inode->i_mode)) {
2700                 if (ext4_inode_is_fast_symlink(inode))
2701                         inode->i_op = &ext4_fast_symlink_inode_operations;
2702                 else {
2703                         inode->i_op = &ext4_symlink_inode_operations;
2704                         ext4_set_aops(inode);
2705                 }
2706         } else {
2707                 inode->i_op = &ext4_special_inode_operations;
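                /* A non-zero i_block[0] holds an old-style 16-bit device
                 * number; otherwise the new-style encoding is read from
                 * i_block[1]. */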
2708                 if (raw_inode->i_block[0])
2709                         init_special_inode(inode, inode->i_mode,
2710                            old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
2711                 else
2712                         init_special_inode(inode, inode->i_mode,
2713                            new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
2714         }
2715         brelse(iloc.bh);
2716         ext4_set_inode_flags(inode);
2717         return;
2718
2719 bad_inode:
2720         make_bad_inode(inode);
2721         return;
2722 }
2723
2724 /*
2725  * Post the struct inode info into an on-disk inode location in the
2726  * buffer-cache.  This gobbles the caller's reference to the
2727  * buffer_head in the inode location struct.
2728  *
2729  * The caller must have write access to iloc->bh.
2730  */
2731 static int ext4_do_update_inode(handle_t *handle,
2732                                 struct inode *inode,
2733                                 struct ext4_iloc *iloc)
2734 {
2735         struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
2736         struct ext4_inode_info *ei = EXT4_I(inode);
2737         struct buffer_head *bh = iloc->bh;
2738         int err = 0, rc, block;
2739
2740         /* For fields not tracked in the in-memory inode,
2741          * initialise them to zero for new inodes. */
2742         if (ei->i_state & EXT4_STATE_NEW)
2743                 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
2744
2745         raw_inode->i_mode = cpu_to_le16(inode->i_mode);
2746         if (!(test_opt(inode->i_sb, NO_UID32))) {
2747                 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
2748                 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
2749 /*
2750  * Fix up interoperability with old kernels. Otherwise, old inodes get
2751  * re-used with the upper 16 bits of the uid/gid intact.
2752  */
2753                 if (!ei->i_dtime) {
2754                         raw_inode->i_uid_high =
2755                                 cpu_to_le16(high_16_bits(inode->i_uid));
2756                         raw_inode->i_gid_high =
2757                                 cpu_to_le16(high_16_bits(inode->i_gid));
2758                 } else {
2759                         raw_inode->i_uid_high = 0;
2760                         raw_inode->i_gid_high = 0;
2761                 }
2762         } else {
2763                 raw_inode->i_uid_low =
2764                         cpu_to_le16(fs_high2lowuid(inode->i_uid));
2765                 raw_inode->i_gid_low =
2766                         cpu_to_le16(fs_high2lowgid(inode->i_gid));
2767                 raw_inode->i_uid_high = 0;
2768                 raw_inode->i_gid_high = 0;
2769         }
2770         raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
2771         raw_inode->i_size = cpu_to_le32(ei->i_disksize);
2772         raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
2773         raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
2774         raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
2775         raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
2776         raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
2777         raw_inode->i_flags = cpu_to_le32(ei->i_flags);
2778 #ifdef EXT4_FRAGMENTS
2779         raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
2780         raw_inode->i_frag = ei->i_frag_no;
2781         raw_inode->i_fsize = ei->i_frag_size;
2782 #endif
2783         if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
2784             cpu_to_le32(EXT4_OS_HURD))
2785                 raw_inode->i_file_acl_high =
2786                         cpu_to_le16(ei->i_file_acl >> 32);
2787         raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
2788         if (!S_ISREG(inode->i_mode)) {
2789                 raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
2790         } else {
2791                 raw_inode->i_size_high =
2792                         cpu_to_le32(ei->i_disksize >> 32);
2793                 if (ei->i_disksize > 0x7fffffffULL) {
2794                         struct super_block *sb = inode->i_sb;
2795                         if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
2796                                         EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
2797                             EXT4_SB(sb)->s_es->s_rev_level ==
2798                                         cpu_to_le32(EXT4_GOOD_OLD_REV)) {
2799                                /* If this is the first large file
2800                                 * created, add a flag to the superblock.
2801                                 */
2802                                 err = ext4_journal_get_write_access(handle,
2803                                                 EXT4_SB(sb)->s_sbh);
2804                                 if (err)
2805                                         goto out_brelse;
2806                                 ext4_update_dynamic_rev(sb);
2807                                 EXT4_SET_RO_COMPAT_FEATURE(sb,
2808                                         EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
2809                                 sb->s_dirt = 1;
2810                                 handle->h_sync = 1;
2811                                 err = ext4_journal_dirty_metadata(handle,
2812                                                 EXT4_SB(sb)->s_sbh);
2813                         }
2814                 }
2815         }
2816         raw_inode->i_generation = cpu_to_le32(inode->i_generation);
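        /* For device nodes: an old-style 16-bit rdev is stored in i_block[0];
         * anything larger uses the new encoding in i_block[1], with
         * i_block[0] left zero so that old kernels see no device there. */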
2817         if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
2818                 if (old_valid_dev(inode->i_rdev)) {
2819                         raw_inode->i_block[0] =
2820                                 cpu_to_le32(old_encode_dev(inode->i_rdev));
2821                         raw_inode->i_block[1] = 0;
2822                 } else {
2823                         raw_inode->i_block[0] = 0;
2824                         raw_inode->i_block[1] =
2825                                 cpu_to_le32(new_encode_dev(inode->i_rdev));
2826                         raw_inode->i_block[2] = 0;
2827                 }
2828         } else for (block = 0; block < EXT4_N_BLOCKS; block++)
2829                 raw_inode->i_block[block] = ei->i_data[block];
2830
2831         if (ei->i_extra_isize)
2832                 raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
2833
2834         BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
2835         rc = ext4_journal_dirty_metadata(handle, bh);
2836         if (!err)
2837                 err = rc;
2838         ei->i_state &= ~EXT4_STATE_NEW;
2839
2840 out_brelse:
2841         brelse(bh);
2842         ext4_std_error(inode->i_sb, err);
2843         return err;
2844 }
2845
2846 /*
2847  * ext4_write_inode()
2848  *
2849  * We are called from a few places:
2850  *
2851  * - Within generic_file_write() for O_SYNC files.
2852  *   Here, there will be no transaction running. We wait for any running
2853  *   transaction to commit.
2854  *
2855  * - Within sys_sync(), kupdate and such.
2856  *   We wait on commit, if told to.
2857  *
2858  * - Within prune_icache() (PF_MEMALLOC == true)
2859  *   Here we simply return.  We can't afford to block kswapd on the
2860  *   journal commit.
2861  *
2862  * In all cases it is actually safe for us to return without doing anything,
2863  * because the inode has been copied into a raw inode buffer in
2864  * ext4_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
2865  * knfsd.
2866  *
2867  * Note that we are absolutely dependent upon all inode dirtiers doing the
2868  * right thing: they *must* call mark_inode_dirty() after dirtying info in
2869  * which we are interested.
2870  *
2871  * It would be a bug for them to not do this.  The code:
2872  *
2873  *      mark_inode_dirty(inode);
2874  *      stuff();
2875  *      inode->i_size = expr;
2876  *
2877  * is in error because a kswapd-driven write_inode() could occur while
2878  * `stuff()' is running, and the new i_size will be lost.  Plus the inode
2879  * will no longer be on the superblock's dirty inode list.
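 *
 * The safe ordering (an illustrative counterpart, not a new API) is to make
 * all in-core modifications first and call mark_inode_dirty() last:
 *
 *      stuff();
 *      inode->i_size = expr;
 *      mark_inode_dirty(inode);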
2880  */
2881 int ext4_write_inode(struct inode *inode, int wait)
2882 {
2883         if (current->flags & PF_MEMALLOC)
2884                 return 0;
2885
2886         if (ext4_journal_current_handle()) {
2887                 jbd_debug(0, "called recursively, non-PF_MEMALLOC!\n");
2888                 dump_stack();
2889                 return -EIO;
2890         }
2891
2892         if (!wait)
2893                 return 0;
2894
2895         return ext4_force_commit(inode->i_sb);
2896 }
2897
2898 /*
2899  * ext4_setattr()
2900  *
2901  * Called from notify_change.
2902  *
2903  * We want to trap VFS attempts to truncate the file as soon as
2904  * possible.  In particular, we want to make sure that when the VFS
2905  * shrinks i_size, we put the inode on the orphan list and modify
2906  * i_disksize immediately, so that during the subsequent flushing of
2907  * dirty pages and freeing of disk blocks, we can guarantee that any
2908  * commit will leave the blocks being flushed in an unused state on
2909  * disk.  (On recovery, the inode will get truncated and the blocks will
2910  * be freed, so we have a strong guarantee that no future commit will
2911  * leave these blocks visible to the user.)
2912  *
2913  * Called with inode->sem down.
2914  */
2915 int ext4_setattr(struct dentry *dentry, struct iattr *attr)
2916 {
2917         struct inode *inode = dentry->d_inode;
2918         int error, rc = 0;
2919         const unsigned int ia_valid = attr->ia_valid;
2920
2921         error = inode_change_ok(inode, attr);
2922         if (error)
2923                 return error;
2924
2925         if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
2926                 (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
2927                 handle_t *handle;
2928
2929                 /* (user+group)*(old+new) structure, inode write (sb,
2930                  * inode block, ? - but truncate inode update has it) */
2931                 handle = ext4_journal_start(inode, 2*(EXT4_QUOTA_INIT_BLOCKS(inode->i_sb)+
2932                                         EXT4_QUOTA_DEL_BLOCKS(inode->i_sb))+3);
2933                 if (IS_ERR(handle)) {
2934                         error = PTR_ERR(handle);
2935                         goto err_out;
2936                 }
2937                 error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
2938                 if (error) {
2939                         ext4_journal_stop(handle);
2940                         return error;
2941                 }
2942                 /* Update corresponding info in inode so that everything is in
2943                  * one transaction */
2944                 if (attr->ia_valid & ATTR_UID)
2945                         inode->i_uid = attr->ia_uid;
2946                 if (attr->ia_valid & ATTR_GID)
2947                         inode->i_gid = attr->ia_gid;
2948                 error = ext4_mark_inode_dirty(handle, inode);
2949                 ext4_journal_stop(handle);
2950         }
2951
2952         if (S_ISREG(inode->i_mode) &&
2953             attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
2954                 handle_t *handle;
2955
2956                 handle = ext4_journal_start(inode, 3);
2957                 if (IS_ERR(handle)) {
2958                         error = PTR_ERR(handle);
2959                         goto err_out;
2960                 }
2961
2962                 error = ext4_orphan_add(handle, inode);
2963                 EXT4_I(inode)->i_disksize = attr->ia_size;
2964                 rc = ext4_mark_inode_dirty(handle, inode);
2965                 if (!error)
2966                         error = rc;
2967                 ext4_journal_stop(handle);
2968         }
2969
2970         rc = inode_setattr(inode, attr);
2971
2972         /* If inode_setattr's call to ext4_truncate failed to get a
2973          * transaction handle at all, we need to clean up the in-core
2974          * orphan list manually. */
2975         if (inode->i_nlink)
2976                 ext4_orphan_del(NULL, inode);
2977
2978         if (!rc && (ia_valid & ATTR_MODE))
2979                 rc = ext4_acl_chmod(inode);
2980
2981 err_out:
2982         ext4_std_error(inode->i_sb, error);
2983         if (!error)
2984                 error = rc;
2985         return error;
2986 }
2987
2988
2989 /*
2990  * How many blocks doth make a writepage()?
2991  *
2992  * With N blocks per page, it may be:
2993  * N data blocks
2994  * 2 indirect blocks
2995  * 2 dindirect blocks
2996  * 1 tindirect block
2997  * N+5 bitmap blocks (from the above)
2998  * N+5 group descriptor summary blocks
2999  * 1 inode block
3000  * 1 superblock.
3001  * 2 * EXT4_SINGLEDATA_TRANS_BLOCKS for the quota files
3002  *
3003  * 3 * (N + 5) + 2 + 2 * EXT4_SINGLEDATA_TRANS_BLOCKS
3004  *
3005  * With ordered or writeback data it's the same, less the N data blocks.
3006  *
3007  * If the inode's direct blocks can hold an integral number of pages then a
3008  * page cannot straddle two indirect blocks, and we can only touch one indirect
3009  * and dindirect block, and the "5" above becomes "3".
3010  *
3011  * This still overestimates under most circumstances.  If we were to pass the
3012  * start and end offsets in here as well we could do block_to_path() on each
3013  * block and work out the exact number of indirects which are touched.  Pah.
3014  */
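/*
 * A worked example of the estimate above (illustrative numbers only, and
 * ignoring the CONFIG_QUOTA term): with 4K pages on a 4K block size
 * filesystem, bpp = 1 and EXT4_NDIR_BLOCKS (12) divides evenly by bpp, so
 * ext4_writepage_trans_blocks() below uses indirects = 3.  Journaled data
 * then reserves 3 * (1 + 3) + 2 = 14 credits; ordered or writeback data
 * reserves 2 * (1 + 3) + 2 = 10.
 */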
3015
3016 int ext4_writepage_trans_blocks(struct inode *inode)
3017 {
3018         int bpp = ext4_journal_blocks_per_page(inode);
3019         int indirects = (EXT4_NDIR_BLOCKS % bpp) ? 5 : 3;
3020         int ret;
3021
3022         if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
3023                 return ext4_ext_writepage_trans_blocks(inode, bpp);
3024
3025         if (ext4_should_journal_data(inode))
3026                 ret = 3 * (bpp + indirects) + 2;
3027         else
3028                 ret = 2 * (bpp + indirects) + 2;
3029
3030 #ifdef CONFIG_QUOTA
3031         /* We know that the structure was already allocated during DQUOT_INIT,
3032          * so we will be updating only the data blocks + inodes */
3033         ret += 2*EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
3034 #endif
3035
3036         return ret;
3037 }
3038
3039 /*
3040  * The caller must have previously called ext4_reserve_inode_write().
3041  * Given this, we know that the caller already has write access to iloc->bh.
3042  */
3043 int ext4_mark_iloc_dirty(handle_t *handle,
3044                 struct inode *inode, struct ext4_iloc *iloc)
3045 {
3046         int err = 0;
3047
3048         /* ext4_do_update_inode() consumes one bh->b_count reference */
3049         get_bh(iloc->bh);
3050
3051         /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
3052         err = ext4_do_update_inode(handle, inode, iloc);
3053         put_bh(iloc->bh);
3054         return err;
3055 }
3056
3057 /*
3058  * On success, we end up with an outstanding reference count against
3059  * iloc->bh.  This _must_ be cleaned up later.
3060  */
3061
3062 int
3063 ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
3064                          struct ext4_iloc *iloc)
3065 {
3066         int err = 0;
3067         if (handle) {
3068                 err = ext4_get_inode_loc(inode, iloc);
3069                 if (!err) {
3070                         BUFFER_TRACE(iloc->bh, "get_write_access");
3071                         err = ext4_journal_get_write_access(handle, iloc->bh);
3072                         if (err) {
3073                                 brelse(iloc->bh);
3074                                 iloc->bh = NULL;
3075                         }
3076                 }
3077         }
3078         ext4_std_error(inode->i_sb, err);
3079         return err;
3080 }
3081
3082 /*
3083  * What we do here is to mark the in-core inode as clean with respect to inode
3084  * dirtiness (it may still be data-dirty).
3085  * This means that the in-core inode may be reaped by prune_icache
3086  * without having to perform any I/O.  This is a very good thing,
3087  * because *any* task may call prune_icache - even ones which
3088  * have a transaction open against a different journal.
3089  *
3090  * Is this cheating?  Not really.  Sure, we haven't written the
3091  * inode out, but prune_icache isn't a user-visible syncing function.
3092  * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
3093  * we start and wait on commits.
3094  *
3095  * Is this efficient/effective?  Well, we're being nice to the system
3096  * by cleaning up our inodes proactively so they can be reaped
3097  * without I/O.  But we are potentially leaving up to five seconds'
3098  * worth of inodes floating about which prune_icache wants us to
3099  * write out.  One way to fix that would be to get prune_icache()
3100  * to do a write_super() to free up some memory.  It has the desired
3101  * effect.
3102  */
3103 int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
3104 {
3105         struct ext4_iloc iloc;
3106         int err;
3107
3108         might_sleep();
3109         err = ext4_reserve_inode_write(handle, inode, &iloc);
3110         if (!err)
3111                 err = ext4_mark_iloc_dirty(handle, inode, &iloc);
3112         return err;
3113 }
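
/*
 * A minimal sketch of the usual calling pattern (illustrative only; real
 * callers size the credit request to the update they are making):
 *
 *      handle_t *handle = ext4_journal_start(inode, 1);
 *      if (IS_ERR(handle))
 *              return PTR_ERR(handle);
 *      inode->i_ctime = CURRENT_TIME_SEC;
 *      err = ext4_mark_inode_dirty(handle, inode);
 *      ext4_journal_stop(handle);
 */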
3114
3115 /*
3116  * ext4_dirty_inode() is called from __mark_inode_dirty()
3117  *
3118  * We're really interested in the case where a file is being extended.
3119  * i_size has been changed by generic_commit_write() and we thus need
3120  * to include the updated inode in the current transaction.
3121  *
3122  * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks
3123  * are allocated to the file.
3124  *
3125  * If the inode is marked synchronous, we don't honour that here - doing
3126  * so would cause a commit on atime updates, which we don't bother doing.
3127  * We handle synchronous inodes at the highest possible level.
3128  */
3129 void ext4_dirty_inode(struct inode *inode)
3130 {
3131         handle_t *current_handle = ext4_journal_current_handle();
3132         handle_t *handle;
3133
3134         handle = ext4_journal_start(inode, 2);
3135         if (IS_ERR(handle))
3136                 goto out;
3137         if (current_handle &&
3138                 current_handle->h_transaction != handle->h_transaction) {
3139                 /* This task has a transaction open against a different fs */
3140                 printk(KERN_EMERG "%s: transactions do not match!\n",
3141                        __FUNCTION__);
3142         } else {
3143                 jbd_debug(5, "marking dirty.  outer handle=%p\n",
3144                                 current_handle);
3145                 ext4_mark_inode_dirty(handle, inode);
3146         }
3147         ext4_journal_stop(handle);
3148 out:
3149         return;
3150 }
3151
3152 #if 0
3153 /*
3154  * Bind an inode's backing buffer_head into this transaction, to prevent
3155  * it from being flushed to disk early.  Unlike
3156  * ext4_reserve_inode_write, this leaves behind no bh reference and
3157  * returns no iloc structure, so the caller needs to repeat the iloc
3158  * lookup to mark the inode dirty later.
3159  */
3160 static int ext4_pin_inode(handle_t *handle, struct inode *inode)
3161 {
3162         struct ext4_iloc iloc;
3163
3164         int err = 0;
3165         if (handle) {
3166                 err = ext4_get_inode_loc(inode, &iloc);
3167                 if (!err) {
3168                         BUFFER_TRACE(iloc.bh, "get_write_access");
3169                         err = jbd2_journal_get_write_access(handle, iloc.bh);
3170                         if (!err)
3171                                 err = ext4_journal_dirty_metadata(handle,
3172                                                                   iloc.bh);
3173                         brelse(iloc.bh);
3174                 }
3175         }
3176         ext4_std_error(inode->i_sb, err);
3177         return err;
3178 }
3179 #endif
3180
3181 int ext4_change_inode_journal_flag(struct inode *inode, int val)
3182 {
3183         journal_t *journal;
3184         handle_t *handle;
3185         int err;
3186
3187         /*
3188          * We have to be very careful here: changing a data block's
3189          * journaling status dynamically is dangerous.  If we write a
3190          * data block to the journal, change the status and then delete
3191          * that block, we risk forgetting to revoke the old log record
3192          * from the journal and so a subsequent replay can corrupt data.
3193          * So, first we make sure that the journal is empty and that
3194          * nobody is changing anything.
3195          */
3196
3197         journal = EXT4_JOURNAL(inode);
3198         if (is_journal_aborted(journal) || IS_RDONLY(inode))
3199                 return -EROFS;
3200
3201         jbd2_journal_lock_updates(journal);
3202         jbd2_journal_flush(journal);
3203
3204         /*
3205          * OK, there are no updates running now, and all cached data is
3206          * synced to disk.  We are now in a completely consistent state
3207          * which doesn't have anything in the journal, and we know that
3208          * no filesystem updates are running, so it is safe to modify
3209          * the inode's in-core data-journaling state flag now.
3210          */
3211
3212         if (val)
3213                 EXT4_I(inode)->i_flags |= EXT4_JOURNAL_DATA_FL;
3214         else
3215                 EXT4_I(inode)->i_flags &= ~EXT4_JOURNAL_DATA_FL;
3216         ext4_set_aops(inode);
3217
3218         jbd2_journal_unlock_updates(journal);
3219
3220         /* Finally we can mark the inode as dirty. */
3221
3222         handle = ext4_journal_start(inode, 1);
3223         if (IS_ERR(handle))
3224                 return PTR_ERR(handle);
3225
3226         err = ext4_mark_inode_dirty(handle, inode);
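        /* Force a synchronous commit so that the new journaling mode is
         * on disk before we return. */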
3227         handle->h_sync = 1;
3228         ext4_journal_stop(handle);
3229         ext4_std_error(inode->i_sb, err);
3230
3231         return err;
3232 }