/*
 * linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/inode.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 * (jj@sunsite.ms.mff.cuni.cz)
 *
 * Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/iomap.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

#include <trace/events/ext4.h>

#define MPAGE_DA_EXTENT_TAIL 0x01
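
/*
 * Compute the metadata checksum of the on-disk inode: checksum the raw
 * inode bytes while treating both i_checksum_lo and (when present)
 * i_checksum_hi as zero, seeded with the per-inode checksum seed.
 */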
static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
                             struct ext4_inode_info *ei)
{
    struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
    __u32 csum;
    __u16 dummy_csum = 0;
    int offset = offsetof(struct ext4_inode, i_checksum_lo);
    unsigned int csum_size = sizeof(dummy_csum);

    csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, offset);
    csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, csum_size);
    offset += csum_size;
    csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
                       EXT4_GOOD_OLD_INODE_SIZE - offset);

    if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
        offset = offsetof(struct ext4_inode, i_checksum_hi);
        csum = ext4_chksum(sbi, csum, (__u8 *)raw +
                           EXT4_GOOD_OLD_INODE_SIZE,
                           offset - EXT4_GOOD_OLD_INODE_SIZE);
        if (EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
            csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
                               csum_size);
            offset += csum_size;
        }
        csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
                           EXT4_INODE_SIZE(inode->i_sb) - offset);
    }

    return csum;
}
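
/*
 * Verify the on-disk inode checksum: recombine the lo/hi 16-bit halves
 * and compare them against a freshly computed value. Returns 1 (valid)
 * when metadata checksums are not in use.
 */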
static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
                                  struct ext4_inode_info *ei)
{
    __u32 provided, calculated;

    if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
        cpu_to_le32(EXT4_OS_LINUX) ||
        !ext4_has_metadata_csum(inode->i_sb))
        return 1;

    provided = le16_to_cpu(raw->i_checksum_lo);
    calculated = ext4_inode_csum(inode, raw, ei);
    if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
        EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
        provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
    else
        calculated &= 0xFFFF;

    return provided == calculated;
}
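
/*
 * Recompute the inode checksum and store it back into the on-disk fields,
 * splitting the 32-bit value across i_checksum_lo and, if the large inode
 * has room for it, i_checksum_hi.
 */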
static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
                                struct ext4_inode_info *ei)
{
    __u32 csum;

    if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
        cpu_to_le32(EXT4_OS_LINUX) ||
        !ext4_has_metadata_csum(inode->i_sb))
        return;

    csum = ext4_inode_csum(inode, raw, ei);
    raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
    if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
        EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
        raw->i_checksum_hi = cpu_to_le16(csum >> 16);
}

static inline int ext4_begin_ordered_truncate(struct inode *inode,
                                              loff_t new_size)
{
    trace_ext4_begin_ordered_truncate(inode, new_size);
    /*
     * If jinode is zero, then we never opened the file for
     * writing, so there's no need to call
     * jbd2_journal_begin_ordered_truncate() since there are no
     * outstanding writes we need to flush.
     */
    if (!EXT4_I(inode)->jinode)
        return 0;
    return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
                                               EXT4_I(inode)->jinode,
                                               new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned int offset,
                                unsigned int length);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
                                  int pextents);

/*
 * Test whether an inode is a fast symlink.
 * A fast symlink has its symlink data stored in ext4_inode_info->i_data.
 */
int ext4_inode_is_fast_symlink(struct inode *inode)
{
    return S_ISLNK(inode->i_mode) && inode->i_size &&
           (inode->i_size < EXT4_N_BLOCKS * 4);
}

/*
 * Restart the transaction associated with *handle. This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
                                int nblocks)
{
    int ret;

    /*
     * Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this
     * moment, get_block can be called only for blocks inside i_size since
     * page cache has been already dropped and writes are blocked by
     * i_mutex. So we can safely drop the i_data_sem here.
     */
    BUG_ON(EXT4_JOURNAL(inode) == NULL);
    jbd_debug(2, "restarting handle %p\n", handle);
    up_write(&EXT4_I(inode)->i_data_sem);
    ret = ext4_journal_restart(handle, nblocks);
    down_write(&EXT4_I(inode)->i_data_sem);
    ext4_discard_preallocations(inode);

    return ret;
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
    handle_t *handle;
    int err;
    int extra_credits = 3;
    struct ext4_xattr_inode_array *ea_inode_array = NULL;

    trace_ext4_evict_inode(inode);

    if (inode->i_nlink) {
        /*
         * When journalling data dirty buffers are tracked only in the
         * journal. So although mm thinks everything is clean and
         * ready for reaping the inode might still have some pages to
         * write in the running transaction or waiting to be
         * checkpointed. Thus calling jbd2_journal_invalidatepage()
         * (via truncate_inode_pages()) to discard these buffers can
         * cause data loss. Also even if we did not discard these
         * buffers, we would have no way to find them after the inode
         * is reaped and thus the user could see stale data if they
         * try to read them before the transaction is checkpointed.
         * So be careful and force everything to disk here... We use
         * ei->i_datasync_tid to store the newest transaction
         * containing inode's data.
         *
         * Note that directories do not have this problem because they
         * don't use page cache.
         */
        if (inode->i_ino != EXT4_JOURNAL_INO &&
            ext4_should_journal_data(inode) &&
            (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
            inode->i_data.nrpages) {
            journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
            tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;

            jbd2_complete_transaction(journal, commit_tid);
            filemap_write_and_wait(&inode->i_data);
        }
        truncate_inode_pages_final(&inode->i_data);

        goto no_delete;
    }

    if (is_bad_inode(inode))
        goto no_delete;
    dquot_initialize(inode);

    if (ext4_should_order_data(inode))
        ext4_begin_ordered_truncate(inode, 0);
    truncate_inode_pages_final(&inode->i_data);

    /*
     * Protect us against freezing - iput() caller didn't have to have any
     * protection against it
     */
    sb_start_intwrite(inode->i_sb);

    if (!IS_NOQUOTA(inode))
        extra_credits += EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb);

    handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
                        ext4_blocks_for_truncate(inode) + extra_credits);
    if (IS_ERR(handle)) {
        ext4_std_error(inode->i_sb, PTR_ERR(handle));
        /*
         * If we're going to skip the normal cleanup, we still need to
         * make sure that the in-core orphan linked list is properly
         * cleaned up.
         */
        ext4_orphan_del(NULL, inode);
        sb_end_intwrite(inode->i_sb);
        goto no_delete;
    }

    if (IS_SYNC(inode))
        ext4_handle_sync(handle);

    /*
     * Set inode->i_size to 0 before calling ext4_truncate(). We need
     * special handling of symlinks here because i_size is used to
     * determine whether ext4_inode_info->i_data contains symlink data or
     * block mappings. Setting i_size to 0 will remove its fast symlink
     * status. Erase i_data so that it becomes a valid empty block map.
     */
    if (ext4_inode_is_fast_symlink(inode))
        memset(EXT4_I(inode)->i_data, 0, sizeof(EXT4_I(inode)->i_data));
    inode->i_size = 0;
    err = ext4_mark_inode_dirty(handle, inode);
    if (err) {
        ext4_warning(inode->i_sb,
                     "couldn't mark inode dirty (err %d)", err);
        goto stop_handle;
    }
    if (inode->i_blocks) {
        err = ext4_truncate(inode);
        if (err) {
            ext4_error(inode->i_sb,
                       "couldn't truncate inode %lu (err %d)",
                       inode->i_ino, err);
            goto stop_handle;
        }
    }

    /* Remove xattr references. */
    err = ext4_xattr_delete_inode(handle, inode, &ea_inode_array,
                                  extra_credits);
    if (err) {
        ext4_warning(inode->i_sb, "xattr delete (err %d)", err);
stop_handle:
        ext4_journal_stop(handle);
        ext4_orphan_del(NULL, inode);
        sb_end_intwrite(inode->i_sb);
        ext4_xattr_inode_array_free(ea_inode_array);
        goto no_delete;
    }

    /*
     * Kill off the orphan record which ext4_truncate created.
     * AKPM: I think this can be inside the above `if'.
     * Note that ext4_orphan_del() has to be able to cope with the
     * deletion of a non-existent orphan - this is because we don't
     * know if ext4_truncate() actually created an orphan record.
     * (Well, we could do this if we need to, but heck - it works)
     */
    ext4_orphan_del(handle, inode);
    EXT4_I(inode)->i_dtime = get_seconds();

    /*
     * One subtle ordering requirement: if anything has gone wrong
     * (transaction abort, IO errors, whatever), then we can still
     * do these next steps (the fs will already have been marked as
     * having errors), but we can't free the inode if the mark_dirty
     * fails.
     */
    if (ext4_mark_inode_dirty(handle, inode))
        /* If that failed, just do the required in-core inode clear. */
        ext4_clear_inode(inode);
    else
        ext4_free_inode(handle, inode);
    ext4_journal_stop(handle);
    sb_end_intwrite(inode->i_sb);
    ext4_xattr_inode_array_free(ea_inode_array);
    return;
no_delete:
    ext4_clear_inode(inode); /* We must guarantee clearing of inode... */
}

#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
    return &EXT4_I(inode)->i_reserved_quota;
}
#endif

/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
                                  int used, int quota_claim)
{
    struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
    struct ext4_inode_info *ei = EXT4_I(inode);

    spin_lock(&ei->i_block_reservation_lock);
    trace_ext4_da_update_reserve_space(inode, used, quota_claim);
    if (unlikely(used > ei->i_reserved_data_blocks)) {
        ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
                     "with only %d reserved data blocks",
                     __func__, inode->i_ino, used,
                     ei->i_reserved_data_blocks);
        WARN_ON(1);
        used = ei->i_reserved_data_blocks;
    }

    /* Update per-inode reservations */
    ei->i_reserved_data_blocks -= used;
    percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);

    spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

    /* Update quota subsystem for data blocks */
    if (quota_claim)
        dquot_claim_block(inode, EXT4_C2B(sbi, used));
    else {
        /*
         * We did fallocate with an offset that is already delayed
         * allocated. So on delayed allocated writeback we should
         * not re-claim the quota for fallocated blocks.
         */
        dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
    }

    /*
     * If we have done all the pending block allocations and if
     * there aren't any writers on the inode, we can discard the
     * inode's preallocations.
     */
    if ((ei->i_reserved_data_blocks == 0) &&
        (atomic_read(&inode->i_writecount) == 0))
        ext4_discard_preallocations(inode);
}

static int __check_block_validity(struct inode *inode, const char *func,
                                  unsigned int line,
                                  struct ext4_map_blocks *map)
{
    if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
                               map->m_len)) {
        ext4_error_inode(inode, func, line, map->m_pblk,
                         "lblock %lu mapped to illegal pblock "
                         "(length %d)", (unsigned long) map->m_lblk,
                         map->m_len);
        return -EFSCORRUPTED;
    }
    return 0;
}

int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
                       ext4_lblk_t len)
{
    int ret;

    if (ext4_encrypted_inode(inode))
        return fscrypt_zeroout_range(inode, lblk, pblk, len);

    ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
    if (ret > 0)
        ret = 0;

    return ret;
}

#define check_block_validity(inode, map) \
    __check_block_validity((inode), __func__, __LINE__, (map))

#ifdef ES_AGGRESSIVE_TEST
static void ext4_map_blocks_es_recheck(handle_t *handle,
                                       struct inode *inode,
                                       struct ext4_map_blocks *es_map,
                                       struct ext4_map_blocks *map,
                                       int flags)
{
    int retval;

    map->m_flags = 0;
    /*
     * There is a race window in which the result may not be the same,
     * e.g. xfstests #223 when dioread_nolock is enabled. The reason is
     * that we look up the block mapping in the extent status tree
     * without taking i_data_sem, so the unwritten extent could be
     * converted in the meantime.
     */
    down_read(&EXT4_I(inode)->i_data_sem);
    if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
        retval = ext4_ext_map_blocks(handle, inode, map, flags &
                                     EXT4_GET_BLOCKS_KEEP_SIZE);
    } else {
        retval = ext4_ind_map_blocks(handle, inode, map, flags &
                                     EXT4_GET_BLOCKS_KEEP_SIZE);
    }
    up_read((&EXT4_I(inode)->i_data_sem));

    /*
     * We don't check m_len because the extent will be collapsed in the
     * status tree, so the m_len fields might not be equal.
     */
    if (es_map->m_lblk != map->m_lblk ||
        es_map->m_flags != map->m_flags ||
        es_map->m_pblk != map->m_pblk) {
        printk("ES cache assertion failed for inode: %lu "
               "es_cached ex [%d/%d/%llu/%x] != "
               "found ex [%d/%d/%llu/%x] retval %d flags %x\n",
               inode->i_ino, es_map->m_lblk, es_map->m_len,
               es_map->m_pblk, es_map->m_flags, map->m_lblk,
               map->m_len, map->m_pblk, map->m_flags,
               retval, flags);
    }
}
#endif /* ES_AGGRESSIVE_TEST */

/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head, and marks it
 * mapped.
 *
 * If the file is extent based, it calls ext4_ext_map_blocks(); otherwise
 * it calls ext4_ind_map_blocks() to handle indirect-mapped files.
 *
 * On success, it returns the number of blocks being mapped or allocated. If
 * create == 0 and the blocks are pre-allocated and unwritten, the resulting
 * @map is marked as unwritten. If create == 1, it will mark @map as mapped.
 *
 * It returns 0 if plain look up failed (blocks have not been allocated); in
 * that case, @map is returned as unmapped but we still fill in map->m_len to
 * indicate the length of a hole starting at map->m_lblk.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
                    struct ext4_map_blocks *map, int flags)
{
    struct extent_status es;
    int retval;
    int ret = 0;
#ifdef ES_AGGRESSIVE_TEST
    struct ext4_map_blocks orig_map;

    memcpy(&orig_map, map, sizeof(*map));
#endif

    map->m_flags = 0;
    ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
              "logical block %lu\n", inode->i_ino, flags, map->m_len,
              (unsigned long) map->m_lblk);

    /*
     * ext4_map_blocks returns an int, and m_len is an unsigned int
     */
    if (unlikely(map->m_len > INT_MAX))
        map->m_len = INT_MAX;

    /* We can only handle block numbers less than EXT_MAX_BLOCKS */
    if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
        return -EFSCORRUPTED;

    /* Lookup extent status tree firstly */
    if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
        if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
            map->m_pblk = ext4_es_pblock(&es) +
                          map->m_lblk - es.es_lblk;
            map->m_flags |= ext4_es_is_written(&es) ?
                            EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
            retval = es.es_len - (map->m_lblk - es.es_lblk);
            if (retval > map->m_len)
                retval = map->m_len;
            map->m_len = retval;
        } else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
            map->m_pblk = 0;
            retval = es.es_len - (map->m_lblk - es.es_lblk);
            if (retval > map->m_len)
                retval = map->m_len;
            map->m_len = retval;
            retval = 0;
        } else {
            BUG_ON(1);
        }
#ifdef ES_AGGRESSIVE_TEST
        ext4_map_blocks_es_recheck(handle, inode, map,
                                   &orig_map, flags);
#endif
        goto found;
    }

    /*
     * Try to see if we can get the block without requesting a new
     * file system block.
     */
    down_read(&EXT4_I(inode)->i_data_sem);
    if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
        retval = ext4_ext_map_blocks(handle, inode, map, flags &
                                     EXT4_GET_BLOCKS_KEEP_SIZE);
    } else {
        retval = ext4_ind_map_blocks(handle, inode, map, flags &
                                     EXT4_GET_BLOCKS_KEEP_SIZE);
    }
    if (retval > 0) {
        unsigned int status;

        if (unlikely(retval != map->m_len)) {
            ext4_warning(inode->i_sb,
                         "ES len assertion failed for inode "
                         "%lu: retval %d != map->m_len %d",
                         inode->i_ino, retval, map->m_len);
            WARN_ON(1);
        }

        status = map->m_flags & EXT4_MAP_UNWRITTEN ?
                 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
        if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
            !(status & EXTENT_STATUS_WRITTEN) &&
            ext4_find_delalloc_range(inode, map->m_lblk,
                                     map->m_lblk + map->m_len - 1))
            status |= EXTENT_STATUS_DELAYED;
        ret = ext4_es_insert_extent(inode, map->m_lblk,
                                    map->m_len, map->m_pblk, status);
        if (ret < 0)
            retval = ret;
    }
    up_read((&EXT4_I(inode)->i_data_sem));

found:
    if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
        ret = check_block_validity(inode, map);
        if (ret != 0)
            return ret;
    }

    /* If it is only a block(s) look up */
    if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
        return retval;

    /*
     * Return if the blocks have already been allocated.
     *
     * Note that if blocks have been preallocated,
     * ext4_ext_get_block() returns with create = 0
     * and the buffer head unmapped.
     */
    if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
        /*
         * If we need to convert the extent to unwritten
         * we continue and do the actual work in
         * ext4_ext_map_blocks()
         */
        if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
            return retval;

    /*
     * Here we clear m_flags because after allocating a new extent,
     * it will be set again.
     */
    map->m_flags &= ~EXT4_MAP_FLAGS;

    /*
     * Allocating new blocks and/or writing to an unwritten extent
     * will possibly result in updating i_data, so we take
     * the write lock of i_data_sem, and call get_block()
     * with create == 1 flag.
     */
    down_write(&EXT4_I(inode)->i_data_sem);

    /*
     * We need to check for EXT4 here because migrate
     * could have changed the inode type in between
     */
    if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
        retval = ext4_ext_map_blocks(handle, inode, map, flags);
    } else {
        retval = ext4_ind_map_blocks(handle, inode, map, flags);

        if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
            /*
             * We allocated new blocks which will result in
             * i_data's format changing. Force the migrate
             * to fail by clearing migrate flags
             */
            ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
        }

        /*
         * Update reserved blocks/metadata blocks after successful
         * block allocation which had been deferred till now. We don't
         * support fallocate for non extent files. So we can update
         * reserve space here.
         */
        if ((retval > 0) &&
            (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
            ext4_da_update_reserve_space(inode, retval, 1);
    }

    if (retval > 0) {
        unsigned int status;

        if (unlikely(retval != map->m_len)) {
            ext4_warning(inode->i_sb,
                         "ES len assertion failed for inode "
                         "%lu: retval %d != map->m_len %d",
                         inode->i_ino, retval, map->m_len);
            WARN_ON(1);
        }

        /*
         * We have to zeroout blocks before inserting them into extent
         * status tree. Otherwise someone could look them up there and
         * use them before they are really zeroed. We also have to
         * unmap metadata before zeroing as otherwise writeback can
         * overwrite zeros with stale data from block device.
         */
        if (flags & EXT4_GET_BLOCKS_ZERO &&
            map->m_flags & EXT4_MAP_MAPPED &&
            map->m_flags & EXT4_MAP_NEW) {
            clean_bdev_aliases(inode->i_sb->s_bdev, map->m_pblk,
                               map->m_len);
            ret = ext4_issue_zeroout(inode, map->m_lblk,
                                     map->m_pblk, map->m_len);
            if (ret) {
                retval = ret;
                goto out_sem;
            }
        }

        /*
         * If the extent has been zeroed out, we don't need to update
         * extent status tree.
         */
        if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
            ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
            if (ext4_es_is_written(&es))
                goto out_sem;
        }
        status = map->m_flags & EXT4_MAP_UNWRITTEN ?
                 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
        if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
            !(status & EXTENT_STATUS_WRITTEN) &&
            ext4_find_delalloc_range(inode, map->m_lblk,
                                     map->m_lblk + map->m_len - 1))
            status |= EXTENT_STATUS_DELAYED;
        ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
                                    map->m_pblk, status);
        if (ret < 0) {
            retval = ret;
            goto out_sem;
        }
    }

out_sem:
    up_write((&EXT4_I(inode)->i_data_sem));
    if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
        ret = check_block_validity(inode, map);
        if (ret != 0)
            return ret;

        /*
         * Inodes with freshly allocated blocks where contents will be
         * visible after transaction commit must be on transaction's
         * ordered data list.
         */
        if (map->m_flags & EXT4_MAP_NEW &&
            !(map->m_flags & EXT4_MAP_UNWRITTEN) &&
            !(flags & EXT4_GET_BLOCKS_ZERO) &&
            !ext4_is_quota_file(inode) &&
            ext4_should_order_data(inode)) {
            if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
                ret = ext4_jbd2_inode_add_wait(handle, inode);
            else
                ret = ext4_jbd2_inode_add_write(handle, inode);
            if (ret)
                return ret;
        }
    }
    return retval;
}

/*
 * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
 * we have to be careful as someone else may be manipulating b_state as well.
 */
static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
{
    unsigned long old_state;
    unsigned long new_state;

    flags &= EXT4_MAP_FLAGS;

    /* Dummy buffer_head? Set non-atomically. */
    if (!bh->b_page) {
        bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
        return;
    }
    /*
     * Someone else may be modifying b_state. Be careful! This is ugly but
     * once we get rid of using bh as a container for mapping information
     * to pass to / from get_block functions, this can go away.
     */
    do {
        old_state = READ_ONCE(bh->b_state);
        new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
    } while (unlikely(
             cmpxchg(&bh->b_state, old_state, new_state) != old_state));
}
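
/*
 * Translate a get_block()-style request into an ext4_map_blocks() call.
 * On a mapping hit the buffer head is marked up and b_size is set to the
 * size of the contiguous extent found; on a hole only b_size is filled in.
 */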
static int _ext4_get_block(struct inode *inode, sector_t iblock,
                           struct buffer_head *bh, int flags)
{
    struct ext4_map_blocks map;
    int ret = 0;

    if (ext4_has_inline_data(inode))
        return -ERANGE;

    map.m_lblk = iblock;
    map.m_len = bh->b_size >> inode->i_blkbits;

    ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
                          flags);
    if (ret > 0) {
        map_bh(bh, inode->i_sb, map.m_pblk);
        ext4_update_bh_state(bh, map.m_flags);
        bh->b_size = inode->i_sb->s_blocksize * map.m_len;
        ret = 0;
    } else if (ret == 0) {
        /* hole case, need to fill in bh->b_size */
        bh->b_size = inode->i_sb->s_blocksize * map.m_len;
    }
    return ret;
}
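
/* Plain get_block callback: allocate blocks only when 'create' is set. */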
int ext4_get_block(struct inode *inode, sector_t iblock,
                   struct buffer_head *bh, int create)
{
    return _ext4_get_block(inode, iblock, bh,
                           create ? EXT4_GET_BLOCKS_CREATE : 0);
}

/*
 * Get block function used when preparing for buffered write if we require
 * creating an unwritten extent if blocks haven't been allocated. The extent
 * will be converted to written after the IO is complete.
 */
int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
                             struct buffer_head *bh_result, int create)
{
    ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n",
               inode->i_ino, create);
    return _ext4_get_block(inode, iblock, bh_result,
                           EXT4_GET_BLOCKS_IO_CREATE_EXT);
}

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

/*
 * Get blocks function for the cases that need to start a transaction -
 * generally different cases of direct IO and DAX IO. It also handles retries
 * in case of ENOSPC.
 */
static int ext4_get_block_trans(struct inode *inode, sector_t iblock,
                                struct buffer_head *bh_result, int flags)
{
    int dio_credits;
    handle_t *handle;
    int retries = 0;
    int ret;

    /* Trim mapping request to maximum we can map at once for DIO */
    if (bh_result->b_size >> inode->i_blkbits > DIO_MAX_BLOCKS)
        bh_result->b_size = DIO_MAX_BLOCKS << inode->i_blkbits;
    dio_credits = ext4_chunk_trans_blocks(inode,
                              bh_result->b_size >> inode->i_blkbits);
retry:
    handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
    if (IS_ERR(handle))
        return PTR_ERR(handle);

    ret = _ext4_get_block(inode, iblock, bh_result, flags);
    ext4_journal_stop(handle);

    if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
        goto retry;
    return ret;
}

/* Get block function for DIO reads and writes to inodes without extents */
int ext4_dio_get_block(struct inode *inode, sector_t iblock,
                       struct buffer_head *bh, int create)
{
    /* We don't expect handle for direct IO */
    WARN_ON_ONCE(ext4_journal_current_handle());

    if (!create)
        return _ext4_get_block(inode, iblock, bh, 0);
    return ext4_get_block_trans(inode, iblock, bh, EXT4_GET_BLOCKS_CREATE);
}

/*
 * Get block function for AIO DIO writes when we create unwritten extent if
 * blocks are not allocated yet. The extent will be converted to written
 * after IO is complete.
 */
static int ext4_dio_get_block_unwritten_async(struct inode *inode,
        sector_t iblock, struct buffer_head *bh_result, int create)
{
    int ret;

    /* We don't expect handle for direct IO */
    WARN_ON_ONCE(ext4_journal_current_handle());

    ret = ext4_get_block_trans(inode, iblock, bh_result,
                               EXT4_GET_BLOCKS_IO_CREATE_EXT);

    /*
     * When doing DIO using unwritten extents, we need io_end to convert
     * unwritten extents to written on IO completion. We allocate io_end
     * once we spot unwritten extent and store it in b_private. Generic
     * DIO code keeps b_private set and furthermore passes the value to
     * our completion callback in 'private' argument.
     */
    if (!ret && buffer_unwritten(bh_result)) {
        if (!bh_result->b_private) {
            ext4_io_end_t *io_end;

            io_end = ext4_init_io_end(inode, GFP_KERNEL);
            if (!io_end)
                return -ENOMEM;
            bh_result->b_private = io_end;
            ext4_set_io_unwritten_flag(inode, io_end);
        }
        set_buffer_defer_completion(bh_result);
    }

    return ret;
}

/*
 * Get block function for non-AIO DIO writes when we create unwritten extent if
 * blocks are not allocated yet. The extent will be converted to written
 * after IO is complete by ext4_direct_IO_write().
 */
static int ext4_dio_get_block_unwritten_sync(struct inode *inode,
        sector_t iblock, struct buffer_head *bh_result, int create)
{
    int ret;

    /* We don't expect handle for direct IO */
    WARN_ON_ONCE(ext4_journal_current_handle());

    ret = ext4_get_block_trans(inode, iblock, bh_result,
                               EXT4_GET_BLOCKS_IO_CREATE_EXT);

    /*
     * Mark inode as having pending DIO writes to unwritten extents.
     * ext4_direct_IO_write() checks this flag and converts extents to
     * written.
     */
    if (!ret && buffer_unwritten(bh_result))
        ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);

    return ret;
}

static int ext4_dio_get_block_overwrite(struct inode *inode, sector_t iblock,
        struct buffer_head *bh_result, int create)
{
    int ret;

    ext4_debug("ext4_dio_get_block_overwrite: inode %lu, create flag %d\n",
               inode->i_ino, create);
    /* We don't expect handle for direct IO */
    WARN_ON_ONCE(ext4_journal_current_handle());

    ret = _ext4_get_block(inode, iblock, bh_result, 0);
    /*
     * Blocks should have been preallocated! ext4_file_write_iter() checks
     * that.
     */
    WARN_ON_ONCE(!buffer_mapped(bh_result) || buffer_unwritten(bh_result));

    return ret;
}

/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
                                ext4_lblk_t block, int map_flags)
{
    struct ext4_map_blocks map;
    struct buffer_head *bh;
    int create = map_flags & EXT4_GET_BLOCKS_CREATE;
    int err;

    J_ASSERT(handle != NULL || create == 0);

    map.m_lblk = block;
    map.m_len = 1;
    err = ext4_map_blocks(handle, inode, &map, map_flags);

    if (err == 0)
        return create ? ERR_PTR(-ENOSPC) : NULL;
    if (err < 0)
        return ERR_PTR(err);

    bh = sb_getblk(inode->i_sb, map.m_pblk);
    if (unlikely(!bh))
        return ERR_PTR(-ENOMEM);
    if (map.m_flags & EXT4_MAP_NEW) {
        J_ASSERT(create != 0);
        J_ASSERT(handle != NULL);

        /*
         * Now that we do not always journal data, we should
         * keep in mind whether this should always journal the
         * new buffer as metadata. For now, regular file
         * writes use ext4_get_block instead, so it's not a
         * problem.
         */
        lock_buffer(bh);
        BUFFER_TRACE(bh, "call get_create_access");
        err = ext4_journal_get_create_access(handle, bh);
        if (unlikely(err)) {
            unlock_buffer(bh);
            goto errout;
        }
        if (!buffer_uptodate(bh)) {
            memset(bh->b_data, 0, inode->i_sb->s_blocksize);
            set_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
        BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
        err = ext4_handle_dirty_metadata(handle, inode, bh);
        if (unlikely(err))
            goto errout;
    } else
        BUFFER_TRACE(bh, "not a new buffer");
    return bh;
errout:
    brelse(bh);
    return ERR_PTR(err);
}

struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
                               ext4_lblk_t block, int map_flags)
{
    struct buffer_head *bh;

    bh = ext4_getblk(handle, inode, block, map_flags);
    if (IS_ERR(bh))
        return bh;
    if (!bh || buffer_uptodate(bh))
        return bh;
    ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh);
    wait_on_buffer(bh);
    if (buffer_uptodate(bh))
        return bh;
    put_bh(bh);
    return ERR_PTR(-EIO);
}

/* Read a contiguous batch of blocks. */
int ext4_bread_batch(struct inode *inode, ext4_lblk_t block, int bh_count,
                     bool wait, struct buffer_head **bhs)
{
    int i, err;

    for (i = 0; i < bh_count; i++) {
        bhs[i] = ext4_getblk(NULL, inode, block + i, 0 /* map_flags */);
        if (IS_ERR(bhs[i])) {
            err = PTR_ERR(bhs[i]);
            bh_count = i;
            goto out_brelse;
        }
    }

    for (i = 0; i < bh_count; i++)
        /* Note that NULL bhs[i] is valid because of holes. */
        if (bhs[i] && !buffer_uptodate(bhs[i]))
            ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1,
                        &bhs[i]);

    if (!wait)
        return 0;

    for (i = 0; i < bh_count; i++)
        if (bhs[i])
            wait_on_buffer(bhs[i]);

    for (i = 0; i < bh_count; i++) {
        if (bhs[i] && !buffer_uptodate(bhs[i])) {
            err = -EIO;
            goto out_brelse;
        }
    }
    return 0;

out_brelse:
    for (i = 0; i < bh_count; i++) {
        brelse(bhs[i]);
        bhs[i] = NULL;
    }
    return err;
}
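
/*
 * Walk all buffer heads attached to a page and apply 'fn' to every buffer
 * that overlaps the byte range [from, to). '*partial' is set when a buffer
 * outside that range is not uptodate; the first error from 'fn' is returned.
 */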
int ext4_walk_page_buffers(handle_t *handle,
                           struct buffer_head *head,
                           unsigned from,
                           unsigned to,
                           int *partial,
                           int (*fn)(handle_t *handle,
                                     struct buffer_head *bh))
{
    struct buffer_head *bh;
    unsigned block_start, block_end;
    unsigned blocksize = head->b_size;
    int err, ret = 0;
    struct buffer_head *next;

    for (bh = head, block_start = 0;
         ret == 0 && (bh != head || !block_start);
         block_start = block_end, bh = next) {
        next = bh->b_this_page;
        block_end = block_start + blocksize;
        if (block_end <= from || block_start >= to) {
            if (partial && !buffer_uptodate(bh))
                *partial = 1;
            continue;
        }
        err = (*fn)(handle, bh);
        if (!ret)
            ret = err;
    }
    return ret;
}

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction. We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write(). So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage(). In that case, we
 * *know* that ext4_writepage() has generated enough buffer credits to do the
 * whole page. So we won't block on the journal in that case, which is good,
 * because the caller may be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes. If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated. We'll still have enough credits for the tiny quotafile
 * write.
 */
int do_journal_get_write_access(handle_t *handle,
                                struct buffer_head *bh)
{
    int dirty = buffer_dirty(bh);
    int ret;

    if (!buffer_mapped(bh) || buffer_freed(bh))
        return 0;
    /*
     * __block_write_begin() could have dirtied some buffers. Clean
     * the dirty bit as jbd2_journal_get_write_access() could complain
     * otherwise about fs integrity issues. Setting of the dirty bit
     * by __block_write_begin() isn't a real problem here as we clear
     * the bit before releasing a page lock and thus writeback cannot
     * ever write the buffer.
     */
    if (dirty)
        clear_buffer_dirty(bh);
    BUFFER_TRACE(bh, "get write access");
    ret = ext4_journal_get_write_access(handle, bh);
    if (!ret && dirty)
        ret = ext4_handle_dirty_metadata(handle, NULL, bh);
    return ret;
}

#ifdef CONFIG_EXT4_FS_ENCRYPTION
static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
                                  get_block_t *get_block)
{
    unsigned from = pos & (PAGE_SIZE - 1);
    unsigned to = from + len;
    struct inode *inode = page->mapping->host;
    unsigned block_start, block_end;
    sector_t block;
    int err = 0;
    unsigned blocksize = inode->i_sb->s_blocksize;
    unsigned bbits;
    struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
    bool decrypt = false;

    BUG_ON(!PageLocked(page));
    BUG_ON(from > PAGE_SIZE);
    BUG_ON(to > PAGE_SIZE);
    BUG_ON(from > to);

    if (!page_has_buffers(page))
        create_empty_buffers(page, blocksize, 0);
    head = page_buffers(page);
    bbits = ilog2(blocksize);
    block = (sector_t)page->index << (PAGE_SHIFT - bbits);

    for (bh = head, block_start = 0; bh != head || !block_start;
         block++, block_start = block_end, bh = bh->b_this_page) {
        block_end = block_start + blocksize;
        if (block_end <= from || block_start >= to) {
            if (PageUptodate(page)) {
                if (!buffer_uptodate(bh))
                    set_buffer_uptodate(bh);
            }
            continue;
        }
        if (buffer_new(bh))
            clear_buffer_new(bh);
        if (!buffer_mapped(bh)) {
            WARN_ON(bh->b_size != blocksize);
            err = get_block(inode, block, bh, 1);
            if (err)
                break;
            if (buffer_new(bh)) {
                clean_bdev_bh_alias(bh);
                if (PageUptodate(page)) {
                    clear_buffer_new(bh);
                    set_buffer_uptodate(bh);
                    mark_buffer_dirty(bh);
                    continue;
                }
                if (block_end > to || block_start < from)
                    zero_user_segments(page, to, block_end,
                                       block_start, from);
                continue;
            }
        }
        if (PageUptodate(page)) {
            if (!buffer_uptodate(bh))
                set_buffer_uptodate(bh);
            continue;
        }
        if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
            !buffer_unwritten(bh) &&
            (block_start < from || block_end > to)) {
            ll_rw_block(REQ_OP_READ, 0, 1, &bh);
            *wait_bh++ = bh;
            decrypt = ext4_encrypted_inode(inode) &&
                      S_ISREG(inode->i_mode);
        }
    }
    /*
     * If we issued read requests, let them complete.
     */
    while (wait_bh > wait) {
        wait_on_buffer(*--wait_bh);
        if (!buffer_uptodate(*wait_bh))
            err = -EIO;
    }
    if (unlikely(err))
        page_zero_new_buffers(page, from, to);
    else if (decrypt)
        err = fscrypt_decrypt_page(page->mapping->host, page,
                                   PAGE_SIZE, 0, page->index);
    return err;
}
#endif

static int ext4_write_begin(struct file *file, struct address_space *mapping,
                            loff_t pos, unsigned len, unsigned flags,
                            struct page **pagep, void **fsdata)
{
    struct inode *inode = mapping->host;
    int ret, needed_blocks;
    handle_t *handle;
    int retries = 0;
    struct page *page;
    pgoff_t index;
    unsigned from, to;

    if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
        return -EIO;

    trace_ext4_write_begin(inode, pos, len, flags);
    /*
     * Reserve one block more for addition to orphan list in case
     * we allocate blocks but write fails for some reason
     */
    needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
    index = pos >> PAGE_SHIFT;
    from = pos & (PAGE_SIZE - 1);
    to = from + len;

    if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
        ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
                                            flags, pagep);
        if (ret < 0)
            return ret;
        if (ret == 1)
            return 0;
    }

    /*
     * grab_cache_page_write_begin() can take a long time if the
     * system is thrashing due to memory pressure, or if the page
     * is being written back. So grab it first before we start
     * the transaction handle. This also allows us to allocate
     * the page (if needed) without using GFP_NOFS.
     */
retry_grab:
    page = grab_cache_page_write_begin(mapping, index, flags);
    if (!page)
        return -ENOMEM;
    unlock_page(page);

retry_journal:
    handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
    if (IS_ERR(handle)) {
        put_page(page);
        return PTR_ERR(handle);
    }

    lock_page(page);
    if (page->mapping != mapping) {
        /* The page got truncated from under us */
        unlock_page(page);
        put_page(page);
        ext4_journal_stop(handle);
        goto retry_grab;
    }
    /* In case writeback began while the page was unlocked */
    wait_for_stable_page(page);

#ifdef CONFIG_EXT4_FS_ENCRYPTION
    if (ext4_should_dioread_nolock(inode))
        ret = ext4_block_write_begin(page, pos, len,
                                     ext4_get_block_unwritten);
    else
        ret = ext4_block_write_begin(page, pos, len,
                                     ext4_get_block);
#else
    if (ext4_should_dioread_nolock(inode))
        ret = __block_write_begin(page, pos, len,
                                  ext4_get_block_unwritten);
    else
        ret = __block_write_begin(page, pos, len, ext4_get_block);
#endif
    if (!ret && ext4_should_journal_data(inode)) {
        ret = ext4_walk_page_buffers(handle, page_buffers(page),
                                     from, to, NULL,
                                     do_journal_get_write_access);
    }

    if (ret) {
        unlock_page(page);
        /*
         * __block_write_begin may have instantiated a few blocks
         * outside i_size. Trim these off again. Don't need
         * i_size_read because we hold i_mutex.
         *
         * Add inode to orphan list in case we crash before
         * truncate finishes
         */
        if (pos + len > inode->i_size && ext4_can_truncate(inode))
            ext4_orphan_add(handle, inode);

        ext4_journal_stop(handle);
        if (pos + len > inode->i_size) {
            ext4_truncate_failed_write(inode);
            /*
             * If truncate failed early the inode might
             * still be on the orphan list; we need to
             * make sure the inode is removed from the
             * orphan list in that case.
             */
            if (inode->i_nlink)
                ext4_orphan_del(NULL, inode);
        }

        if (ret == -ENOSPC &&
            ext4_should_retry_alloc(inode->i_sb, &retries))
            goto retry_journal;
        put_page(page);
        return ret;
    }
    *pagep = page;
    return ret;
}
1352 static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1355 if (!buffer_mapped(bh) || buffer_freed(bh))
1357 set_buffer_uptodate(bh);
1358 ret = ext4_handle_dirty_metadata(handle, NULL, bh);
1359 clear_buffer_meta(bh);
1360 clear_buffer_prio(bh);

/*
 * We need to pick up the new inode size which generic_commit_write gave us
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list. metadata
 * buffers are managed internally.
 */
static int ext4_write_end(struct file *file,
                          struct address_space *mapping,
                          loff_t pos, unsigned len, unsigned copied,
                          struct page *page, void *fsdata)
{
    handle_t *handle = ext4_journal_current_handle();
    struct inode *inode = mapping->host;
    loff_t old_size = inode->i_size;
    int ret = 0, ret2;
    int i_size_changed = 0;

    trace_ext4_write_end(inode, pos, len, copied);
    if (ext4_has_inline_data(inode)) {
        ret = ext4_write_inline_data_end(inode, pos, len,
                                         copied, page);
        if (ret < 0) {
            unlock_page(page);
            put_page(page);
            goto errout;
        }
        copied = ret;
    } else
        copied = block_write_end(file, mapping, pos,
                                 len, copied, page, fsdata);
    /*
     * it's important to update i_size while still holding page lock:
     * page writeout could otherwise come in and zero beyond i_size.
     */
    i_size_changed = ext4_update_inode_size(inode, pos + copied);
    unlock_page(page);
    put_page(page);

    if (old_size < pos)
        pagecache_isize_extended(inode, old_size, pos);
    /*
     * Don't mark the inode dirty under page lock. First, it unnecessarily
     * makes the holding time of page lock longer. Second, it forces lock
     * ordering of page lock and transaction start for journaling
     * filesystems.
     */
    if (i_size_changed)
        ext4_mark_inode_dirty(handle, inode);

    if (pos + len > inode->i_size && ext4_can_truncate(inode))
        /* if we have allocated more blocks and copied
         * less. We will have blocks allocated outside
         * inode->i_size. So truncate them
         */
        ext4_orphan_add(handle, inode);
errout:
    ret2 = ext4_journal_stop(handle);
    if (!ret)
        ret = ret2;

    if (pos + len > inode->i_size) {
        ext4_truncate_failed_write(inode);
        /*
         * If truncate failed early the inode might still be
         * on the orphan list; we need to make sure the inode
         * is removed from the orphan list in that case.
         */
        if (inode->i_nlink)
            ext4_orphan_del(NULL, inode);
    }

    return ret ? ret : copied;
}

/*
 * This is a private version of page_zero_new_buffers() which doesn't
 * set the buffer to be dirty, since in data=journalled mode we need
 * to call ext4_handle_dirty_metadata() instead.
 */
static void ext4_journalled_zero_new_buffers(handle_t *handle,
                                             struct page *page,
                                             unsigned from, unsigned to)
{
    unsigned int block_start = 0, block_end;
    struct buffer_head *head, *bh;

    bh = head = page_buffers(page);
    do {
        block_end = block_start + bh->b_size;
        if (buffer_new(bh)) {
            if (block_end > from && block_start < to) {
                if (!PageUptodate(page)) {
                    unsigned start, size;

                    start = max(from, block_start);
                    size = min(to, block_end) - start;

                    zero_user(page, start, size);
                    write_end_fn(handle, bh);
                }
                clear_buffer_new(bh);
            }
        }
        block_start = block_end;
        bh = bh->b_this_page;
    } while (bh != head);
}

static int ext4_journalled_write_end(struct file *file,
                                     struct address_space *mapping,
                                     loff_t pos, unsigned len, unsigned copied,
                                     struct page *page, void *fsdata)
{
    handle_t *handle = ext4_journal_current_handle();
    struct inode *inode = mapping->host;
    loff_t old_size = inode->i_size;
    int ret = 0, ret2;
    int partial = 0;
    unsigned from, to;
    int size_changed = 0;

    trace_ext4_journalled_write_end(inode, pos, len, copied);
    from = pos & (PAGE_SIZE - 1);
    to = from + len;

    BUG_ON(!ext4_handle_valid(handle));

    if (ext4_has_inline_data(inode)) {
        ret = ext4_write_inline_data_end(inode, pos, len,
                                         copied, page);
        if (ret < 0) {
            unlock_page(page);
            put_page(page);
            goto errout;
        }
        copied = ret;
    } else if (unlikely(copied < len) && !PageUptodate(page)) {
        copied = 0;
        ext4_journalled_zero_new_buffers(handle, page, from, to);
    } else {
        if (unlikely(copied < len))
            ext4_journalled_zero_new_buffers(handle, page,
                                             from + copied, to);
        ret = ext4_walk_page_buffers(handle, page_buffers(page), from,
                                     from + copied, &partial,
                                     write_end_fn);
        if (!partial)
            SetPageUptodate(page);
    }
    size_changed = ext4_update_inode_size(inode, pos + copied);
    ext4_set_inode_state(inode, EXT4_STATE_JDATA);
    EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
    unlock_page(page);
    put_page(page);

    if (old_size < pos)
        pagecache_isize_extended(inode, old_size, pos);

    if (size_changed) {
        ret2 = ext4_mark_inode_dirty(handle, inode);
        if (!ret)
            ret = ret2;
    }

    if (pos + len > inode->i_size && ext4_can_truncate(inode))
        /* if we have allocated more blocks and copied
         * less. We will have blocks allocated outside
         * inode->i_size. So truncate them
         */
        ext4_orphan_add(handle, inode);

errout:
    ret2 = ext4_journal_stop(handle);
    if (!ret)
        ret = ret2;
    if (pos + len > inode->i_size) {
        ext4_truncate_failed_write(inode);
        /*
         * If truncate failed early the inode might still be
         * on the orphan list; we need to make sure the inode
         * is removed from the orphan list in that case.
         */
        if (inode->i_nlink)
            ext4_orphan_del(NULL, inode);
    }

    return ret ? ret : copied;
}

/*
 * Reserve space for a single cluster
 */
static int ext4_da_reserve_space(struct inode *inode)
{
    struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
    struct ext4_inode_info *ei = EXT4_I(inode);
    int ret;

    /*
     * We will charge metadata quota at writeout time; this saves
     * us from metadata over-estimation, though we may go over by
     * a small amount in the end. Here we just reserve for data.
     */
    ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
    if (ret)
        return ret;

    spin_lock(&ei->i_block_reservation_lock);
    if (ext4_claim_free_clusters(sbi, 1, 0)) {
        spin_unlock(&ei->i_block_reservation_lock);
        dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
        return -ENOSPC;
    }
    ei->i_reserved_data_blocks++;
    trace_ext4_da_reserve_space(inode);
    spin_unlock(&ei->i_block_reservation_lock);

    return 0; /* success */
}
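
/*
 * Give back 'to_free' delayed-allocation block reservations: update the
 * per-inode and global dirty-cluster counters and release the corresponding
 * quota reservation.
 */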
static void ext4_da_release_space(struct inode *inode, int to_free)
{
    struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
    struct ext4_inode_info *ei = EXT4_I(inode);

    if (!to_free)
        return; /* Nothing to release, exit */

    spin_lock(&EXT4_I(inode)->i_block_reservation_lock);

    trace_ext4_da_release_space(inode, to_free);
    if (unlikely(to_free > ei->i_reserved_data_blocks)) {
        /*
         * if there aren't enough reserved blocks, then the
         * counter is messed up somewhere. Since this
         * function is called from invalidate page, it's
         * harmless to return without any action.
         */
        ext4_warning(inode->i_sb, "ext4_da_release_space: "
                     "ino %lu, to_free %d with only %d reserved "
                     "data blocks", inode->i_ino, to_free,
                     ei->i_reserved_data_blocks);
        WARN_ON(1);
        to_free = ei->i_reserved_data_blocks;
    }
    ei->i_reserved_data_blocks -= to_free;

    /* update fs dirty data blocks counter */
    percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);

    spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

    dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
}
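
/*
 * Called on partial or full invalidation of a delalloc page: clear the
 * buffer delay bits in the byte range being invalidated, remove the
 * corresponding extents from the extent status tree, and release the
 * per-cluster reservations that no longer back any delayed block.
 */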
static void ext4_da_page_release_reservation(struct page *page,
                                             unsigned int offset,
                                             unsigned int length)
{
    int to_release = 0, contiguous_blks = 0;
    struct buffer_head *head, *bh;
    unsigned int curr_off = 0;
    struct inode *inode = page->mapping->host;
    struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
    unsigned int stop = offset + length;
    int num_clusters;
    ext4_fsblk_t lblk;

    BUG_ON(stop > PAGE_SIZE || stop < length);

    head = page_buffers(page);
    bh = head;
    do {
        unsigned int next_off = curr_off + bh->b_size;

        if (next_off > stop)
            break;

        if ((offset <= curr_off) && (buffer_delay(bh))) {
            to_release++;
            contiguous_blks++;
            clear_buffer_delay(bh);
        } else if (contiguous_blks) {
            lblk = page->index <<
                   (PAGE_SHIFT - inode->i_blkbits);
            lblk += (curr_off >> inode->i_blkbits) -
                    contiguous_blks;
            ext4_es_remove_extent(inode, lblk, contiguous_blks);
            contiguous_blks = 0;
        }
        curr_off = next_off;
    } while ((bh = bh->b_this_page) != head);

    if (contiguous_blks) {
        lblk = page->index << (PAGE_SHIFT - inode->i_blkbits);
        lblk += (curr_off >> inode->i_blkbits) - contiguous_blks;
        ext4_es_remove_extent(inode, lblk, contiguous_blks);
    }

    /* If we have released all the blocks belonging to a cluster, then we
     * need to release the reserved space for that cluster. */
    num_clusters = EXT4_NUM_B2C(sbi, to_release);
    while (num_clusters > 0) {
        lblk = (page->index << (PAGE_SHIFT - inode->i_blkbits)) +
               ((num_clusters - 1) << sbi->s_cluster_bits);
        if (sbi->s_cluster_ratio == 1 ||
            !ext4_find_delalloc_cluster(inode, lblk))
            ext4_da_release_space(inode, 1);

        num_clusters--;
    }
}

/*
 * Delayed allocation stuff
 */

struct mpage_da_data {
    struct inode *inode;
    struct writeback_control *wbc;

    pgoff_t first_page;    /* The first page to write */
    pgoff_t next_page;     /* Current page to examine */
    pgoff_t last_page;     /* Last page to examine */
    /*
     * Extent to map - this can be after first_page because that can be
     * fully mapped. We somewhat abuse m_flags to store whether the extent
     * is delalloc or unwritten.
     */
    struct ext4_map_blocks map;
    struct ext4_io_submit io_submit;    /* IO submission data */
    unsigned int do_map:1;
};

static void mpage_release_unused_pages(struct mpage_da_data *mpd,
                                       bool invalidate)
{
    int nr_pages, i;
    pgoff_t index, end;
    struct pagevec pvec;
    struct inode *inode = mpd->inode;
    struct address_space *mapping = inode->i_mapping;

    /* This is necessary when next_page == 0. */
    if (mpd->first_page >= mpd->next_page)
        return;

    index = mpd->first_page;
    end = mpd->next_page - 1;
    if (invalidate) {
        ext4_lblk_t start, last;
        start = index << (PAGE_SHIFT - inode->i_blkbits);
        last = end << (PAGE_SHIFT - inode->i_blkbits);
        ext4_es_remove_extent(inode, start, last - start + 1);
    }

    pagevec_init(&pvec, 0);
    while (index <= end) {
        nr_pages = pagevec_lookup(&pvec, mapping, &index, PAGEVEC_SIZE);
        if (nr_pages == 0)
            break;
        for (i = 0; i < nr_pages; i++) {
            struct page *page = pvec.pages[i];
            if (page->index > end)
                break;
            BUG_ON(!PageLocked(page));
            BUG_ON(PageWriteback(page));
            if (invalidate) {
                if (page_mapped(page))
                    clear_page_dirty_for_io(page);
                block_invalidatepage(page, 0, PAGE_SIZE);
                ClearPageUptodate(page);
            }
            unlock_page(page);
        }
        pagevec_release(&pvec);
    }
}
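
/*
 * Dump the free/dirty cluster counters and this inode's outstanding
 * reservations; intended for debugging delayed-allocation ENOSPC failures.
 */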
static void ext4_print_free_blocks(struct inode *inode)
{
    struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
    struct super_block *sb = inode->i_sb;
    struct ext4_inode_info *ei = EXT4_I(inode);

    ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
             EXT4_C2B(EXT4_SB(inode->i_sb),
                      ext4_count_free_clusters(sb)));
    ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
    ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
             (long long) EXT4_C2B(EXT4_SB(sb),
                  percpu_counter_sum(&sbi->s_freeclusters_counter)));
    ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
             (long long) EXT4_C2B(EXT4_SB(sb),
                  percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
    ext4_msg(sb, KERN_CRIT, "Block reservation details");
    ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
             ei->i_reserved_data_blocks);
    return;
}
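
/* Does this dirty buffer still need block allocation or extent conversion? */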
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
{
    return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
}

/*
 * This function grabs code from the very beginning of
 * ext4_map_blocks, but assumes that the caller is from delayed write
 * time. This function looks up the requested blocks and sets the
 * buffer delay bit under the protection of i_data_sem.
 */
static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
                              struct ext4_map_blocks *map,
                              struct buffer_head *bh)
{
    struct extent_status es;
    int retval;
    sector_t invalid_block = ~((sector_t) 0xffff);
#ifdef ES_AGGRESSIVE_TEST
    struct ext4_map_blocks orig_map;

    memcpy(&orig_map, map, sizeof(*map));
#endif

    if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
        invalid_block = ~0;

    map->m_flags = 0;
    ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u,"
              "logical block %lu\n", inode->i_ino, map->m_len,
              (unsigned long) map->m_lblk);

    /* Lookup extent status tree firstly */
    if (ext4_es_lookup_extent(inode, iblock, &es)) {
        if (ext4_es_is_hole(&es)) {
            retval = 0;
            down_read(&EXT4_I(inode)->i_data_sem);
            goto add_delayed;
        }

        /*
         * Delayed extent could be allocated by fallocate.
         * So we need to check it.
         */
        if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {
            map_bh(bh, inode->i_sb, invalid_block);
            set_buffer_new(bh);
            set_buffer_delay(bh);
            return 0;
        }

        map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk;
        retval = es.es_len - (iblock - es.es_lblk);
        if (retval > map->m_len)
            retval = map->m_len;
        map->m_len = retval;
        if (ext4_es_is_written(&es))
            map->m_flags |= EXT4_MAP_MAPPED;
        else if (ext4_es_is_unwritten(&es))
            map->m_flags |= EXT4_MAP_UNWRITTEN;
        else
            BUG_ON(1);

#ifdef ES_AGGRESSIVE_TEST
        ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
#endif
        return retval;
    }

    /*
     * Try to see if we can get the block without requesting a new
     * file system block.
     */
    down_read(&EXT4_I(inode)->i_data_sem);
    if (ext4_has_inline_data(inode))
        retval = 0;
    else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
        retval = ext4_ext_map_blocks(NULL, inode, map, 0);
    else
        retval = ext4_ind_map_blocks(NULL, inode, map, 0);

add_delayed:
    if (retval == 0) {
        int ret;
        /*
         * XXX: __block_prepare_write() unmaps passed block,
         * is it OK?
         */
        /*
         * If the block was allocated from a previously allocated
         * cluster, then we don't need to reserve it again. However we
         * still need to reserve metadata for every block we're going
         * to write.
         */
        if (EXT4_SB(inode->i_sb)->s_cluster_ratio == 1 ||
            !ext4_find_delalloc_cluster(inode, map->m_lblk)) {
            ret = ext4_da_reserve_space(inode);
            if (ret) {
                /* not enough space to reserve */
                retval = ret;
                goto out_unlock;
            }
        }

        ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
                                    ~0, EXTENT_STATUS_DELAYED);
        if (ret) {
            retval = ret;
            goto out_unlock;
        }

        map_bh(bh, inode->i_sb, invalid_block);
        set_buffer_new(bh);
        set_buffer_delay(bh);
    } else if (retval > 0) {
        int ret;
        unsigned int status;

        if (unlikely(retval != map->m_len)) {
            ext4_warning(inode->i_sb,
                         "ES len assertion failed for inode "
                         "%lu: retval %d != map->m_len %d",
                         inode->i_ino, retval, map->m_len);
            WARN_ON(1);
        }

        status = map->m_flags & EXT4_MAP_UNWRITTEN ?
                 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
        ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
                                    map->m_pblk, status);
        if (ret != 0)
            retval = ret;
    }

out_unlock:
    up_read((&EXT4_I(inode)->i_data_sem));

    return retval;
}

/*
 * This is a special get_block_t callback which is used by
 * ext4_da_write_begin(). It will either return a mapped block or
 * reserve space for a single block.
 *
 * For a delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
 * We also have b_blocknr = -1 and b_bdev initialized properly
 *
 * For an unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
 * We also have b_blocknr = the physical block mapping the unwritten extent
 * and b_bdev initialized properly.
 */
int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
                           struct buffer_head *bh, int create)
{
    struct ext4_map_blocks map;
    int ret = 0;

    BUG_ON(create == 0);
    BUG_ON(bh->b_size != inode->i_sb->s_blocksize);

    map.m_lblk = iblock;
    map.m_len = 1;

    /*
     * first, we need to know whether the block is allocated already;
     * preallocated blocks are unmapped but should be treated
     * the same as allocated blocks.
     */
    ret = ext4_da_map_blocks(inode, iblock, &map, bh);
    if (ret <= 0)
        return ret;

    map_bh(bh, inode->i_sb, map.m_pblk);
    ext4_update_bh_state(bh, map.m_flags);

    if (buffer_unwritten(bh)) {
        /* A delayed write to unwritten bh should be marked
         * new and mapped. Mapped ensures that we don't do
         * get_block multiple times when we write to the same
         * offset and new ensures that we do proper zero out
         * for partial write.
         */
        set_buffer_new(bh);
        set_buffer_mapped(bh);
    }
    return 0;
}
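
/*
 * Trivial ext4_walk_page_buffers() callbacks used by
 * __ext4_journalled_writepage() to pin and unpin every buffer on the page
 * across the page-unlock / journal-start window.
 */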
static int bget_one(handle_t *handle, struct buffer_head *bh)
{
    get_bh(bh);
    return 0;
}

static int bput_one(handle_t *handle, struct buffer_head *bh)
{
    put_bh(bh);
    return 0;
}

static int __ext4_journalled_writepage(struct page *page,
                                       unsigned int len)
{
    struct address_space *mapping = page->mapping;
    struct inode *inode = mapping->host;
    struct buffer_head *page_bufs = NULL;
    handle_t *handle = NULL;
    int ret = 0, err = 0;
    int inline_data = ext4_has_inline_data(inode);
    struct buffer_head *inode_bh = NULL;

    ClearPageChecked(page);

    if (inline_data) {
        BUG_ON(page->index != 0);
        BUG_ON(len > ext4_get_max_inline_size(inode));
        inode_bh = ext4_journalled_write_inline_data(inode, len, page);
        if (inode_bh == NULL)
            goto out;
    } else {
        page_bufs = page_buffers(page);
        if (!page_bufs) {
            BUG();
            goto out;
        }
        ext4_walk_page_buffers(handle, page_bufs, 0, len,
                               NULL, bget_one);
    }
    /*
     * We need to release the page lock before we start the
     * journal, so grab a reference so the page won't disappear
     * out from under us.
     */
    get_page(page);
    unlock_page(page);

    handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
                                ext4_writepage_trans_blocks(inode));
    if (IS_ERR(handle)) {
        ret = PTR_ERR(handle);
        put_page(page);
        goto out_no_pagelock;
    }
    BUG_ON(!ext4_handle_valid(handle));

    lock_page(page);
    put_page(page);
    if (page->mapping != mapping) {
        /* The page got truncated from under us */
        ext4_journal_stop(handle);
        ret = 0;
        goto out;
    }

    if (inline_data) {
        BUFFER_TRACE(inode_bh, "get write access");
        ret = ext4_journal_get_write_access(handle, inode_bh);
        err = ext4_handle_dirty_metadata(handle, inode, inode_bh);
    } else {
        ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
                                     do_journal_get_write_access);
        err = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
                                     write_end_fn);
    }
    if (ret == 0)
        ret = err;
    EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
    err = ext4_journal_stop(handle);
    if (!ret)
        ret = err;

    if (!ext4_has_inline_data(inode))
        ext4_walk_page_buffers(NULL, page_bufs, 0, len,
                               NULL, bput_one);
    ext4_set_inode_state(inode, EXT4_STATE_JDATA);
out:
    unlock_page(page);
out_no_pagelock:
    brelse(inode_bh);
    return ret;
}

/*
 * Note that we don't need to start a transaction unless we're journaling data
 * because we should have holes filled from ext4_page_mkwrite(). We even don't
 * need to file the inode to the transaction's list in ordered mode because if
 * we are writing back data added by write(), the inode is already there and if
 * we are writing back data modified via mmap(), no one guarantees in which
 * transaction the data will hit the disk. In case we are journaling data, we
 * cannot start a transaction directly because transaction start ranks above
 * page lock so we have to do some magic.
 *
 * This function can get called via...
 * - ext4_writepages after taking page lock (have journal handle)
 * - journal_submit_inode_data_buffers (no journal handle)
 * - shrink_page_list via the kswapd/direct reclaim (no journal handle)
 * - grab_page_cache when doing write_begin (have journal handle)
 *
 * We don't do any block allocation in this function. If we have a page with
 * multiple blocks we need to write those buffer_heads that are mapped. This
 * is important for mmap-based writes. So if we do, with blocksize 1K:
 *	truncate(f, 1024);
 *	a = mmap(f, 0, 4096);
 *	a[0] = 'a';
 *	truncate(f, 4096);
 * we have in the page the first buffer_head mapped via the page_mkwrite
 * callback but the other buffer_heads would be unmapped but dirty (dirty
 * done via do_wp_page). So writepage should write the first block. If we
 * modify the mmap area beyond 1024 we will again get a page_fault and the
 * page_mkwrite callback will do the block allocation and mark the
 * buffer_heads mapped.
 *
 * We redirty the page if we have any buffer_heads that are either delayed
 * or unwritten in the page.
 *
 * We can get recursively called as shown below.
 *
 *	ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
 *		ext4_writepage()
 *
 * But since we don't do any block allocation we should not deadlock.
 * The page also has its dirty flag cleared, so we don't get recursive
 * page locking.
 */
2092 static int ext4_writepage(struct page *page,
2093 struct writeback_control *wbc)
2098 struct buffer_head *page_bufs = NULL;
2099 struct inode *inode = page->mapping->host;
2100 struct ext4_io_submit io_submit;
2101 bool keep_towrite = false;
2103 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
2104 ext4_invalidatepage(page, 0, PAGE_SIZE);
2109 trace_ext4_writepage(page);
2110 size = i_size_read(inode);
2111 if (page->index == size >> PAGE_SHIFT)
2112 len = size & ~PAGE_MASK;
2116 page_bufs = page_buffers(page);
2118 * We cannot do block allocation or other extent handling in this
2119 * function. If there are buffers needing that, we have to redirty
2120 * the page. But we may reach here when we do a journal commit via
2121 * journal_submit_inode_data_buffers() and in that case we must write
2122 * allocated buffers to achieve data=ordered mode guarantees.
2124 * Also, if there is only one buffer per page (the fs block
2125 * size == the page size), if one buffer needs block
2126 * allocation or needs to modify the extent tree to clear the
2127 * unwritten flag, we know that the page can't be written at
2128 * all, so we might as well refuse the write immediately.
2129 * Unfortunately if the block size != page size, we can't as
2130 * easily detect this case using ext4_walk_page_buffers(), but
2131 * for the extremely common case, this is an optimization that
2132 * skips a useless round trip through ext4_bio_write_page().
2134 if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2135 ext4_bh_delay_or_unwritten)) {
2136 redirty_page_for_writepage(wbc, page);
2137 if ((current->flags & PF_MEMALLOC) ||
2138 (inode->i_sb->s_blocksize == PAGE_SIZE)) {
2140 * For memory cleaning there's no point in writing only
2141 * some buffers. So just bail out. Warn if we came here
2142 * from direct reclaim.
2144 WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD))
2149 keep_towrite = true;
2152 if (PageChecked(page) && ext4_should_journal_data(inode))
2154 * It's mmapped pagecache. Add buffers and journal it. There
2155 * doesn't seem to be much point in redirtying the page here.
2157 return __ext4_journalled_writepage(page, len);
2159 ext4_io_submit_init(&io_submit, wbc);
2160 io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS);
2161 if (!io_submit.io_end) {
2162 redirty_page_for_writepage(wbc, page);
2166 ret = ext4_bio_write_page(&io_submit, page, len, wbc, keep_towrite);
2167 ext4_io_submit(&io_submit);
2168 /* Drop io_end reference we got from init */
2169 ext4_put_io_end_defer(io_submit.io_end);
2173 static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
2179 BUG_ON(page->index != mpd->first_page);
2180 clear_page_dirty_for_io(page);
2182 * We have to be very careful here! Nothing protects writeback path
2183 * against i_size changes and the page can be writeably mapped into
2184 * page tables. So an application can be growing i_size and writing
2185 * data through mmap while writeback runs. clear_page_dirty_for_io()
2186 * write-protects our page in page tables and the page cannot get
2187 * written to again until we release page lock. So only after
2188 * clear_page_dirty_for_io() we are safe to sample i_size for
2189 * ext4_bio_write_page() to zero-out tail of the written page. We rely
2190 * on the barrier provided by TestClearPageDirty in
2191 * clear_page_dirty_for_io() to make sure i_size is really sampled only
2192 * after page tables are updated.
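/*
 * Illustrative timeline of the race being avoided (hypothetical numbers):
 * had i_size been sampled before clear_page_dirty_for_io(), an mmap
 * writer could extend the file from 1536 to 2048 bytes and dirty the
 * tail of the page afterwards; ext4_bio_write_page() would then zero
 * bytes 1536..2047 based on the stale size and lose the freshly
 * written data.
 */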
2194 size = i_size_read(mpd->inode);
2195 if (page->index == size >> PAGE_SHIFT)
2196 len = size & ~PAGE_MASK;
2199 err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false);
2201 mpd->wbc->nr_to_write--;
2207 #define BH_FLAGS ((1 << BH_Unwritten) | (1 << BH_Delay))
2210 * mballoc gives us at most this number of blocks...
2211 * XXX: That seems to be only a limitation of ext4_mb_normalize_request().
2212 * The rest of mballoc seems to handle chunks up to full group size.
2214 #define MAX_WRITEPAGES_EXTENT_LEN 2048
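/*
 * For scale (illustrative arithmetic): 2048 blocks of 4K each means one
 * mapping attempt covers at most 8 MB of file data; with 1K blocks the
 * cap is 2 MB.
 */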
2217 * mpage_add_bh_to_extent - try to add bh to extent of blocks to map
2219 * @mpd - extent of blocks
2220 * @lblk - logical number of the block in the file
2221 * @bh - buffer head we want to add to the extent
2223 * The function is used to collect contiguous blocks in the same state. If the
2224 * buffer doesn't require mapping for writeback and we haven't started the
2225 * extent of buffers to map yet, the function returns 'true' immediately - the
2226 * caller can write the buffer right away. Otherwise the function returns true
2227 * if the block has been added to the extent, and false if the block couldn't be added.
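/*
 * Worked example (illustrative, assuming a 1K block size, i.e. four
 * buffers per 4K page): a delayed buffer at lblk 8 starts the extent
 * (m_lblk = 8, m_len = 1); a delayed buffer at lblk 9 with the same
 * BH_FLAGS state merges into it (m_len = 2); a buffer that is already
 * mapped and needs no allocation then ends the accumulation, and the
 * caller must map what was collected so far.
 */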
2230 static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
2231 struct buffer_head *bh)
2233 struct ext4_map_blocks *map = &mpd->map;
2235 /* Buffer that doesn't need mapping for writeback? */
2236 if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
2237 (!buffer_delay(bh) && !buffer_unwritten(bh))) {
2238 /* So far no extent to map => we write the buffer right away */
2239 if (map->m_len == 0)
2244 /* First block in the extent? */
2245 if (map->m_len == 0) {
2246 /* We cannot map unless handle is started... */
2251 map->m_flags = bh->b_state & BH_FLAGS;
2255 /* Don't go larger than mballoc is willing to allocate */
2256 if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
2259 /* Can we merge the block to our big extent? */
2260 if (lblk == map->m_lblk + map->m_len &&
2261 (bh->b_state & BH_FLAGS) == map->m_flags) {
2269 * mpage_process_page_bufs - submit page buffers for IO or add them to extent
2271 * @mpd - extent of blocks for mapping
2272 * @head - the first buffer in the page
2273 * @bh - buffer we should start processing from
2274 * @lblk - logical number of the block in the file corresponding to @bh
2276 * Walk through page buffers from @bh up to @head (exclusive) and either submit
2277 * the page for IO if all buffers in this page were mapped and there's no
2278 * accumulated extent of buffers to map or add buffers in the page to the
2279 * extent of buffers to map. The function returns 1 if the caller can continue
2280 * by processing the next page, 0 if it should stop adding buffers to the
2281 * extent to map because we cannot extend it anymore. It can also return a value
2282 * < 0 in case of an error during IO submission.
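/*
 * Sketch of the caller's contract (illustrative only): after
 *
 *	err = mpage_process_page_bufs(mpd, head, head, lblk);
 *
 * err < 0 means IO submission failed, err == 0 means the extent to map
 * is full and must be mapped before scanning further, and err == 1
 * means the caller may continue with the next page.
 */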
2284 static int mpage_process_page_bufs(struct mpage_da_data *mpd,
2285 struct buffer_head *head,
2286 struct buffer_head *bh,
2289 struct inode *inode = mpd->inode;
2291 ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(inode) - 1)
2292 >> inode->i_blkbits;
2295 BUG_ON(buffer_locked(bh));
2297 if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) {
2298 /* Found extent to map? */
2301 /* Buffer needs mapping and handle is not started? */
2304 /* Everything mapped so far and we hit EOF */
2307 } while (lblk++, (bh = bh->b_this_page) != head);
2308 /* So far everything mapped? Submit the page for IO. */
2309 if (mpd->map.m_len == 0) {
2310 err = mpage_submit_page(mpd, head->b_page);
2314 return lblk < blocks;
2318 * mpage_map_buffers - update buffers corresponding to changed extent and
2319 * submit fully mapped pages for IO
2321 * @mpd - description of extent to map, on return next extent to map
2323 * Scan buffers corresponding to changed extent (we expect corresponding pages
2324 * to be already locked) and update buffer state according to new extent state.
2325 * We map delalloc buffers to their physical location, clear unwritten bits,
2326 * and mark buffers as uninit when we perform writes to unwritten extents
2327 * and do extent conversion after IO is finished. If the last page is not fully
2328 * mapped, we update @map to the next extent in the last page that needs
2329 * mapping. Otherwise we submit the page for IO.
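/*
 * Worked example (illustrative figures): for an extent mapped as
 * m_lblk = 8, m_len = 4, m_pblk = 1000, the scan walks the locked pages
 * covering blocks 8..11, clears BH_Delay on each buffer and assigns
 * b_blocknr 1000..1003 in order; the fully mapped pages are then
 * submitted for IO.
 */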
2331 static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
2333 struct pagevec pvec;
2335 struct inode *inode = mpd->inode;
2336 struct buffer_head *head, *bh;
2337 int bpp_bits = PAGE_SHIFT - inode->i_blkbits;
2343 start = mpd->map.m_lblk >> bpp_bits;
2344 end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits;
2345 lblk = start << bpp_bits;
2346 pblock = mpd->map.m_pblk;
2348 pagevec_init(&pvec, 0);
2349 while (start <= end) {
2350 nr_pages = pagevec_lookup(&pvec, inode->i_mapping, &start,
2354 for (i = 0; i < nr_pages; i++) {
2355 struct page *page = pvec.pages[i];
2357 if (page->index > end)
2359 bh = head = page_buffers(page);
2361 if (lblk < mpd->map.m_lblk)
2363 if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
2365 * Buffer after end of mapped extent.
2366 * Find next buffer in the page to map.
2369 mpd->map.m_flags = 0;
2371 * FIXME: If dioread_nolock supports
2372 * blocksize < pagesize, we need to make
2373 * sure we add size mapped so far to
2374 * io_end->size as the following call
2375 * can submit the page for IO.
2377 err = mpage_process_page_bufs(mpd, head,
2379 pagevec_release(&pvec);
2384 if (buffer_delay(bh)) {
2385 clear_buffer_delay(bh);
2386 bh->b_blocknr = pblock++;
2388 clear_buffer_unwritten(bh);
2389 } while (lblk++, (bh = bh->b_this_page) != head);
2392 * FIXME: This is going to break if dioread_nolock
2393 * supports blocksize < pagesize as we will try to
2394 * convert potentially unmapped parts of the inode.
2396 mpd->io_submit.io_end->size += PAGE_SIZE;
2397 /* Page fully mapped - let IO run! */
2398 err = mpage_submit_page(mpd, page);
2400 pagevec_release(&pvec);
2404 pagevec_release(&pvec);
2406 /* Extent fully mapped and matches the page boundary. We are done. */
2408 mpd->map.m_flags = 0;
2412 static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
2414 struct inode *inode = mpd->inode;
2415 struct ext4_map_blocks *map = &mpd->map;
2416 int get_blocks_flags;
2417 int err, dioread_nolock;
2419 trace_ext4_da_write_pages_extent(inode, map);
2421 * Call ext4_map_blocks() to allocate any delayed allocation blocks, or
2422 * to convert an unwritten extent to be initialized (in the case
2423 * where we have written into one or more preallocated blocks). It is
2424 * possible that we're going to need more metadata blocks than
2425 * previously reserved. However we must not fail because we're in
2426 * writeback and there is nothing we can do about it, so it might result
2427 * in data loss. So use reserved blocks to allocate metadata if necessary.
2430 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if
2431 * the blocks in question are delalloc blocks. This indicates
2432 * that the blocks and quotas have already been checked when
2433 * the data was copied into the page cache.
2435 get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
2436 EXT4_GET_BLOCKS_METADATA_NOFAIL |
2437 EXT4_GET_BLOCKS_IO_SUBMIT;
2438 dioread_nolock = ext4_should_dioread_nolock(inode);
2440 get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
2441 if (map->m_flags & (1 << BH_Delay))
2442 get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
2444 err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
2447 if (dioread_nolock && (map->m_flags & EXT4_MAP_UNWRITTEN)) {
2448 if (!mpd->io_submit.io_end->handle &&
2449 ext4_handle_valid(handle)) {
2450 mpd->io_submit.io_end->handle = handle->h_rsv_handle;
2451 handle->h_rsv_handle = NULL;
2453 ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end);
2456 BUG_ON(map->m_len == 0);
2457 if (map->m_flags & EXT4_MAP_NEW) {
2458 clean_bdev_aliases(inode->i_sb->s_bdev, map->m_pblk,
2465 * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length
2466 * mpd->len and submit pages underlying it for IO
2468 * @handle - handle for journal operations
2469 * @mpd - extent to map
2470 * @give_up_on_write - we set this to true iff there is a fatal error and there
2471 * is no hope of writing the data. The caller should discard
2472 * dirty pages to avoid infinite loops.
2474 * The function maps the extent starting at mpd->lblk of length mpd->len. If it is
2475 * delayed, blocks are allocated, if it is unwritten, we may need to convert
2476 * them to initialized or split the described range from larger unwritten
2477 * extent. Note that we need not map all of the described range since allocation
2478 * can return fewer blocks or the range may be covered by more unwritten extents. We
2479 * cannot map more because we are limited by reserved transaction credits. On
2480 * the other hand we always make sure that the last touched page is fully
2481 * mapped so that it can be written out (and thus forward progress is
2482 * guaranteed). After mapping we submit all mapped pages for IO.
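/*
 * Illustration (hypothetical numbers): a request to map m_len = 2048
 * blocks may come back from mpage_map_one_extent() with only 512 blocks
 * allocated. The loop below then updates the buffers and submits what
 * was mapped, and retries with the remainder until at least the last
 * touched page is fully mapped.
 */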
2484 static int mpage_map_and_submit_extent(handle_t *handle,
2485 struct mpage_da_data *mpd,
2486 bool *give_up_on_write)
2488 struct inode *inode = mpd->inode;
2489 struct ext4_map_blocks *map = &mpd->map;
2494 mpd->io_submit.io_end->offset =
2495 ((loff_t)map->m_lblk) << inode->i_blkbits;
2497 err = mpage_map_one_extent(handle, mpd);
2499 struct super_block *sb = inode->i_sb;
2501 if (ext4_forced_shutdown(EXT4_SB(sb)) ||
2502 EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)
2503 goto invalidate_dirty_pages;
2505 * Let the upper layers retry transient errors.
2506 * In the case of ENOSPC, if ext4_count_free_blocks()
2507 * is non-zero, a commit should free up blocks.
2509 if ((err == -ENOMEM) ||
2510 (err == -ENOSPC && ext4_count_free_clusters(sb))) {
2512 goto update_disksize;
2515 ext4_msg(sb, KERN_CRIT,
2516 "Delayed block allocation failed for "
2517 "inode %lu at logical offset %llu with"
2518 " max blocks %u with error %d",
2520 (unsigned long long)map->m_lblk,
2521 (unsigned)map->m_len, -err);
2522 ext4_msg(sb, KERN_CRIT,
2523 "This should not happen!! Data will "
2526 ext4_print_free_blocks(inode);
2527 invalidate_dirty_pages:
2528 *give_up_on_write = true;
2533 * Update buffer state, submit mapped pages, and get us new
2536 err = mpage_map_and_submit_buffers(mpd);
2538 goto update_disksize;
2539 } while (map->m_len);
2543 * Update on-disk size after IO is submitted. Races with
2544 * truncate are avoided by checking i_size under i_data_sem.
2546 disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
2547 if (disksize > EXT4_I(inode)->i_disksize) {
2551 down_write(&EXT4_I(inode)->i_data_sem);
2552 i_size = i_size_read(inode);
2553 if (disksize > i_size)
2555 if (disksize > EXT4_I(inode)->i_disksize)
2556 EXT4_I(inode)->i_disksize = disksize;
2557 up_write(&EXT4_I(inode)->i_data_sem);
2558 err2 = ext4_mark_inode_dirty(handle, inode);
2560 ext4_error(inode->i_sb,
2561 "Failed to mark inode %lu dirty",
2570 * Calculate the total number of credits to reserve for one writepages
2571 * iteration. This is called from ext4_writepages(). We map an extent of
2572 * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping
2573 * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN +
2574 * bpp - 1 blocks in bpp different extents.
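/*
 * Worked example (illustrative, assuming 4K pages and a 1K block size):
 * bpp = 4, so we size the transaction for mapping up to
 * MAX_WRITEPAGES_EXTENT_LEN + bpp - 1 = 2048 + 3 = 2051 blocks spread
 * over at most 4 separate extents.
 */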
2576 static int ext4_da_writepages_trans_blocks(struct inode *inode)
2578 int bpp = ext4_journal_blocks_per_page(inode);
2580 return ext4_meta_trans_blocks(inode,
2581 MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp);
2585 * mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages
2586 * and underlying extent to map
2588 * @mpd - where to look for pages
2590 * Walk dirty pages in the mapping. If they are fully mapped, submit them for
2591 * IO immediately. When we find a page which isn't mapped we start accumulating
2592 * an extent of buffers underlying these pages that needs mapping (formed by
2593 * either delayed or unwritten buffers). We also lock the pages containing
2594 * these buffers. The extent found is returned in @mpd structure (starting at
2595 * mpd->lblk with length mpd->len blocks).
2597 * Note that this function can attach bios to one io_end structure which are
2598 * neither logically nor physically contiguous. Although it may seem an
2599 * unnecessary complication, it is actually inevitable in blocksize < pagesize
2600 * case as we need to track IO to all buffers underlying a page in one io_end.
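/*
 * Example of the blocksize < pagesize case above (illustrative,
 * assuming 1K blocks): a single 4K page may contain two already mapped
 * buffers and two delayed ones; the mapped pair can be submitted
 * immediately while the delayed pair waits for allocation, so the one
 * io_end tracking the page ends up holding bios that are neither
 * logically nor physically contiguous.
 */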
2602 static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
2604 struct address_space *mapping = mpd->inode->i_mapping;
2605 struct pagevec pvec;
2606 unsigned int nr_pages;
2607 long left = mpd->wbc->nr_to_write;
2608 pgoff_t index = mpd->first_page;
2609 pgoff_t end = mpd->last_page;
2612 int blkbits = mpd->inode->i_blkbits;
2614 struct buffer_head *head;
2616 if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages)
2617 tag = PAGECACHE_TAG_TOWRITE;
2619 tag = PAGECACHE_TAG_DIRTY;
2621 pagevec_init(&pvec, 0);
2623 mpd->next_page = index;
2624 while (index <= end) {
2625 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
2626 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
2630 for (i = 0; i < nr_pages; i++) {
2631 struct page *page = pvec.pages[i];
2634 * At this point, the page may be truncated or
2635 * invalidated (changing page->mapping to NULL), or
2636 * even swizzled back from swapper_space to tmpfs file
2637 * mapping. However, page->index will not change
2638 * because we have a reference on the page.
2640 if (page->index > end)
2644 * Accumulated enough dirty pages? This doesn't apply
2645 * to WB_SYNC_ALL mode. For integrity sync we have to
2646 * keep going because someone may be concurrently
2647 * dirtying pages, and we might have synced a lot of
2648 * newly appeared dirty pages, but have not synced all
2649 * of the old dirty pages.
2651 if (mpd->wbc->sync_mode == WB_SYNC_NONE && left <= 0)
2654 /* If we can't merge this page, we are done. */
2655 if (mpd->map.m_len > 0 && mpd->next_page != page->index)
2660 * If the page is no longer dirty, or its mapping no
2661 * longer corresponds to inode we are writing (which
2662 * means it has been truncated or invalidated), or the
2663 * page is already under writeback and we are not doing
2664 * a data integrity writeback, skip the page
2666 if (!PageDirty(page) ||
2667 (PageWriteback(page) &&
2668 (mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
2669 unlikely(page->mapping != mapping)) {
2674 wait_on_page_writeback(page);
2675 BUG_ON(PageWriteback(page));
2677 if (mpd->map.m_len == 0)
2678 mpd->first_page = page->index;
2679 mpd->next_page = page->index + 1;
2680 /* Add all dirty buffers to mpd */
2681 lblk = ((ext4_lblk_t)page->index) <<
2682 (PAGE_SHIFT - blkbits);
2683 head = page_buffers(page);
2684 err = mpage_process_page_bufs(mpd, head, head, lblk);
2690 pagevec_release(&pvec);
2695 pagevec_release(&pvec);
2699 static int __writepage(struct page *page, struct writeback_control *wbc,
2702 struct address_space *mapping = data;
2703 int ret = ext4_writepage(page, wbc);
2704 mapping_set_error(mapping, ret);
2708 static int ext4_writepages(struct address_space *mapping,
2709 struct writeback_control *wbc)
2711 pgoff_t writeback_index = 0;
2712 long nr_to_write = wbc->nr_to_write;
2713 int range_whole = 0;
2715 handle_t *handle = NULL;
2716 struct mpage_da_data mpd;
2717 struct inode *inode = mapping->host;
2718 int needed_blocks, rsv_blocks = 0, ret = 0;
2719 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2721 struct blk_plug plug;
2722 bool give_up_on_write = false;
2724 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
2727 percpu_down_read(&sbi->s_journal_flag_rwsem);
2728 trace_ext4_writepages(inode, wbc);
2730 if (dax_mapping(mapping)) {
2731 ret = dax_writeback_mapping_range(mapping, inode->i_sb->s_bdev,
2733 goto out_writepages;
2737 * No pages to write? This is mainly a kludge to avoid starting
2738 * a transaction for special inodes like the journal inode on last iput(),
2739 * because that could violate lock ordering on umount.
2741 if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2742 goto out_writepages;
2744 if (ext4_should_journal_data(inode)) {
2745 struct blk_plug plug;
2747 blk_start_plug(&plug);
2748 ret = write_cache_pages(mapping, wbc, __writepage, mapping);
2749 blk_finish_plug(&plug);
2750 goto out_writepages;
2754 * If the filesystem has aborted, it is read-only, so return
2755 * right away instead of dumping stack traces later on that
2756 * will obscure the real source of the problem. We test
2757 * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
2758 * the latter could be true if the filesystem is mounted
2759 * read-only, and in that case, ext4_writepages should
2760 * *never* be called, so if that ever happens, we would want the stack trace.
2763 if (unlikely(ext4_forced_shutdown(EXT4_SB(mapping->host->i_sb)) ||
2764 sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) {
2766 goto out_writepages;
2769 if (ext4_should_dioread_nolock(inode)) {
2771 * We may need to convert up to one extent per block in
2772 * the page and we may dirty the inode.
2774 rsv_blocks = 1 + (PAGE_SIZE >> inode->i_blkbits);
2778 * If we have inline data and arrive here, it means that
2779 * we will soon create the block for the 1st page, so
2780 * we'd better clear the inline data here.
2782 if (ext4_has_inline_data(inode)) {
2783 /* Just inode will be modified... */
2784 handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
2785 if (IS_ERR(handle)) {
2786 ret = PTR_ERR(handle);
2787 goto out_writepages;
2789 BUG_ON(ext4_test_inode_state(inode,
2790 EXT4_STATE_MAY_INLINE_DATA));
2791 ext4_destroy_inline_data(handle, inode);
2792 ext4_journal_stop(handle);
2795 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2798 if (wbc->range_cyclic) {
2799 writeback_index = mapping->writeback_index;
2800 if (writeback_index)
2802 mpd.first_page = writeback_index;
2805 mpd.first_page = wbc->range_start >> PAGE_SHIFT;
2806 mpd.last_page = wbc->range_end >> PAGE_SHIFT;
2811 ext4_io_submit_init(&mpd.io_submit, wbc);
2813 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2814 tag_pages_for_writeback(mapping, mpd.first_page, mpd.last_page);
2816 blk_start_plug(&plug);
2819 * First writeback pages that don't need mapping - we can avoid
2820 * starting a transaction unnecessarily and also avoid being blocked
2821 * in the block layer on device congestion while having a transaction started.
2825 mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2826 if (!mpd.io_submit.io_end) {
2830 ret = mpage_prepare_extent_to_map(&mpd);
2831 /* Submit prepared bio */
2832 ext4_io_submit(&mpd.io_submit);
2833 ext4_put_io_end_defer(mpd.io_submit.io_end);
2834 mpd.io_submit.io_end = NULL;
2835 /* Unlock pages we didn't use */
2836 mpage_release_unused_pages(&mpd, false);
2840 while (!done && mpd.first_page <= mpd.last_page) {
2841 /* For each extent of pages we use new io_end */
2842 mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2843 if (!mpd.io_submit.io_end) {
2849 * We have two constraints: We find one extent to map and we
2850 * must always write out the whole page (makes a difference when
2851 * blocksize < pagesize) so that we don't block on IO when we
2852 * try to write out the rest of the page. Journalled mode is
2853 * not supported by delalloc.
2855 BUG_ON(ext4_should_journal_data(inode));
2856 needed_blocks = ext4_da_writepages_trans_blocks(inode);
2858 /* start a new transaction */
2859 handle = ext4_journal_start_with_reserve(inode,
2860 EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks);
2861 if (IS_ERR(handle)) {
2862 ret = PTR_ERR(handle);
2863 ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2864 "%ld pages, ino %lu; err %d", __func__,
2865 wbc->nr_to_write, inode->i_ino, ret);
2866 /* Release allocated io_end */
2867 ext4_put_io_end(mpd.io_submit.io_end);
2868 mpd.io_submit.io_end = NULL;
2873 trace_ext4_da_write_pages(inode, mpd.first_page, mpd.wbc);
2874 ret = mpage_prepare_extent_to_map(&mpd);
2877 ret = mpage_map_and_submit_extent(handle, &mpd,
2881 * We scanned the whole range (or exhausted
2882 * nr_to_write), submitted what was mapped and
2883 * didn't find anything needing mapping. We are done.
2890 * Caution: If the handle is synchronous,
2891 * ext4_journal_stop() can wait for transaction commit
2892 * to finish which may depend on writeback of pages to
2893 * complete or on page lock to be released. In that
2894 * case, we have to wait until after we have
2895 * submitted all the IO, released page locks we hold,
2896 * and dropped io_end reference (for extent conversion
2897 * to be able to complete) before stopping the handle.
2899 if (!ext4_handle_valid(handle) || handle->h_sync == 0) {
2900 ext4_journal_stop(handle);
2904 /* Submit prepared bio */
2905 ext4_io_submit(&mpd.io_submit);
2906 /* Unlock pages we didn't use */
2907 mpage_release_unused_pages(&mpd, give_up_on_write);
2909 * Drop our io_end reference we got from init. We have
2910 * to be careful and use deferred io_end finishing if
2911 * we are still holding the transaction as we can
2912 * release the last reference to io_end which may end
2913 * up doing unwritten extent conversion.
2916 ext4_put_io_end_defer(mpd.io_submit.io_end);
2917 ext4_journal_stop(handle);
2919 ext4_put_io_end(mpd.io_submit.io_end);
2920 mpd.io_submit.io_end = NULL;
2922 if (ret == -ENOSPC && sbi->s_journal) {
2924 * Commit the transaction which would
2925 * free blocks released in the transaction and try again.
2928 jbd2_journal_force_commit_nested(sbi->s_journal);
2932 /* Fatal error - ENOMEM, EIO... */
2937 blk_finish_plug(&plug);
2938 if (!ret && !cycled && wbc->nr_to_write > 0) {
2940 mpd.last_page = writeback_index - 1;
2946 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2948 * Set the writeback_index so that range_cyclic
2949 * mode will write it back later
2951 mapping->writeback_index = mpd.first_page;
2954 trace_ext4_writepages_result(inode, wbc, ret,
2955 nr_to_write - wbc->nr_to_write);
2956 percpu_up_read(&sbi->s_journal_flag_rwsem);
2960 static int ext4_nonda_switch(struct super_block *sb)
2962 s64 free_clusters, dirty_clusters;
2963 struct ext4_sb_info *sbi = EXT4_SB(sb);
2966 * Switch to non-delalloc mode if we are running low
2967 * on free blocks. The free block accounting via percpu
2968 * counters can get slightly wrong with percpu_counter_batch getting
2969 * accumulated on each CPU without updating global counters.
2970 * Delalloc needs accurate free block accounting, so switch
2971 * to non-delalloc when we are near the error range.
2974 percpu_counter_read_positive(&sbi->s_freeclusters_counter);
2976 percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
2978 * Start pushing delalloc when 1/2 of free blocks are dirty.
2980 if (dirty_clusters && (free_clusters < 2 * dirty_clusters))
2981 try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
2983 if (2 * free_clusters < 3 * dirty_clusters ||
2984 free_clusters < (dirty_clusters + EXT4_FREECLUSTERS_WATERMARK)) {
2986 * free block count is less than 150% of the dirty block count,
2987 * or the free block count is below the watermark
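/*
 * E.g. (illustrative figures): free_clusters = 1000 and
 * dirty_clusters = 700 trips the first test (2000 < 2100),
 * so this write falls back to nondelalloc.
 */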
2994 /* We always reserve for an inode update; the superblock could be there too */
2995 static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len)
2997 if (likely(ext4_has_feature_large_file(inode->i_sb)))
3000 if (pos + len <= 0x7fffffffULL)
3003 /* We might need to update the superblock to set LARGE_FILE */
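/*
 * E.g. (illustrative): on a filesystem without the large_file feature,
 * a write ending beyond offset 0x7fffffff is the case that may require
 * setting LARGE_FILE in the superblock, hence the extra credit reserved
 * for the superblock update.
 */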
3007 static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
3008 loff_t pos, unsigned len, unsigned flags,
3009 struct page **pagep, void **fsdata)
3011 int ret, retries = 0;
3014 struct inode *inode = mapping->host;
3017 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
3020 index = pos >> PAGE_SHIFT;
3022 if (ext4_nonda_switch(inode->i_sb) ||
3023 S_ISLNK(inode->i_mode)) {
3024 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
3025 return ext4_write_begin(file, mapping, pos,
3026 len, flags, pagep, fsdata);
3028 *fsdata = (void *)0;
3029 trace_ext4_da_write_begin(inode, pos, len, flags);
3031 if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
3032 ret = ext4_da_write_inline_data_begin(mapping, inode,
3042 * grab_cache_page_write_begin() can take a long time if the
3043 * system is thrashing due to memory pressure, or if the page
3044 * is being written back. So grab it first before we start
3045 * the transaction handle. This also allows us to allocate
3046 * the page (if needed) without using GFP_NOFS.
3049 page = grab_cache_page_write_begin(mapping, index, flags);
3055 * With delayed allocation, we don't log the i_disksize update
3056 * if there is delayed block allocation. But we still need
3057 * to journal the i_disksize update if the write is to the end
3058 * of the file and hits an already mapped buffer.
3061 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
3062 ext4_da_write_credits(inode, pos, len));
3063 if (IS_ERR(handle)) {
3065 return PTR_ERR(handle);
3069 if (page->mapping != mapping) {
3070 /* The page got truncated from under us */
3073 ext4_journal_stop(handle);
3076 /* In case writeback began while the page was unlocked */
3077 wait_for_stable_page(page);
3079 #ifdef CONFIG_EXT4_FS_ENCRYPTION
3080 ret = ext4_block_write_begin(page, pos, len,
3081 ext4_da_get_block_prep);
3083 ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
3087 ext4_journal_stop(handle);
3089 * block_write_begin may have instantiated a few blocks
3090 * outside i_size. Trim these off again. We don't need
3091 * i_size_read because we hold i_mutex.
3093 if (pos + len > inode->i_size)
3094 ext4_truncate_failed_write(inode);
3096 if (ret == -ENOSPC &&
3097 ext4_should_retry_alloc(inode->i_sb, &retries))
3109 * Check if we should update i_disksize
3110 * when the write is to the end of the file but does not require block allocation
3112 static int ext4_da_should_update_i_disksize(struct page *page,
3113 unsigned long offset)
3115 struct buffer_head *bh;
3116 struct inode *inode = page->mapping->host;
3120 bh = page_buffers(page);
3121 idx = offset >> inode->i_blkbits;
3123 for (i = 0; i < idx; i++)