/*
 *  linux/fs/ext4/inode.c
 *
 *  Copyright (C) 1992, 1993, 1994, 1995
 *  Remy Card (card@masi.ibp.fr)
 *  Laboratoire MASI - Institut Blaise Pascal
 *  Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/iomap.h>

#include "ext4_jbd2.h"

#include <trace/events/ext4.h>

#define MPAGE_DA_EXTENT_TAIL 0x01
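/*
 * Compute the metadata checksum of the on-disk inode. The checksum fields
 * themselves (i_checksum_lo and, on large inodes, i_checksum_hi) are fed in
 * as zeroes so the stored checksum does not depend on its own value.
 */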
51 static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
52 struct ext4_inode_info *ei)
54 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
57 int offset = offsetof(struct ext4_inode, i_checksum_lo);
58 unsigned int csum_size = sizeof(dummy_csum);
60 csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, offset);
61 csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, csum_size);
63 csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
64 EXT4_GOOD_OLD_INODE_SIZE - offset);
66 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
67 offset = offsetof(struct ext4_inode, i_checksum_hi);
68 csum = ext4_chksum(sbi, csum, (__u8 *)raw +
69 EXT4_GOOD_OLD_INODE_SIZE,
70 offset - EXT4_GOOD_OLD_INODE_SIZE);
71 if (EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
72 csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
76 csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
77 EXT4_INODE_SIZE(inode->i_sb) - offset);
83 static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
84 struct ext4_inode_info *ei)
86 __u32 provided, calculated;
88 if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
89 cpu_to_le32(EXT4_OS_LINUX) ||
90 !ext4_has_metadata_csum(inode->i_sb))
93 provided = le16_to_cpu(raw->i_checksum_lo);
94 calculated = ext4_inode_csum(inode, raw, ei);
95 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
96 EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
97 provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
101 return provided == calculated;
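/*
 * Recompute the inode checksum and store it in the raw inode: the low 16 bits
 * go into i_checksum_lo and, when the inode is large enough to carry it, the
 * high 16 bits go into i_checksum_hi.
 */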
104 static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
105 struct ext4_inode_info *ei)
109 if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
110 cpu_to_le32(EXT4_OS_LINUX) ||
111 !ext4_has_metadata_csum(inode->i_sb))
114 csum = ext4_inode_csum(inode, raw, ei);
115 raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
116 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
117 EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
118 raw->i_checksum_hi = cpu_to_le16(csum >> 16);
121 static inline int ext4_begin_ordered_truncate(struct inode *inode,
124 trace_ext4_begin_ordered_truncate(inode, new_size);
* If jinode is zero, then we never opened the file for
* writing, so there's no need to call
* jbd2_journal_begin_ordered_truncate() since there are no
* outstanding writes we need to flush.
131 if (!EXT4_I(inode)->jinode)
133 return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
134 EXT4_I(inode)->jinode,
138 static void ext4_invalidatepage(struct page *page, unsigned int offset,
139 unsigned int length);
140 static int __ext4_journalled_writepage(struct page *page, unsigned int len);
141 static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
142 static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
146 * Test whether an inode is a fast symlink.
148 int ext4_inode_is_fast_symlink(struct inode *inode)
150 int ea_blocks = EXT4_I(inode)->i_file_acl ?
151 EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;
153 if (ext4_has_inline_data(inode))
156 return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
* Restart the transaction associated with *handle. This does a commit,
* so before we call here everything must be consistently dirtied against
* this transaction.
164 int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
170 * Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this
171 * moment, get_block can be called only for blocks inside i_size since
172 * page cache has been already dropped and writes are blocked by
173 * i_mutex. So we can safely drop the i_data_sem here.
175 BUG_ON(EXT4_JOURNAL(inode) == NULL);
176 jbd_debug(2, "restarting handle %p\n", handle);
177 up_write(&EXT4_I(inode)->i_data_sem);
178 ret = ext4_journal_restart(handle, nblocks);
179 down_write(&EXT4_I(inode)->i_data_sem);
180 ext4_discard_preallocations(inode);
186 * Called at the last iput() if i_nlink is zero.
188 void ext4_evict_inode(struct inode *inode)
193 trace_ext4_evict_inode(inode);
195 if (inode->i_nlink) {
197 * When journalling data dirty buffers are tracked only in the
198 * journal. So although mm thinks everything is clean and
199 * ready for reaping the inode might still have some pages to
200 * write in the running transaction or waiting to be
201 * checkpointed. Thus calling jbd2_journal_invalidatepage()
202 * (via truncate_inode_pages()) to discard these buffers can
203 * cause data loss. Also even if we did not discard these
204 * buffers, we would have no way to find them after the inode
205 * is reaped and thus user could see stale data if he tries to
206 * read them before the transaction is checkpointed. So be
207 * careful and force everything to disk here... We use
208 * ei->i_datasync_tid to store the newest transaction
209 * containing inode's data.
211 * Note that directories do not have this problem because they
212 * don't use page cache.
214 if (inode->i_ino != EXT4_JOURNAL_INO &&
215 ext4_should_journal_data(inode) &&
216 (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
217 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
218 tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
220 jbd2_complete_transaction(journal, commit_tid);
221 filemap_write_and_wait(&inode->i_data);
223 truncate_inode_pages_final(&inode->i_data);
228 if (is_bad_inode(inode))
230 dquot_initialize(inode);
232 if (ext4_should_order_data(inode))
233 ext4_begin_ordered_truncate(inode, 0);
234 truncate_inode_pages_final(&inode->i_data);
237 * Protect us against freezing - iput() caller didn't have to have any
238 * protection against it
240 sb_start_intwrite(inode->i_sb);
241 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
242 ext4_blocks_for_truncate(inode)+3);
243 if (IS_ERR(handle)) {
244 ext4_std_error(inode->i_sb, PTR_ERR(handle));
* If we're going to skip the normal cleanup, we still need to
* make sure that the in-core orphan linked list is properly
* cleaned up.
250 ext4_orphan_del(NULL, inode);
251 sb_end_intwrite(inode->i_sb);
256 ext4_handle_sync(handle);
258 err = ext4_mark_inode_dirty(handle, inode);
260 ext4_warning(inode->i_sb,
261 "couldn't mark inode dirty (err %d)", err);
264 if (inode->i_blocks) {
265 err = ext4_truncate(inode);
267 ext4_error(inode->i_sb,
268 "couldn't truncate inode %lu (err %d)",
275 * ext4_ext_truncate() doesn't reserve any slop when it
276 * restarts journal transactions; therefore there may not be
277 * enough credits left in the handle to remove the inode from
278 * the orphan list and set the dtime field.
280 if (!ext4_handle_has_enough_credits(handle, 3)) {
281 err = ext4_journal_extend(handle, 3);
283 err = ext4_journal_restart(handle, 3);
285 ext4_warning(inode->i_sb,
286 "couldn't extend journal (err %d)", err);
288 ext4_journal_stop(handle);
289 ext4_orphan_del(NULL, inode);
290 sb_end_intwrite(inode->i_sb);
296 * Kill off the orphan record which ext4_truncate created.
297 * AKPM: I think this can be inside the above `if'.
298 * Note that ext4_orphan_del() has to be able to cope with the
299 * deletion of a non-existent orphan - this is because we don't
300 * know if ext4_truncate() actually created an orphan record.
301 * (Well, we could do this if we need to, but heck - it works)
303 ext4_orphan_del(handle, inode);
304 EXT4_I(inode)->i_dtime = get_seconds();
307 * One subtle ordering requirement: if anything has gone wrong
308 * (transaction abort, IO errors, whatever), then we can still
309 * do these next steps (the fs will already have been marked as
* having errors), but we can't free the inode if the mark_dirty fails.
313 if (ext4_mark_inode_dirty(handle, inode))
314 /* If that failed, just do the required in-core inode clear. */
315 ext4_clear_inode(inode);
317 ext4_free_inode(handle, inode);
318 ext4_journal_stop(handle);
319 sb_end_intwrite(inode->i_sb);
322 ext4_clear_inode(inode); /* We must guarantee clearing of inode... */
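/*
 * Return a pointer to the amount of space this inode has reserved against
 * quota via delayed allocation.
 */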
326 qsize_t *ext4_get_reserved_space(struct inode *inode)
328 return &EXT4_I(inode)->i_reserved_quota;
333 * Called with i_data_sem down, which is important since we can call
334 * ext4_discard_preallocations() from here.
336 void ext4_da_update_reserve_space(struct inode *inode,
337 int used, int quota_claim)
339 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
340 struct ext4_inode_info *ei = EXT4_I(inode);
342 spin_lock(&ei->i_block_reservation_lock);
343 trace_ext4_da_update_reserve_space(inode, used, quota_claim);
344 if (unlikely(used > ei->i_reserved_data_blocks)) {
345 ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
346 "with only %d reserved data blocks",
347 __func__, inode->i_ino, used,
348 ei->i_reserved_data_blocks);
350 used = ei->i_reserved_data_blocks;
353 /* Update per-inode reservations */
354 ei->i_reserved_data_blocks -= used;
355 percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);
357 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
359 /* Update quota subsystem for data blocks */
361 dquot_claim_block(inode, EXT4_C2B(sbi, used));
* We did fallocate with an offset that is already delayed-allocated,
* so on delayed-allocation writeback we should not re-claim the
* quota for fallocated blocks.
368 dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
372 * If we have done all the pending block allocations and if
373 * there aren't any writers on the inode, we can discard the
374 * inode's preallocations.
376 if ((ei->i_reserved_data_blocks == 0) &&
377 (atomic_read(&inode->i_writecount) == 0))
378 ext4_discard_preallocations(inode);
381 static int __check_block_validity(struct inode *inode, const char *func,
383 struct ext4_map_blocks *map)
385 if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
387 ext4_error_inode(inode, func, line, map->m_pblk,
388 "lblock %lu mapped to illegal pblock "
389 "(length %d)", (unsigned long) map->m_lblk,
391 return -EFSCORRUPTED;
396 int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
401 if (ext4_encrypted_inode(inode))
402 return fscrypt_zeroout_range(inode, lblk, pblk, len);
404 ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
411 #define check_block_validity(inode, map) \
412 __check_block_validity((inode), __func__, __LINE__, (map))
414 #ifdef ES_AGGRESSIVE_TEST
415 static void ext4_map_blocks_es_recheck(handle_t *handle,
417 struct ext4_map_blocks *es_map,
418 struct ext4_map_blocks *map,
* There is a race window in which the result is not the same,
* e.g. xfstests #223 when dioread_nolock is enabled. The reason
* is that we look up a block mapping in the extent status tree
* without taking i_data_sem, so by that time the unwritten extent
* could already have been converted.
431 down_read(&EXT4_I(inode)->i_data_sem);
432 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
433 retval = ext4_ext_map_blocks(handle, inode, map, flags &
434 EXT4_GET_BLOCKS_KEEP_SIZE);
436 retval = ext4_ind_map_blocks(handle, inode, map, flags &
437 EXT4_GET_BLOCKS_KEEP_SIZE);
439 up_read((&EXT4_I(inode)->i_data_sem));
* We don't check m_len because the extent will be collapsed in the status
* tree. So m_len might not be equal.
445 if (es_map->m_lblk != map->m_lblk ||
446 es_map->m_flags != map->m_flags ||
447 es_map->m_pblk != map->m_pblk) {
448 printk("ES cache assertion failed for inode: %lu "
449 "es_cached ex [%d/%d/%llu/%x] != "
450 "found ex [%d/%d/%llu/%x] retval %d flags %x\n",
451 inode->i_ino, es_map->m_lblk, es_map->m_len,
452 es_map->m_pblk, es_map->m_flags, map->m_lblk,
453 map->m_len, map->m_pblk, map->m_flags,
457 #endif /* ES_AGGRESSIVE_TEST */
* The ext4_map_blocks() function tries to look up the requested blocks,
* and returns if the blocks are already mapped.
*
* Otherwise it takes the write lock of the i_data_sem and allocates blocks
* and stores the allocated blocks in the result buffer head and marks it
* mapped.
*
* If the file type is extents based, it will call ext4_ext_map_blocks().
* Otherwise, it calls ext4_ind_map_blocks() to handle the indirect mapping.
*
* On success, it returns the number of blocks being mapped or allocated. If
* create==0 and the blocks are pre-allocated and unwritten, the resulting @map
* is marked as unwritten. If create == 1, it will mark @map as mapped.
*
* It returns 0 if plain look up failed (blocks have not been allocated), in
* that case, @map is returned as unmapped but we still do fill map->m_len to
* indicate the length of a hole starting at map->m_lblk.
*
* It returns the error in case of allocation failure.
481 int ext4_map_blocks(handle_t *handle, struct inode *inode,
482 struct ext4_map_blocks *map, int flags)
484 struct extent_status es;
487 #ifdef ES_AGGRESSIVE_TEST
488 struct ext4_map_blocks orig_map;
490 memcpy(&orig_map, map, sizeof(*map));
494 ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
495 "logical block %lu\n", inode->i_ino, flags, map->m_len,
496 (unsigned long) map->m_lblk);
499 * ext4_map_blocks returns an int, and m_len is an unsigned int
501 if (unlikely(map->m_len > INT_MAX))
502 map->m_len = INT_MAX;
/* We can only handle block numbers less than EXT_MAX_BLOCKS */
505 if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
506 return -EFSCORRUPTED;
508 /* Lookup extent status tree firstly */
509 if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
510 if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
511 map->m_pblk = ext4_es_pblock(&es) +
512 map->m_lblk - es.es_lblk;
513 map->m_flags |= ext4_es_is_written(&es) ?
514 EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
515 retval = es.es_len - (map->m_lblk - es.es_lblk);
516 if (retval > map->m_len)
519 } else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
521 retval = es.es_len - (map->m_lblk - es.es_lblk);
522 if (retval > map->m_len)
529 #ifdef ES_AGGRESSIVE_TEST
530 ext4_map_blocks_es_recheck(handle, inode, map,
* Try to see if we can get the block without requesting a new
* file system block.
540 down_read(&EXT4_I(inode)->i_data_sem);
541 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
542 retval = ext4_ext_map_blocks(handle, inode, map, flags &
543 EXT4_GET_BLOCKS_KEEP_SIZE);
545 retval = ext4_ind_map_blocks(handle, inode, map, flags &
546 EXT4_GET_BLOCKS_KEEP_SIZE);
551 if (unlikely(retval != map->m_len)) {
552 ext4_warning(inode->i_sb,
553 "ES len assertion failed for inode "
554 "%lu: retval %d != map->m_len %d",
555 inode->i_ino, retval, map->m_len);
559 status = map->m_flags & EXT4_MAP_UNWRITTEN ?
560 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
561 if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
562 !(status & EXTENT_STATUS_WRITTEN) &&
563 ext4_find_delalloc_range(inode, map->m_lblk,
564 map->m_lblk + map->m_len - 1))
565 status |= EXTENT_STATUS_DELAYED;
566 ret = ext4_es_insert_extent(inode, map->m_lblk,
567 map->m_len, map->m_pblk, status);
571 up_read((&EXT4_I(inode)->i_data_sem));
574 if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
575 ret = check_block_validity(inode, map);
580 /* If it is only a block(s) look up */
581 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
* Returns if the blocks have already been allocated.
*
* Note that if blocks have been preallocated,
* ext4_ext_get_block() returns with create = 0
* and the buffer head unmapped.
591 if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
* If we need to convert the extent to unwritten
* we continue and do the actual work in
* ext4_ext_map_blocks().
597 if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
* Here we clear m_flags because after allocating a new extent,
* it will be set again.
604 map->m_flags &= ~EXT4_MAP_FLAGS;
* Allocating new blocks and/or writing to an unwritten extent
* will possibly result in updating i_data, so we take
* the write lock of i_data_sem, and call get_block()
* with create == 1 flag.
612 down_write(&EXT4_I(inode)->i_data_sem);
615 * We need to check for EXT4 here because migrate
616 * could have changed the inode type in between
618 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
619 retval = ext4_ext_map_blocks(handle, inode, map, flags);
621 retval = ext4_ind_map_blocks(handle, inode, map, flags);
623 if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
625 * We allocated new blocks which will result in
626 * i_data's format changing. Force the migrate
627 * to fail by clearing migrate flags
629 ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
633 * Update reserved blocks/metadata blocks after successful
634 * block allocation which had been deferred till now. We don't
635 * support fallocate for non extent files. So we can update
636 * reserve space here.
639 (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
640 ext4_da_update_reserve_space(inode, retval, 1);
646 if (unlikely(retval != map->m_len)) {
647 ext4_warning(inode->i_sb,
648 "ES len assertion failed for inode "
649 "%lu: retval %d != map->m_len %d",
650 inode->i_ino, retval, map->m_len);
655 * We have to zeroout blocks before inserting them into extent
656 * status tree. Otherwise someone could look them up there and
657 * use them before they are really zeroed. We also have to
658 * unmap metadata before zeroing as otherwise writeback can
659 * overwrite zeros with stale data from block device.
661 if (flags & EXT4_GET_BLOCKS_ZERO &&
662 map->m_flags & EXT4_MAP_MAPPED &&
663 map->m_flags & EXT4_MAP_NEW) {
664 clean_bdev_aliases(inode->i_sb->s_bdev, map->m_pblk,
666 ret = ext4_issue_zeroout(inode, map->m_lblk,
667 map->m_pblk, map->m_len);
675 * If the extent has been zeroed out, we don't need to update
676 * extent status tree.
678 if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
679 ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
680 if (ext4_es_is_written(&es))
683 status = map->m_flags & EXT4_MAP_UNWRITTEN ?
684 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
685 if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
686 !(status & EXTENT_STATUS_WRITTEN) &&
687 ext4_find_delalloc_range(inode, map->m_lblk,
688 map->m_lblk + map->m_len - 1))
689 status |= EXTENT_STATUS_DELAYED;
690 ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
691 map->m_pblk, status);
699 up_write((&EXT4_I(inode)->i_data_sem));
700 if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
701 ret = check_block_validity(inode, map);
706 * Inodes with freshly allocated blocks where contents will be
707 * visible after transaction commit must be on transaction's
710 if (map->m_flags & EXT4_MAP_NEW &&
711 !(map->m_flags & EXT4_MAP_UNWRITTEN) &&
712 !(flags & EXT4_GET_BLOCKS_ZERO) &&
713 !IS_NOQUOTA(inode) &&
714 ext4_should_order_data(inode)) {
715 if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
716 ret = ext4_jbd2_inode_add_wait(handle, inode);
718 ret = ext4_jbd2_inode_add_write(handle, inode);
727 * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
728 * we have to be careful as someone else may be manipulating b_state as well.
730 static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
732 unsigned long old_state;
733 unsigned long new_state;
735 flags &= EXT4_MAP_FLAGS;
737 /* Dummy buffer_head? Set non-atomically. */
739 bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
743 * Someone else may be modifying b_state. Be careful! This is ugly but
744 * once we get rid of using bh as a container for mapping information
745 * to pass to / from get_block functions, this can go away.
748 old_state = READ_ONCE(bh->b_state);
749 new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
751 cmpxchg(&bh->b_state, old_state, new_state) != old_state));
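/*
 * Common helper behind the get_block_t callbacks below: translate the
 * buffer_head request into an ext4_map_blocks() call and copy the resulting
 * mapping (physical block, state flags and length) back into the bh.
 */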
754 static int _ext4_get_block(struct inode *inode, sector_t iblock,
755 struct buffer_head *bh, int flags)
757 struct ext4_map_blocks map;
760 if (ext4_has_inline_data(inode))
764 map.m_len = bh->b_size >> inode->i_blkbits;
766 ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
769 map_bh(bh, inode->i_sb, map.m_pblk);
770 ext4_update_bh_state(bh, map.m_flags);
771 bh->b_size = inode->i_sb->s_blocksize * map.m_len;
773 } else if (ret == 0) {
774 /* hole case, need to fill in bh->b_size */
775 bh->b_size = inode->i_sb->s_blocksize * map.m_len;
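/*
 * Plain get_block_t wrapper: looks up the mapping for @iblock and, when
 * @create is set, allocates regular written blocks.
 */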
780 int ext4_get_block(struct inode *inode, sector_t iblock,
781 struct buffer_head *bh, int create)
783 return _ext4_get_block(inode, iblock, bh,
784 create ? EXT4_GET_BLOCKS_CREATE : 0);
788 * Get block function used when preparing for buffered write if we require
789 * creating an unwritten extent if blocks haven't been allocated. The extent
790 * will be converted to written after the IO is complete.
792 int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
793 struct buffer_head *bh_result, int create)
795 ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n",
796 inode->i_ino, create);
797 return _ext4_get_block(inode, iblock, bh_result,
798 EXT4_GET_BLOCKS_IO_CREATE_EXT);
801 /* Maximum number of blocks we map for direct IO at once. */
802 #define DIO_MAX_BLOCKS 4096
* Get blocks function for the cases that need to start a transaction -
* generally different cases of direct IO and DAX IO. It also handles retries
* in case of ENOSPC.
809 static int ext4_get_block_trans(struct inode *inode, sector_t iblock,
810 struct buffer_head *bh_result, int flags)
817 /* Trim mapping request to maximum we can map at once for DIO */
818 if (bh_result->b_size >> inode->i_blkbits > DIO_MAX_BLOCKS)
819 bh_result->b_size = DIO_MAX_BLOCKS << inode->i_blkbits;
820 dio_credits = ext4_chunk_trans_blocks(inode,
821 bh_result->b_size >> inode->i_blkbits);
823 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
825 return PTR_ERR(handle);
827 ret = _ext4_get_block(inode, iblock, bh_result, flags);
828 ext4_journal_stop(handle);
830 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
835 /* Get block function for DIO reads and writes to inodes without extents */
836 int ext4_dio_get_block(struct inode *inode, sector_t iblock,
837 struct buffer_head *bh, int create)
839 /* We don't expect handle for direct IO */
840 WARN_ON_ONCE(ext4_journal_current_handle());
843 return _ext4_get_block(inode, iblock, bh, 0);
844 return ext4_get_block_trans(inode, iblock, bh, EXT4_GET_BLOCKS_CREATE);
848 * Get block function for AIO DIO writes when we create unwritten extent if
849 * blocks are not allocated yet. The extent will be converted to written
850 * after IO is complete.
852 static int ext4_dio_get_block_unwritten_async(struct inode *inode,
853 sector_t iblock, struct buffer_head *bh_result, int create)
857 /* We don't expect handle for direct IO */
858 WARN_ON_ONCE(ext4_journal_current_handle());
860 ret = ext4_get_block_trans(inode, iblock, bh_result,
861 EXT4_GET_BLOCKS_IO_CREATE_EXT);
864 * When doing DIO using unwritten extents, we need io_end to convert
865 * unwritten extents to written on IO completion. We allocate io_end
866 * once we spot unwritten extent and store it in b_private. Generic
867 * DIO code keeps b_private set and furthermore passes the value to
868 * our completion callback in 'private' argument.
870 if (!ret && buffer_unwritten(bh_result)) {
871 if (!bh_result->b_private) {
872 ext4_io_end_t *io_end;
874 io_end = ext4_init_io_end(inode, GFP_KERNEL);
877 bh_result->b_private = io_end;
878 ext4_set_io_unwritten_flag(inode, io_end);
880 set_buffer_defer_completion(bh_result);
887 * Get block function for non-AIO DIO writes when we create unwritten extent if
888 * blocks are not allocated yet. The extent will be converted to written
889 * after IO is complete from ext4_ext_direct_IO() function.
891 static int ext4_dio_get_block_unwritten_sync(struct inode *inode,
892 sector_t iblock, struct buffer_head *bh_result, int create)
896 /* We don't expect handle for direct IO */
897 WARN_ON_ONCE(ext4_journal_current_handle());
899 ret = ext4_get_block_trans(inode, iblock, bh_result,
900 EXT4_GET_BLOCKS_IO_CREATE_EXT);
903 * Mark inode as having pending DIO writes to unwritten extents.
* ext4_ext_direct_IO() checks this flag and converts extents to
* written.
907 if (!ret && buffer_unwritten(bh_result))
908 ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
913 static int ext4_dio_get_block_overwrite(struct inode *inode, sector_t iblock,
914 struct buffer_head *bh_result, int create)
918 ext4_debug("ext4_dio_get_block_overwrite: inode %lu, create flag %d\n",
919 inode->i_ino, create);
920 /* We don't expect handle for direct IO */
921 WARN_ON_ONCE(ext4_journal_current_handle());
923 ret = _ext4_get_block(inode, iblock, bh_result, 0);
925 * Blocks should have been preallocated! ext4_file_write_iter() checks
928 WARN_ON_ONCE(!buffer_mapped(bh_result) || buffer_unwritten(bh_result));
935 * `handle' can be NULL if create is zero
937 struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
938 ext4_lblk_t block, int map_flags)
940 struct ext4_map_blocks map;
941 struct buffer_head *bh;
942 int create = map_flags & EXT4_GET_BLOCKS_CREATE;
945 J_ASSERT(handle != NULL || create == 0);
949 err = ext4_map_blocks(handle, inode, &map, map_flags);
952 return create ? ERR_PTR(-ENOSPC) : NULL;
956 bh = sb_getblk(inode->i_sb, map.m_pblk);
958 return ERR_PTR(-ENOMEM);
959 if (map.m_flags & EXT4_MAP_NEW) {
960 J_ASSERT(create != 0);
961 J_ASSERT(handle != NULL);
964 * Now that we do not always journal data, we should
965 * keep in mind whether this should always journal the
966 * new buffer as metadata. For now, regular file
* writes use ext4_get_block instead, so it's not a problem.
971 BUFFER_TRACE(bh, "call get_create_access");
972 err = ext4_journal_get_create_access(handle, bh);
977 if (!buffer_uptodate(bh)) {
978 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
979 set_buffer_uptodate(bh);
982 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
983 err = ext4_handle_dirty_metadata(handle, inode, bh);
987 BUFFER_TRACE(bh, "not a new buffer");
994 struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
995 ext4_lblk_t block, int map_flags)
997 struct buffer_head *bh;
999 bh = ext4_getblk(handle, inode, block, map_flags);
1002 if (!bh || buffer_uptodate(bh))
1004 ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh);
1006 if (buffer_uptodate(bh))
1009 return ERR_PTR(-EIO);
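/*
 * Walk the buffers of a page and call @fn on each buffer overlapping the byte
 * range [from, to); buffers outside the range only update *partial when they
 * are not uptodate. The first non-zero value returned by @fn stops the walk
 * and is returned to the caller, e.g.:
 *
 *	ext4_walk_page_buffers(handle, page_buffers(page), from, to,
 *			       NULL, do_journal_get_write_access);
 */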
1012 int ext4_walk_page_buffers(handle_t *handle,
1013 struct buffer_head *head,
1017 int (*fn)(handle_t *handle,
1018 struct buffer_head *bh))
1020 struct buffer_head *bh;
1021 unsigned block_start, block_end;
1022 unsigned blocksize = head->b_size;
1024 struct buffer_head *next;
1026 for (bh = head, block_start = 0;
1027 ret == 0 && (bh != head || !block_start);
1028 block_start = block_end, bh = next) {
1029 next = bh->b_this_page;
1030 block_end = block_start + blocksize;
1031 if (block_end <= from || block_start >= to) {
1032 if (partial && !buffer_uptodate(bh))
1036 err = (*fn)(handle, bh);
1044 * To preserve ordering, it is essential that the hole instantiation and
1045 * the data write be encapsulated in a single transaction. We cannot
1046 * close off a transaction and start a new one between the ext4_get_block()
1047 * and the commit_write(). So doing the jbd2_journal_start at the start of
1048 * prepare_write() is the right place.
1050 * Also, this function can nest inside ext4_writepage(). In that case, we
1051 * *know* that ext4_writepage() has generated enough buffer credits to do the
1052 * whole page. So we won't block on the journal in that case, which is good,
1053 * because the caller may be PF_MEMALLOC.
1055 * By accident, ext4 can be reentered when a transaction is open via
1056 * quota file writes. If we were to commit the transaction while thus
1057 * reentered, there can be a deadlock - we would be holding a quota
1058 * lock, and the commit would never complete if another thread had a
1059 * transaction open and was blocking on the quota lock - a ranking
1062 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
1063 * will _not_ run commit under these circumstances because handle->h_ref
1064 * is elevated. We'll still have enough credits for the tiny quotafile
1067 int do_journal_get_write_access(handle_t *handle,
1068 struct buffer_head *bh)
1070 int dirty = buffer_dirty(bh);
1073 if (!buffer_mapped(bh) || buffer_freed(bh))
1076 * __block_write_begin() could have dirtied some buffers. Clean
1077 * the dirty bit as jbd2_journal_get_write_access() could complain
1078 * otherwise about fs integrity issues. Setting of the dirty bit
1079 * by __block_write_begin() isn't a real problem here as we clear
1080 * the bit before releasing a page lock and thus writeback cannot
1081 * ever write the buffer.
1084 clear_buffer_dirty(bh);
1085 BUFFER_TRACE(bh, "get write access");
1086 ret = ext4_journal_get_write_access(handle, bh);
1088 ret = ext4_handle_dirty_metadata(handle, NULL, bh);
1092 #ifdef CONFIG_EXT4_FS_ENCRYPTION
1093 static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
1094 get_block_t *get_block)
1096 unsigned from = pos & (PAGE_SIZE - 1);
1097 unsigned to = from + len;
1098 struct inode *inode = page->mapping->host;
1099 unsigned block_start, block_end;
1102 unsigned blocksize = inode->i_sb->s_blocksize;
1104 struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
1105 bool decrypt = false;
1107 BUG_ON(!PageLocked(page));
1108 BUG_ON(from > PAGE_SIZE);
1109 BUG_ON(to > PAGE_SIZE);
1112 if (!page_has_buffers(page))
1113 create_empty_buffers(page, blocksize, 0);
1114 head = page_buffers(page);
1115 bbits = ilog2(blocksize);
1116 block = (sector_t)page->index << (PAGE_SHIFT - bbits);
1118 for (bh = head, block_start = 0; bh != head || !block_start;
1119 block++, block_start = block_end, bh = bh->b_this_page) {
1120 block_end = block_start + blocksize;
1121 if (block_end <= from || block_start >= to) {
1122 if (PageUptodate(page)) {
1123 if (!buffer_uptodate(bh))
1124 set_buffer_uptodate(bh);
1129 clear_buffer_new(bh);
1130 if (!buffer_mapped(bh)) {
1131 WARN_ON(bh->b_size != blocksize);
1132 err = get_block(inode, block, bh, 1);
1135 if (buffer_new(bh)) {
1136 clean_bdev_bh_alias(bh);
1137 if (PageUptodate(page)) {
1138 clear_buffer_new(bh);
1139 set_buffer_uptodate(bh);
1140 mark_buffer_dirty(bh);
1143 if (block_end > to || block_start < from)
1144 zero_user_segments(page, to, block_end,
1149 if (PageUptodate(page)) {
1150 if (!buffer_uptodate(bh))
1151 set_buffer_uptodate(bh);
1154 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1155 !buffer_unwritten(bh) &&
1156 (block_start < from || block_end > to)) {
1157 ll_rw_block(REQ_OP_READ, 0, 1, &bh);
1159 decrypt = ext4_encrypted_inode(inode) &&
1160 S_ISREG(inode->i_mode);
1164 * If we issued read requests, let them complete.
1166 while (wait_bh > wait) {
1167 wait_on_buffer(*--wait_bh);
1168 if (!buffer_uptodate(*wait_bh))
1172 page_zero_new_buffers(page, from, to);
1174 err = fscrypt_decrypt_page(page->mapping->host, page,
1175 PAGE_SIZE, 0, page->index);
1180 static int ext4_write_begin(struct file *file, struct address_space *mapping,
1181 loff_t pos, unsigned len, unsigned flags,
1182 struct page **pagep, void **fsdata)
1184 struct inode *inode = mapping->host;
1185 int ret, needed_blocks;
1192 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
1195 trace_ext4_write_begin(inode, pos, len, flags);
1197 * Reserve one block more for addition to orphan list in case
1198 * we allocate blocks but write fails for some reason
1200 needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
1201 index = pos >> PAGE_SHIFT;
1202 from = pos & (PAGE_SIZE - 1);
1205 if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
1206 ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
1215 * grab_cache_page_write_begin() can take a long time if the
1216 * system is thrashing due to memory pressure, or if the page
1217 * is being written back. So grab it first before we start
1218 * the transaction handle. This also allows us to allocate
1219 * the page (if needed) without using GFP_NOFS.
1222 page = grab_cache_page_write_begin(mapping, index, flags);
1228 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
1229 if (IS_ERR(handle)) {
1231 return PTR_ERR(handle);
1235 if (page->mapping != mapping) {
1236 /* The page got truncated from under us */
1239 ext4_journal_stop(handle);
1242 /* In case writeback began while the page was unlocked */
1243 wait_for_stable_page(page);
1245 #ifdef CONFIG_EXT4_FS_ENCRYPTION
1246 if (ext4_should_dioread_nolock(inode))
1247 ret = ext4_block_write_begin(page, pos, len,
1248 ext4_get_block_unwritten);
1250 ret = ext4_block_write_begin(page, pos, len,
1253 if (ext4_should_dioread_nolock(inode))
1254 ret = __block_write_begin(page, pos, len,
1255 ext4_get_block_unwritten);
1257 ret = __block_write_begin(page, pos, len, ext4_get_block);
1259 if (!ret && ext4_should_journal_data(inode)) {
1260 ret = ext4_walk_page_buffers(handle, page_buffers(page),
1262 do_journal_get_write_access);
1268 * __block_write_begin may have instantiated a few blocks
1269 * outside i_size. Trim these off again. Don't need
1270 * i_size_read because we hold i_mutex.
* Add inode to orphan list in case we crash before
* truncate finishes.
1275 if (pos + len > inode->i_size && ext4_can_truncate(inode))
1276 ext4_orphan_add(handle, inode);
1278 ext4_journal_stop(handle);
1279 if (pos + len > inode->i_size) {
1280 ext4_truncate_failed_write(inode);
1282 * If truncate failed early the inode might
1283 * still be on the orphan list; we need to
1284 * make sure the inode is removed from the
1285 * orphan list in that case.
1288 ext4_orphan_del(NULL, inode);
1291 if (ret == -ENOSPC &&
1292 ext4_should_retry_alloc(inode->i_sb, &retries))
1301 /* For write_end() in data=journal mode */
1302 static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1305 if (!buffer_mapped(bh) || buffer_freed(bh))
1307 set_buffer_uptodate(bh);
1308 ret = ext4_handle_dirty_metadata(handle, NULL, bh);
1309 clear_buffer_meta(bh);
1310 clear_buffer_prio(bh);
1315 * We need to pick up the new inode size which generic_commit_write gave us
1316 * `file' can be NULL - eg, when called from page_symlink().
1318 * ext4 never places buffers on inode->i_mapping->private_list. metadata
1319 * buffers are managed internally.
1321 static int ext4_write_end(struct file *file,
1322 struct address_space *mapping,
1323 loff_t pos, unsigned len, unsigned copied,
1324 struct page *page, void *fsdata)
1326 handle_t *handle = ext4_journal_current_handle();
1327 struct inode *inode = mapping->host;
1328 loff_t old_size = inode->i_size;
1330 int i_size_changed = 0;
1332 trace_ext4_write_end(inode, pos, len, copied);
1333 if (ext4_has_inline_data(inode)) {
1334 ret = ext4_write_inline_data_end(inode, pos, len,
1343 copied = block_write_end(file, mapping, pos,
1344 len, copied, page, fsdata);
1346 * it's important to update i_size while still holding page lock:
1347 * page writeout could otherwise come in and zero beyond i_size.
1349 i_size_changed = ext4_update_inode_size(inode, pos + copied);
1354 pagecache_isize_extended(inode, old_size, pos);
1356 * Don't mark the inode dirty under page lock. First, it unnecessarily
1357 * makes the holding time of page lock longer. Second, it forces lock
1358 * ordering of page lock and transaction start for journaling
1362 ext4_mark_inode_dirty(handle, inode);
1364 if (pos + len > inode->i_size && ext4_can_truncate(inode))
/* If we have allocated more blocks and copied
* less, we will have blocks allocated outside
* inode->i_size. So truncate them.
1369 ext4_orphan_add(handle, inode);
1371 ret2 = ext4_journal_stop(handle);
1375 if (pos + len > inode->i_size) {
1376 ext4_truncate_failed_write(inode);
1378 * If truncate failed early the inode might still be
1379 * on the orphan list; we need to make sure the inode
1380 * is removed from the orphan list in that case.
1383 ext4_orphan_del(NULL, inode);
1386 return ret ? ret : copied;
1390 * This is a private version of page_zero_new_buffers() which doesn't
1391 * set the buffer to be dirty, since in data=journalled mode we need
1392 * to call ext4_handle_dirty_metadata() instead.
1394 static void ext4_journalled_zero_new_buffers(handle_t *handle,
1396 unsigned from, unsigned to)
1398 unsigned int block_start = 0, block_end;
1399 struct buffer_head *head, *bh;
1401 bh = head = page_buffers(page);
1403 block_end = block_start + bh->b_size;
1404 if (buffer_new(bh)) {
1405 if (block_end > from && block_start < to) {
1406 if (!PageUptodate(page)) {
1407 unsigned start, size;
1409 start = max(from, block_start);
1410 size = min(to, block_end) - start;
1412 zero_user(page, start, size);
1413 write_end_fn(handle, bh);
1415 clear_buffer_new(bh);
1418 block_start = block_end;
1419 bh = bh->b_this_page;
1420 } while (bh != head);
1423 static int ext4_journalled_write_end(struct file *file,
1424 struct address_space *mapping,
1425 loff_t pos, unsigned len, unsigned copied,
1426 struct page *page, void *fsdata)
1428 handle_t *handle = ext4_journal_current_handle();
1429 struct inode *inode = mapping->host;
1430 loff_t old_size = inode->i_size;
1434 int size_changed = 0;
1436 trace_ext4_journalled_write_end(inode, pos, len, copied);
1437 from = pos & (PAGE_SIZE - 1);
1440 BUG_ON(!ext4_handle_valid(handle));
1442 if (ext4_has_inline_data(inode)) {
1443 ret = ext4_write_inline_data_end(inode, pos, len,
1451 } else if (unlikely(copied < len) && !PageUptodate(page)) {
1453 ext4_journalled_zero_new_buffers(handle, page, from, to);
1455 if (unlikely(copied < len))
1456 ext4_journalled_zero_new_buffers(handle, page,
1458 ret = ext4_walk_page_buffers(handle, page_buffers(page), from,
1459 from + copied, &partial,
1462 SetPageUptodate(page);
1464 size_changed = ext4_update_inode_size(inode, pos + copied);
1465 ext4_set_inode_state(inode, EXT4_STATE_JDATA);
1466 EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
1471 pagecache_isize_extended(inode, old_size, pos);
1474 ret2 = ext4_mark_inode_dirty(handle, inode);
1479 if (pos + len > inode->i_size && ext4_can_truncate(inode))
/* If we have allocated more blocks and copied
* less, we will have blocks allocated outside
* inode->i_size. So truncate them.
1484 ext4_orphan_add(handle, inode);
1487 ret2 = ext4_journal_stop(handle);
1490 if (pos + len > inode->i_size) {
1491 ext4_truncate_failed_write(inode);
1493 * If truncate failed early the inode might still be
1494 * on the orphan list; we need to make sure the inode
1495 * is removed from the orphan list in that case.
1498 ext4_orphan_del(NULL, inode);
1501 return ret ? ret : copied;
1505 * Reserve space for a single cluster
1507 static int ext4_da_reserve_space(struct inode *inode)
1509 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1510 struct ext4_inode_info *ei = EXT4_I(inode);
1514 * We will charge metadata quota at writeout time; this saves
1515 * us from metadata over-estimation, though we may go over by
1516 * a small amount in the end. Here we just reserve for data.
1518 ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
1522 spin_lock(&ei->i_block_reservation_lock);
1523 if (ext4_claim_free_clusters(sbi, 1, 0)) {
1524 spin_unlock(&ei->i_block_reservation_lock);
1525 dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
1528 ei->i_reserved_data_blocks++;
1529 trace_ext4_da_reserve_space(inode);
1530 spin_unlock(&ei->i_block_reservation_lock);
1532 return 0; /* success */
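/*
 * Give back @to_free clusters of the delayed-allocation reservation: update
 * the per-inode and per-filesystem dirty-cluster accounting and release the
 * corresponding quota reservation.
 */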
1535 static void ext4_da_release_space(struct inode *inode, int to_free)
1537 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1538 struct ext4_inode_info *ei = EXT4_I(inode);
1541 return; /* Nothing to release, exit */
1543 spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1545 trace_ext4_da_release_space(inode, to_free);
1546 if (unlikely(to_free > ei->i_reserved_data_blocks)) {
1548 * if there aren't enough reserved blocks, then the
1549 * counter is messed up somewhere. Since this
1550 * function is called from invalidate page, it's
1551 * harmless to return without any action.
1553 ext4_warning(inode->i_sb, "ext4_da_release_space: "
1554 "ino %lu, to_free %d with only %d reserved "
1555 "data blocks", inode->i_ino, to_free,
1556 ei->i_reserved_data_blocks);
1558 to_free = ei->i_reserved_data_blocks;
1560 ei->i_reserved_data_blocks -= to_free;
1562 /* update fs dirty data blocks counter */
1563 percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);
1565 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1567 dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
1570 static void ext4_da_page_release_reservation(struct page *page,
1571 unsigned int offset,
1572 unsigned int length)
1574 int to_release = 0, contiguous_blks = 0;
1575 struct buffer_head *head, *bh;
1576 unsigned int curr_off = 0;
1577 struct inode *inode = page->mapping->host;
1578 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1579 unsigned int stop = offset + length;
1583 BUG_ON(stop > PAGE_SIZE || stop < length);
1585 head = page_buffers(page);
1588 unsigned int next_off = curr_off + bh->b_size;
1590 if (next_off > stop)
1593 if ((offset <= curr_off) && (buffer_delay(bh))) {
1596 clear_buffer_delay(bh);
1597 } else if (contiguous_blks) {
1598 lblk = page->index <<
1599 (PAGE_SHIFT - inode->i_blkbits);
1600 lblk += (curr_off >> inode->i_blkbits) -
1602 ext4_es_remove_extent(inode, lblk, contiguous_blks);
1603 contiguous_blks = 0;
1605 curr_off = next_off;
1606 } while ((bh = bh->b_this_page) != head);
1608 if (contiguous_blks) {
1609 lblk = page->index << (PAGE_SHIFT - inode->i_blkbits);
1610 lblk += (curr_off >> inode->i_blkbits) - contiguous_blks;
1611 ext4_es_remove_extent(inode, lblk, contiguous_blks);
1614 /* If we have released all the blocks belonging to a cluster, then we
1615 * need to release the reserved space for that cluster. */
1616 num_clusters = EXT4_NUM_B2C(sbi, to_release);
1617 while (num_clusters > 0) {
1618 lblk = (page->index << (PAGE_SHIFT - inode->i_blkbits)) +
1619 ((num_clusters - 1) << sbi->s_cluster_bits);
1620 if (sbi->s_cluster_ratio == 1 ||
1621 !ext4_find_delalloc_cluster(inode, lblk))
1622 ext4_da_release_space(inode, 1);
1629 * Delayed allocation stuff
1632 struct mpage_da_data {
1633 struct inode *inode;
1634 struct writeback_control *wbc;
1636 pgoff_t first_page; /* The first page to write */
1637 pgoff_t next_page; /* Current page to examine */
1638 pgoff_t last_page; /* Last page to examine */
1640 * Extent to map - this can be after first_page because that can be
1641 * fully mapped. We somewhat abuse m_flags to store whether the extent
1642 * is delalloc or unwritten.
1644 struct ext4_map_blocks map;
1645 struct ext4_io_submit io_submit; /* IO submission data */
1646 unsigned int do_map:1;
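/*
 * Clean up pages in the range [first_page, next_page) that were collected for
 * writeback but will not be written after all (e.g. on error): clear their
 * dirty bits, invalidate them and, when requested, drop the corresponding
 * entries from the extent status tree.
 */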
1649 static void mpage_release_unused_pages(struct mpage_da_data *mpd,
1654 struct pagevec pvec;
1655 struct inode *inode = mpd->inode;
1656 struct address_space *mapping = inode->i_mapping;
1658 /* This is necessary when next_page == 0. */
1659 if (mpd->first_page >= mpd->next_page)
1662 index = mpd->first_page;
1663 end = mpd->next_page - 1;
1665 ext4_lblk_t start, last;
1666 start = index << (PAGE_SHIFT - inode->i_blkbits);
1667 last = end << (PAGE_SHIFT - inode->i_blkbits);
1668 ext4_es_remove_extent(inode, start, last - start + 1);
1671 pagevec_init(&pvec, 0);
1672 while (index <= end) {
1673 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
1676 for (i = 0; i < nr_pages; i++) {
1677 struct page *page = pvec.pages[i];
1678 if (page->index > end)
1680 BUG_ON(!PageLocked(page));
1681 BUG_ON(PageWriteback(page));
1683 if (page_mapped(page))
1684 clear_page_dirty_for_io(page);
1685 block_invalidatepage(page, 0, PAGE_SIZE);
1686 ClearPageUptodate(page);
1690 index = pvec.pages[nr_pages - 1]->index + 1;
1691 pagevec_release(&pvec);
1695 static void ext4_print_free_blocks(struct inode *inode)
1697 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1698 struct super_block *sb = inode->i_sb;
1699 struct ext4_inode_info *ei = EXT4_I(inode);
1701 ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
1702 EXT4_C2B(EXT4_SB(inode->i_sb),
1703 ext4_count_free_clusters(sb)));
1704 ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
1705 ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
1706 (long long) EXT4_C2B(EXT4_SB(sb),
1707 percpu_counter_sum(&sbi->s_freeclusters_counter)));
1708 ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
1709 (long long) EXT4_C2B(EXT4_SB(sb),
1710 percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
1711 ext4_msg(sb, KERN_CRIT, "Block reservation details");
1712 ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
1713 ei->i_reserved_data_blocks);
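/*
 * Helper for ext4_walk_page_buffers(): true if the buffer is dirty and still
 * needs either block allocation (delayed) or unwritten-to-written conversion.
 */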
1717 static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
1719 return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
* This function grabs code from the very beginning of
* ext4_map_blocks, but assumes that the caller is from delayed write
* time. This function looks up the requested blocks and sets the
* buffer delay bit under the protection of i_data_sem.
1728 static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
1729 struct ext4_map_blocks *map,
1730 struct buffer_head *bh)
1732 struct extent_status es;
1734 sector_t invalid_block = ~((sector_t) 0xffff);
1735 #ifdef ES_AGGRESSIVE_TEST
1736 struct ext4_map_blocks orig_map;
1738 memcpy(&orig_map, map, sizeof(*map));
1741 if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
1745 ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u,"
1746 "logical block %lu\n", inode->i_ino, map->m_len,
1747 (unsigned long) map->m_lblk);
1749 /* Lookup extent status tree firstly */
1750 if (ext4_es_lookup_extent(inode, iblock, &es)) {
1751 if (ext4_es_is_hole(&es)) {
1753 down_read(&EXT4_I(inode)->i_data_sem);
1758 * Delayed extent could be allocated by fallocate.
1759 * So we need to check it.
1761 if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {
1762 map_bh(bh, inode->i_sb, invalid_block);
1764 set_buffer_delay(bh);
1768 map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk;
1769 retval = es.es_len - (iblock - es.es_lblk);
1770 if (retval > map->m_len)
1771 retval = map->m_len;
1772 map->m_len = retval;
1773 if (ext4_es_is_written(&es))
1774 map->m_flags |= EXT4_MAP_MAPPED;
1775 else if (ext4_es_is_unwritten(&es))
1776 map->m_flags |= EXT4_MAP_UNWRITTEN;
1780 #ifdef ES_AGGRESSIVE_TEST
1781 ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
1787 * Try to see if we can get the block without requesting a new
1788 * file system block.
1790 down_read(&EXT4_I(inode)->i_data_sem);
1791 if (ext4_has_inline_data(inode))
1793 else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
1794 retval = ext4_ext_map_blocks(NULL, inode, map, 0);
1796 retval = ext4_ind_map_blocks(NULL, inode, map, 0);
1802 * XXX: __block_prepare_write() unmaps passed block,
* If the block was allocated from a previously allocated cluster,
* then we don't need to reserve it again. However, we still need
* to reserve metadata for every block we're going to write.
1810 if (EXT4_SB(inode->i_sb)->s_cluster_ratio == 1 ||
1811 !ext4_find_delalloc_cluster(inode, map->m_lblk)) {
1812 ret = ext4_da_reserve_space(inode);
1814 /* not enough space to reserve */
1820 ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
1821 ~0, EXTENT_STATUS_DELAYED);
1827 map_bh(bh, inode->i_sb, invalid_block);
1829 set_buffer_delay(bh);
1830 } else if (retval > 0) {
1832 unsigned int status;
1834 if (unlikely(retval != map->m_len)) {
1835 ext4_warning(inode->i_sb,
1836 "ES len assertion failed for inode "
1837 "%lu: retval %d != map->m_len %d",
1838 inode->i_ino, retval, map->m_len);
1842 status = map->m_flags & EXT4_MAP_UNWRITTEN ?
1843 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
1844 ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
1845 map->m_pblk, status);
1851 up_read((&EXT4_I(inode)->i_data_sem));
* This is a special get_block_t callback which is used by
* ext4_da_write_begin(). It will either return a mapped block or
* reserve space for a single block.
*
* For a delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
* We also have b_blocknr = -1 and b_bdev initialized properly.
*
* For an unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
* We also have b_blocknr = the physical block mapping the unwritten extent
* and b_bdev initialized properly.
1868 int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
1869 struct buffer_head *bh, int create)
1871 struct ext4_map_blocks map;
1874 BUG_ON(create == 0);
1875 BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
1877 map.m_lblk = iblock;
* First, we need to know whether the block is already allocated;
* preallocated blocks are unmapped but should be treated
* the same as allocated blocks.
1885 ret = ext4_da_map_blocks(inode, iblock, &map, bh);
1889 map_bh(bh, inode->i_sb, map.m_pblk);
1890 ext4_update_bh_state(bh, map.m_flags);
1892 if (buffer_unwritten(bh)) {
1893 /* A delayed write to unwritten bh should be marked
1894 * new and mapped. Mapped ensures that we don't do
1895 * get_block multiple times when we write to the same
1896 * offset and new ensures that we do proper zero out
1897 * for partial write.
1900 set_buffer_mapped(bh);
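/*
 * bget_one()/bput_one() are handed to ext4_walk_page_buffers() to take and
 * drop an extra reference on each buffer while the journalled page is
 * processed with the page lock temporarily released.
 */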
1905 static int bget_one(handle_t *handle, struct buffer_head *bh)
1911 static int bput_one(handle_t *handle, struct buffer_head *bh)
1917 static int __ext4_journalled_writepage(struct page *page,
1920 struct address_space *mapping = page->mapping;
1921 struct inode *inode = mapping->host;
1922 struct buffer_head *page_bufs = NULL;
1923 handle_t *handle = NULL;
1924 int ret = 0, err = 0;
1925 int inline_data = ext4_has_inline_data(inode);
1926 struct buffer_head *inode_bh = NULL;
1928 ClearPageChecked(page);
1931 BUG_ON(page->index != 0);
1932 BUG_ON(len > ext4_get_max_inline_size(inode));
1933 inode_bh = ext4_journalled_write_inline_data(inode, len, page);
1934 if (inode_bh == NULL)
1937 page_bufs = page_buffers(page);
1942 ext4_walk_page_buffers(handle, page_bufs, 0, len,
1946 * We need to release the page lock before we start the
1947 * journal, so grab a reference so the page won't disappear
1948 * out from under us.
1953 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
1954 ext4_writepage_trans_blocks(inode));
1955 if (IS_ERR(handle)) {
1956 ret = PTR_ERR(handle);
1958 goto out_no_pagelock;
1960 BUG_ON(!ext4_handle_valid(handle));
1964 if (page->mapping != mapping) {
1965 /* The page got truncated from under us */
1966 ext4_journal_stop(handle);
1972 BUFFER_TRACE(inode_bh, "get write access");
1973 ret = ext4_journal_get_write_access(handle, inode_bh);
1975 err = ext4_handle_dirty_metadata(handle, inode, inode_bh);
1978 ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
1979 do_journal_get_write_access);
1981 err = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
1986 EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
1987 err = ext4_journal_stop(handle);
1991 if (!ext4_has_inline_data(inode))
1992 ext4_walk_page_buffers(NULL, page_bufs, 0, len,
1994 ext4_set_inode_state(inode, EXT4_STATE_JDATA);
2003 * Note that we don't need to start a transaction unless we're journaling data
* because we should have holes filled from ext4_page_mkwrite(). We don't even
* need to add the inode to the transaction's list in ordered mode because if
2006 * we are writing back data added by write(), the inode is already there and if
2007 * we are writing back data modified via mmap(), no one guarantees in which
2008 * transaction the data will hit the disk. In case we are journaling data, we
2009 * cannot start transaction directly because transaction start ranks above page
2010 * lock so we have to do some magic.
2012 * This function can get called via...
2013 * - ext4_writepages after taking page lock (have journal handle)
2014 * - journal_submit_inode_data_buffers (no journal handle)
2015 * - shrink_page_list via the kswapd/direct reclaim (no journal handle)
2016 * - grab_page_cache when doing write_begin (have journal handle)
* We don't do any block allocation in this function. If we have a page with
* multiple blocks we need to write those buffer_heads that are mapped. This
* is important for mmap-based writes. So if we do, with blocksize 1K,
* truncate(f, 1024);
* a = mmap(f, 0, 4096);
* a[0] = 'a';
* truncate(f, 4096);
* we have in the page the first buffer_head mapped via the page_mkwrite callback
* but the other buffer_heads would be unmapped but dirty (dirtied via
* do_wp_page). So writepage should write the first block. If we modify
* the mmap area beyond 1024 we will again get a page fault and the
* page_mkwrite callback will do the block allocation and mark the
* buffer_heads mapped.
*
* We redirty the page if we have any buffer_heads that are either delayed or
* unwritten in the page.
*
* We can get recursively called as shown below.
*
*	ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
*		ext4_writepage()
*
* But since we don't do any block allocation we should not deadlock.
* The page also has the dirty flag cleared so we don't get a recursive page_lock.
2043 static int ext4_writepage(struct page *page,
2044 struct writeback_control *wbc)
2049 struct buffer_head *page_bufs = NULL;
2050 struct inode *inode = page->mapping->host;
2051 struct ext4_io_submit io_submit;
2052 bool keep_towrite = false;
2054 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
2055 ext4_invalidatepage(page, 0, PAGE_SIZE);
2060 trace_ext4_writepage(page);
2061 size = i_size_read(inode);
2062 if (page->index == size >> PAGE_SHIFT)
2063 len = size & ~PAGE_MASK;
2067 page_bufs = page_buffers(page);
2069 * We cannot do block allocation or other extent handling in this
2070 * function. If there are buffers needing that, we have to redirty
2071 * the page. But we may reach here when we do a journal commit via
2072 * journal_submit_inode_data_buffers() and in that case we must write
2073 * allocated buffers to achieve data=ordered mode guarantees.
2075 * Also, if there is only one buffer per page (the fs block
2076 * size == the page size), if one buffer needs block
2077 * allocation or needs to modify the extent tree to clear the
2078 * unwritten flag, we know that the page can't be written at
2079 * all, so we might as well refuse the write immediately.
2080 * Unfortunately if the block size != page size, we can't as
2081 * easily detect this case using ext4_walk_page_buffers(), but
2082 * for the extremely common case, this is an optimization that
2083 * skips a useless round trip through ext4_bio_write_page().
2085 if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2086 ext4_bh_delay_or_unwritten)) {
2087 redirty_page_for_writepage(wbc, page);
2088 if ((current->flags & PF_MEMALLOC) ||
2089 (inode->i_sb->s_blocksize == PAGE_SIZE)) {
2091 * For memory cleaning there's no point in writing only
2092 * some buffers. So just bail out. Warn if we came here
2093 * from direct reclaim.
2095 WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD))
2100 keep_towrite = true;
2103 if (PageChecked(page) && ext4_should_journal_data(inode))
2105 * It's mmapped pagecache. Add buffers and journal it. There
2106 * doesn't seem much point in redirtying the page here.
2108 return __ext4_journalled_writepage(page, len);
2110 ext4_io_submit_init(&io_submit, wbc);
2111 io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS);
2112 if (!io_submit.io_end) {
2113 redirty_page_for_writepage(wbc, page);
2117 ret = ext4_bio_write_page(&io_submit, page, len, wbc, keep_towrite);
2118 ext4_io_submit(&io_submit);
2119 /* Drop io_end reference we got from init */
2120 ext4_put_io_end_defer(io_submit.io_end);
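/*
 * Submit a single page collected by the delayed-allocation writeback code:
 * clear its dirty bit, work out how much of it lies below i_size, and pass it
 * to ext4_bio_write_page() for IO submission.
 */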
2124 static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
2130 BUG_ON(page->index != mpd->first_page);
2131 clear_page_dirty_for_io(page);
2133 * We have to be very careful here! Nothing protects writeback path
2134 * against i_size changes and the page can be writeably mapped into
2135 * page tables. So an application can be growing i_size and writing
2136 * data through mmap while writeback runs. clear_page_dirty_for_io()
2137 * write-protects our page in page tables and the page cannot get
2138 * written to again until we release page lock. So only after
2139 * clear_page_dirty_for_io() we are safe to sample i_size for
2140 * ext4_bio_write_page() to zero-out tail of the written page. We rely
2141 * on the barrier provided by TestClearPageDirty in
2142 * clear_page_dirty_for_io() to make sure i_size is really sampled only
2143 * after page tables are updated.
2145 size = i_size_read(mpd->inode);
2146 if (page->index == size >> PAGE_SHIFT)
2147 len = size & ~PAGE_MASK;
2150 err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false);
2152 mpd->wbc->nr_to_write--;
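/*
 * Worked example of the tail-page length above (hypothetical sizes): with
 * i_size == 10000 and 4KiB pages, the last page has index 10000 >> 12 == 2
 * and len == 10000 & ~PAGE_MASK == 1808, so ext4_bio_write_page() can
 * zero out the written page beyond byte 1808; any earlier page is
 * submitted with the full page length.
 */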
2158 #define BH_FLAGS ((1 << BH_Unwritten) | (1 << BH_Delay))
2161 * mballoc gives us at most this number of blocks...
2162 * XXX: That seems to be only a limitation of ext4_mb_normalize_request().
2163 * The rest of mballoc seems to handle chunks up to full group size.
2165 #define MAX_WRITEPAGES_EXTENT_LEN 2048
2168 * mpage_add_bh_to_extent - try to add bh to extent of blocks to map
2170 * @mpd - extent of blocks
2171 * @lblk - logical number of the block in the file
2172 * @bh - buffer head we want to add to the extent
2174 * The function is used to collect contiguous blocks in the same state. If the
2175 * buffer doesn't require mapping for writeback and we haven't started the
2176 * extent of buffers to map yet, the function returns 'true' immediately - the
2177 * caller can write the buffer right away. Otherwise the function returns true
2178 * if the block has been added to the extent, false if the block couldn't be added.
2181 static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
2182 struct buffer_head *bh)
2184 struct ext4_map_blocks *map = &mpd->map;
2186 /* Buffer that doesn't need mapping for writeback? */
2187 if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
2188 (!buffer_delay(bh) && !buffer_unwritten(bh))) {
2189 /* So far no extent to map => we write the buffer right away */
2190 if (map->m_len == 0)
2195 /* First block in the extent? */
2196 if (map->m_len == 0) {
2197 /* We cannot map unless handle is started... */
2202 map->m_flags = bh->b_state & BH_FLAGS;
2206 /* Don't go larger than mballoc is willing to allocate */
2207 if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
2210 /* Can we merge the block to our big extent? */
2211 if (lblk == map->m_lblk + map->m_len &&
2212 (bh->b_state & BH_FLAGS) == map->m_flags) {
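/*
 * A minimal model of the accumulation rule above (hypothetical helper,
 * shown for illustration only - not part of the kernel code):
 *
 *	bool can_add(struct ext4_map_blocks *map, ext4_lblk_t lblk,
 *		     unsigned long bh_state)
 *	{
 *		if (map->m_len == 0)
 *			return true;
 *		if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
 *			return false;
 *		return lblk == map->m_lblk + map->m_len &&
 *		       (bh_state & BH_FLAGS) == map->m_flags;
 *	}
 *
 * i.e. the extent grows block by block, only while the buffers share the
 * same delay/unwritten state, and never past mballoc's practical limit.
 */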
2220 * mpage_process_page_bufs - submit page buffers for IO or add them to extent
2222 * @mpd - extent of blocks for mapping
2223 * @head - the first buffer in the page
2224 * @bh - buffer we should start processing from
2225 * @lblk - logical number of the block in the file corresponding to @bh
2227 * Walk through page buffers from @bh up to @head (exclusive) and either submit
2228 * the page for IO if all buffers in this page were mapped and there's no
2229 * accumulated extent of buffers to map or add buffers in the page to the
2230 * extent of buffers to map. The function returns 1 if the caller can continue
2231 * by processing the next page, 0 if it should stop adding buffers to the
2232 * extent to map because we cannot extend it anymore. It can also return value
2233 * < 0 in case of error during IO submission.
2235 static int mpage_process_page_bufs(struct mpage_da_data *mpd,
2236 struct buffer_head *head,
2237 struct buffer_head *bh,
2240 struct inode *inode = mpd->inode;
2242 ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(inode) - 1)
2243 >> inode->i_blkbits;
2246 BUG_ON(buffer_locked(bh));
2248 if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) {
2249 /* Found extent to map? */
2252 /* Buffer needs mapping and handle is not started? */
2255 /* Everything mapped so far and we hit EOF */
2258 } while (lblk++, (bh = bh->b_this_page) != head);
2259 /* So far everything mapped? Submit the page for IO. */
2260 if (mpd->map.m_len == 0) {
2261 err = mpage_submit_page(mpd, head->b_page);
2265 return lblk < blocks;
2269 * mpage_map_and_submit_buffers - update buffers corresponding to changed extent and
2270 * submit fully mapped pages for IO
2272 * @mpd - description of extent to map, on return next extent to map
2274 * Scan buffers corresponding to changed extent (we expect corresponding pages
2275 * to be already locked) and update buffer state according to new extent state.
2276 * We map delalloc buffers to their physical location, clear unwritten bits,
2277 * and mark buffers as uninit when we perform writes to unwritten extents
2278 * and do extent conversion after IO is finished. If the last page is not fully
2279 * mapped, we update @map to the next extent in the last page that needs
2280 * mapping. Otherwise we submit the page for IO.
2282 static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
2284 struct pagevec pvec;
2286 struct inode *inode = mpd->inode;
2287 struct buffer_head *head, *bh;
2288 int bpp_bits = PAGE_SHIFT - inode->i_blkbits;
2294 start = mpd->map.m_lblk >> bpp_bits;
2295 end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits;
2296 lblk = start << bpp_bits;
2297 pblock = mpd->map.m_pblk;
2299 pagevec_init(&pvec, 0);
2300 while (start <= end) {
2301 nr_pages = pagevec_lookup(&pvec, inode->i_mapping, start,
2305 for (i = 0; i < nr_pages; i++) {
2306 struct page *page = pvec.pages[i];
2308 if (page->index > end)
2310 /* Up to 'end' pages must be contiguous */
2311 BUG_ON(page->index != start);
2312 bh = head = page_buffers(page);
2314 if (lblk < mpd->map.m_lblk)
2316 if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
2318 * Buffer after end of mapped extent.
2319 * Find next buffer in the page to map.
2322 mpd->map.m_flags = 0;
2324 * FIXME: If dioread_nolock supports
2325 * blocksize < pagesize, we need to make
2326 * sure we add size mapped so far to
2327 * io_end->size as the following call
2328 * can submit the page for IO.
2330 err = mpage_process_page_bufs(mpd, head,
2332 pagevec_release(&pvec);
2337 if (buffer_delay(bh)) {
2338 clear_buffer_delay(bh);
2339 bh->b_blocknr = pblock++;
2341 clear_buffer_unwritten(bh);
2342 } while (lblk++, (bh = bh->b_this_page) != head);
2345 * FIXME: This is going to break if dioread_nolock
2346 * supports blocksize < pagesize as we will try to
2347 * convert potentially unmapped parts of inode.
2349 mpd->io_submit.io_end->size += PAGE_SIZE;
2350 /* Page fully mapped - let IO run! */
2351 err = mpage_submit_page(mpd, page);
2353 pagevec_release(&pvec);
2358 pagevec_release(&pvec);
2360 /* Extent fully mapped and matches with page boundary. We are done. */
2362 mpd->map.m_flags = 0;
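/*
 * Worked example of the index arithmetic above (hypothetical geometry):
 * with 1KiB blocks and 4KiB pages, bpp_bits == 12 - 10 == 2, i.e. four
 * blocks per page. A mapped extent with m_lblk == 21 and m_len == 10
 * (blocks 21..30) spans pages start == 21 >> 2 == 5 through
 * end == 30 >> 2 == 7, and the buffer walk starts at lblk == 5 << 2 == 20;
 * block 20, which lies before the extent, is then skipped by the
 * lblk < mpd->map.m_lblk check.
 */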
2366 static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
2368 struct inode *inode = mpd->inode;
2369 struct ext4_map_blocks *map = &mpd->map;
2370 int get_blocks_flags;
2371 int err, dioread_nolock;
2373 trace_ext4_da_write_pages_extent(inode, map);
2375 * Call ext4_map_blocks() to allocate any delayed allocation blocks, or
2376 * to convert an unwritten extent to be initialized (in the case
2377 * where we have written into one or more preallocated blocks). It is
2378 * possible that we're going to need more metadata blocks than
2379 * previously reserved. However we must not fail because we're in
2380 * writeback and there is nothing we can do about it so it might result
2381 * in data loss. So use reserved blocks to allocate metadata if
2384 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if
2385 * the blocks in question are delalloc blocks. This indicates
2386 * that the blocks and quotas have already been checked when
2387 * the data was copied into the page cache.
2389 get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
2390 EXT4_GET_BLOCKS_METADATA_NOFAIL |
2391 EXT4_GET_BLOCKS_IO_SUBMIT;
2392 dioread_nolock = ext4_should_dioread_nolock(inode);
2394 get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
2395 if (map->m_flags & (1 << BH_Delay))
2396 get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
2398 err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
2401 if (dioread_nolock && (map->m_flags & EXT4_MAP_UNWRITTEN)) {
2402 if (!mpd->io_submit.io_end->handle &&
2403 ext4_handle_valid(handle)) {
2404 mpd->io_submit.io_end->handle = handle->h_rsv_handle;
2405 handle->h_rsv_handle = NULL;
2407 ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end);
2410 BUG_ON(map->m_len == 0);
2411 if (map->m_flags & EXT4_MAP_NEW) {
2412 clean_bdev_aliases(inode->i_sb->s_bdev, map->m_pblk,
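/*
 * Example of the flag composition above (illustrative case): for a
 * delalloc write on a dioread_nolock mount, get_blocks_flags becomes
 * EXT4_GET_BLOCKS_CREATE | EXT4_GET_BLOCKS_METADATA_NOFAIL |
 * EXT4_GET_BLOCKS_IO_SUBMIT | EXT4_GET_BLOCKS_IO_CREATE_EXT |
 * EXT4_GET_BLOCKS_DELALLOC_RESERVE, so the allocation may use reserved
 * blocks for metadata and is charged against the reservation taken at
 * write time instead of being checked against quota again.
 */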
2419 * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length
2420 * mpd->len and submit pages underlying it for IO
2422 * @handle - handle for journal operations
2423 * @mpd - extent to map
2424 * @give_up_on_write - we set this to true iff there is a fatal error and there
2425 * is no hope of writing the data. The caller should discard
2426 * dirty pages to avoid infinite loops.
2428 * The function maps extent starting at mpd->lblk of length mpd->len. If it is
2429 * delayed, blocks are allocated, if it is unwritten, we may need to convert
2430 * them to initialized or split the described range from larger unwritten
2431 * extent. Note that we need not map all of the described range since allocation
2432 * can return fewer blocks, or the range may be covered by more unwritten extents. We
2433 * cannot map more because we are limited by reserved transaction credits. On
2434 * the other hand we always make sure that the last touched page is fully
2435 * mapped so that it can be written out (and thus forward progress is
2436 * guaranteed). After mapping we submit all mapped pages for IO.
2438 static int mpage_map_and_submit_extent(handle_t *handle,
2439 struct mpage_da_data *mpd,
2440 bool *give_up_on_write)
2442 struct inode *inode = mpd->inode;
2443 struct ext4_map_blocks *map = &mpd->map;
2448 mpd->io_submit.io_end->offset =
2449 ((loff_t)map->m_lblk) << inode->i_blkbits;
2451 err = mpage_map_one_extent(handle, mpd);
2453 struct super_block *sb = inode->i_sb;
2455 if (ext4_forced_shutdown(EXT4_SB(sb)) ||
2456 EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)
2457 goto invalidate_dirty_pages;
2459 * Let the upper layers retry transient errors.
2460 * In the case of ENOSPC, if ext4_count_free_blocks()
2461 * is non-zero, a commit should free up blocks.
2463 if ((err == -ENOMEM) ||
2464 (err == -ENOSPC && ext4_count_free_clusters(sb))) {
2466 goto update_disksize;
2469 ext4_msg(sb, KERN_CRIT,
2470 "Delayed block allocation failed for "
2471 "inode %lu at logical offset %llu with"
2472 " max blocks %u with error %d",
2474 (unsigned long long)map->m_lblk,
2475 (unsigned)map->m_len, -err);
2476 ext4_msg(sb, KERN_CRIT,
2477 "This should not happen!! Data will "
2480 ext4_print_free_blocks(inode);
2481 invalidate_dirty_pages:
2482 *give_up_on_write = true;
2487 * Update buffer state, submit mapped pages, and get us new
2490 err = mpage_map_and_submit_buffers(mpd);
2492 goto update_disksize;
2493 } while (map->m_len);
2497 * Update on-disk size after IO is submitted. Races with
2498 * truncate are avoided by checking i_size under i_data_sem.
2500 disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
2501 if (disksize > EXT4_I(inode)->i_disksize) {
2505 down_write(&EXT4_I(inode)->i_data_sem);
2506 i_size = i_size_read(inode);
2507 if (disksize > i_size)
2509 if (disksize > EXT4_I(inode)->i_disksize)
2510 EXT4_I(inode)->i_disksize = disksize;
2511 up_write(&EXT4_I(inode)->i_data_sem);
2512 err2 = ext4_mark_inode_dirty(handle, inode);
2514 ext4_error(inode->i_sb,
2515 "Failed to mark inode %lu dirty",
2524 * Calculate the total number of credits to reserve for one writepages
2525 * iteration. This is called from ext4_writepages(). We map an extent of
2526 * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping
2527 * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN +
2528 * bpp - 1 blocks in bpp different extents.
2530 static int ext4_da_writepages_trans_blocks(struct inode *inode)
2532 int bpp = ext4_journal_blocks_per_page(inode);
2534 return ext4_meta_trans_blocks(inode,
2535 MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp);
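/*
 * Worked example (assuming ext4_journal_blocks_per_page() reports one
 * block per page, e.g. 4KiB blocks with 4KiB pages): bpp == 1, so we ask
 * ext4_meta_trans_blocks() to cover MAX_WRITEPAGES_EXTENT_LEN + 1 - 1 ==
 * 2048 blocks in a single extent. With 1KiB blocks (bpp == 4) the estimate
 * covers 2048 + 3 blocks in up to four extents, which is what lets the
 * final partial page always be mapped within one transaction.
 */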
2539 * mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages
2540 * and underlying extent to map
2542 * @mpd - where to look for pages
2544 * Walk dirty pages in the mapping. If they are fully mapped, submit them for
2545 * IO immediately. When we find a page which isn't mapped we start accumulating
2546 * extent of buffers underlying these pages that needs mapping (formed by
2547 * either delayed or unwritten buffers). We also lock the pages containing
2548 * these buffers. The extent found is returned in @mpd structure (starting at
2549 * mpd->lblk with length mpd->len blocks).
2551 * Note that this function can attach bios to one io_end structure which are
2552 * neither logically nor physically contiguous. Although it may seem like an
2553 * unnecessary complication, it is actually inevitable in blocksize < pagesize
2554 * case as we need to track IO to all buffers underlying a page in one io_end.
2556 static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
2558 struct address_space *mapping = mpd->inode->i_mapping;
2559 struct pagevec pvec;
2560 unsigned int nr_pages;
2561 long left = mpd->wbc->nr_to_write;
2562 pgoff_t index = mpd->first_page;
2563 pgoff_t end = mpd->last_page;
2566 int blkbits = mpd->inode->i_blkbits;
2568 struct buffer_head *head;
2570 if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages)
2571 tag = PAGECACHE_TAG_TOWRITE;
2573 tag = PAGECACHE_TAG_DIRTY;
2575 pagevec_init(&pvec, 0);
2577 mpd->next_page = index;
2578 while (index <= end) {
2579 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
2580 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
2584 for (i = 0; i < nr_pages; i++) {
2585 struct page *page = pvec.pages[i];
2588 * At this point, the page may be truncated or
2589 * invalidated (changing page->mapping to NULL), or
2590 * even swizzled back from swapper_space to tmpfs file
2591 * mapping. However, page->index will not change
2592 * because we have a reference on the page.
2594 if (page->index > end)
2598 * Accumulated enough dirty pages? This doesn't apply
2599 * to WB_SYNC_ALL mode. For integrity sync we have to
2600 * keep going because someone may be concurrently
2601 * dirtying pages, and we might have synced a lot of
2602 * newly appeared dirty pages, but have not synced all
2603 * of the old dirty pages.
2605 if (mpd->wbc->sync_mode == WB_SYNC_NONE && left <= 0)
2608 /* If we can't merge this page, we are done. */
2609 if (mpd->map.m_len > 0 && mpd->next_page != page->index)
2614 * If the page is no longer dirty, or its mapping no
2615 * longer corresponds to inode we are writing (which
2616 * means it has been truncated or invalidated), or the
2617 * page is already under writeback and we are not doing
2618 * a data integrity writeback, skip the page
2620 if (!PageDirty(page) ||
2621 (PageWriteback(page) &&
2622 (mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
2623 unlikely(page->mapping != mapping)) {
2628 wait_on_page_writeback(page);
2629 BUG_ON(PageWriteback(page));
2631 if (mpd->map.m_len == 0)
2632 mpd->first_page = page->index;
2633 mpd->next_page = page->index + 1;
2634 /* Add all dirty buffers to mpd */
2635 lblk = ((ext4_lblk_t)page->index) <<
2636 (PAGE_SHIFT - blkbits);
2637 head = page_buffers(page);
2638 err = mpage_process_page_bufs(mpd, head, head, lblk);
2644 pagevec_release(&pvec);
2649 pagevec_release(&pvec);
2653 static int __writepage(struct page *page, struct writeback_control *wbc,
2656 struct address_space *mapping = data;
2657 int ret = ext4_writepage(page, wbc);
2658 mapping_set_error(mapping, ret);
2662 static int ext4_writepages(struct address_space *mapping,
2663 struct writeback_control *wbc)
2665 pgoff_t writeback_index = 0;
2666 long nr_to_write = wbc->nr_to_write;
2667 int range_whole = 0;
2669 handle_t *handle = NULL;
2670 struct mpage_da_data mpd;
2671 struct inode *inode = mapping->host;
2672 int needed_blocks, rsv_blocks = 0, ret = 0;
2673 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2675 struct blk_plug plug;
2676 bool give_up_on_write = false;
2678 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
2681 percpu_down_read(&sbi->s_journal_flag_rwsem);
2682 trace_ext4_writepages(inode, wbc);
2684 if (dax_mapping(mapping)) {
2685 ret = dax_writeback_mapping_range(mapping, inode->i_sb->s_bdev,
2687 goto out_writepages;
2691 * No pages to write? This is mainly a kludge to avoid starting
2692 * a transaction for special inodes like journal inode on last iput()
2693 * because that could violate lock ordering on umount
2695 if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2696 goto out_writepages;
2698 if (ext4_should_journal_data(inode)) {
2699 struct blk_plug plug;
2701 blk_start_plug(&plug);
2702 ret = write_cache_pages(mapping, wbc, __writepage, mapping);
2703 blk_finish_plug(&plug);
2704 goto out_writepages;
2708 * If the filesystem has aborted, it is read-only, so return
2709 * right away instead of dumping stack traces later on that
2710 * will obscure the real source of the problem. We test
2711 * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
2712 * the latter could be true if the filesystem is mounted
2713 * read-only, and in that case, ext4_writepages should
2714 * *never* be called, so if that ever happens, we would want the stack trace.
2717 if (unlikely(ext4_forced_shutdown(EXT4_SB(mapping->host->i_sb)) ||
2718 sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) {
2720 goto out_writepages;
2723 if (ext4_should_dioread_nolock(inode)) {
2725 * We may need to convert up to one extent per block in
2726 * the page and we may dirty the inode.
2728 rsv_blocks = 1 + (PAGE_SIZE >> inode->i_blkbits);
2732 * If we have inline data and arrive here, it means that
2733 * we will soon create the block for the 1st page, so
2734 * we'd better clear the inline data here.
2736 if (ext4_has_inline_data(inode)) {
2737 /* Just inode will be modified... */
2738 handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
2739 if (IS_ERR(handle)) {
2740 ret = PTR_ERR(handle);
2741 goto out_writepages;
2743 BUG_ON(ext4_test_inode_state(inode,
2744 EXT4_STATE_MAY_INLINE_DATA));
2745 ext4_destroy_inline_data(handle, inode);
2746 ext4_journal_stop(handle);
2749 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2752 if (wbc->range_cyclic) {
2753 writeback_index = mapping->writeback_index;
2754 if (writeback_index)
2756 mpd.first_page = writeback_index;
2759 mpd.first_page = wbc->range_start >> PAGE_SHIFT;
2760 mpd.last_page = wbc->range_end >> PAGE_SHIFT;
2765 ext4_io_submit_init(&mpd.io_submit, wbc);
2767 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2768 tag_pages_for_writeback(mapping, mpd.first_page, mpd.last_page);
2770 blk_start_plug(&plug);
2773 * First writeback pages that don't need mapping - we can avoid
2774 * starting a transaction unnecessarily and also avoid being blocked
2775 * in the block layer on device congestion while having transaction started.
2779 mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2780 if (!mpd.io_submit.io_end) {
2784 ret = mpage_prepare_extent_to_map(&mpd);
2785 /* Submit prepared bio */
2786 ext4_io_submit(&mpd.io_submit);
2787 ext4_put_io_end_defer(mpd.io_submit.io_end);
2788 mpd.io_submit.io_end = NULL;
2789 /* Unlock pages we didn't use */
2790 mpage_release_unused_pages(&mpd, false);
2794 while (!done && mpd.first_page <= mpd.last_page) {
2795 /* For each extent of pages we use new io_end */
2796 mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2797 if (!mpd.io_submit.io_end) {
2803 * We have two constraints: We find one extent to map and we
2804 * must always write out whole page (makes a difference when
2805 * blocksize < pagesize) so that we don't block on IO when we
2806 * try to write out the rest of the page. Journalled mode is
2807 * not supported by delalloc.
2809 BUG_ON(ext4_should_journal_data(inode));
2810 needed_blocks = ext4_da_writepages_trans_blocks(inode);
2812 /* start a new transaction */
2813 handle = ext4_journal_start_with_reserve(inode,
2814 EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks);
2815 if (IS_ERR(handle)) {
2816 ret = PTR_ERR(handle);
2817 ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2818 "%ld pages, ino %lu; err %d", __func__,
2819 wbc->nr_to_write, inode->i_ino, ret);
2820 /* Release allocated io_end */
2821 ext4_put_io_end(mpd.io_submit.io_end);
2822 mpd.io_submit.io_end = NULL;
2827 trace_ext4_da_write_pages(inode, mpd.first_page, mpd.wbc);
2828 ret = mpage_prepare_extent_to_map(&mpd);
2831 ret = mpage_map_and_submit_extent(handle, &mpd,
2835 * We scanned the whole range (or exhausted
2836 * nr_to_write), submitted what was mapped and
2837 * didn't find anything needing mapping. We are done.
2844 * Caution: If the handle is synchronous,
2845 * ext4_journal_stop() can wait for transaction commit
2846 * to finish which may depend on writeback of pages to
2847 * complete or on page lock to be released. In that
2848 * case, we have to wait until after we have
2849 * submitted all the IO, released page locks we hold,
2850 * and dropped io_end reference (for extent conversion
2851 * to be able to complete) before stopping the handle.
2853 if (!ext4_handle_valid(handle) || handle->h_sync == 0) {
2854 ext4_journal_stop(handle);
2858 /* Submit prepared bio */
2859 ext4_io_submit(&mpd.io_submit);
2860 /* Unlock pages we didn't use */
2861 mpage_release_unused_pages(&mpd, give_up_on_write);
2863 * Drop our io_end reference we got from init. We have
2864 * to be careful and use deferred io_end finishing if
2865 * we are still holding the transaction as we can
2866 * release the last reference to io_end which may end
2867 * up doing unwritten extent conversion.
2870 ext4_put_io_end_defer(mpd.io_submit.io_end);
2871 ext4_journal_stop(handle);
2873 ext4_put_io_end(mpd.io_submit.io_end);
2874 mpd.io_submit.io_end = NULL;
2876 if (ret == -ENOSPC && sbi->s_journal) {
2878 * Commit the transaction which would
2879 * free blocks released in the transaction
2882 jbd2_journal_force_commit_nested(sbi->s_journal);
2886 /* Fatal error - ENOMEM, EIO... */
2891 blk_finish_plug(&plug);
2892 if (!ret && !cycled && wbc->nr_to_write > 0) {
2894 mpd.last_page = writeback_index - 1;
2900 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2902 * Set the writeback_index so that range_cyclic
2903 * mode will write it back later
2905 mapping->writeback_index = mpd.first_page;
2908 trace_ext4_writepages_result(inode, wbc, ret,
2909 nr_to_write - wbc->nr_to_write);
2910 percpu_up_read(&sbi->s_journal_flag_rwsem);
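/*
 * Condensed sketch of the main loop above (illustrative pseudo-code only;
 * error handling and the handle-synchronicity ordering are omitted):
 *
 *	while (!done && mpd.first_page <= mpd.last_page) {
 *		mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
 *		handle = ext4_journal_start_with_reserve(inode,
 *				EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks);
 *		mpage_prepare_extent_to_map(&mpd);	  /* lock dirty pages */
 *		mpage_map_and_submit_extent(handle, &mpd, ...);
 *		ext4_journal_stop(handle);
 *		ext4_io_submit(&mpd.io_submit);		  /* flush the bio */
 *		mpage_release_unused_pages(&mpd, give_up_on_write);
 *		ext4_put_io_end_defer(mpd.io_submit.io_end);
 *	}
 */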
2914 static int ext4_nonda_switch(struct super_block *sb)
2916 s64 free_clusters, dirty_clusters;
2917 struct ext4_sb_info *sbi = EXT4_SB(sb);
2920 * Switch to non-delalloc mode if we are running low
2921 * on free blocks. The free block accounting via percpu
2922 * counters can get slightly wrong with percpu_counter_batch getting
2923 * accumulated on each CPU without updating global counters.
2924 * Delalloc needs accurate free block accounting, so switch
2925 * to non-delalloc when we are near the error range.
2928 percpu_counter_read_positive(&sbi->s_freeclusters_counter);
2930 percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
2932 * Start pushing delalloc when 1/2 of free blocks are dirty.
2934 if (dirty_clusters && (free_clusters < 2 * dirty_clusters))
2935 try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
2937 if (2 * free_clusters < 3 * dirty_clusters ||
2938 free_clusters < (dirty_clusters + EXT4_FREECLUSTERS_WATERMARK)) {
2940 * free block count is less than 150% of dirty blocks
2941 * or the free block count is less than the watermark
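/*
 * Worked example of the two thresholds (hypothetical counters): with
 * free_clusters == 1200 and dirty_clusters == 1000, writeback is kicked
 * because 1200 < 2 * 1000, and ext4_da_write_begin() is told to fall back
 * to the non-delalloc path because 2 * 1200 < 3 * 1000. With
 * free_clusters == 4000, and assuming the count also clears
 * EXT4_FREECLUSTERS_WATERMARK, neither test fires and delalloc stays in use.
 */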
2948 /* We always reserve for an inode update; the superblock could be there too */
2949 static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len)
2951 if (likely(ext4_has_feature_large_file(inode->i_sb)))
2954 if (pos + len <= 0x7fffffffULL)
2957 /* We might need to update the superblock to set LARGE_FILE */
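/*
 * Example of the estimate above: on a filesystem without the LARGE_FILE
 * feature, a delalloc write whose end offset goes past 0x7fffffff may have
 * to set LARGE_FILE in the superblock, so a credit for the superblock is
 * reserved on top of the one for the inode; in every other case a single
 * credit for the inode update suffices.
 */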
2961 static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
2962 loff_t pos, unsigned len, unsigned flags,
2963 struct page **pagep, void **fsdata)
2965 int ret, retries = 0;
2968 struct inode *inode = mapping->host;
2971 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
2974 index = pos >> PAGE_SHIFT;
2976 if (ext4_nonda_switch(inode->i_sb) ||
2977 S_ISLNK(inode->i_mode)) {
2978 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
2979 return ext4_write_begin(file, mapping, pos,
2980 len, flags, pagep, fsdata);
2982 *fsdata = (void *)0;
2983 trace_ext4_da_write_begin(inode, pos, len, flags);
2985 if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
2986 ret = ext4_da_write_inline_data_begin(mapping, inode,
2996 * grab_cache_page_write_begin() can take a long time if the
2997 * system is thrashing due to memory pressure, or if the page
2998 * is being written back. So grab it first before we start
2999 * the transaction handle. This also allows us to allocate
3000 * the page (if needed) without using GFP_NOFS.
3003 page = grab_cache_page_write_begin(mapping, index, flags);
3009 * With delayed allocation, we don't log the i_disksize update
3010 * if there is delayed block allocation. But we still need
3011 * to journal the i_disksize update if the write is to the end
3012 * of the file and lands in an already mapped buffer.
3015 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
3016 ext4_da_write_credits(inode, pos, len));
3017 if (IS_ERR(handle)) {
3019 return PTR_ERR(handle);
3023 if (page->mapping != mapping) {
3024 /* The page got truncated from under us */
3027 ext4_journal_stop(handle);
3030 /* In case writeback began while the page was unlocked */
3031 wait_for_stable_page(page);
3033 #ifdef CONFIG_EXT4_FS_ENCRYPTION
3034 ret = ext4_block_write_begin(page, pos, len,
3035 ext4_da_get_block_prep);
3037 ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
3041 ext4_journal_stop(handle);
3043 * block_write_begin may have instantiated a few blocks
3044 * outside i_size. Trim these off again. Don't need
3045 * i_size_read because we hold i_mutex.
3047 if (pos + len > inode->i_size)
3048 ext4_truncate_failed_write(inode);
3050 if (ret == -ENOSPC &&
3051 ext4_should_retry_alloc(inode->i_sb, &retries))
3063 * Check if we should update i_disksize
3064 * when writing to the end of file without requiring block allocation
3066 static int ext4_da_should_update_i_disksize(struct page *page,
3067 unsigned long offset)
3069 struct buffer_head *bh;
3070 struct inode *inode = page->mapping->host;
3074 bh = page_buffers(page);
3075 idx = offset >> inode->i_blkbits;
3077 for (i = 0; i < idx; i++)
3078 bh = bh->b_this_page;
3080 if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
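/*
 * Worked example (hypothetical layout): with 1KiB blocks, a copy ending at
 * byte 2500 within the page gives idx == 2500 >> 10 == 2, so the third
 * buffer_head in the page is the one inspected. ext4_da_write_end() below
 * then bumps i_disksize only if that buffer is already mapped and neither
 * delayed nor unwritten, i.e. no block allocation is still pending for the
 * bytes just written.
 */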
3085 static int ext4_da_write_end(struct file *file,
3086 struct address_space *mapping,
3087 loff_t pos, unsigned len, unsigned copied,
3088 struct page *page, void *fsdata)
3090 struct inode *inode = mapping->host;
3092 handle_t *handle = ext4_journal_current_handle();
3094 unsigned long start, end;
3095 int write_mode = (int)(unsigned long)fsdata;
3097 if (write_mode == FALL_BACK_TO_NONDELALLOC)
3098 return ext4_write_end(file, mapping, pos,
3099 len, copied, page, fsdata);
3101 trace_ext4_da_write_end(inode, pos, len, copied);
3102 start = pos & (PAGE_SIZE - 1);
3103 end = start + copied - 1;
3106 * generic_write_end() will run mark_inode_dirty() if i_size
3107 * changes. So let's piggyback the i_disksize mark_inode_dirty
3110 new_i_size = pos + copied;
3111 if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
3112 if (ext4_has_inline_data(inode) ||
3113 ext4_da_should_update_i_disksize(page, end)) {
3114 ext4_update_i_disksize(inode, new_i_size);
3115 /* We need to mark the inode dirty even if
3116 * new_i_size is less than inode->i_size
3117 * but greater than i_disksize. (hint: delalloc)
3119 ext4_mark_inode_dirty(handle, inode);
3123 if (write_mode != CONVERT_INLINE_DATA &&