/*
 * fs/f2fs/inode.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"

#include <trace/events/f2fs.h>
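
/*
 * Mark an inode dirty for the VFS, skipping brand-new inodes (handled at
 * creation time) and inodes that f2fs_inode_dirtied() reports as already
 * tracked on the dirty list.
 */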
void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
{
	if (is_inode_flag_set(inode, FI_NEW_INODE))
		return;

	if (f2fs_inode_dirtied(inode, sync))
		return;

	mark_inode_dirty_sync(inode);
}
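
/*
 * Mirror the on-disk F2FS_*_FL attribute bits into the in-core S_* inode
 * flags so that generic VFS code observes them.
 */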
void f2fs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = F2FS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & F2FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & F2FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & F2FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & F2FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & F2FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	if (f2fs_encrypted_inode(inode))
		new_fl |= S_ENCRYPTED;
	inode_set_flags(inode, new_fl,
			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|
				S_ENCRYPTED);
}
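
/*
 * Special inodes store their device number in the first data block address
 * slots: slot 0 holds an old-style 16-bit encoding, and slot 1 holds a
 * new-style 32-bit encoding when slot 0 is zero.
 */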
static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		if (ri->i_addr[extra_size])
			inode->i_rdev = old_decode_dev(
				le32_to_cpu(ri->i_addr[extra_size]));
		else
			inode->i_rdev = new_decode_dev(
				le32_to_cpu(ri->i_addr[extra_size + 1]));
	}
}
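
/*
 * Returns 0 when the first data block has been written, 1 when it has no
 * valid on-disk address yet, and -EFAULT when the recorded address falls
 * outside the valid range (a sign of corruption).
 */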
static int __written_first_block(struct f2fs_sb_info *sbi,
					struct f2fs_inode *ri)
{
	block_t addr = le32_to_cpu(ri->i_addr[offset_in_addr(ri)]);

	if (!__is_valid_data_blkaddr(addr))
		return 1;
	if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC))
		return -EFAULT;
	return 0;
}
static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			ri->i_addr[extra_size] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 1] = 0;
		} else {
			ri->i_addr[extra_size] = 0;
			ri->i_addr[extra_size + 1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 2] = 0;
		}
	}
}
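
/*
 * After a sudden power cut, FI_DATA_EXIST may be stale on disk. Scan the
 * inline data area; if any word is nonzero, re-assert the flag in the raw
 * inode and redirty the node page so the correction reaches disk.
 */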
static void __recover_inline_status(struct inode *inode, struct page *ipage)
{
	void *inline_data = inline_data_addr(inode, ipage);
	__le32 *start = inline_data;
	__le32 *end = start + MAX_INLINE_DATA(inode) / sizeof(__le32);

	while (start < end) {
		if (*start++) {
			f2fs_wait_on_page_writeback(ipage, NODE, true);

			set_inode_flag(inode, FI_DATA_EXIST);
			set_raw_inline(inode, F2FS_INODE(ipage));
			set_page_dirty(ipage);
			return;
		}
	}
	return;
}
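
/*
 * An inode checksum is only valid when the feature bit is set, the page
 * really is an inode block carrying extra attributes, and the extra area
 * is large enough to contain the i_inode_checksum field.
 */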
static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_sb_has_inode_chksum(sbi->sb))
		return false;

	if (!IS_INODE(page) || !(ri->i_inline & F2FS_EXTRA_ATTR))
		return false;

	if (!F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize),
				i_inode_checksum))
		return false;

	return true;
}
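
/*
 * Checksum the whole inode block with a seed derived from the inode number
 * and generation. The checksum field itself is fed in as a zeroed dummy
 * word so stored and recomputed values stay comparable.
 */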
static __u32 f2fs_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_node *node = F2FS_NODE(page);
	struct f2fs_inode *ri = &node->i;
	__le32 ino = node->footer.ino;
	__le32 gen = ri->i_generation;
	__u32 chksum, chksum_seed;
	__u32 dummy_cs = 0;
	unsigned int offset = offsetof(struct f2fs_inode, i_inode_checksum);
	unsigned int cs_size = sizeof(dummy_cs);

	chksum = f2fs_chksum(sbi, sbi->s_chksum_seed, (__u8 *)&ino,
							sizeof(ino));
	chksum_seed = f2fs_chksum(sbi, chksum, (__u8 *)&gen, sizeof(gen));

	chksum = f2fs_chksum(sbi, chksum_seed, (__u8 *)ri, offset);
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)&dummy_cs, cs_size);
	offset += cs_size;
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)ri + offset,
						F2FS_BLKSIZE - offset);
	return chksum;
}
bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri;
	__u32 provided, calculated;

	if (unlikely(is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)))
		return true;

#ifdef CONFIG_F2FS_CHECK_FS
	if (!f2fs_enable_inode_chksum(sbi, page))
#else
	if (!f2fs_enable_inode_chksum(sbi, page) ||
			PageDirty(page) || PageWriteback(page))
#endif
		return true;

	ri = &F2FS_NODE(page)->i;
	provided = le32_to_cpu(ri->i_inode_checksum);
	calculated = f2fs_inode_chksum(sbi, page);

	if (provided != calculated)
		f2fs_msg(sbi->sb, KERN_WARNING,
			"checksum invalid, ino = %x, %x vs. %x",
			ino_of_node(page), provided, calculated);

	return provided == calculated;
}

void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_enable_inode_chksum(sbi, page))
		return;

	ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page));
}
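
/*
 * Validate on-disk inode fields against the enabled superblock features.
 * Every failure sets SBI_NEED_FSCK and returns false, which makes
 * do_read_inode() reject the inode with -EINVAL.
 */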
static bool sanity_check_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned long long iblocks;

	iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
	if (!iblocks) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, "
			"run fsck to fix.",
			__func__, inode->i_ino, iblocks);
		return false;
	}

	if (ino_of_node(node_page) != nid_of_node(node_page)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: corrupted inode footer i_ino=%lx, ino,nid: "
			"[%u, %u] run fsck to fix.",
			__func__, inode->i_ino,
			ino_of_node(node_page), nid_of_node(node_page));
		return false;
	}

	if (f2fs_sb_has_flexible_inline_xattr(sbi->sb)
			&& !f2fs_has_extra_attr(inode)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: corrupted inode ino=%lx, run fsck to fix.",
			__func__, inode->i_ino);
		return false;
	}

	if (f2fs_has_extra_attr(inode) &&
			!f2fs_sb_has_extra_attr(sbi->sb)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: inode (ino=%lx) is with extra_attr, "
			"but extra_attr feature is off",
			__func__, inode->i_ino);
		return false;
	}

	if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE ||
			fi->i_extra_isize % sizeof(__le32)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: inode (ino=%lx) has corrupted i_extra_isize: %d, "
			"max: %zu",
			__func__, inode->i_ino, fi->i_extra_isize,
			F2FS_TOTAL_EXTRA_ATTR_SIZE);
		return false;
	}

	if (F2FS_I(inode)->extent_tree) {
		struct extent_info *ei = &F2FS_I(inode)->extent_tree->largest;

		if (ei->len &&
			(!f2fs_is_valid_blkaddr(sbi, ei->blk, DATA_GENERIC) ||
			!f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
							DATA_GENERIC))) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_msg(sbi->sb, KERN_WARNING,
				"%s: inode (ino=%lx) extent info [%u, %u, %u] "
				"is incorrect, run fsck to fix",
				__func__, inode->i_ino,
				ei->blk, ei->fofs, ei->len);
			return false;
		}
	}

	if (f2fs_has_inline_data(inode) &&
			(!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: inode (ino=%lx, mode=%u) should not have "
			"inline_data, run fsck to fix",
			__func__, inode->i_ino, inode->i_mode);
		return false;
	}

	if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: inode (ino=%lx, mode=%u) should not have "
			"inline_dentry, run fsck to fix",
			__func__, inode->i_ino, inode->i_mode);
		return false;
	}

	return true;
}
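
/*
 * Fill the VFS inode and f2fs_inode_info from the raw inode stored in the
 * inode's node page. Returns 0 on success or a negative errno; the node
 * page reference is dropped on every path.
 */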
static int do_read_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct page *node_page;
	struct f2fs_inode *ri;
	projid_t i_projid;
	int err;

	/* Check if ino is within scope */
	if (f2fs_check_nid_range(sbi, inode->i_ino))
		return -EINVAL;

	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	ri = F2FS_INODE(node_page);

	inode->i_mode = le16_to_cpu(ri->i_mode);
	i_uid_write(inode, le32_to_cpu(ri->i_uid));
	i_gid_write(inode, le32_to_cpu(ri->i_gid));
	set_nlink(inode, le32_to_cpu(ri->i_links));
	inode->i_size = le64_to_cpu(ri->i_size);
	inode->i_blocks = SECTOR_FROM_BLOCK(le64_to_cpu(ri->i_blocks) - 1);

	inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(ri->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	inode->i_generation = le32_to_cpu(ri->i_generation);
	if (S_ISDIR(inode->i_mode))
		fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		fi->i_gc_failures[GC_FAILURE_PIN] =
					le16_to_cpu(ri->i_gc_failures);
	fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
	fi->i_flags = le32_to_cpu(ri->i_flags);
	fi->flags = 0;
	fi->i_advise = ri->i_advise;
	fi->i_pino = le32_to_cpu(ri->i_pino);
	fi->i_dir_level = ri->i_dir_level;

	if (f2fs_init_extent_tree(inode, &ri->i_ext))
		set_page_dirty(node_page);

	get_inline_info(inode, ri);

	fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
					le16_to_cpu(ri->i_extra_isize) : 0;

	if (f2fs_sb_has_flexible_inline_xattr(sbi->sb)) {
		fi->i_inline_xattr_size = le16_to_cpu(ri->i_inline_xattr_size);
	} else if (f2fs_has_inline_xattr(inode) ||
				f2fs_has_inline_dentry(inode)) {
		fi->i_inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
	} else {

		/*
		 * Previous inline data or directory layouts always reserved
		 * 200 bytes in the inode, even if inline_xattr was disabled.
		 * To keep inline_dentry's structure for backward
		 * compatibility, we reclaim the space only from inline_data.
		 */
		fi->i_inline_xattr_size = 0;
	}

	if (!sanity_check_inode(inode, node_page)) {
		f2fs_put_page(node_page, 1);
		return -EINVAL;
	}

	/* check whether inline data really exists */
	if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
		__recover_inline_status(inode, node_page);

	/* get rdev by using inline_info */
	__get_inode_rdev(inode, ri);

	if (S_ISREG(inode->i_mode)) {
		err = __written_first_block(sbi, ri);
		if (err < 0) {
			f2fs_put_page(node_page, 1);
			return err;
		}
		if (!err)
			set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
	}

	if (!f2fs_need_inode_block_update(sbi, inode->i_ino))
		fi->last_disk_size = inode->i_size;

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi->sb) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
		i_projid = (projid_t)le32_to_cpu(ri->i_projid);
	else
		i_projid = F2FS_DEF_PROJID;
	fi->i_projid = make_kprojid(&init_user_ns, i_projid);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(sbi->sb) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		fi->i_crtime.tv_sec = le64_to_cpu(ri->i_crtime);
		fi->i_crtime.tv_nsec = le32_to_cpu(ri->i_crtime_nsec);
	}

	F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
	F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
	F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
	F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;
	f2fs_put_page(node_page, 1);

	stat_inc_inline_xattr(inode);
	stat_inc_inline_inode(inode);
	stat_inc_inline_dir(inode);

	return 0;
}
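
/*
 * Get the in-core inode for @ino. A cached inode is returned as-is; a new
 * one is read from disk and has its operation tables wired up by file
 * type. The node and meta inodes bypass do_read_inode() entirely.
 */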
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;
	int ret = 0;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW)) {
		trace_f2fs_iget(inode);
		return inode;
	}
	if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
		goto make_now;

	ret = do_read_inode(inode);
	if (ret)
		goto bad_inode;
make_now:
	if (ino == F2FS_NODE_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_node_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (ino == F2FS_META_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_meta_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (S_ISREG(inode->i_mode)) {
		inode->i_op = &f2fs_file_inode_operations;
		inode->i_fop = &f2fs_file_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &f2fs_dir_inode_operations;
		inode->i_fop = &f2fs_dir_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
		inode_nohighmem(inode);
	} else if (S_ISLNK(inode->i_mode)) {
		if (f2fs_encrypted_inode(inode))
			inode->i_op = &f2fs_encrypted_symlink_inode_operations;
		else
			inode->i_op = &f2fs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &f2fs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	} else {
		ret = -EIO;
		goto bad_inode;
	}
	f2fs_set_inode_flags(inode);
	unlock_new_inode(inode);
	trace_f2fs_iget(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	trace_f2fs_iget_exit(inode, ret);
	return ERR_PTR(ret);
}
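
/*
 * Like f2fs_iget(), but retry (after briefly waiting out writeback
 * congestion) when the failure was a transient -ENOMEM.
 */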
struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
retry:
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode)) {
		if (PTR_ERR(inode) == -ENOMEM) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}
	}
	return inode;
}
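
/*
 * Copy in-core inode state back into the raw inode in @node_page and mark
 * the page dirty. The largest cached extent is persisted as well, so it
 * survives a remount.
 */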
void f2fs_update_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_inode *ri;
	struct extent_tree *et = F2FS_I(inode)->extent_tree;

	f2fs_wait_on_page_writeback(node_page, NODE, true);
	set_page_dirty(node_page);

	f2fs_inode_synced(inode);

	ri = F2FS_INODE(node_page);

	ri->i_mode = cpu_to_le16(inode->i_mode);
	ri->i_advise = F2FS_I(inode)->i_advise;
	ri->i_uid = cpu_to_le32(i_uid_read(inode));
	ri->i_gid = cpu_to_le32(i_gid_read(inode));
	ri->i_links = cpu_to_le32(inode->i_nlink);
	ri->i_size = cpu_to_le64(i_size_read(inode));
	ri->i_blocks = cpu_to_le64(SECTOR_TO_BLOCK(inode->i_blocks) + 1);

	if (et) {
		read_lock(&et->lock);
		set_raw_extent(&et->largest, &ri->i_ext);
		read_unlock(&et->lock);
	} else {
		memset(&ri->i_ext, 0, sizeof(ri->i_ext));
	}
	set_raw_inline(inode, ri);

	ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	if (S_ISDIR(inode->i_mode))
		ri->i_current_depth =
			cpu_to_le32(F2FS_I(inode)->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		ri->i_gc_failures =
			cpu_to_le16(F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN]);
	ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
	ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
	ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
	ri->i_generation = cpu_to_le32(inode->i_generation);
	ri->i_dir_level = F2FS_I(inode)->i_dir_level;

	if (f2fs_has_extra_attr(inode)) {
		ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize);

		if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)->sb))
			ri->i_inline_xattr_size =
				cpu_to_le16(F2FS_I(inode)->i_inline_xattr_size);

		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)->sb) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_projid)) {
			projid_t i_projid;

			i_projid = from_kprojid(&init_user_ns,
						F2FS_I(inode)->i_projid);
			ri->i_projid = cpu_to_le32(i_projid);
		}

		if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)->sb) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_crtime)) {
			ri->i_crtime =
				cpu_to_le64(F2FS_I(inode)->i_crtime.tv_sec);
			ri->i_crtime_nsec =
				cpu_to_le32(F2FS_I(inode)->i_crtime.tv_nsec);
		}
	}

	__set_inode_rdev(inode, ri);

	/* deleted inode */
	if (inode->i_nlink == 0)
		clear_inline_node(node_page);

	F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
	F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
	F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
	F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;

#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_inode_chksum_set(F2FS_I_SB(inode), node_page);
#endif
}
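
/*
 * Fetch the inode's node page and flush in-core state into it. -ENOMEM is
 * retried; any other failure except -ENOENT stops checkpointing, since
 * silently dropping an inode update could corrupt the filesystem.
 */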
void f2fs_update_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *node_page;
retry:
	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page)) {
		int err = PTR_ERR(node_page);
		if (err == -ENOMEM) {
			cond_resched();
			goto retry;
		} else if (err != -ENOENT) {
			f2fs_stop_checkpoint(sbi, false);
		}
		return;
	}
	f2fs_update_inode(inode, node_page);
	f2fs_put_page(node_page, 1);
}
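
/*
 * ->write_inode callback. Node/meta inodes and inodes f2fs does not
 * consider dirty are skipped; otherwise the inode page is updated and,
 * when real writeback is requested, the filesystem is rebalanced.
 */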
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return 0;

	if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
		return 0;

	/*
	 * We need to balance fs here to avoid producing dirty node pages
	 * during the urgent cleaning time when running out of free sections.
	 */
	f2fs_update_inode_page(inode);
	if (wbc && wbc->nr_to_write)
		f2fs_balance_fs(sbi, true);
	return 0;
}

/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	int err = 0;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages_final(&inode->i_data);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		goto out_clear;

	f2fs_bug_on(sbi, get_dirty_pages(inode));
	f2fs_remove_dirty_inode(inode);

	f2fs_destroy_extent_tree(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	dquot_initialize(inode);

	f2fs_remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, FLUSH_INO);

	sb_start_intwrite(inode->i_sb);
	set_inode_flag(inode, FI_NO_ALLOC);
	i_size_write(inode, 0);
retry:
	if (F2FS_HAS_BLOCKS(inode))
		err = f2fs_truncate(inode);

	if (time_to_inject(sbi, FAULT_EVICT_INODE)) {
		f2fs_show_injection_info(FAULT_EVICT_INODE);
		err = -EIO;
	}

	if (!err) {
		f2fs_lock_op(sbi);
		err = f2fs_remove_inode_page(inode);
		f2fs_unlock_op(sbi);
		if (err == -ENOENT)
			err = 0;
	}

	/* give more chances, if ENOMEM case */
	if (err == -ENOMEM) {
		err = 0;
		goto retry;
	}

	if (err)
		f2fs_update_inode_page(inode);
	dquot_free_inode(inode);
	sb_end_intwrite(inode->i_sb);
no_delete:
	dquot_drop(inode);

	stat_dec_inline_xattr(inode);
	stat_dec_inline_dir(inode);
	stat_dec_inline_inode(inode);

	if (likely(!is_set_ckpt_flags(sbi, CP_ERROR_FLAG)))
		f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE));
	else
		f2fs_inode_synced(inode);

	/* ino == 0 if f2fs_new_inode() failed */
	if (inode->i_ino)
		invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino,
							inode->i_ino);
	if (xnid)
		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
	if (inode->i_nlink) {
		if (is_inode_flag_set(inode, FI_APPEND_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, APPEND_INO);
		if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	}
	if (is_inode_flag_set(inode, FI_FREE_NID)) {
		f2fs_alloc_nid_failed(sbi, inode->i_ino);
		clear_inode_flag(inode, FI_FREE_NID);
	} else {
		/*
		 * If the xattr nid was corrupted, we can reach this error
		 * path with err && !f2fs_exist_written_data(sbi,
		 * inode->i_ino, ORPHAN_INO). In that case,
		 * f2fs_check_nid_range() is enough to give a clue.
		 */
	}
out_clear:
	fscrypt_put_encryption_info(inode);
	clear_inode(inode);
}

/* caller should call f2fs_lock_op() */
void f2fs_handle_failed_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct node_info ni;
	int err;

	/*
	 * clear nlink of inode in order to release resource of inode
	 * immediately.
	 */
	clear_nlink(inode);

	/*
	 * we must call this to avoid the inode remaining dirty, which would
	 * cause a panic when flushing dirty inodes in gdirty_list.
	 */
	f2fs_update_inode_page(inode);
	f2fs_inode_synced(inode);

	/* don't make a bad inode, since it becomes a regular file. */
	unlock_new_inode(inode);

	/*
	 * Note: we should add the inode to the orphan list before
	 * f2fs_unlock_op(), so we can prevent losing this orphan when
	 * encountering a checkpoint followed by sudden power-off.
	 */
	err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"May lose orphan inode, run fsck to fix.");
		goto out;
	}

	if (ni.blk_addr != NULL_ADDR) {
		err = f2fs_acquire_orphan_inode(sbi);
		if (err) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_msg(sbi->sb, KERN_WARNING,
				"Too many orphan inodes, run fsck to fix.");
		} else {
			f2fs_add_orphan_inode(inode);
		}
		f2fs_alloc_nid_done(sbi, inode->i_ino);
	} else {
		set_inode_flag(inode, FI_FREE_NID);
	}

out:
	f2fs_unlock_op(sbi);

	/* iput will drop the inode object */
	iput(inode);
}