/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"
static struct kmem_cache *fsync_entry_slab;
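/*
 * Roll-forward recovery replays dnodes written by fsync after the last
 * checkpoint. Before replaying anything, make sure the blocks recorded
 * since that checkpoint still fit inside the user-visible block budget.
 */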
bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
        if (sbi->last_valid_block_count + sbi->alloc_valid_block_count
                        > sbi->user_block_count)
                return false;
        return true;
}
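/* Find an already-collected fsync inode entry on @head by inode number. */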
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
                                                        nid_t ino)
{
        struct list_head *this;
        struct fsync_inode_entry *entry;

        list_for_each(this, head) {
                entry = list_entry(this, struct fsync_inode_entry, list);
                if (entry->inode->i_ino == ino)
                        return entry;
        }
        return NULL;
}
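/*
 * Re-link a recovered inode into its parent directory using the name stored
 * in the raw on-disk inode. If the name already points at a different
 * (stale) inode, that entry is deleted and the lookup is retried.
 */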
static int recover_dentry(struct page *ipage, struct inode *inode)
{
        struct f2fs_node *raw_node = F2FS_NODE(ipage);
        struct f2fs_inode *raw_inode = &(raw_node->i);
        nid_t pino = le32_to_cpu(raw_inode->i_pino);
        struct f2fs_dir_entry *de;
        struct qstr name;
        struct page *page;
        struct inode *dir, *einode;
        int err = 0;

        /* reuse the parent directory if it is already on the dirty list */
        dir = check_dirty_dir_inode(F2FS_SB(inode->i_sb), pino);
        if (!dir) {
                dir = f2fs_iget(inode->i_sb, pino);
                if (IS_ERR(dir)) {
                        err = PTR_ERR(dir);
                        goto out;
                }
                set_inode_flag(F2FS_I(dir), FI_DELAY_IPUT);
                add_dirty_dir_inode(dir);
        }

        name.len = le32_to_cpu(raw_inode->i_namelen);
        name.name = raw_inode->i_name;
        if (unlikely(name.len > F2FS_NAME_LEN)) {
                err = -ENAMETOOLONG;
                goto out;
        }
retry:
        de = f2fs_find_entry(dir, &name, &page);
        if (de && inode->i_ino == le32_to_cpu(de->ino))
                goto out_unmap_put;
        if (de) {
                /* the name points at a stale inode: drop it and retry */
                einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
                if (IS_ERR(einode)) {
                        if (PTR_ERR(einode) == -ENOENT)
                                err = -EEXIST;
                        goto out_unmap_put;
                }
                err = acquire_orphan_inode(F2FS_SB(inode->i_sb));
                if (err) {
                        iput(einode);
                        goto out_unmap_put;
                }
                f2fs_delete_entry(de, page, einode);
                iput(einode);
                goto retry;
        }
        err = __f2fs_add_link(dir, &name, inode);
        goto out;
out_unmap_put:
        kunmap(page);
        f2fs_put_page(page, 0);
out:
        f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode and its dentry: "
                        "ino = %x, name = %s, dir = %lx, err = %d",
                        ino_of_node(ipage), raw_inode->i_name,
                        IS_ERR(dir) ? 0 : dir->i_ino, err);
        return err;
}
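/*
 * Restore the basic metadata (mode, size, timestamps) of a recovered inode
 * from the raw on-disk inode, and re-create its dentry if the node page
 * carries one.
 */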
static int recover_inode(struct inode *inode, struct page *node_page)
{
        struct f2fs_node *raw_node = F2FS_NODE(node_page);
        struct f2fs_inode *raw_inode = &(raw_node->i);

        if (!IS_INODE(node_page))
                return 0;

        inode->i_mode = le16_to_cpu(raw_inode->i_mode);
        i_size_write(inode, le64_to_cpu(raw_inode->i_size));
        inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
        inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
        inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
        inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
        inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
        inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);

        if (is_dent_dnode(node_page))
                return recover_dentry(node_page, inode);

        f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
                        ino_of_node(node_page), raw_inode->i_name);
        return 0;
}
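/*
 * Step #1 of roll-forward recovery: walk the warm node chain that starts at
 * the block right after the last checkpointed position, and collect every
 * inode that has fsync'ed dnodes written since that checkpoint. The walk
 * stops as soon as a node page carries a checkpoint version different from
 * the current one.
 */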
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
        unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
        struct curseg_info *curseg;
        struct page *page;
        block_t blkaddr;
        int err = 0;

        /* get node pages in the current segment */
        curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
        blkaddr = START_BLOCK(sbi, curseg->segno) + curseg->next_blkoff;

        page = alloc_page(GFP_F2FS_ZERO);
        if (!page)
                return -ENOMEM;
        lock_page(page);

        while (1) {
                struct fsync_inode_entry *entry;

                err = f2fs_submit_page_bio(sbi, page, blkaddr, READ_SYNC);
                if (err)
                        break;

                /* wait for the read: the end_io handler unlocks the page */
                lock_page(page);

                if (cp_ver != cpver_of_node(page))
                        break;
                if (!is_fsync_dnode(page))
                        goto next;

                entry = get_fsync_inode(head, ino_of_node(page));
                if (entry) {
                        if (IS_INODE(page) && is_dent_dnode(page))
                                set_inode_flag(F2FS_I(entry->inode),
                                                        FI_INC_LINK);
                } else {
                        if (IS_INODE(page) && is_dent_dnode(page)) {
                                err = recover_inode_page(sbi, page);
                                if (err)
                                        break;
                        }

                        /* add this fsync inode to the list */
                        entry = kmem_cache_alloc(fsync_entry_slab, GFP_NOFS);
                        if (!entry) {
                                err = -ENOMEM;
                                break;
                        }
                        entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
                        if (IS_ERR(entry->inode)) {
                                err = PTR_ERR(entry->inode);
                                kmem_cache_free(fsync_entry_slab, entry);
                                break;
                        }
                        list_add_tail(&entry->list, head);
                }
                entry->blkaddr = blkaddr;

                err = recover_inode(entry->inode, page);
                if (err && err != -ENOENT)
                        break;
next:
                /* check next segment */
                blkaddr = next_blkaddr_of_node(page);
        }

        unlock_page(page);
        __free_pages(page, 0);
        return err;
}
static void destroy_fsync_dnodes(struct list_head *head)
{
        struct fsync_inode_entry *entry, *tmp;

        list_for_each_entry_safe(entry, tmp, head, list) {
                iput(entry->inode);
                list_del(&entry->list);
                kmem_cache_free(fsync_entry_slab, entry);
        }
}
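/*
 * A block address about to be replayed may still be claimed by an older
 * node (its bit is set in the segment's valid map). Locate that previous
 * owner through the segment summary and punch out its stale index, so the
 * block can be safely reassigned to the recovered file.
 */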
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
                        block_t blkaddr, struct dnode_of_data *dn)
{
        struct seg_entry *sentry;
        unsigned int segno = GET_SEGNO(sbi, blkaddr);
        unsigned short blkoff = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) &
                                        (sbi->blocks_per_seg - 1);
        struct f2fs_summary sum;
        nid_t ino, nid;
        void *kaddr;
        struct inode *inode;
        struct page *node_page;
        unsigned int offset;
        block_t bidx;
        int i;

        sentry = get_seg_entry(sbi, segno);
        if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
                return 0;

        /* Get the previous summary */
        for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
                struct curseg_info *curseg = CURSEG_I(sbi, i);
                if (curseg->segno == segno) {
                        sum = curseg->sum_blk->entries[blkoff];
                        break;
                }
        }
        if (i > CURSEG_COLD_DATA) {
                struct page *sum_page = get_sum_page(sbi, segno);
                struct f2fs_summary_block *sum_node;
                kaddr = page_address(sum_page);
                sum_node = (struct f2fs_summary_block *)kaddr;
                sum = sum_node->entries[blkoff];
                f2fs_put_page(sum_page, 1);
        }

        /* Use the locked dnode page and inode */
        nid = le32_to_cpu(sum.nid);
        if (dn->inode->i_ino == nid) {
                struct dnode_of_data tdn = *dn;
                tdn.nid = nid;
                tdn.node_page = dn->inode_page;
                tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
                truncate_data_blocks_range(&tdn, 1);
                return 0;
        } else if (dn->nid == nid) {
                struct dnode_of_data tdn = *dn;
                tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
                truncate_data_blocks_range(&tdn, 1);
                return 0;
        }

        /* Get the node page */
        node_page = get_node_page(sbi, nid);
        if (IS_ERR(node_page))
                return PTR_ERR(node_page);

        offset = ofs_of_node(node_page);
        ino = ino_of_node(node_page);
        f2fs_put_page(node_page, 1);

        /* Deallocate previous index in the node page */
        inode = f2fs_iget(sbi->sb, ino);
        if (IS_ERR(inode))
                return PTR_ERR(inode);

        bidx = start_bidx_of_node(offset, F2FS_I(inode)) +
                                        le16_to_cpu(sum.ofs_in_node);

        truncate_hole(inode, bidx, bidx + 1);
        iput(inode);
        return 0;
}
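/*
 * Replay one fsync'ed dnode: for every block index covered by @page, if the
 * address recorded at fsync time differs from what the checkpointed tree
 * holds, reclaim the destination block from any previous owner and point
 * this file's dnode at it, then write the dnode back in place.
 */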
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
                                        struct page *page, block_t blkaddr)
{
        struct f2fs_inode_info *fi = F2FS_I(inode);
        unsigned int start, end;
        struct dnode_of_data dn;
        struct f2fs_summary sum;
        struct node_info ni;
        int err = 0, recovered = 0;

        start = start_bidx_of_node(ofs_of_node(page), fi);
        if (IS_INODE(page))
                end = start + ADDRS_PER_INODE(fi);
        else
                end = start + ADDRS_PER_BLOCK;

        f2fs_lock_op(sbi);
        set_new_dnode(&dn, inode, NULL, NULL, 0);

        err = get_dnode_of_data(&dn, start, ALLOC_NODE);
        if (err) {
                f2fs_unlock_op(sbi);
                return err;
        }

        wait_on_page_writeback(dn.node_page);

        get_node_info(sbi, dn.nid, &ni);
        f2fs_bug_on(ni.ino != ino_of_node(page));
        f2fs_bug_on(ofs_of_node(dn.node_page) != ofs_of_node(page));

        for (; start < end; start++) {
                block_t src, dest;

                src = datablock_addr(dn.node_page, dn.ofs_in_node);
                dest = datablock_addr(page, dn.ofs_in_node);

                if (src != dest && dest != NEW_ADDR && dest != NULL_ADDR) {
                        if (src == NULL_ADDR) {
                                err = reserve_new_block(&dn);
                                /* We should not get -ENOSPC */
                                f2fs_bug_on(err);
                        }

                        /* Check the previous node page having this index */
                        err = check_index_in_prev_nodes(sbi, dest, &dn);
                        if (err)
                                goto err;

                        set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

                        /* write dummy data page */
                        recover_data_page(sbi, NULL, &sum, src, dest);
                        update_extent_cache(dest, &dn);
                        recovered++;
                }
                dn.ofs_in_node++;
        }

        /* write node page in place */
        set_summary(&sum, dn.nid, 0, 0);
        if (IS_INODE(dn.node_page))
                sync_inode_page(&dn);

        copy_node_footer(dn.node_page, page);
        fill_node_footer(dn.node_page, dn.nid, ni.ino,
                                        ofs_of_node(page), false);
        set_page_dirty(dn.node_page);

        recover_node_page(sbi, dn.node_page, &sum, &ni, blkaddr);
err:
        f2fs_put_dnode(&dn);
        f2fs_unlock_op(sbi);

        f2fs_msg(sbi->sb, KERN_NOTICE, "recover_data: ino = %lx, "
                        "recovered_data = %d blocks, err = %d",
                        inode->i_ino, recovered, err);
        return err;
}
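/*
 * Step #2 of roll-forward recovery: walk the same warm node chain again and,
 * for every dnode that belongs to an inode collected in step #1, replay its
 * block addresses via do_recover_data(). An entry is released once the last
 * dnode recorded for that inode has been processed.
 */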
static int recover_data(struct f2fs_sb_info *sbi,
                                struct list_head *head, int type)
{
        unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
        struct curseg_info *curseg;
        struct page *page;
        block_t blkaddr;
        int err = 0;

        /* get node pages in the current segment */
        curseg = CURSEG_I(sbi, type);
        blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

        page = alloc_page(GFP_F2FS_ZERO);
        if (!page)
                return -ENOMEM;
        lock_page(page);

        while (1) {
                struct fsync_inode_entry *entry;

                err = f2fs_submit_page_bio(sbi, page, blkaddr, READ_SYNC);
                if (err)
                        break;

                lock_page(page);

                if (cp_ver != cpver_of_node(page))
                        break;

                entry = get_fsync_inode(head, ino_of_node(page));
                if (!entry)
                        goto next;

                err = do_recover_data(sbi, entry->inode, page, blkaddr);
                if (err)
                        break;

                if (entry->blkaddr == blkaddr) {
                        iput(entry->inode);
                        list_del(&entry->list);
                        kmem_cache_free(fsync_entry_slab, entry);
                }
next:
                /* check next segment */
                blkaddr = next_blkaddr_of_node(page);
        }

        unlock_page(page);
        __free_pages(page, 0);

        if (!err)
                allocate_new_segments(sbi);
        return err;
}
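/*
 * Entry point for roll-forward recovery, called from the mount path once the
 * checkpoint has been loaded. A rough sketch of how a caller uses it
 * (illustrative only, not the exact super.c code):
 *
 *	err = recover_fsync_data(sbi);
 *	if (err)
 *		f2fs_msg(sb, KERN_ERR, "cannot recover fsync data");
 *
 * Step #1 collects inodes that have fsync'ed dnodes newer than the last
 * checkpoint; step #2 replays their data blocks; finally a checkpoint is
 * written if anything was recovered.
 */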
int recover_fsync_data(struct f2fs_sb_info *sbi)
{
        struct list_head inode_list;
        int err;
        bool need_writecp = false;

        fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
                        sizeof(struct fsync_inode_entry), NULL);
        if (!fsync_entry_slab)
                return -ENOMEM;

        INIT_LIST_HEAD(&inode_list);

        /* step #1: find fsynced inode numbers */
        sbi->por_doing = true;
        err = find_fsync_dnodes(sbi, &inode_list);
        if (err)
                goto out;

        if (list_empty(&inode_list))
                goto out;

        need_writecp = true;

        /* step #2: recover data */
        err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
        f2fs_bug_on(!list_empty(&inode_list));
out:
        destroy_fsync_dnodes(&inode_list);
        kmem_cache_destroy(fsync_entry_slab);
        sbi->por_doing = false;
        if (!err && need_writecp)
                write_checkpoint(sbi, false);
        return err;
}