1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * fs/f2fs/node.c
4  *
5  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6  *             http://www.samsung.com/
7  */
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/mpage.h>
11 #include <linux/sched/mm.h>
12 #include <linux/blkdev.h>
13 #include <linux/pagevec.h>
14 #include <linux/swap.h>
15
16 #include "f2fs.h"
17 #include "node.h"
18 #include "segment.h"
19 #include "xattr.h"
20 #include "iostat.h"
21 #include <trace/events/f2fs.h>
22
23 #define on_f2fs_build_free_nids(nmi) mutex_is_locked(&(nmi)->build_lock)
24
25 static struct kmem_cache *nat_entry_slab;
26 static struct kmem_cache *free_nid_slab;
27 static struct kmem_cache *nat_entry_set_slab;
28 static struct kmem_cache *fsync_node_entry_slab;
29
30 /*
31  * Check whether the given nid is within node id range.
32  */
33 int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
34 {
35         if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
36                 set_sbi_flag(sbi, SBI_NEED_FSCK);
37                 f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.",
38                           __func__, nid);
39                 f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
40                 return -EFSCORRUPTED;
41         }
42         return 0;
43 }
44
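/*
 * Decide whether the in-memory cache of @type may keep growing: compare its
 * current footprint (in pages) against a share of available low memory
 * scaled by ram_thresh, per the percentages noted below.
 */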
45 bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
46 {
47         struct f2fs_nm_info *nm_i = NM_I(sbi);
48         struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
49         struct sysinfo val;
50         unsigned long avail_ram;
51         unsigned long mem_size = 0;
52         bool res = false;
53
54         if (!nm_i)
55                 return true;
56
57         si_meminfo(&val);
58
59         /* only uses low memory */
60         avail_ram = val.totalram - val.totalhigh;
61
62         /*
63          * give 25%, 25%, 50%, 50%, 25%, 25% of memory to each component, respectively
64          */
65         if (type == FREE_NIDS) {
66                 mem_size = (nm_i->nid_cnt[FREE_NID] *
67                                 sizeof(struct free_nid)) >> PAGE_SHIFT;
68                 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
69         } else if (type == NAT_ENTRIES) {
70                 mem_size = (nm_i->nat_cnt[TOTAL_NAT] *
71                                 sizeof(struct nat_entry)) >> PAGE_SHIFT;
72                 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
73                 if (excess_cached_nats(sbi))
74                         res = false;
75         } else if (type == DIRTY_DENTS) {
76                 if (sbi->sb->s_bdi->wb.dirty_exceeded)
77                         return false;
78                 mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
79                 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
80         } else if (type == INO_ENTRIES) {
81                 int i;
82
83                 for (i = 0; i < MAX_INO_ENTRY; i++)
84                         mem_size += sbi->im[i].ino_num *
85                                                 sizeof(struct ino_entry);
86                 mem_size >>= PAGE_SHIFT;
87                 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
88         } else if (type == READ_EXTENT_CACHE || type == AGE_EXTENT_CACHE) {
89                 enum extent_type etype = type == READ_EXTENT_CACHE ?
90                                                 EX_READ : EX_BLOCK_AGE;
91                 struct extent_tree_info *eti = &sbi->extent_tree[etype];
92
93                 mem_size = (atomic_read(&eti->total_ext_tree) *
94                                 sizeof(struct extent_tree) +
95                                 atomic_read(&eti->total_ext_node) *
96                                 sizeof(struct extent_node)) >> PAGE_SHIFT;
97                 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
98         } else if (type == DISCARD_CACHE) {
99                 mem_size = (atomic_read(&dcc->discard_cmd_cnt) *
100                                 sizeof(struct discard_cmd)) >> PAGE_SHIFT;
101                 res = mem_size < (avail_ram * nm_i->ram_thresh / 100);
102         } else if (type == COMPRESS_PAGE) {
103 #ifdef CONFIG_F2FS_FS_COMPRESSION
104                 unsigned long free_ram = val.freeram;
105
106                 /*
107                  * If free memory is below the watermark or the cached page
108                  * count exceeds its threshold, deny caching compressed pages.
109                  */
110                 res = (free_ram > avail_ram * sbi->compress_watermark / 100) &&
111                         (COMPRESS_MAPPING(sbi)->nrpages <
112                          free_ram * sbi->compress_percent / 100);
113 #else
114                 res = false;
115 #endif
116         } else {
117                 if (!sbi->sb->s_bdi->wb.dirty_exceeded)
118                         return true;
119         }
120         return res;
121 }
122
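/* drop the dirty state of a node page along with its dirty-node accounting */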
123 static void clear_node_page_dirty(struct page *page)
124 {
125         if (PageDirty(page)) {
126                 f2fs_clear_page_cache_dirty_tag(page);
127                 clear_page_dirty_for_io(page);
128                 dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
129         }
130         ClearPageUptodate(page);
131 }
132
133 static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
134 {
135         return f2fs_get_meta_page_retry(sbi, current_nat_addr(sbi, nid));
136 }
137
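/*
 * Copy the current NAT block into its alternate location and flip the
 * version bitmap, so the next checkpoint writes out the updated copy.
 */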
138 static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
139 {
140         struct page *src_page;
141         struct page *dst_page;
142         pgoff_t dst_off;
143         void *src_addr;
144         void *dst_addr;
145         struct f2fs_nm_info *nm_i = NM_I(sbi);
146
147         dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));
148
149         /* get current nat block page with lock */
150         src_page = get_current_nat_page(sbi, nid);
151         if (IS_ERR(src_page))
152                 return src_page;
153         dst_page = f2fs_grab_meta_page(sbi, dst_off);
154         f2fs_bug_on(sbi, PageDirty(src_page));
155
156         src_addr = page_address(src_page);
157         dst_addr = page_address(dst_page);
158         memcpy(dst_addr, src_addr, PAGE_SIZE);
159         set_page_dirty(dst_page);
160         f2fs_put_page(src_page, 1);
161
162         set_to_next_nat(nm_i, nid);
163
164         return dst_page;
165 }
166
167 static struct nat_entry *__alloc_nat_entry(struct f2fs_sb_info *sbi,
168                                                 nid_t nid, bool no_fail)
169 {
170         struct nat_entry *new;
171
172         new = f2fs_kmem_cache_alloc(nat_entry_slab,
173                                         GFP_F2FS_ZERO, no_fail, sbi);
174         if (new) {
175                 nat_set_nid(new, nid);
176                 nat_reset_flag(new);
177         }
178         return new;
179 }
180
181 static void __free_nat_entry(struct nat_entry *e)
182 {
183         kmem_cache_free(nat_entry_slab, e);
184 }
185
186 /* must be locked by nat_tree_lock */
187 static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
188         struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail)
189 {
190         if (no_fail)
191                 f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
192         else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
193                 return NULL;
194
195         if (raw_ne)
196                 node_info_from_raw_nat(&ne->ni, raw_ne);
197
198         spin_lock(&nm_i->nat_list_lock);
199         list_add_tail(&ne->list, &nm_i->nat_entries);
200         spin_unlock(&nm_i->nat_list_lock);
201
202         nm_i->nat_cnt[TOTAL_NAT]++;
203         nm_i->nat_cnt[RECLAIMABLE_NAT]++;
204         return ne;
205 }
206
207 static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
208 {
209         struct nat_entry *ne;
210
211         ne = radix_tree_lookup(&nm_i->nat_root, n);
212
213         /* for a recently accessed nat entry, move it to the tail of the LRU list */
214         if (ne && !get_nat_flag(ne, IS_DIRTY)) {
215                 spin_lock(&nm_i->nat_list_lock);
216                 if (!list_empty(&ne->list))
217                         list_move_tail(&ne->list, &nm_i->nat_entries);
218                 spin_unlock(&nm_i->nat_list_lock);
219         }
220
221         return ne;
222 }
223
224 static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
225                 nid_t start, unsigned int nr, struct nat_entry **ep)
226 {
227         return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
228 }
229
230 static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
231 {
232         radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
233         nm_i->nat_cnt[TOTAL_NAT]--;
234         nm_i->nat_cnt[RECLAIMABLE_NAT]--;
235         __free_nat_entry(e);
236 }
237
238 static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
239                                                         struct nat_entry *ne)
240 {
241         nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
242         struct nat_entry_set *head;
243
244         head = radix_tree_lookup(&nm_i->nat_set_root, set);
245         if (!head) {
246                 head = f2fs_kmem_cache_alloc(nat_entry_set_slab,
247                                                 GFP_NOFS, true, NULL);
248
249                 INIT_LIST_HEAD(&head->entry_list);
250                 INIT_LIST_HEAD(&head->set_list);
251                 head->set = set;
252                 head->entry_cnt = 0;
253                 f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
254         }
255         return head;
256 }
257
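/*
 * Mark a nat entry dirty and move it onto the dirty set for its NAT block so
 * it is flushed at checkpoint; entries still at NEW_ADDR are detached from
 * the LRU instead of being added to a set.
 */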
258 static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
259                                                 struct nat_entry *ne)
260 {
261         struct nat_entry_set *head;
262         bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR;
263
264         if (!new_ne)
265                 head = __grab_nat_entry_set(nm_i, ne);
266
267         /*
268          * update entry_cnt under either condition:
269          * 1. updating NEW_ADDR to a valid block address;
270          * 2. updating an old block address to a new one;
271          */
272         if (!new_ne && (get_nat_flag(ne, IS_PREALLOC) ||
273                                 !get_nat_flag(ne, IS_DIRTY)))
274                 head->entry_cnt++;
275
276         set_nat_flag(ne, IS_PREALLOC, new_ne);
277
278         if (get_nat_flag(ne, IS_DIRTY))
279                 goto refresh_list;
280
281         nm_i->nat_cnt[DIRTY_NAT]++;
282         nm_i->nat_cnt[RECLAIMABLE_NAT]--;
283         set_nat_flag(ne, IS_DIRTY, true);
284 refresh_list:
285         spin_lock(&nm_i->nat_list_lock);
286         if (new_ne)
287                 list_del_init(&ne->list);
288         else
289                 list_move_tail(&ne->list, &head->entry_list);
290         spin_unlock(&nm_i->nat_list_lock);
291 }
292
293 static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
294                 struct nat_entry_set *set, struct nat_entry *ne)
295 {
296         spin_lock(&nm_i->nat_list_lock);
297         list_move_tail(&ne->list, &nm_i->nat_entries);
298         spin_unlock(&nm_i->nat_list_lock);
299
300         set_nat_flag(ne, IS_DIRTY, false);
301         set->entry_cnt--;
302         nm_i->nat_cnt[DIRTY_NAT]--;
303         nm_i->nat_cnt[RECLAIMABLE_NAT]++;
304 }
305
306 static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
307                 nid_t start, unsigned int nr, struct nat_entry_set **ep)
308 {
309         return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
310                                                         start, nr);
311 }
312
313 bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page)
314 {
315         return NODE_MAPPING(sbi) == page->mapping &&
316                         IS_DNODE(page) && is_cold_node(page);
317 }
318
319 void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
320 {
321         spin_lock_init(&sbi->fsync_node_lock);
322         INIT_LIST_HEAD(&sbi->fsync_node_list);
323         sbi->fsync_seg_id = 0;
324         sbi->fsync_node_num = 0;
325 }
326
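/*
 * Pin @page and queue it on the fsync node list; the returned sequence id
 * lets the fsync path wait for this node's writeback in order.
 */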
327 static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
328                                                         struct page *page)
329 {
330         struct fsync_node_entry *fn;
331         unsigned long flags;
332         unsigned int seq_id;
333
334         fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab,
335                                         GFP_NOFS, true, NULL);
336
337         get_page(page);
338         fn->page = page;
339         INIT_LIST_HEAD(&fn->list);
340
341         spin_lock_irqsave(&sbi->fsync_node_lock, flags);
342         list_add_tail(&fn->list, &sbi->fsync_node_list);
343         fn->seq_id = sbi->fsync_seg_id++;
344         seq_id = fn->seq_id;
345         sbi->fsync_node_num++;
346         spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
347
348         return seq_id;
349 }
350
351 void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
352 {
353         struct fsync_node_entry *fn;
354         unsigned long flags;
355
356         spin_lock_irqsave(&sbi->fsync_node_lock, flags);
357         list_for_each_entry(fn, &sbi->fsync_node_list, list) {
358                 if (fn->page == page) {
359                         list_del(&fn->list);
360                         sbi->fsync_node_num--;
361                         spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
362                         kmem_cache_free(fsync_node_entry_slab, fn);
363                         put_page(page);
364                         return;
365                 }
366         }
367         spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
368         f2fs_bug_on(sbi, 1);
369 }
370
371 void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi)
372 {
373         unsigned long flags;
374
375         spin_lock_irqsave(&sbi->fsync_node_lock, flags);
376         sbi->fsync_seg_id = 0;
377         spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
378 }
379
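/*
 * Return true when the node's cached nat entry shows it was neither
 * checkpointed nor covered by an fsynced inode, i.e. a dentry mark is
 * needed for recovery.
 */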
380 int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
381 {
382         struct f2fs_nm_info *nm_i = NM_I(sbi);
383         struct nat_entry *e;
384         bool need = false;
385
386         f2fs_down_read(&nm_i->nat_tree_lock);
387         e = __lookup_nat_cache(nm_i, nid);
388         if (e) {
389                 if (!get_nat_flag(e, IS_CHECKPOINTED) &&
390                                 !get_nat_flag(e, HAS_FSYNCED_INODE))
391                         need = true;
392         }
393         f2fs_up_read(&nm_i->nat_tree_lock);
394         return need;
395 }
396
397 bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
398 {
399         struct f2fs_nm_info *nm_i = NM_I(sbi);
400         struct nat_entry *e;
401         bool is_cp = true;
402
403         f2fs_down_read(&nm_i->nat_tree_lock);
404         e = __lookup_nat_cache(nm_i, nid);
405         if (e && !get_nat_flag(e, IS_CHECKPOINTED))
406                 is_cp = false;
407         f2fs_up_read(&nm_i->nat_tree_lock);
408         return is_cp;
409 }
410
411 bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
412 {
413         struct f2fs_nm_info *nm_i = NM_I(sbi);
414         struct nat_entry *e;
415         bool need_update = true;
416
417         f2fs_down_read(&nm_i->nat_tree_lock);
418         e = __lookup_nat_cache(nm_i, ino);
419         if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
420                         (get_nat_flag(e, IS_CHECKPOINTED) ||
421                          get_nat_flag(e, HAS_FSYNCED_INODE)))
422                 need_update = false;
423         f2fs_up_read(&nm_i->nat_tree_lock);
424         return need_update;
425 }
426
427 /* takes nat_tree_lock internally; do not call with it held */
428 static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
429                                                 struct f2fs_nat_entry *ne)
430 {
431         struct f2fs_nm_info *nm_i = NM_I(sbi);
432         struct nat_entry *new, *e;
433
434         /* Let's mitigate lock contention of nat_tree_lock during checkpoint */
435         if (f2fs_rwsem_is_locked(&sbi->cp_global_sem))
436                 return;
437
438         new = __alloc_nat_entry(sbi, nid, false);
439         if (!new)
440                 return;
441
442         f2fs_down_write(&nm_i->nat_tree_lock);
443         e = __lookup_nat_cache(nm_i, nid);
444         if (!e)
445                 e = __init_nat_entry(nm_i, new, ne, false);
446         else
447                 f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
448                                 nat_get_blkaddr(e) !=
449                                         le32_to_cpu(ne->block_addr) ||
450                                 nat_get_version(e) != ne->version);
451         f2fs_up_write(&nm_i->nat_tree_lock);
452         if (e != new)
453                 __free_nat_entry(new);
454 }
455
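/*
 * Update (or create) the cached nat entry for @ni with @new_blkaddr, bump
 * the version when the node is being freed, mark the entry dirty, and
 * refresh the fsync flags on the owning inode's entry.
 */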
456 static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
457                         block_t new_blkaddr, bool fsync_done)
458 {
459         struct f2fs_nm_info *nm_i = NM_I(sbi);
460         struct nat_entry *e;
461         struct nat_entry *new = __alloc_nat_entry(sbi, ni->nid, true);
462
463         f2fs_down_write(&nm_i->nat_tree_lock);
464         e = __lookup_nat_cache(nm_i, ni->nid);
465         if (!e) {
466                 e = __init_nat_entry(nm_i, new, NULL, true);
467                 copy_node_info(&e->ni, ni);
468                 f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
469         } else if (new_blkaddr == NEW_ADDR) {
470                 /*
471                  * when a nid is reallocated, the previous nat entry can
472                  * remain in the nat cache, so reinitialize it with the
473                  * new information.
474                  */
475                 copy_node_info(&e->ni, ni);
476                 f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
477         }
478         /* let's free early to reduce memory consumption */
479         if (e != new)
480                 __free_nat_entry(new);
481
482         /* sanity check */
483         f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
484         f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
485                         new_blkaddr == NULL_ADDR);
486         f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
487                         new_blkaddr == NEW_ADDR);
488         f2fs_bug_on(sbi, __is_valid_data_blkaddr(nat_get_blkaddr(e)) &&
489                         new_blkaddr == NEW_ADDR);
490
491         /* increment the version number as the node is removed */
492         if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
493                 unsigned char version = nat_get_version(e);
494
495                 nat_set_version(e, inc_node_version(version));
496         }
497
498         /* change address */
499         nat_set_blkaddr(e, new_blkaddr);
500         if (!__is_valid_data_blkaddr(new_blkaddr))
501                 set_nat_flag(e, IS_CHECKPOINTED, false);
502         __set_nat_cache_dirty(nm_i, e);
503
504         /* update fsync_mark if its inode nat entry is still alive */
505         if (ni->nid != ni->ino)
506                 e = __lookup_nat_cache(nm_i, ni->ino);
507         if (e) {
508                 if (fsync_done && ni->nid == ni->ino)
509                         set_nat_flag(e, HAS_FSYNCED_INODE, true);
510                 set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
511         }
512         f2fs_up_write(&nm_i->nat_tree_lock);
513 }
514
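/*
 * Shrinker helper: evict up to @nr_shrink clean nat entries from the head of
 * the LRU; returns how many were actually freed.
 */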
515 int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
516 {
517         struct f2fs_nm_info *nm_i = NM_I(sbi);
518         int nr = nr_shrink;
519
520         if (!f2fs_down_write_trylock(&nm_i->nat_tree_lock))
521                 return 0;
522
523         spin_lock(&nm_i->nat_list_lock);
524         while (nr_shrink) {
525                 struct nat_entry *ne;
526
527                 if (list_empty(&nm_i->nat_entries))
528                         break;
529
530                 ne = list_first_entry(&nm_i->nat_entries,
531                                         struct nat_entry, list);
532                 list_del(&ne->list);
533                 spin_unlock(&nm_i->nat_list_lock);
534
535                 __del_from_nat_cache(nm_i, ne);
536                 nr_shrink--;
537
538                 spin_lock(&nm_i->nat_list_lock);
539         }
540         spin_unlock(&nm_i->nat_list_lock);
541
542         f2fs_up_write(&nm_i->nat_tree_lock);
543         return nr - nr_shrink;
544 }
545
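/*
 * Resolve @nid to a node_info by checking, in order, the in-memory nat
 * cache, the journaled nat entries in the hot data curseg, and finally the
 * on-disk NAT block; the result is cached on success.
 */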
546 int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
547                                 struct node_info *ni, bool checkpoint_context)
548 {
549         struct f2fs_nm_info *nm_i = NM_I(sbi);
550         struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
551         struct f2fs_journal *journal = curseg->journal;
552         nid_t start_nid = START_NID(nid);
553         struct f2fs_nat_block *nat_blk;
554         struct page *page = NULL;
555         struct f2fs_nat_entry ne;
556         struct nat_entry *e;
557         pgoff_t index;
558         block_t blkaddr;
559         int i;
560
561         ni->nid = nid;
562 retry:
563         /* Check nat cache */
564         f2fs_down_read(&nm_i->nat_tree_lock);
565         e = __lookup_nat_cache(nm_i, nid);
566         if (e) {
567                 ni->ino = nat_get_ino(e);
568                 ni->blk_addr = nat_get_blkaddr(e);
569                 ni->version = nat_get_version(e);
570                 f2fs_up_read(&nm_i->nat_tree_lock);
571                 return 0;
572         }
573
574         /*
575          * Check the current segment summary by trying to grab journal_rwsem
576          * first. This rwsem is on the critical path of checkpoint, which also
577          * takes the above nat_tree_lock. So if we fail to grab it here, drop
578          * nat_tree_lock and retry rather than stalling the checkpoint.
579          */
580         if (!f2fs_rwsem_is_locked(&sbi->cp_global_sem) || checkpoint_context) {
581                 down_read(&curseg->journal_rwsem);
582         } else if (f2fs_rwsem_is_contended(&nm_i->nat_tree_lock) ||
583                                 !down_read_trylock(&curseg->journal_rwsem)) {
584                 f2fs_up_read(&nm_i->nat_tree_lock);
585                 goto retry;
586         }
587
588         i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
589         if (i >= 0) {
590                 ne = nat_in_journal(journal, i);
591                 node_info_from_raw_nat(ni, &ne);
592         }
593         up_read(&curseg->journal_rwsem);
594         if (i >= 0) {
595                 f2fs_up_read(&nm_i->nat_tree_lock);
596                 goto cache;
597         }
598
599         /* Fill node_info from nat page */
600         index = current_nat_addr(sbi, nid);
601         f2fs_up_read(&nm_i->nat_tree_lock);
602
603         page = f2fs_get_meta_page(sbi, index);
604         if (IS_ERR(page))
605                 return PTR_ERR(page);
606
607         nat_blk = (struct f2fs_nat_block *)page_address(page);
608         ne = nat_blk->entries[nid - start_nid];
609         node_info_from_raw_nat(ni, &ne);
610         f2fs_put_page(page, 1);
611 cache:
612         blkaddr = le32_to_cpu(ne.block_addr);
613         if (__is_valid_data_blkaddr(blkaddr) &&
614                 !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE))
615                 return -EFAULT;
616
617         /* cache nat entry */
618         cache_nat_entry(sbi, nid, &ne);
619         return 0;
620 }
621
622 /*
623  * readahead MAX_RA_NODE number of node pages.
624  */
625 static void f2fs_ra_node_pages(struct page *parent, int start, int n)
626 {
627         struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
628         struct blk_plug plug;
629         int i, end;
630         nid_t nid;
631
632         blk_start_plug(&plug);
633
634         /* Then, try readahead for siblings of the desired node */
635         end = start + n;
636         end = min(end, (int)NIDS_PER_BLOCK);
637         for (i = start; i < end; i++) {
638                 nid = get_nid(parent, i, false);
639                 f2fs_ra_node_page(sbi, nid);
640         }
641
642         blk_finish_plug(&plug);
643 }
644
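/*
 * Given that a lookup stopped at @dn->cur_level, return the file offset of
 * the first block covered by the next node at that level, letting callers
 * skip over a hole without walking it block by block.
 */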
645 pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
646 {
647         const long direct_index = ADDRS_PER_INODE(dn->inode);
648         const long direct_blks = ADDRS_PER_BLOCK(dn->inode);
649         const long indirect_blks = ADDRS_PER_BLOCK(dn->inode) * NIDS_PER_BLOCK;
650         unsigned int skipped_unit = ADDRS_PER_BLOCK(dn->inode);
651         int cur_level = dn->cur_level;
652         int max_level = dn->max_level;
653         pgoff_t base = 0;
654
655         if (!dn->max_level)
656                 return pgofs + 1;
657
658         while (max_level-- > cur_level)
659                 skipped_unit *= NIDS_PER_BLOCK;
660
661         switch (dn->max_level) {
662         case 3:
663                 base += 2 * indirect_blks;
664                 fallthrough;
665         case 2:
666                 base += 2 * direct_blks;
667                 fallthrough;
668         case 1:
669                 base += direct_index;
670                 break;
671         default:
672                 f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
673         }
674
675         return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
676 }
677
678 /*
679  * The maximum depth is four.
680  * Offset[0] will have raw inode offset.
681  */
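/*
 * A sketch of the mapping (assuming the default layout): offsets walk through
 * the inode's direct pointers, then NODE_DIR1/NODE_DIR2 (direct node blocks),
 * NODE_IND1/NODE_IND2 (single indirect), and finally NODE_DIND (double
 * indirect), matching the if-chain below.
 */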
682 static int get_node_path(struct inode *inode, long block,
683                                 int offset[4], unsigned int noffset[4])
684 {
685         const long direct_index = ADDRS_PER_INODE(inode);
686         const long direct_blks = ADDRS_PER_BLOCK(inode);
687         const long dptrs_per_blk = NIDS_PER_BLOCK;
688         const long indirect_blks = ADDRS_PER_BLOCK(inode) * NIDS_PER_BLOCK;
689         const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
690         int n = 0;
691         int level = 0;
692
693         noffset[0] = 0;
694
695         if (block < direct_index) {
696                 offset[n] = block;
697                 goto got;
698         }
699         block -= direct_index;
700         if (block < direct_blks) {
701                 offset[n++] = NODE_DIR1_BLOCK;
702                 noffset[n] = 1;
703                 offset[n] = block;
704                 level = 1;
705                 goto got;
706         }
707         block -= direct_blks;
708         if (block < direct_blks) {
709                 offset[n++] = NODE_DIR2_BLOCK;
710                 noffset[n] = 2;
711                 offset[n] = block;
712                 level = 1;
713                 goto got;
714         }
715         block -= direct_blks;
716         if (block < indirect_blks) {
717                 offset[n++] = NODE_IND1_BLOCK;
718                 noffset[n] = 3;
719                 offset[n++] = block / direct_blks;
720                 noffset[n] = 4 + offset[n - 1];
721                 offset[n] = block % direct_blks;
722                 level = 2;
723                 goto got;
724         }
725         block -= indirect_blks;
726         if (block < indirect_blks) {
727                 offset[n++] = NODE_IND2_BLOCK;
728                 noffset[n] = 4 + dptrs_per_blk;
729                 offset[n++] = block / direct_blks;
730                 noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
731                 offset[n] = block % direct_blks;
732                 level = 2;
733                 goto got;
734         }
735         block -= indirect_blks;
736         if (block < dindirect_blks) {
737                 offset[n++] = NODE_DIND_BLOCK;
738                 noffset[n] = 5 + (dptrs_per_blk * 2);
739                 offset[n++] = block / indirect_blks;
740                 noffset[n] = 6 + (dptrs_per_blk * 2) +
741                               offset[n - 1] * (dptrs_per_blk + 1);
742                 offset[n++] = (block / direct_blks) % dptrs_per_blk;
743                 noffset[n] = 7 + (dptrs_per_blk * 2) +
744                               offset[n - 2] * (dptrs_per_blk + 1) +
745                               offset[n - 1];
746                 offset[n] = block % direct_blks;
747                 level = 3;
748                 goto got;
749         } else {
750                 return -E2BIG;
751         }
752 got:
753         return level;
754 }
755
756 /*
757  * Caller should call f2fs_put_dnode(dn).
758  * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
759  * f2fs_unlock_op() only if mode is set with ALLOC_NODE.
760  */
761 int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
762 {
763         struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
764         struct page *npage[4];
765         struct page *parent = NULL;
766         int offset[4];
767         unsigned int noffset[4];
768         nid_t nids[4];
769         int level, i = 0;
770         int err = 0;
771
772         level = get_node_path(dn->inode, index, offset, noffset);
773         if (level < 0)
774                 return level;
775
776         nids[0] = dn->inode->i_ino;
777         npage[0] = dn->inode_page;
778
779         if (!npage[0]) {
780                 npage[0] = f2fs_get_node_page(sbi, nids[0]);
781                 if (IS_ERR(npage[0]))
782                         return PTR_ERR(npage[0]);
783         }
784
785         /* if inline_data is set, should not report any block indices */
786         if (f2fs_has_inline_data(dn->inode) && index) {
787                 err = -ENOENT;
788                 f2fs_put_page(npage[0], 1);
789                 goto release_out;
790         }
791
792         parent = npage[0];
793         if (level != 0)
794                 nids[1] = get_nid(parent, offset[0], true);
795         dn->inode_page = npage[0];
796         dn->inode_page_locked = true;
797
798         /* get indirect or direct nodes */
799         for (i = 1; i <= level; i++) {
800                 bool done = false;
801
802                 if (!nids[i] && mode == ALLOC_NODE) {
803                         /* alloc new node */
804                         if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
805                                 err = -ENOSPC;
806                                 goto release_pages;
807                         }
808
809                         dn->nid = nids[i];
810                         npage[i] = f2fs_new_node_page(dn, noffset[i]);
811                         if (IS_ERR(npage[i])) {
812                                 f2fs_alloc_nid_failed(sbi, nids[i]);
813                                 err = PTR_ERR(npage[i]);
814                                 goto release_pages;
815                         }
816
817                         set_nid(parent, offset[i - 1], nids[i], i == 1);
818                         f2fs_alloc_nid_done(sbi, nids[i]);
819                         done = true;
820                 } else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
821                         npage[i] = f2fs_get_node_page_ra(parent, offset[i - 1]);
822                         if (IS_ERR(npage[i])) {
823                                 err = PTR_ERR(npage[i]);
824                                 goto release_pages;
825                         }
826                         done = true;
827                 }
828                 if (i == 1) {
829                         dn->inode_page_locked = false;
830                         unlock_page(parent);
831                 } else {
832                         f2fs_put_page(parent, 1);
833                 }
834
835                 if (!done) {
836                         npage[i] = f2fs_get_node_page(sbi, nids[i]);
837                         if (IS_ERR(npage[i])) {
838                                 err = PTR_ERR(npage[i]);
839                                 f2fs_put_page(npage[0], 0);
840                                 goto release_out;
841                         }
842                 }
843                 if (i < level) {
844                         parent = npage[i];
845                         nids[i + 1] = get_nid(parent, offset[i], false);
846                 }
847         }
848         dn->nid = nids[level];
849         dn->ofs_in_node = offset[level];
850         dn->node_page = npage[level];
851         dn->data_blkaddr = f2fs_data_blkaddr(dn);
852
853         if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
854                                         f2fs_sb_has_readonly(sbi)) {
855                 unsigned int c_len = f2fs_cluster_blocks_are_contiguous(dn);
856                 block_t blkaddr;
857
858                 if (!c_len)
859                         goto out;
860
861                 blkaddr = f2fs_data_blkaddr(dn);
862                 if (blkaddr == COMPRESS_ADDR)
863                         blkaddr = data_blkaddr(dn->inode, dn->node_page,
864                                                 dn->ofs_in_node + 1);
865
866                 f2fs_update_read_extent_tree_range_compressed(dn->inode,
867                                         index, blkaddr,
868                                         F2FS_I(dn->inode)->i_cluster_size,
869                                         c_len);
870         }
871 out:
872         return 0;
873
874 release_pages:
875         f2fs_put_page(parent, 1);
876         if (i > 1)
877                 f2fs_put_page(npage[0], 0);
878 release_out:
879         dn->inode_page = NULL;
880         dn->node_page = NULL;
881         if (err == -ENOENT) {
882                 dn->cur_level = i;
883                 dn->max_level = level;
884                 dn->ofs_in_node = offset[level];
885         }
886         return err;
887 }
888
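/*
 * Free the on-disk block backing dn->nid, drop node (and, for an inode, the
 * orphan/inode) accounting, then clear and invalidate the cached node page.
 */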
889 static int truncate_node(struct dnode_of_data *dn)
890 {
891         struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
892         struct node_info ni;
893         int err;
894         pgoff_t index;
895
896         err = f2fs_get_node_info(sbi, dn->nid, &ni, false);
897         if (err)
898                 return err;
899
900         /* Deallocate node address */
901         f2fs_invalidate_blocks(sbi, ni.blk_addr);
902         dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
903         set_node_addr(sbi, &ni, NULL_ADDR, false);
904
905         if (dn->nid == dn->inode->i_ino) {
906                 f2fs_remove_orphan_inode(sbi, dn->nid);
907                 dec_valid_inode_count(sbi);
908                 f2fs_inode_synced(dn->inode);
909         }
910
911         clear_node_page_dirty(dn->node_page);
912         set_sbi_flag(sbi, SBI_IS_DIRTY);
913
914         index = dn->node_page->index;
915         f2fs_put_page(dn->node_page, 1);
916
917         invalidate_mapping_pages(NODE_MAPPING(sbi),
918                         index, index);
919
920         dn->node_page = NULL;
921         trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
922
923         return 0;
924 }
925
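/*
 * Free one direct node and all data blocks it addresses. Returns 1 when the
 * node was freed or already absent, or a negative errno on failure.
 */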
926 static int truncate_dnode(struct dnode_of_data *dn)
927 {
928         struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
929         struct page *page;
930         int err;
931
932         if (dn->nid == 0)
933                 return 1;
934
935         /* get direct node */
936         page = f2fs_get_node_page(sbi, dn->nid);
937         if (PTR_ERR(page) == -ENOENT)
938                 return 1;
939         else if (IS_ERR(page))
940                 return PTR_ERR(page);
941
942         if (IS_INODE(page) || ino_of_node(page) != dn->inode->i_ino) {
943                 f2fs_err(sbi, "incorrect node reference, ino: %lu, nid: %u, ino_of_node: %u",
944                                 dn->inode->i_ino, dn->nid, ino_of_node(page));
945                 set_sbi_flag(sbi, SBI_NEED_FSCK);
946                 f2fs_handle_error(sbi, ERROR_INVALID_NODE_REFERENCE);
947                 f2fs_put_page(page, 1);
948                 return -EFSCORRUPTED;
949         }
950
951         /* Make dnode_of_data for parameter */
952         dn->node_page = page;
953         dn->ofs_in_node = 0;
954         f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
955         err = truncate_node(dn);
956         if (err) {
957                 f2fs_put_page(page, 1);
958                 return err;
959         }
960
961         return 1;
962 }
963
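/*
 * Recursively truncate the children of an (double-)indirect node starting at
 * child offset @ofs; the return value counts the node slots swept so the
 * caller can advance its node offset, or a negative errno on failure.
 */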
964 static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
965                                                 int ofs, int depth)
966 {
967         struct dnode_of_data rdn = *dn;
968         struct page *page;
969         struct f2fs_node *rn;
970         nid_t child_nid;
971         unsigned int child_nofs;
972         int freed = 0;
973         int i, ret;
974
975         if (dn->nid == 0)
976                 return NIDS_PER_BLOCK + 1;
977
978         trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);
979
980         page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
981         if (IS_ERR(page)) {
982                 trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
983                 return PTR_ERR(page);
984         }
985
986         f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK);
987
988         rn = F2FS_NODE(page);
989         if (depth < 3) {
990                 for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
991                         child_nid = le32_to_cpu(rn->in.nid[i]);
992                         if (child_nid == 0)
993                                 continue;
994                         rdn.nid = child_nid;
995                         ret = truncate_dnode(&rdn);
996                         if (ret < 0)
997                                 goto out_err;
998                         if (set_nid(page, i, 0, false))
999                                 dn->node_changed = true;
1000                 }
1001         } else {
1002                 child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
1003                 for (i = ofs; i < NIDS_PER_BLOCK; i++) {
1004                         child_nid = le32_to_cpu(rn->in.nid[i]);
1005                         if (child_nid == 0) {
1006                                 child_nofs += NIDS_PER_BLOCK + 1;
1007                                 continue;
1008                         }
1009                         rdn.nid = child_nid;
1010                         ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
1011                         if (ret == (NIDS_PER_BLOCK + 1)) {
1012                                 if (set_nid(page, i, 0, false))
1013                                         dn->node_changed = true;
1014                                 child_nofs += ret;
1015                         } else if (ret < 0 && ret != -ENOENT) {
1016                                 goto out_err;
1017                         }
1018                 }
1019                 freed = child_nofs;
1020         }
1021
1022         if (!ofs) {
1023                 /* remove current indirect node */
1024                 dn->node_page = page;
1025                 ret = truncate_node(dn);
1026                 if (ret)
1027                         goto out_err;
1028                 freed++;
1029         } else {
1030                 f2fs_put_page(page, 1);
1031         }
1032         trace_f2fs_truncate_nodes_exit(dn->inode, freed);
1033         return freed;
1034
1035 out_err:
1036         f2fs_put_page(page, 1);
1037         trace_f2fs_truncate_nodes_exit(dn->inode, ret);
1038         return ret;
1039 }
1040
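/*
 * Handle the partially truncated tail of the tree: walk down the indirect
 * path recorded in @offset, free the direct nodes past the truncation point,
 * and free the indirect node itself once it becomes empty.
 */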
1041 static int truncate_partial_nodes(struct dnode_of_data *dn,
1042                         struct f2fs_inode *ri, int *offset, int depth)
1043 {
1044         struct page *pages[2];
1045         nid_t nid[3];
1046         nid_t child_nid;
1047         int err = 0;
1048         int i;
1049         int idx = depth - 2;
1050
1051         nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
1052         if (!nid[0])
1053                 return 0;
1054
1055         /* get indirect nodes in the path */
1056         for (i = 0; i < idx + 1; i++) {
1057                 /* the reference count will be increased */
1058                 pages[i] = f2fs_get_node_page(F2FS_I_SB(dn->inode), nid[i]);
1059                 if (IS_ERR(pages[i])) {
1060                         err = PTR_ERR(pages[i]);
1061                         idx = i - 1;
1062                         goto fail;
1063                 }
1064                 nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
1065         }
1066
1067         f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);
1068
1069         /* free direct nodes linked to a partial indirect node */
1070         for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
1071                 child_nid = get_nid(pages[idx], i, false);
1072                 if (!child_nid)
1073                         continue;
1074                 dn->nid = child_nid;
1075                 err = truncate_dnode(dn);
1076                 if (err < 0)
1077                         goto fail;
1078                 if (set_nid(pages[idx], i, 0, false))
1079                         dn->node_changed = true;
1080         }
1081
1082         if (offset[idx + 1] == 0) {
1083                 dn->node_page = pages[idx];
1084                 dn->nid = nid[idx];
1085                 err = truncate_node(dn);
1086                 if (err)
1087                         goto fail;
1088         } else {
1089                 f2fs_put_page(pages[idx], 1);
1090         }
1091         offset[idx]++;
1092         offset[idx + 1] = 0;
1093         idx--;
1094 fail:
1095         for (i = idx; i >= 0; i--)
1096                 f2fs_put_page(pages[i], 1);
1097
1098         trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);
1099
1100         return err;
1101 }
1102
1103 /*
1104  * All the block addresses of data and nodes should be nullified.
1105  */
1106 int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
1107 {
1108         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1109         int err = 0, cont = 1;
1110         int level, offset[4], noffset[4];
1111         unsigned int nofs = 0;
1112         struct f2fs_inode *ri;
1113         struct dnode_of_data dn;
1114         struct page *page;
1115
1116         trace_f2fs_truncate_inode_blocks_enter(inode, from);
1117
1118         level = get_node_path(inode, from, offset, noffset);
1119         if (level < 0) {
1120                 trace_f2fs_truncate_inode_blocks_exit(inode, level);
1121                 return level;
1122         }
1123
1124         page = f2fs_get_node_page(sbi, inode->i_ino);
1125         if (IS_ERR(page)) {
1126                 trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
1127                 return PTR_ERR(page);
1128         }
1129
1130         set_new_dnode(&dn, inode, page, NULL, 0);
1131         unlock_page(page);
1132
1133         ri = F2FS_INODE(page);
1134         switch (level) {
1135         case 0:
1136         case 1:
1137                 nofs = noffset[1];
1138                 break;
1139         case 2:
1140                 nofs = noffset[1];
1141                 if (!offset[level - 1])
1142                         goto skip_partial;
1143                 err = truncate_partial_nodes(&dn, ri, offset, level);
1144                 if (err < 0 && err != -ENOENT)
1145                         goto fail;
1146                 nofs += 1 + NIDS_PER_BLOCK;
1147                 break;
1148         case 3:
1149                 nofs = 5 + 2 * NIDS_PER_BLOCK;
1150                 if (!offset[level - 1])
1151                         goto skip_partial;
1152                 err = truncate_partial_nodes(&dn, ri, offset, level);
1153                 if (err < 0 && err != -ENOENT)
1154                         goto fail;
1155                 break;
1156         default:
1157                 BUG();
1158         }
1159
1160 skip_partial:
1161         while (cont) {
1162                 dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
1163                 switch (offset[0]) {
1164                 case NODE_DIR1_BLOCK:
1165                 case NODE_DIR2_BLOCK:
1166                         err = truncate_dnode(&dn);
1167                         break;
1168
1169                 case NODE_IND1_BLOCK:
1170                 case NODE_IND2_BLOCK:
1171                         err = truncate_nodes(&dn, nofs, offset[1], 2);
1172                         break;
1173
1174                 case NODE_DIND_BLOCK:
1175                         err = truncate_nodes(&dn, nofs, offset[1], 3);
1176                         cont = 0;
1177                         break;
1178
1179                 default:
1180                         BUG();
1181                 }
1182                 if (err < 0 && err != -ENOENT)
1183                         goto fail;
1184                 if (offset[1] == 0 &&
1185                                 ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
1186                         lock_page(page);
1187                         BUG_ON(page->mapping != NODE_MAPPING(sbi));
1188                         f2fs_wait_on_page_writeback(page, NODE, true, true);
1189                         ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
1190                         set_page_dirty(page);
1191                         unlock_page(page);
1192                 }
1193                 offset[1] = 0;
1194                 offset[0]++;
1195                 nofs += err;
1196         }
1197 fail:
1198         f2fs_put_page(page, 0);
1199         trace_f2fs_truncate_inode_blocks_exit(inode, err);
1200         return err > 0 ? 0 : err;
1201 }
1202
1203 /* caller must lock inode page */
1204 int f2fs_truncate_xattr_node(struct inode *inode)
1205 {
1206         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1207         nid_t nid = F2FS_I(inode)->i_xattr_nid;
1208         struct dnode_of_data dn;
1209         struct page *npage;
1210         int err;
1211
1212         if (!nid)
1213                 return 0;
1214
1215         npage = f2fs_get_node_page(sbi, nid);
1216         if (IS_ERR(npage))
1217                 return PTR_ERR(npage);
1218
1219         set_new_dnode(&dn, inode, NULL, npage, nid);
1220         err = truncate_node(&dn);
1221         if (err) {
1222                 f2fs_put_page(npage, 1);
1223                 return err;
1224         }
1225
1226         f2fs_i_xnid_write(inode, 0);
1227
1228         return 0;
1229 }
1230
1231 /*
1232  * Caller should grab and release a rwsem by calling f2fs_lock_op() and
1233  * f2fs_unlock_op().
1234  */
1235 int f2fs_remove_inode_page(struct inode *inode)
1236 {
1237         struct dnode_of_data dn;
1238         int err;
1239
1240         set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
1241         err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
1242         if (err)
1243                 return err;
1244
1245         err = f2fs_truncate_xattr_node(inode);
1246         if (err) {
1247                 f2fs_put_dnode(&dn);
1248                 return err;
1249         }
1250
1251         /* remove potential inline_data blocks */
1252         if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1253                                 S_ISLNK(inode->i_mode))
1254                 f2fs_truncate_data_blocks_range(&dn, 1);
1255
1256         /* i_blocks can be 0, e.g. after f2fs_new_inode() has failed */
1257         if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
1258                 f2fs_put_dnode(&dn);
1259                 return -EIO;
1260         }
1261
1262         if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) {
1263                 f2fs_warn(F2FS_I_SB(inode),
1264                         "f2fs_remove_inode_page: inconsistent i_blocks, ino:%lu, iblocks:%llu",
1265                         inode->i_ino, (unsigned long long)inode->i_blocks);
1266                 set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
1267         }
1268
1269         /* will put inode & node pages */
1270         err = truncate_node(&dn);
1271         if (err) {
1272                 f2fs_put_dnode(&dn);
1273                 return err;
1274         }
1275         return 0;
1276 }
1277
1278 struct page *f2fs_new_inode_page(struct inode *inode)
1279 {
1280         struct dnode_of_data dn;
1281
1282         /* allocate inode page for new inode */
1283         set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
1284
1285         /* caller should f2fs_put_page(page, 1); */
1286         return f2fs_new_node_page(&dn, 0);
1287 }
1288
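/*
 * Allocate and initialize a new in-memory node page for dn->nid at node
 * offset @ofs; its nat entry is set to NEW_ADDR and the page is returned
 * dirty and locked.
 */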
1289 struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
1290 {
1291         struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1292         struct node_info new_ni;
1293         struct page *page;
1294         int err;
1295
1296         if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
1297                 return ERR_PTR(-EPERM);
1298
1299         page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
1300         if (!page)
1301                 return ERR_PTR(-ENOMEM);
1302
1303         if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs))))
1304                 goto fail;
1305
1306 #ifdef CONFIG_F2FS_CHECK_FS
1307         err = f2fs_get_node_info(sbi, dn->nid, &new_ni, false);
1308         if (err) {
1309                 dec_valid_node_count(sbi, dn->inode, !ofs);
1310                 goto fail;
1311         }
1312         if (unlikely(new_ni.blk_addr != NULL_ADDR)) {
1313                 err = -EFSCORRUPTED;
1314                 set_sbi_flag(sbi, SBI_NEED_FSCK);
1315                 f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
1316                 goto fail;
1317         }
1318 #endif
1319         new_ni.nid = dn->nid;
1320         new_ni.ino = dn->inode->i_ino;
1321         new_ni.blk_addr = NULL_ADDR;
1322         new_ni.flag = 0;
1323         new_ni.version = 0;
1324         set_node_addr(sbi, &new_ni, NEW_ADDR, false);
1325
1326         f2fs_wait_on_page_writeback(page, NODE, true, true);
1327         fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
1328         set_cold_node(page, S_ISDIR(dn->inode->i_mode));
1329         if (!PageUptodate(page))
1330                 SetPageUptodate(page);
1331         if (set_page_dirty(page))
1332                 dn->node_changed = true;
1333
1334         if (f2fs_has_xattr_block(ofs))
1335                 f2fs_i_xnid_write(dn->inode, dn->nid);
1336
1337         if (ofs == 0)
1338                 inc_valid_inode_count(sbi);
1339         return page;
1340
1341 fail:
1342         clear_node_page_dirty(page);
1343         f2fs_put_page(page, 1);
1344         return ERR_PTR(err);
1345 }
1346
1347 /*
1348  * The caller should release the page as follows, per return value:
1349  * 0: f2fs_put_page(page, 0)
1350  * LOCKED_PAGE or error: f2fs_put_page(page, 1)
1351  */
1352 static int read_node_page(struct page *page, blk_opf_t op_flags)
1353 {
1354         struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1355         struct node_info ni;
1356         struct f2fs_io_info fio = {
1357                 .sbi = sbi,
1358                 .type = NODE,
1359                 .op = REQ_OP_READ,
1360                 .op_flags = op_flags,
1361                 .page = page,
1362                 .encrypted_page = NULL,
1363         };
1364         int err;
1365
1366         if (PageUptodate(page)) {
1367                 if (!f2fs_inode_chksum_verify(sbi, page)) {
1368                         ClearPageUptodate(page);
1369                         return -EFSBADCRC;
1370                 }
1371                 return LOCKED_PAGE;
1372         }
1373
1374         err = f2fs_get_node_info(sbi, page->index, &ni, false);
1375         if (err)
1376                 return err;
1377
1378         /* NEW_ADDR can be seen, after cp_error drops some dirty node pages */
1379         if (unlikely(ni.blk_addr == NULL_ADDR || ni.blk_addr == NEW_ADDR)) {
1380                 ClearPageUptodate(page);
1381                 return -ENOENT;
1382         }
1383
1384         fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;
1385
1386         err = f2fs_submit_page_bio(&fio);
1387
1388         if (!err)
1389                 f2fs_update_iostat(sbi, NULL, FS_NODE_READ_IO, F2FS_BLKSIZE);
1390
1391         return err;
1392 }
1393
1394 /*
1395  * Readahead a node page
1396  */
1397 void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
1398 {
1399         struct page *apage;
1400         int err;
1401
1402         if (!nid)
1403                 return;
1404         if (f2fs_check_nid_range(sbi, nid))
1405                 return;
1406
1407         apage = xa_load(&NODE_MAPPING(sbi)->i_pages, nid);
1408         if (apage)
1409                 return;
1410
1411         apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
1412         if (!apage)
1413                 return;
1414
1415         err = read_node_page(apage, REQ_RAHEAD);
1416         f2fs_put_page(apage, err ? 1 : 0);
1417 }
1418
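/*
 * Return the locked, up-to-date node page for @nid, reading it from disk if
 * needed and verifying its checksum and footer; when @parent is given, also
 * kick off readahead for the following siblings.
 */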
1419 static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
1420                                         struct page *parent, int start)
1421 {
1422         struct page *page;
1423         int err;
1424
1425         if (!nid)
1426                 return ERR_PTR(-ENOENT);
1427         if (f2fs_check_nid_range(sbi, nid))
1428                 return ERR_PTR(-EINVAL);
1429 repeat:
1430         page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
1431         if (!page)
1432                 return ERR_PTR(-ENOMEM);
1433
1434         err = read_node_page(page, 0);
1435         if (err < 0) {
1436                 goto out_put_err;
1437         } else if (err == LOCKED_PAGE) {
1438                 err = 0;
1439                 goto page_hit;
1440         }
1441
1442         if (parent)
1443                 f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);
1444
1445         lock_page(page);
1446
1447         if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1448                 f2fs_put_page(page, 1);
1449                 goto repeat;
1450         }
1451
1452         if (unlikely(!PageUptodate(page))) {
1453                 err = -EIO;
1454                 goto out_err;
1455         }
1456
1457         if (!f2fs_inode_chksum_verify(sbi, page)) {
1458                 err = -EFSBADCRC;
1459                 goto out_err;
1460         }
1461 page_hit:
1462         if (likely(nid == nid_of_node(page)))
1463                 return page;
1464
1465         f2fs_warn(sbi, "inconsistent node block, nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
1466                           nid, nid_of_node(page), ino_of_node(page),
1467                           ofs_of_node(page), cpver_of_node(page),
1468                           next_blkaddr_of_node(page));
1469         set_sbi_flag(sbi, SBI_NEED_FSCK);
1470         f2fs_handle_error(sbi, ERROR_INCONSISTENT_FOOTER);
1471         err = -EFSCORRUPTED;
1472 out_err:
1473         ClearPageUptodate(page);
1474 out_put_err:
1475         /* -ENOENT comes from read_node_page and is not an error */
1476         if (err != -ENOENT)
1477                 f2fs_handle_page_eio(sbi, page->index, NODE);
1478         f2fs_put_page(page, 1);
1479         return ERR_PTR(err);
1480 }
1481
1482 struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
1483 {
1484         return __get_node_page(sbi, nid, NULL, 0);
1485 }
1486
1487 struct page *f2fs_get_node_page_ra(struct page *parent, int start)
1488 {
1489         struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
1490         nid_t nid = get_nid(parent, start, false);
1491
1492         return __get_node_page(sbi, nid, parent, start);
1493 }
1494
1495 static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
1496 {
1497         struct inode *inode;
1498         struct page *page;
1499         int ret;
1500
1501         /* should flush inline_data before evict_inode */
1502         inode = ilookup(sbi->sb, ino);
1503         if (!inode)
1504                 return;
1505
1506         page = f2fs_pagecache_get_page(inode->i_mapping, 0,
1507                                         FGP_LOCK|FGP_NOWAIT, 0);
1508         if (!page)
1509                 goto iput_out;
1510
1511         if (!PageUptodate(page))
1512                 goto page_out;
1513
1514         if (!PageDirty(page))
1515                 goto page_out;
1516
1517         if (!clear_page_dirty_for_io(page))
1518                 goto page_out;
1519
1520         ret = f2fs_write_inline_data(inode, page);
1521         inode_dec_dirty_pages(inode);
1522         f2fs_remove_dirty_inode(inode);
1523         if (ret)
1524                 set_page_dirty(page);
1525 page_out:
1526         f2fs_put_page(page, 1);
1527 iput_out:
1528         iput(inode);
1529 }
1530
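/*
 * Walk the dirty node pages and return the last dirty direct node page
 * that belongs to @ino (with a page reference held), NULL if there is
 * none, or an ERR_PTR on a checkpoint error.
 */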
1531 static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
1532 {
1533         pgoff_t index;
1534         struct folio_batch fbatch;
1535         struct page *last_page = NULL;
1536         int nr_folios;
1537
1538         folio_batch_init(&fbatch);
1539         index = 0;
1540
1541         while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
1542                                         (pgoff_t)-1, PAGECACHE_TAG_DIRTY,
1543                                         &fbatch))) {
1544                 int i;
1545
1546                 for (i = 0; i < nr_folios; i++) {
1547                         struct page *page = &fbatch.folios[i]->page;
1548
1549                         if (unlikely(f2fs_cp_error(sbi))) {
1550                                 f2fs_put_page(last_page, 0);
1551                                 folio_batch_release(&fbatch);
1552                                 return ERR_PTR(-EIO);
1553                         }
1554
1555                         if (!IS_DNODE(page) || !is_cold_node(page))
1556                                 continue;
1557                         if (ino_of_node(page) != ino)
1558                                 continue;
1559
1560                         lock_page(page);
1561
1562                         if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1563 continue_unlock:
1564                                 unlock_page(page);
1565                                 continue;
1566                         }
1567                         if (ino_of_node(page) != ino)
1568                                 goto continue_unlock;
1569
1570                         if (!PageDirty(page)) {
1571                                 /* someone wrote it for us */
1572                                 goto continue_unlock;
1573                         }
1574
1575                         if (last_page)
1576                                 f2fs_put_page(last_page, 0);
1577
1578                         get_page(page);
1579                         last_page = page;
1580                         unlock_page(page);
1581                 }
1582                 folio_batch_release(&fbatch);
1583                 cond_resched();
1584         }
1585         return last_page;
1586 }
1587
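/*
 * Write back a single node page. Returns 0 on success (including the
 * cases where the page was truncated or dropped on a checkpoint error),
 * in which case the page is unlocked here, or AOP_WRITEPAGE_ACTIVATE
 * when the page was redirtied, in which case it stays locked for the
 * caller to unlock.
 */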
1588 static int __write_node_page(struct page *page, bool atomic, bool *submitted,
1589                                 struct writeback_control *wbc, bool do_balance,
1590                                 enum iostat_type io_type, unsigned int *seq_id)
1591 {
1592         struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1593         nid_t nid;
1594         struct node_info ni;
1595         struct f2fs_io_info fio = {
1596                 .sbi = sbi,
1597                 .ino = ino_of_node(page),
1598                 .type = NODE,
1599                 .op = REQ_OP_WRITE,
1600                 .op_flags = wbc_to_write_flags(wbc),
1601                 .page = page,
1602                 .encrypted_page = NULL,
1603                 .submitted = 0,
1604                 .io_type = io_type,
1605                 .io_wbc = wbc,
1606         };
1607         unsigned int seq;
1608
1609         trace_f2fs_writepage(page, NODE);
1610
1611         if (unlikely(f2fs_cp_error(sbi))) {
1612                 /* keep node pages in remount-ro mode */
1613                 if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY)
1614                         goto redirty_out;
1615                 ClearPageUptodate(page);
1616                 dec_page_count(sbi, F2FS_DIRTY_NODES);
1617                 unlock_page(page);
1618                 return 0;
1619         }
1620
1621         if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
1622                 goto redirty_out;
1623
1624         if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
1625                         wbc->sync_mode == WB_SYNC_NONE &&
1626                         IS_DNODE(page) && is_cold_node(page))
1627                 goto redirty_out;
1628
1629         /* get old block addr of this node page */
1630         nid = nid_of_node(page);
1631         f2fs_bug_on(sbi, page->index != nid);
1632
1633         if (f2fs_get_node_info(sbi, nid, &ni, !do_balance))
1634                 goto redirty_out;
1635
1636         if (wbc->for_reclaim) {
1637                 if (!f2fs_down_read_trylock(&sbi->node_write))
1638                         goto redirty_out;
1639         } else {
1640                 f2fs_down_read(&sbi->node_write);
1641         }
1642
1643         /* This page is already truncated */
1644         if (unlikely(ni.blk_addr == NULL_ADDR)) {
1645                 ClearPageUptodate(page);
1646                 dec_page_count(sbi, F2FS_DIRTY_NODES);
1647                 f2fs_up_read(&sbi->node_write);
1648                 unlock_page(page);
1649                 return 0;
1650         }
1651
1652         if (__is_valid_data_blkaddr(ni.blk_addr) &&
1653                 !f2fs_is_valid_blkaddr(sbi, ni.blk_addr,
1654                                         DATA_GENERIC_ENHANCE)) {
1655                 f2fs_up_read(&sbi->node_write);
1656                 goto redirty_out;
1657         }
1658
1659         if (atomic && !test_opt(sbi, NOBARRIER) && !f2fs_sb_has_blkzoned(sbi))
1660                 fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
1661
1662         /* should be added to the global list before clearing PAGECACHE status */
1663         if (f2fs_in_warm_node_list(sbi, page)) {
1664                 seq = f2fs_add_fsync_node_entry(sbi, page);
1665                 if (seq_id)
1666                         *seq_id = seq;
1667         }
1668
1669         set_page_writeback(page);
1670
1671         fio.old_blkaddr = ni.blk_addr;
1672         f2fs_do_write_node_page(nid, &fio);
1673         set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
1674         dec_page_count(sbi, F2FS_DIRTY_NODES);
1675         f2fs_up_read(&sbi->node_write);
1676
1677         if (wbc->for_reclaim) {
1678                 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE);
1679                 submitted = NULL;
1680         }
1681
1682         unlock_page(page);
1683
1684         if (unlikely(f2fs_cp_error(sbi))) {
1685                 f2fs_submit_merged_write(sbi, NODE);
1686                 submitted = NULL;
1687         }
1688         if (submitted)
1689                 *submitted = fio.submitted;
1690
1691         if (do_balance)
1692                 f2fs_balance_fs(sbi, false);
1693         return 0;
1694
1695 redirty_out:
1696         redirty_page_for_writepage(wbc, page);
1697         return AOP_WRITEPAGE_ACTIVATE;
1698 }
1699
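/*
 * Migrate a node page for GC: foreground GC writes it out synchronously
 * right here, while background GC only redirties it so that regular
 * writeback picks it up later. The page comes in locked; it is unlocked
 * and its reference dropped before returning.
 */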
1700 int f2fs_move_node_page(struct page *node_page, int gc_type)
1701 {
1702         int err = 0;
1703
1704         if (gc_type == FG_GC) {
1705                 struct writeback_control wbc = {
1706                         .sync_mode = WB_SYNC_ALL,
1707                         .nr_to_write = 1,
1708                         .for_reclaim = 0,
1709                 };
1710
1711                 f2fs_wait_on_page_writeback(node_page, NODE, true, true);
1712
1713                 set_page_dirty(node_page);
1714
1715                 if (!clear_page_dirty_for_io(node_page)) {
1716                         err = -EAGAIN;
1717                         goto out_page;
1718                 }
1719
1720                 if (__write_node_page(node_page, false, NULL,
1721                                         &wbc, false, FS_GC_NODE_IO, NULL)) {
1722                         err = -EAGAIN;
1723                         unlock_page(node_page);
1724                 }
1725                 goto release_page;
1726         } else {
1727                 /* set the page dirty; regular writeback will flush it later */
1728                 if (!PageWriteback(node_page))
1729                         set_page_dirty(node_page);
1730         }
1731 out_page:
1732         unlock_page(node_page);
1733 release_page:
1734         f2fs_put_page(node_page, 0);
1735         return err;
1736 }
1737
1738 static int f2fs_write_node_page(struct page *page,
1739                                 struct writeback_control *wbc)
1740 {
1741         return __write_node_page(page, false, NULL, wbc, false,
1742                                                 FS_NODE_IO, NULL);
1743 }
1744
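/*
 * Write out the dirty node pages of @inode for fsync. In atomic mode the
 * last dirty dnode is tagged with the fsync mark (and, for the inode
 * page, an up-to-date inode copy and optional dentry mark), and the walk
 * is retried until that marked page has actually been submitted.
 */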
1745 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
1746                         struct writeback_control *wbc, bool atomic,
1747                         unsigned int *seq_id)
1748 {
1749         pgoff_t index;
1750         struct folio_batch fbatch;
1751         int ret = 0;
1752         struct page *last_page = NULL;
1753         bool marked = false;
1754         nid_t ino = inode->i_ino;
1755         int nr_folios;
1756         int nwritten = 0;
1757
1758         if (atomic) {
1759                 last_page = last_fsync_dnode(sbi, ino);
1760                 if (IS_ERR_OR_NULL(last_page))
1761                         return PTR_ERR_OR_ZERO(last_page);
1762         }
1763 retry:
1764         folio_batch_init(&fbatch);
1765         index = 0;
1766
1767         while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
1768                                         (pgoff_t)-1, PAGECACHE_TAG_DIRTY,
1769                                         &fbatch))) {
1770                 int i;
1771
1772                 for (i = 0; i < nr_folios; i++) {
1773                         struct page *page = &fbatch.folios[i]->page;
1774                         bool submitted = false;
1775
1776                         if (unlikely(f2fs_cp_error(sbi))) {
1777                                 f2fs_put_page(last_page, 0);
1778                                 folio_batch_release(&fbatch);
1779                                 ret = -EIO;
1780                                 goto out;
1781                         }
1782
1783                         if (!IS_DNODE(page) || !is_cold_node(page))
1784                                 continue;
1785                         if (ino_of_node(page) != ino)
1786                                 continue;
1787
1788                         lock_page(page);
1789
1790                         if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1791 continue_unlock:
1792                                 unlock_page(page);
1793                                 continue;
1794                         }
1795                         if (ino_of_node(page) != ino)
1796                                 goto continue_unlock;
1797
1798                         if (!PageDirty(page) && page != last_page) {
1799                                 /* someone wrote it for us */
1800                                 goto continue_unlock;
1801                         }
1802
1803                         f2fs_wait_on_page_writeback(page, NODE, true, true);
1804
1805                         set_fsync_mark(page, 0);
1806                         set_dentry_mark(page, 0);
1807
1808                         if (!atomic || page == last_page) {
1809                                 set_fsync_mark(page, 1);
1810                                 percpu_counter_inc(&sbi->rf_node_block_count);
1811                                 if (IS_INODE(page)) {
1812                                         if (is_inode_flag_set(inode,
1813                                                                 FI_DIRTY_INODE))
1814                                                 f2fs_update_inode(inode, page);
1815                                         set_dentry_mark(page,
1816                                                 f2fs_need_dentry_mark(sbi, ino));
1817                                 }
1818                                 /* may have been written by another thread */
1819                                 if (!PageDirty(page))
1820                                         set_page_dirty(page);
1821                         }
1822
1823                         if (!clear_page_dirty_for_io(page))
1824                                 goto continue_unlock;
1825
1826                         ret = __write_node_page(page, atomic &&
1827                                                 page == last_page,
1828                                                 &submitted, wbc, true,
1829                                                 FS_NODE_IO, seq_id);
1830                         if (ret) {
1831                                 unlock_page(page);
1832                                 f2fs_put_page(last_page, 0);
1833                                 break;
1834                         } else if (submitted) {
1835                                 nwritten++;
1836                         }
1837
1838                         if (page == last_page) {
1839                                 f2fs_put_page(page, 0);
1840                                 marked = true;
1841                                 break;
1842                         }
1843                 }
1844                 folio_batch_release(&fbatch);
1845                 cond_resched();
1846
1847                 if (ret || marked)
1848                         break;
1849         }
1850         if (!ret && atomic && !marked) {
1851                 f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx",
1852                            ino, last_page->index);
1853                 lock_page(last_page);
1854                 f2fs_wait_on_page_writeback(last_page, NODE, true, true);
1855                 set_page_dirty(last_page);
1856                 unlock_page(last_page);
1857                 goto retry;
1858         }
1859 out:
1860         if (nwritten)
1861                 f2fs_submit_merged_write_cond(sbi, NULL, NULL, ino, NODE);
1862         return ret ? -EIO : 0;
1863 }
1864
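/*
 * Callback for find_inode_nowait(): grab a reference to @inode if it
 * matches @ino and still has dirty inode metadata to flush.
 */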
1865 static int f2fs_match_ino(struct inode *inode, unsigned long ino, void *data)
1866 {
1867         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1868         bool clean;
1869
1870         if (inode->i_ino != ino)
1871                 return 0;
1872
1873         if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
1874                 return 0;
1875
1876         spin_lock(&sbi->inode_lock[DIRTY_META]);
1877         clean = list_empty(&F2FS_I(inode)->gdirty_list);
1878         spin_unlock(&sbi->inode_lock[DIRTY_META]);
1879
1880         if (clean)
1881                 return 0;
1882
1883         inode = igrab(inode);
1884         if (!inode)
1885                 return 0;
1886         return 1;
1887 }
1888
1889 static bool flush_dirty_inode(struct page *page)
1890 {
1891         struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1892         struct inode *inode;
1893         nid_t ino = ino_of_node(page);
1894
1895         inode = find_inode_nowait(sbi->sb, ino, f2fs_match_ino, NULL);
1896         if (!inode)
1897                 return false;
1898
1899         f2fs_update_inode(inode, page);
1900         unlock_page(page);
1901
1902         iput(inode);
1903         return true;
1904 }
1905
1906 void f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
1907 {
1908         pgoff_t index = 0;
1909         struct folio_batch fbatch;
1910         int nr_folios;
1911
1912         folio_batch_init(&fbatch);
1913
1914         while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
1915                                         (pgoff_t)-1, PAGECACHE_TAG_DIRTY,
1916                                         &fbatch))) {
1917                 int i;
1918
1919                 for (i = 0; i < nr_folios; i++) {
1920                         struct page *page = &fbatch.folios[i]->page;
1921
1922                         if (!IS_DNODE(page))
1923                                 continue;
1924
1925                         lock_page(page);
1926
1927                         if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1928 continue_unlock:
1929                                 unlock_page(page);
1930                                 continue;
1931                         }
1932
1933                         if (!PageDirty(page)) {
1934                                 /* someone wrote it for us */
1935                                 goto continue_unlock;
1936                         }
1937
1938                         /* flush inline_data, as this is an async writeback context */
1939                         if (page_private_inline(page)) {
1940                                 clear_page_private_inline(page);
1941                                 unlock_page(page);
1942                                 flush_inline_data(sbi, ino_of_node(page));
1943                                 continue;
1944                         }
1945                         unlock_page(page);
1946                 }
1947                 folio_batch_release(&fbatch);
1948                 cond_resched();
1949         }
1950 }
1951
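/*
 * Write back dirty node pages in three passes: indirect nodes first,
 * then dentry dnodes, then file dnodes. Returns -EIO on a checkpoint
 * error, otherwise whatever the last page write returned.
 */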
1952 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
1953                                 struct writeback_control *wbc,
1954                                 bool do_balance, enum iostat_type io_type)
1955 {
1956         pgoff_t index;
1957         struct folio_batch fbatch;
1958         int step = 0;
1959         int nwritten = 0;
1960         int ret = 0;
1961         int nr_folios, done = 0;
1962
1963         folio_batch_init(&fbatch);
1964
1965 next_step:
1966         index = 0;
1967
1968         while (!done && (nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi),
1969                                 &index, (pgoff_t)-1, PAGECACHE_TAG_DIRTY,
1970                                 &fbatch))) {
1971                 int i;
1972
1973                 for (i = 0; i < nr_folios; i++) {
1974                         struct page *page = &fbatch.folios[i]->page;
1975                         bool submitted = false;
1976
1977                         /* give a priority to WB_SYNC threads */
1978                         if (atomic_read(&sbi->wb_sync_req[NODE]) &&
1979                                         wbc->sync_mode == WB_SYNC_NONE) {
1980                                 done = 1;
1981                                 break;
1982                         }
1983
1984                         /*
1985                          * flushing sequence, step by step:
1986                          * 0. indirect nodes
1987                          * 1. dentry dnodes
1988                          * 2. file dnodes
1989                          */
1990                         if (step == 0 && IS_DNODE(page))
1991                                 continue;
1992                         if (step == 1 && (!IS_DNODE(page) ||
1993                                                 is_cold_node(page)))
1994                                 continue;
1995                         if (step == 2 && (!IS_DNODE(page) ||
1996                                                 !is_cold_node(page)))
1997                                 continue;
1998 lock_node:
1999                         if (wbc->sync_mode == WB_SYNC_ALL)
2000                                 lock_page(page);
2001                         else if (!trylock_page(page))
2002                                 continue;
2003
2004                         if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
2005 continue_unlock:
2006                                 unlock_page(page);
2007                                 continue;
2008                         }
2009
2010                         if (!PageDirty(page)) {
2011                                 /* someone wrote it for us */
2012                                 goto continue_unlock;
2013                         }
2014
2015                         /* flush inline_data/inode only in an async (balancing) context */
2016                         if (!do_balance)
2017                                 goto write_node;
2018
2019                         /* flush inline_data */
2020                         if (page_private_inline(page)) {
2021                                 clear_page_private_inline(page);
2022                                 unlock_page(page);
2023                                 flush_inline_data(sbi, ino_of_node(page));
2024                                 goto lock_node;
2025                         }
2026
2027                         /* flush dirty inode */
2028                         if (IS_INODE(page) && flush_dirty_inode(page))
2029                                 goto lock_node;
2030 write_node:
2031                         f2fs_wait_on_page_writeback(page, NODE, true, true);
2032
2033                         if (!clear_page_dirty_for_io(page))
2034                                 goto continue_unlock;
2035
2036                         set_fsync_mark(page, 0);
2037                         set_dentry_mark(page, 0);
2038
2039                         ret = __write_node_page(page, false, &submitted,
2040                                                 wbc, do_balance, io_type, NULL);
2041                         if (ret)
2042                                 unlock_page(page);
2043                         else if (submitted)
2044                                 nwritten++;
2045
2046                         if (--wbc->nr_to_write == 0)
2047                                 break;
2048                 }
2049                 folio_batch_release(&fbatch);
2050                 cond_resched();
2051
2052                 if (wbc->nr_to_write == 0) {
2053                         step = 2;
2054                         break;
2055                 }
2056         }
2057
2058         if (step < 2) {
2059                 if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2060                                 wbc->sync_mode == WB_SYNC_NONE && step == 1)
2061                         goto out;
2062                 step++;
2063                 goto next_step;
2064         }
2065 out:
2066         if (nwritten)
2067                 f2fs_submit_merged_write(sbi, NODE);
2068
2069         if (unlikely(f2fs_cp_error(sbi)))
2070                 return -EIO;
2071         return ret;
2072 }
2073
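/*
 * Wait for writeback of every fsync node page whose sequence id is not
 * newer than @seq_id, then report any error recorded on the node mapping.
 */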
2074 int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
2075                                                 unsigned int seq_id)
2076 {
2077         struct fsync_node_entry *fn;
2078         struct page *page;
2079         struct list_head *head = &sbi->fsync_node_list;
2080         unsigned long flags;
2081         unsigned int cur_seq_id = 0;
2082
2083         while (seq_id && cur_seq_id < seq_id) {
2084                 spin_lock_irqsave(&sbi->fsync_node_lock, flags);
2085                 if (list_empty(head)) {
2086                         spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
2087                         break;
2088                 }
2089                 fn = list_first_entry(head, struct fsync_node_entry, list);
2090                 if (fn->seq_id > seq_id) {
2091                         spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
2092                         break;
2093                 }
2094                 cur_seq_id = fn->seq_id;
2095                 page = fn->page;
2096                 get_page(page);
2097                 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
2098
2099                 f2fs_wait_on_page_writeback(page, NODE, true, false);
2100
2101                 put_page(page);
2102         }
2103
2104         return filemap_check_errors(NODE_MAPPING(sbi));
2105 }
2106
2107 static int f2fs_write_node_pages(struct address_space *mapping,
2108                             struct writeback_control *wbc)
2109 {
2110         struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
2111         struct blk_plug plug;
2112         long diff;
2113
2114         if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
2115                 goto skip_write;
2116
2117         /* balancing f2fs's metadata in background */
2118         f2fs_balance_fs_bg(sbi, true);
2119
2120         /* collect a number of dirty node pages and write them together */
2121         if (wbc->sync_mode != WB_SYNC_ALL &&
2122                         get_pages(sbi, F2FS_DIRTY_NODES) <
2123                                         nr_pages_to_skip(sbi, NODE))
2124                 goto skip_write;
2125
2126         if (wbc->sync_mode == WB_SYNC_ALL)
2127                 atomic_inc(&sbi->wb_sync_req[NODE]);
2128         else if (atomic_read(&sbi->wb_sync_req[NODE])) {
2129                 /* to avoid potential deadlock */
2130                 if (current->plug)
2131                         blk_finish_plug(current->plug);
2132                 goto skip_write;
2133         }
2134
2135         trace_f2fs_writepages(mapping->host, wbc, NODE);
2136
2137         diff = nr_pages_to_write(sbi, NODE, wbc);
2138         blk_start_plug(&plug);
2139         f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO);
2140         blk_finish_plug(&plug);
2141         wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
2142
2143         if (wbc->sync_mode == WB_SYNC_ALL)
2144                 atomic_dec(&sbi->wb_sync_req[NODE]);
2145         return 0;
2146
2147 skip_write:
2148         wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
2149         trace_f2fs_writepages(mapping->host, wbc, NODE);
2150         return 0;
2151 }
2152
2153 static bool f2fs_dirty_node_folio(struct address_space *mapping,
2154                 struct folio *folio)
2155 {
2156         trace_f2fs_set_page_dirty(&folio->page, NODE);
2157
2158         if (!folio_test_uptodate(folio))
2159                 folio_mark_uptodate(folio);
2160 #ifdef CONFIG_F2FS_CHECK_FS
2161         if (IS_INODE(&folio->page))
2162                 f2fs_inode_chksum_set(F2FS_M_SB(mapping), &folio->page);
2163 #endif
2164         if (filemap_dirty_folio(mapping, folio)) {
2165                 inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
2166                 set_page_private_reference(&folio->page);
2167                 return true;
2168         }
2169         return false;
2170 }
2171
2172 /*
2173  * Structure of the f2fs node operations
2174  */
2175 const struct address_space_operations f2fs_node_aops = {
2176         .writepage      = f2fs_write_node_page,
2177         .writepages     = f2fs_write_node_pages,
2178         .dirty_folio    = f2fs_dirty_node_folio,
2179         .invalidate_folio = f2fs_invalidate_folio,
2180         .release_folio  = f2fs_release_folio,
2181         .migrate_folio  = filemap_migrate_folio,
2182 };
2183
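/*
 * Free nid bookkeeping: cached free nids live in a radix tree keyed by
 * nid and, while in FREE_NID state, on nm_i->free_nid_list; once picked
 * by f2fs_alloc_nid() they move to PREALLOC_NID and leave the list.
 * nm_i->nid_list_lock protects all of this.
 */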
2184 static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
2185                                                 nid_t n)
2186 {
2187         return radix_tree_lookup(&nm_i->free_nid_root, n);
2188 }
2189
2190 static int __insert_free_nid(struct f2fs_sb_info *sbi,
2191                                 struct free_nid *i)
2192 {
2193         struct f2fs_nm_info *nm_i = NM_I(sbi);
2194         int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
2195
2196         if (err)
2197                 return err;
2198
2199         nm_i->nid_cnt[FREE_NID]++;
2200         list_add_tail(&i->list, &nm_i->free_nid_list);
2201         return 0;
2202 }
2203
2204 static void __remove_free_nid(struct f2fs_sb_info *sbi,
2205                         struct free_nid *i, enum nid_state state)
2206 {
2207         struct f2fs_nm_info *nm_i = NM_I(sbi);
2208
2209         f2fs_bug_on(sbi, state != i->state);
2210         nm_i->nid_cnt[state]--;
2211         if (state == FREE_NID)
2212                 list_del(&i->list);
2213         radix_tree_delete(&nm_i->free_nid_root, i->nid);
2214 }
2215
2216 static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i,
2217                         enum nid_state org_state, enum nid_state dst_state)
2218 {
2219         struct f2fs_nm_info *nm_i = NM_I(sbi);
2220
2221         f2fs_bug_on(sbi, org_state != i->state);
2222         i->state = dst_state;
2223         nm_i->nid_cnt[org_state]--;
2224         nm_i->nid_cnt[dst_state]++;
2225
2226         switch (dst_state) {
2227         case PREALLOC_NID:
2228                 list_del(&i->list);
2229                 break;
2230         case FREE_NID:
2231                 list_add_tail(&i->list, &nm_i->free_nid_list);
2232                 break;
2233         default:
2234                 BUG_ON(1);
2235         }
2236 }
2237
2238 bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi)
2239 {
2240         struct f2fs_nm_info *nm_i = NM_I(sbi);
2241         unsigned int i;
2242         bool ret = true;
2243
2244         f2fs_down_read(&nm_i->nat_tree_lock);
2245         for (i = 0; i < nm_i->nat_blocks; i++) {
2246                 if (!test_bit_le(i, nm_i->nat_block_bitmap)) {
2247                         ret = false;
2248                         break;
2249                 }
2250         }
2251         f2fs_up_read(&nm_i->nat_tree_lock);
2252
2253         return ret;
2254 }
2255
2256 static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
2257                                                         bool set, bool build)
2258 {
2259         struct f2fs_nm_info *nm_i = NM_I(sbi);
2260         unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
2261         unsigned int nid_ofs = nid - START_NID(nid);
2262
2263         if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
2264                 return;
2265
2266         if (set) {
2267                 if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
2268                         return;
2269                 __set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
2270                 nm_i->free_nid_count[nat_ofs]++;
2271         } else {
2272                 if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
2273                         return;
2274                 __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
2275                 if (!build)
2276                         nm_i->free_nid_count[nat_ofs]--;
2277         }
2278 }
2279
2280 /* return whether the nid is recognized as free */
2281 static bool add_free_nid(struct f2fs_sb_info *sbi,
2282                                 nid_t nid, bool build, bool update)
2283 {
2284         struct f2fs_nm_info *nm_i = NM_I(sbi);
2285         struct free_nid *i, *e;
2286         struct nat_entry *ne;
2287         int err = -EINVAL;
2288         bool ret = false;
2289
2290         /* 0 nid should not be used */
2291         if (unlikely(nid == 0))
2292                 return false;
2293
2294         if (unlikely(f2fs_check_nid_range(sbi, nid)))
2295                 return false;
2296
2297         i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS, true, NULL);
2298         i->nid = nid;
2299         i->state = FREE_NID;
2300
2301         radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
2302
2303         spin_lock(&nm_i->nid_list_lock);
2304
2305         if (build) {
2306                 /*
2307                  *   Thread A             Thread B
2308                  *  - f2fs_create
2309                  *   - f2fs_new_inode
2310                  *    - f2fs_alloc_nid
2311                  *     - __insert_nid_to_list(PREALLOC_NID)
2312                  *                     - f2fs_balance_fs_bg
2313                  *                      - f2fs_build_free_nids
2314                  *                       - __f2fs_build_free_nids
2315                  *                        - scan_nat_page
2316                  *                         - add_free_nid
2317                  *                          - __lookup_nat_cache
2318                  *  - f2fs_add_link
2319                  *   - f2fs_init_inode_metadata
2320                  *    - f2fs_new_inode_page
2321                  *     - f2fs_new_node_page
2322                  *      - set_node_addr
2323                  *  - f2fs_alloc_nid_done
2324                  *   - __remove_nid_from_list(PREALLOC_NID)
2325                  *                         - __insert_nid_to_list(FREE_NID)
2326                  */
2327                 ne = __lookup_nat_cache(nm_i, nid);
2328                 if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
2329                                 nat_get_blkaddr(ne) != NULL_ADDR))
2330                         goto err_out;
2331
2332                 e = __lookup_free_nid_list(nm_i, nid);
2333                 if (e) {
2334                         if (e->state == FREE_NID)
2335                                 ret = true;
2336                         goto err_out;
2337                 }
2338         }
2339         ret = true;
2340         err = __insert_free_nid(sbi, i);
2341 err_out:
2342         if (update) {
2343                 update_free_nid_bitmap(sbi, nid, ret, build);
2344                 if (!build)
2345                         nm_i->available_nids++;
2346         }
2347         spin_unlock(&nm_i->nid_list_lock);
2348         radix_tree_preload_end();
2349
2350         if (err)
2351                 kmem_cache_free(free_nid_slab, i);
2352         return ret;
2353 }
2354
2355 static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
2356 {
2357         struct f2fs_nm_info *nm_i = NM_I(sbi);
2358         struct free_nid *i;
2359         bool need_free = false;
2360
2361         spin_lock(&nm_i->nid_list_lock);
2362         i = __lookup_free_nid_list(nm_i, nid);
2363         if (i && i->state == FREE_NID) {
2364                 __remove_free_nid(sbi, i, FREE_NID);
2365                 need_free = true;
2366         }
2367         spin_unlock(&nm_i->nid_list_lock);
2368
2369         if (need_free)
2370                 kmem_cache_free(free_nid_slab, i);
2371 }
2372
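/*
 * Scan one NAT block starting at @start_nid and record which nids are
 * free. A NAT entry still carrying NEW_ADDR indicates corruption and
 * fails the scan with -EFSCORRUPTED.
 */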
2373 static int scan_nat_page(struct f2fs_sb_info *sbi,
2374                         struct page *nat_page, nid_t start_nid)
2375 {
2376         struct f2fs_nm_info *nm_i = NM_I(sbi);
2377         struct f2fs_nat_block *nat_blk = page_address(nat_page);
2378         block_t blk_addr;
2379         unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
2380         int i;
2381
2382         __set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
2383
2384         i = start_nid % NAT_ENTRY_PER_BLOCK;
2385
2386         for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
2387                 if (unlikely(start_nid >= nm_i->max_nid))
2388                         break;
2389
2390                 blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
2391
2392                 if (blk_addr == NEW_ADDR)
2393                         return -EFSCORRUPTED;
2394
2395                 if (blk_addr == NULL_ADDR) {
2396                         add_free_nid(sbi, start_nid, true, true);
2397                 } else {
2398                         spin_lock(&NM_I(sbi)->nid_list_lock);
2399                         update_free_nid_bitmap(sbi, start_nid, false, true);
2400                         spin_unlock(&NM_I(sbi)->nid_list_lock);
2401                 }
2402         }
2403
2404         return 0;
2405 }
2406
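/*
 * Scan the NAT journal in the hot data curseg: entries with NULL_ADDR
 * are collected as free nids, anything else is dropped from the free
 * nid cache.
 */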
2407 static void scan_curseg_cache(struct f2fs_sb_info *sbi)
2408 {
2409         struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2410         struct f2fs_journal *journal = curseg->journal;
2411         int i;
2412
2413         down_read(&curseg->journal_rwsem);
2414         for (i = 0; i < nats_in_cursum(journal); i++) {
2415                 block_t addr;
2416                 nid_t nid;
2417
2418                 addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
2419                 nid = le32_to_cpu(nid_in_journal(journal, i));
2420                 if (addr == NULL_ADDR)
2421                         add_free_nid(sbi, nid, true, false);
2422                 else
2423                         remove_free_nid(sbi, nid);
2424         }
2425         up_read(&curseg->journal_rwsem);
2426 }
2427
2428 static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
2429 {
2430         struct f2fs_nm_info *nm_i = NM_I(sbi);
2431         unsigned int i, idx;
2432         nid_t nid;
2433
2434         f2fs_down_read(&nm_i->nat_tree_lock);
2435
2436         for (i = 0; i < nm_i->nat_blocks; i++) {
2437                 if (!test_bit_le(i, nm_i->nat_block_bitmap))
2438                         continue;
2439                 if (!nm_i->free_nid_count[i])
2440                         continue;
2441                 for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
2442                         idx = find_next_bit_le(nm_i->free_nid_bitmap[i],
2443                                                 NAT_ENTRY_PER_BLOCK, idx);
2444                         if (idx >= NAT_ENTRY_PER_BLOCK)
2445                                 break;
2446
2447                         nid = i * NAT_ENTRY_PER_BLOCK + idx;
2448                         add_free_nid(sbi, nid, true, false);
2449
2450                         if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS)
2451                                 goto out;
2452                 }
2453         }
2454 out:
2455         scan_curseg_cache(sbi);
2456
2457         f2fs_up_read(&nm_i->nat_tree_lock);
2458 }
2459
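/*
 * Refill the free nid cache: first mine the free nid bitmaps (unless
 * mounting), then scan up to FREE_NID_PAGES NAT pages starting from
 * nm_i->next_scan_nid, and finally the curseg NAT journal.
 */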
2460 static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
2461                                                 bool sync, bool mount)
2462 {
2463         struct f2fs_nm_info *nm_i = NM_I(sbi);
2464         int i = 0, ret;
2465         nid_t nid = nm_i->next_scan_nid;
2466
2467         if (unlikely(nid >= nm_i->max_nid))
2468                 nid = 0;
2469
2470         if (unlikely(nid % NAT_ENTRY_PER_BLOCK))
2471                 nid = NAT_BLOCK_OFFSET(nid) * NAT_ENTRY_PER_BLOCK;
2472
2473         /* Enough entries */
2474         if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2475                 return 0;
2476
2477         if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
2478                 return 0;
2479
2480         if (!mount) {
2481                 /* try to find free nids in free_nid_bitmap */
2482                 scan_free_nid_bits(sbi);
2483
2484                 if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2485                         return 0;
2486         }
2487
2488         /* readahead nat pages to be scanned */
2489         f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
2490                                                         META_NAT, true);
2491
2492         f2fs_down_read(&nm_i->nat_tree_lock);
2493
2494         while (1) {
2495                 if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
2496                                                 nm_i->nat_block_bitmap)) {
2497                         struct page *page = get_current_nat_page(sbi, nid);
2498
2499                         if (IS_ERR(page)) {
2500                                 ret = PTR_ERR(page);
2501                         } else {
2502                                 ret = scan_nat_page(sbi, page, nid);
2503                                 f2fs_put_page(page, 1);
2504                         }
2505
2506                         if (ret) {
2507                                 f2fs_up_read(&nm_i->nat_tree_lock);
2508
2509                                 if (ret == -EFSCORRUPTED) {
2510                                         f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
2511                                         set_sbi_flag(sbi, SBI_NEED_FSCK);
2512                                         f2fs_handle_error(sbi,
2513                                                 ERROR_INCONSISTENT_NAT);
2514                                 }
2515
2516                                 return ret;
2517                         }
2518                 }
2519
2520                 nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
2521                 if (unlikely(nid >= nm_i->max_nid))
2522                         nid = 0;
2523
2524                 if (++i >= FREE_NID_PAGES)
2525                         break;
2526         }
2527
2528         /* resume from the next NAT page so later scans keep finding free nids */
2529         nm_i->next_scan_nid = nid;
2530
2531         /* find free nids from current sum_pages */
2532         scan_curseg_cache(sbi);
2533
2534         f2fs_up_read(&nm_i->nat_tree_lock);
2535
2536         f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
2537                                         nm_i->ra_nid_pages, META_NAT, false);
2538
2539         return 0;
2540 }
2541
2542 int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
2543 {
2544         int ret;
2545
2546         mutex_lock(&NM_I(sbi)->build_lock);
2547         ret = __f2fs_build_free_nids(sbi, sync, mount);
2548         mutex_unlock(&NM_I(sbi)->build_lock);
2549
2550         return ret;
2551 }
2552
2553 /*
2554  * If this function returns success, the caller can obtain a new nid
2555  * from the second parameter of this function.
2556  * The returned nid can be used as an ino as well as a nid when an inode is created.
2557  */
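/*
 * A typical caller pattern (a sketch; cf. f2fs_recover_xattr_data()
 * below, which follows it):
 *
 *	if (!f2fs_alloc_nid(sbi, &nid))
 *		return -ENOSPC;
 *	...
 *	f2fs_alloc_nid_done(sbi, nid);    on success, or
 *	f2fs_alloc_nid_failed(sbi, nid);  on failure
 */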
2558 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
2559 {
2560         struct f2fs_nm_info *nm_i = NM_I(sbi);
2561         struct free_nid *i = NULL;
2562 retry:
2563         if (time_to_inject(sbi, FAULT_ALLOC_NID))
2564                 return false;
2565
2566         spin_lock(&nm_i->nid_list_lock);
2567
2568         if (unlikely(nm_i->available_nids == 0)) {
2569                 spin_unlock(&nm_i->nid_list_lock);
2570                 return false;
2571         }
2572
2573         /* We should not use stale free nids created by f2fs_build_free_nids */
2574         if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) {
2575                 f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
2576                 i = list_first_entry(&nm_i->free_nid_list,
2577                                         struct free_nid, list);
2578                 *nid = i->nid;
2579
2580                 __move_free_nid(sbi, i, FREE_NID, PREALLOC_NID);
2581                 nm_i->available_nids--;
2582
2583                 update_free_nid_bitmap(sbi, *nid, false, false);
2584
2585                 spin_unlock(&nm_i->nid_list_lock);
2586                 return true;
2587         }
2588         spin_unlock(&nm_i->nid_list_lock);
2589
2590         /* Let's scan NAT pages and their caches to get free nids */
2591         if (!f2fs_build_free_nids(sbi, true, false))
2592                 goto retry;
2593         return false;
2594 }
2595
2596 /*
2597  * f2fs_alloc_nid() should be called prior to this function.
2598  */
2599 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
2600 {
2601         struct f2fs_nm_info *nm_i = NM_I(sbi);
2602         struct free_nid *i;
2603
2604         spin_lock(&nm_i->nid_list_lock);
2605         i = __lookup_free_nid_list(nm_i, nid);
2606         f2fs_bug_on(sbi, !i);
2607         __remove_free_nid(sbi, i, PREALLOC_NID);
2608         spin_unlock(&nm_i->nid_list_lock);
2609
2610         kmem_cache_free(free_nid_slab, i);
2611 }
2612
2613 /*
2614  * f2fs_alloc_nid() should be called prior to this function.
2615  */
2616 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
2617 {
2618         struct f2fs_nm_info *nm_i = NM_I(sbi);
2619         struct free_nid *i;
2620         bool need_free = false;
2621
2622         if (!nid)
2623                 return;
2624
2625         spin_lock(&nm_i->nid_list_lock);
2626         i = __lookup_free_nid_list(nm_i, nid);
2627         f2fs_bug_on(sbi, !i);
2628
2629         if (!f2fs_available_free_memory(sbi, FREE_NIDS)) {
2630                 __remove_free_nid(sbi, i, PREALLOC_NID);
2631                 need_free = true;
2632         } else {
2633                 __move_free_nid(sbi, i, PREALLOC_NID, FREE_NID);
2634         }
2635
2636         nm_i->available_nids++;
2637
2638         update_free_nid_bitmap(sbi, nid, true, false);
2639
2640         spin_unlock(&nm_i->nid_list_lock);
2641
2642         if (need_free)
2643                 kmem_cache_free(free_nid_slab, i);
2644 }
2645
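/*
 * Shrinker helper: drop up to @nr_shrink cached free nids, keeping at
 * least MAX_FREE_NIDS of them, and return how many were actually freed.
 * Gives up immediately if the build lock is contended.
 */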
2646 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
2647 {
2648         struct f2fs_nm_info *nm_i = NM_I(sbi);
2649         int nr = nr_shrink;
2650
2651         if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
2652                 return 0;
2653
2654         if (!mutex_trylock(&nm_i->build_lock))
2655                 return 0;
2656
2657         while (nr_shrink && nm_i->nid_cnt[FREE_NID] > MAX_FREE_NIDS) {
2658                 struct free_nid *i, *next;
2659                 unsigned int batch = SHRINK_NID_BATCH_SIZE;
2660
2661                 spin_lock(&nm_i->nid_list_lock);
2662                 list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
2663                         if (!nr_shrink || !batch ||
2664                                 nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
2665                                 break;
2666                         __remove_free_nid(sbi, i, FREE_NID);
2667                         kmem_cache_free(free_nid_slab, i);
2668                         nr_shrink--;
2669                         batch--;
2670                 }
2671                 spin_unlock(&nm_i->nid_list_lock);
2672         }
2673
2674         mutex_unlock(&nm_i->build_lock);
2675
2676         return nr - nr_shrink;
2677 }
2678
2679 int f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
2680 {
2681         void *src_addr, *dst_addr;
2682         size_t inline_size;
2683         struct page *ipage;
2684         struct f2fs_inode *ri;
2685
2686         ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
2687         if (IS_ERR(ipage))
2688                 return PTR_ERR(ipage);
2689
2690         ri = F2FS_INODE(page);
2691         if (ri->i_inline & F2FS_INLINE_XATTR) {
2692                 if (!f2fs_has_inline_xattr(inode)) {
2693                         set_inode_flag(inode, FI_INLINE_XATTR);
2694                         stat_inc_inline_xattr(inode);
2695                 }
2696         } else {
2697                 if (f2fs_has_inline_xattr(inode)) {
2698                         stat_dec_inline_xattr(inode);
2699                         clear_inode_flag(inode, FI_INLINE_XATTR);
2700                 }
2701                 goto update_inode;
2702         }
2703
2704         dst_addr = inline_xattr_addr(inode, ipage);
2705         src_addr = inline_xattr_addr(inode, page);
2706         inline_size = inline_xattr_size(inode);
2707
2708         f2fs_wait_on_page_writeback(ipage, NODE, true, true);
2709         memcpy(dst_addr, src_addr, inline_size);
2710 update_inode:
2711         f2fs_update_inode(inode, ipage);
2712         f2fs_put_page(ipage, 1);
2713         return 0;
2714 }
2715
2716 int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
2717 {
2718         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2719         nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
2720         nid_t new_xnid;
2721         struct dnode_of_data dn;
2722         struct node_info ni;
2723         struct page *xpage;
2724         int err;
2725
2726         if (!prev_xnid)
2727                 goto recover_xnid;
2728
2729         /* 1: invalidate the previous xattr nid */
2730         err = f2fs_get_node_info(sbi, prev_xnid, &ni, false);
2731         if (err)
2732                 return err;
2733
2734         f2fs_invalidate_blocks(sbi, ni.blk_addr);
2735         dec_valid_node_count(sbi, inode, false);
2736         set_node_addr(sbi, &ni, NULL_ADDR, false);
2737
2738 recover_xnid:
2739         /* 2: update xattr nid in inode */
2740         if (!f2fs_alloc_nid(sbi, &new_xnid))
2741                 return -ENOSPC;
2742
2743         set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
2744         xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);
2745         if (IS_ERR(xpage)) {
2746                 f2fs_alloc_nid_failed(sbi, new_xnid);
2747                 return PTR_ERR(xpage);
2748         }
2749
2750         f2fs_alloc_nid_done(sbi, new_xnid);
2751         f2fs_update_inode_page(inode);
2752
2753         /* 3: update and set xattr node page dirty */
2754         if (page) {
2755                 memcpy(F2FS_NODE(xpage), F2FS_NODE(page),
2756                                 VALID_XATTR_BLOCK_SIZE);
2757                 set_page_dirty(xpage);
2758         }
2759         f2fs_put_page(xpage, 1);
2760
2761         return 0;
2762 }
2763
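/*
 * Roll-forward recovery: rebuild an in-memory inode page for the ino
 * found in @page, copying over the basic inode fields and resetting the
 * size, block and link counts as for a fresh inode.
 */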
2764 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
2765 {
2766         struct f2fs_inode *src, *dst;
2767         nid_t ino = ino_of_node(page);
2768         struct node_info old_ni, new_ni;
2769         struct page *ipage;
2770         int err;
2771
2772         err = f2fs_get_node_info(sbi, ino, &old_ni, false);
2773         if (err)
2774                 return err;
2775
2776         if (unlikely(old_ni.blk_addr != NULL_ADDR))
2777                 return -EINVAL;
2778 retry:
2779         ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
2780         if (!ipage) {
2781                 memalloc_retry_wait(GFP_NOFS);
2782                 goto retry;
2783         }
2784
2785         /* This ino must not be reused from the free nid list */
2786         remove_free_nid(sbi, ino);
2787
2788         if (!PageUptodate(ipage))
2789                 SetPageUptodate(ipage);
2790         fill_node_footer(ipage, ino, ino, 0, true);
2791         set_cold_node(ipage, false);
2792
2793         src = F2FS_INODE(page);
2794         dst = F2FS_INODE(ipage);
2795
2796         memcpy(dst, src, offsetof(struct f2fs_inode, i_ext));
2797         dst->i_size = 0;
2798         dst->i_blocks = cpu_to_le64(1);
2799         dst->i_links = cpu_to_le32(1);
2800         dst->i_xattr_nid = 0;
2801         dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR);
2802         if (dst->i_inline & F2FS_EXTRA_ATTR) {
2803                 dst->i_extra_isize = src->i_extra_isize;
2804
2805                 if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
2806                         F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2807                                                         i_inline_xattr_size))
2808                         dst->i_inline_xattr_size = src->i_inline_xattr_size;
2809
2810                 if (f2fs_sb_has_project_quota(sbi) &&
2811                         F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2812                                                                 i_projid))
2813                         dst->i_projid = src->i_projid;
2814
2815                 if (f2fs_sb_has_inode_crtime(sbi) &&
2816                         F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2817                                                         i_crtime_nsec)) {
2818                         dst->i_crtime = src->i_crtime;
2819                         dst->i_crtime_nsec = src->i_crtime_nsec;
2820                 }
2821         }
2822
2823         new_ni = old_ni;
2824         new_ni.ino = ino;
2825
2826         if (unlikely(inc_valid_node_count(sbi, NULL, true)))
2827                 WARN_ON(1);
2828         set_node_addr(sbi, &new_ni, NEW_ADDR, false);
2829         inc_valid_inode_count(sbi);
2830         set_page_dirty(ipage);
2831         f2fs_put_page(ipage, 1);
2832         return 0;
2833 }
2834
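/*
 * Rebuild the node summary block for @segno by reading every node block
 * in the segment and recording its nid.
 */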
2835 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
2836                         unsigned int segno, struct f2fs_summary_block *sum)
2837 {
2838         struct f2fs_node *rn;
2839         struct f2fs_summary *sum_entry;
2840         block_t addr;
2841         int i, idx, last_offset, nrpages;
2842
2843         /* scan the node segment */
2844         last_offset = sbi->blocks_per_seg;
2845         addr = START_BLOCK(sbi, segno);
2846         sum_entry = &sum->entries[0];
2847
2848         for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
2849                 nrpages = bio_max_segs(last_offset - i);
2850
2851                 /* readahead node pages */
2852                 f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);
2853
2854                 for (idx = addr; idx < addr + nrpages; idx++) {
2855                         struct page *page = f2fs_get_tmp_page(sbi, idx);
2856
2857                         if (IS_ERR(page))
2858                                 return PTR_ERR(page);
2859
2860                         rn = F2FS_NODE(page);
2861                         sum_entry->nid = rn->footer.nid;
2862                         sum_entry->version = 0;
2863                         sum_entry->ofs_in_node = 0;
2864                         sum_entry++;
2865                         f2fs_put_page(page, 1);
2866                 }
2867
2868                 invalidate_mapping_pages(META_MAPPING(sbi), addr,
2869                                                         addr + nrpages);
2870         }
2871         return 0;
2872 }
2873
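/*
 * Pull every nat entry out of the curseg journal into the nat cache,
 * marking them dirty so the next flush rewrites them; the journal is
 * emptied in the process.
 */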
2874 static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
2875 {
2876         struct f2fs_nm_info *nm_i = NM_I(sbi);
2877         struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2878         struct f2fs_journal *journal = curseg->journal;
2879         int i;
2880
2881         down_write(&curseg->journal_rwsem);
2882         for (i = 0; i < nats_in_cursum(journal); i++) {
2883                 struct nat_entry *ne;
2884                 struct f2fs_nat_entry raw_ne;
2885                 nid_t nid = le32_to_cpu(nid_in_journal(journal, i));
2886
2887                 if (f2fs_check_nid_range(sbi, nid))
2888                         continue;
2889
2890                 raw_ne = nat_in_journal(journal, i);
2891
2892                 ne = __lookup_nat_cache(nm_i, nid);
2893                 if (!ne) {
2894                         ne = __alloc_nat_entry(sbi, nid, true);
2895                         __init_nat_entry(nm_i, ne, &raw_ne, true);
2896                 }
2897
2898                 /*
2899                  * if a free nat in the journal has not been used since the
2900                  * last checkpoint, remove it from the available nids, since
2901                  * we will add it back again later.
2902                  */
2903                 if (!get_nat_flag(ne, IS_DIRTY) &&
2904                                 le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
2905                         spin_lock(&nm_i->nid_list_lock);
2906                         nm_i->available_nids--;
2907                         spin_unlock(&nm_i->nid_list_lock);
2908                 }
2909
2910                 __set_nat_cache_dirty(nm_i, ne);
2911         }
2912         update_nats_in_cursum(journal, -i);
2913         up_write(&curseg->journal_rwsem);
2914 }
2915
2916 static void __adjust_nat_entry_set(struct nat_entry_set *nes,
2917                                                 struct list_head *head, int max)
2918 {
2919         struct nat_entry_set *cur;
2920
2921         if (nes->entry_cnt >= max)
2922                 goto add_out;
2923
2924         list_for_each_entry(cur, head, set_list) {
2925                 if (cur->entry_cnt >= nes->entry_cnt) {
2926                         list_add(&nes->set_list, cur->set_list.prev);
2927                         return;
2928                 }
2929         }
2930 add_out:
2931         list_add_tail(&nes->set_list, head);
2932 }
2933
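/*
 * Keep the empty/full NAT block bitmaps in sync with @valid, the number
 * of in-use entries in NAT block @nat_ofs.
 */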
2934 static void __update_nat_bits(struct f2fs_nm_info *nm_i, unsigned int nat_ofs,
2935                                                         unsigned int valid)
2936 {
2937         if (valid == 0) {
2938                 __set_bit_le(nat_ofs, nm_i->empty_nat_bits);
2939                 __clear_bit_le(nat_ofs, nm_i->full_nat_bits);
2940                 return;
2941         }
2942
2943         __clear_bit_le(nat_ofs, nm_i->empty_nat_bits);
2944         if (valid == NAT_ENTRY_PER_BLOCK)
2945                 __set_bit_le(nat_ofs, nm_i->full_nat_bits);
2946         else
2947                 __clear_bit_le(nat_ofs, nm_i->full_nat_bits);
2948 }
2949
2950 static void update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
2951                                                 struct page *page)
2952 {
2953         struct f2fs_nm_info *nm_i = NM_I(sbi);
2954         unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK;
2955         struct f2fs_nat_block *nat_blk = page_address(page);
2956         int valid = 0;
2957         int i = 0;
2958
2959         if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
2960                 return;
2961
2962         if (nat_index == 0) {
2963                 valid = 1;
2964                 i = 1;
2965         }
2966         for (; i < NAT_ENTRY_PER_BLOCK; i++) {
2967                 if (le32_to_cpu(nat_blk->entries[i].block_addr) != NULL_ADDR)
2968                         valid++;
2969         }
2970
2971         __update_nat_bits(nm_i, nat_index, valid);
2972 }
2973
2974 void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi)
2975 {
2976         struct f2fs_nm_info *nm_i = NM_I(sbi);
2977         unsigned int nat_ofs;
2978
2979         f2fs_down_read(&nm_i->nat_tree_lock);
2980
2981         for (nat_ofs = 0; nat_ofs < nm_i->nat_blocks; nat_ofs++) {
2982                 unsigned int valid = 0, nid_ofs = 0;
2983
2984                 /* handle nid zero specially, since it must never be used */
		if (unlikely(nat_ofs == 0)) {
			valid = 1;
			nid_ofs = 1;
		}

		for (; nid_ofs < NAT_ENTRY_PER_BLOCK; nid_ofs++) {
			if (!test_bit_le(nid_ofs,
					nm_i->free_nid_bitmap[nat_ofs]))
				valid++;
		}

		__update_nat_bits(nm_i, nat_ofs, valid);
	}

	f2fs_up_read(&nm_i->nat_tree_lock);
}

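/*
 * Write one dirty NAT entry set back, either into the journal of the hot
 * data summary or into its NAT page, requeueing nids that became free.
 * Returns 0 on success, or a negative errno if the NAT page cannot be read.
 */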
static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
		struct nat_entry_set *set, struct cp_control *cpc)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
	bool to_journal = true;
	struct f2fs_nat_block *nat_blk;
	struct nat_entry *ne, *cur;
	struct page *page = NULL;

	/*
	 * there are two steps to flush nat entries:
	 * #1, flush nat entries to journal in current hot data summary block.
	 * #2, flush nat entries to nat page.
	 */
	if ((cpc->reason & CP_UMOUNT) ||
		!__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
		to_journal = false;

	if (to_journal) {
		down_write(&curseg->journal_rwsem);
	} else {
		page = get_next_nat_page(sbi, start_nid);
		if (IS_ERR(page))
			return PTR_ERR(page);

		nat_blk = page_address(page);
		f2fs_bug_on(sbi, !nat_blk);
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
		struct f2fs_nat_entry *raw_ne;
		nid_t nid = nat_get_nid(ne);
		int offset;

		f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR);

		if (to_journal) {
			offset = f2fs_lookup_journal_in_cursum(journal,
							NAT_JOURNAL, nid, 1);
			f2fs_bug_on(sbi, offset < 0);
			raw_ne = &nat_in_journal(journal, offset);
			nid_in_journal(journal, offset) = cpu_to_le32(nid);
		} else {
			raw_ne = &nat_blk->entries[nid - start_nid];
		}
		raw_nat_from_node_info(raw_ne, &ne->ni);
		nat_reset_flag(ne);
		__clear_nat_cache_dirty(NM_I(sbi), set, ne);
		if (nat_get_blkaddr(ne) == NULL_ADDR) {
			add_free_nid(sbi, nid, false, true);
		} else {
			spin_lock(&NM_I(sbi)->nid_list_lock);
			update_free_nid_bitmap(sbi, nid, false, false);
			spin_unlock(&NM_I(sbi)->nid_list_lock);
		}
	}

	if (to_journal) {
		up_write(&curseg->journal_rwsem);
	} else {
		update_nat_bits(sbi, start_nid, page);
		f2fs_put_page(page, 1);
	}

	/* Allow dirty nats by node block allocation in write_begin */
	if (!set->entry_cnt) {
		radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
		kmem_cache_free(nat_entry_set_slab, set);
	}
	return 0;
}

3077 /*
3078  * This function is called during the checkpointing process.
3079  */
int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	struct nat_entry_set *setvec[NAT_VEC_SIZE];
	struct nat_entry_set *set, *tmp;
	unsigned int found;
	nid_t set_idx = 0;
	LIST_HEAD(sets);
	int err = 0;

	/*
	 * during unmount, let's flush nat_bits before checking
	 * nat_cnt[DIRTY_NAT].
	 */
	if (cpc->reason & CP_UMOUNT) {
		f2fs_down_write(&nm_i->nat_tree_lock);
		remove_nats_in_journal(sbi);
		f2fs_up_write(&nm_i->nat_tree_lock);
	}

	if (!nm_i->nat_cnt[DIRTY_NAT])
		return 0;

	f2fs_down_write(&nm_i->nat_tree_lock);

	/*
	 * if there is not enough space in the journal to store all dirty nat
	 * entries, remove them from the journal and merge them into the nat
	 * entry sets instead.
	 */
	if (cpc->reason & CP_UMOUNT ||
		!__has_cursum_space(journal,
			nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL))
		remove_nats_in_journal(sbi);

	while ((found = __gang_lookup_nat_set(nm_i,
					set_idx, NAT_VEC_SIZE, setvec))) {
		unsigned idx;

		set_idx = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++)
			__adjust_nat_entry_set(setvec[idx], &sets,
						MAX_NAT_JENTRIES(journal));
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(set, tmp, &sets, set_list) {
		err = __flush_nat_entry_set(sbi, set, cpc);
		if (err)
			break;
	}

	f2fs_up_write(&nm_i->nat_tree_lock);
	/* Allow dirty nats by node block allocation in write_begin */

	return err;
}

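/*
 * Load the nat_bits image from the tail of the checkpoint area.  Its layout
 * is an 8-byte checkpoint version followed by the full_nat_bits and then the
 * empty_nat_bits bitmaps; if the stored version does not match the current
 * checkpoint, nat_bits is disabled.
 */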
static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE;
	unsigned int i;
	__u64 cp_ver = cur_cp_version(ckpt);
	block_t nat_bits_addr;

	nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
	nm_i->nat_bits = f2fs_kvzalloc(sbi,
			nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL);
	if (!nm_i->nat_bits)
		return -ENOMEM;

	nm_i->full_nat_bits = nm_i->nat_bits + 8;
	nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;

	if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
		return 0;

	nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg -
						nm_i->nat_bits_blocks;
	for (i = 0; i < nm_i->nat_bits_blocks; i++) {
		struct page *page;

		page = f2fs_get_meta_page(sbi, nat_bits_addr++);
		if (IS_ERR(page))
			return PTR_ERR(page);

		memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
					page_address(page), F2FS_BLKSIZE);
		f2fs_put_page(page, 1);
	}

	cp_ver |= (cur_cp_crc(ckpt) << 32);
	if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
		clear_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
		f2fs_notice(sbi, "Disable nat_bits due to incorrect cp_ver (%llu, %llu)",
			cp_ver, le64_to_cpu(*(__le64 *)nm_i->nat_bits));
		return 0;
	}

	f2fs_notice(sbi, "Found nat_bits in checkpoint");
	return 0;
}

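/*
 * Preload free nid state from nat_bits: every nid inside an empty NAT block
 * is marked free, and both empty and full blocks are flagged in
 * nat_block_bitmap so that later free nid scanning can skip reading them.
 */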
static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int i = 0;
	nid_t nid, last_nid;

	if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
		return;

	for (i = 0; i < nm_i->nat_blocks; i++) {
		i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
		if (i >= nm_i->nat_blocks)
			break;

		__set_bit_le(i, nm_i->nat_block_bitmap);

		nid = i * NAT_ENTRY_PER_BLOCK;
		last_nid = nid + NAT_ENTRY_PER_BLOCK;

		spin_lock(&NM_I(sbi)->nid_list_lock);
		for (; nid < last_nid; nid++)
			update_free_nid_bitmap(sbi, nid, true, true);
		spin_unlock(&NM_I(sbi)->nid_list_lock);
	}

	for (i = 0; i < nm_i->nat_blocks; i++) {
		i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
		if (i >= nm_i->nat_blocks)
			break;

		__set_bit_le(i, nm_i->nat_block_bitmap);
	}
}

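/*
 * Initialize nm_info from the raw superblock and checkpoint: compute the
 * NAT geometry and nid limits, set the default tunables, and copy the NAT
 * version bitmap out of the checkpoint.
 */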
static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs;
	int err;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes pair segments, so divide by 2 */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;

	/* unused nids: 0, node, meta (root is counted as a valid node) */
	nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
						F2FS_RESERVED_NODE_NUM;
	nm_i->nid_cnt[FREE_NID] = 0;
	nm_i->nid_cnt[PREALLOC_NID] = 0;
	nm_i->ram_thresh = DEF_RAM_THRESHOLD;
	nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
	nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
	nm_i->max_rf_node_blocks = DEF_RF_NODE_BLOCKS;

	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
	INIT_LIST_HEAD(&nm_i->nat_entries);
	spin_lock_init(&nm_i->nat_list_lock);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->nid_list_lock);
	init_f2fs_rwsem(&nm_i->nat_tree_lock);

	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;

	err = __get_nat_bitmaps(sbi);
	if (err)
		return err;

#ifdef CONFIG_F2FS_CHECK_FS
	nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap_mir)
		return -ENOMEM;
#endif

	return 0;
}

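/*
 * Allocate the per-NAT-block free nid bitmaps plus the nat_block_bitmap and
 * free_nid_count arrays.  On -ENOMEM, partial allocations are left for the
 * caller's error path to release.
 */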
static int init_free_nid_cache(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int i;

	nm_i->free_nid_bitmap =
		f2fs_kvzalloc(sbi, array_size(sizeof(unsigned char *),
					      nm_i->nat_blocks),
			      GFP_KERNEL);
	if (!nm_i->free_nid_bitmap)
		return -ENOMEM;

	for (i = 0; i < nm_i->nat_blocks; i++) {
		nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
			f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL);
		if (!nm_i->free_nid_bitmap[i])
			return -ENOMEM;
	}

	nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
								GFP_KERNEL);
	if (!nm_i->nat_block_bitmap)
		return -ENOMEM;

	nm_i->free_nid_count =
		f2fs_kvzalloc(sbi, array_size(sizeof(unsigned short),
					      nm_i->nat_blocks),
			      GFP_KERNEL);
	if (!nm_i->free_nid_count)
		return -ENOMEM;
	return 0;
}

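/*
 * Bring up the node manager: allocate nm_info, initialize its in-memory
 * structures, preload free nid state from nat_bits, and finally scan for
 * free nids.
 */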
int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info),
							GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	err = init_free_nid_cache(sbi);
	if (err)
		return err;

	/* load free nid status from nat_bits table */
	load_free_nid_bitmap(sbi);

	return f2fs_build_free_nids(sbi, true, true);
}

void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	void *vec[NAT_VEC_SIZE];
	struct nat_entry **natvec = (struct nat_entry **)vec;
	struct nat_entry_set **setvec = (struct nat_entry_set **)vec;
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		__remove_free_nid(sbi, i, FREE_NID);
		spin_unlock(&nm_i->nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		spin_lock(&nm_i->nid_list_lock);
	}
	f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]);
	f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]);
	f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list));
	spin_unlock(&nm_i->nid_list_lock);

	/* destroy nat cache */
	f2fs_down_write(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
					nid, NAT_VEC_SIZE, natvec))) {
		unsigned idx;

		nid = nat_get_nid(natvec[found - 1]) + 1;
		for (idx = 0; idx < found; idx++) {
			spin_lock(&nm_i->nat_list_lock);
			list_del(&natvec[idx]->list);
			spin_unlock(&nm_i->nat_list_lock);

			__del_from_nat_cache(nm_i, natvec[idx]);
		}
	}
	f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT]);

	/* destroy nat set cache */
	nid = 0;
	memset(vec, 0, sizeof(void *) * NAT_VEC_SIZE);
	while ((found = __gang_lookup_nat_set(nm_i,
					nid, NAT_VEC_SIZE, setvec))) {
		unsigned idx;

		nid = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++) {
			/* entry_cnt can be non-zero here if a cp_error occurred */
			f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
			radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
			kmem_cache_free(nat_entry_set_slab, setvec[idx]);
		}
	}
	f2fs_up_write(&nm_i->nat_tree_lock);

	kvfree(nm_i->nat_block_bitmap);
	if (nm_i->free_nid_bitmap) {
		int i;

		for (i = 0; i < nm_i->nat_blocks; i++)
			kvfree(nm_i->free_nid_bitmap[i]);
		kvfree(nm_i->free_nid_bitmap);
	}
	kvfree(nm_i->free_nid_count);

	kvfree(nm_i->nat_bitmap);
	kvfree(nm_i->nat_bits);
#ifdef CONFIG_F2FS_CHECK_FS
	kvfree(nm_i->nat_bitmap_mir);
#endif
	sbi->nm_info = NULL;
	kfree(nm_i);
}

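/*
 * Create the slab caches used by the node manager; on failure, destroy the
 * caches created so far in reverse order.
 */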
int __init f2fs_create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("f2fs_nat_entry",
			sizeof(struct nat_entry));
	if (!nat_entry_slab)
		goto fail;

	free_nid_slab = f2fs_kmem_cache_create("f2fs_free_nid",
			sizeof(struct free_nid));
	if (!free_nid_slab)
		goto destroy_nat_entry;

	nat_entry_set_slab = f2fs_kmem_cache_create("f2fs_nat_entry_set",
			sizeof(struct nat_entry_set));
	if (!nat_entry_set_slab)
		goto destroy_free_nid;

	fsync_node_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_node_entry",
			sizeof(struct fsync_node_entry));
	if (!fsync_node_entry_slab)
		goto destroy_nat_entry_set;
	return 0;

destroy_nat_entry_set:
	kmem_cache_destroy(nat_entry_set_slab);
destroy_free_nid:
	kmem_cache_destroy(free_nid_slab);
destroy_nat_entry:
	kmem_cache_destroy(nat_entry_slab);
fail:
	return -ENOMEM;
}

void f2fs_destroy_node_manager_caches(void)
{
	kmem_cache_destroy(fsync_node_entry_slab);
	kmem_cache_destroy(nat_entry_set_slab);
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}