// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/sched/mm.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

#define on_f2fs_build_free_nids(nm_i)	mutex_is_locked(&(nm_i)->build_lock)

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;
static struct kmem_cache *fsync_node_entry_slab;

/*
 * Check whether the given nid is within node id range.
 */
int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
{
        if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
                set_sbi_flag(sbi, SBI_NEED_FSCK);
                f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.",
                          __func__, nid);
                f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
                return -EFSCORRUPTED;
        }
        return 0;
}

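/*
 * Decide whether it is still worth caching more in-memory entries of the
 * given @type: each type is allowed a share of available low memory
 * (nm_i->ram_thresh percent of it), as listed in the comment below.
 */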
bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
        struct sysinfo val;
        unsigned long avail_ram;
        unsigned long mem_size = 0;
        bool res = false;

        if (!nm_i)
                return true;

        si_meminfo(&val);

        /* only uses low memory */
        avail_ram = val.totalram - val.totalhigh;

        /*
         * give 25%, 25%, 50%, 50%, 50% of low memory to FREE_NIDS,
         * NAT_ENTRIES, DIRTY_DENTS, INO_ENTRIES and EXTENT_CACHE
         * respectively; DISCARD_CACHE may use the whole ram_thresh share
         */
        if (type == FREE_NIDS) {
                mem_size = (nm_i->nid_cnt[FREE_NID] *
                                sizeof(struct free_nid)) >> PAGE_SHIFT;
                res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
        } else if (type == NAT_ENTRIES) {
                mem_size = (nm_i->nat_cnt[TOTAL_NAT] *
                                sizeof(struct nat_entry)) >> PAGE_SHIFT;
                res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
                if (excess_cached_nats(sbi))
                        res = false;
        } else if (type == DIRTY_DENTS) {
                if (sbi->sb->s_bdi->wb.dirty_exceeded)
                        return false;
                mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
                res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
        } else if (type == INO_ENTRIES) {
                int i;

                for (i = 0; i < MAX_INO_ENTRY; i++)
                        mem_size += sbi->im[i].ino_num *
                                                sizeof(struct ino_entry);
                mem_size >>= PAGE_SHIFT;
                res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
        } else if (type == EXTENT_CACHE) {
                mem_size = (atomic_read(&sbi->total_ext_tree) *
                                sizeof(struct extent_tree) +
                                atomic_read(&sbi->total_ext_node) *
                                sizeof(struct extent_node)) >> PAGE_SHIFT;
                res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
        } else if (type == DISCARD_CACHE) {
                mem_size = (atomic_read(&dcc->discard_cmd_cnt) *
                                sizeof(struct discard_cmd)) >> PAGE_SHIFT;
                res = mem_size < (avail_ram * nm_i->ram_thresh / 100);
        } else if (type == COMPRESS_PAGE) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
                unsigned long free_ram = val.freeram;

                /*
                 * If free memory is lower than the watermark or the cached
                 * page count exceeds the threshold, deny caching the
                 * compressed page.
                 */
                res = (free_ram > avail_ram * sbi->compress_watermark / 100) &&
                        (COMPRESS_MAPPING(sbi)->nrpages <
                         free_ram * sbi->compress_percent / 100);
#else
                res = false;
#endif
        } else {
                if (!sbi->sb->s_bdi->wb.dirty_exceeded)
                        return true;
        }
        return res;
}

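/* Drop a node page's dirty state along with its dirty-node page accounting. */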
static void clear_node_page_dirty(struct page *page)
{
        if (PageDirty(page)) {
                f2fs_clear_page_cache_dirty_tag(page);
                clear_page_dirty_for_io(page);
                dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
        }
        ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
        return f2fs_get_meta_page_retry(sbi, current_nat_addr(sbi, nid));
}

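/*
 * Copy the current NAT block covering @nid into its next-version meta page,
 * dirty the copy, and flip the NAT version bitmap for @nid.
 */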
static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
        struct page *src_page;
        struct page *dst_page;
        pgoff_t dst_off;
        void *src_addr;
        void *dst_addr;
        struct f2fs_nm_info *nm_i = NM_I(sbi);

        dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));

        /* get current nat block page with lock */
        src_page = get_current_nat_page(sbi, nid);
        if (IS_ERR(src_page))
                return src_page;
        dst_page = f2fs_grab_meta_page(sbi, dst_off);
        f2fs_bug_on(sbi, PageDirty(src_page));

        src_addr = page_address(src_page);
        dst_addr = page_address(dst_page);
        memcpy(dst_addr, src_addr, PAGE_SIZE);
        set_page_dirty(dst_page);
        f2fs_put_page(src_page, 1);

        set_to_next_nat(nm_i, nid);

        return dst_page;
}

static struct nat_entry *__alloc_nat_entry(struct f2fs_sb_info *sbi,
                                                nid_t nid, bool no_fail)
{
        struct nat_entry *new;

        new = f2fs_kmem_cache_alloc(nat_entry_slab,
                                        GFP_F2FS_ZERO, no_fail, sbi);
        if (new) {
                nat_set_nid(new, nid);
                nat_reset_flag(new);
        }
        return new;
}

static void __free_nat_entry(struct nat_entry *e)
{
        kmem_cache_free(nat_entry_slab, e);
}

/* must be locked by nat_tree_lock */
static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
        struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail)
{
        if (no_fail)
                f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
        else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
                return NULL;

        if (raw_ne)
                node_info_from_raw_nat(&ne->ni, raw_ne);

        spin_lock(&nm_i->nat_list_lock);
        list_add_tail(&ne->list, &nm_i->nat_entries);
        spin_unlock(&nm_i->nat_list_lock);

        nm_i->nat_cnt[TOTAL_NAT]++;
        nm_i->nat_cnt[RECLAIMABLE_NAT]++;
        return ne;
}

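/*
 * Callers hold nat_tree_lock (read or write); the LRU move below is
 * protected by nat_list_lock.
 */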
static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
        struct nat_entry *ne;

        ne = radix_tree_lookup(&nm_i->nat_root, n);

        /* for a recently accessed nat entry, move it to the tail of the lru list */
        if (ne && !get_nat_flag(ne, IS_DIRTY)) {
                spin_lock(&nm_i->nat_list_lock);
                if (!list_empty(&ne->list))
                        list_move_tail(&ne->list, &nm_i->nat_entries);
                spin_unlock(&nm_i->nat_list_lock);
        }

        return ne;
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
                nid_t start, unsigned int nr, struct nat_entry **ep)
{
        return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
        radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
        nm_i->nat_cnt[TOTAL_NAT]--;
        nm_i->nat_cnt[RECLAIMABLE_NAT]--;
        __free_nat_entry(e);
}

static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
                                                        struct nat_entry *ne)
{
        nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
        struct nat_entry_set *head;

        head = radix_tree_lookup(&nm_i->nat_set_root, set);
        if (!head) {
                head = f2fs_kmem_cache_alloc(nat_entry_set_slab,
                                                GFP_NOFS, true, NULL);

                INIT_LIST_HEAD(&head->entry_list);
                INIT_LIST_HEAD(&head->set_list);
                head->set = set;
                head->entry_cnt = 0;
                f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
        }
        return head;
}

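/*
 * Mark @ne dirty and move it onto its per-NAT-block set list so that it can
 * be flushed together with neighbouring entries at checkpoint time; entries
 * still at NEW_ADDR are kept off the set lists.
 */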
static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
                                                struct nat_entry *ne)
{
        struct nat_entry_set *head;
        bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR;

        if (!new_ne)
                head = __grab_nat_entry_set(nm_i, ne);

        /*
         * update entry_cnt under the following conditions:
         * 1. update NEW_ADDR to a valid block address;
         * 2. update an old block address to a new one;
         */
        if (!new_ne && (get_nat_flag(ne, IS_PREALLOC) ||
                                !get_nat_flag(ne, IS_DIRTY)))
                head->entry_cnt++;

        set_nat_flag(ne, IS_PREALLOC, new_ne);

        if (get_nat_flag(ne, IS_DIRTY))
                goto refresh_list;

        nm_i->nat_cnt[DIRTY_NAT]++;
        nm_i->nat_cnt[RECLAIMABLE_NAT]--;
        set_nat_flag(ne, IS_DIRTY, true);
refresh_list:
        spin_lock(&nm_i->nat_list_lock);
        if (new_ne)
                list_del_init(&ne->list);
        else
                list_move_tail(&ne->list, &head->entry_list);
        spin_unlock(&nm_i->nat_list_lock);
}

static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
                struct nat_entry_set *set, struct nat_entry *ne)
{
        spin_lock(&nm_i->nat_list_lock);
        list_move_tail(&ne->list, &nm_i->nat_entries);
        spin_unlock(&nm_i->nat_list_lock);

        set_nat_flag(ne, IS_DIRTY, false);
        set->entry_cnt--;
        nm_i->nat_cnt[DIRTY_NAT]--;
        nm_i->nat_cnt[RECLAIMABLE_NAT]++;
}

static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
                nid_t start, unsigned int nr, struct nat_entry_set **ep)
{
        return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
                                                        start, nr);
}

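/*
 * True if @page is a direct node page of a cold file still attached to the
 * node mapping; such pages are the candidates tracked as fsync node entries.
 */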
bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page)
{
        return NODE_MAPPING(sbi) == page->mapping &&
                        IS_DNODE(page) && is_cold_node(page);
}

void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
{
        spin_lock_init(&sbi->fsync_node_lock);
        INIT_LIST_HEAD(&sbi->fsync_node_list);
        sbi->fsync_seg_id = 0;
        sbi->fsync_node_num = 0;
}

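/*
 * Add @page to the fsync node list with an extra page reference and return
 * the sequence id assigned to it.
 */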
static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
                                                        struct page *page)
{
        struct fsync_node_entry *fn;
        unsigned long flags;
        unsigned int seq_id;

        fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab,
                                        GFP_NOFS, true, NULL);

        get_page(page);
        fn->page = page;
        INIT_LIST_HEAD(&fn->list);

        spin_lock_irqsave(&sbi->fsync_node_lock, flags);
        list_add_tail(&fn->list, &sbi->fsync_node_list);
        fn->seq_id = sbi->fsync_seg_id++;
        seq_id = fn->seq_id;
        sbi->fsync_node_num++;
        spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

        return seq_id;
}

void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
{
        struct fsync_node_entry *fn;
        unsigned long flags;

        spin_lock_irqsave(&sbi->fsync_node_lock, flags);
        list_for_each_entry(fn, &sbi->fsync_node_list, list) {
                if (fn->page == page) {
                        list_del(&fn->list);
                        sbi->fsync_node_num--;
                        spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
                        kmem_cache_free(fsync_node_entry_slab, fn);
                        put_page(page);
                        return;
                }
        }
        spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
        f2fs_bug_on(sbi, 1);
}

void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi)
{
        unsigned long flags;

        spin_lock_irqsave(&sbi->fsync_node_lock, flags);
        sbi->fsync_seg_id = 0;
        spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
}

int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct nat_entry *e;
        bool need = false;

        f2fs_down_read(&nm_i->nat_tree_lock);
        e = __lookup_nat_cache(nm_i, nid);
        if (e) {
                if (!get_nat_flag(e, IS_CHECKPOINTED) &&
                                !get_nat_flag(e, HAS_FSYNCED_INODE))
                        need = true;
        }
        f2fs_up_read(&nm_i->nat_tree_lock);
        return need;
}

bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct nat_entry *e;
        bool is_cp = true;

        f2fs_down_read(&nm_i->nat_tree_lock);
        e = __lookup_nat_cache(nm_i, nid);
        if (e && !get_nat_flag(e, IS_CHECKPOINTED))
                is_cp = false;
        f2fs_up_read(&nm_i->nat_tree_lock);
        return is_cp;
}

bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct nat_entry *e;
        bool need_update = true;

        f2fs_down_read(&nm_i->nat_tree_lock);
        e = __lookup_nat_cache(nm_i, ino);
        if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
                        (get_nat_flag(e, IS_CHECKPOINTED) ||
                         get_nat_flag(e, HAS_FSYNCED_INODE)))
                need_update = false;
        f2fs_up_read(&nm_i->nat_tree_lock);
        return need_update;
}

/* nat_tree_lock is taken internally; callers must not hold it */
static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
                                                struct f2fs_nat_entry *ne)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct nat_entry *new, *e;

        /* Let's mitigate lock contention of nat_tree_lock during checkpoint */
        if (f2fs_rwsem_is_locked(&sbi->cp_global_sem))
                return;

        new = __alloc_nat_entry(sbi, nid, false);
        if (!new)
                return;

        f2fs_down_write(&nm_i->nat_tree_lock);
        e = __lookup_nat_cache(nm_i, nid);
        if (!e)
                e = __init_nat_entry(nm_i, new, ne, false);
        else
                f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
                                nat_get_blkaddr(e) !=
                                        le32_to_cpu(ne->block_addr) ||
                                nat_get_version(e) != ne->version);
        f2fs_up_write(&nm_i->nat_tree_lock);
        if (e != new)
                __free_nat_entry(new);
}

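/*
 * Update the cached NAT entry for @ni->nid to point at @new_blkaddr and mark
 * it dirty; the fsync flags on the owning inode's entry are refreshed too.
 */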
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
                        block_t new_blkaddr, bool fsync_done)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct nat_entry *e;
        struct nat_entry *new = __alloc_nat_entry(sbi, ni->nid, true);

        f2fs_down_write(&nm_i->nat_tree_lock);
        e = __lookup_nat_cache(nm_i, ni->nid);
        if (!e) {
                e = __init_nat_entry(nm_i, new, NULL, true);
                copy_node_info(&e->ni, ni);
                f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
        } else if (new_blkaddr == NEW_ADDR) {
                /*
                 * when a nid is reallocated, the previous nat entry may
                 * remain in the nat cache, so reinitialize it with the
                 * new information.
                 */
                copy_node_info(&e->ni, ni);
                f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
        }
        /* let's free early to reduce memory consumption */
        if (e != new)
                __free_nat_entry(new);

        /* sanity check */
        f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
        f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
                        new_blkaddr == NULL_ADDR);
        f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
                        new_blkaddr == NEW_ADDR);
        f2fs_bug_on(sbi, __is_valid_data_blkaddr(nat_get_blkaddr(e)) &&
                        new_blkaddr == NEW_ADDR);

        /* increment the version number, as the node is removed */
        if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
                unsigned char version = nat_get_version(e);

                nat_set_version(e, inc_node_version(version));
        }

        /* change address */
        nat_set_blkaddr(e, new_blkaddr);
        if (!__is_valid_data_blkaddr(new_blkaddr))
                set_nat_flag(e, IS_CHECKPOINTED, false);
        __set_nat_cache_dirty(nm_i, e);

        /* update fsync_mark if its inode nat entry is still alive */
        if (ni->nid != ni->ino)
                e = __lookup_nat_cache(nm_i, ni->ino);
        if (e) {
                if (fsync_done && ni->nid == ni->ino)
                        set_nat_flag(e, HAS_FSYNCED_INODE, true);
                set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
        }
        f2fs_up_write(&nm_i->nat_tree_lock);
}

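/*
 * Shrink the NAT cache: free up to @nr_shrink clean entries from the head
 * of the LRU list. Returns the number of entries actually freed.
 */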
int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        int nr = nr_shrink;

        if (!f2fs_down_write_trylock(&nm_i->nat_tree_lock))
                return 0;

        spin_lock(&nm_i->nat_list_lock);
        while (nr_shrink) {
                struct nat_entry *ne;

                if (list_empty(&nm_i->nat_entries))
                        break;

                ne = list_first_entry(&nm_i->nat_entries,
                                        struct nat_entry, list);
                list_del(&ne->list);
                spin_unlock(&nm_i->nat_list_lock);

                __del_from_nat_cache(nm_i, ne);
                nr_shrink--;

                spin_lock(&nm_i->nat_list_lock);
        }
        spin_unlock(&nm_i->nat_list_lock);

        f2fs_up_write(&nm_i->nat_tree_lock);
        return nr - nr_shrink;
}

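/*
 * Fill @ni for @nid by checking, in order, the in-memory NAT cache, the NAT
 * journal in the current summary, and finally the on-disk NAT block; a
 * freshly read entry is cached on the way out.
 */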
int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
                                struct node_info *ni, bool checkpoint_context)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
        struct f2fs_journal *journal = curseg->journal;
        nid_t start_nid = START_NID(nid);
        struct f2fs_nat_block *nat_blk;
        struct page *page = NULL;
        struct f2fs_nat_entry ne;
        struct nat_entry *e;
        pgoff_t index;
        block_t blkaddr;
        int i;

        ni->nid = nid;
retry:
        /* Check nat cache */
        f2fs_down_read(&nm_i->nat_tree_lock);
        e = __lookup_nat_cache(nm_i, nid);
        if (e) {
                ni->ino = nat_get_ino(e);
                ni->blk_addr = nat_get_blkaddr(e);
                ni->version = nat_get_version(e);
                f2fs_up_read(&nm_i->nat_tree_lock);
                return 0;
        }

        /*
         * Check the current segment summary by trying to grab journal_rwsem
         * first. This rwsem is on the critical path of checkpoint, which also
         * takes the above nat_tree_lock. Therefore, if we fail to grab it
         * here, retry without blocking the checkpoint.
         */
        if (!f2fs_rwsem_is_locked(&sbi->cp_global_sem) || checkpoint_context) {
                down_read(&curseg->journal_rwsem);
        } else if (f2fs_rwsem_is_contended(&nm_i->nat_tree_lock) ||
                                !down_read_trylock(&curseg->journal_rwsem)) {
                f2fs_up_read(&nm_i->nat_tree_lock);
                goto retry;
        }

        i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
        if (i >= 0) {
                ne = nat_in_journal(journal, i);
                node_info_from_raw_nat(ni, &ne);
        }
        up_read(&curseg->journal_rwsem);
        if (i >= 0) {
                f2fs_up_read(&nm_i->nat_tree_lock);
                goto cache;
        }

        /* Fill node_info from nat page */
        index = current_nat_addr(sbi, nid);
        f2fs_up_read(&nm_i->nat_tree_lock);

        page = f2fs_get_meta_page(sbi, index);
        if (IS_ERR(page))
                return PTR_ERR(page);

        nat_blk = (struct f2fs_nat_block *)page_address(page);
        ne = nat_blk->entries[nid - start_nid];
        node_info_from_raw_nat(ni, &ne);
        f2fs_put_page(page, 1);
cache:
        blkaddr = le32_to_cpu(ne.block_addr);
        if (__is_valid_data_blkaddr(blkaddr) &&
                !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE))
                return -EFAULT;

        /* cache nat entry */
        cache_nat_entry(sbi, nid, &ne);
        return 0;
}

/*
 * Readahead up to @n node pages that are siblings of the desired node,
 * capped at NIDS_PER_BLOCK.
 */
static void f2fs_ra_node_pages(struct page *parent, int start, int n)
{
        struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
        struct blk_plug plug;
        int i, end;
        nid_t nid;

        blk_start_plug(&plug);

        /* try readahead for siblings of the desired node */
        end = start + n;
        end = min(end, NIDS_PER_BLOCK);
        for (i = start; i < end; i++) {
                nid = get_nid(parent, i, false);
                f2fs_ra_node_page(sbi, nid);
        }

        blk_finish_plug(&plug);
}

pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
{
        const long direct_index = ADDRS_PER_INODE(dn->inode);
        const long direct_blks = ADDRS_PER_BLOCK(dn->inode);
        const long indirect_blks = ADDRS_PER_BLOCK(dn->inode) * NIDS_PER_BLOCK;
        unsigned int skipped_unit = ADDRS_PER_BLOCK(dn->inode);
        int cur_level = dn->cur_level;
        int max_level = dn->max_level;
        pgoff_t base = 0;

        if (!dn->max_level)
                return pgofs + 1;

        while (max_level-- > cur_level)
                skipped_unit *= NIDS_PER_BLOCK;

        switch (dn->max_level) {
        case 3:
                base += 2 * indirect_blks;
                fallthrough;
        case 2:
                base += 2 * direct_blks;
                fallthrough;
        case 1:
                base += direct_index;
                break;
        default:
                f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
        }

        return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
}

/*
 * The maximum depth is four. offset[0] holds the offset within the raw
 * inode. The return value is the indexing level: 0 for addresses stored in
 * the inode itself, up to 3 for blocks reached through the double indirect
 * node.
 */
static int get_node_path(struct inode *inode, long block,
                                int offset[4], unsigned int noffset[4])
{
        const long direct_index = ADDRS_PER_INODE(inode);
        const long direct_blks = ADDRS_PER_BLOCK(inode);
        const long dptrs_per_blk = NIDS_PER_BLOCK;
        const long indirect_blks = ADDRS_PER_BLOCK(inode) * NIDS_PER_BLOCK;
        const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
        int n = 0;
        int level = 0;

        noffset[0] = 0;

        if (block < direct_index) {
                offset[n] = block;
                goto got;
        }
        block -= direct_index;
        if (block < direct_blks) {
                offset[n++] = NODE_DIR1_BLOCK;
                noffset[n] = 1;
                offset[n] = block;
                level = 1;
                goto got;
        }
        block -= direct_blks;
        if (block < direct_blks) {
                offset[n++] = NODE_DIR2_BLOCK;
                noffset[n] = 2;
                offset[n] = block;
                level = 1;
                goto got;
        }
        block -= direct_blks;
        if (block < indirect_blks) {
                offset[n++] = NODE_IND1_BLOCK;
                noffset[n] = 3;
                offset[n++] = block / direct_blks;
                noffset[n] = 4 + offset[n - 1];
                offset[n] = block % direct_blks;
                level = 2;
                goto got;
        }
        block -= indirect_blks;
        if (block < indirect_blks) {
                offset[n++] = NODE_IND2_BLOCK;
                noffset[n] = 4 + dptrs_per_blk;
                offset[n++] = block / direct_blks;
                noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
                offset[n] = block % direct_blks;
                level = 2;
                goto got;
        }
        block -= indirect_blks;
        if (block < dindirect_blks) {
                offset[n++] = NODE_DIND_BLOCK;
                noffset[n] = 5 + (dptrs_per_blk * 2);
                offset[n++] = block / indirect_blks;
                noffset[n] = 6 + (dptrs_per_blk * 2) +
                              offset[n - 1] * (dptrs_per_blk + 1);
                offset[n++] = (block / direct_blks) % dptrs_per_blk;
                noffset[n] = 7 + (dptrs_per_blk * 2) +
                              offset[n - 2] * (dptrs_per_blk + 1) +
                              offset[n - 1];
                offset[n] = block % direct_blks;
                level = 3;
                goto got;
        } else {
                return -E2BIG;
        }
got:
        return level;
}

/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if mode is set to ALLOC_NODE.
 */
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
        struct page *npage[4];
        struct page *parent = NULL;
        int offset[4];
        unsigned int noffset[4];
        nid_t nids[4];
        int level, i = 0;
        int err = 0;

        level = get_node_path(dn->inode, index, offset, noffset);
        if (level < 0)
                return level;

        nids[0] = dn->inode->i_ino;
        npage[0] = dn->inode_page;

        if (!npage[0]) {
                npage[0] = f2fs_get_node_page(sbi, nids[0]);
                if (IS_ERR(npage[0]))
                        return PTR_ERR(npage[0]);
        }

        /* if inline_data is set, we should not report any block indices */
        if (f2fs_has_inline_data(dn->inode) && index) {
                err = -ENOENT;
                f2fs_put_page(npage[0], 1);
                goto release_out;
        }

        parent = npage[0];
        if (level != 0)
                nids[1] = get_nid(parent, offset[0], true);
        dn->inode_page = npage[0];
        dn->inode_page_locked = true;

        /* get indirect or direct nodes */
        for (i = 1; i <= level; i++) {
                bool done = false;

                if (!nids[i] && mode == ALLOC_NODE) {
                        /* alloc new node */
                        if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
                                err = -ENOSPC;
                                goto release_pages;
                        }

                        dn->nid = nids[i];
                        npage[i] = f2fs_new_node_page(dn, noffset[i]);
                        if (IS_ERR(npage[i])) {
                                f2fs_alloc_nid_failed(sbi, nids[i]);
                                err = PTR_ERR(npage[i]);
                                goto release_pages;
                        }

                        set_nid(parent, offset[i - 1], nids[i], i == 1);
                        f2fs_alloc_nid_done(sbi, nids[i]);
                        done = true;
                } else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
                        npage[i] = f2fs_get_node_page_ra(parent, offset[i - 1]);
                        if (IS_ERR(npage[i])) {
                                err = PTR_ERR(npage[i]);
                                goto release_pages;
                        }
                        done = true;
                }
                if (i == 1) {
                        dn->inode_page_locked = false;
                        unlock_page(parent);
                } else {
                        f2fs_put_page(parent, 1);
                }

                if (!done) {
                        npage[i] = f2fs_get_node_page(sbi, nids[i]);
                        if (IS_ERR(npage[i])) {
                                err = PTR_ERR(npage[i]);
                                f2fs_put_page(npage[0], 0);
                                goto release_out;
                        }
                }
                if (i < level) {
                        parent = npage[i];
                        nids[i + 1] = get_nid(parent, offset[i], false);
                }
        }
        dn->nid = nids[level];
        dn->ofs_in_node = offset[level];
        dn->node_page = npage[level];
        dn->data_blkaddr = f2fs_data_blkaddr(dn);

        if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
                                        f2fs_sb_has_readonly(sbi)) {
                unsigned int c_len = f2fs_cluster_blocks_are_contiguous(dn);
                block_t blkaddr;

                if (!c_len)
                        goto out;

                blkaddr = f2fs_data_blkaddr(dn);
                if (blkaddr == COMPRESS_ADDR)
                        blkaddr = data_blkaddr(dn->inode, dn->node_page,
                                                dn->ofs_in_node + 1);

                f2fs_update_extent_tree_range_compressed(dn->inode,
                                        index, blkaddr,
                                        F2FS_I(dn->inode)->i_cluster_size,
                                        c_len);
        }
out:
        return 0;

release_pages:
        f2fs_put_page(parent, 1);
        if (i > 1)
                f2fs_put_page(npage[0], 0);
release_out:
        dn->inode_page = NULL;
        dn->node_page = NULL;
        if (err == -ENOENT) {
                dn->cur_level = i;
                dn->max_level = level;
                dn->ofs_in_node = offset[level];
        }
        return err;
}

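/*
 * Free the on-disk block backing dn->node_page, clear its NAT mapping, and
 * drop the page from the node address space.
 */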
static int truncate_node(struct dnode_of_data *dn)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
        struct node_info ni;
        int err;
        pgoff_t index;

        err = f2fs_get_node_info(sbi, dn->nid, &ni, false);
        if (err)
                return err;

        /* Deallocate node address */
        f2fs_invalidate_blocks(sbi, ni.blk_addr);
        dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
        set_node_addr(sbi, &ni, NULL_ADDR, false);

        if (dn->nid == dn->inode->i_ino) {
                f2fs_remove_orphan_inode(sbi, dn->nid);
                dec_valid_inode_count(sbi);
                f2fs_inode_synced(dn->inode);
        }

        clear_node_page_dirty(dn->node_page);
        set_sbi_flag(sbi, SBI_IS_DIRTY);

        index = dn->node_page->index;
        f2fs_put_page(dn->node_page, 1);

        invalidate_mapping_pages(NODE_MAPPING(sbi),
                        index, index);

        dn->node_page = NULL;
        trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);

        return 0;
}

static int truncate_dnode(struct dnode_of_data *dn)
{
        struct page *page;
        int err;

        if (dn->nid == 0)
                return 1;

        /* get direct node */
        page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
        if (PTR_ERR(page) == -ENOENT)
                return 1;
        else if (IS_ERR(page))
                return PTR_ERR(page);

        /* set up dnode_of_data for the calls below */
        dn->node_page = page;
        dn->ofs_in_node = 0;
        f2fs_truncate_data_blocks(dn);
        err = truncate_node(dn);
        if (err)
                return err;

        return 1;
}

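/*
 * Recursively truncate the subtree below an indirect node; @depth counts the
 * remaining levels including this node. Returns the number of node blocks
 * freed (a missing child counts as fully freed), or a negative errno.
 */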
static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
                                                int ofs, int depth)
{
        struct dnode_of_data rdn = *dn;
        struct page *page;
        struct f2fs_node *rn;
        nid_t child_nid;
        unsigned int child_nofs;
        int freed = 0;
        int i, ret;

        if (dn->nid == 0)
                return NIDS_PER_BLOCK + 1;

        trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

        page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
        if (IS_ERR(page)) {
                trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
                return PTR_ERR(page);
        }

        f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK);

        rn = F2FS_NODE(page);
        if (depth < 3) {
                for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
                        child_nid = le32_to_cpu(rn->in.nid[i]);
                        if (child_nid == 0)
                                continue;
                        rdn.nid = child_nid;
                        ret = truncate_dnode(&rdn);
                        if (ret < 0)
                                goto out_err;
                        if (set_nid(page, i, 0, false))
                                dn->node_changed = true;
                }
        } else {
                child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
                for (i = ofs; i < NIDS_PER_BLOCK; i++) {
                        child_nid = le32_to_cpu(rn->in.nid[i]);
                        if (child_nid == 0) {
                                child_nofs += NIDS_PER_BLOCK + 1;
                                continue;
                        }
                        rdn.nid = child_nid;
                        ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
                        if (ret == (NIDS_PER_BLOCK + 1)) {
                                if (set_nid(page, i, 0, false))
                                        dn->node_changed = true;
                                child_nofs += ret;
                        } else if (ret < 0 && ret != -ENOENT) {
                                goto out_err;
                        }
                }
                freed = child_nofs;
        }

        if (!ofs) {
                /* remove current indirect node */
                dn->node_page = page;
                ret = truncate_node(dn);
                if (ret)
                        goto out_err;
                freed++;
        } else {
                f2fs_put_page(page, 1);
        }
        trace_f2fs_truncate_nodes_exit(dn->inode, freed);
        return freed;

out_err:
        f2fs_put_page(page, 1);
        trace_f2fs_truncate_nodes_exit(dn->inode, ret);
        return ret;
}

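/*
 * Truncate the direct nodes hanging off a partially truncated indirect
 * chain described by @offset, freeing any path node that becomes empty.
 */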
static int truncate_partial_nodes(struct dnode_of_data *dn,
                        struct f2fs_inode *ri, int *offset, int depth)
{
        struct page *pages[2];
        nid_t nid[3];
        nid_t child_nid;
        int err = 0;
        int i;
        int idx = depth - 2;

        nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
        if (!nid[0])
                return 0;

        /* get indirect nodes in the path */
        for (i = 0; i < idx + 1; i++) {
                /* the reference count will be increased */
                pages[i] = f2fs_get_node_page(F2FS_I_SB(dn->inode), nid[i]);
                if (IS_ERR(pages[i])) {
                        err = PTR_ERR(pages[i]);
                        idx = i - 1;
                        goto fail;
                }
                nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
        }

        f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);

        /* free direct nodes linked to a partial indirect node */
        for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
                child_nid = get_nid(pages[idx], i, false);
                if (!child_nid)
                        continue;
                dn->nid = child_nid;
                err = truncate_dnode(dn);
                if (err < 0)
                        goto fail;
                if (set_nid(pages[idx], i, 0, false))
                        dn->node_changed = true;
        }

        if (offset[idx + 1] == 0) {
                dn->node_page = pages[idx];
                dn->nid = nid[idx];
                err = truncate_node(dn);
                if (err)
                        goto fail;
        } else {
                f2fs_put_page(pages[idx], 1);
        }
        offset[idx]++;
        offset[idx + 1] = 0;
        idx--;
fail:
        for (i = idx; i >= 0; i--)
                f2fs_put_page(pages[i], 1);

        trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

        return err;
}

/*
 * All the block addresses of data and nodes should be nullified.
 */
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        int err = 0, cont = 1;
        int level, offset[4], noffset[4];
        unsigned int nofs = 0;
        struct f2fs_inode *ri;
        struct dnode_of_data dn;
        struct page *page;

        trace_f2fs_truncate_inode_blocks_enter(inode, from);

        level = get_node_path(inode, from, offset, noffset);
        if (level < 0) {
                trace_f2fs_truncate_inode_blocks_exit(inode, level);
                return level;
        }

        page = f2fs_get_node_page(sbi, inode->i_ino);
        if (IS_ERR(page)) {
                trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
                return PTR_ERR(page);
        }

        set_new_dnode(&dn, inode, page, NULL, 0);
        unlock_page(page);

        ri = F2FS_INODE(page);
        switch (level) {
        case 0:
        case 1:
                nofs = noffset[1];
                break;
        case 2:
                nofs = noffset[1];
                if (!offset[level - 1])
                        goto skip_partial;
                err = truncate_partial_nodes(&dn, ri, offset, level);
                if (err < 0 && err != -ENOENT)
                        goto fail;
                nofs += 1 + NIDS_PER_BLOCK;
                break;
        case 3:
                nofs = 5 + 2 * NIDS_PER_BLOCK;
                if (!offset[level - 1])
                        goto skip_partial;
                err = truncate_partial_nodes(&dn, ri, offset, level);
                if (err < 0 && err != -ENOENT)
                        goto fail;
                break;
        default:
                BUG();
        }

skip_partial:
        while (cont) {
                dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
                switch (offset[0]) {
                case NODE_DIR1_BLOCK:
                case NODE_DIR2_BLOCK:
                        err = truncate_dnode(&dn);
                        break;

                case NODE_IND1_BLOCK:
                case NODE_IND2_BLOCK:
                        err = truncate_nodes(&dn, nofs, offset[1], 2);
                        break;

                case NODE_DIND_BLOCK:
                        err = truncate_nodes(&dn, nofs, offset[1], 3);
                        cont = 0;
                        break;

                default:
                        BUG();
                }
                if (err < 0 && err != -ENOENT)
                        goto fail;
                if (offset[1] == 0 &&
                                ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
                        lock_page(page);
                        BUG_ON(page->mapping != NODE_MAPPING(sbi));
                        f2fs_wait_on_page_writeback(page, NODE, true, true);
                        ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
                        set_page_dirty(page);
                        unlock_page(page);
                }
                offset[1] = 0;
                offset[0]++;
                nofs += err;
        }
fail:
        f2fs_put_page(page, 0);
        trace_f2fs_truncate_inode_blocks_exit(inode, err);
        return err > 0 ? 0 : err;
}

/* caller must lock inode page */
int f2fs_truncate_xattr_node(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        nid_t nid = F2FS_I(inode)->i_xattr_nid;
        struct dnode_of_data dn;
        struct page *npage;
        int err;

        if (!nid)
                return 0;

        npage = f2fs_get_node_page(sbi, nid);
        if (IS_ERR(npage))
                return PTR_ERR(npage);

        set_new_dnode(&dn, inode, NULL, npage, nid);
        err = truncate_node(&dn);
        if (err) {
                f2fs_put_page(npage, 1);
                return err;
        }

        f2fs_i_xnid_write(inode, 0);

        return 0;
}

/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
int f2fs_remove_inode_page(struct inode *inode)
{
        struct dnode_of_data dn;
        int err;

        set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
        err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
        if (err)
                return err;

        err = f2fs_truncate_xattr_node(inode);
        if (err) {
                f2fs_put_dnode(&dn);
                return err;
        }

        /* remove potential inline_data blocks */
        if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
                                S_ISLNK(inode->i_mode))
                f2fs_truncate_data_blocks_range(&dn, 1);

        if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
                f2fs_put_dnode(&dn);
                return -EIO;
        }

        /* 0 is possible, after f2fs_new_inode() has failed */
        if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) {
                f2fs_warn(F2FS_I_SB(inode),
                        "f2fs_remove_inode_page: inconsistent i_blocks, ino:%lu, iblocks:%llu",
                        inode->i_ino, (unsigned long long)inode->i_blocks);
                set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
        }

        /* will put inode & node pages */
        err = truncate_node(&dn);
        if (err) {
                f2fs_put_dnode(&dn);
                return err;
        }
        return 0;
}

struct page *f2fs_new_inode_page(struct inode *inode)
{
        struct dnode_of_data dn;

        /* allocate inode page for new inode */
        set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

        /* caller should f2fs_put_page(page, 1); */
        return f2fs_new_node_page(&dn, 0);
}

struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
        struct node_info new_ni;
        struct page *page;
        int err;

        if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
                return ERR_PTR(-EPERM);

        page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
        if (!page)
                return ERR_PTR(-ENOMEM);

        if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs))))
                goto fail;

#ifdef CONFIG_F2FS_CHECK_FS
        err = f2fs_get_node_info(sbi, dn->nid, &new_ni, false);
        if (err) {
                dec_valid_node_count(sbi, dn->inode, !ofs);
                goto fail;
        }
        if (unlikely(new_ni.blk_addr != NULL_ADDR)) {
                err = -EFSCORRUPTED;
                set_sbi_flag(sbi, SBI_NEED_FSCK);
                f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
                goto fail;
        }
#endif
        new_ni.nid = dn->nid;
        new_ni.ino = dn->inode->i_ino;
        new_ni.blk_addr = NULL_ADDR;
        new_ni.flag = 0;
        new_ni.version = 0;
        set_node_addr(sbi, &new_ni, NEW_ADDR, false);

        f2fs_wait_on_page_writeback(page, NODE, true, true);
        fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
        set_cold_node(page, S_ISDIR(dn->inode->i_mode));
        if (!PageUptodate(page))
                SetPageUptodate(page);
        if (set_page_dirty(page))
                dn->node_changed = true;

        if (f2fs_has_xattr_block(ofs))
                f2fs_i_xnid_write(dn->inode, dn->nid);

        if (ofs == 0)
                inc_valid_inode_count(sbi);
        return page;

fail:
        clear_node_page_dirty(page);
        f2fs_put_page(page, 1);
        return ERR_PTR(err);
}

/*
 * The caller should handle the page it gets back according to the return
 * value:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE or error: f2fs_put_page(page, 1)
 */
static int read_node_page(struct page *page, blk_opf_t op_flags)
{
        struct f2fs_sb_info *sbi = F2FS_P_SB(page);
        struct node_info ni;
        struct f2fs_io_info fio = {
                .sbi = sbi,
                .type = NODE,
                .op = REQ_OP_READ,
                .op_flags = op_flags,
                .page = page,
                .encrypted_page = NULL,
        };
        int err;

        if (PageUptodate(page)) {
                if (!f2fs_inode_chksum_verify(sbi, page)) {
                        ClearPageUptodate(page);
                        return -EFSBADCRC;
                }
                return LOCKED_PAGE;
        }

        err = f2fs_get_node_info(sbi, page->index, &ni, false);
        if (err)
                return err;

        /* NEW_ADDR can be seen, after cp_error drops some dirty node pages */
        if (unlikely(ni.blk_addr == NULL_ADDR || ni.blk_addr == NEW_ADDR) ||
                        is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)) {
                ClearPageUptodate(page);
                return -ENOENT;
        }

        fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;

        err = f2fs_submit_page_bio(&fio);

        if (!err)
                f2fs_update_iostat(sbi, NULL, FS_NODE_READ_IO, F2FS_BLKSIZE);

        return err;
}

/*
 * Readahead a node page
 */
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
        struct page *apage;
        int err;

        if (!nid)
                return;
        if (f2fs_check_nid_range(sbi, nid))
                return;

        apage = xa_load(&NODE_MAPPING(sbi)->i_pages, nid);
        if (apage)
                return;

        apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
        if (!apage)
                return;

        err = read_node_page(apage, REQ_RAHEAD);
        f2fs_put_page(apage, err ? 1 : 0);
}

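/*
 * Return the locked, verified node page for @nid, reading it in if needed.
 * When @parent is given, sibling pages starting at @start + 1 are read
 * ahead as well.
 */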
static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
                                        struct page *parent, int start)
{
        struct page *page;
        int err;

        if (!nid)
                return ERR_PTR(-ENOENT);
        if (f2fs_check_nid_range(sbi, nid))
                return ERR_PTR(-EINVAL);
repeat:
        page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
        if (!page)
                return ERR_PTR(-ENOMEM);

        err = read_node_page(page, 0);
        if (err < 0) {
                goto out_put_err;
        } else if (err == LOCKED_PAGE) {
                err = 0;
                goto page_hit;
        }

        if (parent)
                f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);

        lock_page(page);

        if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
                f2fs_put_page(page, 1);
                goto repeat;
        }

        if (unlikely(!PageUptodate(page))) {
                err = -EIO;
                goto out_err;
        }

        if (!f2fs_inode_chksum_verify(sbi, page)) {
                err = -EFSBADCRC;
                goto out_err;
        }
page_hit:
        if (likely(nid == nid_of_node(page)))
                return page;

        f2fs_warn(sbi, "inconsistent node block, nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
                          nid, nid_of_node(page), ino_of_node(page),
                          ofs_of_node(page), cpver_of_node(page),
                          next_blkaddr_of_node(page));
        set_sbi_flag(sbi, SBI_NEED_FSCK);
        err = -EINVAL;
out_err:
        ClearPageUptodate(page);
out_put_err:
        /* -ENOENT from read_node_page is not an I/O error */
        if (err != -ENOENT)
                f2fs_handle_page_eio(sbi, page->index, NODE);
        f2fs_put_page(page, 1);
        return ERR_PTR(err);
}

struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
        return __get_node_page(sbi, nid, NULL, 0);
}

struct page *f2fs_get_node_page_ra(struct page *parent, int start)
{
        struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
        nid_t nid = get_nid(parent, start, false);

        return __get_node_page(sbi, nid, parent, start);
}

static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
{
        struct inode *inode;
        struct page *page;
        int ret;

        /* should flush inline_data before evict_inode */
        inode = ilookup(sbi->sb, ino);
        if (!inode)
                return;

        page = f2fs_pagecache_get_page(inode->i_mapping, 0,
                                        FGP_LOCK|FGP_NOWAIT, 0);
        if (!page)
                goto iput_out;

        if (!PageUptodate(page))
                goto page_out;

        if (!PageDirty(page))
                goto page_out;

        if (!clear_page_dirty_for_io(page))
                goto page_out;

        ret = f2fs_write_inline_data(inode, page);
        inode_dec_dirty_pages(inode);
        f2fs_remove_dirty_inode(inode);
        if (ret)
                set_page_dirty(page);
page_out:
        f2fs_put_page(page, 1);
iput_out:
        iput(inode);
}

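/*
 * Scan the dirty node pages of @sbi and return the last dirty direct node
 * page belonging to @ino, holding an extra reference on it.
 */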
1515 static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
1516 {
1517         pgoff_t index;
1518         struct pagevec pvec;
1519         struct page *last_page = NULL;
1520         int nr_pages;
1521
1522         pagevec_init(&pvec);
1523         index = 0;
1524
1525         while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
1526                                 PAGECACHE_TAG_DIRTY))) {
1527                 int i;
1528
1529                 for (i = 0; i < nr_pages; i++) {
1530                         struct page *page = pvec.pages[i];
1531
1532                         if (unlikely(f2fs_cp_error(sbi))) {
1533                                 f2fs_put_page(last_page, 0);
1534                                 pagevec_release(&pvec);
1535                                 return ERR_PTR(-EIO);
1536                         }
1537
1538                         if (!IS_DNODE(page) || !is_cold_node(page))
1539                                 continue;
1540                         if (ino_of_node(page) != ino)
1541                                 continue;
1542
1543                         lock_page(page);
1544
1545                         if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1546 continue_unlock:
1547                                 unlock_page(page);
1548                                 continue;
1549                         }
1550                         if (ino_of_node(page) != ino)
1551                                 goto continue_unlock;
1552
1553                         if (!PageDirty(page)) {
1554                                 /* someone wrote it for us */
1555                                 goto continue_unlock;
1556                         }
1557
1558                         if (last_page)
1559                                 f2fs_put_page(last_page, 0);
1560
1561                         get_page(page);
1562                         last_page = page;
1563                         unlock_page(page);
1564                 }
1565                 pagevec_release(&pvec);
1566                 cond_resched();
1567         }
1568         return last_page;
1569 }
1570
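/*
 * Return convention of __write_node_page() (derived from the code
 * below): 0 means the page was written or legitimately dropped
 * (checkpoint error, already-truncated node), while
 * AOP_WRITEPAGE_ACTIVATE means the page was redirtied and writeback
 * should skip it for now.
 */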
1571 static int __write_node_page(struct page *page, bool atomic, bool *submitted,
1572                                 struct writeback_control *wbc, bool do_balance,
1573                                 enum iostat_type io_type, unsigned int *seq_id)
1574 {
1575         struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1576         nid_t nid;
1577         struct node_info ni;
1578         struct f2fs_io_info fio = {
1579                 .sbi = sbi,
1580                 .ino = ino_of_node(page),
1581                 .type = NODE,
1582                 .op = REQ_OP_WRITE,
1583                 .op_flags = wbc_to_write_flags(wbc),
1584                 .page = page,
1585                 .encrypted_page = NULL,
1586                 .submitted = false,
1587                 .io_type = io_type,
1588                 .io_wbc = wbc,
1589         };
1590         unsigned int seq;
1591
1592         trace_f2fs_writepage(page, NODE);
1593
1594         if (unlikely(f2fs_cp_error(sbi))) {
1595                 ClearPageUptodate(page);
1596                 dec_page_count(sbi, F2FS_DIRTY_NODES);
1597                 unlock_page(page);
1598                 return 0;
1599         }
1600
1601         if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
1602                 goto redirty_out;
1603
1604         if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
1605                         wbc->sync_mode == WB_SYNC_NONE &&
1606                         IS_DNODE(page) && is_cold_node(page))
1607                 goto redirty_out;
1608
1609         /* get old block addr of this node page */
1610         nid = nid_of_node(page);
1611         f2fs_bug_on(sbi, page->index != nid);
1612
1613         if (f2fs_get_node_info(sbi, nid, &ni, !do_balance))
1614                 goto redirty_out;
1615
1616         if (wbc->for_reclaim) {
1617                 if (!f2fs_down_read_trylock(&sbi->node_write))
1618                         goto redirty_out;
1619         } else {
1620                 f2fs_down_read(&sbi->node_write);
1621         }
1622
1623         /* This page is already truncated */
1624         if (unlikely(ni.blk_addr == NULL_ADDR)) {
1625                 ClearPageUptodate(page);
1626                 dec_page_count(sbi, F2FS_DIRTY_NODES);
1627                 f2fs_up_read(&sbi->node_write);
1628                 unlock_page(page);
1629                 return 0;
1630         }
1631
1632         if (__is_valid_data_blkaddr(ni.blk_addr) &&
1633                 !f2fs_is_valid_blkaddr(sbi, ni.blk_addr,
1634                                         DATA_GENERIC_ENHANCE)) {
1635                 f2fs_up_read(&sbi->node_write);
1636                 goto redirty_out;
1637         }
1638
1639         if (atomic && !test_opt(sbi, NOBARRIER) && !f2fs_sb_has_blkzoned(sbi))
1640                 fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
1641
1642         /* should add to global list before clearing PAGECACHE status */
1643         if (f2fs_in_warm_node_list(sbi, page)) {
1644                 seq = f2fs_add_fsync_node_entry(sbi, page);
1645                 if (seq_id)
1646                         *seq_id = seq;
1647         }
1648
1649         set_page_writeback(page);
1650         ClearPageError(page);
1651
1652         fio.old_blkaddr = ni.blk_addr;
1653         f2fs_do_write_node_page(nid, &fio);
1654         set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
1655         dec_page_count(sbi, F2FS_DIRTY_NODES);
1656         f2fs_up_read(&sbi->node_write);
1657
1658         if (wbc->for_reclaim) {
1659                 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE);
1660                 submitted = NULL;
1661         }
1662
1663         unlock_page(page);
1664
1665         if (unlikely(f2fs_cp_error(sbi))) {
1666                 f2fs_submit_merged_write(sbi, NODE);
1667                 submitted = NULL;
1668         }
1669         if (submitted)
1670                 *submitted = fio.submitted;
1671
1672         if (do_balance)
1673                 f2fs_balance_fs(sbi, false);
1674         return 0;
1675
1676 redirty_out:
1677         redirty_page_for_writepage(wbc, page);
1678         return AOP_WRITEPAGE_ACTIVATE;
1679 }
1680
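/*
 * For foreground GC the node page is migrated synchronously through
 * __write_node_page(); for background GC it is merely redirtied so the
 * flusher will move it later.
 */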
1681 int f2fs_move_node_page(struct page *node_page, int gc_type)
1682 {
1683         int err = 0;
1684
1685         if (gc_type == FG_GC) {
1686                 struct writeback_control wbc = {
1687                         .sync_mode = WB_SYNC_ALL,
1688                         .nr_to_write = 1,
1689                         .for_reclaim = 0,
1690                 };
1691
1692                 f2fs_wait_on_page_writeback(node_page, NODE, true, true);
1693
1694                 set_page_dirty(node_page);
1695
1696                 if (!clear_page_dirty_for_io(node_page)) {
1697                         err = -EAGAIN;
1698                         goto out_page;
1699                 }
1700
1701                 if (__write_node_page(node_page, false, NULL,
1702                                         &wbc, false, FS_GC_NODE_IO, NULL)) {
1703                         err = -EAGAIN;
1704                         unlock_page(node_page);
1705                 }
1706                 goto release_page;
1707         } else {
1708                 /* set page dirty; the flusher will write it later */
1709                 if (!PageWriteback(node_page))
1710                         set_page_dirty(node_page);
1711         }
1712 out_page:
1713         unlock_page(node_page);
1714 release_page:
1715         f2fs_put_page(node_page, 0);
1716         return err;
1717 }
1718
1719 static int f2fs_write_node_page(struct page *page,
1720                                 struct writeback_control *wbc)
1721 {
1722         return __write_node_page(page, false, NULL, wbc, false,
1723                                                 FS_NODE_IO, NULL);
1724 }
1725
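/*
 * Illustrative fsync flow (a sketch of how the pieces fit together,
 * not a verbatim caller), with a WB_SYNC_ALL writeback_control:
 *
 *	unsigned int seq_id = 0;
 *	int err;
 *
 *	err = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
 *	if (!err)
 *		err = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
 *
 * In atomic mode only the last dnode found by last_fsync_dnode() gets
 * the fsync mark, which bounds what recovery has to replay.
 */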
1726 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
1727                         struct writeback_control *wbc, bool atomic,
1728                         unsigned int *seq_id)
1729 {
1730         pgoff_t index;
1731         struct pagevec pvec;
1732         int ret = 0;
1733         struct page *last_page = NULL;
1734         bool marked = false;
1735         nid_t ino = inode->i_ino;
1736         int nr_pages;
1737         int nwritten = 0;
1738
1739         if (atomic) {
1740                 last_page = last_fsync_dnode(sbi, ino);
1741                 if (IS_ERR_OR_NULL(last_page))
1742                         return PTR_ERR_OR_ZERO(last_page);
1743         }
1744 retry:
1745         pagevec_init(&pvec);
1746         index = 0;
1747
1748         while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
1749                                 PAGECACHE_TAG_DIRTY))) {
1750                 int i;
1751
1752                 for (i = 0; i < nr_pages; i++) {
1753                         struct page *page = pvec.pages[i];
1754                         bool submitted = false;
1755
1756                         if (unlikely(f2fs_cp_error(sbi))) {
1757                                 f2fs_put_page(last_page, 0);
1758                                 pagevec_release(&pvec);
1759                                 ret = -EIO;
1760                                 goto out;
1761                         }
1762
1763                         if (!IS_DNODE(page) || !is_cold_node(page))
1764                                 continue;
1765                         if (ino_of_node(page) != ino)
1766                                 continue;
1767
1768                         lock_page(page);
1769
1770                         if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1771 continue_unlock:
1772                                 unlock_page(page);
1773                                 continue;
1774                         }
1775                         if (ino_of_node(page) != ino)
1776                                 goto continue_unlock;
1777
1778                         if (!PageDirty(page) && page != last_page) {
1779                                 /* someone wrote it for us */
1780                                 goto continue_unlock;
1781                         }
1782
1783                         f2fs_wait_on_page_writeback(page, NODE, true, true);
1784
1785                         set_fsync_mark(page, 0);
1786                         set_dentry_mark(page, 0);
1787
1788                         if (!atomic || page == last_page) {
1789                                 set_fsync_mark(page, 1);
1790                                 percpu_counter_inc(&sbi->rf_node_block_count);
1791                                 if (IS_INODE(page)) {
1792                                         if (is_inode_flag_set(inode,
1793                                                                 FI_DIRTY_INODE))
1794                                                 f2fs_update_inode(inode, page);
1795                                         set_dentry_mark(page,
1796                                                 f2fs_need_dentry_mark(sbi, ino));
1797                                 }
1798                                 /* may be written by another thread */
1799                                 if (!PageDirty(page))
1800                                         set_page_dirty(page);
1801                         }
1802
1803                         if (!clear_page_dirty_for_io(page))
1804                                 goto continue_unlock;
1805
1806                         ret = __write_node_page(page, atomic &&
1807                                                 page == last_page,
1808                                                 &submitted, wbc, true,
1809                                                 FS_NODE_IO, seq_id);
1810                         if (ret) {
1811                                 unlock_page(page);
1812                                 f2fs_put_page(last_page, 0);
1813                                 break;
1814                         } else if (submitted) {
1815                                 nwritten++;
1816                         }
1817
1818                         if (page == last_page) {
1819                                 f2fs_put_page(page, 0);
1820                                 marked = true;
1821                                 break;
1822                         }
1823                 }
1824                 pagevec_release(&pvec);
1825                 cond_resched();
1826
1827                 if (ret || marked)
1828                         break;
1829         }
1830         if (!ret && atomic && !marked) {
1831                 f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx",
1832                            ino, last_page->index);
1833                 lock_page(last_page);
1834                 f2fs_wait_on_page_writeback(last_page, NODE, true, true);
1835                 set_page_dirty(last_page);
1836                 unlock_page(last_page);
1837                 goto retry;
1838         }
1839 out:
1840         if (nwritten)
1841                 f2fs_submit_merged_write_cond(sbi, NULL, NULL, ino, NODE);
1842         return ret ? -EIO : 0;
1843 }
1844
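/*
 * Callback for find_inode_nowait() used by flush_dirty_inode() below:
 * it returns 1 (after taking a reference with igrab()) only for the
 * matching inode that has FI_DIRTY_INODE set and sits on the
 * DIRTY_META gdirty_list.
 */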
1845 static int f2fs_match_ino(struct inode *inode, unsigned long ino, void *data)
1846 {
1847         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1848         bool clean;
1849
1850         if (inode->i_ino != ino)
1851                 return 0;
1852
1853         if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
1854                 return 0;
1855
1856         spin_lock(&sbi->inode_lock[DIRTY_META]);
1857         clean = list_empty(&F2FS_I(inode)->gdirty_list);
1858         spin_unlock(&sbi->inode_lock[DIRTY_META]);
1859
1860         if (clean)
1861                 return 0;
1862
1863         inode = igrab(inode);
1864         if (!inode)
1865                 return 0;
1866         return 1;
1867 }
1868
1869 static bool flush_dirty_inode(struct page *page)
1870 {
1871         struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1872         struct inode *inode;
1873         nid_t ino = ino_of_node(page);
1874
1875         inode = find_inode_nowait(sbi->sb, ino, f2fs_match_ino, NULL);
1876         if (!inode)
1877                 return false;
1878
1879         f2fs_update_inode(inode, page);
1880         unlock_page(page);
1881
1882         iput(inode);
1883         return true;
1884 }
1885
1886 void f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
1887 {
1888         pgoff_t index = 0;
1889         struct pagevec pvec;
1890         int nr_pages;
1891
1892         pagevec_init(&pvec);
1893
1894         while ((nr_pages = pagevec_lookup_tag(&pvec,
1895                         NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
1896                 int i;
1897
1898                 for (i = 0; i < nr_pages; i++) {
1899                         struct page *page = pvec.pages[i];
1900
1901                         if (!IS_DNODE(page))
1902                                 continue;
1903
1904                         lock_page(page);
1905
1906                         if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1907 continue_unlock:
1908                                 unlock_page(page);
1909                                 continue;
1910                         }
1911
1912                         if (!PageDirty(page)) {
1913                                 /* someone wrote it for us */
1914                                 goto continue_unlock;
1915                         }
1916
1917                         /* flush inline_data if this is an async context */
1918                         if (page_private_inline(page)) {
1919                                 clear_page_private_inline(page);
1920                                 unlock_page(page);
1921                                 flush_inline_data(sbi, ino_of_node(page));
1922                                 continue;
1923                         }
1924                         unlock_page(page);
1925                 }
1926                 pagevec_release(&pvec);
1927                 cond_resched();
1928         }
1929 }
1930
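/*
 * Writeback of all dirty node pages. A minimal caller sketch (assuming
 * a background WB_SYNC_NONE flush of up to nr pages):
 *
 *	struct writeback_control wbc = {
 *		.sync_mode = WB_SYNC_NONE,
 *		.nr_to_write = nr,
 *		.for_reclaim = 0,
 *	};
 *
 *	f2fs_sync_node_pages(sbi, &wbc, true, FS_NODE_IO);
 *
 * Pages go out in three passes: indirect nodes, then dentry dnodes,
 * then file (cold) dnodes, as the step comment below describes.
 */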
1931 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
1932                                 struct writeback_control *wbc,
1933                                 bool do_balance, enum iostat_type io_type)
1934 {
1935         pgoff_t index;
1936         struct pagevec pvec;
1937         int step = 0;
1938         int nwritten = 0;
1939         int ret = 0;
1940         int nr_pages, done = 0;
1941
1942         pagevec_init(&pvec);
1943
1944 next_step:
1945         index = 0;
1946
1947         while (!done && (nr_pages = pagevec_lookup_tag(&pvec,
1948                         NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
1949                 int i;
1950
1951                 for (i = 0; i < nr_pages; i++) {
1952                         struct page *page = pvec.pages[i];
1953                         bool submitted = false;
1954
1955                         /* give priority to WB_SYNC threads */
1956                         if (atomic_read(&sbi->wb_sync_req[NODE]) &&
1957                                         wbc->sync_mode == WB_SYNC_NONE) {
1958                                 done = 1;
1959                                 break;
1960                         }
1961
1962                         /*
1963                          * flushing sequence with step:
1964                          * 0. indirect nodes
1965                          * 1. dentry dnodes
1966                          * 2. file dnodes
1967                          */
1968                         if (step == 0 && IS_DNODE(page))
1969                                 continue;
1970                         if (step == 1 && (!IS_DNODE(page) ||
1971                                                 is_cold_node(page)))
1972                                 continue;
1973                         if (step == 2 && (!IS_DNODE(page) ||
1974                                                 !is_cold_node(page)))
1975                                 continue;
1976 lock_node:
1977                         if (wbc->sync_mode == WB_SYNC_ALL)
1978                                 lock_page(page);
1979                         else if (!trylock_page(page))
1980                                 continue;
1981
1982                         if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1983 continue_unlock:
1984                                 unlock_page(page);
1985                                 continue;
1986                         }
1987
1988                         if (!PageDirty(page)) {
1989                                 /* someone wrote it for us */
1990                                 goto continue_unlock;
1991                         }
1992
1993                         /* flush inline_data/inode if this is an async context */
1994                         if (!do_balance)
1995                                 goto write_node;
1996
1997                         /* flush inline_data */
1998                         if (page_private_inline(page)) {
1999                                 clear_page_private_inline(page);
2000                                 unlock_page(page);
2001                                 flush_inline_data(sbi, ino_of_node(page));
2002                                 goto lock_node;
2003                         }
2004
2005                         /* flush dirty inode */
2006                         if (IS_INODE(page) && flush_dirty_inode(page))
2007                                 goto lock_node;
2008 write_node:
2009                         f2fs_wait_on_page_writeback(page, NODE, true, true);
2010
2011                         if (!clear_page_dirty_for_io(page))
2012                                 goto continue_unlock;
2013
2014                         set_fsync_mark(page, 0);
2015                         set_dentry_mark(page, 0);
2016
2017                         ret = __write_node_page(page, false, &submitted,
2018                                                 wbc, do_balance, io_type, NULL);
2019                         if (ret)
2020                                 unlock_page(page);
2021                         else if (submitted)
2022                                 nwritten++;
2023
2024                         if (--wbc->nr_to_write == 0)
2025                                 break;
2026                 }
2027                 pagevec_release(&pvec);
2028                 cond_resched();
2029
2030                 if (wbc->nr_to_write == 0) {
2031                         step = 2;
2032                         break;
2033                 }
2034         }
2035
2036         if (step < 2) {
2037                 if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2038                                 wbc->sync_mode == WB_SYNC_NONE && step == 1)
2039                         goto out;
2040                 step++;
2041                 goto next_step;
2042         }
2043 out:
2044         if (nwritten)
2045                 f2fs_submit_merged_write(sbi, NODE);
2046
2047         if (unlikely(f2fs_cp_error(sbi)))
2048                 return -EIO;
2049         return ret;
2050 }
2051
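/*
 * Waits for every fsync node entry whose seq_id is at most @seq_id (as
 * assigned in __write_node_page() above) to finish writeback, then
 * folds in any mapping-wide error via filemap_check_errors().
 */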
2052 int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
2053                                                 unsigned int seq_id)
2054 {
2055         struct fsync_node_entry *fn;
2056         struct page *page;
2057         struct list_head *head = &sbi->fsync_node_list;
2058         unsigned long flags;
2059         unsigned int cur_seq_id = 0;
2060         int ret2, ret = 0;
2061
2062         while (seq_id && cur_seq_id < seq_id) {
2063                 spin_lock_irqsave(&sbi->fsync_node_lock, flags);
2064                 if (list_empty(head)) {
2065                         spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
2066                         break;
2067                 }
2068                 fn = list_first_entry(head, struct fsync_node_entry, list);
2069                 if (fn->seq_id > seq_id) {
2070                         spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
2071                         break;
2072                 }
2073                 cur_seq_id = fn->seq_id;
2074                 page = fn->page;
2075                 get_page(page);
2076                 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
2077
2078                 f2fs_wait_on_page_writeback(page, NODE, true, false);
2079                 if (TestClearPageError(page))
2080                         ret = -EIO;
2081
2082                 put_page(page);
2083
2084                 if (ret)
2085                         break;
2086         }
2087
2088         ret2 = filemap_check_errors(NODE_MAPPING(sbi));
2089         if (!ret)
2090                 ret = ret2;
2091
2092         return ret;
2093 }
2094
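/*
 * .writepages for the node mapping: WB_SYNC_ALL writers register in
 * wb_sync_req[NODE] so that concurrent WB_SYNC_NONE writeback backs
 * off, both here and in f2fs_sync_node_pages() above, instead of
 * competing for the same pages.
 */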
2095 static int f2fs_write_node_pages(struct address_space *mapping,
2096                             struct writeback_control *wbc)
2097 {
2098         struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
2099         struct blk_plug plug;
2100         long diff;
2101
2102         if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
2103                 goto skip_write;
2104
2105         /* balancing f2fs's metadata in background */
2106         f2fs_balance_fs_bg(sbi, true);
2107
2108         /* collect a number of dirty node pages and write them together */
2109         if (wbc->sync_mode != WB_SYNC_ALL &&
2110                         get_pages(sbi, F2FS_DIRTY_NODES) <
2111                                         nr_pages_to_skip(sbi, NODE))
2112                 goto skip_write;
2113
2114         if (wbc->sync_mode == WB_SYNC_ALL)
2115                 atomic_inc(&sbi->wb_sync_req[NODE]);
2116         else if (atomic_read(&sbi->wb_sync_req[NODE])) {
2117                 /* to avoid potential deadlock */
2118                 if (current->plug)
2119                         blk_finish_plug(current->plug);
2120                 goto skip_write;
2121         }
2122
2123         trace_f2fs_writepages(mapping->host, wbc, NODE);
2124
2125         diff = nr_pages_to_write(sbi, NODE, wbc);
2126         blk_start_plug(&plug);
2127         f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO);
2128         blk_finish_plug(&plug);
2129         wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
2130
2131         if (wbc->sync_mode == WB_SYNC_ALL)
2132                 atomic_dec(&sbi->wb_sync_req[NODE]);
2133         return 0;
2134
2135 skip_write:
2136         wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
2137         trace_f2fs_writepages(mapping->host, wbc, NODE);
2138         return 0;
2139 }
2140
2141 static bool f2fs_dirty_node_folio(struct address_space *mapping,
2142                 struct folio *folio)
2143 {
2144         trace_f2fs_set_page_dirty(&folio->page, NODE);
2145
2146         if (!folio_test_uptodate(folio))
2147                 folio_mark_uptodate(folio);
2148 #ifdef CONFIG_F2FS_CHECK_FS
2149         if (IS_INODE(&folio->page))
2150                 f2fs_inode_chksum_set(F2FS_M_SB(mapping), &folio->page);
2151 #endif
2152         if (filemap_dirty_folio(mapping, folio)) {
2153                 inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
2154                 set_page_private_reference(&folio->page);
2155                 return true;
2156         }
2157         return false;
2158 }
2159
2160 /*
2161  * Structure of the f2fs node operations
2162  */
2163 const struct address_space_operations f2fs_node_aops = {
2164         .writepage      = f2fs_write_node_page,
2165         .writepages     = f2fs_write_node_pages,
2166         .dirty_folio    = f2fs_dirty_node_folio,
2167         .invalidate_folio = f2fs_invalidate_folio,
2168         .release_folio  = f2fs_release_folio,
2169         .migrate_folio  = filemap_migrate_folio,
2170 };
2171
2172 static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
2173                                                 nid_t n)
2174 {
2175         return radix_tree_lookup(&nm_i->free_nid_root, n);
2176 }
2177
2178 static int __insert_free_nid(struct f2fs_sb_info *sbi,
2179                                 struct free_nid *i)
2180 {
2181         struct f2fs_nm_info *nm_i = NM_I(sbi);
2182         int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
2183
2184         if (err)
2185                 return err;
2186
2187         nm_i->nid_cnt[FREE_NID]++;
2188         list_add_tail(&i->list, &nm_i->free_nid_list);
2189         return 0;
2190 }
2191
2192 static void __remove_free_nid(struct f2fs_sb_info *sbi,
2193                         struct free_nid *i, enum nid_state state)
2194 {
2195         struct f2fs_nm_info *nm_i = NM_I(sbi);
2196
2197         f2fs_bug_on(sbi, state != i->state);
2198         nm_i->nid_cnt[state]--;
2199         if (state == FREE_NID)
2200                 list_del(&i->list);
2201         radix_tree_delete(&nm_i->free_nid_root, i->nid);
2202 }
2203
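/*
 * Free nid state machine, as implemented by the helpers around here: a
 * nid sits on free_nid_list in FREE_NID state, moves to PREALLOC_NID
 * when f2fs_alloc_nid() hands it out (off the list but still in the
 * radix tree), and is finally freed by f2fs_alloc_nid_done() or moved
 * back to FREE_NID by f2fs_alloc_nid_failed().
 */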
2204 static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i,
2205                         enum nid_state org_state, enum nid_state dst_state)
2206 {
2207         struct f2fs_nm_info *nm_i = NM_I(sbi);
2208
2209         f2fs_bug_on(sbi, org_state != i->state);
2210         i->state = dst_state;
2211         nm_i->nid_cnt[org_state]--;
2212         nm_i->nid_cnt[dst_state]++;
2213
2214         switch (dst_state) {
2215         case PREALLOC_NID:
2216                 list_del(&i->list);
2217                 break;
2218         case FREE_NID:
2219                 list_add_tail(&i->list, &nm_i->free_nid_list);
2220                 break;
2221         default:
2222                 BUG_ON(1);
2223         }
2224 }
2225
2226 bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi)
2227 {
2228         struct f2fs_nm_info *nm_i = NM_I(sbi);
2229         unsigned int i;
2230         bool ret = true;
2231
2232         f2fs_down_read(&nm_i->nat_tree_lock);
2233         for (i = 0; i < nm_i->nat_blocks; i++) {
2234                 if (!test_bit_le(i, nm_i->nat_block_bitmap)) {
2235                         ret = false;
2236                         break;
2237                 }
2238         }
2239         f2fs_up_read(&nm_i->nat_tree_lock);
2240
2241         return ret;
2242 }
2243
2244 static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
2245                                                         bool set, bool build)
2246 {
2247         struct f2fs_nm_info *nm_i = NM_I(sbi);
2248         unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
2249         unsigned int nid_ofs = nid - START_NID(nid);
2250
2251         if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
2252                 return;
2253
2254         if (set) {
2255                 if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
2256                         return;
2257                 __set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
2258                 nm_i->free_nid_count[nat_ofs]++;
2259         } else {
2260                 if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
2261                         return;
2262                 __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
2263                 if (!build)
2264                         nm_i->free_nid_count[nat_ofs]--;
2265         }
2266 }
2267
2268 /* return whether the nid is recognized as free */
2269 static bool add_free_nid(struct f2fs_sb_info *sbi,
2270                                 nid_t nid, bool build, bool update)
2271 {
2272         struct f2fs_nm_info *nm_i = NM_I(sbi);
2273         struct free_nid *i, *e;
2274         struct nat_entry *ne;
2275         int err = -EINVAL;
2276         bool ret = false;
2277
2278         /* 0 nid should not be used */
2279         if (unlikely(nid == 0))
2280                 return false;
2281
2282         if (unlikely(f2fs_check_nid_range(sbi, nid)))
2283                 return false;
2284
2285         i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS, true, NULL);
2286         i->nid = nid;
2287         i->state = FREE_NID;
2288
2289         radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
2290
2291         spin_lock(&nm_i->nid_list_lock);
2292
2293         if (build) {
2294                 /*
2295                  *   Thread A             Thread B
2296                  *  - f2fs_create
2297                  *   - f2fs_new_inode
2298                  *    - f2fs_alloc_nid
2299                  *     - __insert_nid_to_list(PREALLOC_NID)
2300                  *                     - f2fs_balance_fs_bg
2301                  *                      - f2fs_build_free_nids
2302                  *                       - __f2fs_build_free_nids
2303                  *                        - scan_nat_page
2304                  *                         - add_free_nid
2305                  *                          - __lookup_nat_cache
2306                  *  - f2fs_add_link
2307                  *   - f2fs_init_inode_metadata
2308                  *    - f2fs_new_inode_page
2309                  *     - f2fs_new_node_page
2310                  *      - set_node_addr
2311                  *  - f2fs_alloc_nid_done
2312                  *   - __remove_nid_from_list(PREALLOC_NID)
2313                  *                         - __insert_nid_to_list(FREE_NID)
2314                  */
2315                 ne = __lookup_nat_cache(nm_i, nid);
2316                 if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
2317                                 nat_get_blkaddr(ne) != NULL_ADDR))
2318                         goto err_out;
2319
2320                 e = __lookup_free_nid_list(nm_i, nid);
2321                 if (e) {
2322                         if (e->state == FREE_NID)
2323                                 ret = true;
2324                         goto err_out;
2325                 }
2326         }
2327         ret = true;
2328         err = __insert_free_nid(sbi, i);
2329 err_out:
2330         if (update) {
2331                 update_free_nid_bitmap(sbi, nid, ret, build);
2332                 if (!build)
2333                         nm_i->available_nids++;
2334         }
2335         spin_unlock(&nm_i->nid_list_lock);
2336         radix_tree_preload_end();
2337
2338         if (err)
2339                 kmem_cache_free(free_nid_slab, i);
2340         return ret;
2341 }
2342
2343 static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
2344 {
2345         struct f2fs_nm_info *nm_i = NM_I(sbi);
2346         struct free_nid *i;
2347         bool need_free = false;
2348
2349         spin_lock(&nm_i->nid_list_lock);
2350         i = __lookup_free_nid_list(nm_i, nid);
2351         if (i && i->state == FREE_NID) {
2352                 __remove_free_nid(sbi, i, FREE_NID);
2353                 need_free = true;
2354         }
2355         spin_unlock(&nm_i->nid_list_lock);
2356
2357         if (need_free)
2358                 kmem_cache_free(free_nid_slab, i);
2359 }
2360
2361 static int scan_nat_page(struct f2fs_sb_info *sbi,
2362                         struct page *nat_page, nid_t start_nid)
2363 {
2364         struct f2fs_nm_info *nm_i = NM_I(sbi);
2365         struct f2fs_nat_block *nat_blk = page_address(nat_page);
2366         block_t blk_addr;
2367         unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
2368         int i;
2369
2370         __set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
2371
2372         i = start_nid % NAT_ENTRY_PER_BLOCK;
2373
2374         for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
2375                 if (unlikely(start_nid >= nm_i->max_nid))
2376                         break;
2377
2378                 blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
2379
2380                 if (blk_addr == NEW_ADDR)
2381                         return -EINVAL;
2382
2383                 if (blk_addr == NULL_ADDR) {
2384                         add_free_nid(sbi, start_nid, true, true);
2385                 } else {
2386                         spin_lock(&NM_I(sbi)->nid_list_lock);
2387                         update_free_nid_bitmap(sbi, start_nid, false, true);
2388                         spin_unlock(&NM_I(sbi)->nid_list_lock);
2389                 }
2390         }
2391
2392         return 0;
2393 }
2394
2395 static void scan_curseg_cache(struct f2fs_sb_info *sbi)
2396 {
2397         struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2398         struct f2fs_journal *journal = curseg->journal;
2399         int i;
2400
2401         down_read(&curseg->journal_rwsem);
2402         for (i = 0; i < nats_in_cursum(journal); i++) {
2403                 block_t addr;
2404                 nid_t nid;
2405
2406                 addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
2407                 nid = le32_to_cpu(nid_in_journal(journal, i));
2408                 if (addr == NULL_ADDR)
2409                         add_free_nid(sbi, nid, true, false);
2410                 else
2411                         remove_free_nid(sbi, nid);
2412         }
2413         up_read(&curseg->journal_rwsem);
2414 }
2415
2416 static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
2417 {
2418         struct f2fs_nm_info *nm_i = NM_I(sbi);
2419         unsigned int i, idx;
2420         nid_t nid;
2421
2422         f2fs_down_read(&nm_i->nat_tree_lock);
2423
2424         for (i = 0; i < nm_i->nat_blocks; i++) {
2425                 if (!test_bit_le(i, nm_i->nat_block_bitmap))
2426                         continue;
2427                 if (!nm_i->free_nid_count[i])
2428                         continue;
2429                 for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
2430                         idx = find_next_bit_le(nm_i->free_nid_bitmap[i],
2431                                                 NAT_ENTRY_PER_BLOCK, idx);
2432                         if (idx >= NAT_ENTRY_PER_BLOCK)
2433                                 break;
2434
2435                         nid = i * NAT_ENTRY_PER_BLOCK + idx;
2436                         add_free_nid(sbi, nid, true, false);
2437
2438                         if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS)
2439                                 goto out;
2440                 }
2441         }
2442 out:
2443         scan_curseg_cache(sbi);
2444
2445         f2fs_up_read(&nm_i->nat_tree_lock);
2446 }
2447
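/*
 * Scan order below: the cached free_nid_bitmap first (unless mounting),
 * then up to FREE_NID_PAGES on-disk NAT pages starting at
 * next_scan_nid, and finally the NAT journal in the hot data curseg.
 */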
2448 static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
2449                                                 bool sync, bool mount)
2450 {
2451         struct f2fs_nm_info *nm_i = NM_I(sbi);
2452         int i = 0, ret;
2453         nid_t nid = nm_i->next_scan_nid;
2454
2455         if (unlikely(nid >= nm_i->max_nid))
2456                 nid = 0;
2457
2458         if (unlikely(nid % NAT_ENTRY_PER_BLOCK))
2459                 nid = NAT_BLOCK_OFFSET(nid) * NAT_ENTRY_PER_BLOCK;
2460
2461         /* Enough entries */
2462         if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2463                 return 0;
2464
2465         if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
2466                 return 0;
2467
2468         if (!mount) {
2469                 /* try to find free nids in free_nid_bitmap */
2470                 scan_free_nid_bits(sbi);
2471
2472                 if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2473                         return 0;
2474         }
2475
2476         /* readahead nat pages to be scanned */
2477         f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
2478                                                         META_NAT, true);
2479
2480         f2fs_down_read(&nm_i->nat_tree_lock);
2481
2482         while (1) {
2483                 if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
2484                                                 nm_i->nat_block_bitmap)) {
2485                         struct page *page = get_current_nat_page(sbi, nid);
2486
2487                         if (IS_ERR(page)) {
2488                                 ret = PTR_ERR(page);
2489                         } else {
2490                                 ret = scan_nat_page(sbi, page, nid);
2491                                 f2fs_put_page(page, 1);
2492                         }
2493
2494                         if (ret) {
2495                                 f2fs_up_read(&nm_i->nat_tree_lock);
2496                                 f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
2497                                 return ret;
2498                         }
2499                 }
2500
2501                 nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
2502                 if (unlikely(nid >= nm_i->max_nid))
2503                         nid = 0;
2504
2505                 if (++i >= FREE_NID_PAGES)
2506                         break;
2507         }
2508
2509         /* record where to resume scanning for more free nids */
2510         nm_i->next_scan_nid = nid;
2511
2512         /* find free nids from current sum_pages */
2513         scan_curseg_cache(sbi);
2514
2515         f2fs_up_read(&nm_i->nat_tree_lock);
2516
2517         f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
2518                                         nm_i->ra_nid_pages, META_NAT, false);
2519
2520         return 0;
2521 }
2522
2523 int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
2524 {
2525         int ret;
2526
2527         mutex_lock(&NM_I(sbi)->build_lock);
2528         ret = __f2fs_build_free_nids(sbi, sync, mount);
2529         mutex_unlock(&NM_I(sbi)->build_lock);
2530
2531         return ret;
2532 }
2533
2534 /*
2535  * If this function returns success, the caller can obtain a new nid
2536  * from the second parameter of this function. The returned nid can be
2537  * used as an ino as well as a nid when an inode is created.
2538  */
2539 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
2540 {
2541         struct f2fs_nm_info *nm_i = NM_I(sbi);
2542         struct free_nid *i = NULL;
2543 retry:
2544         if (time_to_inject(sbi, FAULT_ALLOC_NID)) {
2545                 f2fs_show_injection_info(sbi, FAULT_ALLOC_NID);
2546                 return false;
2547         }
2548
2549         spin_lock(&nm_i->nid_list_lock);
2550
2551         if (unlikely(nm_i->available_nids == 0)) {
2552                 spin_unlock(&nm_i->nid_list_lock);
2553                 return false;
2554         }
2555
2556         /* We should not use stale free nids created by f2fs_build_free_nids */
2557         if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) {
2558                 f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
2559                 i = list_first_entry(&nm_i->free_nid_list,
2560                                         struct free_nid, list);
2561                 *nid = i->nid;
2562
2563                 __move_free_nid(sbi, i, FREE_NID, PREALLOC_NID);
2564                 nm_i->available_nids--;
2565
2566                 update_free_nid_bitmap(sbi, *nid, false, false);
2567
2568                 spin_unlock(&nm_i->nid_list_lock);
2569                 return true;
2570         }
2571         spin_unlock(&nm_i->nid_list_lock);
2572
2573         /* Let's scan nat pages and their caches to get free nids */
2574         if (!f2fs_build_free_nids(sbi, true, false))
2575                 goto retry;
2576         return false;
2577 }
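
/*
 * Typical allocation pattern (an illustrative sketch; see
 * f2fs_recover_xattr_data() below for a real user):
 *
 *	nid_t nid;
 *
 *	if (!f2fs_alloc_nid(sbi, &nid))
 *		return -ENOSPC;
 *	...					use the nid
 *	if (err)
 *		f2fs_alloc_nid_failed(sbi, nid);	put the nid back
 *	else
 *		f2fs_alloc_nid_done(sbi, nid);		commit the nid
 */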
2578
2579 /*
2580  * f2fs_alloc_nid() should be called prior to this function.
2581  */
2582 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
2583 {
2584         struct f2fs_nm_info *nm_i = NM_I(sbi);
2585         struct free_nid *i;
2586
2587         spin_lock(&nm_i->nid_list_lock);
2588         i = __lookup_free_nid_list(nm_i, nid);
2589         f2fs_bug_on(sbi, !i);
2590         __remove_free_nid(sbi, i, PREALLOC_NID);
2591         spin_unlock(&nm_i->nid_list_lock);
2592
2593         kmem_cache_free(free_nid_slab, i);
2594 }
2595
2596 /*
2597  * f2fs_alloc_nid() should be called prior to this function.
2598  */
2599 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
2600 {
2601         struct f2fs_nm_info *nm_i = NM_I(sbi);
2602         struct free_nid *i;
2603         bool need_free = false;
2604
2605         if (!nid)
2606                 return;
2607
2608         spin_lock(&nm_i->nid_list_lock);
2609         i = __lookup_free_nid_list(nm_i, nid);
2610         f2fs_bug_on(sbi, !i);
2611
2612         if (!f2fs_available_free_memory(sbi, FREE_NIDS)) {
2613                 __remove_free_nid(sbi, i, PREALLOC_NID);
2614                 need_free = true;
2615         } else {
2616                 __move_free_nid(sbi, i, PREALLOC_NID, FREE_NID);
2617         }
2618
2619         nm_i->available_nids++;
2620
2621         update_free_nid_bitmap(sbi, nid, true, false);
2622
2623         spin_unlock(&nm_i->nid_list_lock);
2624
2625         if (need_free)
2626                 kmem_cache_free(free_nid_slab, i);
2627 }
2628
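/*
 * Shrinker helper: trims the FREE_NID cache back toward MAX_FREE_NIDS
 * in SHRINK_NID_BATCH_SIZE chunks, dropping nid_list_lock between
 * batches so nid allocation is not starved. Returns the number of nids
 * actually freed.
 */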
2629 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
2630 {
2631         struct f2fs_nm_info *nm_i = NM_I(sbi);
2632         int nr = nr_shrink;
2633
2634         if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
2635                 return 0;
2636
2637         if (!mutex_trylock(&nm_i->build_lock))
2638                 return 0;
2639
2640         while (nr_shrink && nm_i->nid_cnt[FREE_NID] > MAX_FREE_NIDS) {
2641                 struct free_nid *i, *next;
2642                 unsigned int batch = SHRINK_NID_BATCH_SIZE;
2643
2644                 spin_lock(&nm_i->nid_list_lock);
2645                 list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
2646                         if (!nr_shrink || !batch ||
2647                                 nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
2648                                 break;
2649                         __remove_free_nid(sbi, i, FREE_NID);
2650                         kmem_cache_free(free_nid_slab, i);
2651                         nr_shrink--;
2652                         batch--;
2653                 }
2654                 spin_unlock(&nm_i->nid_list_lock);
2655         }
2656
2657         mutex_unlock(&nm_i->build_lock);
2658
2659         return nr - nr_shrink;
2660 }
2661
2662 int f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
2663 {
2664         void *src_addr, *dst_addr;
2665         size_t inline_size;
2666         struct page *ipage;
2667         struct f2fs_inode *ri;
2668
2669         ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
2670         if (IS_ERR(ipage))
2671                 return PTR_ERR(ipage);
2672
2673         ri = F2FS_INODE(page);
2674         if (ri->i_inline & F2FS_INLINE_XATTR) {
2675                 if (!f2fs_has_inline_xattr(inode)) {
2676                         set_inode_flag(inode, FI_INLINE_XATTR);
2677                         stat_inc_inline_xattr(inode);
2678                 }
2679         } else {
2680                 if (f2fs_has_inline_xattr(inode)) {
2681                         stat_dec_inline_xattr(inode);
2682                         clear_inode_flag(inode, FI_INLINE_XATTR);
2683                 }
2684                 goto update_inode;
2685         }
2686
2687         dst_addr = inline_xattr_addr(inode, ipage);
2688         src_addr = inline_xattr_addr(inode, page);
2689         inline_size = inline_xattr_size(inode);
2690
2691         f2fs_wait_on_page_writeback(ipage, NODE, true, true);
2692         memcpy(dst_addr, src_addr, inline_size);
2693 update_inode:
2694         f2fs_update_inode(inode, ipage);
2695         f2fs_put_page(ipage, 1);
2696         return 0;
2697 }
2698
2699 int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
2700 {
2701         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2702         nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
2703         nid_t new_xnid;
2704         struct dnode_of_data dn;
2705         struct node_info ni;
2706         struct page *xpage;
2707         int err;
2708
2709         if (!prev_xnid)
2710                 goto recover_xnid;
2711
2712         /* 1: invalidate the previous xattr nid */
2713         err = f2fs_get_node_info(sbi, prev_xnid, &ni, false);
2714         if (err)
2715                 return err;
2716
2717         f2fs_invalidate_blocks(sbi, ni.blk_addr);
2718         dec_valid_node_count(sbi, inode, false);
2719         set_node_addr(sbi, &ni, NULL_ADDR, false);
2720
2721 recover_xnid:
2722         /* 2: update xattr nid in inode */
2723         if (!f2fs_alloc_nid(sbi, &new_xnid))
2724                 return -ENOSPC;
2725
2726         set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
2727         xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);
2728         if (IS_ERR(xpage)) {
2729                 f2fs_alloc_nid_failed(sbi, new_xnid);
2730                 return PTR_ERR(xpage);
2731         }
2732
2733         f2fs_alloc_nid_done(sbi, new_xnid);
2734         f2fs_update_inode_page(inode);
2735
2736         /* 3: update and set xattr node page dirty */
2737         memcpy(F2FS_NODE(xpage), F2FS_NODE(page), VALID_XATTR_BLOCK_SIZE);
2738
2739         set_page_dirty(xpage);
2740         f2fs_put_page(xpage, 1);
2741
2742         return 0;
2743 }
2744
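/*
 * Recovery helper: rebuilds an inode page in the node cache from the
 * fsync'd image in @page. Only the header up to i_ext plus selected
 * extra attributes are copied; i_size, i_blocks, i_links and the xattr
 * nid are reset here and repaired by later recovery steps.
 */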
2745 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
2746 {
2747         struct f2fs_inode *src, *dst;
2748         nid_t ino = ino_of_node(page);
2749         struct node_info old_ni, new_ni;
2750         struct page *ipage;
2751         int err;
2752
2753         err = f2fs_get_node_info(sbi, ino, &old_ni, false);
2754         if (err)
2755                 return err;
2756
2757         if (unlikely(old_ni.blk_addr != NULL_ADDR))
2758                 return -EINVAL;
2759 retry:
2760         ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
2761         if (!ipage) {
2762                 memalloc_retry_wait(GFP_NOFS);
2763                 goto retry;
2764         }
2765
2766         /* make sure this ino is not reused from the free nid list */
2767         remove_free_nid(sbi, ino);
2768
2769         if (!PageUptodate(ipage))
2770                 SetPageUptodate(ipage);
2771         fill_node_footer(ipage, ino, ino, 0, true);
2772         set_cold_node(ipage, false);
2773
2774         src = F2FS_INODE(page);
2775         dst = F2FS_INODE(ipage);
2776
2777         memcpy(dst, src, offsetof(struct f2fs_inode, i_ext));
2778         dst->i_size = 0;
2779         dst->i_blocks = cpu_to_le64(1);
2780         dst->i_links = cpu_to_le32(1);
2781         dst->i_xattr_nid = 0;
2782         dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR);
2783         if (dst->i_inline & F2FS_EXTRA_ATTR) {
2784                 dst->i_extra_isize = src->i_extra_isize;
2785
2786                 if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
2787                         F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2788                                                         i_inline_xattr_size))
2789                         dst->i_inline_xattr_size = src->i_inline_xattr_size;
2790
2791                 if (f2fs_sb_has_project_quota(sbi) &&
2792                         F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2793                                                                 i_projid))
2794                         dst->i_projid = src->i_projid;
2795
2796                 if (f2fs_sb_has_inode_crtime(sbi) &&
2797                         F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2798                                                         i_crtime_nsec)) {
2799                         dst->i_crtime = src->i_crtime;
2800                         dst->i_crtime_nsec = src->i_crtime_nsec;
2801                 }
2802         }
2803
2804         new_ni = old_ni;
2805         new_ni.ino = ino;
2806
2807         if (unlikely(inc_valid_node_count(sbi, NULL, true)))
2808                 WARN_ON(1);
2809         set_node_addr(sbi, &new_ni, NEW_ADDR, false);
2810         inc_valid_inode_count(sbi);
2811         set_page_dirty(ipage);
2812         f2fs_put_page(ipage, 1);
2813         return 0;
2814 }
2815
2816 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
2817                         unsigned int segno, struct f2fs_summary_block *sum)
2818 {
2819         struct f2fs_node *rn;
2820         struct f2fs_summary *sum_entry;
2821         block_t addr;
2822         int i, idx, last_offset, nrpages;
2823
2824         /* scan the node segment */
2825         last_offset = sbi->blocks_per_seg;
2826         addr = START_BLOCK(sbi, segno);
2827         sum_entry = &sum->entries[0];
2828
2829         for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
2830                 nrpages = bio_max_segs(last_offset - i);
2831
2832                 /* readahead node pages */
2833                 f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);
2834
2835                 for (idx = addr; idx < addr + nrpages; idx++) {
2836                         struct page *page = f2fs_get_tmp_page(sbi, idx);
2837
2838                         if (IS_ERR(page))
2839                                 return PTR_ERR(page);
2840
2841                         rn = F2FS_NODE(page);
2842                         sum_entry->nid = rn->footer.nid;
2843                         sum_entry->version = 0;
2844                         sum_entry->ofs_in_node = 0;
2845                         sum_entry++;
2846                         f2fs_put_page(page, 1);
2847                 }
2848
2849                 invalidate_mapping_pages(META_MAPPING(sbi), addr,
2850                                                         addr + nrpages);
2851         }
2852         return 0;
2853 }
2854
2855 static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
2856 {
2857         struct f2fs_nm_info *nm_i = NM_I(sbi);
2858         struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2859         struct f2fs_journal *journal = curseg->journal;
2860         int i;
2861
2862         down_write(&curseg->journal_rwsem);
2863         for (i = 0; i < nats_in_cursum(journal); i++) {
2864                 struct nat_entry *ne;
2865                 struct f2fs_nat_entry raw_ne;
2866                 nid_t nid = le32_to_cpu(nid_in_journal(journal, i));
2867
2868                 if (f2fs_check_nid_range(sbi, nid))
2869                         continue;
2870
2871                 raw_ne = nat_in_journal(journal, i);
2872
2873                 ne = __lookup_nat_cache(nm_i, nid);
2874                 if (!ne) {
2875                         ne = __alloc_nat_entry(sbi, nid, true);
2876                         __init_nat_entry(nm_i, ne, &raw_ne, true);
2877                 }
2878
2879                 /*
2880                  * if a free nat in the journal has not been used since the
2881                  * last checkpoint, remove it from the available nids, since
2882                  * we will add it back later.
2883                  */
2884                 if (!get_nat_flag(ne, IS_DIRTY) &&
2885                                 le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
2886                         spin_lock(&nm_i->nid_list_lock);
2887                         nm_i->available_nids--;
2888                         spin_unlock(&nm_i->nid_list_lock);
2889                 }
2890
2891                 __set_nat_cache_dirty(nm_i, ne);
2892         }
2893         update_nats_in_cursum(journal, -i);
2894         up_write(&curseg->journal_rwsem);
2895 }
2896
2897 static void __adjust_nat_entry_set(struct nat_entry_set *nes,
2898                                                 struct list_head *head, int max)
2899 {
2900         struct nat_entry_set *cur;
2901
2902         if (nes->entry_cnt >= max)
2903                 goto add_out;
2904
2905         list_for_each_entry(cur, head, set_list) {
2906                 if (cur->entry_cnt >= nes->entry_cnt) {
2907                         list_add(&nes->set_list, cur->set_list.prev);
2908                         return;
2909                 }
2910         }
2911 add_out:
2912         list_add_tail(&nes->set_list, head);
2913 }
2914
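/*
 * nat_bits bookkeeping: a NAT block with no valid entries is flagged in
 * empty_nat_bits and a fully valid one in full_nat_bits; anything in
 * between clears both bits, so later mounts can skip reading NAT blocks
 * whose contents are already implied.
 */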
2915 static void __update_nat_bits(struct f2fs_nm_info *nm_i, unsigned int nat_ofs,
2916                                                         unsigned int valid)
2917 {
2918         if (valid == 0) {
2919                 __set_bit_le(nat_ofs, nm_i->empty_nat_bits);
2920                 __clear_bit_le(nat_ofs, nm_i->full_nat_bits);
2921                 return;
2922         }
2923
2924         __clear_bit_le(nat_ofs, nm_i->empty_nat_bits);
2925         if (valid == NAT_ENTRY_PER_BLOCK)
2926                 __set_bit_le(nat_ofs, nm_i->full_nat_bits);
2927         else
2928                 __clear_bit_le(nat_ofs, nm_i->full_nat_bits);
2929 }
2930
2931 static void update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
2932                                                 struct page *page)
2933 {
2934         struct f2fs_nm_info *nm_i = NM_I(sbi);
2935         unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK;
2936         struct f2fs_nat_block *nat_blk = page_address(page);
2937         int valid = 0;
2938         int i = 0;
2939
2940         if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
2941                 return;
2942
2943         if (nat_index == 0) {
2944                 valid = 1;
2945                 i = 1;
2946         }
2947         for (; i < NAT_ENTRY_PER_BLOCK; i++) {
2948                 if (le32_to_cpu(nat_blk->entries[i].block_addr) != NULL_ADDR)
2949                         valid++;
2950         }
2951
2952         __update_nat_bits(nm_i, nat_index, valid);
2953 }
2954
2955 void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi)
2956 {
2957         struct f2fs_nm_info *nm_i = NM_I(sbi);
2958         unsigned int nat_ofs;
2959
2960         f2fs_down_read(&nm_i->nat_tree_lock);
2961
2962         for (nat_ofs = 0; nat_ofs < nm_i->nat_blocks; nat_ofs++) {
2963                 unsigned int valid = 0, nid_ofs = 0;
2964
2965                 /* handle nid zero specially, since it should never be used */
2966                 if (unlikely(nat_ofs == 0)) {
2967                         valid = 1;
2968                         nid_ofs = 1;
2969                 }
2970
2971                 for (; nid_ofs < NAT_ENTRY_PER_BLOCK; nid_ofs++) {
2972                         if (!test_bit_le(nid_ofs,
2973                                         nm_i->free_nid_bitmap[nat_ofs]))
2974                                 valid++;
2975                 }
2976
2977                 __update_nat_bits(nm_i, nat_ofs, valid);
2978         }
2979
2980         f2fs_up_read(&nm_i->nat_tree_lock);
2981 }
2982
static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
		struct nat_entry_set *set, struct cp_control *cpc)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
	bool to_journal = true;
	struct f2fs_nat_block *nat_blk;
	struct nat_entry *ne, *cur;
	struct page *page = NULL;

	/*
	 * nat entries are flushed in one of two ways:
	 * #1, into the journal in the current hot data summary block.
	 * #2, into the on-disk nat page.
	 */
	if ((cpc->reason & CP_UMOUNT) ||
		!__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
		to_journal = false;

	if (to_journal) {
		down_write(&curseg->journal_rwsem);
	} else {
		page = get_next_nat_page(sbi, start_nid);
		if (IS_ERR(page))
			return PTR_ERR(page);

		nat_blk = page_address(page);
		f2fs_bug_on(sbi, !nat_blk);
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
		struct f2fs_nat_entry *raw_ne;
		nid_t nid = nat_get_nid(ne);
		int offset;

		f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR);

		if (to_journal) {
			offset = f2fs_lookup_journal_in_cursum(journal,
							NAT_JOURNAL, nid, 1);
			f2fs_bug_on(sbi, offset < 0);
			raw_ne = &nat_in_journal(journal, offset);
			nid_in_journal(journal, offset) = cpu_to_le32(nid);
		} else {
			raw_ne = &nat_blk->entries[nid - start_nid];
		}
		raw_nat_from_node_info(raw_ne, &ne->ni);
		nat_reset_flag(ne);
		__clear_nat_cache_dirty(NM_I(sbi), set, ne);
		if (nat_get_blkaddr(ne) == NULL_ADDR) {
			add_free_nid(sbi, nid, false, true);
		} else {
			spin_lock(&NM_I(sbi)->nid_list_lock);
			update_free_nid_bitmap(sbi, nid, false, false);
			spin_unlock(&NM_I(sbi)->nid_list_lock);
		}
	}

	if (to_journal) {
		up_write(&curseg->journal_rwsem);
	} else {
		update_nat_bits(sbi, start_nid, page);
		f2fs_put_page(page, 1);
	}

	/*
	 * dirty nats may be produced again by node block allocation in
	 * write_begin, so only free the set once it holds no entries
	 */
	if (!set->entry_cnt) {
		radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
		kmem_cache_free(nat_entry_set_slab, set);
	}
	return 0;
}

/*
 * Flush all dirty NAT entries to the journal or to NAT pages; called
 * during the checkpointing process.
 */
int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	struct nat_entry_set *setvec[SETVEC_SIZE];
	struct nat_entry_set *set, *tmp;
	unsigned int found;
	nid_t set_idx = 0;
	LIST_HEAD(sets);
	int err = 0;

	/*
	 * during unmount, let's flush nat_bits before checking
	 * nat_cnt[DIRTY_NAT].
	 */
	if (cpc->reason & CP_UMOUNT) {
		f2fs_down_write(&nm_i->nat_tree_lock);
		remove_nats_in_journal(sbi);
		f2fs_up_write(&nm_i->nat_tree_lock);
	}

	if (!nm_i->nat_cnt[DIRTY_NAT])
		return 0;

	f2fs_down_write(&nm_i->nat_tree_lock);

	/*
	 * if there is not enough space in the journal to store all dirty
	 * nat entries, remove all entries from the journal and merge them
	 * into the nat entry set.
	 */
	if (cpc->reason & CP_UMOUNT ||
		!__has_cursum_space(journal,
			nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL))
		remove_nats_in_journal(sbi);

	while ((found = __gang_lookup_nat_set(nm_i,
					set_idx, SETVEC_SIZE, setvec))) {
		unsigned idx;

		set_idx = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++)
			__adjust_nat_entry_set(setvec[idx], &sets,
						MAX_NAT_JENTRIES(journal));
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(set, tmp, &sets, set_list) {
		err = __flush_nat_entry_set(sbi, set, cpc);
		if (err)
			break;
	}

	f2fs_up_write(&nm_i->nat_tree_lock);
	/*
	 * from here, dirty nats may be produced again by node block
	 * allocation in write_begin
	 */

	return err;
}

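/*
 * Load the nat_bits area from the tail of the checkpoint segment.  Its
 * layout is an 8-byte checkpoint version followed by the full and then
 * the empty NAT block bitmaps.  If the stored version does not match
 * the current checkpoint, the nat_bits feature is disabled.
 */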
static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE;
	unsigned int i;
	__u64 cp_ver = cur_cp_version(ckpt);
	block_t nat_bits_addr;

	nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
	nm_i->nat_bits = f2fs_kvzalloc(sbi,
			nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL);
	if (!nm_i->nat_bits)
		return -ENOMEM;

	nm_i->full_nat_bits = nm_i->nat_bits + 8;
	nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;

	if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
		return 0;

	nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg -
						nm_i->nat_bits_blocks;
	for (i = 0; i < nm_i->nat_bits_blocks; i++) {
		struct page *page;

		page = f2fs_get_meta_page(sbi, nat_bits_addr++);
		if (IS_ERR(page))
			return PTR_ERR(page);

		memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
					page_address(page), F2FS_BLKSIZE);
		f2fs_put_page(page, 1);
	}

	cp_ver |= (cur_cp_crc(ckpt) << 32);
	if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
		clear_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
		f2fs_notice(sbi, "Disable nat_bits due to incorrect cp_ver (%llu, %llu)",
			cp_ver, le64_to_cpu(*(__le64 *)nm_i->nat_bits));
		return 0;
	}

	f2fs_notice(sbi, "Found nat_bits in checkpoint");
	return 0;
}

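/*
 * Seed the free nid bitmaps from the nat_bits loaded above: every nid
 * in an empty NAT block is marked free, and both empty and full blocks
 * are flagged in nat_block_bitmap so that they need not be rescanned
 * from disk.
 */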
static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int i = 0;
	nid_t nid, last_nid;

	if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
		return;

	for (i = 0; i < nm_i->nat_blocks; i++) {
		i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
		if (i >= nm_i->nat_blocks)
			break;

		__set_bit_le(i, nm_i->nat_block_bitmap);

		nid = i * NAT_ENTRY_PER_BLOCK;
		last_nid = nid + NAT_ENTRY_PER_BLOCK;

		spin_lock(&NM_I(sbi)->nid_list_lock);
		for (; nid < last_nid; nid++)
			update_free_nid_bitmap(sbi, nid, true, true);
		spin_unlock(&NM_I(sbi)->nid_list_lock);
	}

	for (i = 0; i < nm_i->nat_blocks; i++) {
		i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
		if (i >= nm_i->nat_blocks)
			break;

		__set_bit_le(i, nm_i->nat_block_bitmap);
	}
}

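/*
 * Initialize the in-memory node manager state from the raw superblock
 * and checkpoint: nid range, nid counters, lookup trees, locks, and a
 * copy of the NAT version bitmap.
 */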
static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs;
	int err;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes both segments of each pair, so divide by 2 */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;

	/* unused nids: 0, node, meta (root is counted as a valid node) */
	nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
						F2FS_RESERVED_NODE_NUM;
	nm_i->nid_cnt[FREE_NID] = 0;
	nm_i->nid_cnt[PREALLOC_NID] = 0;
	nm_i->ram_thresh = DEF_RAM_THRESHOLD;
	nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
	nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
	nm_i->max_rf_node_blocks = DEF_RF_NODE_BLOCKS;

	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
	INIT_LIST_HEAD(&nm_i->nat_entries);
	spin_lock_init(&nm_i->nat_list_lock);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->nid_list_lock);
	init_f2fs_rwsem(&nm_i->nat_tree_lock);

	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;

	err = __get_nat_bitmaps(sbi);
	if (err)
		return err;

#ifdef CONFIG_F2FS_CHECK_FS
	nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap_mir)
		return -ENOMEM;
#endif

	return 0;
}

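/*
 * Allocate the per-NAT-block free nid bitmaps, plus the bitmap and the
 * counters that track which NAT blocks have been scanned and how many
 * free nids each one holds.
 */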
static int init_free_nid_cache(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int i;

	nm_i->free_nid_bitmap =
		f2fs_kvzalloc(sbi, array_size(sizeof(unsigned char *),
					      nm_i->nat_blocks),
			      GFP_KERNEL);
	if (!nm_i->free_nid_bitmap)
		return -ENOMEM;

	for (i = 0; i < nm_i->nat_blocks; i++) {
		nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
			f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL);
		if (!nm_i->free_nid_bitmap[i])
			return -ENOMEM;
	}

	nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
								GFP_KERNEL);
	if (!nm_i->nat_block_bitmap)
		return -ENOMEM;

	nm_i->free_nid_count =
		f2fs_kvzalloc(sbi, array_size(sizeof(unsigned short),
					      nm_i->nat_blocks),
			      GFP_KERNEL);
	if (!nm_i->free_nid_count)
		return -ENOMEM;
	return 0;
}

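/* Set up the node manager and preload an initial pool of free nids. */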
int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info),
							GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	err = init_free_nid_cache(sbi);
	if (err)
		return err;

	/* load free nid status from nat_bits table */
	load_free_nid_bitmap(sbi);

	return f2fs_build_free_nids(sbi, true, true);
}

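/*
 * Tear down the node manager: drain the free nid list, the nat entry
 * cache and the nat set cache, then release all bitmaps.  Every dirty
 * nat entry should have been flushed by now, hence the bug-on checks.
 */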
void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	struct nat_entry_set *setvec[SETVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		__remove_free_nid(sbi, i, FREE_NID);
		spin_unlock(&nm_i->nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		spin_lock(&nm_i->nid_list_lock);
	}
	f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]);
	f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]);
	f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list));
	spin_unlock(&nm_i->nid_list_lock);

	/* destroy nat cache */
	f2fs_down_write(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
					nid, NATVEC_SIZE, natvec))) {
		unsigned idx;

		nid = nat_get_nid(natvec[found - 1]) + 1;
		for (idx = 0; idx < found; idx++) {
			spin_lock(&nm_i->nat_list_lock);
			list_del(&natvec[idx]->list);
			spin_unlock(&nm_i->nat_list_lock);

			__del_from_nat_cache(nm_i, natvec[idx]);
		}
	}
	f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT]);

	/* destroy nat set cache */
	nid = 0;
	while ((found = __gang_lookup_nat_set(nm_i,
					nid, SETVEC_SIZE, setvec))) {
		unsigned idx;

		nid = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++) {
			/* entry_cnt may be non-zero if a cp_error occurred */
			f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
			radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
			kmem_cache_free(nat_entry_set_slab, setvec[idx]);
		}
	}
	f2fs_up_write(&nm_i->nat_tree_lock);

	kvfree(nm_i->nat_block_bitmap);
	if (nm_i->free_nid_bitmap) {
		int i;

		for (i = 0; i < nm_i->nat_blocks; i++)
			kvfree(nm_i->free_nid_bitmap[i]);
		kvfree(nm_i->free_nid_bitmap);
	}
	kvfree(nm_i->free_nid_count);

	kvfree(nm_i->nat_bitmap);
	kvfree(nm_i->nat_bits);
#ifdef CONFIG_F2FS_CHECK_FS
	kvfree(nm_i->nat_bitmap_mir);
#endif
	sbi->nm_info = NULL;
	kfree(nm_i);
}

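/*
 * Create the slab caches used by the node manager, unwinding in reverse
 * order if any allocation fails.
 */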
int __init f2fs_create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("f2fs_nat_entry",
			sizeof(struct nat_entry));
	if (!nat_entry_slab)
		goto fail;

	free_nid_slab = f2fs_kmem_cache_create("f2fs_free_nid",
			sizeof(struct free_nid));
	if (!free_nid_slab)
		goto destroy_nat_entry;

	nat_entry_set_slab = f2fs_kmem_cache_create("f2fs_nat_entry_set",
			sizeof(struct nat_entry_set));
	if (!nat_entry_set_slab)
		goto destroy_free_nid;

	fsync_node_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_node_entry",
			sizeof(struct fsync_node_entry));
	if (!fsync_node_entry_slab)
		goto destroy_nat_entry_set;
	return 0;

destroy_nat_entry_set:
	kmem_cache_destroy(nat_entry_set_slab);
destroy_free_nid:
	kmem_cache_destroy(free_nid_slab);
destroy_nat_entry:
	kmem_cache_destroy(nat_entry_slab);
fail:
	return -ENOMEM;
}

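/* Destroy the slab caches in reverse order of creation. */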
void f2fs_destroy_node_manager_caches(void)
{
	kmem_cache_destroy(fsync_node_entry_slab);
	kmem_cache_destroy(nat_entry_set_slab);
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}