/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/blkdev.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *winode_slab;

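/*
 * Background GC kernel thread: wakes up periodically and, when the
 * filesystem is idle, grabs gc_mutex and runs one pass of f2fs_gc().
 * The sleep interval is lengthened while the system is busy and shortened
 * when enough invalid blocks have accumulated.
 */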
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	long wait_ms;

	wait_ms = gc_th->min_sleep_time;

	do {
		if (try_to_freeze())
			continue;
		else
			wait_event_interruptible_timeout(*wq,
						kthread_should_stop(),
						msecs_to_jiffies(wait_ms));
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			wait_ms = increase_sleep_time(gc_th, wait_ms);
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note: we have to avoid triggering GCs too frequently,
		 * because some segments may be invalidated soon afterwards
		 * by user updates or deletions, so it is better to wait a
		 * while and let more dirty segments accumulate.
		 */
		if (!mutex_trylock(&sbi->gc_mutex))
			continue;

		if (!is_idle(sbi)) {
			wait_ms = increase_sleep_time(gc_th, wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			continue;
		}

		if (has_enough_invalid_blocks(sbi))
			wait_ms = decrease_sleep_time(gc_th, wait_ms);
		else
			wait_ms = increase_sleep_time(gc_th, wait_ms);

		stat_inc_bggc_count(sbi);

		/* if the return value is not zero, no victim was selected */
		if (f2fs_gc(sbi))
			wait_ms = gc_th->no_gc_sleep_time;

		/* balance f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi);

	} while (!kthread_should_stop());
	return 0;
}

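/* Start the background GC thread if the BG_GC mount option is enabled. */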
int start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	if (!test_opt(sbi, BG_GC))
		goto out;
	gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_idle = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}

void stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

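/*
 * Choose the victim selection algorithm: cost-benefit for background GC and
 * greedy for foreground GC, unless gc_idle forces one of them explicitly.
 */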
static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

	if (gc_th && gc_th->gc_idle) {
		if (gc_th->gc_idle == 1)
			gc_mode = GC_CB;
		else if (gc_th->gc_idle == 2)
			gc_mode = GC_GREEDY;
	}
	return gc_mode;
}

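/*
 * Set up the victim selection policy (bitmap, search bound, search unit)
 * for the given allocation mode and GC type.
 */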
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->max_search = dirty_i->nr_dirty[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}

	if (p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	p->offset = sbi->last_victim[p->gc_mode];
}

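/* Worst-case cost, used as the initial minimum when scanning for a victim. */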
static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return 1 << sbi->log_blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return (1 << sbi->log_blocks_per_seg) * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can reuse victim sections that were
	 * previously selected by background GC.
	 * Those sections are guaranteed to have few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return secno * sbi->segs_per_sec;
	}
	return NULL_SEGNO;
}

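/*
 * Cost-benefit cost of a section: derived from its utilization (u) and age,
 * so that older sections with fewer valid blocks get a smaller cost and are
 * therefore preferred as victims.
 */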
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SECNO(sbi, segno);
	unsigned int start = secno * sbi->segs_per_sec;
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle the case where the system time has been changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

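/*
 * Cost of one candidate: valid block count for SSR and greedy modes,
 * the cost-benefit value for GC_CB.
 */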
static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, sbi->segs_per_sec);
	else
		return get_cb_cost(sbi, segno);
}

/*
 * This function is called from two paths: garbage collection and SSR
 * segment selection.
 * When called for GC, it just picks a victim segment and does not remove
 * it from the dirty seglist.
 * When called for SSR segment selection, it finds the segment with the
 * fewest valid blocks and removes it from the dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, max_cost;
	int nsearched = 0;

	mutex_lock(&dirty_i->seglist_lock);

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = max_cost = get_max_cost(sbi, &p);

	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap, MAIN_SEGS(sbi), p.offset);
		if (segno >= MAIN_SEGS(sbi)) {
			if (sbi->last_victim[p.gc_mode]) {
				sbi->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		if (p.ofs_unit > 1)
			p.offset -= segno % p.ofs_unit;

		secno = GET_SECNO(sbi, segno);

		if (sec_usage_check(sbi, secno))
			continue;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			continue;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		} else if (unlikely(cost == max_cost)) {
			continue;
		}

		if (nsearched++ >= p.max_search) {
			sbi->last_victim[p.gc_mode] = segno;
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		if (p.alloc_mode == LFS) {
			secno = GET_SECNO(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	}
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

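/* Look up an inode in the GC inode list by its inode number. */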
static struct inode *find_gc_inode(nid_t ino, struct list_head *ilist)
{
	struct inode_entry *ie;

	list_for_each_entry(ie, ilist, list)
		if (ie->inode->i_ino == ino)
			return ie->inode;
	return NULL;
}

static void add_gc_inode(struct inode *inode, struct list_head *ilist)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(inode->i_ino, ilist)) {
		iput(inode);
		return;
	}

	new_ie = f2fs_kmem_cache_alloc(winode_slab, GFP_NOFS);
	new_ie->inode = inode;
	list_add_tail(&new_ie->list, ilist);
}

static void put_gc_inode(struct list_head *ilist)
{
	struct inode_entry *ie, *next_ie;
	list_for_each_entry_safe(ie, next_ie, ilist, list) {
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(winode_slab, ie);
	}
}

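/*
 * Check in the SIT whether the block at @offset within segment @segno is
 * still valid.
 */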
static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address recorded in the summary with the
 * one in the NAT.  If it is valid, the node is copied with cold status;
 * otherwise the (invalid) node is ignored.
 */
static void gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	bool initial = true;
	struct f2fs_summary *entry;
	int off;

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;

		/* stop BG_GC if there are not enough free sections */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (initial) {
			ra_node_page(sbi, nid);
			continue;
		}
		node_page = get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		/* set page dirty and write it */
		if (gc_type == FG_GC) {
			f2fs_wait_on_page_writeback(node_page, NODE);
			set_page_dirty(node_page);
		} else {
			if (!PageWriteback(node_page))
				set_page_dirty(node_page);
		}
		f2fs_put_page(node_page, 1);
		stat_inc_node_blk_count(sbi, 1);
	}

	if (initial) {
		initial = false;
		goto next_step;
	}

	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = LONG_MAX,
			.for_reclaim = 0,
		};
		sync_node_pages(sbi, 0, &wbc);

		/*
		 * In the case of FG_GC, it'd be better to reclaim this victim
		 * completely.
		 */
		if (get_valid_blocks(sbi, segno, 1) != 0)
			goto next_step;
	}
}

/*
 * Calculate the start block index corresponding to the given node offset.
 * Be careful: the caller must pass only node offsets of direct node blocks.
 * Passing an offset of any other node block type, such as an indirect or
 * double indirect node block, is a caller's bug.
 */
block_t start_bidx_of_node(unsigned int node_ofs, struct f2fs_inode_info *fi)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi);
}

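/*
 * Check that the dnode recorded in the summary entry still maps @blkaddr.
 * On success, return 1 and fill @dni and @nofs; otherwise return 0.
 */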
static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return 0;

	get_node_info(sbi, nid, dni);

	if (sum->version != dni->version) {
		f2fs_put_page(node_page, 1);
		return 0;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return 0;
	return 1;
}

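/*
 * Write out a victim data page.  Background GC only marks the page dirty
 * and cold; foreground GC writes it out synchronously.
 */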
static void move_data_page(struct inode *inode, struct page *page, int gc_type)
{
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = WRITE_SYNC,
	};

	if (gc_type == BG_GC) {
		if (PageWriteback(page))
			goto out;
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		f2fs_wait_on_page_writeback(page, DATA);

		if (clear_page_dirty_for_io(page))
			inode_dec_dirty_pages(inode);
		set_cold_data(page);
		do_write_data_page(page, &fio);
		clear_cold_data(page);
	}
out:
	f2fs_put_page(page, 1);
}

/*
 * This function tries to get the parent node of a victim data block and
 * checks whether the data block is still valid.  If it is valid, the block
 * is copied with cold status and the parent node is updated.
 * If the parent node is not valid or the data block address differs, the
 * victim data block is ignored.
 */
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct list_head *ilist, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;

		/* stop BG_GC if there are not enough free sections */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_node_page(sbi, le32_to_cpu(entry->nid));
			continue;
		}

		/* Get an inode by its ino after checking the dnode's validity */
		if (check_dnode(sbi, entry, &dni, start_addr + off, &nofs) == 0)
			continue;

		if (phase == 1) {
			ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 2) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode))
				continue;

			start_bidx = start_bidx_of_node(nofs, F2FS_I(inode));

			data_page = find_data_page(inode,
					start_bidx + ofs_in_node, false);
			if (IS_ERR(data_page))
				goto next_iput;

			f2fs_put_page(data_page, 0);
			add_gc_inode(inode, ilist);
		} else {
			inode = find_gc_inode(dni.ino, ilist);
			if (inode) {
				start_bidx = start_bidx_of_node(nofs,
								F2FS_I(inode));
				data_page = get_lock_data_page(inode,
						start_bidx + ofs_in_node);
				if (IS_ERR(data_page))
					continue;
				move_data_page(inode, data_page, gc_type);
				stat_inc_data_blk_count(sbi, 1);
			}
		}
		continue;
next_iput:
		iput(inode);
	}

	if (++phase < 4)
		goto next_step;

	if (gc_type == FG_GC) {
		f2fs_submit_merged_bio(sbi, DATA, WRITE);

		/*
		 * In the case of FG_GC, it'd be better to reclaim this victim
		 * completely.
		 */
		if (get_valid_blocks(sbi, segno, 1) != 0) {
			phase = 2;
			goto next_step;
		}
	}
}

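/* Select a victim segment for garbage collection under the SIT sentry lock. */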
static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
						int gc_type, int type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;
	mutex_lock(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type, type, LFS);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

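/*
 * Collect one victim segment: read its summary block and migrate either the
 * node blocks or the data blocks it describes, depending on the summary type.
 */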
static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
				struct list_head *ilist, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;

	/* read segment summary of victim */
	sum_page = get_sum_page(sbi, segno);

	blk_start_plug(&plug);

	sum = page_address(sum_page);

	switch (GET_SUM_TYPE((&sum->footer))) {
	case SUM_TYPE_NODE:
		gc_node_segment(sbi, sum->entries, segno, gc_type);
		break;
	case SUM_TYPE_DATA:
		gc_data_segment(sbi, sum->entries, ilist, segno, gc_type);
		break;
	}
	blk_finish_plug(&plug);

	stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)));
	stat_inc_call_count(sbi->stat_info);

	f2fs_put_page(sum_page, 1);
}

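/*
 * Main garbage collection entry point.  Called with sbi->gc_mutex held;
 * the mutex is released before returning.  Returns 0 once a victim has
 * been selected and collected, or -1 if no victim could be found.
 */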
int f2fs_gc(struct f2fs_sb_info *sbi)
{
	struct list_head ilist;
	unsigned int segno, i;
	int gc_type = BG_GC;
	int nfree = 0;
	int ret = -1;
	struct cp_control cpc = {
		.reason = CP_SYNC,
	};

	INIT_LIST_HEAD(&ilist);
gc_more:
	if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE)))
		goto stop;
	if (unlikely(f2fs_cp_error(sbi)))
		goto stop;

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree)) {
		gc_type = FG_GC;
		write_checkpoint(sbi, &cpc);
	}

	if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE))
		goto stop;
	ret = 0;

	/* readahead multiple SSA blocks that have contiguous addresses */
	if (sbi->segs_per_sec > 1)
		ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno), sbi->segs_per_sec,
								META_SSA);

	for (i = 0; i < sbi->segs_per_sec; i++)
		do_garbage_collect(sbi, segno + i, &ilist, gc_type);

	if (gc_type == FG_GC) {
		sbi->cur_victim_sec = NULL_SEGNO;
		nfree++;
		WARN_ON(get_valid_blocks(sbi, segno, sbi->segs_per_sec));
	}

	if (has_not_enough_free_secs(sbi, nfree))
		goto gc_more;

	if (gc_type == FG_GC)
		write_checkpoint(sbi, &cpc);
stop:
	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&ilist);
	return ret;
}

void build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;
}

int __init create_gc_caches(void)
{
	winode_slab = f2fs_kmem_cache_create("f2fs_gc_inodes",
			sizeof(struct inode_entry));
	if (!winode_slab)
		return -ENOMEM;
	return 0;
}

void destroy_gc_caches(void)
{
	kmem_cache_destroy(winode_slab);
}