/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/blkdev.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *winode_slab;

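/*
 * Background GC kthread. It sleeps between passes, adapting its sleep
 * time to the load: longer when the filesystem is busy or frozen,
 * shorter when plenty of invalid blocks are waiting to be reclaimed.
 * A pass takes gc_mutex via trylock and calls f2fs_gc(), which drops
 * the mutex before returning.
 */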
static int gc_thread_func(void *data)
{
        struct f2fs_sb_info *sbi = data;
        struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
        wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
        long wait_ms;

        wait_ms = gc_th->min_sleep_time;

        do {
                if (try_to_freeze())
                        continue;
                else
                        wait_event_interruptible_timeout(*wq,
                                                kthread_should_stop(),
                                                msecs_to_jiffies(wait_ms));
                if (kthread_should_stop())
                        break;

                if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
                        wait_ms = increase_sleep_time(gc_th, wait_ms);
                        continue;
                }

                /*
                 * [GC triggering condition]
                 * 0. GC is not conducted currently.
                 * 1. There are enough dirty segments.
                 * 2. The IO subsystem is idle, judged by the number of
                 *    writeback pages.
                 * 3. The IO subsystem is idle, judged by the number of
                 *    requests in the bdev's request list.
                 *
                 * Note: we must avoid triggering GC too frequently, since
                 * segments collected now may be invalidated soon afterwards
                 * by user updates or deletions. So wait a while to let
                 * dirty segments accumulate.
                 */
                if (!mutex_trylock(&sbi->gc_mutex))
                        continue;

                if (!is_idle(sbi)) {
                        wait_ms = increase_sleep_time(gc_th, wait_ms);
                        mutex_unlock(&sbi->gc_mutex);
                        continue;
                }

                if (has_enough_invalid_blocks(sbi))
                        wait_ms = decrease_sleep_time(gc_th, wait_ms);
                else
                        wait_ms = increase_sleep_time(gc_th, wait_ms);

                stat_inc_bggc_count(sbi);

                /* if return value is not zero, no victim was selected */
                if (f2fs_gc(sbi))
                        wait_ms = gc_th->no_gc_sleep_time;

                /* balance f2fs metadata periodically */
                f2fs_balance_fs_bg(sbi);

        } while (!kthread_should_stop());
        return 0;
}

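/*
 * Set up the per-sb GC context and spawn the GC kthread, named after
 * the bdev's major:minor numbers. On failure the context is freed and
 * sbi->gc_thread is reset to NULL, so callers can treat a NULL
 * gc_thread as "no background GC".
 */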
int start_gc_thread(struct f2fs_sb_info *sbi)
{
        struct f2fs_gc_kthread *gc_th;
        dev_t dev = sbi->sb->s_bdev->bd_dev;
        int err = 0;

        gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
        if (!gc_th) {
                err = -ENOMEM;
                goto out;
        }

        gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
        gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
        gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

        gc_th->gc_idle = 0;

        sbi->gc_thread = gc_th;
        init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
        sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
                        "f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
        if (IS_ERR(gc_th->f2fs_gc_task)) {
                err = PTR_ERR(gc_th->f2fs_gc_task);
                kfree(gc_th);
                sbi->gc_thread = NULL;
        }
out:
        return err;
}

void stop_gc_thread(struct f2fs_sb_info *sbi)
{
        struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
        if (!gc_th)
                return;
        kthread_stop(gc_th->f2fs_gc_task);
        kfree(gc_th);
        sbi->gc_thread = NULL;
}

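/*
 * Pick a victim selection mode: background GC defaults to cost-benefit
 * (GC_CB) and foreground GC to greedy (GC_GREEDY). The gc_idle tunable
 * overrides the default: 1 forces cost-benefit, 2 forces greedy.
 */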
static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
{
        int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

        if (gc_th && gc_th->gc_idle) {
                if (gc_th->gc_idle == 1)
                        gc_mode = GC_CB;
                else if (gc_th->gc_idle == 2)
                        gc_mode = GC_GREEDY;
        }
        return gc_mode;
}

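/*
 * Fill in the victim selection policy. SSR allocation scans the dirty
 * segmap of the requested type greedily in segment units, while LFS
 * cleaning scans all dirty segments in section units with the mode
 * chosen by select_gc_type(). The scan length is capped by
 * max_victim_search, and scanning resumes from the last victim position.
 */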
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
                        int type, struct victim_sel_policy *p)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

        if (p->alloc_mode == SSR) {
                p->gc_mode = GC_GREEDY;
                p->dirty_segmap = dirty_i->dirty_segmap[type];
                p->max_search = dirty_i->nr_dirty[type];
                p->ofs_unit = 1;
        } else {
                p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
                p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
                p->max_search = dirty_i->nr_dirty[DIRTY];
                p->ofs_unit = sbi->segs_per_sec;
        }

        if (p->max_search > sbi->max_victim_search)
                p->max_search = sbi->max_victim_search;

        p->offset = sbi->last_victim[p->gc_mode];
}

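/*
 * Worst-case cost under the chosen policy; get_victim_by_default()
 * seeds min_cost with this value, so only strictly cheaper candidates
 * are ever selected.
 */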
static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
                                struct victim_sel_policy *p)
{
        /* SSR allocates in a segment unit */
        if (p->alloc_mode == SSR)
                return 1 << sbi->log_blocks_per_seg;
        if (p->gc_mode == GC_GREEDY)
                return (1 << sbi->log_blocks_per_seg) * p->ofs_unit;
        else if (p->gc_mode == GC_CB)
                return UINT_MAX;
        else /* No other gc_mode */
                return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        unsigned int secno;

        /*
         * When gc_type is FG_GC, we can reuse victim sections that were
         * already selected by background GC. Those sections are
         * guaranteed to have few valid blocks.
         */
        for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
                if (sec_usage_check(sbi, secno))
                        continue;
                clear_bit(secno, dirty_i->victim_secmap);
                return secno * sbi->segs_per_sec;
        }
        return NULL_SEGNO;
}

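/*
 * Cost-benefit cost of a section, in the style of the classic LFS
 * cleaning policy. With u the average segment utilization in percent
 * and age the section's modification time normalized to [0, 100] over
 * the observed mtime range, the benefit is
 * 100 * age * (100 - u) / (100 + u), subtracted from UINT_MAX so that
 * the most profitable victim (old and mostly invalid) has minimum cost.
 */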
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
        struct sit_info *sit_i = SIT_I(sbi);
        unsigned int secno = GET_SECNO(sbi, segno);
        unsigned int start = secno * sbi->segs_per_sec;
        unsigned long long mtime = 0;
        unsigned int vblocks;
        unsigned char age = 0;
        unsigned char u;
        unsigned int i;

        for (i = 0; i < sbi->segs_per_sec; i++)
                mtime += get_seg_entry(sbi, start + i)->mtime;
        vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);

        mtime = div_u64(mtime, sbi->segs_per_sec);
        vblocks = div_u64(vblocks, sbi->segs_per_sec);

        u = (vblocks * 100) >> sbi->log_blocks_per_seg;

        /* Handle the case where the system time was changed by the user */
        if (mtime < sit_i->min_mtime)
                sit_i->min_mtime = mtime;
        if (mtime > sit_i->max_mtime)
                sit_i->max_mtime = mtime;
        if (sit_i->max_mtime != sit_i->min_mtime)
                age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
                                sit_i->max_mtime - sit_i->min_mtime);

        return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
                        unsigned int segno, struct victim_sel_policy *p)
{
        if (p->alloc_mode == SSR)
                return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

        /* alloc_mode == LFS */
        if (p->gc_mode == GC_GREEDY)
                return get_valid_blocks(sbi, segno, sbi->segs_per_sec);
        else
                return get_cb_cost(sbi, segno);
}

/*
 * This function is called from two paths.
 * One is garbage collection, the other is SSR segment selection.
 * When called for GC, it just picks a victim segment and does not
 * remove it from the dirty seglist.
 * When called for SSR segment selection, it finds the segment with the
 * fewest valid blocks and removes it from the dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
                unsigned int *result, int gc_type, int type, char alloc_mode)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        struct victim_sel_policy p;
        unsigned int secno, max_cost;
        int nsearched = 0;

        mutex_lock(&dirty_i->seglist_lock);

        p.alloc_mode = alloc_mode;
        select_policy(sbi, gc_type, type, &p);

        p.min_segno = NULL_SEGNO;
        p.min_cost = max_cost = get_max_cost(sbi, &p);

        if (p.alloc_mode == LFS && gc_type == FG_GC) {
                p.min_segno = check_bg_victims(sbi);
                if (p.min_segno != NULL_SEGNO)
                        goto got_it;
        }

        while (1) {
                unsigned long cost;
                unsigned int segno;

                segno = find_next_bit(p.dirty_segmap, MAIN_SEGS(sbi), p.offset);
                if (segno >= MAIN_SEGS(sbi)) {
                        if (sbi->last_victim[p.gc_mode]) {
                                sbi->last_victim[p.gc_mode] = 0;
                                p.offset = 0;
                                continue;
                        }
                        break;
                }

                p.offset = segno + p.ofs_unit;
                if (p.ofs_unit > 1)
                        p.offset -= segno % p.ofs_unit;

                secno = GET_SECNO(sbi, segno);

                if (sec_usage_check(sbi, secno))
                        continue;
                if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
                        continue;

                cost = get_gc_cost(sbi, segno, &p);

                if (p.min_cost > cost) {
                        p.min_segno = segno;
                        p.min_cost = cost;
                } else if (unlikely(cost == max_cost)) {
                        continue;
                }

                if (nsearched++ >= p.max_search) {
                        sbi->last_victim[p.gc_mode] = segno;
                        break;
                }
        }
        if (p.min_segno != NULL_SEGNO) {
got_it:
                if (p.alloc_mode == LFS) {
                        secno = GET_SECNO(sbi, p.min_segno);
                        if (gc_type == FG_GC)
                                sbi->cur_victim_sec = secno;
                        else
                                set_bit(secno, dirty_i->victim_secmap);
                }
                *result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

                trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
                                sbi->cur_victim_sec,
                                prefree_segments(sbi), free_segments(sbi));
        }
        mutex_unlock(&dirty_i->seglist_lock);

        return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
        .get_victim = get_victim_by_default,
};

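/*
 * Look up an inode that was already picked up by a previous GC phase,
 * keyed by inode number in the gc_list radix tree.
 */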
static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
        struct inode_entry *ie;

        ie = radix_tree_lookup(&gc_list->iroot, ino);
        if (ie)
                return ie->inode;
        return NULL;
}

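/*
 * Remember an inode for the data-moving phase. If it is already in the
 * list, the duplicate reference taken by the caller is dropped here;
 * otherwise the inode is added to both the radix tree and the ilist,
 * holding its reference until put_gc_inode().
 */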
static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
        struct inode_entry *new_ie;

        if (inode == find_gc_inode(gc_list, inode->i_ino)) {
                iput(inode);
                return;
        }
        new_ie = f2fs_kmem_cache_alloc(winode_slab, GFP_NOFS);
        new_ie->inode = inode;
retry:
        if (radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie)) {
                cond_resched();
                goto retry;
        }
        list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
        struct inode_entry *ie, *next_ie;
        list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
                radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
                iput(ie->inode);
                list_del(&ie->list);
                kmem_cache_free(winode_slab, ie);
        }
}

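/*
 * Check the SIT current-valid bitmap, under sentry_lock, to see whether
 * the block at @offset within @segno still holds valid data.
 */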
static int check_valid_map(struct f2fs_sb_info *sbi,
                                unsigned int segno, int offset)
{
        struct sit_info *sit_i = SIT_I(sbi);
        struct seg_entry *sentry;
        int ret;

        mutex_lock(&sit_i->sentry_lock);
        sentry = get_seg_entry(sbi, segno);
        ret = f2fs_test_bit(offset, sentry->cur_valid_map);
        mutex_unlock(&sit_i->sentry_lock);
        return ret;
}

/*
 * This function compares the node address recorded in the summary with
 * the one in the NAT. If they match, the node is copied with cold
 * status; otherwise (an invalid node) it is ignored.
 */
static void gc_node_segment(struct f2fs_sb_info *sbi,
                struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
        bool initial = true;
        struct f2fs_summary *entry;
        int off;

next_step:
        entry = sum;

        for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
                nid_t nid = le32_to_cpu(entry->nid);
                struct page *node_page;

                /* stop BG_GC if there are not enough free sections */
                if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
                        return;

                if (check_valid_map(sbi, segno, off) == 0)
                        continue;

                if (initial) {
                        ra_node_page(sbi, nid);
                        continue;
                }
                node_page = get_node_page(sbi, nid);
                if (IS_ERR(node_page))
                        continue;

                /* block may become invalid during get_node_page */
                if (check_valid_map(sbi, segno, off) == 0) {
                        f2fs_put_page(node_page, 1);
                        continue;
                }

                /* set page dirty and write it */
                if (gc_type == FG_GC) {
                        f2fs_wait_on_page_writeback(node_page, NODE);
                        set_page_dirty(node_page);
                } else {
                        if (!PageWriteback(node_page))
                                set_page_dirty(node_page);
                }
                f2fs_put_page(node_page, 1);
                stat_inc_node_blk_count(sbi, 1);
        }

        if (initial) {
                initial = false;
                goto next_step;
        }

        if (gc_type == FG_GC) {
                struct writeback_control wbc = {
                        .sync_mode = WB_SYNC_ALL,
                        .nr_to_write = LONG_MAX,
                        .for_reclaim = 0,
                };
                sync_node_pages(sbi, 0, &wbc);

                /*
                 * In the case of FG_GC, it'd be better to reclaim this
                 * victim completely.
                 */
                if (get_valid_blocks(sbi, segno, 1) != 0)
                        goto next_step;
        }
}

/*
 * Calculate the start block index of the file area covered by the given
 * node offset. Be careful: the caller must pass a node offset that
 * refers to a direct node block only. Passing an offset that points to
 * any other node block type, such as an indirect or double indirect
 * node block, is a caller bug.
 */
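/*
 * For example (assuming the node-offset layout this code encodes, where
 * the inode is offset 0, the two direct nodes are offsets 1 and 2, and
 * offset 4 is the first direct node under the first indirect node):
 * node_ofs == 4 gives dec == 0 and bidx == 2, so the result is
 * 2 * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi).
 */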
block_t start_bidx_of_node(unsigned int node_ofs, struct f2fs_inode_info *fi)
{
        unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
        unsigned int bidx;

        if (node_ofs == 0)
                return 0;

        if (node_ofs <= 2) {
                bidx = node_ofs - 1;
        } else if (node_ofs <= indirect_blks) {
                int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
                bidx = node_ofs - 2 - dec;
        } else {
                int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
                bidx = node_ofs - 5 - dec;
        }
        return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi);
}

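/*
 * Check that the dnode referenced by the summary entry is still live
 * and still points at @blkaddr. Returns 1 and fills @dni and @nofs on
 * success, 0 if the node is stale or unreadable.
 */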
static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
                struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
        struct page *node_page;
        nid_t nid;
        unsigned int ofs_in_node;
        block_t source_blkaddr;

        nid = le32_to_cpu(sum->nid);
        ofs_in_node = le16_to_cpu(sum->ofs_in_node);

        node_page = get_node_page(sbi, nid);
        if (IS_ERR(node_page))
                return 0;

        get_node_info(sbi, nid, dni);

        if (sum->version != dni->version) {
                f2fs_put_page(node_page, 1);
                return 0;
        }

        *nofs = ofs_of_node(node_page);
        source_blkaddr = datablock_addr(node_page, ofs_in_node);
        f2fs_put_page(node_page, 1);

        if (source_blkaddr != blkaddr)
                return 0;
        return 1;
}

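/*
 * Move one data page out of the victim segment. Background GC only
 * marks the page cold and dirty, deferring the actual write; foreground
 * GC waits for any in-flight writeback and rewrites the page
 * synchronously so the segment can be reclaimed right away.
 */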
static void move_data_page(struct inode *inode, struct page *page, int gc_type)
{
        struct f2fs_io_info fio = {
                .type = DATA,
                .rw = WRITE_SYNC,
        };

        if (gc_type == BG_GC) {
                if (PageWriteback(page))
                        goto out;
                set_page_dirty(page);
                set_cold_data(page);
        } else {
                f2fs_wait_on_page_writeback(page, DATA);

                if (clear_page_dirty_for_io(page))
                        inode_dec_dirty_pages(inode);
                set_cold_data(page);
                do_write_data_page(page, &fio);
                clear_cold_data(page);
        }
out:
        f2fs_put_page(page, 1);
}

/*
 * This function tries to get the parent node of a victim data block and
 * checks the block's validity. If the block is valid, it is copied with
 * cold status and the parent node is updated.
 * If the parent node is not valid or the data block address differs,
 * the victim data block is ignored.
 */
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
                struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
        struct super_block *sb = sbi->sb;
        struct f2fs_summary *entry;
        block_t start_addr;
        int off;
        int phase = 0;

        start_addr = START_BLOCK(sbi, segno);

next_step:
        entry = sum;

        for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
                struct page *data_page;
                struct inode *inode;
                struct node_info dni; /* dnode info for the data */
                unsigned int ofs_in_node, nofs;
                block_t start_bidx;

                /* stop BG_GC if there are not enough free sections */
                if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
                        return;

                if (check_valid_map(sbi, segno, off) == 0)
                        continue;

                if (phase == 0) {
                        ra_node_page(sbi, le32_to_cpu(entry->nid));
                        continue;
                }

                /* Get an inode by ino after checking dnode validity */
                if (check_dnode(sbi, entry, &dni, start_addr + off, &nofs) == 0)
                        continue;

                if (phase == 1) {
                        ra_node_page(sbi, dni.ino);
                        continue;
                }

                ofs_in_node = le16_to_cpu(entry->ofs_in_node);

                if (phase == 2) {
                        inode = f2fs_iget(sb, dni.ino);
                        if (IS_ERR(inode) || is_bad_inode(inode))
                                continue;

                        start_bidx = start_bidx_of_node(nofs, F2FS_I(inode));

                        data_page = find_data_page(inode,
                                        start_bidx + ofs_in_node, false);
                        if (IS_ERR(data_page)) {
                                iput(inode);
                                continue;
                        }

                        f2fs_put_page(data_page, 0);
                        add_gc_inode(gc_list, inode);
                        continue;
                }

                /* phase 3 */
                inode = find_gc_inode(gc_list, dni.ino);
                if (inode) {
                        start_bidx = start_bidx_of_node(nofs, F2FS_I(inode));
                        data_page = get_lock_data_page(inode,
                                                start_bidx + ofs_in_node);
                        if (IS_ERR(data_page))
                                continue;
                        move_data_page(inode, data_page, gc_type);
                        stat_inc_data_blk_count(sbi, 1);
                }
        }

        if (++phase < 4)
                goto next_step;

        if (gc_type == FG_GC) {
                f2fs_submit_merged_bio(sbi, DATA, WRITE);

                /*
                 * In the case of FG_GC, it'd be better to reclaim this
                 * victim completely.
                 */
                if (get_valid_blocks(sbi, segno, 1) != 0) {
                        phase = 2;
                        goto next_step;
                }
        }
}

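/*
 * Select a GC victim via the registered victim_selection ops, taking
 * sentry_lock so SIT state stays stable during selection.
 */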
static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
                        int gc_type)
{
        struct sit_info *sit_i = SIT_I(sbi);
        int ret;

        mutex_lock(&sit_i->sentry_lock);
        ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
                                              NO_CHECK_TYPE, LFS);
        mutex_unlock(&sit_i->sentry_lock);
        return ret;
}

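/*
 * Collect one victim segment: read its summary block to learn whether
 * it holds node or data blocks, then migrate the live blocks with the
 * matching helper. IO is batched under a block plug.
 */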
static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
                                struct gc_inode_list *gc_list, int gc_type)
{
        struct page *sum_page;
        struct f2fs_summary_block *sum;
        struct blk_plug plug;

        /* read the victim's segment summary */
        sum_page = get_sum_page(sbi, segno);

        blk_start_plug(&plug);

        sum = page_address(sum_page);

        switch (GET_SUM_TYPE((&sum->footer))) {
        case SUM_TYPE_NODE:
                gc_node_segment(sbi, sum->entries, segno, gc_type);
                break;
        case SUM_TYPE_DATA:
                gc_data_segment(sbi, sum->entries, gc_list, segno, gc_type);
                break;
        }
        blk_finish_plug(&plug);

        stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)));
        stat_inc_call_count(sbi->stat_info);

        f2fs_put_page(sum_page, 1);
}

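/*
 * Main GC entry point; called with gc_mutex held, which is released
 * before returning. Starts as BG_GC and escalates to FG_GC when free
 * sections run low, looping until enough sections are freed. Returns 0
 * if at least one victim was collected, -1 otherwise.
 */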
int f2fs_gc(struct f2fs_sb_info *sbi)
{
        unsigned int segno, i;
        int gc_type = BG_GC;
        int nfree = 0;
        int ret = -1;
        struct cp_control cpc;
        struct gc_inode_list gc_list = {
                .ilist = LIST_HEAD_INIT(gc_list.ilist),
                .iroot = RADIX_TREE_INIT(GFP_NOFS),
        };

        cpc.reason = test_opt(sbi, FASTBOOT) ? CP_UMOUNT : CP_SYNC;

gc_more:
        if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE)))
                goto stop;
        if (unlikely(f2fs_cp_error(sbi)))
                goto stop;

        if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree)) {
                gc_type = FG_GC;
                write_checkpoint(sbi, &cpc);
        }

        if (!__get_victim(sbi, &segno, gc_type))
                goto stop;
        ret = 0;

        /* readahead multiple SSA blocks that have contiguous addresses */
        if (sbi->segs_per_sec > 1)
                ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno), sbi->segs_per_sec,
                                                                META_SSA);

        for (i = 0; i < sbi->segs_per_sec; i++)
                do_garbage_collect(sbi, segno + i, &gc_list, gc_type);

        if (gc_type == FG_GC) {
                sbi->cur_victim_sec = NULL_SEGNO;
                nfree++;
                WARN_ON(get_valid_blocks(sbi, segno, sbi->segs_per_sec));
        }

        if (has_not_enough_free_secs(sbi, nfree))
                goto gc_more;

        if (gc_type == FG_GC)
                write_checkpoint(sbi, &cpc);
stop:
        mutex_unlock(&sbi->gc_mutex);

        put_gc_inode(&gc_list);
        return ret;
}

void build_gc_manager(struct f2fs_sb_info *sbi)
{
        DIRTY_I(sbi)->v_ops = &default_v_ops;
}

int __init create_gc_caches(void)
{
        winode_slab = f2fs_kmem_cache_create("f2fs_gc_inodes",
                        sizeof(struct inode_entry));
        if (!winode_slab)
                return -ENOMEM;
        return 0;
}

void destroy_gc_caches(void)
{
        kmem_cache_destroy(winode_slab);
}