/*
 * fs/f2fs/segment.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/kthread.h>
#include <linux/swap.h>
#include <linux/timer.h>

#include "f2fs.h"
#include "segment.h"
#include "node.h"
#include "trace.h"
#include <trace/events/f2fs.h>

#define __reverse_ffz(x) __reverse_ffs(~(x))

static struct kmem_cache *discard_entry_slab;
static struct kmem_cache *bio_entry_slab;
static struct kmem_cache *sit_entry_set_slab;
static struct kmem_cache *inmem_entry_slab;

static unsigned long __reverse_ulong(unsigned char *str)
{
	unsigned long tmp = 0;
	int shift = 24, idx = 0;

#if BITS_PER_LONG == 64
	shift = 56;
#endif
	while (shift >= 0) {
		tmp |= (unsigned long)str[idx++] << shift;
		shift -= BITS_PER_BYTE;
	}
	return tmp;
}

/*
 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
 * MSB and LSB are reversed in a byte by f2fs_set_bit.
 */
static inline unsigned long __reverse_ffs(unsigned long word)
{
	int num = 0;

#if BITS_PER_LONG == 64
	if ((word & 0xffffffff00000000UL) == 0)
		num += 32;
	else
		word >>= 32;
#endif
	if ((word & 0xffff0000) == 0)
		num += 16;
	else
		word >>= 16;

	if ((word & 0xff00) == 0)
		num += 8;
	else
		word >>= 8;

	if ((word & 0xf0) == 0)
		num += 4;
	else
		word >>= 4;

	if ((word & 0xc) == 0)
		num += 2;
	else
		word >>= 2;

	if ((word & 0x2) == 0)
		num += 1;
	return num;
}

/*
 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
 * f2fs_set_bit makes MSB and LSB reversed in a byte.
 * @size must be an integral multiple of BITS_PER_LONG.
 * Example:
 *                             MSB <--> LSB
 *   f2fs_set_bit(0, bitmap) => 1000 0000
 *   f2fs_set_bit(7, bitmap) => 0000 0001
 */
static unsigned long __find_rev_next_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		if (*p == 0)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		tmp &= ~0UL >> offset;
		if (size < BITS_PER_LONG)
			tmp &= (~0UL << (BITS_PER_LONG - size));
		if (tmp)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffs(tmp);
}

static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		if (*p == ~0UL)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		if (offset)
			tmp |= ~0UL << (BITS_PER_LONG - offset);
		if (size < BITS_PER_LONG)
			tmp |= ~0UL >> size;
		if (tmp != ~0UL)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffz(tmp);
}
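
/*
 * Illustrative user-space sketch (an addition for exposition, not part of
 * f2fs): it re-implements the MSB-first bit order described above to show
 * why the generic find_next_bit() cannot be used on these bitmaps. All
 * names are local to this sketch.
 */
#if 0
#include <stdio.h>

/* same convention as f2fs_set_bit(): bit 0 is the MSB of byte 0 */
static void sketch_set_bit(unsigned int nr, unsigned char *addr)
{
	addr[nr >> 3] |= 1 << (7 - (nr & 7));
}

static int sketch_test_bit(unsigned int nr, const unsigned char *addr)
{
	return (addr[nr >> 3] >> (7 - (nr & 7))) & 1;
}

int main(void)
{
	unsigned char bitmap[2] = { 0, 0 };
	unsigned int i;

	sketch_set_bit(0, bitmap);	/* byte 0 becomes 1000 0000 */
	sketch_set_bit(7, bitmap);	/* byte 0 becomes 1000 0001 */
	sketch_set_bit(9, bitmap);	/* byte 1 becomes 0100 0000 */

	printf("%02x %02x\n", bitmap[0], bitmap[1]);	/* prints: 81 40 */

	for (i = 0; i < 16; i++)
		if (sketch_test_bit(i, bitmap))
			printf("bit %u is set\n", i);	/* 0, 7, 9 */
	return 0;
}
#endif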

void register_inmem_page(struct inode *inode, struct page *page)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inmem_pages *new;

	f2fs_trace_pid(page);

	set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
	SetPagePrivate(page);

	new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);

	/* add atomic page indices to the list */
	new->page = page;
	INIT_LIST_HEAD(&new->list);

	/* increase reference count with clean state */
	mutex_lock(&fi->inmem_lock);
	get_page(page);
	list_add_tail(&new->list, &fi->inmem_pages);
	inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	mutex_unlock(&fi->inmem_lock);

	trace_f2fs_register_inmem_page(page, INMEM);
}

static int __revoke_inmem_pages(struct inode *inode,
				struct list_head *head, bool drop, bool recover)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct inmem_pages *cur, *tmp;
	int err = 0;

	list_for_each_entry_safe(cur, tmp, head, list) {
		struct page *page = cur->page;

		if (drop)
			trace_f2fs_commit_inmem_page(page, INMEM_DROP);

		lock_page(page);

		if (recover) {
			struct dnode_of_data dn;
			struct node_info ni;

			trace_f2fs_commit_inmem_page(page, INMEM_REVOKE);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			if (get_dnode_of_data(&dn, page->index, LOOKUP_NODE)) {
				err = -EAGAIN;
				goto next;
			}
			get_node_info(sbi, dn.nid, &ni);
			f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					cur->old_addr, ni.version, true, true);
			f2fs_put_dnode(&dn);
		}
next:
		/* no need to invalidate this page on the successful path */
		if (drop || recover)
			ClearPageUptodate(page);
		set_page_private(page, 0);
		ClearPagePrivate(page);
		f2fs_put_page(page, 1);

		list_del(&cur->list);
		kmem_cache_free(inmem_entry_slab, cur);
		dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	}
	return err;
}

void drop_inmem_pages(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	clear_inode_flag(inode, FI_ATOMIC_FILE);

	mutex_lock(&fi->inmem_lock);
	__revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
	mutex_unlock(&fi->inmem_lock);
}

static int __commit_inmem_pages(struct inode *inode,
					struct list_head *revoke_list)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inmem_pages *cur, *tmp;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = WRITE_SYNC | REQ_PRIO,
		.encrypted_page = NULL,
	};
	bool submit_bio = false;
	int err = 0;

	list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
		struct page *page = cur->page;

		lock_page(page);
		if (page->mapping == inode->i_mapping) {
			trace_f2fs_commit_inmem_page(page, INMEM);

			set_page_dirty(page);
			f2fs_wait_on_page_writeback(page, DATA, true);
			if (clear_page_dirty_for_io(page))
				inode_dec_dirty_pages(inode);

			fio.page = page;
			err = do_write_data_page(&fio);
			if (err) {
				unlock_page(page);
				break;
			}

			/* record old blkaddr for revoking */
			cur->old_addr = fio.old_blkaddr;

			clear_cold_data(page);
			submit_bio = true;
		}
		unlock_page(page);
		list_move_tail(&cur->list, revoke_list);
	}

	if (submit_bio)
		f2fs_submit_merged_bio_cond(sbi, inode, NULL, 0, DATA, WRITE);

	if (!err)
		__revoke_inmem_pages(inode, revoke_list, false, false);

	return err;
}

int commit_inmem_pages(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct list_head revoke_list;
	int err;

	INIT_LIST_HEAD(&revoke_list);
	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);

	mutex_lock(&fi->inmem_lock);
	err = __commit_inmem_pages(inode, &revoke_list);
	if (err) {
		int ret;
		/*
		 * Try to revoke all the committed pages. This can still fail
		 * due to lack of memory or some other reason, in which case
		 * -EAGAIN is returned: the transaction has lost its integrity,
		 * so the caller should use a journal to recover, or rewrite
		 * and commit the last transaction. For any other error number,
		 * the revoking was completed by the filesystem itself.
		 */
		ret = __revoke_inmem_pages(inode, &revoke_list, false, true);
		if (ret)
			err = ret;

		/* drop all uncommitted pages */
		__revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
	}
	mutex_unlock(&fi->inmem_lock);

	f2fs_unlock_op(sbi);
	return err;
}
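
/*
 * Illustrative user-space sketch (an addition for exposition, not part of
 * f2fs): the in-memory pages above back f2fs's atomic-write ioctls. A
 * database-style caller brackets its updates between
 * F2FS_IOC_START_ATOMIC_WRITE and F2FS_IOC_COMMIT_ATOMIC_WRITE; writes in
 * between are staged via register_inmem_page() and only reach disk in
 * commit_inmem_pages(). The ioctl numbers below are assumed to match the
 * definitions in f2fs.h of this era.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define F2FS_IOCTL_MAGIC		0xf5
#define F2FS_IOC_START_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 1)
#define F2FS_IOC_COMMIT_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 2)

int main(void)
{
	const char buf[] = "journal record";
	int fd = open("/mnt/f2fs/db.wal", O_RDWR | O_CREAT, 0600);

	if (fd < 0 || ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE) < 0) {
		perror("start atomic write");
		return 1;
	}

	/* staged in fi->inmem_pages; nothing is written back yet */
	if (write(fd, buf, strlen(buf)) < 0)
		perror("write");

	/* triggers commit_inmem_pages(): all-or-nothing on success */
	if (ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE) < 0)
		perror("commit atomic write");

	close(fd);
	return 0;
}
#endif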

/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
{
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(sbi, FAULT_CHECKPOINT))
		f2fs_stop_checkpoint(sbi, false);
#endif

	if (!need)
		return;

	/* balance_fs_bg work may already be pending */
	if (excess_cached_nats(sbi))
		f2fs_balance_fs_bg(sbi);

	/*
	 * We should do GC, or end up with a checkpoint, if there are
	 * too many dirty dir/node pages and not enough free segments.
	 */
	if (has_not_enough_free_secs(sbi, 0, 0)) {
		mutex_lock(&sbi->gc_mutex);
		f2fs_gc(sbi, false);
	}
}

void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
{
	/* try to shrink the extent cache when there is not enough memory */
	if (!available_free_memory(sbi, EXTENT_CACHE))
		f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);

	/* check the # of cached NAT entries */
	if (!available_free_memory(sbi, NAT_ENTRIES))
		try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);

	if (!available_free_memory(sbi, FREE_NIDS))
		try_to_free_nids(sbi, MAX_FREE_NIDS);
	else
		build_free_nids(sbi);

	/* checkpoint is the only way to shrink partial cached entries */
	if (!available_free_memory(sbi, NAT_ENTRIES) ||
			!available_free_memory(sbi, INO_ENTRIES) ||
			excess_prefree_segs(sbi) ||
			excess_dirty_nats(sbi) ||
			(is_idle(sbi) && f2fs_time_over(sbi, CP_TIME))) {
		if (test_opt(sbi, DATA_FLUSH)) {
			struct blk_plug plug;

			blk_start_plug(&plug);
			sync_dirty_inodes(sbi, FILE_INODE);
			blk_finish_plug(&plug);
		}
		f2fs_sync_fs(sbi->sb, true);
		stat_inc_bg_cp_count(sbi->stat_info);
	}
}

static int issue_flush_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
	wait_queue_head_t *q = &fcc->flush_wait_queue;
repeat:
	if (kthread_should_stop())
		return 0;

	if (!llist_empty(&fcc->issue_list)) {
		struct bio *bio;
		struct flush_cmd *cmd, *next;
		int ret;

		bio = f2fs_bio_alloc(0);

		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);

		bio->bi_bdev = sbi->sb->s_bdev;
		bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
		ret = submit_bio_wait(bio);

		llist_for_each_entry_safe(cmd, next,
					  fcc->dispatch_list, llnode) {
			cmd->ret = ret;
			complete(&cmd->wait);
		}
		bio_put(bio);
		fcc->dispatch_list = NULL;
	}

	wait_event_interruptible(*q,
		kthread_should_stop() || !llist_empty(&fcc->issue_list));
	goto repeat;
}

int f2fs_issue_flush(struct f2fs_sb_info *sbi)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
	struct flush_cmd cmd;

	trace_f2fs_issue_flush(sbi->sb, test_opt(sbi, NOBARRIER),
					test_opt(sbi, FLUSH_MERGE));

	if (test_opt(sbi, NOBARRIER))
		return 0;

	if (!test_opt(sbi, FLUSH_MERGE) || !atomic_read(&fcc->submit_flush)) {
		struct bio *bio = f2fs_bio_alloc(0);
		int ret;

		atomic_inc(&fcc->submit_flush);
		bio->bi_bdev = sbi->sb->s_bdev;
		bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
		ret = submit_bio_wait(bio);
		atomic_dec(&fcc->submit_flush);
		bio_put(bio);
		return ret;
	}

	init_completion(&cmd.wait);

	atomic_inc(&fcc->submit_flush);
	llist_add(&cmd.llnode, &fcc->issue_list);

	if (!fcc->dispatch_list)
		wake_up(&fcc->flush_wait_queue);

	wait_for_completion(&cmd.wait);
	atomic_dec(&fcc->submit_flush);

	return cmd.ret;
}
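
/*
 * Illustrative user-space sketch (an addition for exposition, not part of
 * f2fs): the FLUSH_MERGE pattern above in miniature. Producers push onto a
 * lock-free list (the kernel's llist), one consumer detaches the whole
 * batch with a single atomic exchange, performs one flush on behalf of
 * everyone, and completes all waiters with the shared result. Names are
 * local to this sketch; the real code uses a kthread and a waitqueue
 * instead of the one-shot flusher below.
 */
#if 0
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

struct flush_req {
	struct flush_req *next;
	int ret;
	int completed;
	pthread_mutex_t lock;
	pthread_cond_t done;
};

static _Atomic(struct flush_req *) issue_list;

static void *submitter(void *arg)
{
	struct flush_req req = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.done = PTHREAD_COND_INITIALIZER,
	};

	/* llist_add(): lock-free push onto the shared list */
	req.next = atomic_load(&issue_list);
	while (!atomic_compare_exchange_weak(&issue_list, &req.next, &req))
		;

	pthread_mutex_lock(&req.lock);
	while (!req.completed)
		pthread_cond_wait(&req.done, &req.lock);
	pthread_mutex_unlock(&req.lock);
	printf("merged flush returned %d\n", req.ret);
	return NULL;
}

static void flush_batch(void)
{
	/* llist_del_all(): detach the whole batch at once */
	struct flush_req *r = atomic_exchange(&issue_list, NULL);
	int ret = 0;		/* one real flush would be issued here */

	while (r) {
		/* the waiter may return once completed, so save next first */
		struct flush_req *next = r->next;

		pthread_mutex_lock(&r->lock);
		r->ret = ret;
		r->completed = 1;
		pthread_cond_signal(&r->done);
		pthread_mutex_unlock(&r->lock);
		r = next;
	}
}

int main(void)
{
	pthread_t t[3];
	int i;

	for (i = 0; i < 3; i++)
		pthread_create(&t[i], NULL, submitter, NULL);
	sleep(1);		/* let all three enqueue, then flush once */
	flush_batch();
	for (i = 0; i < 3; i++)
		pthread_join(t[i], NULL);
	return 0;
}
#endif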

int create_flush_cmd_control(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	struct flush_cmd_control *fcc;
	int err = 0;

	fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
	if (!fcc)
		return -ENOMEM;
	atomic_set(&fcc->submit_flush, 0);
	init_waitqueue_head(&fcc->flush_wait_queue);
	init_llist_head(&fcc->issue_list);
	SM_I(sbi)->cmd_control_info = fcc;
	fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(fcc->f2fs_issue_flush)) {
		err = PTR_ERR(fcc->f2fs_issue_flush);
		kfree(fcc);
		SM_I(sbi)->cmd_control_info = NULL;
		return err;
	}

	return err;
}

void destroy_flush_cmd_control(struct f2fs_sb_info *sbi)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;

	if (fcc && fcc->f2fs_issue_flush)
		kthread_stop(fcc->f2fs_issue_flush);
	kfree(fcc);
	SM_I(sbi)->cmd_control_info = NULL;
}

static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	/* need not be added */
	if (IS_CURSEG(sbi, segno))
		return;

	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]++;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (unlikely(t >= DIRTY)) {
			f2fs_bug_on(sbi, 1);
			return;
		}
		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]++;
	}
}

static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]--;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]--;

		if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
			clear_bit(GET_SECNO(sbi, segno),
						dirty_i->victim_secmap);
	}
}

/*
 * This should not raise errors such as -ENOMEM, since adding a dirty
 * entry to the seglist is not a critical operation.
 * If a given segment is one of the current working segments, it won't be added.
 */
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned short valid_blocks;

	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
		return;

	mutex_lock(&dirty_i->seglist_lock);

	valid_blocks = get_valid_blocks(sbi, segno, 0);

	if (valid_blocks == 0) {
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	} else if (valid_blocks < sbi->blocks_per_seg) {
		__locate_dirty_segment(sbi, segno, DIRTY);
	} else {
		/* Recovery routine with SSR needs this */
		__remove_dirty_segment(sbi, segno, DIRTY);
	}

	mutex_unlock(&dirty_i->seglist_lock);
}

static struct bio_entry *__add_bio_entry(struct f2fs_sb_info *sbi,
							struct bio *bio)
{
	struct list_head *wait_list = &(SM_I(sbi)->wait_list);
	struct bio_entry *be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS);

	INIT_LIST_HEAD(&be->list);
	be->bio = bio;
	init_completion(&be->event);
	list_add_tail(&be->list, wait_list);

	return be;
}

void f2fs_wait_all_discard_bio(struct f2fs_sb_info *sbi)
{
	struct list_head *wait_list = &(SM_I(sbi)->wait_list);
	struct bio_entry *be, *tmp;

	list_for_each_entry_safe(be, tmp, wait_list, list) {
		struct bio *bio = be->bio;
		int err;

		wait_for_completion_io(&be->event);
		err = be->error;
		if (err == -EOPNOTSUPP)
			err = 0;

		if (err)
			f2fs_msg(sbi->sb, KERN_INFO,
				"Issue discard failed, ret: %d", err);

		bio_put(bio);
		list_del(&be->list);
		kmem_cache_free(bio_entry_slab, be);
	}
}

static void f2fs_submit_bio_wait_endio(struct bio *bio)
{
	struct bio_entry *be = (struct bio_entry *)bio->bi_private;

	be->error = bio->bi_error;
	complete(&be->event);
}

/* this function is copied from blkdev_issue_discard in block/blk-lib.c */
int __f2fs_issue_discard_async(struct f2fs_sb_info *sbi, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	struct bio *bio = NULL;
	int err;

	err = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!err && bio) {
		struct bio_entry *be = __add_bio_entry(sbi, bio);

		bio->bi_private = be;
		bio->bi_end_io = f2fs_submit_bio_wait_endio;
		bio->bi_opf |= REQ_SYNC;
		submit_bio(bio);
	}

	return err;
}

static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
				block_t blkstart, block_t blklen)
{
	sector_t start = SECTOR_FROM_BLOCK(blkstart);
	sector_t len = SECTOR_FROM_BLOCK(blklen);
	struct seg_entry *se;
	unsigned int offset;
	block_t i;

	for (i = blkstart; i < blkstart + blklen; i++) {
		se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
		offset = GET_BLKOFF_FROM_SEG0(sbi, i);

		if (!f2fs_test_and_set_bit(offset, se->discard_map))
			sbi->discard_blks--;
	}
	trace_f2fs_issue_discard(sbi->sb, blkstart, blklen);
	return __f2fs_issue_discard_async(sbi, start, len, GFP_NOFS, 0);
}

static void __add_discard_entry(struct f2fs_sb_info *sbi,
		struct cp_control *cpc, struct seg_entry *se,
		unsigned int start, unsigned int end)
{
	struct list_head *head = &SM_I(sbi)->discard_list;
	struct discard_entry *new, *last;

	if (!list_empty(head)) {
		last = list_last_entry(head, struct discard_entry, list);
		if (START_BLOCK(sbi, cpc->trim_start) + start ==
						last->blkaddr + last->len) {
			last->len += end - start;
			goto done;
		}
	}

	new = f2fs_kmem_cache_alloc(discard_entry_slab, GFP_NOFS);
	INIT_LIST_HEAD(&new->list);
	new->blkaddr = START_BLOCK(sbi, cpc->trim_start) + start;
	new->len = end - start;
	list_add_tail(&new->list, head);
done:
	SM_I(sbi)->nr_discards += end - start;
}

static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
	int max_blocks = sbi->blocks_per_seg;
	struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
	unsigned long *discard_map = (unsigned long *)se->discard_map;
	unsigned long *dmap = SIT_I(sbi)->tmp_map;
	unsigned int start = 0, end = -1;
	bool force = (cpc->reason == CP_DISCARD);
	int i;

	if (se->valid_blocks == max_blocks || !f2fs_discard_en(sbi))
		return;

	if (!force) {
		if (!test_opt(sbi, DISCARD) || !se->valid_blocks ||
		    SM_I(sbi)->nr_discards >= SM_I(sbi)->max_discards)
			return;
	}

	/* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */
	for (i = 0; i < entries; i++)
		dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
				(cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];

	while (force || SM_I(sbi)->nr_discards <= SM_I(sbi)->max_discards) {
		start = __find_rev_next_bit(dmap, max_blocks, end + 1);
		if (start >= max_blocks)
			break;

		end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
		if (force && start && end != max_blocks
					&& (end - start) < cpc->trim_minlen)
			continue;

		__add_discard_entry(sbi, cpc, se, start, end);
	}
}
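
/*
 * Illustrative sketch (an addition for exposition, not part of f2fs): in
 * the non-force case above, (cur_map ^ ckpt_map) & ckpt_map selects blocks
 * that were valid at the last checkpoint but are free now, i.e. blocks
 * that are safe to discard. Worked on a single word with made-up values:
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long cur  = 0xa;	/* 1010: valid now           */
	unsigned long ckpt = 0xc;	/* 1100: valid at checkpoint */

	/* bit 2 was valid at checkpoint and is free now -> candidate */
	unsigned long dmap = (cur ^ ckpt) & ckpt;

	printf("dmap = %#lx\n", dmap);	/* prints: dmap = 0x4 */
	return 0;
}
#endif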

void release_discard_addrs(struct f2fs_sb_info *sbi)
{
	struct list_head *head = &(SM_I(sbi)->discard_list);
	struct discard_entry *entry, *this;

	/* drop caches */
	list_for_each_entry_safe(entry, this, head, list) {
		list_del(&entry->list);
		kmem_cache_free(discard_entry_slab, entry);
	}
}

/*
 * Should call clear_prefree_segments after checkpoint is done.
 */
static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
		__set_test_and_free(sbi, segno);
	mutex_unlock(&dirty_i->seglist_lock);
}

void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct list_head *head = &(SM_I(sbi)->discard_list);
	struct discard_entry *entry, *this;
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct blk_plug plug;
	unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
	unsigned int start = 0, end = -1;
	unsigned int secno, start_segno;
	bool force = (cpc->reason == CP_DISCARD);

	blk_start_plug(&plug);

	mutex_lock(&dirty_i->seglist_lock);

	while (1) {
		int i;
		start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
		if (start >= MAIN_SEGS(sbi))
			break;
		end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
								start + 1);

		for (i = start; i < end; i++)
			clear_bit(i, prefree_map);

		dirty_i->nr_dirty[PRE] -= end - start;

		if (force || !test_opt(sbi, DISCARD))
			continue;

		if (!test_opt(sbi, LFS) || sbi->segs_per_sec == 1) {
			f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
				(end - start) << sbi->log_blocks_per_seg);
			continue;
		}
next:
		secno = GET_SECNO(sbi, start);
		start_segno = secno * sbi->segs_per_sec;
		if (!IS_CURSEC(sbi, secno) &&
			!get_valid_blocks(sbi, start, sbi->segs_per_sec))
			f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
				sbi->segs_per_sec << sbi->log_blocks_per_seg);

		start = start_segno + sbi->segs_per_sec;
		if (start < end)
			goto next;
	}
	mutex_unlock(&dirty_i->seglist_lock);

	/* send small discards */
	list_for_each_entry_safe(entry, this, head, list) {
		if (force && entry->len < cpc->trim_minlen)
			goto skip;
		f2fs_issue_discard(sbi, entry->blkaddr, entry->len);
		cpc->trimmed += entry->len;
skip:
		list_del(&entry->list);
		SM_I(sbi)->nr_discards -= entry->len;
		kmem_cache_free(discard_entry_slab, entry);
	}

	blk_finish_plug(&plug);
}

static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);

	if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
		sit_i->dirty_sentries++;
		return false;
	}

	return true;
}

static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
					unsigned int segno, int modified)
{
	struct seg_entry *se = get_seg_entry(sbi, segno);
	se->type = type;
	if (modified)
		__mark_sit_entry_dirty(sbi, segno);
}

static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
{
	struct seg_entry *se;
	unsigned int segno, offset;
	long int new_vblocks;

	segno = GET_SEGNO(sbi, blkaddr);

	se = get_seg_entry(sbi, segno);
	new_vblocks = se->valid_blocks + del;
	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

	f2fs_bug_on(sbi, (new_vblocks >> (sizeof(unsigned short) << 3) ||
				(new_vblocks > sbi->blocks_per_seg)));

	se->valid_blocks = new_vblocks;
	se->mtime = get_mtime(sbi);
	SIT_I(sbi)->max_mtime = se->mtime;

	/* Update valid block bitmap */
	if (del > 0) {
		if (f2fs_test_and_set_bit(offset, se->cur_valid_map))
			f2fs_bug_on(sbi, 1);
		if (f2fs_discard_en(sbi) &&
			!f2fs_test_and_set_bit(offset, se->discard_map))
			sbi->discard_blks--;
	} else {
		if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map))
			f2fs_bug_on(sbi, 1);
		if (f2fs_discard_en(sbi) &&
			f2fs_test_and_clear_bit(offset, se->discard_map))
			sbi->discard_blks++;
	}
	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
		se->ckpt_valid_blocks += del;

	__mark_sit_entry_dirty(sbi, segno);

	/* update total number of valid blocks to be written in ckpt area */
	SIT_I(sbi)->written_valid_blocks += del;

	if (sbi->segs_per_sec > 1)
		get_sec_entry(sbi, segno)->valid_blocks += del;
}

void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new)
{
	update_sit_entry(sbi, new, 1);
	if (GET_SEGNO(sbi, old) != NULL_SEGNO)
		update_sit_entry(sbi, old, -1);

	locate_dirty_segment(sbi, GET_SEGNO(sbi, old));
	locate_dirty_segment(sbi, GET_SEGNO(sbi, new));
}

void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
{
	unsigned int segno = GET_SEGNO(sbi, addr);
	struct sit_info *sit_i = SIT_I(sbi);

	f2fs_bug_on(sbi, addr == NULL_ADDR);
	if (addr == NEW_ADDR)
		return;

	/* add it into sit main buffer */
	mutex_lock(&sit_i->sentry_lock);

	update_sit_entry(sbi, addr, -1);

	/* add it into dirty seglist */
	locate_dirty_segment(sbi, segno);

	mutex_unlock(&sit_i->sentry_lock);
}

bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int segno, offset;
	struct seg_entry *se;
	bool is_cp = false;

	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
		return true;

	mutex_lock(&sit_i->sentry_lock);

	segno = GET_SEGNO(sbi, blkaddr);
	se = get_seg_entry(sbi, segno);
	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

	if (f2fs_test_bit(offset, se->ckpt_valid_map))
		is_cp = true;

	mutex_unlock(&sit_i->sentry_lock);

	return is_cp;
}

/*
 * This function must be called with curseg_mutex held.
 */
static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
					struct f2fs_summary *sum)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	void *addr = curseg->sum_blk;
	addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
	memcpy(addr, sum, sizeof(struct f2fs_summary));
}

/*
 * Calculate the number of current summary pages for writing
 */
int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
{
	int valid_sum_count = 0;
	int i, sum_in_page;

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		if (sbi->ckpt->alloc_type[i] == SSR)
			valid_sum_count += sbi->blocks_per_seg;
		else {
			if (for_ra)
				valid_sum_count += le16_to_cpu(
					F2FS_CKPT(sbi)->cur_data_blkoff[i]);
			else
				valid_sum_count += curseg_blkoff(sbi, i);
		}
	}

	sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
			SUM_FOOTER_SIZE) / SUMMARY_SIZE;
	if (valid_sum_count <= sum_in_page)
		return 1;
	else if ((valid_sum_count - sum_in_page) <=
		(PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
		return 2;
	return 3;
}
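
/*
 * Worked example (an addition for exposition; the numbers assume a 4KB
 * page with SUMMARY_SIZE = 7, SUM_FOOTER_SIZE = 5 and SUM_JOURNAL_SIZE =
 * 507, as defined in f2fs_fs.h of this era):
 *
 *   first page:  (4096 - 2*507 - 5) / 7 = 439 summaries
 *   later pages: (4096 - 5) / 7         = 584 summaries each
 *
 * With the three data logs allocated in LFS manner and each curseg only
 * partially filled, valid_sum_count <= 439 is the common case, so one page
 * usually suffices; the function never needs more than three.
 */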

/*
 * Caller should put this summary page
 */
struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
{
	return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
}

void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr)
{
	struct page *page = grab_meta_page(sbi, blk_addr);
	void *dst = page_address(page);

	if (src)
		memcpy(dst, src, PAGE_SIZE);
	else
		memset(dst, 0, PAGE_SIZE);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
}

static void write_sum_page(struct f2fs_sb_info *sbi,
			struct f2fs_summary_block *sum_blk, block_t blk_addr)
{
	update_meta_page(sbi, (void *)sum_blk, blk_addr);
}

static void write_current_sum_page(struct f2fs_sb_info *sbi,
						int type, block_t blk_addr)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	struct page *page = grab_meta_page(sbi, blk_addr);
	struct f2fs_summary_block *src = curseg->sum_blk;
	struct f2fs_summary_block *dst;

	dst = (struct f2fs_summary_block *)page_address(page);

	mutex_lock(&curseg->curseg_mutex);

	down_read(&curseg->journal_rwsem);
	memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE);
	up_read(&curseg->journal_rwsem);

	memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE);
	memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE);

	mutex_unlock(&curseg->curseg_mutex);

	set_page_dirty(page);
	f2fs_put_page(page, 1);
}

static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno = curseg->segno + 1;
	struct free_segmap_info *free_i = FREE_I(sbi);

	if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
		return !test_bit(segno, free_i->free_segmap);
	return 0;
}

/*
 * Find a new segment in the free segment bitmap, honoring the allocation
 * direction. This function must succeed; otherwise it is a BUG.
 */
static void get_new_segment(struct f2fs_sb_info *sbi,
			unsigned int *newseg, bool new_sec, int dir)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno, secno, zoneno;
	unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
	unsigned int hint = *newseg / sbi->segs_per_sec;
	unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg);
	unsigned int left_start = hint;
	bool init = true;
	int go_left = 0;
	int i;

	spin_lock(&free_i->segmap_lock);

	if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
		segno = find_next_zero_bit(free_i->free_segmap,
				(hint + 1) * sbi->segs_per_sec, *newseg + 1);
		if (segno < (hint + 1) * sbi->segs_per_sec)
			goto got_it;
	}
find_other_zone:
	secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
	if (secno >= MAIN_SECS(sbi)) {
		if (dir == ALLOC_RIGHT) {
			secno = find_next_zero_bit(free_i->free_secmap,
							MAIN_SECS(sbi), 0);
			f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
		} else {
			go_left = 1;
			left_start = hint - 1;
		}
	}
	if (go_left == 0)
		goto skip_left;

	while (test_bit(left_start, free_i->free_secmap)) {
		if (left_start > 0) {
			left_start--;
			continue;
		}
		left_start = find_next_zero_bit(free_i->free_secmap,
							MAIN_SECS(sbi), 0);
		f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
		break;
	}
	secno = left_start;
skip_left:
	hint = secno;
	segno = secno * sbi->segs_per_sec;
	zoneno = secno / sbi->secs_per_zone;

	/* give up on finding another zone */
	if (!init)
		goto got_it;
	if (sbi->secs_per_zone == 1)
		goto got_it;
	if (zoneno == old_zoneno)
		goto got_it;
	if (dir == ALLOC_LEFT) {
		if (!go_left && zoneno + 1 >= total_zones)
			goto got_it;
		if (go_left && zoneno == 0)
			goto got_it;
	}
	for (i = 0; i < NR_CURSEG_TYPE; i++)
		if (CURSEG_I(sbi, i)->zone == zoneno)
			break;

	if (i < NR_CURSEG_TYPE) {
		/* zone is in use, try another */
		if (go_left)
			hint = zoneno * sbi->secs_per_zone - 1;
		else if (zoneno + 1 >= total_zones)
			hint = 0;
		else
			hint = (zoneno + 1) * sbi->secs_per_zone;
		init = false;
		goto find_other_zone;
	}
got_it:
	/* set it as dirty segment in free segmap */
	f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
	__set_inuse(sbi, segno);
	*newseg = segno;
	spin_unlock(&free_i->segmap_lock);
}

static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	struct summary_footer *sum_footer;

	curseg->segno = curseg->next_segno;
	curseg->zone = GET_ZONENO_FROM_SEGNO(sbi, curseg->segno);
	curseg->next_blkoff = 0;
	curseg->next_segno = NULL_SEGNO;

	sum_footer = &(curseg->sum_blk->footer);
	memset(sum_footer, 0, sizeof(struct summary_footer));
	if (IS_DATASEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
	if (IS_NODESEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
	__set_sit_entry_type(sbi, type, curseg->segno, modified);
}

/*
 * Allocate a current working segment.
 * This function always allocates a free segment in LFS manner.
 */
static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno = curseg->segno;
	int dir = ALLOC_LEFT;

	write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, segno));
	if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
		dir = ALLOC_RIGHT;

	if (test_opt(sbi, NOHEAP))
		dir = ALLOC_RIGHT;

	get_new_segment(sbi, &segno, new_sec, dir);
	curseg->next_segno = segno;
	reset_curseg(sbi, type, 1);
	curseg->alloc_type = LFS;
}

static void __next_free_blkoff(struct f2fs_sb_info *sbi,
			struct curseg_info *seg, block_t start)
{
	struct seg_entry *se = get_seg_entry(sbi, seg->segno);
	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
	unsigned long *target_map = SIT_I(sbi)->tmp_map;
	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
	int i, pos;

	for (i = 0; i < entries; i++)
		target_map[i] = ckpt_map[i] | cur_map[i];

	pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);

	seg->next_blkoff = pos;
}

/*
 * If a segment is written in LFS manner, the next block offset is simply
 * the current block offset plus one. However, if a segment is written in
 * SSR manner, the next free block offset must be looked up by calling
 * __next_free_blkoff.
 */
static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
				struct curseg_info *seg)
{
	if (seg->alloc_type == SSR)
		__next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
	else
		seg->next_blkoff++;
}

/*
 * This function always allocates a used segment (from the dirty seglist)
 * in SSR manner, so it needs to recover the segment's existing valid-block
 * information.
 */
static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int new_segno = curseg->next_segno;
	struct f2fs_summary_block *sum_node;
	struct page *sum_page;

	write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, curseg->segno));
	__set_test_and_inuse(sbi, new_segno);

	mutex_lock(&dirty_i->seglist_lock);
	__remove_dirty_segment(sbi, new_segno, PRE);
	__remove_dirty_segment(sbi, new_segno, DIRTY);
	mutex_unlock(&dirty_i->seglist_lock);

	reset_curseg(sbi, type, 1);
	curseg->alloc_type = SSR;
	__next_free_blkoff(sbi, curseg, 0);

	if (reuse) {
		sum_page = get_sum_page(sbi, new_segno);
		sum_node = (struct f2fs_summary_block *)page_address(sum_page);
		memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
		f2fs_put_page(sum_page, 1);
	}
}

static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;

	if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0, 0))
		return v_ops->get_victim(sbi,
				&(curseg)->next_segno, BG_GC, type, SSR);

	/* For data segments, let's do SSR more intensively */
	for (; type >= CURSEG_HOT_DATA; type--)
		if (v_ops->get_victim(sbi, &(curseg)->next_segno,
						BG_GC, type, SSR))
			return 1;
	return 0;
}

/*
 * Flush out the current segment and replace it with a new one.
 * This function must succeed; otherwise it is a BUG.
 */
static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
						int type, bool force)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);

	if (force)
		new_curseg(sbi, type, true);
	else if (type == CURSEG_WARM_NODE)
		new_curseg(sbi, type, false);
	else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
		new_curseg(sbi, type, false);
	else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
		change_curseg(sbi, type, true);
	else
		new_curseg(sbi, type, false);

	stat_inc_seg_type(sbi, curseg);
}

static void __allocate_new_segments(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int old_segno;

	old_segno = curseg->segno;
	SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true);
	locate_dirty_segment(sbi, old_segno);
}

void allocate_new_segments(struct f2fs_sb_info *sbi)
{
	int i;

	if (test_opt(sbi, LFS))
		return;

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
		__allocate_new_segments(sbi, i);
}

static const struct segment_allocation default_salloc_ops = {
	.allocate_segment = allocate_segment_by_default,
};

int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
{
	__u64 start = F2FS_BYTES_TO_BLK(range->start);
	__u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
	unsigned int start_segno, end_segno;
	struct cp_control cpc;
	int err = 0;

	if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
		return -EINVAL;

	cpc.trimmed = 0;
	if (end <= MAIN_BLKADDR(sbi))
		goto out;

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
		f2fs_msg(sbi->sb, KERN_WARNING,
			"Found FS corruption, run fsck to fix.");
		goto out;
	}

	/* start/end segment number in main_area */
	start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
	end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
						GET_SEGNO(sbi, end);
	cpc.reason = CP_DISCARD;
	cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));

	/* do checkpoint to issue discard commands safely */
	for (; start_segno <= end_segno; start_segno = cpc.trim_end + 1) {
		cpc.trim_start = start_segno;

		if (sbi->discard_blks == 0)
			break;
		else if (sbi->discard_blks < BATCHED_TRIM_BLOCKS(sbi))
			cpc.trim_end = end_segno;
		else
			cpc.trim_end = min_t(unsigned int,
				rounddown(start_segno +
				BATCHED_TRIM_SEGMENTS(sbi),
				sbi->segs_per_sec) - 1, end_segno);

		mutex_lock(&sbi->gc_mutex);
		err = write_checkpoint(sbi, &cpc);
		mutex_unlock(&sbi->gc_mutex);
		if (err)
			break;

		schedule();
	}
out:
	range->len = F2FS_BLK_TO_BYTES(cpc.trimmed);
	return err;
}
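
/*
 * Illustrative user-space sketch (an addition for exposition, not part of
 * f2fs): f2fs_trim_fs() is the backend of the generic FITRIM ioctl, so the
 * batched discard above can be driven from userspace much like fstrim(8)
 * does:
 */
#if 0
#include <fcntl.h>
#include <limits.h>
#include <linux/fs.h>		/* FITRIM, struct fstrim_range */
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct fstrim_range range = {
		.start	= 0,
		.len	= ULLONG_MAX,	/* whole filesystem */
		.minlen	= 0,		/* clamped to at least one block above */
	};
	int fd = open("/mnt/f2fs", O_RDONLY);

	if (fd < 0 || ioctl(fd, FITRIM, &range) < 0) {
		perror("FITRIM");
		return 1;
	}
	/* on return, range.len holds the number of bytes trimmed */
	printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	close(fd);
	return 0;
}
#endif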

static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	if (curseg->next_blkoff < sbi->blocks_per_seg)
		return true;
	return false;
}

static int __get_segment_type_2(struct page *page, enum page_type p_type)
{
	if (p_type == DATA)
		return CURSEG_HOT_DATA;
	else
		return CURSEG_HOT_NODE;
}

static int __get_segment_type_4(struct page *page, enum page_type p_type)
{
	if (p_type == DATA) {
		struct inode *inode = page->mapping->host;

		if (S_ISDIR(inode->i_mode))
			return CURSEG_HOT_DATA;
		else
			return CURSEG_COLD_DATA;
	} else {
		if (IS_DNODE(page) && is_cold_node(page))
			return CURSEG_WARM_NODE;
		else
			return CURSEG_COLD_NODE;
	}
}

static int __get_segment_type_6(struct page *page, enum page_type p_type)
{
	if (p_type == DATA) {
		struct inode *inode = page->mapping->host;

		if (S_ISDIR(inode->i_mode))
			return CURSEG_HOT_DATA;
		else if (is_cold_data(page) || file_is_cold(inode))
			return CURSEG_COLD_DATA;
		else
			return CURSEG_WARM_DATA;
	} else {
		if (IS_DNODE(page))
			return is_cold_node(page) ? CURSEG_WARM_NODE :
						CURSEG_HOT_NODE;
		else
			return CURSEG_COLD_NODE;
	}
}

static int __get_segment_type(struct page *page, enum page_type p_type)
{
	switch (F2FS_P_SB(page)->active_logs) {
	case 2:
		return __get_segment_type_2(page, p_type);
	case 4:
		return __get_segment_type_4(page, p_type);
	}
	/* NR_CURSEG_TYPE(6) logs by default */
	f2fs_bug_on(F2FS_P_SB(page),
		F2FS_P_SB(page)->active_logs != NR_CURSEG_TYPE);
	return __get_segment_type_6(page, p_type);
}
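
/*
 * Summary of the default 6-log temperature mapping implemented above
 * (an added note for exposition):
 *
 *   DATA: directory pages -> HOT; cold-marked pages or cold files -> COLD;
 *         everything else -> WARM
 *   NODE: direct node pages -> HOT (WARM when the node is cold, e.g. for
 *         cold files); indirect node pages -> COLD
 *
 * Fewer active logs simply collapse these classes, as the _2 and _4
 * variants show.
 */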

void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
		block_t old_blkaddr, block_t *new_blkaddr,
		struct f2fs_summary *sum, int type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg;
	bool direct_io = (type == CURSEG_DIRECT_IO);

	type = direct_io ? CURSEG_WARM_DATA : type;

	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	/* direct_io'ed data is aligned to the segment for better performance */
	if (direct_io && curseg->next_blkoff &&
				!has_not_enough_free_secs(sbi, 0, 0))
		__allocate_new_segments(sbi, type);

	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	/*
	 * __add_sum_entry must be called with curseg_mutex held,
	 * because this function updates a summary entry in the
	 * current summary block.
	 */
	__add_sum_entry(sbi, type, sum);

	__refresh_next_blkoff(sbi, curseg);

	stat_inc_block_count(sbi, curseg);

	if (!__has_curseg_space(sbi, type))
		sit_i->s_ops->allocate_segment(sbi, type, false);
	/*
	 * SIT information should be updated before segment allocation,
	 * since SSR needs latest valid block information.
	 */
	refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);

	mutex_unlock(&sit_i->sentry_lock);

	if (page && IS_NODESEG(type))
		fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));

	mutex_unlock(&curseg->curseg_mutex);
}

static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
{
	int type = __get_segment_type(fio->page, fio->type);

	if (fio->type == NODE || fio->type == DATA)
		mutex_lock(&fio->sbi->wio_mutex[fio->type]);

	allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
					&fio->new_blkaddr, sum, type);

	/* writeout dirty page into bdev */
	f2fs_submit_page_mbio(fio);

	if (fio->type == NODE || fio->type == DATA)
		mutex_unlock(&fio->sbi->wio_mutex[fio->type]);
}

void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = META,
		.op = REQ_OP_WRITE,
		.op_flags = WRITE_SYNC | REQ_META | REQ_PRIO,
		.old_blkaddr = page->index,
		.new_blkaddr = page->index,
		.page = page,
		.encrypted_page = NULL,
	};

	if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
		fio.op_flags &= ~REQ_META;

	set_page_writeback(page);
	f2fs_submit_page_mbio(&fio);
}

void write_node_page(unsigned int nid, struct f2fs_io_info *fio)
{
	struct f2fs_summary sum;

	set_summary(&sum, nid, 0, 0);
	do_write_page(&sum, fio);
}

void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	struct f2fs_summary sum;
	struct node_info ni;

	f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
	do_write_page(&sum, fio);
	f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
}

void rewrite_data_page(struct f2fs_io_info *fio)
{
	fio->new_blkaddr = fio->old_blkaddr;
	stat_inc_inplace_blocks(fio->sbi);
	f2fs_submit_page_mbio(fio);
}
1559
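/*
 * Rebind @sum to @new_blkaddr: temporarily steer the matching current
 * segment onto the segment holding @new_blkaddr, store the summary there,
 * and fix up SIT counts for both the old and new addresses. With
 * @recover_curseg the original curseg position is restored afterwards.
 */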
1560 void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
1561                                 block_t old_blkaddr, block_t new_blkaddr,
1562                                 bool recover_curseg, bool recover_newaddr)
1563 {
1564         struct sit_info *sit_i = SIT_I(sbi);
1565         struct curseg_info *curseg;
1566         unsigned int segno, old_cursegno;
1567         struct seg_entry *se;
1568         int type;
1569         unsigned short old_blkoff;
1570
1571         segno = GET_SEGNO(sbi, new_blkaddr);
1572         se = get_seg_entry(sbi, segno);
1573         type = se->type;
1574
1575         if (!recover_curseg) {
1576                 /* for recovery flow */
1577                 if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
1578                         if (old_blkaddr == NULL_ADDR)
1579                                 type = CURSEG_COLD_DATA;
1580                         else
1581                                 type = CURSEG_WARM_DATA;
1582                 }
1583         } else {
1584                 if (!IS_CURSEG(sbi, segno))
1585                         type = CURSEG_WARM_DATA;
1586         }
1587
1588         curseg = CURSEG_I(sbi, type);
1589
1590         mutex_lock(&curseg->curseg_mutex);
1591         mutex_lock(&sit_i->sentry_lock);
1592
1593         old_cursegno = curseg->segno;
1594         old_blkoff = curseg->next_blkoff;
1595
1596         /* change the current segment */
1597         if (segno != curseg->segno) {
1598                 curseg->next_segno = segno;
1599                 change_curseg(sbi, type, true);
1600         }
1601
1602         curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
1603         __add_sum_entry(sbi, type, sum);
1604
1605         if (!recover_curseg || recover_newaddr)
1606                 update_sit_entry(sbi, new_blkaddr, 1);
1607         if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
1608                 update_sit_entry(sbi, old_blkaddr, -1);
1609
1610         locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
1611         locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));
1612
1613         locate_dirty_segment(sbi, old_cursegno);
1614
1615         if (recover_curseg) {
1616                 if (old_cursegno != curseg->segno) {
1617                         curseg->next_segno = old_cursegno;
1618                         change_curseg(sbi, type, true);
1619                 }
1620                 curseg->next_blkoff = old_blkoff;
1621         }
1622
1623         mutex_unlock(&sit_i->sentry_lock);
1624         mutex_unlock(&curseg->curseg_mutex);
1625 }
1626
1627 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
1628                                 block_t old_addr, block_t new_addr,
1629                                 unsigned char version, bool recover_curseg,
1630                                 bool recover_newaddr)
1631 {
1632         struct f2fs_summary sum;
1633
1634         set_summary(&sum, dn->nid, dn->ofs_in_node, version);
1635
1636         __f2fs_replace_block(sbi, &sum, old_addr, new_addr,
1637                                         recover_curseg, recover_newaddr);
1638
1639         f2fs_update_data_blkaddr(dn, new_addr);
1640 }
1641
1642 void f2fs_wait_on_page_writeback(struct page *page,
1643                                 enum page_type type, bool ordered)
1644 {
1645         if (PageWriteback(page)) {
1646                 struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1647
1648                 f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, type, WRITE);
1649                 if (ordered)
1650                         wait_on_page_writeback(page);
1651                 else
1652                         wait_for_stable_page(page);
1653         }
1654 }
1655
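/*
 * Encrypted pages under writeback are cached in the meta mapping keyed by
 * block address; wait for any such page before @blkaddr is reused.
 */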
1656 void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *sbi,
1657                                                         block_t blkaddr)
1658 {
1659         struct page *cpage;
1660
1661         if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
1662                 return;
1663
1664         cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
1665         if (cpage) {
1666                 f2fs_wait_on_page_writeback(cpage, DATA, true);
1667                 f2fs_put_page(cpage, 1);
1668         }
1669 }
1670
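/*
 * Layout consumed below, inferred from this read path (a sketch, not an
 * on-disk specification):
 *
 *	start_sum_block():  NAT journal   (SUM_JOURNAL_SIZE bytes)
 *	                    SIT journal   (SUM_JOURNAL_SIZE bytes)
 *	                    summaries of the hot/warm/cold data cursegs,
 *	                    SUMMARY_SIZE each, packed across meta pages
 *	                    while skipping SUM_FOOTER_SIZE per page
 */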
1671 static int read_compacted_summaries(struct f2fs_sb_info *sbi)
1672 {
1673         struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1674         struct curseg_info *seg_i;
1675         unsigned char *kaddr;
1676         struct page *page;
1677         block_t start;
1678         int i, j, offset;
1679
1680         start = start_sum_block(sbi);
1681
1682         page = get_meta_page(sbi, start++);
1683         kaddr = (unsigned char *)page_address(page);
1684
1685         /* Step 1: restore nat cache */
1686         seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
1687         memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE);
1688
1689         /* Step 2: restore sit cache */
1690         seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
1691         memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE);
1692         offset = 2 * SUM_JOURNAL_SIZE;
1693
1694         /* Step 3: restore summary entries */
1695         for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
1696                 unsigned short blk_off;
1697                 unsigned int segno;
1698
1699                 seg_i = CURSEG_I(sbi, i);
1700                 segno = le32_to_cpu(ckpt->cur_data_segno[i]);
1701                 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
1702                 seg_i->next_segno = segno;
1703                 reset_curseg(sbi, i, 0);
1704                 seg_i->alloc_type = ckpt->alloc_type[i];
1705                 seg_i->next_blkoff = blk_off;
1706
1707                 if (seg_i->alloc_type == SSR)
1708                         blk_off = sbi->blocks_per_seg;
1709
1710                 for (j = 0; j < blk_off; j++) {
1711                         struct f2fs_summary *s;
1712                         s = (struct f2fs_summary *)(kaddr + offset);
1713                         seg_i->sum_blk->entries[j] = *s;
1714                         offset += SUMMARY_SIZE;
1715                         if (offset + SUMMARY_SIZE <= PAGE_SIZE -
1716                                                 SUM_FOOTER_SIZE)
1717                                 continue;
1718
1719                         f2fs_put_page(page, 1);
1720                         page = NULL;
1721
1722                         page = get_meta_page(sbi, start++);
1723                         kaddr = (unsigned char *)page_address(page);
1724                         offset = 0;
1725                 }
1726         }
1727         f2fs_put_page(page, 1);
1728         return 0;
1729 }
1730
1731 static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
1732 {
1733         struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1734         struct f2fs_summary_block *sum;
1735         struct curseg_info *curseg;
1736         struct page *new;
1737         unsigned short blk_off;
1738         unsigned int segno = 0;
1739         block_t blk_addr = 0;
1740
1741         /* get segment number and block addr */
1742         if (IS_DATASEG(type)) {
1743                 segno = le32_to_cpu(ckpt->cur_data_segno[type]);
1744                 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
1745                                                         CURSEG_HOT_DATA]);
1746                 if (__exist_node_summaries(sbi))
1747                         blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
1748                 else
1749                         blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
1750         } else {
1751                 segno = le32_to_cpu(ckpt->cur_node_segno[type -
1752                                                         CURSEG_HOT_NODE]);
1753                 blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
1754                                                         CURSEG_HOT_NODE]);
1755                 if (__exist_node_summaries(sbi))
1756                         blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
1757                                                         type - CURSEG_HOT_NODE);
1758                 else
1759                         blk_addr = GET_SUM_BLOCK(sbi, segno);
1760         }
1761
1762         new = get_meta_page(sbi, blk_addr);
1763         sum = (struct f2fs_summary_block *)page_address(new);
1764
1765         if (IS_NODESEG(type)) {
1766                 if (__exist_node_summaries(sbi)) {
1767                         struct f2fs_summary *ns = &sum->entries[0];
1768                         int i;
1769                         for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
1770                                 ns->version = 0;
1771                                 ns->ofs_in_node = 0;
1772                         }
1773                 } else {
1774                         int err;
1775
1776                         err = restore_node_summary(sbi, segno, sum);
1777                         if (err) {
1778                                 f2fs_put_page(new, 1);
1779                                 return err;
1780                         }
1781                 }
1782         }
1783
1784         /* set the incomplete segment as the current segment */
1785         curseg = CURSEG_I(sbi, type);
1786         mutex_lock(&curseg->curseg_mutex);
1787
1788         /* update journal info */
1789         down_write(&curseg->journal_rwsem);
1790         memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE);
1791         up_write(&curseg->journal_rwsem);
1792
1793         memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE);
1794         memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE);
1795         curseg->next_segno = segno;
1796         reset_curseg(sbi, type, 0);
1797         curseg->alloc_type = ckpt->alloc_type[type];
1798         curseg->next_blkoff = blk_off;
1799         mutex_unlock(&curseg->curseg_mutex);
1800         f2fs_put_page(new, 1);
1801         return 0;
1802 }
1803
1804 static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
1805 {
1806         int type = CURSEG_HOT_DATA;
1807         int err;
1808
1809         if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
1810                 int npages = npages_for_summary_flush(sbi, true);
1811
1812                 if (npages >= 2)
1813                         ra_meta_pages(sbi, start_sum_block(sbi), npages,
1814                                                         META_CP, true);
1815
1816                 /* restore compacted data summaries */
1817                 if (read_compacted_summaries(sbi))
1818                         return -EINVAL;
1819                 type = CURSEG_HOT_NODE;
1820         }
1821
1822         if (__exist_node_summaries(sbi))
1823                 ra_meta_pages(sbi, sum_blk_addr(sbi, NR_CURSEG_TYPE, type),
1824                                         NR_CURSEG_TYPE - type, META_CP, true);
1825
1826         for (; type <= CURSEG_COLD_NODE; type++) {
1827                 err = read_normal_summaries(sbi, type);
1828                 if (err)
1829                         return err;
1830         }
1831
1832         return 0;
1833 }
1834
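/*
 * Inverse of read_compacted_summaries(): pack both journals and the data
 * curseg summaries into as few meta pages as possible, reserving the same
 * SUM_FOOTER_SIZE tail in every page.
 */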
1835 static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
1836 {
1837         struct page *page;
1838         unsigned char *kaddr;
1839         struct f2fs_summary *summary;
1840         struct curseg_info *seg_i;
1841         int written_size = 0;
1842         int i, j;
1843
1844         page = grab_meta_page(sbi, blkaddr++);
1845         kaddr = (unsigned char *)page_address(page);
1846
1847         /* Step 1: write nat cache */
1848         seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
1849         memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE);
1850         written_size += SUM_JOURNAL_SIZE;
1851
1852         /* Step 2: write sit cache */
1853         seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
1854         memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE);
1855         written_size += SUM_JOURNAL_SIZE;
1856
1857         /* Step 3: write summary entries */
1858         for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
1859                 unsigned short blkoff;
1860                 seg_i = CURSEG_I(sbi, i);
1861                 if (sbi->ckpt->alloc_type[i] == SSR)
1862                         blkoff = sbi->blocks_per_seg;
1863                 else
1864                         blkoff = curseg_blkoff(sbi, i);
1865
1866                 for (j = 0; j < blkoff; j++) {
1867                         if (!page) {
1868                                 page = grab_meta_page(sbi, blkaddr++);
1869                                 kaddr = (unsigned char *)page_address(page);
1870                                 written_size = 0;
1871                         }
1872                         summary = (struct f2fs_summary *)(kaddr + written_size);
1873                         *summary = seg_i->sum_blk->entries[j];
1874                         written_size += SUMMARY_SIZE;
1875
1876                         if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
1877                                                         SUM_FOOTER_SIZE)
1878                                 continue;
1879
1880                         set_page_dirty(page);
1881                         f2fs_put_page(page, 1);
1882                         page = NULL;
1883                 }
1884         }
1885         if (page) {
1886                 set_page_dirty(page);
1887                 f2fs_put_page(page, 1);
1888         }
1889 }
1890
1891 static void write_normal_summaries(struct f2fs_sb_info *sbi,
1892                                         block_t blkaddr, int type)
1893 {
1894         int i, end;
1895         if (IS_DATASEG(type))
1896                 end = type + NR_CURSEG_DATA_TYPE;
1897         else
1898                 end = type + NR_CURSEG_NODE_TYPE;
1899
1900         for (i = type; i < end; i++)
1901                 write_current_sum_page(sbi, i, blkaddr + (i - type));
1902 }
1903
1904 void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
1905 {
1906         if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
1907                 write_compacted_summaries(sbi, start_blk);
1908         else
1909                 write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
1910 }
1911
1912 void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
1913 {
1914         write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
1915 }
1916
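/*
 * Look up @val (a nid for NAT_JOURNAL, a segno for SIT_JOURNAL) in the
 * in-core journal; return its slot index. With @alloc set and space left,
 * a freshly extended slot index is returned instead; -1 means no slot.
 *
 * Usage sketch, matching the flush path below:
 *
 *	offset = lookup_journal_in_cursum(journal, SIT_JOURNAL, segno, 1);
 *	if (offset >= 0)
 *		segno_in_journal(journal, offset) = cpu_to_le32(segno);
 */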
1917 int lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
1918                                         unsigned int val, int alloc)
1919 {
1920         int i;
1921
1922         if (type == NAT_JOURNAL) {
1923                 for (i = 0; i < nats_in_cursum(journal); i++) {
1924                         if (le32_to_cpu(nid_in_journal(journal, i)) == val)
1925                                 return i;
1926                 }
1927                 if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL))
1928                         return update_nats_in_cursum(journal, 1);
1929         } else if (type == SIT_JOURNAL) {
1930                 for (i = 0; i < sits_in_cursum(journal); i++)
1931                         if (le32_to_cpu(segno_in_journal(journal, i)) == val)
1932                                 return i;
1933                 if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL))
1934                         return update_sits_in_cursum(journal, 1);
1935         }
1936         return -1;
1937 }
1938
1939 static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
1940                                         unsigned int segno)
1941 {
1942         return get_meta_page(sbi, current_sit_addr(sbi, segno));
1943 }
1944
1945 static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
1946                                         unsigned int start)
1947 {
1948         struct sit_info *sit_i = SIT_I(sbi);
1949         struct page *src_page, *dst_page;
1950         pgoff_t src_off, dst_off;
1951         void *src_addr, *dst_addr;
1952
1953         src_off = current_sit_addr(sbi, start);
1954         dst_off = next_sit_addr(sbi, src_off);
1955
1956         /* get current sit block page without lock */
1957         src_page = get_meta_page(sbi, src_off);
1958         dst_page = grab_meta_page(sbi, dst_off);
1959         f2fs_bug_on(sbi, PageDirty(src_page));
1960
1961         src_addr = page_address(src_page);
1962         dst_addr = page_address(dst_page);
1963         memcpy(dst_addr, src_addr, PAGE_SIZE);
1964
1965         set_page_dirty(dst_page);
1966         f2fs_put_page(src_page, 1);
1967
1968         set_to_next_sit(sit_i, start);
1969
1970         return dst_page;
1971 }
1972
1973 static struct sit_entry_set *grab_sit_entry_set(void)
1974 {
1975         struct sit_entry_set *ses =
1976                         f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_NOFS);
1977
1978         ses->entry_cnt = 0;
1979         INIT_LIST_HEAD(&ses->set_list);
1980         return ses;
1981 }
1982
1983 static void release_sit_entry_set(struct sit_entry_set *ses)
1984 {
1985         list_del(&ses->set_list);
1986         kmem_cache_free(sit_entry_set_slab, ses);
1987 }
1988
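/*
 * Keep the sit_entry_set list ordered by ascending entry_cnt after @ses
 * grows, so flush_sit_entries() meets the smallest sets first and can
 * journal them while journal space lasts.
 */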
1989 static void adjust_sit_entry_set(struct sit_entry_set *ses,
1990                                                 struct list_head *head)
1991 {
1992         struct sit_entry_set *next = ses;
1993
1994         if (list_is_last(&ses->set_list, head))
1995                 return;
1996
1997         list_for_each_entry_continue(next, head, set_list)
1998                 if (ses->entry_cnt <= next->entry_cnt)
1999                         break;
2000
2001         list_move_tail(&ses->set_list, &next->set_list);
2002 }
2003
2004 static void add_sit_entry(unsigned int segno, struct list_head *head)
2005 {
2006         struct sit_entry_set *ses;
2007         unsigned int start_segno = START_SEGNO(segno);
2008
2009         list_for_each_entry(ses, head, set_list) {
2010                 if (ses->start_segno == start_segno) {
2011                         ses->entry_cnt++;
2012                         adjust_sit_entry_set(ses, head);
2013                         return;
2014                 }
2015         }
2016
2017         ses = grab_sit_entry_set();
2018
2019         ses->start_segno = start_segno;
2020         ses->entry_cnt++;
2021         list_add(&ses->set_list, head);
2022 }
2023
2024 static void add_sits_in_set(struct f2fs_sb_info *sbi)
2025 {
2026         struct f2fs_sm_info *sm_info = SM_I(sbi);
2027         struct list_head *set_list = &sm_info->sit_entry_set;
2028         unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
2029         unsigned int segno;
2030
2031         for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
2032                 add_sit_entry(segno, set_list);
2033 }
2034
2035 static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
2036 {
2037         struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
2038         struct f2fs_journal *journal = curseg->journal;
2039         int i;
2040
2041         down_write(&curseg->journal_rwsem);
2042         for (i = 0; i < sits_in_cursum(journal); i++) {
2043                 unsigned int segno;
2044                 bool dirtied;
2045
2046                 segno = le32_to_cpu(segno_in_journal(journal, i));
2047                 dirtied = __mark_sit_entry_dirty(sbi, segno);
2048
2049                 if (!dirtied)
2050                         add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
2051         }
2052         update_sits_in_cursum(journal, -i);
2053         up_write(&curseg->journal_rwsem);
2054 }
2055
2056 /*
2057  * The checkpoint path calls this function to flush dirty SIT entries,
2058  * including those in the SIT journal, and to move prefree segs to free segs.
2059  */
2060 void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
2061 {
2062         struct sit_info *sit_i = SIT_I(sbi);
2063         unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
2064         struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
2065         struct f2fs_journal *journal = curseg->journal;
2066         struct sit_entry_set *ses, *tmp;
2067         struct list_head *head = &SM_I(sbi)->sit_entry_set;
2068         bool to_journal = true;
2069         struct seg_entry *se;
2070
2071         mutex_lock(&sit_i->sentry_lock);
2072
2073         if (!sit_i->dirty_sentries)
2074                 goto out;
2075
2076         /*
2077          * temporarily add and account the SIT entries from the dirty
2078          * bitmap in the sit_entry_set list
2079          */
2080         add_sits_in_set(sbi);
2081
2082         /*
2083          * if there is not enough space in the journal to store all of
2084          * the dirty SIT entries, remove them from the journal and
2085          * account them in the sit_entry_set list instead.
2086          */
2087         if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL))
2088                 remove_sits_in_journal(sbi);
2089
2090         /*
2091          * there are two ways to flush the SIT entries of a set:
2092          * #1, flush them to the journal in the current cold data summary block.
2093          * #2, flush them to their SIT pages.
2094          */
2095         list_for_each_entry_safe(ses, tmp, head, set_list) {
2096                 struct page *page = NULL;
2097                 struct f2fs_sit_block *raw_sit = NULL;
2098                 unsigned int start_segno = ses->start_segno;
2099                 unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
2100                                                 (unsigned long)MAIN_SEGS(sbi));
2101                 unsigned int segno = start_segno;
2102
2103                 if (to_journal &&
2104                         !__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL))
2105                         to_journal = false;
2106
2107                 if (to_journal) {
2108                         down_write(&curseg->journal_rwsem);
2109                 } else {
2110                         page = get_next_sit_page(sbi, start_segno);
2111                         raw_sit = page_address(page);
2112                 }
2113
2114                 /* flush dirty sit entries in region of current sit set */
2115                 for_each_set_bit_from(segno, bitmap, end) {
2116                         int offset, sit_offset;
2117
2118                         se = get_seg_entry(sbi, segno);
2119
2120                         /* add discard candidates */
2121                         if (cpc->reason != CP_DISCARD) {
2122                                 cpc->trim_start = segno;
2123                                 add_discard_addrs(sbi, cpc);
2124                         }
2125
2126                         if (to_journal) {
2127                                 offset = lookup_journal_in_cursum(journal,
2128                                                         SIT_JOURNAL, segno, 1);
2129                                 f2fs_bug_on(sbi, offset < 0);
2130                                 segno_in_journal(journal, offset) =
2131                                                         cpu_to_le32(segno);
2132                                 seg_info_to_raw_sit(se,
2133                                         &sit_in_journal(journal, offset));
2134                         } else {
2135                                 sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
2136                                 seg_info_to_raw_sit(se,
2137                                                 &raw_sit->entries[sit_offset]);
2138                         }
2139
2140                         __clear_bit(segno, bitmap);
2141                         sit_i->dirty_sentries--;
2142                         ses->entry_cnt--;
2143                 }
2144
2145                 if (to_journal)
2146                         up_write(&curseg->journal_rwsem);
2147                 else
2148                         f2fs_put_page(page, 1);
2149
2150                 f2fs_bug_on(sbi, ses->entry_cnt);
2151                 release_sit_entry_set(ses);
2152         }
2153
2154         f2fs_bug_on(sbi, !list_empty(head));
2155         f2fs_bug_on(sbi, sit_i->dirty_sentries);
2156 out:
2157         if (cpc->reason == CP_DISCARD) {
2158                 for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
2159                         add_discard_addrs(sbi, cpc);
2160         }
2161         mutex_unlock(&sit_i->sentry_lock);
2162
2163         set_prefree_as_free_segments(sbi);
2164 }
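
/*
 * A minimal caller-side sketch (assuming the usual checkpoint flow; the
 * cp_control fields are as used throughout this file):
 *
 *	struct cp_control cpc = { .reason = CP_SYNC };
 *
 *	mutex_lock(&sbi->cp_mutex);
 *	flush_sit_entries(sbi, &cpc);
 *	mutex_unlock(&sbi->cp_mutex);
 */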
2165
2166 static int build_sit_info(struct f2fs_sb_info *sbi)
2167 {
2168         struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
2169         struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2170         struct sit_info *sit_i;
2171         unsigned int sit_segs, start;
2172         char *src_bitmap, *dst_bitmap;
2173         unsigned int bitmap_size;
2174
2175         /* allocate memory for SIT information */
2176         sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
2177         if (!sit_i)
2178                 return -ENOMEM;
2179
2180         SM_I(sbi)->sit_info = sit_i;
2181
2182         sit_i->sentries = f2fs_kvzalloc(MAIN_SEGS(sbi) *
2183                                         sizeof(struct seg_entry), GFP_KERNEL);
2184         if (!sit_i->sentries)
2185                 return -ENOMEM;
2186
2187         bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
2188         sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
2189         if (!sit_i->dirty_sentries_bitmap)
2190                 return -ENOMEM;
2191
2192         for (start = 0; start < MAIN_SEGS(sbi); start++) {
2193                 sit_i->sentries[start].cur_valid_map
2194                         = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
2195                 sit_i->sentries[start].ckpt_valid_map
2196                         = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
2197                 if (!sit_i->sentries[start].cur_valid_map ||
2198                                 !sit_i->sentries[start].ckpt_valid_map)
2199                         return -ENOMEM;
2200
2201                 if (f2fs_discard_en(sbi)) {
2202                         sit_i->sentries[start].discard_map
2203                                 = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
2204                         if (!sit_i->sentries[start].discard_map)
2205                                 return -ENOMEM;
2206                 }
2207         }
2208
2209         sit_i->tmp_map = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
2210         if (!sit_i->tmp_map)
2211                 return -ENOMEM;
2212
2213         if (sbi->segs_per_sec > 1) {
2214                 sit_i->sec_entries = f2fs_kvzalloc(MAIN_SECS(sbi) *
2215                                         sizeof(struct sec_entry), GFP_KERNEL);
2216                 if (!sit_i->sec_entries)
2217                         return -ENOMEM;
2218         }
2219
2220         /* get information related to the SIT */
2221         sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
2222
2223         /* set up the SIT bitmap from the checkpoint pack */
2224         bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
2225         src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
2226
2227         dst_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
2228         if (!dst_bitmap)
2229                 return -ENOMEM;
2230
2231         /* init SIT information */
2232         sit_i->s_ops = &default_salloc_ops;
2233
2234         sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
2235         sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
2236         sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count);
2237         sit_i->sit_bitmap = dst_bitmap;
2238         sit_i->bitmap_size = bitmap_size;
2239         sit_i->dirty_sentries = 0;
2240         sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
2241         sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
2242         sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec;
2243         mutex_init(&sit_i->sentry_lock);
2244         return 0;
2245 }
2246
2247 static int build_free_segmap(struct f2fs_sb_info *sbi)
2248 {
2249         struct free_segmap_info *free_i;
2250         unsigned int bitmap_size, sec_bitmap_size;
2251
2252         /* allocate memory for free segmap information */
2253         free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
2254         if (!free_i)
2255                 return -ENOMEM;
2256
2257         SM_I(sbi)->free_info = free_i;
2258
2259         bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
2260         free_i->free_segmap = f2fs_kvmalloc(bitmap_size, GFP_KERNEL);
2261         if (!free_i->free_segmap)
2262                 return -ENOMEM;
2263
2264         sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
2265         free_i->free_secmap = f2fs_kvmalloc(sec_bitmap_size, GFP_KERNEL);
2266         if (!free_i->free_secmap)
2267                 return -ENOMEM;
2268
2269         /* set all segments as dirty temporarily */
2270         memset(free_i->free_segmap, 0xff, bitmap_size);
2271         memset(free_i->free_secmap, 0xff, sec_bitmap_size);
2272
2273         /* init free segmap information */
2274         free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
2275         free_i->free_segments = 0;
2276         free_i->free_sections = 0;
2277         spin_lock_init(&free_i->segmap_lock);
2278         return 0;
2279 }
2280
2281 static int build_curseg(struct f2fs_sb_info *sbi)
2282 {
2283         struct curseg_info *array;
2284         int i;
2285
2286         array = kcalloc(NR_CURSEG_TYPE, sizeof(*array), GFP_KERNEL);
2287         if (!array)
2288                 return -ENOMEM;
2289
2290         SM_I(sbi)->curseg_array = array;
2291
2292         for (i = 0; i < NR_CURSEG_TYPE; i++) {
2293                 mutex_init(&array[i].curseg_mutex);
2294                 array[i].sum_blk = kzalloc(PAGE_SIZE, GFP_KERNEL);
2295                 if (!array[i].sum_blk)
2296                         return -ENOMEM;
2297                 init_rwsem(&array[i].journal_rwsem);
2298                 array[i].journal = kzalloc(sizeof(struct f2fs_journal),
2299                                                         GFP_KERNEL);
2300                 if (!array[i].journal)
2301                         return -ENOMEM;
2302                 array[i].segno = NULL_SEGNO;
2303                 array[i].next_blkoff = 0;
2304         }
2305         return restore_curseg_summaries(sbi);
2306 }
2307
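/*
 * Seed the in-memory SIT from the on-disk SIT area (readahead in batches of
 * MAX_BIO_BLOCKS * 8 pages), then overlay the newer entries still cached in
 * the cold-data curseg's SIT journal.
 */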
2308 static void build_sit_entries(struct f2fs_sb_info *sbi)
2309 {
2310         struct sit_info *sit_i = SIT_I(sbi);
2311         struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
2312         struct f2fs_journal *journal = curseg->journal;
2313         struct seg_entry *se;
2314         struct f2fs_sit_entry sit;
2315         int sit_blk_cnt = SIT_BLK_CNT(sbi);
2316         unsigned int i, start, end;
2317         unsigned int readed, start_blk = 0;
2318         int nrpages = MAX_BIO_BLOCKS(sbi) * 8;
2319
2320         do {
2321                 readed = ra_meta_pages(sbi, start_blk, nrpages, META_SIT, true);
2322
2323                 start = start_blk * sit_i->sents_per_block;
2324                 end = (start_blk + readed) * sit_i->sents_per_block;
2325
2326                 for (; start < end && start < MAIN_SEGS(sbi); start++) {
2327                         struct f2fs_sit_block *sit_blk;
2328                         struct page *page;
2329
2330                         se = &sit_i->sentries[start];
2331                         page = get_current_sit_page(sbi, start);
2332                         sit_blk = (struct f2fs_sit_block *)page_address(page);
2333                         sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
2334                         f2fs_put_page(page, 1);
2335
2336                         check_block_count(sbi, start, &sit);
2337                         seg_info_from_raw_sit(se, &sit);
2338
2339                         /* build the discard map only once */
2340                         if (f2fs_discard_en(sbi)) {
2341                                 memcpy(se->discard_map, se->cur_valid_map,
2342                                                         SIT_VBLOCK_MAP_SIZE);
2343                                 sbi->discard_blks += sbi->blocks_per_seg -
2344                                                         se->valid_blocks;
2345                         }
2346
2347                         if (sbi->segs_per_sec > 1)
2348                                 get_sec_entry(sbi, start)->valid_blocks +=
2349                                                         se->valid_blocks;
2350                 }
2351                 start_blk += readed;
2352         } while (start_blk < sit_blk_cnt);
2353
2354         down_read(&curseg->journal_rwsem);
2355         for (i = 0; i < sits_in_cursum(journal); i++) {
2356                 unsigned int old_valid_blocks;
2357
2358                 start = le32_to_cpu(segno_in_journal(journal, i));
2359                 se = &sit_i->sentries[start];
2360                 sit = sit_in_journal(journal, i);
2361
2362                 old_valid_blocks = se->valid_blocks;
2363
2364                 check_block_count(sbi, start, &sit);
2365                 seg_info_from_raw_sit(se, &sit);
2366
2367                 if (f2fs_discard_en(sbi)) {
2368                         memcpy(se->discard_map, se->cur_valid_map,
2369                                                 SIT_VBLOCK_MAP_SIZE);
2370                         sbi->discard_blks += old_valid_blocks -
2371                                                 se->valid_blocks;
2372                 }
2373
2374                 if (sbi->segs_per_sec > 1)
2375                         get_sec_entry(sbi, start)->valid_blocks +=
2376                                 se->valid_blocks - old_valid_blocks;
2377         }
2378         up_read(&curseg->journal_rwsem);
2379 }
2380
2381 static void init_free_segmap(struct f2fs_sb_info *sbi)
2382 {
2383         unsigned int start;
2384         int type;
2385
2386         for (start = 0; start < MAIN_SEGS(sbi); start++) {
2387                 struct seg_entry *sentry = get_seg_entry(sbi, start);
2388                 if (!sentry->valid_blocks)
2389                         __set_free(sbi, start);
2390         }
2391
2392         /* set the current segments as in-use */
2393         for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
2394                 struct curseg_info *curseg_t = CURSEG_I(sbi, type);
2395                 __set_test_and_inuse(sbi, curseg_t->segno);
2396         }
2397 }
2398
2399 static void init_dirty_segmap(struct f2fs_sb_info *sbi)
2400 {
2401         struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2402         struct free_segmap_info *free_i = FREE_I(sbi);
2403         unsigned int segno = 0, offset = 0;
2404         unsigned short valid_blocks;
2405
2406         while (1) {
2407                 /* find dirty segment based on free segmap */
2408                 segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
2409                 if (segno >= MAIN_SEGS(sbi))
2410                         break;
2411                 offset = segno + 1;
2412                 valid_blocks = get_valid_blocks(sbi, segno, 0);
2413                 if (valid_blocks == sbi->blocks_per_seg || !valid_blocks)
2414                         continue;
2415                 if (valid_blocks > sbi->blocks_per_seg) {
2416                         f2fs_bug_on(sbi, 1);
2417                         continue;
2418                 }
2419                 mutex_lock(&dirty_i->seglist_lock);
2420                 __locate_dirty_segment(sbi, segno, DIRTY);
2421                 mutex_unlock(&dirty_i->seglist_lock);
2422         }
2423 }
2424
2425 static int init_victim_secmap(struct f2fs_sb_info *sbi)
2426 {
2427         struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2428         unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
2429
2430         dirty_i->victim_secmap = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
2431         if (!dirty_i->victim_secmap)
2432                 return -ENOMEM;
2433         return 0;
2434 }
2435
2436 static int build_dirty_segmap(struct f2fs_sb_info *sbi)
2437 {
2438         struct dirty_seglist_info *dirty_i;
2439         unsigned int bitmap_size, i;
2440
2441         /* allocate memory for dirty segments list information */
2442         dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL);
2443         if (!dirty_i)
2444                 return -ENOMEM;
2445
2446         SM_I(sbi)->dirty_info = dirty_i;
2447         mutex_init(&dirty_i->seglist_lock);
2448
2449         bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
2450
2451         for (i = 0; i < NR_DIRTY_TYPE; i++) {
2452                 dirty_i->dirty_segmap[i] = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
2453                 if (!dirty_i->dirty_segmap[i])
2454                         return -ENOMEM;
2455         }
2456
2457         init_dirty_segmap(sbi);
2458         return init_victim_secmap(sbi);
2459 }
2460
2461 /*
2462  * Update min, max modified time for cost-benefit GC algorithm
2463  */
2464 static void init_min_max_mtime(struct f2fs_sb_info *sbi)
2465 {
2466         struct sit_info *sit_i = SIT_I(sbi);
2467         unsigned int segno;
2468
2469         mutex_lock(&sit_i->sentry_lock);
2470
2471         sit_i->min_mtime = LLONG_MAX;
2472
2473         for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
2474                 unsigned int i;
2475                 unsigned long long mtime = 0;
2476
2477                 for (i = 0; i < sbi->segs_per_sec; i++)
2478                         mtime += get_seg_entry(sbi, segno + i)->mtime;
2479
2480                 mtime = div_u64(mtime, sbi->segs_per_sec);
2481
2482                 if (sit_i->min_mtime > mtime)
2483                         sit_i->min_mtime = mtime;
2484         }
2485         sit_i->max_mtime = get_mtime(sbi);
2486         mutex_unlock(&sit_i->sentry_lock);
2487 }
2488
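/*
 * Bring the segment manager up in dependency order: sm_info fields from the
 * superblock and checkpoint, the flush thread, SIT info, free segmap,
 * cursegs, on-disk SIT entries, and finally the dirty segmap and the GC
 * mtime bounds.
 */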
2489 int build_segment_manager(struct f2fs_sb_info *sbi)
2490 {
2491         struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
2492         struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2493         struct f2fs_sm_info *sm_info;
2494         int err;
2495
2496         sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
2497         if (!sm_info)
2498                 return -ENOMEM;
2499
2500         /* init sm info */
2501         sbi->sm_info = sm_info;
2502         sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
2503         sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
2504         sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
2505         sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
2506         sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
2507         sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
2508         sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
2509         sm_info->rec_prefree_segments = sm_info->main_segments *
2510                                         DEF_RECLAIM_PREFREE_SEGMENTS / 100;
2511         if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
2512                 sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;
2513
2514         if (!test_opt(sbi, LFS))
2515                 sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
2516         sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
2517         sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
2518
2519         INIT_LIST_HEAD(&sm_info->discard_list);
2520         INIT_LIST_HEAD(&sm_info->wait_list);
2521         sm_info->nr_discards = 0;
2522         sm_info->max_discards = 0;
2523
2524         sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS;
2525
2526         INIT_LIST_HEAD(&sm_info->sit_entry_set);
2527
2528         if (test_opt(sbi, FLUSH_MERGE) && !f2fs_readonly(sbi->sb)) {
2529                 err = create_flush_cmd_control(sbi);
2530                 if (err)
2531                         return err;
2532         }
2533
2534         err = build_sit_info(sbi);
2535         if (err)
2536                 return err;
2537         err = build_free_segmap(sbi);
2538         if (err)
2539                 return err;
2540         err = build_curseg(sbi);
2541         if (err)
2542                 return err;
2543
2544         /* reinit free segmap based on SIT */
2545         build_sit_entries(sbi);
2546
2547         init_free_segmap(sbi);
2548         err = build_dirty_segmap(sbi);
2549         if (err)
2550                 return err;
2551
2552         init_min_max_mtime(sbi);
2553         return 0;
2554 }
2555
2556 static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
2557                 enum dirty_type dirty_type)
2558 {
2559         struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2560
2561         mutex_lock(&dirty_i->seglist_lock);
2562         kvfree(dirty_i->dirty_segmap[dirty_type]);
2563         dirty_i->nr_dirty[dirty_type] = 0;
2564         mutex_unlock(&dirty_i->seglist_lock);
2565 }
2566
2567 static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
2568 {
2569         struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2570         kvfree(dirty_i->victim_secmap);
2571 }
2572
2573 static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
2574 {
2575         struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2576         int i;
2577
2578         if (!dirty_i)
2579                 return;
2580
2581         /* discard pre-free/dirty segments list */
2582         for (i = 0; i < NR_DIRTY_TYPE; i++)
2583                 discard_dirty_segmap(sbi, i);
2584
2585         destroy_victim_secmap(sbi);
2586         SM_I(sbi)->dirty_info = NULL;
2587         kfree(dirty_i);
2588 }
2589
2590 static void destroy_curseg(struct f2fs_sb_info *sbi)
2591 {
2592         struct curseg_info *array = SM_I(sbi)->curseg_array;
2593         int i;
2594
2595         if (!array)
2596                 return;
2597         SM_I(sbi)->curseg_array = NULL;
2598         for (i = 0; i < NR_CURSEG_TYPE; i++) {
2599                 kfree(array[i].sum_blk);
2600                 kfree(array[i].journal);
2601         }
2602         kfree(array);
2603 }
2604
2605 static void destroy_free_segmap(struct f2fs_sb_info *sbi)
2606 {
2607         struct free_segmap_info *free_i = SM_I(sbi)->free_info;
2608         if (!free_i)
2609                 return;
2610         SM_I(sbi)->free_info = NULL;
2611         kvfree(free_i->free_segmap);
2612         kvfree(free_i->free_secmap);
2613         kfree(free_i);
2614 }
2615
2616 static void destroy_sit_info(struct f2fs_sb_info *sbi)
2617 {
2618         struct sit_info *sit_i = SIT_I(sbi);
2619         unsigned int start;
2620
2621         if (!sit_i)
2622                 return;
2623
2624         if (sit_i->sentries) {
2625                 for (start = 0; start < MAIN_SEGS(sbi); start++) {
2626                         kfree(sit_i->sentries[start].cur_valid_map);
2627                         kfree(sit_i->sentries[start].ckpt_valid_map);
2628                         kfree(sit_i->sentries[start].discard_map);
2629                 }
2630         }
2631         kfree(sit_i->tmp_map);
2632
2633         kvfree(sit_i->sentries);
2634         kvfree(sit_i->sec_entries);
2635         kvfree(sit_i->dirty_sentries_bitmap);
2636
2637         SM_I(sbi)->sit_info = NULL;
2638         kfree(sit_i->sit_bitmap);
2639         kfree(sit_i);
2640 }
2641
2642 void destroy_segment_manager(struct f2fs_sb_info *sbi)
2643 {
2644         struct f2fs_sm_info *sm_info = SM_I(sbi);
2645
2646         if (!sm_info)
2647                 return;
2648         destroy_flush_cmd_control(sbi);
2649         destroy_dirty_segmap(sbi);
2650         destroy_curseg(sbi);
2651         destroy_free_segmap(sbi);
2652         destroy_sit_info(sbi);
2653         sbi->sm_info = NULL;
2654         kfree(sm_info);
2655 }
2656
2657 int __init create_segment_manager_caches(void)
2658 {
2659         discard_entry_slab = f2fs_kmem_cache_create("discard_entry",
2660                         sizeof(struct discard_entry));
2661         if (!discard_entry_slab)
2662                 goto fail;
2663
2664         bio_entry_slab = f2fs_kmem_cache_create("bio_entry",
2665                         sizeof(struct bio_entry));
2666         if (!bio_entry_slab)
2667                 goto destroy_discard_entry;
2668
2669         sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set",
2670                         sizeof(struct sit_entry_set));
2671         if (!sit_entry_set_slab)
2672                 goto destroy_bio_entry;
2673
2674         inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry",
2675                         sizeof(struct inmem_pages));
2676         if (!inmem_entry_slab)
2677                 goto destroy_sit_entry_set;
2678         return 0;
2679
2680 destroy_sit_entry_set:
2681         kmem_cache_destroy(sit_entry_set_slab);
2682 destroy_bio_entry:
2683         kmem_cache_destroy(bio_entry_slab);
2684 destroy_discard_entry:
2685         kmem_cache_destroy(discard_entry_slab);
2686 fail:
2687         return -ENOMEM;
2688 }
2689
2690 void destroy_segment_manager_caches(void)
2691 {
2692         kmem_cache_destroy(sit_entry_set_slab);
2693         kmem_cache_destroy(bio_entry_slab);
2694         kmem_cache_destroy(discard_entry_slab);
2695         kmem_cache_destroy(inmem_entry_slab);
2696 }