Merge tag 'for-f2fs-4.8' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk...
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 4c2d1fa..a46296f 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -241,7 +241,7 @@ void drop_inmem_pages(struct inode *inode)
 {
        struct f2fs_inode_info *fi = F2FS_I(inode);
 
-       clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
+       clear_inode_flag(inode, FI_ATOMIC_FILE);
 
        mutex_lock(&fi->inmem_lock);
        __revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
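
This hunk reflects an API cleanup: the inode-flag helpers now take the VFS
inode itself instead of a pre-resolved f2fs_inode_info, so callers no longer
repeat the F2FS_I() conversion. A minimal userspace model of that refactor
follows; the struct layout and helper body are illustrative assumptions, not
the kernel's.

    #include <stdio.h>

    struct inode { unsigned long i_private_flags; };

    #define FI_ATOMIC_FILE (1UL << 0)

    /* The conversion from the VFS inode to the fs-private flag word is
     * hidden inside the helper, as F2FS_I() now is in the kernel. */
    static void clear_inode_flag(struct inode *inode, unsigned long flag)
    {
            inode->i_private_flags &= ~flag;
    }

    int main(void)
    {
            struct inode ino = { .i_private_flags = FI_ATOMIC_FILE };

            clear_inode_flag(&ino, FI_ATOMIC_FILE); /* was: F2FS_I(inode) */
            printf("flags=%lx\n", ino.i_private_flags);
            return 0;
    }
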
@@ -346,6 +346,11 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
 {
        if (!need)
                return;
+
+       /* balance_fs_bg work may still be pending; serve it first */
+       if (excess_cached_nats(sbi))
+               f2fs_balance_fs_bg(sbi);
+
        /*
         * We should do GC or end up with checkpoint, if there are so many dirty
         * dir/node pages without enough free segments.
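
The added excess_cached_nats() check lets the foreground balancer drain
pending background work before it weighs GC. A toy sketch of the pattern;
NAT_CACHE_LIMIT and cached_nats are made-up names and values, not the
kernel's.

    #include <stdio.h>

    #define NAT_CACHE_LIMIT 100000          /* assumed threshold */

    static long cached_nats = 120000;       /* pretend cache population */

    static void balance_bg(void)
    {
            /* stand-in for f2fs_balance_fs_bg(): shrink cached entries */
            cached_nats /= 2;
    }

    static void balance_fs(int need)
    {
            if (!need)
                    return;
            /* background work may still be pending; serve it first */
            if (cached_nats > NAT_CACHE_LIMIT)
                    balance_bg();
            /* ... then check free segments and possibly run GC ... */
    }

    int main(void)
    {
            balance_fs(1);
            printf("cached_nats=%ld\n", cached_nats);
            return 0;
    }
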
@@ -367,7 +372,9 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
                try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);
 
        if (!available_free_memory(sbi, FREE_NIDS))
-               try_to_free_nids(sbi, NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES);
+               try_to_free_nids(sbi, MAX_FREE_NIDS);
+       else
+               build_free_nids(sbi);
 
        /* checkpoint is the only way to shrink partial cached entries */
        if (!available_free_memory(sbi, NAT_ENTRIES) ||
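
Background balancing now also refills the free-nid cache (build_free_nids)
whenever FREE_NIDS memory is available, and under pressure shrinks it by up
to MAX_FREE_NIDS entries rather than the old fixed batch. A toy model of
that two-way balance; the counter and cap are invented for illustration.

    #include <stdio.h>

    #define MAX_FREE_NIDS 8192              /* assumed cache cap */

    static int free_nid_cnt = 100;

    static void try_to_free_nids(int nr)    /* drop up to nr cached nids */
    {
            free_nid_cnt -= (free_nid_cnt < nr) ? free_nid_cnt : nr;
    }

    static void build_free_nids(void)       /* refill ahead of demand */
    {
            free_nid_cnt = MAX_FREE_NIDS;
    }

    static void balance_bg(int free_nids_mem_ok)
    {
            if (!free_nids_mem_ok)
                    try_to_free_nids(MAX_FREE_NIDS);
            else
                    build_free_nids();
    }

    int main(void)
    {
            balance_bg(1);
            printf("free_nid_cnt=%d\n", free_nid_cnt);
            return 0;
    }
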
@@ -435,25 +442,29 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi)
        if (test_opt(sbi, NOBARRIER))
                return 0;
 
-       if (!test_opt(sbi, FLUSH_MERGE)) {
+       if (!test_opt(sbi, FLUSH_MERGE) || !atomic_read(&fcc->submit_flush)) {
                struct bio *bio = f2fs_bio_alloc(0);
                int ret;
 
+               atomic_inc(&fcc->submit_flush);
                bio->bi_bdev = sbi->sb->s_bdev;
                bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
                ret = submit_bio_wait(bio);
+               atomic_dec(&fcc->submit_flush);
                bio_put(bio);
                return ret;
        }
 
        init_completion(&cmd.wait);
 
+       atomic_inc(&fcc->submit_flush);
        llist_add(&cmd.llnode, &fcc->issue_list);
 
        if (!fcc->dispatch_list)
                wake_up(&fcc->flush_wait_queue);
 
        wait_for_completion(&cmd.wait);
+       atomic_dec(&fcc->submit_flush);
 
        return cmd.ret;
 }
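
The new submit_flush counter short-circuits flush merging: when no flush is
in flight, the caller issues its flush bio directly even with FLUSH_MERGE
enabled, and only concurrent callers are batched behind the issue daemon. A
userspace sketch of that gating with C11 atomics; the function bodies are
stand-ins, not the kernel logic.

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int submit_flush;

    static int issue_flush_direct(void)
    {
            atomic_fetch_add(&submit_flush, 1);
            puts("direct flush");        /* stands in for submit_bio_wait() */
            atomic_fetch_sub(&submit_flush, 1);
            return 0;
    }

    static int issue_flush_merged(void)
    {
            atomic_fetch_add(&submit_flush, 1);
            puts("queued to flush-merge daemon"); /* llist_add + completion */
            atomic_fetch_sub(&submit_flush, 1);
            return 0;
    }

    static int issue_flush(int flush_merge_enabled)
    {
            /* direct path when merging is off or nothing is in flight */
            if (!flush_merge_enabled || atomic_load(&submit_flush) == 0)
                    return issue_flush_direct();
            return issue_flush_merged();
    }

    int main(void)
    {
            return issue_flush(1);
    }
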
@@ -467,6 +478,7 @@ int create_flush_cmd_control(struct f2fs_sb_info *sbi)
        fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
        if (!fcc)
                return -ENOMEM;
+       atomic_set(&fcc->submit_flush, 0);
        init_waitqueue_head(&fcc->flush_wait_queue);
        init_llist_head(&fcc->issue_list);
        SM_I(sbi)->cmd_control_info = fcc;
@@ -668,6 +680,10 @@ static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc)
                        break;
 
                end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
+               if (force && start && end != max_blocks
+                                       && (end - start) < cpc->trim_minlen)
+                       continue;
+
                __add_discard_entry(sbi, cpc, se, start, end);
        }
 }
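
When invoked for FITRIM (force), the scan above now drops candidate extents
shorter than cpc->trim_minlen, but keeps ones touching either segment
boundary, presumably because __add_discard_entry() can still merge those
with extents from neighboring segments. A standalone model of the
scan-and-filter loop; the bitmap and minimum length are invented.

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_BLOCKS 16

    static void scan_discards(const bool *dmap, unsigned trim_minlen,
                              bool force)
    {
            unsigned start, end = 0;

            while (end < MAX_BLOCKS) {
                    /* find the next run of discardable blocks */
                    for (start = end; start < MAX_BLOCKS && !dmap[start];
                         start++)
                            ;
                    if (start >= MAX_BLOCKS)
                            break;
                    for (end = start + 1; end < MAX_BLOCKS && dmap[end];
                         end++)
                            ;
                    /* mirror the new check: skip short interior extents */
                    if (force && start && end != MAX_BLOCKS &&
                        end - start < trim_minlen)
                            continue;
                    printf("discard [%u, %u)\n", start, end);
            }
    }

    int main(void)
    {
            bool dmap[MAX_BLOCKS] = {
                    0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1 };

            scan_discards(dmap, 2, true);
            return 0;
    }
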
@@ -705,6 +721,8 @@ void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
        unsigned int start = 0, end = -1;
+       unsigned int secno, start_segno;
+       bool force = (cpc->reason == CP_DISCARD);
 
        mutex_lock(&dirty_i->seglist_lock);
 
@@ -721,17 +739,31 @@ void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 
                dirty_i->nr_dirty[PRE] -= end - start;
 
-               if (!test_opt(sbi, DISCARD))
+               if (force || !test_opt(sbi, DISCARD))
                        continue;
 
-               f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
+               if (!test_opt(sbi, LFS) || sbi->segs_per_sec == 1) {
+                       f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
                                (end - start) << sbi->log_blocks_per_seg);
+                       continue;
+               }
+next:
+               secno = GET_SECNO(sbi, start);
+               start_segno = secno * sbi->segs_per_sec;
+               if (!IS_CURSEC(sbi, secno) &&
+                       !get_valid_blocks(sbi, start, sbi->segs_per_sec))
+                       f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
+                               sbi->segs_per_sec << sbi->log_blocks_per_seg);
+
+               start = start_segno + sbi->segs_per_sec;
+               if (start < end)
+                       goto next;
        }
        mutex_unlock(&dirty_i->seglist_lock);
 
        /* send small discards */
        list_for_each_entry_safe(entry, this, head, list) {
-               if (cpc->reason == CP_DISCARD && entry->len < cpc->trim_minlen)
+               if (force && entry->len < cpc->trim_minlen)
                        goto skip;
                f2fs_issue_discard(sbi, entry->blkaddr, entry->len);
                cpc->trimmed += entry->len;
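
In LFS mode with sections larger than one segment, clear_prefree_segments()
stops issuing per-segment discards and instead walks the prefree range one
section at a time, discarding a section only when it is not a current (open)
section and has no valid blocks left. A simplified model of that walk; the
helpers and geometry are assumptions, not the kernel's.

    #include <stdbool.h>
    #include <stdio.h>

    #define SEGS_PER_SEC 4

    /* stand-ins for IS_CURSEC() and get_valid_blocks() */
    static bool section_is_current(unsigned secno)     { return secno == 0; }
    static bool section_has_valid_blocks(unsigned secno) { return secno == 2; }

    static void discard_prefree_range(unsigned start, unsigned end)
    {
            while (start < end) {
                    unsigned secno = start / SEGS_PER_SEC;
                    unsigned start_segno = secno * SEGS_PER_SEC;

                    /* only whole, fully invalid, non-open sections */
                    if (!section_is_current(secno) &&
                        !section_has_valid_blocks(secno))
                            printf("discard section %u (segs %u..%u)\n",
                                   secno, start_segno,
                                   start_segno + SEGS_PER_SEC - 1);

                    start = start_segno + SEGS_PER_SEC; /* next section */
            }
    }

    int main(void)
    {
            discard_prefree_range(1, 12); /* prefree segments 1..11 */
            return 0;
    }
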
@@ -1219,6 +1251,9 @@ void allocate_new_segments(struct f2fs_sb_info *sbi)
 {
        int i;
 
+       if (test_opt(sbi, LFS))
+               return;
+
        for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
                __allocate_new_segments(sbi, i);
 }
@@ -1392,11 +1427,17 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
 {
        int type = __get_segment_type(fio->page, fio->type);
 
+       if (fio->type == NODE || fio->type == DATA)
+               mutex_lock(&fio->sbi->wio_mutex[fio->type]);
+
        allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
                                        &fio->new_blkaddr, sum, type);
 
        /* writeout dirty page into bdev */
        f2fs_submit_page_mbio(fio);
+
+       if (fio->type == NODE || fio->type == DATA)
+               mutex_unlock(&fio->sbi->wio_mutex[fio->type]);
 }
 
 void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
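
do_write_page() now holds a per-type mutex across block allocation and bio
submission for NODE and DATA pages, so each stream's blocks are allocated
and dispatched in order, which keeps LFS writes sequential on disk. A
pthread model of the same serialization; only the
mutex-around-allocate-and-submit shape mirrors the patch, the rest is
invented.

    #include <pthread.h>
    #include <stdio.h>

    enum page_type { PT_DATA, PT_NODE, PT_META, NR_PAGE_TYPE };

    static pthread_mutex_t wio_mutex[NR_PAGE_TYPE] = {
            PTHREAD_MUTEX_INITIALIZER,
            PTHREAD_MUTEX_INITIALIZER,
            PTHREAD_MUTEX_INITIALIZER,
    };

    static unsigned next_blkaddr = 1000;

    static void do_write_page(enum page_type type, const char *tag)
    {
            if (type == PT_NODE || type == PT_DATA)
                    pthread_mutex_lock(&wio_mutex[type]);

            unsigned blk = next_blkaddr++;      /* allocate_data_block() */
            printf("%s -> blk %u\n", tag, blk); /* f2fs_submit_page_mbio() */

            if (type == PT_NODE || type == PT_DATA)
                    pthread_mutex_unlock(&wio_mutex[type]);
    }

    int main(void)
    {
            do_write_page(PT_DATA, "data page");
            do_write_page(PT_NODE, "node page");
            return 0;
    }
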
@@ -2377,7 +2418,11 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
        sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
        sm_info->rec_prefree_segments = sm_info->main_segments *
                                        DEF_RECLAIM_PREFREE_SEGMENTS / 100;
-       sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
+       if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
+               sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;
+
+       if (!test_opt(sbi, LFS))
+               sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
        sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
        sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
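
Finally, build_segment_manager() clamps rec_prefree_segments to an upper
bound and leaves ipu_policy at zero under LFS, since in-place updates
contradict pure log-structured writing. A sketch of the resulting
initialization; the constant values here are assumptions for illustration.

    #include <stdbool.h>
    #include <stdio.h>

    #define DEF_RECLAIM_PREFREE_SEGMENTS     5    /* assumed: percent */
    #define DEF_MAX_RECLAIM_PREFREE_SEGMENTS 4096 /* assumed ceiling */
    #define F2FS_IPU_FSYNC                   4    /* assumed bit index */

    int main(void)
    {
            unsigned main_segments = 200000;
            bool lfs_mode = false;
            unsigned rec_prefree =
                    main_segments * DEF_RECLAIM_PREFREE_SEGMENTS / 100;
            unsigned ipu_policy = 0;

            /* clamp the percentage-derived value to a hard ceiling */
            if (rec_prefree > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
                    rec_prefree = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;

            /* fsync in-place-update policy only outside LFS mode */
            if (!lfs_mode)
                    ipu_policy = 1 << F2FS_IPU_FSYNC;

            printf("rec_prefree=%u ipu_policy=0x%x\n",
                   rec_prefree, ipu_policy);
            return 0;
    }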