Merge branch 'akpm' (patches from Andrew)
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index ccb401e..9ae194f 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -34,6 +34,11 @@ static void f2fs_read_end_io(struct bio *bio)
        struct bio_vec *bvec;
        int i;
 
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+       if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO))
+               bio->bi_error = -EIO;
+#endif
+
        if (f2fs_bio_encrypted(bio)) {
                if (bio->bi_error) {
                        fscrypt_release_ctx(bio->bi_private);
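
Note on the fault-injection hook added above: with CONFIG_F2FS_FAULT_INJECTION enabled, time_to_inject() fires once every inject_rate calls for fault types selected in the superblock's fault_info. A minimal sketch of its shape, paraphrased from the f2fs.h of this era (not verbatim; the field names are as I recall them, so treat them as assumptions):

        static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
        {
                struct f2fs_fault_info *ffi = &sbi->fault_info;

                if (!ffi->inject_rate)          /* injection disabled */
                        return false;
                if (!IS_FAULT_SET(ffi, type))   /* fault type not selected */
                        return false;

                /* fire on every inject_rate-th call */
                atomic_inc(&ffi->inject_ops);
                if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
                        atomic_set(&ffi->inject_ops, 0);
                        return true;
                }
                return false;
        }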
@@ -70,7 +75,7 @@ static void f2fs_write_end_io(struct bio *bio)
                fscrypt_pullback_bio_page(&page, true);
 
                if (unlikely(bio->bi_error)) {
-                       set_bit(AS_EIO, &page->mapping->flags);
+                       mapping_set_error(page->mapping, -EIO);
                        f2fs_stop_checkpoint(sbi, true);
                }
                end_page_writeback(page);
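
For reference, mapping_set_error() is the stock helper for recording a writeback error on an address_space, and unlike the open-coded set_bit() it also distinguishes -ENOSPC from other errors. Roughly, from include/linux/pagemap.h of this vintage:

        static inline void mapping_set_error(struct address_space *mapping,
                                             int error)
        {
                if (unlikely(error)) {
                        if (error == -ENOSPC)
                                set_bit(AS_ENOSPC, &mapping->flags);
                        else
                                set_bit(AS_EIO, &mapping->flags);
                }
        }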
@@ -626,11 +631,13 @@ ssize_t f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
        ssize_t ret = 0;
 
        map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
-       map.m_len = F2FS_BYTES_TO_BLK(iov_iter_count(from));
-       map.m_next_pgofs = NULL;
+       map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
+       if (map.m_len > map.m_lblk)
+               map.m_len -= map.m_lblk;
+       else
+               map.m_len = 0;
 
-       if (f2fs_encrypted_inode(inode))
-               return 0;
+       map.m_next_pgofs = NULL;
 
        if (iocb->ki_flags & IOCB_DIRECT) {
                ret = f2fs_convert_inline_inode(inode);
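
The reworked m_len computation matters when ki_pos is not block-aligned: the old code sized the map from the byte count alone, while the new code takes the end offset in blocks minus the rounded-up start block, clamped at zero, i.e. only the fully covered blocks. A standalone sketch of the arithmetic, assuming 4 KiB blocks and the usual f2fs macro behavior (F2FS_BLK_ALIGN() rounds a byte offset up to a block number, F2FS_BYTES_TO_BLK() rounds down):

        #include <stdio.h>

        #define BLKBITS 12                               /* 4 KiB blocks */
        #define BYTES_TO_BLK(b) ((b) >> BLKBITS)         /* round down */
        #define BLK_ALIGN(b)    BYTES_TO_BLK((b) + (1 << BLKBITS) - 1)

        int main(void)
        {
                unsigned long long pos = 2048, count = 8192; /* unaligned */
                unsigned long long lblk = BLK_ALIGN(pos);
                unsigned long long end  = BYTES_TO_BLK(pos + count);
                unsigned long long len  = end > lblk ? end - lblk : 0;

                /* the old BYTES_TO_BLK(count) would say 2 blocks; only
                 * block 1 is fully covered by bytes [2048, 10240) */
                printf("preallocate %llu block(s) at block %llu\n",
                       len, lblk);
                return 0;
        }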
@@ -672,6 +679,9 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
        bool allocated = false;
        block_t blkaddr;
 
+       if (!maxblocks)
+               return 0;
+
        map->m_len = 0;
        map->m_flags = 0;
 
@@ -783,6 +793,7 @@ skip:
                err = reserve_new_blocks(&dn, prealloc);
                if (err)
                        goto sync_out;
+               allocated = dn.node_changed;
 
                map->m_len += dn.ofs_in_node - ofs_in_node;
                if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
@@ -966,8 +977,8 @@ out:
        return ret;
 }
 
-struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr,
-                                                       unsigned nr_pages)
+static struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr,
+                                unsigned nr_pages)
 {
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct fscrypt_ctx *ctx = NULL;
@@ -1284,7 +1295,7 @@ write:
 
        if (!wbc->for_reclaim)
                need_balance_fs = true;
-       else if (has_not_enough_free_secs(sbi, 0))
+       else if (has_not_enough_free_secs(sbi, 0, 0))
                goto redirty_out;
 
        err = -EAGAIN;
@@ -1344,6 +1355,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
        int cycled;
        int range_whole = 0;
        int tag;
+       int nwritten = 0;
 
        pagevec_init(&pvec, 0);
 
@@ -1418,6 +1430,8 @@ continue_unlock:
                                done_index = page->index + 1;
                                done = 1;
                                break;
+                       } else {
+                               nwritten++;
                        }
 
                        if (--wbc->nr_to_write <= 0 &&
@@ -1439,6 +1453,10 @@ continue_unlock:
        if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
                mapping->writeback_index = done_index;
 
+       if (nwritten)
+               f2fs_submit_merged_bio_cond(F2FS_M_SB(mapping), mapping->host,
+                                                       NULL, 0, DATA, WRITE);
+
        return ret;
 }
 
@@ -1480,7 +1498,6 @@ static int f2fs_write_data_pages(struct address_space *mapping,
         * if some pages were truncated, we cannot guarantee its mapping->host
         * to detect pending bios.
         */
-       f2fs_submit_merged_bio(sbi, DATA, WRITE);
 
        remove_dirty_inode(inode);
        return ret;
@@ -1518,8 +1535,7 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
         * we already allocated all the blocks, so we don't need to get
         * the block addresses when there is no need to fill the page.
         */
-       if (!f2fs_has_inline_data(inode) && !f2fs_encrypted_inode(inode) &&
-                                       len == PAGE_SIZE)
+       if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE)
                return 0;
 
        if (f2fs_has_inline_data(inode) ||
@@ -1616,7 +1632,7 @@ repeat:
        if (err)
                goto fail;
 
-       if (need_balance && has_not_enough_free_secs(sbi, 0)) {
+       if (need_balance && has_not_enough_free_secs(sbi, 0, 0)) {
                unlock_page(page);
                f2fs_balance_fs(sbi, true);
                lock_page(page);
@@ -1633,22 +1649,12 @@ repeat:
        if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
                f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
 
-       if (len == PAGE_SIZE)
-               goto out_update;
-       if (PageUptodate(page))
-               goto out_clear;
-
-       if ((pos & PAGE_MASK) >= i_size_read(inode)) {
-               unsigned start = pos & (PAGE_SIZE - 1);
-               unsigned end = start + len;
-
-               /* Reading beyond i_size is simple: memset to zero */
-               zero_user_segments(page, 0, start, end, PAGE_SIZE);
-               goto out_update;
-       }
+       if (len == PAGE_SIZE || PageUptodate(page))
+               return 0;
 
        if (blkaddr == NEW_ADDR) {
                zero_user_segment(page, 0, PAGE_SIZE);
+               SetPageUptodate(page);
        } else {
                struct bio *bio;
 
@@ -1676,11 +1682,6 @@ repeat:
                        goto fail;
                }
        }
-out_update:
-       if (!PageUptodate(page))
-               SetPageUptodate(page);
-out_clear:
-       clear_cold_data(page);
        return 0;
 
 fail:
@@ -1698,11 +1699,26 @@ static int f2fs_write_end(struct file *file,
 
        trace_f2fs_write_end(inode, pos, len, copied);
 
+       /*
+        * This should come from len == PAGE_SIZE, so copied is expected to
+        * be PAGE_SIZE as well. Otherwise, treat it as zero bytes copied and
+        * let generic_perform_write() retry the copy through copied = 0.
+        */
+       if (!PageUptodate(page)) {
+               if (unlikely(copied != PAGE_SIZE))
+                       copied = 0;
+               else
+                       SetPageUptodate(page);
+       }
+       if (!copied)
+               goto unlock_out;
+
        set_page_dirty(page);
+       clear_cold_data(page);
 
        if (pos + copied > i_size_read(inode))
                f2fs_i_size_write(inode, pos + copied);
-
+unlock_out:
        f2fs_put_page(page, 1);
        f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
        return copied;
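
The copied = 0 convention above leans on the contract of generic_perform_write() in mm/filemap.c: when ->write_end() reports zero progress, the write loop re-faults the user buffer and retries with a shorter segment instead of failing. Paraphrased (not verbatim), the relevant part of that loop looks like:

        again:
                status = a_ops->write_begin(file, mapping, pos, bytes,
                                            flags, &page, &fsdata);
                ...
                copied = iov_iter_copy_from_user_atomic(page, i, offset,
                                                        bytes);
                status = a_ops->write_end(file, mapping, pos, bytes,
                                          copied, page, fsdata);
                ...
                if (unlikely(status == 0)) {
                        /* no progress: fall back to one short segment */
                        bytes = min_t(unsigned long, PAGE_SIZE - offset,
                                      iov_iter_single_seg_count(i));
                        goto again;
                }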
@@ -1873,6 +1889,58 @@ static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
        return generic_block_bmap(mapping, block, get_data_block_bmap);
 }
 
+#ifdef CONFIG_MIGRATION
+#include <linux/migrate.h>
+
+int f2fs_migrate_page(struct address_space *mapping,
+               struct page *newpage, struct page *page, enum migrate_mode mode)
+{
+       int rc, extra_count;
+       struct f2fs_inode_info *fi = F2FS_I(mapping->host);
+       bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);
+
+       BUG_ON(PageWriteback(page));
+
+       /* migrating an atomic written page is safe while inmem_lock is held */
+       if (atomic_written && !mutex_trylock(&fi->inmem_lock))
+               return -EAGAIN;
+
+       /*
+        * A reference is expected if PagePrivate is set when the mapping is
+        * moved, but F2FS breaks this rule to maintain dirty page counts
+        * when truncating pages. Adjusting 'extra_count' here makes it work.
+        */
+       extra_count = (atomic_written ? 1 : 0) - page_has_private(page);
+       rc = migrate_page_move_mapping(mapping, newpage,
+                               page, NULL, mode, extra_count);
+       if (rc != MIGRATEPAGE_SUCCESS) {
+               if (atomic_written)
+                       mutex_unlock(&fi->inmem_lock);
+               return rc;
+       }
+
+       if (atomic_written) {
+               struct inmem_pages *cur;
+               list_for_each_entry(cur, &fi->inmem_pages, list)
+                       if (cur->page == page) {
+                               cur->page = newpage;
+                               break;
+                       }
+               mutex_unlock(&fi->inmem_lock);
+               put_page(page);
+               get_page(newpage);
+       }
+
+       if (PagePrivate(page))
+               SetPagePrivate(newpage);
+       set_page_private(newpage, page_private(page));
+
+       migrate_page_copy(newpage, page);
+
+       return MIGRATEPAGE_SUCCESS;
+}
+#endif
+
 const struct address_space_operations f2fs_dblock_aops = {
        .readpage       = f2fs_read_data_page,
        .readpages      = f2fs_read_data_pages,
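
On the extra_count trick in f2fs_migrate_page() above: migrate_page_move_mapping() refuses to migrate a page whose reference count differs from its expectation, and that expectation includes page_has_private(). F2FS sets PagePrivate on such pages without taking the matching reference, so the -page_has_private(page) term compensates, while the +1 for atomic-written pages covers the reference held via the inmem_pages list (handed over by the put_page()/get_page() pair later in the function). The check has roughly this shape, paraphrasing mm/migrate.c of this era (not verbatim):

        /* inside migrate_page_move_mapping(..., extra_count) */
        int expected_count = 1 + extra_count;         /* caller's reference */

        expected_count += 1 + page_has_private(page); /* cache + private */
        if (page_count(page) != expected_count)
                return -EAGAIN;                       /* page is busy */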
@@ -1885,4 +1953,7 @@ const struct address_space_operations f2fs_dblock_aops = {
        .releasepage    = f2fs_release_page,
        .direct_IO      = f2fs_direct_IO,
        .bmap           = f2fs_bmap,
+#ifdef CONFIG_MIGRATION
+       .migratepage    = f2fs_migrate_page,
+#endif
 };