Merge branch 'xfs-4.9-delalloc-rework' into for-next
[cascardo/linux.git] / fs / iomap.c
index 48141b8..ec411a6 100644 (file)
@@ -84,8 +84,11 @@ iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
         * Now the data has been copied, commit the range we've copied.  This
         * should not fail unless the filesystem has had a fatal error.
         */
-       ret = ops->iomap_end(inode, pos, length, written > 0 ? written : 0,
-                       flags, &iomap);
+       if (ops->iomap_end) {
+               ret = ops->iomap_end(inode, pos, length,
+                                    written > 0 ? written : 0,
+                                    flags, &iomap);
+       }
 
        return written ? written : ret;
 }
@@ -194,12 +197,9 @@ again:
                if (mapping_writably_mapped(inode->i_mapping))
                        flush_dcache_page(page);
 
-               pagefault_disable();
                copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
-               pagefault_enable();
 
                flush_dcache_page(page);
-               mark_page_accessed(page);
 
                status = iomap_write_end(inode, pos, bytes, copied, page);
                if (unlikely(status < 0))
@@ -252,6 +252,88 @@ iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
 }
 EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
 
+static struct page *
+__iomap_read_page(struct inode *inode, loff_t offset) /* read the page backing @offset via the page cache and verify it is uptodate */
+{
+       struct address_space *mapping = inode->i_mapping;
+       struct page *page;
+
+       page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL); /* returns a referenced page or ERR_PTR */
+       if (IS_ERR(page))
+               return page;
+       if (!PageUptodate(page)) {
+               put_page(page); /* drop the reference taken by read_mapping_page() */
+               return ERR_PTR(-EIO); /* read completed but page never became uptodate */
+       }
+       return page; /* caller owns a reference and must put_page() it */
+}
+
+static loff_t
+iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
+               struct iomap *iomap) /* redirty [pos, pos+length) one pagecache page at a time; returns bytes processed or -errno */
+{
+       long status = 0;
+       ssize_t written = 0;
+
+       do {
+               struct page *page, *rpage;
+               unsigned long offset;   /* Offset into pagecache page */
+               unsigned long bytes;    /* Bytes to write to page */
+
+               offset = (pos & (PAGE_SIZE - 1));
+               bytes = min_t(unsigned long, PAGE_SIZE - offset, length); /* clamp to page and to remaining range */
+
+               rpage = __iomap_read_page(inode, pos); /* ensure the data is cached and uptodate before rewriting it */
+               if (IS_ERR(rpage))
+                       return PTR_ERR(rpage);
+
+               status = iomap_write_begin(inode, pos, bytes,
+                               AOP_FLAG_NOFS | AOP_FLAG_UNINTERRUPTIBLE,
+                               &page, iomap);
+               put_page(rpage); /* read reference no longer needed once write_begin has the page */
+               if (unlikely(status))
+                       return status;
+
+               WARN_ON_ONCE(!PageUptodate(page)); /* __iomap_read_page() already guaranteed uptodate */
+
+               status = iomap_write_end(inode, pos, bytes, bytes, page); /* copied == bytes: page contents unchanged, just redirtied */
+               if (unlikely(status <= 0)) {
+                       if (WARN_ON_ONCE(status == 0)) /* a zero-length write-end here should be impossible */
+                               return -EIO;
+                       return status;
+               }
+
+               cond_resched(); /* long ranges: give the scheduler a chance between pages */
+
+               pos += status;
+               written += status;
+               length -= status;
+
+               balance_dirty_pages_ratelimited(inode->i_mapping); /* throttle if we are dirtying pages too fast */
+       } while (length);
+
+       return written; /* iomap_apply() advances by this amount */
+}
+
+int
+iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
+               struct iomap_ops *ops) /* mark the byte range [pos, pos+len) dirty in the pagecache; 0 on success or -errno */
+{
+       loff_t ret;
+
+       while (len) {
+               ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
+                               iomap_dirty_actor); /* one iomap extent per iteration */
+               if (ret <= 0) /* error, or no forward progress */
+                       return ret;
+               pos += ret;
+               len -= ret;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(iomap_file_dirty);
+
 static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
                unsigned bytes, struct iomap *iomap)
 {
@@ -428,9 +510,14 @@ static int iomap_to_fiemap(struct fiemap_extent_info *fi,
                break;
        }
 
+       if (iomap->flags & IOMAP_F_MERGED)
+               flags |= FIEMAP_EXTENT_MERGED;
+       if (iomap->flags & IOMAP_F_SHARED)
+               flags |= FIEMAP_EXTENT_SHARED;
+
        return fiemap_fill_next_extent(fi, iomap->offset,
                        iomap->blkno != IOMAP_NULL_BLOCK ? iomap->blkno << 9: 0,
-                       iomap->length, flags | FIEMAP_EXTENT_MERGED);
+                       iomap->length, flags);
 
 }
 
@@ -470,13 +557,18 @@ int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
        if (ret)
                return ret;
 
-       ret = filemap_write_and_wait(inode->i_mapping);
-       if (ret)
-               return ret;
+       if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
+               ret = filemap_write_and_wait(inode->i_mapping);
+               if (ret)
+                       return ret;
+       }
 
        while (len > 0) {
                ret = iomap_apply(inode, start, len, 0, ops, &ctx,
                                iomap_fiemap_actor);
+               /* inode with no (attribute) mapping will give ENOENT */
+               if (ret == -ENOENT)
+                       break;
                if (ret < 0)
                        return ret;
                if (ret == 0)