Merge tag 'for_linus_stable' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso...
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index b49cf6e..55b187c 100644
@@ -20,7 +20,6 @@
 
 #include <linux/fs.h>
 #include <linux/time.h>
-#include <linux/jbd2.h>
 #include <linux/highuid.h>
 #include <linux/pagemap.h>
 #include <linux/quotaops.h>
@@ -36,7 +35,6 @@
 #include <linux/kernel.h>
 #include <linux/printk.h>
 #include <linux/slab.h>
-#include <linux/ratelimit.h>
 #include <linux/bitops.h>
 
 #include "ext4_jbd2.h"
@@ -140,7 +138,7 @@ static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
 /*
  * Test whether an inode is a fast symlink.
  */
-static int ext4_inode_is_fast_symlink(struct inode *inode)
+int ext4_inode_is_fast_symlink(struct inode *inode)
 {
         int ea_blocks = EXT4_I(inode)->i_file_acl ?
                EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;
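
Dropping "static" exposes ext4_inode_is_fast_symlink() to the rest of ext4; the encrypted-symlink handling added elsewhere in this series is the natural consumer. A matching declaration would be expected in fs/ext4/ext4.h, roughly (a sketch, not checked against the header):

        int ext4_inode_is_fast_symlink(struct inode *inode);
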
@@ -533,6 +531,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
                status = map->m_flags & EXT4_MAP_UNWRITTEN ?
                                EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
                if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
+                   !(status & EXTENT_STATUS_WRITTEN) &&
                    ext4_find_delalloc_range(inode, map->m_lblk,
                                             map->m_lblk + map->m_len - 1))
                        status |= EXTENT_STATUS_DELAYED;
@@ -637,6 +636,7 @@ found:
                status = map->m_flags & EXT4_MAP_UNWRITTEN ?
                                EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
                if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
+                   !(status & EXTENT_STATUS_WRITTEN) &&
                    ext4_find_delalloc_range(inode, map->m_lblk,
                                             map->m_lblk + map->m_len - 1))
                        status |= EXTENT_STATUS_DELAYED;
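
Both hunks above add the same guard: an extent whose status is already EXTENT_STATUS_WRITTEN is never additionally flagged EXTENT_STATUS_DELAYED, and the comparatively expensive ext4_find_delalloc_range() lookup is skipped for it thanks to short-circuit evaluation. A minimal user-space sketch of the flag logic; the bit values are illustrative assumptions, not the real definitions from fs/ext4/extents_status.h:

        #include <stdio.h>

        /* Illustrative values only, standing in for EXTENT_STATUS_*. */
        #define ES_WRITTEN   (1 << 0)
        #define ES_UNWRITTEN (1 << 1)
        #define ES_DELAYED   (1 << 2)

        /* Stand-in for ext4_find_delalloc_range(). */
        static int range_has_delalloc(void)
        {
                return 1;
        }

        int main(void)
        {
                unsigned int status = ES_WRITTEN;

                /* The new test: only consult the delalloc state for
                 * extents that are not already written. */
                if (!(status & ES_WRITTEN) && range_has_delalloc())
                        status |= ES_DELAYED;

                printf("status = %#x\n", status);   /* DELAYED stays clear */
                return 0;
        }
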
@@ -887,6 +887,95 @@ int do_journal_get_write_access(handle_t *handle,
 
 static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
                   struct buffer_head *bh_result, int create);
+
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
+                                 get_block_t *get_block)
+{
+       unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+       unsigned to = from + len;
+       struct inode *inode = page->mapping->host;
+       unsigned block_start, block_end;
+       sector_t block;
+       int err = 0;
+       unsigned blocksize = inode->i_sb->s_blocksize;
+       unsigned bbits;
+       struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
+       bool decrypt = false;
+
+       BUG_ON(!PageLocked(page));
+       BUG_ON(from > PAGE_CACHE_SIZE);
+       BUG_ON(to > PAGE_CACHE_SIZE);
+       BUG_ON(from > to);
+
+       if (!page_has_buffers(page))
+               create_empty_buffers(page, blocksize, 0);
+       head = page_buffers(page);
+       bbits = ilog2(blocksize);
+       block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
+
+       for (bh = head, block_start = 0; bh != head || !block_start;
+           block++, block_start = block_end, bh = bh->b_this_page) {
+               block_end = block_start + blocksize;
+               if (block_end <= from || block_start >= to) {
+                       if (PageUptodate(page)) {
+                               if (!buffer_uptodate(bh))
+                                       set_buffer_uptodate(bh);
+                       }
+                       continue;
+               }
+               if (buffer_new(bh))
+                       clear_buffer_new(bh);
+               if (!buffer_mapped(bh)) {
+                       WARN_ON(bh->b_size != blocksize);
+                       err = get_block(inode, block, bh, 1);
+                       if (err)
+                               break;
+                       if (buffer_new(bh)) {
+                               unmap_underlying_metadata(bh->b_bdev,
+                                                         bh->b_blocknr);
+                               if (PageUptodate(page)) {
+                                       clear_buffer_new(bh);
+                                       set_buffer_uptodate(bh);
+                                       mark_buffer_dirty(bh);
+                                       continue;
+                               }
+                               if (block_end > to || block_start < from)
+                                       zero_user_segments(page, to, block_end,
+                                                          block_start, from);
+                               continue;
+                       }
+               }
+               if (PageUptodate(page)) {
+                       if (!buffer_uptodate(bh))
+                               set_buffer_uptodate(bh);
+                       continue;
+               }
+               if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
+                   !buffer_unwritten(bh) &&
+                   (block_start < from || block_end > to)) {
+                       ll_rw_block(READ, 1, &bh);
+                       *wait_bh++ = bh;
+                       decrypt = ext4_encrypted_inode(inode) &&
+                               S_ISREG(inode->i_mode);
+               }
+       }
+       /*
+        * If we issued read requests, let them complete.
+        */
+       while (wait_bh > wait) {
+               wait_on_buffer(*--wait_bh);
+               if (!buffer_uptodate(*wait_bh))
+                       err = -EIO;
+       }
+       if (unlikely(err))
+               page_zero_new_buffers(page, from, to);
+       else if (decrypt)
+               err = ext4_decrypt_one(inode, page);
+       return err;
+}
+#endif
+
 static int ext4_write_begin(struct file *file, struct address_space *mapping,
                            loff_t pos, unsigned len, unsigned flags,
                            struct page **pagep, void **fsdata)
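
The new ext4_block_write_begin() (under CONFIG_EXT4_FS_ENCRYPTION) is essentially a private copy of __block_write_begin() from fs/buffer.c: it walks the page's buffer heads, maps the blocks the write touches via the supplied get_block callback, zeroes newly allocated blocks outside the written range, and issues reads for boundary buffers that are not up to date. The one functional addition is the "decrypt" flag: if any such read was issued for an encrypted regular file, the freshly read data is ciphertext, so ext4_decrypt_one() is run on the page once all reads complete. The block-boundary arithmetic is the subtle part; here is a self-contained user-space sketch of it (4 KiB pages and 1 KiB blocks are assumed purely for illustration):

        #include <stdio.h>

        #define PAGE_SIZE  4096UL
        #define PAGE_SHIFT 12
        #define BLOCKSIZE  1024UL

        int main(void)
        {
                unsigned long pos = 5000, len = 300;     /* example write */
                unsigned long index = pos >> PAGE_SHIFT; /* page->index   */
                unsigned from = pos & (PAGE_SIZE - 1);   /* offset in page */
                unsigned to = from + len;
                unsigned block_start, block_end;
                unsigned long block = index * (PAGE_SIZE / BLOCKSIZE);

                for (block_start = 0; block_start < PAGE_SIZE;
                     block_start = block_end, block++) {
                        block_end = block_start + BLOCKSIZE;
                        if (block_end <= from || block_start >= to)
                                continue;   /* block untouched by the write */
                        printf("write touches block %lu (bytes %u-%u of page %lu)\n",
                               block, block_start, block_end - 1, index);
                }
                return 0;
        }
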
@@ -949,11 +1038,19 @@ retry_journal:
        /* In case writeback began while the page was unlocked */
        wait_for_stable_page(page);
 
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+       if (ext4_should_dioread_nolock(inode))
+               ret = ext4_block_write_begin(page, pos, len,
+                                            ext4_get_block_write);
+       else
+               ret = ext4_block_write_begin(page, pos, len,
+                                            ext4_get_block);
+#else
        if (ext4_should_dioread_nolock(inode))
                ret = __block_write_begin(page, pos, len, ext4_get_block_write);
        else
                ret = __block_write_begin(page, pos, len, ext4_get_block);
-
+#endif
        if (!ret && ext4_should_journal_data(inode)) {
                ret = ext4_walk_page_buffers(handle, page_buffers(page),
                                             from, to, NULL,
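
With CONFIG_EXT4_FS_ENCRYPTION enabled, ext4_block_write_begin() is used unconditionally here; for unencrypted inodes it behaves just like __block_write_begin(), since "decrypt" only becomes true for encrypted regular files. Without the option, the generic helper remains and the new code compiles away. Both helpers accept the same callback type; as recalled from include/linux/fs.h it is:

        typedef int (get_block_t)(struct inode *inode, sector_t iblock,
                                  struct buffer_head *bh_result, int create);
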
@@ -2575,7 +2672,12 @@ retry_journal:
        /* In case writeback began while the page was unlocked */
        wait_for_stable_page(page);
 
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+       ret = ext4_block_write_begin(page, pos, len,
+                                    ext4_da_get_block_prep);
+#else
        ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
+#endif
        if (ret < 0) {
                unlock_page(page);
                ext4_journal_stop(handle);
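
The delayed-allocation write path gets the same treatment as ext4_write_begin() above: under CONFIG_EXT4_FS_ENCRYPTION the encryption-aware helper is called with ext4_da_get_block_prep as its get_block callback, otherwise the generic __block_write_begin() is kept.
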
@@ -2821,7 +2923,7 @@ static int ext4_readpage(struct file *file, struct page *page)
                ret = ext4_readpage_inline(inode, page);
 
        if (ret == -EAGAIN)
-               return mpage_readpage(page, ext4_get_block);
+               return ext4_mpage_readpages(page->mapping, NULL, page, 1);
 
        return ret;
 }
@@ -2836,7 +2938,7 @@ ext4_readpages(struct file *file, struct address_space *mapping,
        if (ext4_has_inline_data(inode))
                return 0;
 
-       return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
+       return ext4_mpage_readpages(mapping, pages, NULL, nr_pages);
 }
 
 static void ext4_invalidatepage(struct page *page, unsigned int offset,
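
mpage_readpage() and mpage_readpages() submit bios straight into the page cache and offer no hook for post-read processing, so they cannot decrypt data for encrypted files. Both entry points are therefore switched to the ext4-private ext4_mpage_readpages(), added by this series in fs/ext4/readpage.c; as recalled, its prototype is roughly:

        int ext4_mpage_readpages(struct address_space *mapping,
                                 struct list_head *pages, struct page *page,
                                 unsigned nr_pages);

->readpage passes a single locked page (pages == NULL, nr_pages == 1), while ->readpages passes the page list and no single page.
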
@@ -2977,7 +3079,7 @@ static ssize_t ext4_ext_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
         * overwrite DIO as i_dio_count needs to be incremented under i_mutex.
         */
        if (iov_iter_rw(iter) == WRITE)
-               atomic_inc(&inode->i_dio_count);
+               inode_dio_begin(inode);
 
        /* If we do an overwrite dio, i_mutex locking can be released */
        overwrite = *((int *)iocb->private);
@@ -3033,6 +3135,9 @@ static ssize_t ext4_ext_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                get_block_func = ext4_get_block_write;
                dio_flags = DIO_LOCKING;
        }
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+       BUG_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode));
+#endif
        if (IS_DAX(inode))
                ret = dax_do_io(iocb, inode, iter, offset, get_block_func,
                                ext4_end_io_dio, dio_flags);
@@ -3079,7 +3184,7 @@ static ssize_t ext4_ext_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 
 retake_lock:
        if (iov_iter_rw(iter) == WRITE)
-               inode_dio_done(inode);
+               inode_dio_end(inode);
        /* take i_mutex locking again if we do an overwrite dio */
        if (overwrite) {
                up_read(&EXT4_I(inode)->i_data_sem);
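
The two hunks above are part of a tree-wide rename rather than an ext4-specific change: the open-coded atomic_inc(&inode->i_dio_count) and the old inode_dio_done() become the paired inode_dio_begin()/inode_dio_end() helpers. As recalled from include/linux/fs.h in this timeframe they are roughly the following; verify against the tree:

        static inline void inode_dio_begin(struct inode *inode)
        {
                atomic_inc(&inode->i_dio_count);
        }

        static inline void inode_dio_end(struct inode *inode)
        {
                if (atomic_dec_and_test(&inode->i_dio_count))
                        wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
        }
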
@@ -3097,6 +3202,11 @@ static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
        size_t count = iov_iter_count(iter);
        ssize_t ret;
 
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+       if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode))
+               return 0;
+#endif
+
        /*
         * If we are doing data journalling we don't support O_DIRECT
         */
@@ -3261,6 +3371,13 @@ static int __ext4_block_zero_page_range(handle_t *handle,
                /* Uhhuh. Read error. Complain and punt. */
                if (!buffer_uptodate(bh))
                        goto unlock;
+               if (S_ISREG(inode->i_mode) &&
+                   ext4_encrypted_inode(inode)) {
+                       /* We expect the key to be set. */
+                       BUG_ON(!ext4_has_encryption_key(inode));
+                       BUG_ON(blocksize != PAGE_CACHE_SIZE);
+                       WARN_ON_ONCE(ext4_decrypt_one(inode, page));
+               }
        }
        if (ext4_should_journal_data(inode)) {
                BUFFER_TRACE(bh, "get write access");
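
When __ext4_block_zero_page_range() zeroes a partial block (truncate or punch-hole) of an encrypted regular file, the block it just read from disk is ciphertext, so it is decrypted in place before being modified and written back. The BUG_ON()s record the current constraints: the key must already be attached to the inode, and the encryption code at this point only supports blocksize == PAGE_CACHE_SIZE.
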
@@ -4096,7 +4213,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
                inode->i_op = &ext4_dir_inode_operations;
                inode->i_fop = &ext4_dir_operations;
        } else if (S_ISLNK(inode->i_mode)) {
-               if (ext4_inode_is_fast_symlink(inode)) {
+               if (ext4_inode_is_fast_symlink(inode) &&
+                   !ext4_encrypted_inode(inode)) {
                        inode->i_op = &ext4_fast_symlink_inode_operations;
                        nd_terminate_link(ei->i_data, inode->i_size,
                                sizeof(ei->i_data) - 1);
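
A fast symlink keeps its target inline in i_data, and the fast-symlink inode_operations treat that buffer as a NUL-terminated plaintext string (which is what nd_terminate_link() enforces here). For an encrypted inode i_data holds ciphertext whose length differs from the visible target, so encrypted symlinks stay on the regular symlink path, which can decrypt. For reference, nd_terminate_link() is, as recalled from include/linux/namei.h:

        static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
        {
                ((char *) name)[min(len, maxlen)] = '\0';
        }
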
@@ -4521,7 +4639,7 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode)
  */
 int ext4_setattr(struct dentry *dentry, struct iattr *attr)
 {
-       struct inode *inode = dentry->d_inode;
+       struct inode *inode = d_inode(dentry);
        int error, rc = 0;
        int orphan = 0;
        const unsigned int ia_valid = attr->ia_valid;
@@ -4669,7 +4787,7 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
        struct inode *inode;
        unsigned long long delalloc_blocks;
 
-       inode = dentry->d_inode;
+       inode = d_inode(dentry);
        generic_fillattr(inode, stat);
 
        /*
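
The d_inode(dentry) conversions in ext4_setattr() and ext4_getattr() are part of the VFS-wide move away from open-coded dentry->d_inode loads. As recalled from include/linux/dcache.h, the accessor is simply:

        static inline struct inode *d_inode(const struct dentry *dentry)
        {
                return dentry->d_inode;
        }
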