zram: fix umount-reset_store-mount race condition
author     Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
           Thu, 12 Feb 2015 23:00:36 +0000 (15:00 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 13 Feb 2015 02:54:11 +0000 (18:54 -0800)
Ganesh Mahendran was the first to propose using bdev->bd_mutex to avoid
the ->bd_holders race condition:

        CPU0                            CPU1
umount /* zram->init_done is true */
reset_store()
bdev->bd_holders == 0                   mount
...                                     zram_make_request()
zram_reset_device()

However, his solution required a considerable amount of code movement,
which we can avoid.
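
With bdev->bd_mutex taken in reset_store(), the scenario above
serializes: the open/mount path acquires the same mutex before bumping
->bd_holders, so the holders check and the reset can no longer
interleave with a new mount.  A sketch of the resulting ordering (the
mount-side annotations are illustrative, not the exact block-layer
call chain):

        CPU0                            CPU1
reset_store()
mutex_lock(&bdev->bd_mutex)
bdev->bd_holders == 0                   mount
fsync_bdev(bdev)                        /* blocked on bd_mutex */
zram_reset_device()
set_capacity(zram->disk, 0)
mutex_unlock(&bdev->bd_mutex)
                                        /* proceeds, sees the reset device */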

Apart from using bdev->bd_mutex in reset_store(), this patch also
simplifies zram_reset_device().

zram_reset_device() takes a bool parameter, reset_capacity, which tells
it whether the disk capacity should be reset along with the device.
There are two zram_reset_device() callers:

-- zram_exit() passes reset_capacity=false
-- reset_store() passes reset_capacity=true

So we can move the reset_capacity-sensitive work out of
zram_reset_device() and perform it unconditionally in reset_store().
This also lets us drop the reset_capacity parameter from
zram_reset_device() and pass only the zram pointer.
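
For reference, a sketch of the resulting call sites (mirroring the
hunks below):

  reset_store():
          ...
          zram_reset_device(zram);
          set_capacity(zram->disk, 0);
          mutex_unlock(&bdev->bd_mutex);
          revalidate_disk(zram->disk);

  zram_exit():
          zram_reset_device(zram);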

Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Reported-by: Ganesh Mahendran <opensource.ganesh@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 0e07652..2607bd9 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -715,7 +715,7 @@ static void zram_bio_discard(struct zram *zram, u32 index,
        }
 }
 
-static void zram_reset_device(struct zram *zram, bool reset_capacity)
+static void zram_reset_device(struct zram *zram)
 {
        down_write(&zram->init_lock);
 
@@ -734,18 +734,7 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity)
        memset(&zram->stats, 0, sizeof(zram->stats));
 
        zram->disksize = 0;
-       if (reset_capacity)
-               set_capacity(zram->disk, 0);
-
        up_write(&zram->init_lock);
-
-       /*
-        * Revalidate disk out of the init_lock to avoid lockdep splat.
-        * It's okay because disk's capacity is protected by init_lock
-        * so that revalidate_disk always sees up-to-date capacity.
-        */
-       if (reset_capacity)
-               revalidate_disk(zram->disk);
 }
 
 static ssize_t disksize_store(struct device *dev,
@@ -818,6 +807,7 @@ static ssize_t reset_store(struct device *dev,
        if (!bdev)
                return -ENOMEM;
 
+       mutex_lock(&bdev->bd_mutex);
        /* Do not reset an active device! */
        if (bdev->bd_holders) {
                ret = -EBUSY;
@@ -835,12 +825,17 @@ static ssize_t reset_store(struct device *dev,
 
        /* Make sure all pending I/O is finished */
        fsync_bdev(bdev);
+       zram_reset_device(zram);
+       set_capacity(zram->disk, 0);
+
+       mutex_unlock(&bdev->bd_mutex);
+       revalidate_disk(zram->disk);
        bdput(bdev);
 
-       zram_reset_device(zram, true);
        return len;
 
 out:
+       mutex_unlock(&bdev->bd_mutex);
        bdput(bdev);
        return ret;
 }
@@ -1186,7 +1181,7 @@ static void __exit zram_exit(void)
                 * Shouldn't access zram->disk after destroy_device
                 * because destroy_device already released zram->disk.
                 */
-               zram_reset_device(zram, false);
+               zram_reset_device(zram);
        }
 
        unregister_blkdev(zram_major, "zram");