block: kill off q->flush_flags
author Jens Axboe <axboe@fb.com>
Wed, 13 Apr 2016 19:33:19 +0000 (13:33 -0600)
committer Jens Axboe <axboe@fb.com>
Wed, 13 Apr 2016 19:33:19 +0000 (13:33 -0600)
Now that we've converted everything to the newer block write cache
interface, kill off the queue flush_flags and queueable flush
entries.

Signed-off-by: Jens Axboe <axboe@fb.com>
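
For reference, a minimal sketch of the two sides of the conversion (the driver name "foo" and these call sites are illustrative only, not part of this patch): drivers advertise cache capabilities through blk_queue_write_cache(), and consumers read the resulting queue flags instead of q->flush_flags.

#include <linux/blkdev.h>

/*
 * Producer side: a hypothetical driver "foo" advertises its cache
 * capabilities through the write cache interface.  The helper takes
 * q->queue_lock and sets/clears QUEUE_FLAG_WC and QUEUE_FLAG_FUA.
 */
static void foo_configure_cache(struct request_queue *q, bool wb_cache,
				bool fua)
{
	blk_queue_write_cache(q, wb_cache, fua);
}

/*
 * Consumer side: call sites that used to test q->flush_flags now test
 * the queue flags directly, as the conversions in this patch do.
 */
static bool foo_needs_flush(struct request_queue *q)
{
	return test_bit(QUEUE_FLAG_WC, &q->queue_flags);
}

static bool foo_supports_fua(struct request_queue *q)
{
	return test_bit(QUEUE_FLAG_FUA, &q->queue_flags);
}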
block/blk-core.c
block/blk-flush.c
block/blk-settings.c
drivers/block/xen-blkback/xenbus.c
drivers/md/dm-table.c
drivers/md/raid5-cache.c
drivers/target/target_core_iblock.c
include/linux/blkdev.h

diff --git a/block/blk-core.c b/block/blk-core.c
index c502277..2475b1c 100644
@@ -1964,7 +1964,8 @@ generic_make_request_checks(struct bio *bio)
         * drivers without flush support don't have to worry
         * about them.
         */
-       if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
+       if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
+           !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
                bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
                if (!nr_sectors) {
                        err = 0;
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 9c423e5..b1c91d2 100644
@@ -95,17 +95,18 @@ enum {
 static bool blk_kick_flush(struct request_queue *q,
                           struct blk_flush_queue *fq);
 
-static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
+static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
 {
        unsigned int policy = 0;
 
        if (blk_rq_sectors(rq))
                policy |= REQ_FSEQ_DATA;
 
-       if (fflags & REQ_FLUSH) {
+       if (fflags & (1UL << QUEUE_FLAG_WC)) {
                if (rq->cmd_flags & REQ_FLUSH)
                        policy |= REQ_FSEQ_PREFLUSH;
-               if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
+               if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
+                   (rq->cmd_flags & REQ_FUA))
                        policy |= REQ_FSEQ_POSTFLUSH;
        }
        return policy;
@@ -384,7 +385,7 @@ static void mq_flush_data_end_io(struct request *rq, int error)
 void blk_insert_flush(struct request *rq)
 {
        struct request_queue *q = rq->q;
-       unsigned int fflags = q->flush_flags;   /* may change, cache */
+       unsigned long fflags = q->queue_flags;  /* may change, cache */
        unsigned int policy = blk_flush_policy(fflags, rq);
        struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
 
@@ -393,7 +394,7 @@ void blk_insert_flush(struct request *rq)
         * REQ_FLUSH and FUA for the driver.
         */
        rq->cmd_flags &= ~REQ_FLUSH;
-       if (!(fflags & REQ_FUA))
+       if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
                rq->cmd_flags &= ~REQ_FUA;
 
        /*
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 80d9327..f679ae1 100644
@@ -822,7 +822,12 @@ EXPORT_SYMBOL(blk_queue_update_dma_alignment);
 
 void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
 {
-       q->flush_not_queueable = !queueable;
+       spin_lock_irq(q->queue_lock);
+       if (queueable)
+               clear_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
+       else
+               set_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
+       spin_unlock_irq(q->queue_lock);
 }
 EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
 
@@ -837,16 +842,13 @@ EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
 void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
 {
        spin_lock_irq(q->queue_lock);
-       if (wc) {
+       if (wc)
                queue_flag_set(QUEUE_FLAG_WC, q);
-               q->flush_flags = REQ_FLUSH;
-       } else
+       else
                queue_flag_clear(QUEUE_FLAG_WC, q);
-       if (fua) {
-               if (wc)
-                       q->flush_flags |= REQ_FUA;
+       if (fua)
                queue_flag_set(QUEUE_FLAG_FUA, q);
-       } else
+       else
                queue_flag_clear(QUEUE_FLAG_FUA, q);
        spin_unlock_irq(q->queue_lock);
 }
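
As a usage sketch (hypothetical call site, not part of this patch): a driver whose device has a volatile write back cache but no FUA support would now register that with a single call, and the helper takes q->queue_lock itself.

#include <linux/blkdev.h>

static void example_setup_write_cache(struct request_queue *q)
{
	/* write back cache present, no FUA support */
	blk_queue_write_cache(q, true, false);
}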
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 26aa080..3355f1c 100644
@@ -477,7 +477,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
                vbd->type |= VDISK_REMOVABLE;
 
        q = bdev_get_queue(bdev);
-       if (q && q->flush_flags)
+       if (q && test_bit(QUEUE_FLAG_WC, &q->queue_flags))
                vbd->flush_support = true;
 
        if (q && blk_queue_secdiscard(q))
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 4b1ffc0..626a5ec 100644
@@ -1348,13 +1348,13 @@ static void dm_table_verify_integrity(struct dm_table *t)
 static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
                                sector_t start, sector_t len, void *data)
 {
-       unsigned flush = (*(unsigned *)data);
+       unsigned long flush = (unsigned long) data;
        struct request_queue *q = bdev_get_queue(dev->bdev);
 
-       return q && (q->flush_flags & flush);
+       return q && (q->queue_flags & flush);
 }
 
-static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
+static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
 {
        struct dm_target *ti;
        unsigned i = 0;
@@ -1375,7 +1375,7 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
                        return true;
 
                if (ti->type->iterate_devices &&
-                   ti->type->iterate_devices(ti, device_flush_capable, &flush))
+                   ti->type->iterate_devices(ti, device_flush_capable, (void *) flush))
                        return true;
        }
 
@@ -1518,9 +1518,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
        else
                queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 
-       if (dm_table_supports_flush(t, REQ_FLUSH)) {
+       if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
                wc = true;
-               if (dm_table_supports_flush(t, REQ_FUA))
+               if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
                        fua = true;
        }
        blk_queue_write_cache(q, wc, fua);
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 9531f5f..26f1497 100644
@@ -1188,6 +1188,7 @@ ioerr:
 
 int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
 {
+       struct request_queue *q = bdev_get_queue(rdev->bdev);
        struct r5l_log *log;
 
        if (PAGE_SIZE != 4096)
@@ -1197,7 +1198,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
                return -ENOMEM;
        log->rdev = rdev;
 
-       log->need_cache_flush = (rdev->bdev->bd_disk->queue->flush_flags != 0);
+       log->need_cache_flush = test_bit(QUEUE_FLAG_WC, &q->queue_flags) != 0;
 
        log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
                                       sizeof(rdev->mddev->uuid));
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 026a758..7c4efb4 100644
@@ -687,10 +687,10 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                 * Force writethrough using WRITE_FUA if a volatile write cache
                 * is not enabled, or if initiator set the Force Unit Access bit.
                 */
-               if (q->flush_flags & REQ_FUA) {
+               if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
                        if (cmd->se_cmd_flags & SCF_FUA)
                                rw = WRITE_FUA;
-                       else if (!(q->flush_flags & REQ_FLUSH))
+                       else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
                                rw = WRITE_FUA;
                        else
                                rw = WRITE;
@@ -836,7 +836,7 @@ static bool iblock_get_write_cache(struct se_device *dev)
        struct block_device *bd = ib_dev->ibd_bd;
        struct request_queue *q = bdev_get_queue(bd);
 
-       return q->flush_flags & REQ_FLUSH;
+       return test_bit(QUEUE_FLAG_WC, &q->queue_flags);
 }
 
 static const struct target_backend_ops iblock_ops = {
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index f3f232f..57c0859 100644
@@ -433,8 +433,6 @@ struct request_queue {
        /*
         * for flush operations
         */
-       unsigned int            flush_flags;
-       unsigned int            flush_not_queueable:1;
        struct blk_flush_queue  *fq;
 
        struct list_head        requeue_list;
@@ -493,6 +491,7 @@ struct request_queue {
 #define QUEUE_FLAG_POLL               22       /* IO polling enabled if set */
 #define QUEUE_FLAG_WC         23       /* Write back caching */
 #define QUEUE_FLAG_FUA        24       /* device supports FUA writes */
+#define QUEUE_FLAG_FLUSH_NQ    25      /* flush not queueable */
 
 #define QUEUE_FLAG_DEFAULT     ((1 << QUEUE_FLAG_IO_STAT) |            \
                                 (1 << QUEUE_FLAG_STACKABLE)    |       \
@@ -1365,7 +1364,7 @@ static inline unsigned int block_size(struct block_device *bdev)
 
 static inline bool queue_flush_queueable(struct request_queue *q)
 {
-       return !q->flush_not_queueable;
+       return !test_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
 }
 
 typedef struct {struct page *v;} Sector;