blk-mq: turn hctx->run_work into a regular work struct
diff --git a/block/blk-core.c b/block/blk-core.c
index a687e9c..34ff808 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -288,7 +288,7 @@ void blk_sync_queue(struct request_queue *q)
                int i;
 
                queue_for_each_hw_ctx(q, hctx, i) {
-                       cancel_delayed_work_sync(&hctx->run_work);
+                       cancel_work_sync(&hctx->run_work);
                        cancel_delayed_work_sync(&hctx->delay_work);
                }
        } else {
@@ -515,7 +515,9 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
 
 void blk_set_queue_dying(struct request_queue *q)
 {
-       queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+       spin_lock_irq(q->queue_lock);
+       queue_flag_set(QUEUE_FLAG_DYING, q);
+       spin_unlock_irq(q->queue_lock);
 
        if (q->mq_ops)
                blk_mq_wake_waiters(q);
@@ -1029,7 +1031,7 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
         * Flush requests do not use the elevator so skip initialization.
         * This allows a request to share the flush and elevator data.
         */
-       if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA))
+       if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA))
                return false;
 
        return true;
@@ -1504,7 +1506,7 @@ EXPORT_SYMBOL_GPL(blk_add_request_payload);
 bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
                            struct bio *bio)
 {
-       const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
+       const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
 
        if (!ll_back_merge_fn(q, req, bio))
                return false;
@@ -1526,7 +1528,7 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
 bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
                             struct bio *bio)
 {
-       const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
+       const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
 
        if (!ll_front_merge_fn(q, req, bio))
                return false;
@@ -1648,8 +1650,8 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 {
        req->cmd_type = REQ_TYPE_FS;
 
-       req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
-       if (bio->bi_rw & REQ_RAHEAD)
+       req->cmd_flags |= bio->bi_opf & REQ_COMMON_MASK;
+       if (bio->bi_opf & REQ_RAHEAD)
                req->cmd_flags |= REQ_FAILFAST_MASK;
 
        req->errors = 0;
@@ -1660,7 +1662,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 
 static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
-       const bool sync = !!(bio->bi_rw & REQ_SYNC);
+       const bool sync = !!(bio->bi_opf & REQ_SYNC);
        struct blk_plug *plug;
        int el_ret, rw_flags = 0, where = ELEVATOR_INSERT_SORT;
        struct request *req;
@@ -1681,7 +1683,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
                return BLK_QC_T_NONE;
        }
 
-       if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) {
+       if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) {
                spin_lock_irq(q->queue_lock);
                where = ELEVATOR_INSERT_FLUSH;
                goto get_rq;
@@ -1728,7 +1730,7 @@ get_rq:
        /*
         * Add in META/PRIO flags, if set, before we get to the IO scheduler
         */
-       rw_flags |= (bio->bi_rw & (REQ_META | REQ_PRIO));
+       rw_flags |= (bio->bi_opf & (REQ_META | REQ_PRIO));
 
        /*
         * Grab a free request. This might sleep but can not fail.
@@ -1805,7 +1807,7 @@ static void handle_bad_sector(struct bio *bio)
        printk(KERN_INFO "attempt to access beyond end of device\n");
        printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
                        bdevname(bio->bi_bdev, b),
-                       bio->bi_rw,
+                       bio->bi_opf,
                        (unsigned long long)bio_end_sector(bio),
                        (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
 }
@@ -1918,9 +1920,9 @@ generic_make_request_checks(struct bio *bio)
         * drivers without flush support don't have to worry
         * about them.
         */
-       if ((bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) &&
+       if ((bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) &&
            !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
-               bio->bi_rw &= ~(REQ_PREFLUSH | REQ_FUA);
+               bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
                if (!nr_sectors) {
                        err = 0;
                        goto end_io;
@@ -2219,7 +2221,7 @@ unsigned int blk_rq_err_bytes(const struct request *rq)
         * one.
         */
        for (bio = rq->bio; bio; bio = bio->bi_next) {
-               if ((bio->bi_rw & ff) != ff)
+               if ((bio->bi_opf & ff) != ff)
                        break;
                bytes += bio->bi_iter.bi_size;
        }
@@ -2630,7 +2632,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
        /* mixed attributes always follow the first bio */
        if (req->cmd_flags & REQ_MIXED_MERGE) {
                req->cmd_flags &= ~REQ_FAILFAST_MASK;
-               req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
+               req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
        }
 
        /*
@@ -3095,6 +3097,12 @@ int kblockd_schedule_work(struct work_struct *work)
 }
 EXPORT_SYMBOL(kblockd_schedule_work);
 
+int kblockd_schedule_work_on(int cpu, struct work_struct *work)
+{
+       return queue_work_on(cpu, kblockd_workqueue, work);
+}
+EXPORT_SYMBOL(kblockd_schedule_work_on);
+
 int kblockd_schedule_delayed_work(struct delayed_work *dwork,
                                  unsigned long delay)
 {