Merge branch 'mymd/for-next' into mymd/for-linus
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 8959e6d..d189e89 100644
@@ -806,7 +806,8 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
        dd_idx = 0;
        while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
                dd_idx++;
-       if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw)
+       if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw ||
+           bio_op(head->dev[dd_idx].towrite) != bio_op(sh->dev[dd_idx].towrite))
                goto unlock_out;
 
        if (head->batch_head) {
@@ -891,29 +892,28 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
        if (r5l_write_stripe(conf->log, sh) == 0)
                return;
        for (i = disks; i--; ) {
-               int rw;
+               int op, op_flags = 0;
                int replace_only = 0;
                struct bio *bi, *rbi;
                struct md_rdev *rdev, *rrdev = NULL;
 
                sh = head_sh;
                if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
+                       op = REQ_OP_WRITE;
                        if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
-                               rw = WRITE_FUA;
-                       else
-                               rw = WRITE;
+                               op_flags = WRITE_FUA;
                        if (test_bit(R5_Discard, &sh->dev[i].flags))
-                               rw |= REQ_DISCARD;
+                               op = REQ_OP_DISCARD;
                } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
-                       rw = READ;
+                       op = REQ_OP_READ;
                else if (test_and_clear_bit(R5_WantReplace,
                                            &sh->dev[i].flags)) {
-                       rw = WRITE;
+                       op = REQ_OP_WRITE;
                        replace_only = 1;
                } else
                        continue;
                if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags))
-                       rw |= REQ_SYNC;
+                       op_flags |= REQ_SYNC;
 
 again:
                bi = &sh->dev[i].req;
@@ -927,7 +927,7 @@ again:
                        rdev = rrdev;
                        rrdev = NULL;
                }
-               if (rw & WRITE) {
+               if (op_is_write(op)) {
                        if (replace_only)
                                rdev = NULL;
                        if (rdev == rrdev)
@@ -953,7 +953,7 @@ again:
                 * need to check for writes.  We never accept write errors
                 * on the replacement, so we don't need to check rrdev.
                 */
-               while ((rw & WRITE) && rdev &&
+               while (op_is_write(op) && rdev &&
                       test_bit(WriteErrorSeen, &rdev->flags)) {
                        sector_t first_bad;
                        int bad_sectors;
@@ -995,13 +995,13 @@ again:
 
                        bio_reset(bi);
                        bi->bi_bdev = rdev->bdev;
-                       bi->bi_rw = rw;
-                       bi->bi_end_io = (rw & WRITE)
+                       bio_set_op_attrs(bi, op, op_flags);
+                       bi->bi_end_io = op_is_write(op)
                                ? raid5_end_write_request
                                : raid5_end_read_request;
                        bi->bi_private = sh;
 
-                       pr_debug("%s: for %llu schedule op %ld on disc %d\n",
+                       pr_debug("%s: for %llu schedule op %d on disc %d\n",
                                __func__, (unsigned long long)sh->sector,
                                bi->bi_rw, i);
                        atomic_inc(&sh->count);
@@ -1027,7 +1027,7 @@ again:
                        /* If this is a discard request, set bi_vcnt to 0. We don't
                         * want to confuse SCSI because SCSI will replace the payload
                         */
-                       if (rw & REQ_DISCARD)
+                       if (op == REQ_OP_DISCARD)
                                bi->bi_vcnt = 0;
                        if (rrdev)
                                set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
@@ -1047,12 +1047,12 @@ again:
 
                        bio_reset(rbi);
                        rbi->bi_bdev = rrdev->bdev;
-                       rbi->bi_rw = rw;
-                       BUG_ON(!(rw & WRITE));
+                       bio_set_op_attrs(rbi, op, op_flags);
+                       BUG_ON(!op_is_write(op));
                        rbi->bi_end_io = raid5_end_write_request;
                        rbi->bi_private = sh;
 
-                       pr_debug("%s: for %llu schedule op %ld on "
+                       pr_debug("%s: for %llu schedule op %d on "
                                 "replacement disc %d\n",
                                __func__, (unsigned long long)sh->sector,
                                rbi->bi_rw, i);
@@ -1076,7 +1076,7 @@ again:
                         * If this is a discard request, set bi_vcnt to 0. We don't
                         * want to confuse SCSI because SCSI will replace the payload
                         */
-                       if (rw & REQ_DISCARD)
+                       if (op == REQ_OP_DISCARD)
                                rbi->bi_vcnt = 0;
                        if (conf->mddev->gendisk)
                                trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
@@ -1085,9 +1085,9 @@ again:
                        generic_make_request(rbi);
                }
                if (!rdev && !rrdev) {
-                       if (rw & WRITE)
+                       if (op_is_write(op))
                                set_bit(STRIPE_DEGRADED, &sh->state);
-                       pr_debug("skip op %ld on disc %d for sector %llu\n",
+                       pr_debug("skip op %d on disc %d for sector %llu\n",
                                bi->bi_rw, i, (unsigned long long)sh->sector);
                        clear_bit(R5_LOCKED, &sh->dev[i].flags);
                        set_bit(STRIPE_HANDLE, &sh->state);
@@ -1623,7 +1623,7 @@ again:
                                        set_bit(R5_WantFUA, &dev->flags);
                                if (wbi->bi_rw & REQ_SYNC)
                                        set_bit(R5_SyncIO, &dev->flags);
-                               if (wbi->bi_rw & REQ_DISCARD)
+                               if (bio_op(wbi) == REQ_OP_DISCARD)
                                        set_bit(R5_Discard, &dev->flags);
                                else {
                                        tx = async_copy_data(1, wbi, &dev->page,
@@ -3080,7 +3080,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
                        struct md_rdev *rdev;
                        rcu_read_lock();
                        rdev = rcu_dereference(conf->disks[i].rdev);
-                       if (rdev && test_bit(In_sync, &rdev->flags))
+                       if (rdev && test_bit(In_sync, &rdev->flags) &&
+                           !test_bit(Faulty, &rdev->flags))
                                atomic_inc(&rdev->nr_pending);
                        else
                                rdev = NULL;
@@ -3210,15 +3211,16 @@ handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
                /* During recovery devices cannot be removed, so
                 * locking and refcounting of rdevs is not needed
                 */
+               rcu_read_lock();
                for (i = 0; i < conf->raid_disks; i++) {
-                       struct md_rdev *rdev = conf->disks[i].rdev;
+                       struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
                        if (rdev
                            && !test_bit(Faulty, &rdev->flags)
                            && !test_bit(In_sync, &rdev->flags)
                            && !rdev_set_badblocks(rdev, sh->sector,
                                                   STRIPE_SECTORS, 0))
                                abort = 1;
-                       rdev = conf->disks[i].replacement;
+                       rdev = rcu_dereference(conf->disks[i].replacement);
                        if (rdev
                            && !test_bit(Faulty, &rdev->flags)
                            && !test_bit(In_sync, &rdev->flags)
@@ -3226,6 +3228,7 @@ handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
                                                   STRIPE_SECTORS, 0))
                                abort = 1;
                }
+               rcu_read_unlock();
                if (abort)
                        conf->recovery_disabled =
                                conf->mddev->recovery_disabled;
@@ -3237,15 +3240,16 @@ static int want_replace(struct stripe_head *sh, int disk_idx)
 {
        struct md_rdev *rdev;
        int rv = 0;
-       /* Doing recovery so rcu locking not required */
-       rdev = sh->raid_conf->disks[disk_idx].replacement;
+
+       rcu_read_lock();
+       rdev = rcu_dereference(sh->raid_conf->disks[disk_idx].replacement);
        if (rdev
            && !test_bit(Faulty, &rdev->flags)
            && !test_bit(In_sync, &rdev->flags)
            && (rdev->recovery_offset <= sh->sector
                || rdev->mddev->recovery_cp <= sh->sector))
                rv = 1;
-
+       rcu_read_unlock();
        return rv;
 }
 
@@ -3600,7 +3604,7 @@ static void handle_stripe_dirtying(struct r5conf *conf,
        pr_debug("for sector %llu, rmw=%d rcw=%d\n",
                (unsigned long long)sh->sector, rmw, rcw);
        set_bit(STRIPE_HANDLE, &sh->state);
-       if ((rmw < rcw || (rmw == rcw && conf->rmw_level == PARITY_ENABLE_RMW)) && rmw > 0) {
+       if ((rmw < rcw || (rmw == rcw && conf->rmw_level == PARITY_PREFER_RMW)) && rmw > 0) {
                /* prefer read-modify-write, but need to get some data */
                if (conf->mddev->queue)
                        blk_add_trace_msg(conf->mddev->queue,
@@ -3627,7 +3631,7 @@ static void handle_stripe_dirtying(struct r5conf *conf,
                        }
                }
        }
-       if ((rcw < rmw || (rcw == rmw && conf->rmw_level != PARITY_ENABLE_RMW)) && rcw > 0) {
+       if ((rcw < rmw || (rcw == rmw && conf->rmw_level != PARITY_PREFER_RMW)) && rcw > 0) {
                /* want reconstruct write, but need to get some data */
                int qread = 0;
                rcw = 0;
@@ -5150,7 +5154,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
        DEFINE_WAIT(w);
        bool do_prepare;
 
-       if (unlikely(bi->bi_rw & REQ_FLUSH)) {
+       if (unlikely(bi->bi_rw & REQ_PREFLUSH)) {
                int ret = r5l_handle_flush_request(conf->log, bi);
 
                if (ret == 0)
@@ -5176,7 +5180,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
                        return;
        }
 
-       if (unlikely(bi->bi_rw & REQ_DISCARD)) {
+       if (unlikely(bio_op(bi) == REQ_OP_DISCARD)) {
                make_discard_request(mddev, bi);
                return;
        }
@@ -5233,7 +5237,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
                        (unsigned long long)logical_sector);
 
                sh = raid5_get_active_stripe(conf, new_sector, previous,
-                                      (bi->bi_rw&RWA_MASK), 0);
+                                      (bi->bi_rw & REQ_RAHEAD), 0);
                if (sh) {
                        if (unlikely(previous)) {
                                /* expansion might have moved on while waiting for a
@@ -7066,10 +7070,12 @@ static void raid5_status(struct seq_file *seq, struct mddev *mddev)
        seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
                conf->chunk_sectors / 2, mddev->layout);
        seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
-       for (i = 0; i < conf->raid_disks; i++)
-               seq_printf (seq, "%s",
-                              conf->disks[i].rdev &&
-                              test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
+       rcu_read_lock();
+       for (i = 0; i < conf->raid_disks; i++) {
+               struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
+               seq_printf (seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
+       }
+       rcu_read_unlock();
        seq_printf (seq, "]");
 }
 
@@ -7191,12 +7197,15 @@ static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
                goto abort;
        }
        *rdevp = NULL;
-       synchronize_rcu();
-       if (atomic_read(&rdev->nr_pending)) {
-               /* lost the race, try later */
-               err = -EBUSY;
-               *rdevp = rdev;
-       } else if (p->replacement) {
+       if (!test_bit(RemoveSynchronized, &rdev->flags)) {
+               synchronize_rcu();
+               if (atomic_read(&rdev->nr_pending)) {
+                       /* lost the race, try later */
+                       err = -EBUSY;
+                       *rdevp = rdev;
+               }
+       }
+       if (p->replacement) {
                /* We must have just cleared 'rdev' */
                p->rdev = p->replacement;
                clear_bit(Replacement, &p->replacement->flags);