block: manipulate bio->bi_flags through helpers
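
This is a combined diff against drivers/md/raid5.c in cascardo/linux.git,
covering a set of related changes from the same period: bio_endio() loses
its error argument and the errno moves into the new bio->bi_error field,
raw bi_flags manipulation is replaced by accessor helpers, and the single
conf->wait_for_stripe queue is split into a wait_for_quiescent queue plus
one exclusive wait queue per hash lock. For context, the bi_flags helpers
named in the subject line look roughly like this on the block side
(sketched from include/linux/bio.h; shown here for reference, not part of
this diff):

    static inline bool bio_flagged(struct bio *bio, unsigned int bit)
    {
            return (bio->bi_flags & (1U << bit)) != 0;
    }

    static inline void bio_set_flag(struct bio *bio, unsigned int bit)
    {
            bio->bi_flags |= (1U << bit);
    }

    static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
    {
            bio->bi_flags &= ~(1U << bit);
    }

The chunk_aligned_read() hunk below switches to bio_clear_flag() for
exactly this reason.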
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index b6793d2..e3d4877 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -233,7 +233,7 @@ static void return_io(struct bio *return_bi)
                bi->bi_iter.bi_size = 0;
                trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
                                         bi, 0);
-               bio_endio(bi, 0);
+               bio_endio(bi);
                bi = return_bi;
        }
 }
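
This first hunk shows the completion convention used throughout the rest of
the file: bio_endio() no longer takes an error argument, and BIO_UPTODATE is
gone. A caller that needs to fail a bio now records the errno in bi_error
before completing it; a minimal sketch of both paths:

    /* success: bi_error stays 0 */
    bio_endio(bio);

    /* failure: store the errno, then complete */
    bio->bi_error = -EIO;
    bio_endio(bio);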
@@ -344,7 +344,8 @@ static void release_inactive_stripe_list(struct r5conf *conf,
                                         int hash)
 {
        int size;
-       bool do_wakeup = false;
+       unsigned long do_wakeup = 0;
+       int i = 0;
        unsigned long flags;
 
        if (hash == NR_STRIPE_HASH_LOCKS) {
@@ -365,15 +366,21 @@ static void release_inactive_stripe_list(struct r5conf *conf,
                            !list_empty(list))
                                atomic_dec(&conf->empty_inactive_list_nr);
                        list_splice_tail_init(list, conf->inactive_list + hash);
-                       do_wakeup = true;
+                       do_wakeup |= 1 << hash;
                        spin_unlock_irqrestore(conf->hash_locks + hash, flags);
                }
                size--;
                hash--;
        }
 
+       for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) {
+               if (do_wakeup & (1 << i))
+                       wake_up(&conf->wait_for_stripe[i]);
+       }
+
        if (do_wakeup) {
-               wake_up(&conf->wait_for_stripe);
+               if (atomic_read(&conf->active_stripes) == 0)
+                       wake_up(&conf->wait_for_quiescent);
                if (conf->retry_read_aligned)
                        md_wakeup_thread(conf->mddev->thread);
        }
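
Since NR_STRIPE_HASH_LOCKS is only 8, do_wakeup can become a bitmask: each
hash whose inactive list received stripes sets its bit while that hash lock
is held, and the per-hash wakeups are issued in a single pass afterwards.
Each wake_up() rouses at most one sleeper, because the waiters in the hunks
below queue themselves exclusively. The pattern in isolation:

    unsigned long do_wakeup = 0;

    do_wakeup |= 1 << hash;            /* under conf->hash_locks + hash */

    for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
            if (do_wakeup & (1 << i))
                    wake_up(&conf->wait_for_stripe[i]);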
@@ -667,15 +674,15 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
        spin_lock_irq(conf->hash_locks + hash);
 
        do {
-               wait_event_lock_irq(conf->wait_for_stripe,
+               wait_event_lock_irq(conf->wait_for_quiescent,
                                    conf->quiesce == 0 || noquiesce,
                                    *(conf->hash_locks + hash));
                sh = __find_stripe(conf, sector, conf->generation - previous);
                if (!sh) {
                        if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) {
                                sh = get_free_stripe(conf, hash);
-                               if (!sh && llist_empty(&conf->released_stripes) &&
-                                   !test_bit(R5_DID_ALLOC, &conf->cache_state))
+                               if (!sh && !test_bit(R5_DID_ALLOC,
+                                                    &conf->cache_state))
                                        set_bit(R5_ALLOC_MORE,
                                                &conf->cache_state);
                        }
@@ -684,14 +691,15 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
                        if (!sh) {
                                set_bit(R5_INACTIVE_BLOCKED,
                                        &conf->cache_state);
-                               wait_event_lock_irq(
-                                       conf->wait_for_stripe,
+                               wait_event_exclusive_cmd(
+                                       conf->wait_for_stripe[hash],
                                        !list_empty(conf->inactive_list + hash) &&
                                        (atomic_read(&conf->active_stripes)
                                         < (conf->max_nr_stripes * 3 / 4)
                                         || !test_bit(R5_INACTIVE_BLOCKED,
                                                      &conf->cache_state)),
-                                       *(conf->hash_locks + hash));
+                                       spin_unlock_irq(conf->hash_locks + hash),
+                                       spin_lock_irq(conf->hash_locks + hash));
                                clear_bit(R5_INACTIVE_BLOCKED,
                                          &conf->cache_state);
                        } else {
@@ -716,6 +724,9 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
                }
        } while (sh == NULL);
 
+       if (!list_empty(conf->inactive_list + hash))
+               wake_up(&conf->wait_for_stripe[hash]);
+
        spin_unlock_irq(conf->hash_locks + hash);
        return sh;
 }
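
Two things change here. get_active_stripe() now sleeps on the per-hash
queue as an exclusive waiter, explicitly dropping the hash spinlock around
the sleep and retaking it afterwards; and since an exclusive wakeup rouses
only one sleeper, the wake_up() added just before the unlock passes the
wakeup along to the next waiter whenever the inactive list is still
non-empty. wait_event_exclusive_cmd() was introduced alongside these raid5
changes; its shape is roughly (sketched from include/linux/wait.h):

    #define wait_event_exclusive_cmd(wq, condition, cmd1, cmd2)        \
    do {                                                               \
            if (condition)                                             \
                    break;                                             \
            __wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 1, 0,    \
                         cmd1; schedule(); cmd2);                      \
    } while (0)

The 1 after TASK_UNINTERRUPTIBLE is what queues the waiter with
WQ_FLAG_EXCLUSIVE. resize_stripes() below switches to the same macro.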
@@ -876,9 +887,9 @@ static int use_new_offset(struct r5conf *conf, struct stripe_head *sh)
 }
 
 static void
-raid5_end_read_request(struct bio *bi, int error);
+raid5_end_read_request(struct bio *bi);
 static void
-raid5_end_write_request(struct bio *bi, int error);
+raid5_end_write_request(struct bio *bi);
 
 static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 {
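
The forward declarations drop their error parameter to match the new
bio_end_io_t, which after this series is simply:

    typedef void (bio_end_io_t) (struct bio *);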
@@ -2177,7 +2188,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
        cnt = 0;
        list_for_each_entry(nsh, &newstripes, lru) {
                lock_device_hash_lock(conf, hash);
-               wait_event_cmd(conf->wait_for_stripe,
+               wait_event_exclusive_cmd(conf->wait_for_stripe[hash],
                                    !list_empty(conf->inactive_list + hash),
                                    unlock_device_hash_lock(conf, hash),
                                    lock_device_hash_lock(conf, hash));
@@ -2266,12 +2277,11 @@ static void shrink_stripes(struct r5conf *conf)
        conf->slab_cache = NULL;
 }
 
-static void raid5_end_read_request(struct bio * bi, int error)
+static void raid5_end_read_request(struct bio * bi)
 {
        struct stripe_head *sh = bi->bi_private;
        struct r5conf *conf = sh->raid_conf;
        int disks = sh->disks, i;
-       int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
        char b[BDEVNAME_SIZE];
        struct md_rdev *rdev = NULL;
        sector_t s;
@@ -2280,9 +2290,9 @@ static void raid5_end_read_request(struct bio * bi, int error)
                if (bi == &sh->dev[i].req)
                        break;
 
-       pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
+       pr_debug("end_read_request %llu/%d, count: %d, error %d.\n",
                (unsigned long long)sh->sector, i, atomic_read(&sh->count),
-               uptodate);
+               bi->bi_error);
        if (i == disks) {
                BUG();
                return;
@@ -2301,7 +2311,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
                s = sh->sector + rdev->new_data_offset;
        else
                s = sh->sector + rdev->data_offset;
-       if (uptodate) {
+       if (!bi->bi_error) {
                set_bit(R5_UPTODATE, &sh->dev[i].flags);
                if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
                        /* Note that this cannot happen on a
@@ -2389,13 +2399,12 @@ static void raid5_end_read_request(struct bio * bi, int error)
        release_stripe(sh);
 }
 
-static void raid5_end_write_request(struct bio *bi, int error)
+static void raid5_end_write_request(struct bio *bi)
 {
        struct stripe_head *sh = bi->bi_private;
        struct r5conf *conf = sh->raid_conf;
        int disks = sh->disks, i;
        struct md_rdev *uninitialized_var(rdev);
-       int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
        sector_t first_bad;
        int bad_sectors;
        int replacement = 0;
@@ -2418,23 +2427,23 @@ static void raid5_end_write_request(struct bio *bi, int error)
                        break;
                }
        }
-       pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
+       pr_debug("end_write_request %llu/%d, count %d, error: %d.\n",
                (unsigned long long)sh->sector, i, atomic_read(&sh->count),
-               uptodate);
+               bi->bi_error);
        if (i == disks) {
                BUG();
                return;
        }
 
        if (replacement) {
-               if (!uptodate)
+               if (bi->bi_error)
                        md_error(conf->mddev, rdev);
                else if (is_badblock(rdev, sh->sector,
                                     STRIPE_SECTORS,
                                     &first_bad, &bad_sectors))
                        set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
        } else {
-               if (!uptodate) {
+               if (bi->bi_error) {
                        set_bit(STRIPE_DEGRADED, &sh->state);
                        set_bit(WriteErrorSeen, &rdev->flags);
                        set_bit(R5_WriteError, &sh->dev[i].flags);
@@ -2455,7 +2464,7 @@ static void raid5_end_write_request(struct bio *bi, int error)
        }
        rdev_dec_pending(rdev, conf->mddev);
 
-       if (sh->batch_head && !uptodate && !replacement)
+       if (sh->batch_head && bi->bi_error && !replacement)
                set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state);
 
        if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
@@ -3096,7 +3105,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
                while (bi && bi->bi_iter.bi_sector <
                        sh->dev[i].sector + STRIPE_SECTORS) {
                        struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
-                       clear_bit(BIO_UPTODATE, &bi->bi_flags);
+
+                       bi->bi_error = -EIO;
                        if (!raid5_dec_bi_active_stripes(bi)) {
                                md_write_end(conf->mddev);
                                bi->bi_next = *return_bi;
@@ -3120,7 +3130,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
                while (bi && bi->bi_iter.bi_sector <
                       sh->dev[i].sector + STRIPE_SECTORS) {
                        struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
-                       clear_bit(BIO_UPTODATE, &bi->bi_flags);
+
+                       bi->bi_error = -EIO;
                        if (!raid5_dec_bi_active_stripes(bi)) {
                                md_write_end(conf->mddev);
                                bi->bi_next = *return_bi;
@@ -3145,7 +3156,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
                               sh->dev[i].sector + STRIPE_SECTORS) {
                                struct bio *nextbi =
                                        r5_next_bio(bi, sh->dev[i].sector);
-                               clear_bit(BIO_UPTODATE, &bi->bi_flags);
+
+                               bi->bi_error = -EIO;
                                if (!raid5_dec_bi_active_stripes(bi)) {
                                        bi->bi_next = *return_bi;
                                        *return_bi = bi;
@@ -4738,12 +4750,11 @@ static struct bio *remove_bio_from_retry(struct r5conf *conf)
  *  first).
  *  If the read failed..
  */
-static void raid5_align_endio(struct bio *bi, int error)
+static void raid5_align_endio(struct bio *bi)
 {
        struct bio* raid_bi  = bi->bi_private;
        struct mddev *mddev;
        struct r5conf *conf;
-       int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
        struct md_rdev *rdev;
 
        bio_put(bi);
@@ -4755,12 +4766,12 @@ static void raid5_align_endio(struct bio *bi, int error)
 
        rdev_dec_pending(rdev, conf->mddev);
 
-       if (!error && uptodate) {
+       if (!bi->bi_error) {
                trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev),
                                         raid_bi, 0);
-               bio_endio(raid_bi, 0);
+               bio_endio(raid_bi);
                if (atomic_dec_and_test(&conf->active_aligned_reads))
-                       wake_up(&conf->wait_for_stripe);
+                       wake_up(&conf->wait_for_quiescent);
                return;
        }
 
@@ -4839,7 +4850,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
                rcu_read_unlock();
                raid_bio->bi_next = (void*)rdev;
                align_bi->bi_bdev =  rdev->bdev;
-               __clear_bit(BIO_SEG_VALID, &align_bi->bi_flags);
+               bio_clear_flag(align_bi, BIO_SEG_VALID);
 
                if (!bio_fits_rdev(align_bi) ||
                    is_badblock(rdev, align_bi->bi_iter.bi_sector,
@@ -4855,7 +4866,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
                align_bi->bi_iter.bi_sector += rdev->data_offset;
 
                spin_lock_irq(&conf->device_lock);
-               wait_event_lock_irq(conf->wait_for_stripe,
+               wait_event_lock_irq(conf->wait_for_quiescent,
                                    conf->quiesce == 0,
                                    conf->device_lock);
                atomic_inc(&conf->active_aligned_reads);
@@ -5122,7 +5133,7 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
        remaining = raid5_dec_bi_active_stripes(bi);
        if (remaining == 0) {
                md_write_end(mddev);
-               bio_endio(bi, 0);
+               bio_endio(bi);
        }
 }
 
@@ -5286,7 +5297,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
                        release_stripe_plug(mddev, sh);
                } else {
                        /* cannot get stripe for read-ahead, just give-up */
-                       clear_bit(BIO_UPTODATE, &bi->bi_flags);
+                       bi->bi_error = -EIO;
                        break;
                }
        }
@@ -5300,7 +5311,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
 
                trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
                                         bi, 0);
-               bio_endio(bi, 0);
+               bio_endio(bi);
        }
 }
 
@@ -5696,10 +5707,10 @@ static int  retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
        if (remaining == 0) {
                trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev),
                                         raid_bio, 0);
-               bio_endio(raid_bio, 0);
+               bio_endio(raid_bio);
        }
        if (atomic_dec_and_test(&conf->active_aligned_reads))
-               wake_up(&conf->wait_for_stripe);
+               wake_up(&conf->wait_for_quiescent);
        return handled;
 }
 
@@ -6433,7 +6444,10 @@ static struct r5conf *setup_conf(struct mddev *mddev)
                goto abort;
        spin_lock_init(&conf->device_lock);
        seqcount_init(&conf->gen_lock);
-       init_waitqueue_head(&conf->wait_for_stripe);
+       init_waitqueue_head(&conf->wait_for_quiescent);
+       for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) {
+               init_waitqueue_head(&conf->wait_for_stripe[i]);
+       }
        init_waitqueue_head(&conf->wait_for_overlap);
        INIT_LIST_HEAD(&conf->handle_list);
        INIT_LIST_HEAD(&conf->hold_list);
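
setup_conf() initialises the new queues. The matching raid5.h change is not
part of this diff, but the struct r5conf fields it adds are implied by the
code above, roughly:

    /* in struct r5conf (drivers/md/raid5.h) */
    wait_queue_head_t       wait_for_quiescent;
    wait_queue_head_t       wait_for_stripe[NR_STRIPE_HASH_LOCKS];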
@@ -7466,7 +7480,7 @@ static void raid5_quiesce(struct mddev *mddev, int state)
                 * active stripes can drain
                 */
                conf->quiesce = 2;
-               wait_event_cmd(conf->wait_for_stripe,
+               wait_event_cmd(conf->wait_for_quiescent,
                                    atomic_read(&conf->active_stripes) == 0 &&
                                    atomic_read(&conf->active_aligned_reads) == 0,
                                    unlock_all_device_hash_locks_irq(conf),
@@ -7480,7 +7494,7 @@ static void raid5_quiesce(struct mddev *mddev, int state)
        case 0: /* re-enable writes */
                lock_all_device_hash_locks_irq(conf);
                conf->quiesce = 0;
-               wake_up(&conf->wait_for_stripe);
+               wake_up(&conf->wait_for_quiescent);
                wake_up(&conf->wait_for_overlap);
                unlock_all_device_hash_locks_irq(conf);
                break;
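
raid5_quiesce() now drains on wait_for_quiescent, keeping the quiesce and
unquiesce paths off the per-hash stripe queues entirely. wait_event_cmd(wq,
cond, cmd1, cmd2) runs cmd1 before every sleep and cmd2 after every wakeup,
which is what lets this code release all device hash locks for the duration
of each sleep; generically:

    wait_event_cmd(wq, condition,
                   spin_unlock_irq(&lock),  /* cmd1: release before sleeping */
                   spin_lock_irq(&lock));   /* cmd2: reacquire after wakeup */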