Merge tag 'md-3.10-fixes' of git://neil.brown.name/md
author Linus Torvalds <torvalds@linux-foundation.org>
Thu, 13 Jun 2013 17:13:29 +0000 (10:13 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 13 Jun 2013 17:13:29 +0000 (10:13 -0700)
Pull md bugfixes from Neil Brown:
 "A few bugfixes for md

  Some tagged for -stable"

* tag 'md-3.10-fixes' of git://neil.brown.name/md:
  md/raid1,5,10: Disable WRITE SAME until a recovery strategy is in place
  md/raid1,raid10: use freeze_array in place of raise_barrier in various places.
  md/raid1: consider WRITE as successful only if at least one non-Faulty and non-rebuilding drive completed it.
  md: md_stop_writes() should always freeze recovery.

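The WRITE SAME fix is the mechanically simplest of the four: rather than teach resync/recovery how to replay a partially-completed WRITE SAME, each personality now advertises a zero limit so the block layer never sends one. A sketch of the pattern repeated in the raid1, raid10 and raid5 run() hunks below:

    /*
     * A max_write_same_sectors of 0 marks the queue as not supporting
     * REQ_WRITE_SAME at all, so such bios are rejected above md until
     * a recovery strategy is in place.
     */
    if (mddev->queue)
            blk_queue_max_write_same_sectors(mddev->queue, 0);
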
drivers/md/md.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c

diff --combined drivers/md/md.c
@@@ -197,12 -197,21 +197,12 @@@ void md_trim_bio(struct bio *bio, int o
        if (offset == 0 && size == bio->bi_size)
                return;
  
 -      bio->bi_sector += offset;
 -      bio->bi_size = size;
 -      offset <<= 9;
        clear_bit(BIO_SEG_VALID, &bio->bi_flags);
  
 -      while (bio->bi_idx < bio->bi_vcnt &&
 -             bio->bi_io_vec[bio->bi_idx].bv_len <= offset) {
 -              /* remove this whole bio_vec */
 -              offset -= bio->bi_io_vec[bio->bi_idx].bv_len;
 -              bio->bi_idx++;
 -      }
 -      if (bio->bi_idx < bio->bi_vcnt) {
 -              bio->bi_io_vec[bio->bi_idx].bv_offset += offset;
 -              bio->bi_io_vec[bio->bi_idx].bv_len -= offset;
 -      }
 +      bio_advance(bio, offset << 9);
 +
 +      bio->bi_size = size;
 +
        /* avoid any complications with bi_idx being non-zero */
        if (bio->bi_idx) {
                memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx,
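
For readers following the md_trim_bio() rewrite above: bio_advance(), new to the 3.10 block layer, consumes bytes from the front of a bio, stepping bi_sector and the io_vec cursor forward and shrinking bi_size, which is why the open-coded bvec walk collapses to two lines. The trim logic, restated:

    /* Keep 'size' sectors starting 'offset' sectors into the bio.
     * bio_advance() eats offset << 9 bytes from the front; writing
     * bi_size afterwards clips the tail.  Assumes offset and size lie
     * within the original bio. */
    bio_advance(bio, offset << 9);
    bio->bi_size = size;
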
@@@ -5268,8 -5277,8 +5268,8 @@@ static void md_clean(struct mddev *mdde
  
  static void __md_stop_writes(struct mddev *mddev)
  {
+       set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
        if (mddev->sync_thread) {
-               set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
                set_bit(MD_RECOVERY_INTR, &mddev->recovery);
                md_reap_sync_thread(mddev);
        }
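
This hunk is the "md_stop_writes() should always freeze recovery" fix: MD_RECOVERY_FROZEN is now set unconditionally instead of only when a sync thread exists. The assumed rationale is the window where no sync_thread is running yet, so the old code never froze recovery and md_check_recovery() could still start one after writes had been stopped. The new ordering, annotated:

    set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);  /* always freeze first */
    if (mddev->sync_thread) {
            /* a thread is already running: interrupt it and reap it */
            set_bit(MD_RECOVERY_INTR, &mddev->recovery);
            md_reap_sync_thread(mddev);
    }
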
@@@ -6665,13 -6674,15 +6665,13 @@@ static int md_open(struct block_device 
        return err;
  }
  
 -static int md_release(struct gendisk *disk, fmode_t mode)
 +static void md_release(struct gendisk *disk, fmode_t mode)
  {
        struct mddev *mddev = disk->private_data;
  
        BUG_ON(!mddev);
        atomic_dec(&mddev->openers);
        mddev_put(mddev);
 -
 -      return 0;
  }
  
  static int md_media_changed(struct gendisk *disk)
diff --combined drivers/md/raid1.c
@@@ -92,6 -92,7 +92,6 @@@ static void r1bio_pool_free(void *r1_bi
  static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
  {
        struct pool_info *pi = data;
 -      struct page *page;
        struct r1bio *r1_bio;
        struct bio *bio;
        int i, j;
                j = 1;
        while(j--) {
                bio = r1_bio->bios[j];
 -              for (i = 0; i < RESYNC_PAGES; i++) {
 -                      page = alloc_page(gfp_flags);
 -                      if (unlikely(!page))
 -                              goto out_free_pages;
 +              bio->bi_vcnt = RESYNC_PAGES;
  
 -                      bio->bi_io_vec[i].bv_page = page;
 -                      bio->bi_vcnt = i+1;
 -              }
 +              if (bio_alloc_pages(bio, gfp_flags))
 +                      goto out_free_bio;
        }
        /* If not user-requested, copy the page pointers to all bios */
        if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
  
        return r1_bio;
  
 -out_free_pages:
 -      for (j=0 ; j < pi->raid_disks; j++)
 -              for (i=0; i < r1_bio->bios[j]->bi_vcnt ; i++)
 -                      put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page);
 -      j = -1;
  out_free_bio:
        while (++j < pi->raid_disks)
                bio_put(r1_bio->bios[j]);
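
The page-allocation loop and its out_free_pages unwind could be dropped because bio_alloc_pages(), also new in 3.10, does both jobs: it attaches a freshly allocated page to each of the bio's bi_vcnt bvecs and, on failure, frees whatever it had already allocated before returning non-zero. Usage as in the hunk above:

    bio->bi_vcnt = RESYNC_PAGES;            /* how many pages to attach */
    if (bio_alloc_pages(bio, gfp_flags))    /* allocates all, or cleans up */
            goto out_free_bio;
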
@@@ -257,7 -267,7 +257,7 @@@ static void raid_end_bio_io(struct r1bi
                         (bio_data_dir(bio) == WRITE) ? "write" : "read",
                         (unsigned long long) bio->bi_sector,
                         (unsigned long long) bio->bi_sector +
 -                       (bio->bi_size >> 9) - 1);
 +                       bio_sectors(bio) - 1);
  
                call_bio_endio(r1_bio);
        }
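
Much of the mechanical churn in this combined diff converts open-coded bi_size arithmetic to accessors; their 3.10 definitions in include/linux/bio.h amount to:

    #define bio_sectors(bio)     ((bio)->bi_size >> 9)
    #define bio_end_sector(bio)  ((bio)->bi_sector + bio_sectors(bio))
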
@@@ -417,7 -427,17 +417,17 @@@ static void raid1_end_write_request(str
  
                r1_bio->bios[mirror] = NULL;
                to_put = bio;
-               set_bit(R1BIO_Uptodate, &r1_bio->state);
+               /*
+                * Do not set R1BIO_Uptodate if the current device is
+                * rebuilding or Faulty. This is because we cannot use
+                * such a device to properly read the data back (we could
+                * potentially use it if the current write fell before
+                * rdev->recovery_offset, but for simplicity we don't
+                * check this here).
+                */
+               if (test_bit(In_sync, &conf->mirrors[mirror].rdev->flags) &&
+                   !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags))
+                       set_bit(R1BIO_Uptodate, &r1_bio->state);
  
                /* Maybe we can clear some bad blocks. */
                if (is_badblock(conf->mirrors[mirror].rdev,
                                         " %llu-%llu\n",
                                         (unsigned long long) mbio->bi_sector,
                                         (unsigned long long) mbio->bi_sector +
 -                                       (mbio->bi_size >> 9) - 1);
 +                                       bio_sectors(mbio) - 1);
                                call_bio_endio(r1_bio);
                        }
                }
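
The hunk above is the heart of the "WRITE is successful only if at least one non-Faulty and non-rebuilding drive completed it" fix: R1BIO_Uptodate is now set only when an In_sync, non-Faulty rdev completes the write, so the master bio is failed unless some trustworthy leg acknowledged the data. A userspace sketch of the resulting rule (names illustrative, not kernel code):

    #include <stdbool.h>

    struct mirror_result {
            bool completed_ok;      /* device-level write completed */
            bool in_sync;           /* not still rebuilding */
            bool faulty;            /* marked failed */
    };

    /* A mirrored write may be reported as successful only if the data
     * can be read back from at least one trustworthy mirror. */
    static bool write_uptodate(const struct mirror_result *m, int n)
    {
            for (int i = 0; i < n; i++)
                    if (m[i].completed_ok && m[i].in_sync && !m[i].faulty)
                            return true;
            return false;
    }
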
@@@ -870,17 -890,17 +880,17 @@@ static void allow_barrier(struct r1con
        wake_up(&conf->wait_barrier);
  }
  
- static void freeze_array(struct r1conf *conf)
+ static void freeze_array(struct r1conf *conf, int extra)
  {
        /* stop syncio and normal IO and wait for everything to
         * go quiet.
         * We increment barrier and nr_waiting, and then
-        * wait until nr_pending match nr_queued+1
+        * wait until nr_pending matches nr_queued+extra
         * This is called in the context of one normal IO request
         * that has failed. Thus any sync request that might be pending
         * will be blocked by nr_pending, and we need to wait for
         * pending IO requests to complete or be queued for re-try.
-        * Thus the number queued (nr_queued) plus this request (1)
+        * Thus the number queued (nr_queued) plus this request (extra)
         * must match the number of pending IOs (nr_pending) before
         * we continue.
         */
        conf->barrier++;
        conf->nr_waiting++;
        wait_event_lock_irq_cmd(conf->wait_barrier,
-                               conf->nr_pending == conf->nr_queued+1,
+                               conf->nr_pending == conf->nr_queued+extra,
                                conf->resync_lock,
                                flush_pending_writes(conf));
        spin_unlock_irq(&conf->resync_lock);
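
The new 'extra' parameter encodes how many requests the caller itself holds: handle_read_error() passes 1 because the failed request it is servicing counts in nr_pending but will neither complete nor be queued while we wait, while the add/remove-disk and reshape paths pass 0 since they hold no request. A userspace analogue of the counting invariant (pthread primitives standing in for resync_lock and wait_barrier; the kernel version also flushes pending writes while waiting):

    #include <pthread.h>

    struct conf {
            pthread_mutex_t resync_lock;
            pthread_cond_t  wait_barrier;
            int barrier;            /* >0: new and resync I/O blocked */
            int nr_waiting;
            int nr_pending;         /* submitted, not yet retired */
            int nr_queued;          /* failed requests parked for retry */
    };

    static void freeze_array(struct conf *conf, int extra)
    {
            pthread_mutex_lock(&conf->resync_lock);
            conf->barrier++;
            conf->nr_waiting++;
            /* quiesced when every pending request is either parked on
             * the retry queue or held by the caller ('extra' of them) */
            while (conf->nr_pending != conf->nr_queued + extra)
                    pthread_cond_wait(&conf->wait_barrier, &conf->resync_lock);
            pthread_mutex_unlock(&conf->resync_lock);
    }

    static void unfreeze_array(struct conf *conf)
    {
            pthread_mutex_lock(&conf->resync_lock);
            conf->barrier--;
            conf->nr_waiting--;
            pthread_cond_broadcast(&conf->wait_barrier);
            pthread_mutex_unlock(&conf->resync_lock);
    }
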
@@@ -915,7 -935,7 +925,7 @@@ static void alloc_behind_pages(struct b
        if (unlikely(!bvecs))
                return;
  
 -      bio_for_each_segment(bvec, bio, i) {
 +      bio_for_each_segment_all(bvec, bio, i) {
                bvecs[i] = *bvec;
                bvecs[i].bv_page = alloc_page(GFP_NOIO);
                if (unlikely(!bvecs[i].bv_page))
@@@ -1013,7 -1033,7 +1023,7 @@@ static void make_request(struct mddev *
        md_write_start(mddev, bio); /* wait on superblock update early */
  
        if (bio_data_dir(bio) == WRITE &&
 -          bio->bi_sector + bio->bi_size/512 > mddev->suspend_lo &&
 +          bio_end_sector(bio) > mddev->suspend_lo &&
            bio->bi_sector < mddev->suspend_hi) {
                /* As the suspend_* range is controlled by
                 * userspace, we want an interruptible
                        flush_signals(current);
                        prepare_to_wait(&conf->wait_barrier,
                                        &w, TASK_INTERRUPTIBLE);
 -                      if (bio->bi_sector + bio->bi_size/512 <= mddev->suspend_lo ||
 +                      if (bio_end_sector(bio) <= mddev->suspend_lo ||
                            bio->bi_sector >= mddev->suspend_hi)
                                break;
                        schedule();
        r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
  
        r1_bio->master_bio = bio;
 -      r1_bio->sectors = bio->bi_size >> 9;
 +      r1_bio->sectors = bio_sectors(bio);
        r1_bio->state = 0;
        r1_bio->mddev = mddev;
        r1_bio->sector = bio->bi_sector;
@@@ -1122,7 -1142,7 +1132,7 @@@ read_again
                        r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
  
                        r1_bio->master_bio = bio;
 -                      r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
 +                      r1_bio->sectors = bio_sectors(bio) - sectors_handled;
                        r1_bio->state = 0;
                        r1_bio->mddev = mddev;
                        r1_bio->sector = bio->bi_sector + sectors_handled;
                        struct bio_vec *bvec;
                        int j;
  
 -                      /* Yes, I really want the '__' version so that
 -                       * we clear any unused pointer in the io_vec, rather
 -                       * than leave them unchanged.  This is important
 -                       * because when we come to free the pages, we won't
 -                       * know the original bi_idx, so we just free
 -                       * them all
 +                      /*
 +                       * We trimmed the bio, so _all is legit
                         */
 -                      __bio_for_each_segment(bvec, mbio, j, 0)
 +                      bio_for_each_segment_all(bvec, mbio, j)
                                bvec->bv_page = r1_bio->behind_bvecs[j].bv_page;
                        if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
                                atomic_inc(&r1_bio->behind_remaining);
        /* Mustn't call r1_bio_write_done before this next test,
         * as it could result in the bio being freed.
         */
 -      if (sectors_handled < (bio->bi_size >> 9)) {
 +      if (sectors_handled < bio_sectors(bio)) {
                r1_bio_write_done(r1_bio);
                /* We need another r1_bio.  It has already been counted
                 * in bio->bi_phys_segments
                 */
                r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
                r1_bio->master_bio = bio;
 -              r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
 +              r1_bio->sectors = bio_sectors(bio) - sectors_handled;
                r1_bio->state = 0;
                r1_bio->mddev = mddev;
                r1_bio->sector = bio->bi_sector + sectors_handled;
@@@ -1544,8 -1568,8 +1554,8 @@@ static int raid1_add_disk(struct mddev 
                 * we wait for all outstanding requests to complete.
                 */
                synchronize_sched();
-               raise_barrier(conf);
-               lower_barrier(conf);
+               freeze_array(conf, 0);
+               unfreeze_array(conf);
                clear_bit(Unmerged, &rdev->flags);
        }
        md_integrity_add_rdev(rdev, mddev);
@@@ -1595,11 -1619,11 +1605,11 @@@ static int raid1_remove_disk(struct mdd
                         */
                        struct md_rdev *repl =
                                conf->mirrors[conf->raid_disks + number].rdev;
-                       raise_barrier(conf);
+                       freeze_array(conf, 0);
                        clear_bit(Replacement, &repl->flags);
                        p->rdev = repl;
                        conf->mirrors[conf->raid_disks + number].rdev = NULL;
-                       lower_barrier(conf);
+                       unfreeze_array(conf);
                        clear_bit(WantReplacement, &rdev->flags);
                } else
                        clear_bit(WantReplacement, &rdev->flags);
@@@ -1853,7 -1877,7 +1863,7 @@@ static int process_checks(struct r1bio 
                struct bio *sbio = r1_bio->bios[i];
                int size;
  
 -              if (r1_bio->bios[i]->bi_end_io != end_sync_read)
 +              if (sbio->bi_end_io != end_sync_read)
                        continue;
  
                if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) {
                        continue;
                }
                /* fixup the bio for reuse */
 +              bio_reset(sbio);
                sbio->bi_vcnt = vcnt;
                sbio->bi_size = r1_bio->sectors << 9;
 -              sbio->bi_idx = 0;
 -              sbio->bi_phys_segments = 0;
 -              sbio->bi_flags &= ~(BIO_POOL_MASK - 1);
 -              sbio->bi_flags |= 1 << BIO_UPTODATE;
 -              sbio->bi_next = NULL;
                sbio->bi_sector = r1_bio->sector +
                        conf->mirrors[i].rdev->data_offset;
                sbio->bi_bdev = conf->mirrors[i].rdev->bdev;
 +              sbio->bi_end_io = end_sync_read;
 +              sbio->bi_private = r1_bio;
 +
                size = sbio->bi_size;
                for (j = 0; j < vcnt ; j++) {
                        struct bio_vec *bi;
                        else
                                bi->bv_len = size;
                        size -= PAGE_SIZE;
 -                      memcpy(page_address(bi->bv_page),
 -                             page_address(pbio->bi_io_vec[j].bv_page),
 -                             PAGE_SIZE);
                }
 +
 +              bio_copy_data(sbio, pbio);
        }
        return 0;
  }
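
Two more 3.10 block-layer helpers carry this hunk: bio_reset() returns a bio to its just-allocated state (keeping only the biovec allocation and pool bookkeeping), so bi_end_io, bi_private, size and sector must all be repopulated, and bio_copy_data() replaces the manual page-by-page memcpy, copying the good data from pbio into each sbio. The reuse pattern in brief:

    bio_reset(sbio);                        /* wipe state, keep the bvec table */
    sbio->bi_vcnt    = vcnt;
    sbio->bi_size    = r1_bio->sectors << 9;
    sbio->bi_sector  = r1_bio->sector + conf->mirrors[i].rdev->data_offset;
    sbio->bi_bdev    = conf->mirrors[i].rdev->bdev;
    sbio->bi_end_io  = end_sync_read;       /* cleared by bio_reset() */
    sbio->bi_private = r1_bio;
    bio_copy_data(sbio, pbio);              /* (dst, src) */
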
@@@ -1936,7 -1962,7 +1946,7 @@@ static void sync_request_write(struct m
                wbio->bi_rw = WRITE;
                wbio->bi_end_io = end_sync_write;
                atomic_inc(&r1_bio->remaining);
 -              md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9);
 +              md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
  
                generic_make_request(wbio);
        }
@@@ -2048,11 -2074,32 +2058,11 @@@ static void fix_read_error(struct r1con
        }
  }
  
 -static void bi_complete(struct bio *bio, int error)
 -{
 -      complete((struct completion *)bio->bi_private);
 -}
 -
 -static int submit_bio_wait(int rw, struct bio *bio)
 -{
 -      struct completion event;
 -      rw |= REQ_SYNC;
 -
 -      init_completion(&event);
 -      bio->bi_private = &event;
 -      bio->bi_end_io = bi_complete;
 -      submit_bio(rw, bio);
 -      wait_for_completion(&event);
 -
 -      return test_bit(BIO_UPTODATE, &bio->bi_flags);
 -}
 -
  static int narrow_write_error(struct r1bio *r1_bio, int i)
  {
        struct mddev *mddev = r1_bio->mddev;
        struct r1conf *conf = mddev->private;
        struct md_rdev *rdev = conf->mirrors[i].rdev;
 -      int vcnt, idx;
 -      struct bio_vec *vec;
  
        /* bio has the data to be written to device 'i' where
         * we just recently had a write error.
                   & ~(sector_t)(block_sectors - 1))
                - sector;
  
 -      if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
 -              vcnt = r1_bio->behind_page_count;
 -              vec = r1_bio->behind_bvecs;
 -              idx = 0;
 -              while (vec[idx].bv_page == NULL)
 -                      idx++;
 -      } else {
 -              vcnt = r1_bio->master_bio->bi_vcnt;
 -              vec = r1_bio->master_bio->bi_io_vec;
 -              idx = r1_bio->master_bio->bi_idx;
 -      }
        while (sect_to_write) {
                struct bio *wbio;
                if (sectors > sect_to_write)
                        sectors = sect_to_write;
                /* Write at 'sector' for 'sectors'*/
  
 -              wbio = bio_alloc_mddev(GFP_NOIO, vcnt, mddev);
 -              memcpy(wbio->bi_io_vec, vec, vcnt * sizeof(struct bio_vec));
 -              wbio->bi_sector = r1_bio->sector;
 +              if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
 +                      unsigned vcnt = r1_bio->behind_page_count;
 +                      struct bio_vec *vec = r1_bio->behind_bvecs;
 +
 +                      while (!vec->bv_page) {
 +                              vec++;
 +                              vcnt--;
 +                      }
 +
 +                      wbio = bio_alloc_mddev(GFP_NOIO, vcnt, mddev);
 +                      memcpy(wbio->bi_io_vec, vec, vcnt * sizeof(struct bio_vec));
 +
 +                      wbio->bi_vcnt = vcnt;
 +              } else {
 +                      wbio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
 +              }
 +
                wbio->bi_rw = WRITE;
 -              wbio->bi_vcnt = vcnt;
 +              wbio->bi_sector = r1_bio->sector;
                wbio->bi_size = r1_bio->sectors << 9;
 -              wbio->bi_idx = idx;
  
                md_trim_bio(wbio, sector - r1_bio->sector, sectors);
                wbio->bi_sector += rdev->data_offset;
@@@ -2195,7 -2240,7 +2205,7 @@@ static void handle_read_error(struct r1
         * frozen
         */
        if (mddev->ro == 0) {
-               freeze_array(conf);
+               freeze_array(conf, 1);
                fix_read_error(conf, r1_bio->read_disk,
                               r1_bio->sector, r1_bio->sectors);
                unfreeze_array(conf);
@@@ -2254,7 -2299,8 +2264,7 @@@ read_more
                        r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
  
                        r1_bio->master_bio = mbio;
 -                      r1_bio->sectors = (mbio->bi_size >> 9)
 -                                        - sectors_handled;
 +                      r1_bio->sectors = bio_sectors(mbio) - sectors_handled;
                        r1_bio->state = 0;
                        set_bit(R1BIO_ReadError, &r1_bio->state);
                        r1_bio->mddev = mddev;
@@@ -2428,7 -2474,18 +2438,7 @@@ static sector_t sync_request(struct mdd
        for (i = 0; i < conf->raid_disks * 2; i++) {
                struct md_rdev *rdev;
                bio = r1_bio->bios[i];
 -
 -              /* take from bio_init */
 -              bio->bi_next = NULL;
 -              bio->bi_flags &= ~(BIO_POOL_MASK-1);
 -              bio->bi_flags |= 1 << BIO_UPTODATE;
 -              bio->bi_rw = READ;
 -              bio->bi_vcnt = 0;
 -              bio->bi_idx = 0;
 -              bio->bi_phys_segments = 0;
 -              bio->bi_size = 0;
 -              bio->bi_end_io = NULL;
 -              bio->bi_private = NULL;
 +              bio_reset(bio);
  
                rdev = rcu_dereference(conf->mirrors[i].rdev);
                if (rdev == NULL ||
@@@ -2780,8 -2837,8 +2790,8 @@@ static int run(struct mddev *mddev
                return PTR_ERR(conf);
  
        if (mddev->queue)
-               blk_queue_max_write_same_sectors(mddev->queue,
-                                                mddev->chunk_sectors);
+               blk_queue_max_write_same_sectors(mddev->queue, 0);
        rdev_for_each(rdev, mddev) {
                if (!mddev->gendisk)
                        continue;
@@@ -2963,7 -3020,7 +2973,7 @@@ static int raid1_reshape(struct mddev *
                return -ENOMEM;
        }
  
-       raise_barrier(conf);
+       freeze_array(conf, 0);
  
        /* ok, everything is stopped */
        oldpool = conf->r1bio_pool;
        conf->raid_disks = mddev->raid_disks = raid_disks;
        mddev->delta_disks = 0;
  
-       lower_barrier(conf);
+       unfreeze_array(conf);
  
        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        md_wakeup_thread(mddev->thread);
diff --combined drivers/md/raid10.c
@@@ -490,7 -490,17 +490,17 @@@ static void raid10_end_write_request(st
                sector_t first_bad;
                int bad_sectors;
  
-               set_bit(R10BIO_Uptodate, &r10_bio->state);
+               /*
+                * Do not set R10BIO_Uptodate if the current device is
+                * rebuilding or Faulty. This is because we cannot use
+                * such a device to properly read the data back (we could
+                * potentially use it if the current write fell before
+                * rdev->recovery_offset, but for simplicity we don't
+                * check this here).
+                */
+               if (test_bit(In_sync, &rdev->flags) &&
+                   !test_bit(Faulty, &rdev->flags))
+                       set_bit(R10BIO_Uptodate, &r10_bio->state);
  
                /* Maybe we can clear some bad blocks. */
                if (is_badblock(rdev,
@@@ -1055,17 -1065,17 +1065,17 @@@ static void allow_barrier(struct r10con
        wake_up(&conf->wait_barrier);
  }
  
- static void freeze_array(struct r10conf *conf)
+ static void freeze_array(struct r10conf *conf, int extra)
  {
        /* stop syncio and normal IO and wait for everything to
         * go quiet.
         * We increment barrier and nr_waiting, and then
-        * wait until nr_pending match nr_queued+1
+        * wait until nr_pending matches nr_queued+extra
         * This is called in the context of one normal IO request
         * that has failed. Thus any sync request that might be pending
         * will be blocked by nr_pending, and we need to wait for
         * pending IO requests to complete or be queued for re-try.
-        * Thus the number queued (nr_queued) plus this request (1)
+        * Thus the number queued (nr_queued) plus this request (extra)
         * must match the number of pending IOs (nr_pending) before
         * we continue.
         */
        conf->barrier++;
        conf->nr_waiting++;
        wait_event_lock_irq_cmd(conf->wait_barrier,
-                               conf->nr_pending == conf->nr_queued+1,
+                               conf->nr_pending == conf->nr_queued+extra,
                                conf->resync_lock,
                                flush_pending_writes(conf));
  
@@@ -1174,13 -1184,14 +1184,13 @@@ static void make_request(struct mddev *
        /* If this request crosses a chunk boundary, we need to
         * split it.  This will only happen for 1 PAGE (or less) requests.
         */
 -      if (unlikely((bio->bi_sector & chunk_mask) + (bio->bi_size >> 9)
 +      if (unlikely((bio->bi_sector & chunk_mask) + bio_sectors(bio)
                     > chunk_sects
                     && (conf->geo.near_copies < conf->geo.raid_disks
                         || conf->prev.near_copies < conf->prev.raid_disks))) {
                struct bio_pair *bp;
                /* Sanity check -- queue functions should prevent this happening */
 -              if ((bio->bi_vcnt != 1 && bio->bi_vcnt != 0) ||
 -                  bio->bi_idx != 0)
 +              if (bio_segments(bio) > 1)
                        goto bad_map;
                /* This is a one page bio that upper layers
                 * refuse to split for us, so we need to split it.
        bad_map:
                printk("md/raid10:%s: make_request bug: can't convert block across chunks"
                       " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
 -                     (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
 +                     (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
  
                bio_io_error(bio);
                return;
         */
        wait_barrier(conf);
  
 -      sectors = bio->bi_size >> 9;
 +      sectors = bio_sectors(bio);
        while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
            bio->bi_sector < conf->reshape_progress &&
            bio->bi_sector + sectors > conf->reshape_progress) {
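
The split condition above reads more easily as a predicate: a request crosses a chunk boundary when its offset within the chunk plus its length exceeds the chunk size. As a standalone sketch (name illustrative; chunk sizes here are powers of two, so chunk_mask == chunk_sects - 1):

    static inline int crosses_chunk(sector_t sector, unsigned int nsect,
                                    unsigned int chunk_sects)
    {
            return ((sector & (chunk_sects - 1)) + nsect) > chunk_sects;
    }
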
@@@ -1330,7 -1341,8 +1340,7 @@@ read_again
                        r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
  
                        r10_bio->master_bio = bio;
 -                      r10_bio->sectors = ((bio->bi_size >> 9)
 -                                          - sectors_handled);
 +                      r10_bio->sectors = bio_sectors(bio) - sectors_handled;
                        r10_bio->state = 0;
                        r10_bio->mddev = mddev;
                        r10_bio->sector = bio->bi_sector + sectors_handled;
@@@ -1572,7 -1584,7 +1582,7 @@@ retry_write
         * after checking if we need to go around again.
         */
  
 -      if (sectors_handled < (bio->bi_size >> 9)) {
 +      if (sectors_handled < bio_sectors(bio)) {
                one_write_done(r10_bio);
                /* We need another r10_bio.  It has already been counted
                 * in bio->bi_phys_segments.
                r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
  
                r10_bio->master_bio = bio;
 -              r10_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
 +              r10_bio->sectors = bio_sectors(bio) - sectors_handled;
  
                r10_bio->mddev = mddev;
                r10_bio->sector = bio->bi_sector + sectors_handled;
@@@ -1837,8 -1849,8 +1847,8 @@@ static int raid10_add_disk(struct mdde
                 * we wait for all outstanding requests to complete.
                 */
                synchronize_sched();
-               raise_barrier(conf, 0);
-               lower_barrier(conf);
+               freeze_array(conf, 0);
+               unfreeze_array(conf);
                clear_bit(Unmerged, &rdev->flags);
        }
        md_integrity_add_rdev(rdev, mddev);
@@@ -2082,10 -2094,13 +2092,10 @@@ static void sync_request_write(struct m
                 * First we need to fixup bv_offset, bv_len and
                 * bi_vecs, as the read request might have corrupted these
                 */
 +              bio_reset(tbio);
 +
                tbio->bi_vcnt = vcnt;
                tbio->bi_size = r10_bio->sectors << 9;
 -              tbio->bi_idx = 0;
 -              tbio->bi_phys_segments = 0;
 -              tbio->bi_flags &= ~(BIO_POOL_MASK - 1);
 -              tbio->bi_flags |= 1 << BIO_UPTODATE;
 -              tbio->bi_next = NULL;
                tbio->bi_rw = WRITE;
                tbio->bi_private = r10_bio;
                tbio->bi_sector = r10_bio->devs[i].addr;
                d = r10_bio->devs[i].devnum;
                atomic_inc(&conf->mirrors[d].rdev->nr_pending);
                atomic_inc(&r10_bio->remaining);
 -              md_sync_acct(conf->mirrors[d].rdev->bdev, tbio->bi_size >> 9);
 +              md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
  
                tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
                tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
                d = r10_bio->devs[i].devnum;
                atomic_inc(&r10_bio->remaining);
                md_sync_acct(conf->mirrors[d].replacement->bdev,
 -                           tbio->bi_size >> 9);
 +                           bio_sectors(tbio));
                generic_make_request(tbio);
        }
  
@@@ -2254,13 -2269,13 +2264,13 @@@ static void recovery_request_write(stru
        wbio2 = r10_bio->devs[1].repl_bio;
        if (wbio->bi_end_io) {
                atomic_inc(&conf->mirrors[d].rdev->nr_pending);
 -              md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
 +              md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
                generic_make_request(wbio);
        }
        if (wbio2 && wbio2->bi_end_io) {
                atomic_inc(&conf->mirrors[d].replacement->nr_pending);
                md_sync_acct(conf->mirrors[d].replacement->bdev,
 -                           wbio2->bi_size >> 9);
 +                           bio_sectors(wbio2));
                generic_make_request(wbio2);
        }
  }
@@@ -2531,6 -2546,25 +2541,6 @@@ static void fix_read_error(struct r10co
        }
  }
  
 -static void bi_complete(struct bio *bio, int error)
 -{
 -      complete((struct completion *)bio->bi_private);
 -}
 -
 -static int submit_bio_wait(int rw, struct bio *bio)
 -{
 -      struct completion event;
 -      rw |= REQ_SYNC;
 -
 -      init_completion(&event);
 -      bio->bi_private = &event;
 -      bio->bi_end_io = bi_complete;
 -      submit_bio(rw, bio);
 -      wait_for_completion(&event);
 -
 -      return test_bit(BIO_UPTODATE, &bio->bi_flags);
 -}
 -
  static int narrow_write_error(struct r10bio *r10_bio, int i)
  {
        struct bio *bio = r10_bio->master_bio;
@@@ -2612,7 -2646,7 +2622,7 @@@ static void handle_read_error(struct md
        r10_bio->devs[slot].bio = NULL;
  
        if (mddev->ro == 0) {
-               freeze_array(conf);
+               freeze_array(conf, 1);
                fix_read_error(conf, mddev, r10_bio);
                unfreeze_array(conf);
        } else
@@@ -2671,7 -2705,8 +2681,7 @@@ read_more
                r10_bio = mempool_alloc(conf->r10bio_pool,
                                        GFP_NOIO);
                r10_bio->master_bio = mbio;
 -              r10_bio->sectors = (mbio->bi_size >> 9)
 -                      - sectors_handled;
 +              r10_bio->sectors = bio_sectors(mbio) - sectors_handled;
                r10_bio->state = 0;
                set_bit(R10BIO_ReadError,
                        &r10_bio->state);
@@@ -3108,7 -3143,6 +3118,7 @@@ static sector_t sync_request(struct mdd
                                        }
                                }
                                bio = r10_bio->devs[0].bio;
 +                              bio_reset(bio);
                                bio->bi_next = biolist;
                                biolist = bio;
                                bio->bi_private = r10_bio;
                                rdev = mirror->rdev;
                                if (!test_bit(In_sync, &rdev->flags)) {
                                        bio = r10_bio->devs[1].bio;
 +                                      bio_reset(bio);
                                        bio->bi_next = biolist;
                                        biolist = bio;
                                        bio->bi_private = r10_bio;
                                if (rdev == NULL || bio == NULL ||
                                    test_bit(Faulty, &rdev->flags))
                                        break;
 +                              bio_reset(bio);
                                bio->bi_next = biolist;
                                biolist = bio;
                                bio->bi_private = r10_bio;
                                r10_bio->devs[i].repl_bio->bi_end_io = NULL;
  
                        bio = r10_bio->devs[i].bio;
 -                      bio->bi_end_io = NULL;
 +                      bio_reset(bio);
                        clear_bit(BIO_UPTODATE, &bio->bi_flags);
                        if (conf->mirrors[d].rdev == NULL ||
                            test_bit(Faulty, &conf->mirrors[d].rdev->flags))
  
                        /* Need to set up for writing to the replacement */
                        bio = r10_bio->devs[i].repl_bio;
 +                      bio_reset(bio);
                        clear_bit(BIO_UPTODATE, &bio->bi_flags);
  
                        sector = r10_bio->devs[i].addr;
                }
        }
  
 -      for (bio = biolist; bio ; bio=bio->bi_next) {
 -
 -              bio->bi_flags &= ~(BIO_POOL_MASK - 1);
 -              if (bio->bi_end_io)
 -                      bio->bi_flags |= 1 << BIO_UPTODATE;
 -              bio->bi_vcnt = 0;
 -              bio->bi_idx = 0;
 -              bio->bi_phys_segments = 0;
 -              bio->bi_size = 0;
 -      }
 -
        nr_sectors = 0;
        if (sector_nr + max_sync < max_sector)
                max_sector = sector_nr + max_sync;
@@@ -3609,8 -3651,7 +3619,7 @@@ static int run(struct mddev *mddev
        if (mddev->queue) {
                blk_queue_max_discard_sectors(mddev->queue,
                                              mddev->chunk_sectors);
-               blk_queue_max_write_same_sectors(mddev->queue,
-                                                mddev->chunk_sectors);
+               blk_queue_max_write_same_sectors(mddev->queue, 0);
                blk_queue_io_min(mddev->queue, chunk_size);
                if (conf->geo.raid_disks % conf->geo.near_copies)
                        blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
@@@ -4379,6 -4420,7 +4388,6 @@@ read_more
        read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
        read_bio->bi_flags |= 1 << BIO_UPTODATE;
        read_bio->bi_vcnt = 0;
 -      read_bio->bi_idx = 0;
        read_bio->bi_size = 0;
        r10_bio->master_bio = read_bio;
        r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
                }
                if (!rdev2 || test_bit(Faulty, &rdev2->flags))
                        continue;
 +
 +              bio_reset(b);
                b->bi_bdev = rdev2->bdev;
                b->bi_sector = r10_bio->devs[s/2].addr + rdev2->new_data_offset;
                b->bi_private = r10_bio;
                b->bi_end_io = end_reshape_write;
                b->bi_rw = WRITE;
 -              b->bi_flags &= ~(BIO_POOL_MASK - 1);
 -              b->bi_flags |= 1 << BIO_UPTODATE;
                b->bi_next = blist;
 -              b->bi_vcnt = 0;
 -              b->bi_idx = 0;
 -              b->bi_size = 0;
                blist = b;
        }
  
diff --combined drivers/md/raid5.c
@@@ -90,7 -90,7 +90,7 @@@ static inline struct hlist_head *stripe
   */
  static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
  {
 -      int sectors = bio->bi_size >> 9;
 +      int sectors = bio_sectors(bio);
        if (bio->bi_sector + sectors < sector + STRIPE_SECTORS)
                return bio->bi_next;
        else
@@@ -569,6 -569,14 +569,6 @@@ static void ops_run_io(struct stripe_he
                bi = &sh->dev[i].req;
                rbi = &sh->dev[i].rreq; /* For writing to replacement */
  
 -              bi->bi_rw = rw;
 -              rbi->bi_rw = rw;
 -              if (rw & WRITE) {
 -                      bi->bi_end_io = raid5_end_write_request;
 -                      rbi->bi_end_io = raid5_end_write_request;
 -              } else
 -                      bi->bi_end_io = raid5_end_read_request;
 -
                rcu_read_lock();
                rrdev = rcu_dereference(conf->disks[i].replacement);
                smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */
  
                        set_bit(STRIPE_IO_STARTED, &sh->state);
  
 +                      bio_reset(bi);
                        bi->bi_bdev = rdev->bdev;
 +                      bi->bi_rw = rw;
 +                      bi->bi_end_io = (rw & WRITE)
 +                              ? raid5_end_write_request
 +                              : raid5_end_read_request;
 +                      bi->bi_private = sh;
 +
                        pr_debug("%s: for %llu schedule op %ld on disc %d\n",
                                __func__, (unsigned long long)sh->sector,
                                bi->bi_rw, i);
                        if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
                                bi->bi_rw |= REQ_FLUSH;
  
 -                      bi->bi_flags = 1 << BIO_UPTODATE;
 -                      bi->bi_idx = 0;
 +                      bi->bi_vcnt = 1;
                        bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
                        bi->bi_io_vec[0].bv_offset = 0;
                        bi->bi_size = STRIPE_SIZE;
 -                      bi->bi_next = NULL;
                        if (rrdev)
                                set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
  
  
                        set_bit(STRIPE_IO_STARTED, &sh->state);
  
 +                      bio_reset(rbi);
                        rbi->bi_bdev = rrdev->bdev;
 +                      rbi->bi_rw = rw;
 +                      BUG_ON(!(rw & WRITE));
 +                      rbi->bi_end_io = raid5_end_write_request;
 +                      rbi->bi_private = sh;
 +
                        pr_debug("%s: for %llu schedule op %ld on "
                                 "replacement disc %d\n",
                                __func__, (unsigned long long)sh->sector,
                        else
                                rbi->bi_sector = (sh->sector
                                                  + rrdev->data_offset);
 -                      rbi->bi_flags = 1 << BIO_UPTODATE;
 -                      rbi->bi_idx = 0;
 +                      rbi->bi_vcnt = 1;
                        rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
                        rbi->bi_io_vec[0].bv_offset = 0;
                        rbi->bi_size = STRIPE_SIZE;
 -                      rbi->bi_next = NULL;
                        if (conf->mddev->gendisk)
                                trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
                                                      rbi, disk_devt(conf->mddev->gendisk),
@@@ -2403,11 -2402,11 +2403,11 @@@ static int add_stripe_bio(struct stripe
        } else
                bip = &sh->dev[dd_idx].toread;
        while (*bip && (*bip)->bi_sector < bi->bi_sector) {
 -              if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
 +              if (bio_end_sector(*bip) > bi->bi_sector)
                        goto overlap;
                bip = & (*bip)->bi_next;
        }
 -      if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
 +      if (*bip && (*bip)->bi_sector < bio_end_sector(bi))
                goto overlap;
  
        BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
                     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
                             bi && bi->bi_sector <= sector;
                     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
 -                      if (bi->bi_sector + (bi->bi_size>>9) >= sector)
 -                              sector = bi->bi_sector + (bi->bi_size>>9);
 +                      if (bio_end_sector(bi) >= sector)
 +                              sector = bio_end_sector(bi);
                }
                if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
                        set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
@@@ -3850,7 -3849,7 +3850,7 @@@ static int in_chunk_boundary(struct mdd
  {
        sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
        unsigned int chunk_sectors = mddev->chunk_sectors;
 -      unsigned int bio_sectors = bio->bi_size >> 9;
 +      unsigned int bio_sectors = bio_sectors(bio);
  
        if (mddev->new_chunk_sectors < mddev->chunk_sectors)
                chunk_sectors = mddev->new_chunk_sectors;
@@@ -3942,7 -3941,7 +3942,7 @@@ static int bio_fits_rdev(struct bio *bi
  {
        struct request_queue *q = bdev_get_queue(bi->bi_bdev);
  
 -      if ((bi->bi_size>>9) > queue_max_sectors(q))
 +      if (bio_sectors(bi) > queue_max_sectors(q))
                return 0;
        blk_recount_segments(q, bi);
        if (bi->bi_phys_segments > queue_max_segments(q))
@@@ -3989,7 -3988,7 +3989,7 @@@ static int chunk_aligned_read(struct md
                                                    0,
                                                    &dd_idx, NULL);
  
 -      end_sector = align_bi->bi_sector + (align_bi->bi_size >> 9);
 +      end_sector = bio_end_sector(align_bi);
        rcu_read_lock();
        rdev = rcu_dereference(conf->disks[dd_idx].replacement);
        if (!rdev || test_bit(Faulty, &rdev->flags) ||
                align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
  
                if (!bio_fits_rdev(align_bi) ||
 -                  is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
 +                  is_badblock(rdev, align_bi->bi_sector, bio_sectors(align_bi),
                                &first_bad, &bad_sectors)) {
                        /* too big in some way, or has a known bad block */
                        bio_put(align_bi);
@@@ -4274,7 -4273,7 +4274,7 @@@ static void make_request(struct mddev *
        }
  
        logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
 -      last_sector = bi->bi_sector + (bi->bi_size>>9);
 +      last_sector = bio_end_sector(bi);
        bi->bi_next = NULL;
        bi->bi_phys_segments = 1;       /* over-loaded to count active stripes */
  
@@@ -4740,7 -4739,7 +4740,7 @@@ static int  retry_aligned_read(struct r
        logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
        sector = raid5_compute_sector(conf, logical_sector,
                                      0, &dd_idx, NULL);
 -      last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);
 +      last_sector = bio_end_sector(raid_bio);
  
        for (; logical_sector < last_sector;
             logical_sector += STRIPE_SECTORS,
@@@ -5466,7 -5465,7 +5466,7 @@@ static int run(struct mddev *mddev
                if (mddev->major_version == 0 &&
                    mddev->minor_version > 90)
                        rdev->recovery_offset = reshape_offset;
-                       
                if (rdev->recovery_offset < reshape_offset) {
                        /* We need to check old and new layout */
                        if (!only_parity(rdev->raid_disk,
                 */
                mddev->queue->limits.discard_zeroes_data = 0;
  
+               blk_queue_max_write_same_sectors(mddev->queue, 0);
                rdev_for_each(rdev, mddev) {
                        disk_stack_limits(mddev->gendisk, rdev->bdev,
                                          rdev->data_offset << 9);