1 /*
2  * raid1.c : Multiple Devices driver for Linux
3  *
4  * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
5  *
6  * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
7  *
8  * RAID-1 management functions.
9  *
10  * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
11  *
12  * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
13  * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
14  *
15  * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
16  * bitmapped intelligence in resync:
17  *
18  *      - bitmap marked during normal i/o
19  *      - bitmap used to skip nondirty blocks during sync
20  *
21  * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
22  * - persistent bitmap code
23  *
24  * This program is free software; you can redistribute it and/or modify
25  * it under the terms of the GNU General Public License as published by
26  * the Free Software Foundation; either version 2, or (at your option)
27  * any later version.
28  *
29  * You should have received a copy of the GNU General Public License
30  * (for example /usr/src/linux/COPYING); if not, write to the Free
31  * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
32  */
33
34 #include <linux/slab.h>
35 #include <linux/delay.h>
36 #include <linux/blkdev.h>
37 #include <linux/module.h>
38 #include <linux/seq_file.h>
39 #include <linux/ratelimit.h>
40 #include "md.h"
41 #include "raid1.h"
42 #include "bitmap.h"
43
44 /*
45  * Number of guaranteed r1bios in case of extreme VM load:
46  */
47 #define NR_RAID1_BIOS 256
48
49 /* When we get a read error on a read-only array, we redirect to another
50  * device without failing the first device or trying to over-write to
51  * correct the read error.  To keep track of bad blocks on a per-bio
52  * level, we store IO_BLOCKED in the appropriate 'bios' pointer
53  */
54 #define IO_BLOCKED ((struct bio *)1)
55 /* When we successfully write to a known bad-block, we need to remove the
56  * bad-block marking which must be done from process context.  So we record
57  * the success by setting r1_bio->bios[n] to IO_MADE_GOOD
58  */
59 #define IO_MADE_GOOD ((struct bio *)2)
60
61 #define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
62
63 /* When there are this many requests queued to be written by
64  * the raid1 thread, we become 'congested' to provide back-pressure
65  * for writeback.
66  */
67 static int max_queued_requests = 1024;
68
69 static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
70                           sector_t bi_sector);
71 static void lower_barrier(struct r1conf *conf);
72
73 static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
74 {
75         struct pool_info *pi = data;
76         int size = offsetof(struct r1bio, bios[pi->raid_disks]);
77
78         /* allocate an r1bio with room for raid_disks entries in the bios array */
79         return kzalloc(size, gfp_flags);
80 }
81
82 static void r1bio_pool_free(void *r1_bio, void *data)
83 {
84         kfree(r1_bio);
85 }
86
87 #define RESYNC_BLOCK_SIZE (64*1024)
88 #define RESYNC_DEPTH 32
89 #define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
90 #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
91 #define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
92 #define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
93 #define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
94 #define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
95 #define NEXT_NORMALIO_DISTANCE (3 * RESYNC_WINDOW_SECTORS)
96
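/*
 * Allocate an r1bio plus the bios and pages used for resync/recovery.
 * Normally only the first bio gets its own data pages and the others share
 * them; for a user-requested check/repair every bio gets private pages so
 * the copies can be compared.
 */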
97 static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
98 {
99         struct pool_info *pi = data;
100         struct r1bio *r1_bio;
101         struct bio *bio;
102         int need_pages;
103         int i, j;
104
105         r1_bio = r1bio_pool_alloc(gfp_flags, pi);
106         if (!r1_bio)
107                 return NULL;
108
109         /*
110          * Allocate bios : 1 for reading, n-1 for writing
111          */
112         for (j = pi->raid_disks ; j-- ; ) {
113                 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
114                 if (!bio)
115                         goto out_free_bio;
116                 r1_bio->bios[j] = bio;
117         }
118         /*
119          * Allocate RESYNC_PAGES data pages and attach them to
120          * the first bio.
121          * If this is a user-requested check/repair, allocate
122          * RESYNC_PAGES for each bio.
123          */
124         if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
125                 need_pages = pi->raid_disks;
126         else
127                 need_pages = 1;
128         for (j = 0; j < need_pages; j++) {
129                 bio = r1_bio->bios[j];
130                 bio->bi_vcnt = RESYNC_PAGES;
131
132                 if (bio_alloc_pages(bio, gfp_flags))
133                         goto out_free_pages;
134         }
135         /* If not a user-requested check/repair, copy the page pointers to all bios */
136         if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
137                 for (i=0; i<RESYNC_PAGES ; i++)
138                         for (j=1; j<pi->raid_disks; j++)
139                                 r1_bio->bios[j]->bi_io_vec[i].bv_page =
140                                         r1_bio->bios[0]->bi_io_vec[i].bv_page;
141         }
142
143         r1_bio->master_bio = NULL;
144
145         return r1_bio;
146
147 out_free_pages:
148         while (--j >= 0) {
149                 struct bio_vec *bv;
150
151                 bio_for_each_segment_all(bv, r1_bio->bios[j], i)
152                         __free_page(bv->bv_page);
153         }
154
155 out_free_bio:
156         while (++j < pi->raid_disks)
157                 bio_put(r1_bio->bios[j]);
158         r1bio_pool_free(r1_bio, data);
159         return NULL;
160 }
161
162 static void r1buf_pool_free(void *__r1_bio, void *data)
163 {
164         struct pool_info *pi = data;
165         int i,j;
166         struct r1bio *r1bio = __r1_bio;
167
168         for (i = 0; i < RESYNC_PAGES; i++)
169                 for (j = pi->raid_disks; j-- ;) {
170                         if (j == 0 ||
171                             r1bio->bios[j]->bi_io_vec[i].bv_page !=
172                             r1bio->bios[0]->bi_io_vec[i].bv_page)
173                                 safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
174                 }
175         for (i=0 ; i < pi->raid_disks; i++)
176                 bio_put(r1bio->bios[i]);
177
178         r1bio_pool_free(r1bio, data);
179 }
180
181 static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
182 {
183         int i;
184
185         for (i = 0; i < conf->raid_disks * 2; i++) {
186                 struct bio **bio = r1_bio->bios + i;
187                 if (!BIO_SPECIAL(*bio))
188                         bio_put(*bio);
189                 *bio = NULL;
190         }
191 }
192
193 static void free_r1bio(struct r1bio *r1_bio)
194 {
195         struct r1conf *conf = r1_bio->mddev->private;
196
197         put_all_bios(conf, r1_bio);
198         mempool_free(r1_bio, conf->r1bio_pool);
199 }
200
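/*
 * Return a resync/recovery r1bio to the r1buf pool, dropping the rdev
 * references it holds and lowering the resync barrier.
 */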
201 static void put_buf(struct r1bio *r1_bio)
202 {
203         struct r1conf *conf = r1_bio->mddev->private;
204         int i;
205
206         for (i = 0; i < conf->raid_disks * 2; i++) {
207                 struct bio *bio = r1_bio->bios[i];
208                 if (bio->bi_end_io)
209                         rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
210         }
211
212         mempool_free(r1_bio, conf->r1buf_pool);
213
214         lower_barrier(conf);
215 }
216
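/*
 * Queue an r1bio on the retry list and wake up raid1d so the retry is
 * handled in process context.
 */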
217 static void reschedule_retry(struct r1bio *r1_bio)
218 {
219         unsigned long flags;
220         struct mddev *mddev = r1_bio->mddev;
221         struct r1conf *conf = mddev->private;
222
223         spin_lock_irqsave(&conf->device_lock, flags);
224         list_add(&r1_bio->retry_list, &conf->retry_list);
225         conf->nr_queued ++;
226         spin_unlock_irqrestore(&conf->device_lock, flags);
227
228         wake_up(&conf->wait_barrier);
229         md_wakeup_thread(mddev->thread);
230 }
231
232 /*
233  * raid_end_bio_io() is called when we have finished servicing a mirrored
234  * operation and are ready to return a success/failure code to the buffer
235  * cache layer.
236  */
237 static void call_bio_endio(struct r1bio *r1_bio)
238 {
239         struct bio *bio = r1_bio->master_bio;
240         int done;
241         struct r1conf *conf = r1_bio->mddev->private;
242         sector_t start_next_window = r1_bio->start_next_window;
243         sector_t bi_sector = bio->bi_iter.bi_sector;
244
245         if (bio->bi_phys_segments) {
246                 unsigned long flags;
247                 spin_lock_irqsave(&conf->device_lock, flags);
248                 bio->bi_phys_segments--;
249                 done = (bio->bi_phys_segments == 0);
250                 spin_unlock_irqrestore(&conf->device_lock, flags);
251                 /*
252                  * make_request() might be waiting for
253                  * bi_phys_segments to decrease
254                  */
255                 wake_up(&conf->wait_barrier);
256         } else
257                 done = 1;
258
259         if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
260                 bio->bi_error = -EIO;
261
262         if (done) {
263                 bio_endio(bio);
264                 /*
265                  * Wake up any possible resync thread that waits for the device
266                  * to go idle.
267                  */
268                 allow_barrier(conf, start_next_window, bi_sector);
269         }
270 }
271
272 static void raid_end_bio_io(struct r1bio *r1_bio)
273 {
274         struct bio *bio = r1_bio->master_bio;
275
276         /* if nobody has done the final endio yet, do it now */
277         if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
278                 pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
279                          (bio_data_dir(bio) == WRITE) ? "write" : "read",
280                          (unsigned long long) bio->bi_iter.bi_sector,
281                          (unsigned long long) bio_end_sector(bio) - 1);
282
283                 call_bio_endio(r1_bio);
284         }
285         free_r1bio(r1_bio);
286 }
287
288 /*
289  * Update disk head position estimator based on IRQ completion info.
290  */
291 static inline void update_head_pos(int disk, struct r1bio *r1_bio)
292 {
293         struct r1conf *conf = r1_bio->mddev->private;
294
295         conf->mirrors[disk].head_position =
296                 r1_bio->sector + (r1_bio->sectors);
297 }
298
299 /*
300  * Find the disk number which triggered the given bio
301  */
302 static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
303 {
304         int mirror;
305         struct r1conf *conf = r1_bio->mddev->private;
306         int raid_disks = conf->raid_disks;
307
308         for (mirror = 0; mirror < raid_disks * 2; mirror++)
309                 if (r1_bio->bios[mirror] == bio)
310                         break;
311
312         BUG_ON(mirror == raid_disks * 2);
313         update_head_pos(mirror, r1_bio);
314
315         return mirror;
316 }
317
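/*
 * Completion handler for read requests: mark the r1bio up to date on
 * success, otherwise schedule a retry through raid1d unless all other
 * devices have already failed.
 */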
318 static void raid1_end_read_request(struct bio *bio)
319 {
320         int uptodate = !bio->bi_error;
321         struct r1bio *r1_bio = bio->bi_private;
322         struct r1conf *conf = r1_bio->mddev->private;
323         struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;
324
325         /*
326          * this branch is our 'one mirror IO has finished' event handler:
327          */
328         update_head_pos(r1_bio->read_disk, r1_bio);
329
330         if (uptodate)
331                 set_bit(R1BIO_Uptodate, &r1_bio->state);
332         else {
333                 /* If all other devices have failed, we want to return
334                  * the error upwards rather than fail the last device.
335                  * Here we redefine "uptodate" to mean "Don't want to retry"
336                  */
337                 unsigned long flags;
338                 spin_lock_irqsave(&conf->device_lock, flags);
339                 if (r1_bio->mddev->degraded == conf->raid_disks ||
340                     (r1_bio->mddev->degraded == conf->raid_disks-1 &&
341                      test_bit(In_sync, &rdev->flags)))
342                         uptodate = 1;
343                 spin_unlock_irqrestore(&conf->device_lock, flags);
344         }
345
346         if (uptodate) {
347                 raid_end_bio_io(r1_bio);
348                 rdev_dec_pending(rdev, conf->mddev);
349         } else {
350                 /*
351                  * oops, read error:
352                  */
353                 char b[BDEVNAME_SIZE];
354                 printk_ratelimited(
355                         KERN_ERR "md/raid1:%s: %s: "
356                         "rescheduling sector %llu\n",
357                         mdname(conf->mddev),
358                         bdevname(rdev->bdev,
359                                  b),
360                         (unsigned long long)r1_bio->sector);
361                 set_bit(R1BIO_ReadError, &r1_bio->state);
362                 reschedule_retry(r1_bio);
363                 /* don't drop the reference on read_disk yet */
364         }
365 }
366
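/*
 * Final accounting once all writes for an r1bio have finished: free any
 * write-behind page copies, clear the bitmap bits for the range and end
 * the md write.
 */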
367 static void close_write(struct r1bio *r1_bio)
368 {
369         /* it really is the end of this request */
370         if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
371                 /* free extra copy of the data pages */
372                 int i = r1_bio->behind_page_count;
373                 while (i--)
374                         safe_put_page(r1_bio->behind_bvecs[i].bv_page);
375                 kfree(r1_bio->behind_bvecs);
376                 r1_bio->behind_bvecs = NULL;
377         }
378         /* clear the bitmap if all writes complete successfully */
379         bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
380                         r1_bio->sectors,
381                         !test_bit(R1BIO_Degraded, &r1_bio->state),
382                         test_bit(R1BIO_BehindIO, &r1_bio->state));
383         md_write_end(r1_bio->mddev);
384 }
385
386 static void r1_bio_write_done(struct r1bio *r1_bio)
387 {
388         if (!atomic_dec_and_test(&r1_bio->remaining))
389                 return;
390
391         if (test_bit(R1BIO_WriteError, &r1_bio->state))
392                 reschedule_retry(r1_bio);
393         else {
394                 close_write(r1_bio);
395                 if (test_bit(R1BIO_MadeGood, &r1_bio->state))
396                         reschedule_retry(r1_bio);
397                 else
398                         raid_end_bio_io(r1_bio);
399         }
400 }
401
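/*
 * Completion handler for write requests: record write errors for later
 * handling, mark writes that succeeded over known bad blocks as
 * IO_MADE_GOOD, and do the write-behind accounting.
 */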
402 static void raid1_end_write_request(struct bio *bio)
403 {
404         struct r1bio *r1_bio = bio->bi_private;
405         int behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
406         struct r1conf *conf = r1_bio->mddev->private;
407         struct bio *to_put = NULL;
408         int mirror = find_bio_disk(r1_bio, bio);
409         struct md_rdev *rdev = conf->mirrors[mirror].rdev;
410
411         /*
412          * 'one mirror IO has finished' event handler:
413          */
414         if (bio->bi_error) {
415                 set_bit(WriteErrorSeen, &rdev->flags);
416                 if (!test_and_set_bit(WantReplacement, &rdev->flags))
417                         set_bit(MD_RECOVERY_NEEDED, &
418                                 conf->mddev->recovery);
419
420                 set_bit(R1BIO_WriteError, &r1_bio->state);
421         } else {
422                 /*
423                  * Set R1BIO_Uptodate in our master bio, so that we
424          * will return a good error code to the higher
425                  * levels even if IO on some other mirrored buffer
426                  * fails.
427                  *
428                  * The 'master' represents the composite IO operation
429          * to the user side. So if something waits for IO, then it
430                  * will wait for the 'master' bio.
431                  */
432                 sector_t first_bad;
433                 int bad_sectors;
434
435                 r1_bio->bios[mirror] = NULL;
436                 to_put = bio;
437                 /*
438                  * Do not set R1BIO_Uptodate if the current device is
439                  * rebuilding or Faulty. This is because we cannot use
440                  * such device for properly reading the data back (we could
441          * potentially use it, if the current write had fallen
442          * before rdev->recovery_offset, but for simplicity we don't
443          * check this here).
444                  */
445                 if (test_bit(In_sync, &rdev->flags) &&
446                     !test_bit(Faulty, &rdev->flags))
447                         set_bit(R1BIO_Uptodate, &r1_bio->state);
448
449                 /* Maybe we can clear some bad blocks. */
450                 if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
451                                 &first_bad, &bad_sectors)) {
452                         r1_bio->bios[mirror] = IO_MADE_GOOD;
453                         set_bit(R1BIO_MadeGood, &r1_bio->state);
454                 }
455         }
456
457         if (behind) {
458                 if (test_bit(WriteMostly, &rdev->flags))
459                         atomic_dec(&r1_bio->behind_remaining);
460
461                 /*
462                  * In behind mode, we ACK the master bio once the I/O
463                  * has safely reached all non-writemostly
464                  * disks. Setting the Returned bit ensures that this
465                  * gets done only once -- we don't ever want to return
466          * -EIO here; instead we'll wait for the remaining writes.
467                  */
468                 if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
469                     test_bit(R1BIO_Uptodate, &r1_bio->state)) {
470                         /* Maybe we can return now */
471                         if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
472                                 struct bio *mbio = r1_bio->master_bio;
473                                 pr_debug("raid1: behind end write sectors"
474                                          " %llu-%llu\n",
475                                          (unsigned long long) mbio->bi_iter.bi_sector,
476                                          (unsigned long long) bio_end_sector(mbio) - 1);
477                                 call_bio_endio(r1_bio);
478                         }
479                 }
480         }
481         if (r1_bio->bios[mirror] == NULL)
482                 rdev_dec_pending(rdev, conf->mddev);
483
484         /*
485          * Let's see if all mirrored write operations have finished
486          * already.
487          */
488         r1_bio_write_done(r1_bio);
489
490         if (to_put)
491                 bio_put(to_put);
492 }
493
494 /*
495  * This routine returns the disk from which the requested read should
496  * be done. There is a per-array 'next expected sequential IO' sector
497  * number - if this matches on the next IO then we use the last disk.
498  * There is also a per-disk 'last known head position' sector that is
499  * maintained from IRQ contexts; both the normal and the resync IO
500  * completion handlers update this position correctly. If there is no
501  * perfect sequential match then we pick the disk whose head is closest.
502  *
503  * If there are 2 mirrors in the same 2 devices, performance degrades
504  * because position is mirror, not device based.
505  *
506  * The rdev for the device selected will have nr_pending incremented.
507  */
508 static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
509 {
510         const sector_t this_sector = r1_bio->sector;
511         int sectors;
512         int best_good_sectors;
513         int best_disk, best_dist_disk, best_pending_disk;
514         int has_nonrot_disk;
515         int disk;
516         sector_t best_dist;
517         unsigned int min_pending;
518         struct md_rdev *rdev;
519         int choose_first;
520         int choose_next_idle;
521
522         rcu_read_lock();
523         /*
524          * Check if we can balance. We can balance on the whole
525          * device if no resync is going on, or below the resync window.
526          * We take the first readable disk when above the resync window.
527          */
528  retry:
529         sectors = r1_bio->sectors;
530         best_disk = -1;
531         best_dist_disk = -1;
532         best_dist = MaxSector;
533         best_pending_disk = -1;
534         min_pending = UINT_MAX;
535         best_good_sectors = 0;
536         has_nonrot_disk = 0;
537         choose_next_idle = 0;
538
539         if ((conf->mddev->recovery_cp < this_sector + sectors) ||
540             (mddev_is_clustered(conf->mddev) &&
541             md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
542                     this_sector + sectors)))
543                 choose_first = 1;
544         else
545                 choose_first = 0;
546
547         for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
548                 sector_t dist;
549                 sector_t first_bad;
550                 int bad_sectors;
551                 unsigned int pending;
552                 bool nonrot;
553
554                 rdev = rcu_dereference(conf->mirrors[disk].rdev);
555                 if (r1_bio->bios[disk] == IO_BLOCKED
556                     || rdev == NULL
557                     || test_bit(Faulty, &rdev->flags))
558                         continue;
559                 if (!test_bit(In_sync, &rdev->flags) &&
560                     rdev->recovery_offset < this_sector + sectors)
561                         continue;
562                 if (test_bit(WriteMostly, &rdev->flags)) {
563                         /* Don't balance among write-mostly, just
564                          * use the first as a last resort */
565                         if (best_dist_disk < 0) {
566                                 if (is_badblock(rdev, this_sector, sectors,
567                                                 &first_bad, &bad_sectors)) {
568                                         if (first_bad <= this_sector)
569                                                 /* Cannot use this */
570                                                 continue;
571                                         best_good_sectors = first_bad - this_sector;
572                                 } else
573                                         best_good_sectors = sectors;
574                                 best_dist_disk = disk;
575                                 best_pending_disk = disk;
576                         }
577                         continue;
578                 }
579                 /* This is a reasonable device to use.  It might
580                  * even be best.
581                  */
582                 if (is_badblock(rdev, this_sector, sectors,
583                                 &first_bad, &bad_sectors)) {
584                         if (best_dist < MaxSector)
585                                 /* already have a better device */
586                                 continue;
587                         if (first_bad <= this_sector) {
588                                 /* cannot read here. If this is the 'primary'
589                                  * device, then we must not read beyond
590                                  * bad_sectors from another device.
591                                  */
592                                 bad_sectors -= (this_sector - first_bad);
593                                 if (choose_first && sectors > bad_sectors)
594                                         sectors = bad_sectors;
595                                 if (best_good_sectors > sectors)
596                                         best_good_sectors = sectors;
597
598                         } else {
599                                 sector_t good_sectors = first_bad - this_sector;
600                                 if (good_sectors > best_good_sectors) {
601                                         best_good_sectors = good_sectors;
602                                         best_disk = disk;
603                                 }
604                                 if (choose_first)
605                                         break;
606                         }
607                         continue;
608                 } else
609                         best_good_sectors = sectors;
610
611                 nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
612                 has_nonrot_disk |= nonrot;
613                 pending = atomic_read(&rdev->nr_pending);
614                 dist = abs(this_sector - conf->mirrors[disk].head_position);
615                 if (choose_first) {
616                         best_disk = disk;
617                         break;
618                 }
619                 /* Don't change to another disk for sequential reads */
620                 if (conf->mirrors[disk].next_seq_sect == this_sector
621                     || dist == 0) {
622                         int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
623                         struct raid1_info *mirror = &conf->mirrors[disk];
624
625                         best_disk = disk;
626                         /*
627                          * If the buffered sequential IO size exceeds the optimal
628                          * iosize, check whether there is an idle disk; if so, choose
629                          * the idle disk. read_balance may already have chosen an
630                          * idle disk before noticing that this disk is seeing
631                          * sequential IO. That doesn't matter: this disk will go
632                          * idle and be used again once the first disk's IO size
633                          * exceeds the optimal iosize. This way the first disk's IO
634                          * size is at least the optimal iosize; the second disk's
635                          * may be small, but that is no big deal since by the time
636                          * the second disk starts IO, the first disk is likely
637                          * still busy.
638                          */
639                         if (nonrot && opt_iosize > 0 &&
640                             mirror->seq_start != MaxSector &&
641                             mirror->next_seq_sect > opt_iosize &&
642                             mirror->next_seq_sect - opt_iosize >=
643                             mirror->seq_start) {
644                                 choose_next_idle = 1;
645                                 continue;
646                         }
647                         break;
648                 }
649                 /* If device is idle, use it */
650                 if (pending == 0) {
651                         best_disk = disk;
652                         break;
653                 }
654
655                 if (choose_next_idle)
656                         continue;
657
658                 if (min_pending > pending) {
659                         min_pending = pending;
660                         best_pending_disk = disk;
661                 }
662
663                 if (dist < best_dist) {
664                         best_dist = dist;
665                         best_dist_disk = disk;
666                 }
667         }
668
669         /*
670          * If all disks are rotational, choose the closest disk. If any disk is
671          * non-rotational, choose the disk with the fewest pending requests even
672          * if that disk is rotational, which may or may not be optimal for arrays
673          * with mixed rotational/non-rotational disks depending on workload.
674          */
675         if (best_disk == -1) {
676                 if (has_nonrot_disk)
677                         best_disk = best_pending_disk;
678                 else
679                         best_disk = best_dist_disk;
680         }
681
682         if (best_disk >= 0) {
683                 rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
684                 if (!rdev)
685                         goto retry;
686                 atomic_inc(&rdev->nr_pending);
687                 sectors = best_good_sectors;
688
689                 if (conf->mirrors[best_disk].next_seq_sect != this_sector)
690                         conf->mirrors[best_disk].seq_start = this_sector;
691
692                 conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
693         }
694         rcu_read_unlock();
695         *max_sectors = sectors;
696
697         return best_disk;
698 }
699
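/*
 * Report the array as congested if too many writes are queued for the
 * raid1 thread, or if any member device's queue is congested.
 */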
700 static int raid1_congested(struct mddev *mddev, int bits)
701 {
702         struct r1conf *conf = mddev->private;
703         int i, ret = 0;
704
705         if ((bits & (1 << WB_async_congested)) &&
706             conf->pending_count >= max_queued_requests)
707                 return 1;
708
709         rcu_read_lock();
710         for (i = 0; i < conf->raid_disks * 2; i++) {
711                 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
712                 if (rdev && !test_bit(Faulty, &rdev->flags)) {
713                         struct request_queue *q = bdev_get_queue(rdev->bdev);
714
715                         BUG_ON(!q);
716
717                         /* Note the '|| 1' - when read_balance prefers
718                          * non-congested targets, it can be removed
719                          */
720                         if ((bits & (1 << WB_async_congested)) || 1)
721                                 ret |= bdi_congested(&q->backing_dev_info, bits);
722                         else
723                                 ret &= bdi_congested(&q->backing_dev_info, bits);
724                 }
725         }
726         rcu_read_unlock();
727         return ret;
728 }
729
730 static void flush_pending_writes(struct r1conf *conf)
731 {
732         /* Any writes that have been queued but are awaiting
733          * bitmap updates get flushed here.
734          */
735         spin_lock_irq(&conf->device_lock);
736
737         if (conf->pending_bio_list.head) {
738                 struct bio *bio;
739                 bio = bio_list_get(&conf->pending_bio_list);
740                 conf->pending_count = 0;
741                 spin_unlock_irq(&conf->device_lock);
742                 /* flush any pending bitmap writes to
743                  * disk before proceeding w/ I/O */
744                 bitmap_unplug(conf->mddev->bitmap);
745                 wake_up(&conf->wait_barrier);
746
747                 while (bio) { /* submit pending writes */
748                         struct bio *next = bio->bi_next;
749                         bio->bi_next = NULL;
750                         if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
751                             !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
752                                 /* Just ignore it */
753                                 bio_endio(bio);
754                         else
755                                 generic_make_request(bio);
756                         bio = next;
757                 }
758         } else
759                 spin_unlock_irq(&conf->device_lock);
760 }
761
762 /* Barriers....
763  * Sometimes we need to suspend IO while we do something else,
764  * either some resync/recovery, or reconfigure the array.
765  * To do this we raise a 'barrier'.
766  * The 'barrier' is a counter that can be raised multiple times
767  * to count how many activities are happening which preclude
768  * normal IO.
769  * We can only raise the barrier if there is no pending IO.
770  * i.e. if nr_pending == 0.
771  * We choose only to raise the barrier if no-one is waiting for the
772  * barrier to go down.  This means that as soon as an IO request
773  * is ready, no other operations which require a barrier will start
774  * until the IO request has had a chance.
775  *
776  * So: regular IO calls 'wait_barrier'.  When that returns there
777  *    is no background IO happening.  It must arrange to call
778  *    allow_barrier when it has finished its IO.
779  * background IO calls must call raise_barrier.  Once that returns
780  *    there is no normal IO happening.  It must arrange to call
781  *    lower_barrier when the particular background IO completes.
782  */
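/*
 * Illustrative sketch of the expected pairing (not literal call sites in
 * this file):
 *
 *   regular IO                                background (resync/recovery) IO
 *   -----------------------------------       -------------------------------
 *   sector = wait_barrier(conf, bio);         raise_barrier(conf, sector_nr);
 *   ... submit the IO ...                     ... do the background IO ...
 *   allow_barrier(conf, sector, bi_sector);   lower_barrier(conf);
 */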
783 static void raise_barrier(struct r1conf *conf, sector_t sector_nr)
784 {
785         spin_lock_irq(&conf->resync_lock);
786
787         /* Wait until no block IO is waiting */
788         wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
789                             conf->resync_lock);
790
791         /* block any new IO from starting */
792         conf->barrier++;
793         conf->next_resync = sector_nr;
794
795         /* For these conditions we must wait:
796          * A: while the array is in frozen state
797          * B: while barrier >= RESYNC_DEPTH, meaning resync has reached
798          *    the maximum count allowed.
799          * C: next_resync + RESYNC_SECTORS > start_next_window, meaning the
800          *    next resync will reach into the window which normal bios are
801          *    handling.
802          * D: while there are any active requests in the current window.
803          */
804         wait_event_lock_irq(conf->wait_barrier,
805                             !conf->array_frozen &&
806                             conf->barrier < RESYNC_DEPTH &&
807                             conf->current_window_requests == 0 &&
808                             (conf->start_next_window >=
809                              conf->next_resync + RESYNC_SECTORS),
810                             conf->resync_lock);
811
812         conf->nr_pending++;
813         spin_unlock_irq(&conf->resync_lock);
814 }
815
816 static void lower_barrier(struct r1conf *conf)
817 {
818         unsigned long flags;
819         BUG_ON(conf->barrier <= 0);
820         spin_lock_irqsave(&conf->resync_lock, flags);
821         conf->barrier--;
822         conf->nr_pending--;
823         spin_unlock_irqrestore(&conf->resync_lock, flags);
824         wake_up(&conf->wait_barrier);
825 }
826
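/*
 * Decide whether a request has to wait for the resync barrier: always if
 * the array is frozen; writes wait while a barrier is up unless they are
 * clearly outside the region the current resync is working through.
 */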
827 static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
828 {
829         bool wait = false;
830
831         if (conf->array_frozen || !bio)
832                 wait = true;
833         else if (conf->barrier && bio_data_dir(bio) == WRITE) {
834                 if ((conf->mddev->curr_resync_completed
835                      >= bio_end_sector(bio)) ||
836                     (conf->next_resync + NEXT_NORMALIO_DISTANCE
837                      <= bio->bi_iter.bi_sector))
838                         wait = false;
839                 else
840                         wait = true;
841         }
842
843         return wait;
844 }
845
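/*
 * Called for normal IO: wait on the barrier if necessary, bump nr_pending
 * and, for writes, return the start_next_window value that must later be
 * handed back to allow_barrier().
 */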
846 static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
847 {
848         sector_t sector = 0;
849
850         spin_lock_irq(&conf->resync_lock);
851         if (need_to_wait_for_sync(conf, bio)) {
852                 conf->nr_waiting++;
853                 /* Wait for the barrier to drop.
854                  * However if there are already pending
855                  * requests (preventing the barrier from
856                  * rising completely), and the
857                  * per-process bio queue isn't empty,
858                  * then don't wait, as we need to empty
859                  * that queue to allow conf->start_next_window
860                  * to increase.
861                  */
862                 wait_event_lock_irq(conf->wait_barrier,
863                                     !conf->array_frozen &&
864                                     (!conf->barrier ||
865                                      ((conf->start_next_window <
866                                        conf->next_resync + RESYNC_SECTORS) &&
867                                       current->bio_list &&
868                                       !bio_list_empty(current->bio_list))),
869                                     conf->resync_lock);
870                 conf->nr_waiting--;
871         }
872
873         if (bio && bio_data_dir(bio) == WRITE) {
874                 if (bio->bi_iter.bi_sector >= conf->next_resync) {
875                         if (conf->start_next_window == MaxSector)
876                                 conf->start_next_window =
877                                         conf->next_resync +
878                                         NEXT_NORMALIO_DISTANCE;
879
880                         if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE)
881                             <= bio->bi_iter.bi_sector)
882                                 conf->next_window_requests++;
883                         else
884                                 conf->current_window_requests++;
885                         sector = conf->start_next_window;
886                 }
887         }
888
889         conf->nr_pending++;
890         spin_unlock_irq(&conf->resync_lock);
891         return sector;
892 }
893
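/*
 * Counterpart of wait_barrier(): drop nr_pending and update the
 * current/next window accounting for the window this request was counted
 * against.
 */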
894 static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
895                           sector_t bi_sector)
896 {
897         unsigned long flags;
898
899         spin_lock_irqsave(&conf->resync_lock, flags);
900         conf->nr_pending--;
901         if (start_next_window) {
902                 if (start_next_window == conf->start_next_window) {
903                         if (conf->start_next_window + NEXT_NORMALIO_DISTANCE
904                             <= bi_sector)
905                                 conf->next_window_requests--;
906                         else
907                                 conf->current_window_requests--;
908                 } else
909                         conf->current_window_requests--;
910
911                 if (!conf->current_window_requests) {
912                         if (conf->next_window_requests) {
913                                 conf->current_window_requests =
914                                         conf->next_window_requests;
915                                 conf->next_window_requests = 0;
916                                 conf->start_next_window +=
917                                         NEXT_NORMALIO_DISTANCE;
918                         } else
919                                 conf->start_next_window = MaxSector;
920                 }
921         }
922         spin_unlock_irqrestore(&conf->resync_lock, flags);
923         wake_up(&conf->wait_barrier);
924 }
925
926 static void freeze_array(struct r1conf *conf, int extra)
927 {
928         /* Stop sync IO and normal IO and wait for everything to
929          * go quiet.
930          * We wait until nr_pending matches nr_queued+extra.
931          * This is called in the context of one normal IO request
932          * that has failed. Thus any sync request that might be pending
933          * will be blocked by nr_pending, and we need to wait for
934          * pending IO requests to complete or be queued for re-try.
935          * Thus the number queued (nr_queued) plus this request (extra)
936          * must match the number of pending IOs (nr_pending) before
937          * we continue.
938          */
939         spin_lock_irq(&conf->resync_lock);
940         conf->array_frozen = 1;
941         wait_event_lock_irq_cmd(conf->wait_barrier,
942                                 conf->nr_pending == conf->nr_queued+extra,
943                                 conf->resync_lock,
944                                 flush_pending_writes(conf));
945         spin_unlock_irq(&conf->resync_lock);
946 }
947 static void unfreeze_array(struct r1conf *conf)
948 {
949         /* reverse the effect of the freeze */
950         spin_lock_irq(&conf->resync_lock);
951         conf->array_frozen = 0;
952         wake_up(&conf->wait_barrier);
953         spin_unlock_irq(&conf->resync_lock);
954 }
955
956 /* duplicate the data pages for behind I/O
957  */
958 static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio)
959 {
960         int i;
961         struct bio_vec *bvec;
962         struct bio_vec *bvecs = kzalloc(bio->bi_vcnt * sizeof(struct bio_vec),
963                                         GFP_NOIO);
964         if (unlikely(!bvecs))
965                 return;
966
967         bio_for_each_segment_all(bvec, bio, i) {
968                 bvecs[i] = *bvec;
969                 bvecs[i].bv_page = alloc_page(GFP_NOIO);
970                 if (unlikely(!bvecs[i].bv_page))
971                         goto do_sync_io;
972                 memcpy(kmap(bvecs[i].bv_page) + bvec->bv_offset,
973                        kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
974                 kunmap(bvecs[i].bv_page);
975                 kunmap(bvec->bv_page);
976         }
977         r1_bio->behind_bvecs = bvecs;
978         r1_bio->behind_page_count = bio->bi_vcnt;
979         set_bit(R1BIO_BehindIO, &r1_bio->state);
980         return;
981
982 do_sync_io:
983         for (i = 0; i < bio->bi_vcnt; i++)
984                 if (bvecs[i].bv_page)
985                         put_page(bvecs[i].bv_page);
986         kfree(bvecs);
987         pr_debug("%dB behind alloc failed, doing sync I/O\n",
988                  bio->bi_iter.bi_size);
989 }
990
991 struct raid1_plug_cb {
992         struct blk_plug_cb      cb;
993         struct bio_list         pending;
994         int                     pending_cnt;
995 };
996
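/*
 * Plug callback: when unplugging from a scheduling context the batched
 * writes are handed to raid1d, otherwise the bitmap is flushed and the
 * writes are issued directly.
 */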
997 static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
998 {
999         struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
1000                                                   cb);
1001         struct mddev *mddev = plug->cb.data;
1002         struct r1conf *conf = mddev->private;
1003         struct bio *bio;
1004
1005         if (from_schedule || current->bio_list) {
1006                 spin_lock_irq(&conf->device_lock);
1007                 bio_list_merge(&conf->pending_bio_list, &plug->pending);
1008                 conf->pending_count += plug->pending_cnt;
1009                 spin_unlock_irq(&conf->device_lock);
1010                 wake_up(&conf->wait_barrier);
1011                 md_wakeup_thread(mddev->thread);
1012                 kfree(plug);
1013                 return;
1014         }
1015
1016         /* we aren't scheduling, so we can do the write-out directly. */
1017         bio = bio_list_get(&plug->pending);
1018         bitmap_unplug(mddev->bitmap);
1019         wake_up(&conf->wait_barrier);
1020
1021         while (bio) { /* submit pending writes */
1022                 struct bio *next = bio->bi_next;
1023                 bio->bi_next = NULL;
1024                 if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
1025                     !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
1026                         /* Just ignore it */
1027                         bio_endio(bio);
1028                 else
1029                         generic_make_request(bio);
1030                 bio = next;
1031         }
1032         kfree(plug);
1033 }
1034
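/*
 * Main entry point for normal IO.  Reads are balanced across the mirrors;
 * writes are cloned to every usable device, splitting the request where
 * needed to avoid known bad blocks.
 */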
1035 static void raid1_make_request(struct mddev *mddev, struct bio * bio)
1036 {
1037         struct r1conf *conf = mddev->private;
1038         struct raid1_info *mirror;
1039         struct r1bio *r1_bio;
1040         struct bio *read_bio;
1041         int i, disks;
1042         struct bitmap *bitmap;
1043         unsigned long flags;
1044         const int op = bio_op(bio);
1045         const int rw = bio_data_dir(bio);
1046         const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
1047         const unsigned long do_flush_fua = (bio->bi_rw &
1048                                                 (REQ_PREFLUSH | REQ_FUA));
1049         struct md_rdev *blocked_rdev;
1050         struct blk_plug_cb *cb;
1051         struct raid1_plug_cb *plug = NULL;
1052         int first_clone;
1053         int sectors_handled;
1054         int max_sectors;
1055         sector_t start_next_window;
1056
1057         /*
1058          * Register the new request and wait if the reconstruction
1059          * thread has put up a bar for new requests.
1060          * Continue immediately if no resync is active currently.
1061          */
1062
1063         md_write_start(mddev, bio); /* wait on superblock update early */
1064
1065         if (bio_data_dir(bio) == WRITE &&
1066             ((bio_end_sector(bio) > mddev->suspend_lo &&
1067             bio->bi_iter.bi_sector < mddev->suspend_hi) ||
1068             (mddev_is_clustered(mddev) &&
1069              md_cluster_ops->area_resyncing(mddev, WRITE,
1070                      bio->bi_iter.bi_sector, bio_end_sector(bio))))) {
1071                 /* As the suspend_* range is controlled by
1072                  * userspace, we want an interruptible
1073                  * wait.
1074                  */
1075                 DEFINE_WAIT(w);
1076                 for (;;) {
1077                         flush_signals(current);
1078                         prepare_to_wait(&conf->wait_barrier,
1079                                         &w, TASK_INTERRUPTIBLE);
1080                         if (bio_end_sector(bio) <= mddev->suspend_lo ||
1081                             bio->bi_iter.bi_sector >= mddev->suspend_hi ||
1082                             (mddev_is_clustered(mddev) &&
1083                              !md_cluster_ops->area_resyncing(mddev, WRITE,
1084                                      bio->bi_iter.bi_sector, bio_end_sector(bio))))
1085                                 break;
1086                         schedule();
1087                 }
1088                 finish_wait(&conf->wait_barrier, &w);
1089         }
1090
1091         start_next_window = wait_barrier(conf, bio);
1092
1093         bitmap = mddev->bitmap;
1094
1095         /*
1096          * make_request() can abort the operation when read-ahead is being
1097          * used and no empty request is available.
1098          *
1099          */
1100         r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
1101
1102         r1_bio->master_bio = bio;
1103         r1_bio->sectors = bio_sectors(bio);
1104         r1_bio->state = 0;
1105         r1_bio->mddev = mddev;
1106         r1_bio->sector = bio->bi_iter.bi_sector;
1107
1108         /* We might need to issue multiple reads to different
1109          * devices if there are bad blocks around, so we keep
1110          * track of the number of reads in bio->bi_phys_segments.
1111          * If this is 0, there is only one r1_bio and no locking
1112          * will be needed when requests complete.  If it is
1113          * non-zero, then it is the number of not-completed requests.
1114          */
1115         bio->bi_phys_segments = 0;
1116         bio_clear_flag(bio, BIO_SEG_VALID);
1117
1118         if (rw == READ) {
1119                 /*
1120                  * read balancing logic:
1121                  */
1122                 int rdisk;
1123
1124 read_again:
1125                 rdisk = read_balance(conf, r1_bio, &max_sectors);
1126
1127                 if (rdisk < 0) {
1128                         /* couldn't find anywhere to read from */
1129                         raid_end_bio_io(r1_bio);
1130                         return;
1131                 }
1132                 mirror = conf->mirrors + rdisk;
1133
1134                 if (test_bit(WriteMostly, &mirror->rdev->flags) &&
1135                     bitmap) {
1136                         /* Reading from a write-mostly device must
1137                          * take care not to over-take any writes
1138                          * that are 'behind'
1139                          */
1140                         wait_event(bitmap->behind_wait,
1141                                    atomic_read(&bitmap->behind_writes) == 0);
1142                 }
1143                 r1_bio->read_disk = rdisk;
1144                 r1_bio->start_next_window = 0;
1145
1146                 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1147                 bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
1148                          max_sectors);
1149
1150                 r1_bio->bios[rdisk] = read_bio;
1151
1152                 read_bio->bi_iter.bi_sector = r1_bio->sector +
1153                         mirror->rdev->data_offset;
1154                 read_bio->bi_bdev = mirror->rdev->bdev;
1155                 read_bio->bi_end_io = raid1_end_read_request;
1156                 bio_set_op_attrs(read_bio, op, do_sync);
1157                 read_bio->bi_private = r1_bio;
1158
1159                 if (max_sectors < r1_bio->sectors) {
1160                         /* could not read all from this device, so we will
1161                          * need another r1_bio.
1162                          */
1163
1164                         sectors_handled = (r1_bio->sector + max_sectors
1165                                            - bio->bi_iter.bi_sector);
1166                         r1_bio->sectors = max_sectors;
1167                         spin_lock_irq(&conf->device_lock);
1168                         if (bio->bi_phys_segments == 0)
1169                                 bio->bi_phys_segments = 2;
1170                         else
1171                                 bio->bi_phys_segments++;
1172                         spin_unlock_irq(&conf->device_lock);
1173                         /* Cannot call generic_make_request directly
1174                          * as that will be queued in __make_request
1175                          * and subsequent mempool_alloc might block waiting
1176                          * for it.  So hand bio over to raid1d.
1177                          */
1178                         reschedule_retry(r1_bio);
1179
1180                         r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
1181
1182                         r1_bio->master_bio = bio;
1183                         r1_bio->sectors = bio_sectors(bio) - sectors_handled;
1184                         r1_bio->state = 0;
1185                         r1_bio->mddev = mddev;
1186                         r1_bio->sector = bio->bi_iter.bi_sector +
1187                                 sectors_handled;
1188                         goto read_again;
1189                 } else
1190                         generic_make_request(read_bio);
1191                 return;
1192         }
1193
1194         /*
1195          * WRITE:
1196          */
1197         if (conf->pending_count >= max_queued_requests) {
1198                 md_wakeup_thread(mddev->thread);
1199                 wait_event(conf->wait_barrier,
1200                            conf->pending_count < max_queued_requests);
1201         }
1202         /* first select target devices under rcu_lock and
1203          * inc refcount on their rdev.  Record them by setting
1204          * bios[x] to bio
1205          * If there are known/acknowledged bad blocks on any device on
1206          * which we have seen a write error, we want to avoid writing those
1207          * blocks.
1208          * This potentially requires several writes to write around
1209                  * the bad blocks.  Each set of writes gets its own r1bio
1210          * with a set of bios attached.
1211          */
1212
1213         disks = conf->raid_disks * 2;
1214  retry_write:
1215         r1_bio->start_next_window = start_next_window;
1216         blocked_rdev = NULL;
1217         rcu_read_lock();
1218         max_sectors = r1_bio->sectors;
1219         for (i = 0;  i < disks; i++) {
1220                 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1221                 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
1222                         atomic_inc(&rdev->nr_pending);
1223                         blocked_rdev = rdev;
1224                         break;
1225                 }
1226                 r1_bio->bios[i] = NULL;
1227                 if (!rdev || test_bit(Faulty, &rdev->flags)) {
1228                         if (i < conf->raid_disks)
1229                                 set_bit(R1BIO_Degraded, &r1_bio->state);
1230                         continue;
1231                 }
1232
1233                 atomic_inc(&rdev->nr_pending);
1234                 if (test_bit(WriteErrorSeen, &rdev->flags)) {
1235                         sector_t first_bad;
1236                         int bad_sectors;
1237                         int is_bad;
1238
1239                         is_bad = is_badblock(rdev, r1_bio->sector,
1240                                              max_sectors,
1241                                              &first_bad, &bad_sectors);
1242                         if (is_bad < 0) {
1243                                 /* mustn't write here until the bad block is
1244                                  * acknowledged */
1245                                 set_bit(BlockedBadBlocks, &rdev->flags);
1246                                 blocked_rdev = rdev;
1247                                 break;
1248                         }
1249                         if (is_bad && first_bad <= r1_bio->sector) {
1250                                 /* Cannot write here at all */
1251                                 bad_sectors -= (r1_bio->sector - first_bad);
1252                                 if (bad_sectors < max_sectors)
1253                                         /* mustn't write more than bad_sectors
1254                                          * to other devices yet
1255                                          */
1256                                         max_sectors = bad_sectors;
1257                                 rdev_dec_pending(rdev, mddev);
1258                                 /* We don't set R1BIO_Degraded as that
1259                                  * only applies if the disk is
1260                                  * missing, so it might be re-added,
1261                                  * and we want to know to recover this
1262                                  * chunk.
1263                                  * In this case the device is here,
1264                                  * and the fact that this chunk is not
1265                                  * in-sync is recorded in the bad
1266                                  * block log
1267                                  */
1268                                 continue;
1269                         }
1270                         if (is_bad) {
1271                                 int good_sectors = first_bad - r1_bio->sector;
1272                                 if (good_sectors < max_sectors)
1273                                         max_sectors = good_sectors;
1274                         }
1275                 }
1276                 r1_bio->bios[i] = bio;
1277         }
1278         rcu_read_unlock();
1279
1280         if (unlikely(blocked_rdev)) {
1281                 /* Wait for this device to become unblocked */
1282                 int j;
1283                 sector_t old = start_next_window;
1284
1285                 for (j = 0; j < i; j++)
1286                         if (r1_bio->bios[j])
1287                                 rdev_dec_pending(conf->mirrors[j].rdev, mddev);
1288                 r1_bio->state = 0;
1289                 allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector);
1290                 md_wait_for_blocked_rdev(blocked_rdev, mddev);
1291                 start_next_window = wait_barrier(conf, bio);
1292                 /*
1293                  * We must make sure the multi r1bios of bio have
1294                  * the same value of bi_phys_segments
1295                  */
1296                 if (bio->bi_phys_segments && old &&
1297                     old != start_next_window)
1298                         /* Wait for the former r1bio(s) to complete */
1299                         wait_event(conf->wait_barrier,
1300                                    bio->bi_phys_segments == 1);
1301                 goto retry_write;
1302         }
1303
1304         if (max_sectors < r1_bio->sectors) {
1305                 /* We are splitting this write into multiple parts, so
1306                  * we need to prepare for allocating another r1_bio.
1307                  */
1308                 r1_bio->sectors = max_sectors;
1309                 spin_lock_irq(&conf->device_lock);
1310                 if (bio->bi_phys_segments == 0)
1311                         bio->bi_phys_segments = 2;
1312                 else
1313                         bio->bi_phys_segments++;
1314                 spin_unlock_irq(&conf->device_lock);
1315         }
1316         sectors_handled = r1_bio->sector + max_sectors - bio->bi_iter.bi_sector;
1317
1318         atomic_set(&r1_bio->remaining, 1);
1319         atomic_set(&r1_bio->behind_remaining, 0);
1320
1321         first_clone = 1;
1322         for (i = 0; i < disks; i++) {
1323                 struct bio *mbio;
1324                 if (!r1_bio->bios[i])
1325                         continue;
1326
1327                 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1328                 bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors);
1329
1330                 if (first_clone) {
1331                         /* do behind I/O?
1332                          * Not if there are too many, or we cannot
1333                          * allocate memory, or a reader on WriteMostly
1334                          * is waiting for behind writes to flush */
1335                         if (bitmap &&
1336                             (atomic_read(&bitmap->behind_writes)
1337                              < mddev->bitmap_info.max_write_behind) &&
1338                             !waitqueue_active(&bitmap->behind_wait))
1339                                 alloc_behind_pages(mbio, r1_bio);
1340
1341                         bitmap_startwrite(bitmap, r1_bio->sector,
1342                                           r1_bio->sectors,
1343                                           test_bit(R1BIO_BehindIO,
1344                                                    &r1_bio->state));
1345                         first_clone = 0;
1346                 }
1347                 if (r1_bio->behind_bvecs) {
1348                         struct bio_vec *bvec;
1349                         int j;
1350
1351                         /*
1352                          * We trimmed the bio, so _all is legit
1353                          */
1354                         bio_for_each_segment_all(bvec, mbio, j)
1355                                 bvec->bv_page = r1_bio->behind_bvecs[j].bv_page;
1356                         if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
1357                                 atomic_inc(&r1_bio->behind_remaining);
1358                 }
1359
1360                 r1_bio->bios[i] = mbio;
1361
1362                 mbio->bi_iter.bi_sector = (r1_bio->sector +
1363                                    conf->mirrors[i].rdev->data_offset);
1364                 mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
1365                 mbio->bi_end_io = raid1_end_write_request;
1366                 bio_set_op_attrs(mbio, op, do_flush_fua | do_sync);
1367                 mbio->bi_private = r1_bio;
1368
1369                 atomic_inc(&r1_bio->remaining);
1370
1371                 cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
1372                 if (cb)
1373                         plug = container_of(cb, struct raid1_plug_cb, cb);
1374                 else
1375                         plug = NULL;
1376                 spin_lock_irqsave(&conf->device_lock, flags);
1377                 if (plug) {
1378                         bio_list_add(&plug->pending, mbio);
1379                         plug->pending_cnt++;
1380                 } else {
1381                         bio_list_add(&conf->pending_bio_list, mbio);
1382                         conf->pending_count++;
1383                 }
1384                 spin_unlock_irqrestore(&conf->device_lock, flags);
1385                 if (!plug)
1386                         md_wakeup_thread(mddev->thread);
1387         }
1388         /* Mustn't call r1_bio_write_done before this next test,
1389          * as it could result in the bio being freed.
1390          */
1391         if (sectors_handled < bio_sectors(bio)) {
1392                 r1_bio_write_done(r1_bio);
1393                 /* We need another r1_bio.  It has already been counted
1394                  * in bio->bi_phys_segments
1395                  */
1396                 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
1397                 r1_bio->master_bio = bio;
1398                 r1_bio->sectors = bio_sectors(bio) - sectors_handled;
1399                 r1_bio->state = 0;
1400                 r1_bio->mddev = mddev;
1401                 r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
1402                 goto retry_write;
1403         }
1404
1405         r1_bio_write_done(r1_bio);
1406
1407         /* In case raid1d snuck in to freeze_array */
1408         wake_up(&conf->wait_barrier);
1409 }
1410
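/*
 * Status output as it appears in /proc/mdstat, e.g. " [2/2] [UU]" for a
 * healthy two-disk mirror; '_' marks a slot that is missing or not in sync.
 */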
1411 static void raid1_status(struct seq_file *seq, struct mddev *mddev)
1412 {
1413         struct r1conf *conf = mddev->private;
1414         int i;
1415
1416         seq_printf(seq, " [%d/%d] [", conf->raid_disks,
1417                    conf->raid_disks - mddev->degraded);
1418         rcu_read_lock();
1419         for (i = 0; i < conf->raid_disks; i++) {
1420                 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1421                 seq_printf(seq, "%s",
1422                            rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1423         }
1424         rcu_read_unlock();
1425         seq_printf(seq, "]");
1426 }
1427
1428 static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
1429 {
1430         char b[BDEVNAME_SIZE];
1431         struct r1conf *conf = mddev->private;
1432         unsigned long flags;
1433
1434         /*
1435          * If it is not operational, then we have already marked it as dead
1436          * else if it is the last working disk, ignore the error, let the
1437          * next level up know.
1438          * else mark the drive as failed
1439          */
1440         if (test_bit(In_sync, &rdev->flags)
1441             && (conf->raid_disks - mddev->degraded) == 1) {
1442                 /*
1443                  * Don't fail the drive, act as though we were just a
1444                  * normal single drive.
1445                  * However don't try a recovery from this drive as
1446                  * it is very likely to fail.
1447                  */
1448                 conf->recovery_disabled = mddev->recovery_disabled;
1449                 return;
1450         }
1451         set_bit(Blocked, &rdev->flags);
1452         spin_lock_irqsave(&conf->device_lock, flags);
1453         if (test_and_clear_bit(In_sync, &rdev->flags)) {
1454                 mddev->degraded++;
1455                 set_bit(Faulty, &rdev->flags);
1456         } else
1457                 set_bit(Faulty, &rdev->flags);
1458         spin_unlock_irqrestore(&conf->device_lock, flags);
1459         /*
1460          * if recovery is running, make sure it aborts.
1461          */
1462         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1463         set_mask_bits(&mddev->flags, 0,
1464                       BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
1465         printk(KERN_ALERT
1466                "md/raid1:%s: Disk failure on %s, disabling device.\n"
1467                "md/raid1:%s: Operation continuing on %d devices.\n",
1468                mdname(mddev), bdevname(rdev->bdev, b),
1469                mdname(mddev), conf->raid_disks - mddev->degraded);
1470 }
1471
1472 static void print_conf(struct r1conf *conf)
1473 {
1474         int i;
1475
1476         printk(KERN_DEBUG "RAID1 conf printout:\n");
1477         if (!conf) {
1478                 printk(KERN_DEBUG "(!conf)\n");
1479                 return;
1480         }
1481         printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
1482                 conf->raid_disks);
1483
1484         rcu_read_lock();
1485         for (i = 0; i < conf->raid_disks; i++) {
1486                 char b[BDEVNAME_SIZE];
1487                 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1488                 if (rdev)
1489                         printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
1490                                i, !test_bit(In_sync, &rdev->flags),
1491                                !test_bit(Faulty, &rdev->flags),
1492                                bdevname(rdev->bdev,b));
1493         }
1494         rcu_read_unlock();
1495 }
1496
1497 static void close_sync(struct r1conf *conf)
1498 {
1499         wait_barrier(conf, NULL);
1500         allow_barrier(conf, 0, 0);
1501
1502         mempool_destroy(conf->r1buf_pool);
1503         conf->r1buf_pool = NULL;
1504
1505         spin_lock_irq(&conf->resync_lock);
1506         conf->next_resync = MaxSector - 2 * NEXT_NORMALIO_DISTANCE;
1507         conf->start_next_window = MaxSector;
1508         conf->current_window_requests +=
1509                 conf->next_window_requests;
1510         conf->next_window_requests = 0;
1511         spin_unlock_irq(&conf->resync_lock);
1512 }
1513
1514 static int raid1_spare_active(struct mddev *mddev)
1515 {
1516         int i;
1517         struct r1conf *conf = mddev->private;
1518         int count = 0;
1519         unsigned long flags;
1520
1521         /*
1522          * Find all failed disks within the RAID1 configuration
1523          * and mark them readable.
1524          * Called under mddev lock, so rcu protection not needed.
1525          * device_lock used to avoid races with raid1_end_read_request
1526          * which expects 'In_sync' flags and ->degraded to be consistent.
1527          */
1528         spin_lock_irqsave(&conf->device_lock, flags);
1529         for (i = 0; i < conf->raid_disks; i++) {
1530                 struct md_rdev *rdev = conf->mirrors[i].rdev;
1531                 struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
1532                 if (repl
1533                     && !test_bit(Candidate, &repl->flags)
1534                     && repl->recovery_offset == MaxSector
1535                     && !test_bit(Faulty, &repl->flags)
1536                     && !test_and_set_bit(In_sync, &repl->flags)) {
1537                         /* replacement has just become active */
1538                         if (!rdev ||
1539                             !test_and_clear_bit(In_sync, &rdev->flags))
1540                                 count++;
1541                         if (rdev) {
1542                                 /* Replaced device not technically
1543                                  * faulty, but we need to be sure
1544                                  * it gets removed and never re-added
1545                                  */
1546                                 set_bit(Faulty, &rdev->flags);
1547                                 sysfs_notify_dirent_safe(
1548                                         rdev->sysfs_state);
1549                         }
1550                 }
1551                 if (rdev
1552                     && rdev->recovery_offset == MaxSector
1553                     && !test_bit(Faulty, &rdev->flags)
1554                     && !test_and_set_bit(In_sync, &rdev->flags)) {
1555                         count++;
1556                         sysfs_notify_dirent_safe(rdev->sysfs_state);
1557                 }
1558         }
1559         mddev->degraded -= count;
1560         spin_unlock_irqrestore(&conf->device_lock, flags);
1561
1562         print_conf(conf);
1563         return count;
1564 }
1565
1566 static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1567 {
1568         struct r1conf *conf = mddev->private;
1569         int err = -EEXIST;
1570         int mirror = 0;
1571         struct raid1_info *p;
1572         int first = 0;
1573         int last = conf->raid_disks - 1;
1574
1575         if (mddev->recovery_disabled == conf->recovery_disabled)
1576                 return -EBUSY;
1577
1578         if (md_integrity_add_rdev(rdev, mddev))
1579                 return -ENXIO;
1580
1581         if (rdev->raid_disk >= 0)
1582                 first = last = rdev->raid_disk;
1583
1584         /*
1585          * find the disk ... but prefer rdev->saved_raid_disk
1586          * if possible.
1587          */
1588         if (rdev->saved_raid_disk >= 0 &&
1589             rdev->saved_raid_disk >= first &&
1590             conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1591                 first = last = rdev->saved_raid_disk;
1592
1593         for (mirror = first; mirror <= last; mirror++) {
1594                 p = conf->mirrors+mirror;
1595                 if (!p->rdev) {
1596
1597                         if (mddev->gendisk)
1598                                 disk_stack_limits(mddev->gendisk, rdev->bdev,
1599                                                   rdev->data_offset << 9);
1600
1601                         p->head_position = 0;
1602                         rdev->raid_disk = mirror;
1603                         err = 0;
1604                         /* As all devices are equivalent, we don't need a full recovery
1605                          * if this device was recently part of the array
1606                          */
1607                         if (rdev->saved_raid_disk < 0)
1608                                 conf->fullsync = 1;
1609                         rcu_assign_pointer(p->rdev, rdev);
1610                         break;
1611                 }
1612                 if (test_bit(WantReplacement, &p->rdev->flags) &&
1613                     p[conf->raid_disks].rdev == NULL) {
1614                         /* Add this device as a replacement */
1615                         clear_bit(In_sync, &rdev->flags);
1616                         set_bit(Replacement, &rdev->flags);
1617                         rdev->raid_disk = mirror;
1618                         err = 0;
1619                         conf->fullsync = 1;
1620                         rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
1621                         break;
1622                 }
1623         }
1624         if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
1625                 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
1626         print_conf(conf);
1627         return err;
1628 }
1629
1630 static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1631 {
1632         struct r1conf *conf = mddev->private;
1633         int err = 0;
1634         int number = rdev->raid_disk;
1635         struct raid1_info *p = conf->mirrors + number;
1636
1637         if (rdev != p->rdev)
1638                 p = conf->mirrors + conf->raid_disks + number;
1639
1640         print_conf(conf);
1641         if (rdev == p->rdev) {
1642                 if (test_bit(In_sync, &rdev->flags) ||
1643                     atomic_read(&rdev->nr_pending)) {
1644                         err = -EBUSY;
1645                         goto abort;
1646                 }
1647                 /* Only remove non-faulty devices if recovery
1648                  * is not possible.
1649                  */
1650                 if (!test_bit(Faulty, &rdev->flags) &&
1651                     mddev->recovery_disabled != conf->recovery_disabled &&
1652                     mddev->degraded < conf->raid_disks) {
1653                         err = -EBUSY;
1654                         goto abort;
1655                 }
1656                 p->rdev = NULL;
1657                 if (!test_bit(RemoveSynchronized, &rdev->flags)) {
1658                         synchronize_rcu();
1659                         if (atomic_read(&rdev->nr_pending)) {
1660                                 /* lost the race, try later */
1661                                 err = -EBUSY;
1662                                 p->rdev = rdev;
1663                                 goto abort;
1664                         }
1665                 }
1666                 if (conf->mirrors[conf->raid_disks + number].rdev) {
1667                         /* We just removed a device that is being replaced.
1668                          * Move down the replacement.  We drain all IO before
1669                          * doing this to avoid confusion.
1670                          */
1671                         struct md_rdev *repl =
1672                                 conf->mirrors[conf->raid_disks + number].rdev;
1673                         freeze_array(conf, 0);
1674                         clear_bit(Replacement, &repl->flags);
1675                         p->rdev = repl;
1676                         conf->mirrors[conf->raid_disks + number].rdev = NULL;
1677                         unfreeze_array(conf);
1678                         clear_bit(WantReplacement, &rdev->flags);
1679                 } else
1680                         clear_bit(WantReplacement, &rdev->flags);
1681                 err = md_integrity_register(mddev);
1682         }
1683 abort:
1684
1685         print_conf(conf);
1686         return err;
1687 }
1688
1689 static void end_sync_read(struct bio *bio)
1690 {
1691         struct r1bio *r1_bio = bio->bi_private;
1692
1693         update_head_pos(r1_bio->read_disk, r1_bio);
1694
1695         /*
1696          * we have read a block, now it needs to be re-written,
1697          * or re-read if the read failed.
1698          * We don't do much here, just schedule handling by raid1d
1699          */
1700         if (!bio->bi_error)
1701                 set_bit(R1BIO_Uptodate, &r1_bio->state);
1702
1703         if (atomic_dec_and_test(&r1_bio->remaining))
1704                 reschedule_retry(r1_bio);
1705 }
1706
1707 static void end_sync_write(struct bio *bio)
1708 {
1709         int uptodate = !bio->bi_error;
1710         struct r1bio *r1_bio = bio->bi_private;
1711         struct mddev *mddev = r1_bio->mddev;
1712         struct r1conf *conf = mddev->private;
1713         sector_t first_bad;
1714         int bad_sectors;
1715         struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
1716
1717         if (!uptodate) {
1718                 sector_t sync_blocks = 0;
1719                 sector_t s = r1_bio->sector;
1720                 long sectors_to_go = r1_bio->sectors;
1721                 /* make sure these bits don't get cleared. */
1722                 do {
1723                         bitmap_end_sync(mddev->bitmap, s,
1724                                         &sync_blocks, 1);
1725                         s += sync_blocks;
1726                         sectors_to_go -= sync_blocks;
1727                 } while (sectors_to_go > 0);
1728                 set_bit(WriteErrorSeen, &rdev->flags);
1729                 if (!test_and_set_bit(WantReplacement, &rdev->flags))
1730                         set_bit(MD_RECOVERY_NEEDED, &
1731                                 mddev->recovery);
1732                 set_bit(R1BIO_WriteError, &r1_bio->state);
1733         } else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
1734                                &first_bad, &bad_sectors) &&
1735                    !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
1736                                 r1_bio->sector,
1737                                 r1_bio->sectors,
1738                                 &first_bad, &bad_sectors)
1739                 )
1740                 set_bit(R1BIO_MadeGood, &r1_bio->state);
1741
1742         if (atomic_dec_and_test(&r1_bio->remaining)) {
1743                 int s = r1_bio->sectors;
1744                 if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
1745                     test_bit(R1BIO_WriteError, &r1_bio->state))
1746                         reschedule_retry(r1_bio);
1747                 else {
1748                         put_buf(r1_bio);
1749                         md_done_sync(mddev, s, uptodate);
1750                 }
1751         }
1752 }
1753
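/*
 * Synchronously read or write 'sectors' sectors at 'sector' on 'rdev' using
 * 'page'.  Returns 1 on success; on failure a bad block is recorded for the
 * range (or the device is failed if that is not possible) and 0 is returned.
 */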
1754 static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
1755                             int sectors, struct page *page, int rw)
1756 {
1757         if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
1758                 /* success */
1759                 return 1;
1760         if (rw == WRITE) {
1761                 set_bit(WriteErrorSeen, &rdev->flags);
1762                 if (!test_and_set_bit(WantReplacement,
1763                                       &rdev->flags))
1764                         set_bit(MD_RECOVERY_NEEDED, &
1765                                 rdev->mddev->recovery);
1766         }
1767         /* need to record an error - either for the block or the device */
1768         if (!rdev_set_badblocks(rdev, sector, sectors, 0))
1769                 md_error(rdev->mddev, rdev);
1770         return 0;
1771 }
1772
1773 static int fix_sync_read_error(struct r1bio *r1_bio)
1774 {
1775         /* Try some synchronous reads of other devices to get
1776          * good data, much like with normal read errors.  Only
1777          * read into the pages we already have so we don't
1778          * need to re-issue the read request.
1779          * We don't need to freeze the array, because being in an
1780          * active sync request, there is no normal IO, and
1781          * no overlapping syncs.
1782          * We don't need to check is_badblock() again as we
1783          * made sure that anything with a bad block in range
1784          * will have bi_end_io clear.
1785          */
1786         struct mddev *mddev = r1_bio->mddev;
1787         struct r1conf *conf = mddev->private;
1788         struct bio *bio = r1_bio->bios[r1_bio->read_disk];
1789         sector_t sect = r1_bio->sector;
1790         int sectors = r1_bio->sectors;
1791         int idx = 0;
1792
1793         while(sectors) {
1794                 int s = sectors;
1795                 int d = r1_bio->read_disk;
1796                 int success = 0;
1797                 struct md_rdev *rdev;
1798                 int start;
1799
1800                 if (s > (PAGE_SIZE>>9))
1801                         s = PAGE_SIZE >> 9;
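                /* work one page at a time; with 4KiB pages s is capped at 8 sectors */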
1802                 do {
1803                         if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
1804                                 /* No rcu protection needed here; devices
1805                                  * can only be removed when no resync is
1806                                  * active, and resync is currently active
1807                                  */
1808                                 rdev = conf->mirrors[d].rdev;
1809                                 if (sync_page_io(rdev, sect, s<<9,
1810                                                  bio->bi_io_vec[idx].bv_page,
1811                                                  REQ_OP_READ, 0, false)) {
1812                                         success = 1;
1813                                         break;
1814                                 }
1815                         }
1816                         d++;
1817                         if (d == conf->raid_disks * 2)
1818                                 d = 0;
1819                 } while (!success && d != r1_bio->read_disk);
1820
1821                 if (!success) {
1822                         char b[BDEVNAME_SIZE];
1823                         int abort = 0;
1824                         /* Cannot read from anywhere, this block is lost.
1825                          * Record a bad block on each device.  If that doesn't
1826                          * work just disable and interrupt the recovery.
1827                          * Don't fail devices as that won't really help.
1828                          */
1829                         printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error"
1830                                " for block %llu\n",
1831                                mdname(mddev),
1832                                bdevname(bio->bi_bdev, b),
1833                                (unsigned long long)r1_bio->sector);
1834                         for (d = 0; d < conf->raid_disks * 2; d++) {
1835                                 rdev = conf->mirrors[d].rdev;
1836                                 if (!rdev || test_bit(Faulty, &rdev->flags))
1837                                         continue;
1838                                 if (!rdev_set_badblocks(rdev, sect, s, 0))
1839                                         abort = 1;
1840                         }
1841                         if (abort) {
1842                                 conf->recovery_disabled =
1843                                         mddev->recovery_disabled;
1844                                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1845                                 md_done_sync(mddev, r1_bio->sectors, 0);
1846                                 put_buf(r1_bio);
1847                                 return 0;
1848                         }
1849                         /* Try next page */
1850                         sectors -= s;
1851                         sect += s;
1852                         idx++;
1853                         continue;
1854                 }
1855
1856                 start = d;
1857                 /* write it back and re-read */
1858                 while (d != r1_bio->read_disk) {
1859                         if (d == 0)
1860                                 d = conf->raid_disks * 2;
1861                         d--;
1862                         if (r1_bio->bios[d]->bi_end_io != end_sync_read)
1863                                 continue;
1864                         rdev = conf->mirrors[d].rdev;
1865                         if (r1_sync_page_io(rdev, sect, s,
1866                                             bio->bi_io_vec[idx].bv_page,
1867                                             WRITE) == 0) {
1868                                 r1_bio->bios[d]->bi_end_io = NULL;
1869                                 rdev_dec_pending(rdev, mddev);
1870                         }
1871                 }
1872                 d = start;
1873                 while (d != r1_bio->read_disk) {
1874                         if (d == 0)
1875                                 d = conf->raid_disks * 2;
1876                         d--;
1877                         if (r1_bio->bios[d]->bi_end_io != end_sync_read)
1878                                 continue;
1879                         rdev = conf->mirrors[d].rdev;
1880                         if (r1_sync_page_io(rdev, sect, s,
1881                                             bio->bi_io_vec[idx].bv_page,
1882                                             READ) != 0)
1883                                 atomic_add(s, &rdev->corrected_errors);
1884                 }
1885                 sectors -= s;
1886                 sect += s;
1887                 idx ++;
1888         }
1889         set_bit(R1BIO_Uptodate, &r1_bio->state);
1890         bio->bi_error = 0;
1891         return 1;
1892 }
1893
1894 static void process_checks(struct r1bio *r1_bio)
1895 {
1896         /* We have read all readable devices.  If we haven't
1897          * got the block, then there is no hope left.
1898          * If we have, then we want to do a comparison
1899          * and skip the write if everything is the same.
1900          * If any blocks failed to read, then we need to
1901          * attempt an over-write
1902          */
1903         struct mddev *mddev = r1_bio->mddev;
1904         struct r1conf *conf = mddev->private;
1905         int primary;
1906         int i;
1907         int vcnt;
1908
1909         /* Fix variable parts of all bios */
1910         vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
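        /* e.g. with 4KiB pages this is (sectors + 7) >> 3, so a 10-sector
         * r1_bio needs (10 + 7) >> 3 == 2 pages.
         */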
1911         for (i = 0; i < conf->raid_disks * 2; i++) {
1912                 int j;
1913                 int size;
1914                 int error;
1915                 struct bio *b = r1_bio->bios[i];
1916                 if (b->bi_end_io != end_sync_read)
1917                         continue;
1918                 /* fixup the bio for reuse, but preserve errno */
1919                 error = b->bi_error;
1920                 bio_reset(b);
1921                 b->bi_error = error;
1922                 b->bi_vcnt = vcnt;
1923                 b->bi_iter.bi_size = r1_bio->sectors << 9;
1924                 b->bi_iter.bi_sector = r1_bio->sector +
1925                         conf->mirrors[i].rdev->data_offset;
1926                 b->bi_bdev = conf->mirrors[i].rdev->bdev;
1927                 b->bi_end_io = end_sync_read;
1928                 b->bi_private = r1_bio;
1929
1930                 size = b->bi_iter.bi_size;
1931                 for (j = 0; j < vcnt ; j++) {
1932                         struct bio_vec *bi;
1933                         bi = &b->bi_io_vec[j];
1934                         bi->bv_offset = 0;
1935                         if (size > PAGE_SIZE)
1936                                 bi->bv_len = PAGE_SIZE;
1937                         else
1938                                 bi->bv_len = size;
1939                         size -= PAGE_SIZE;
1940                 }
1941         }
1942         for (primary = 0; primary < conf->raid_disks * 2; primary++)
1943                 if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
1944                     !r1_bio->bios[primary]->bi_error) {
1945                         r1_bio->bios[primary]->bi_end_io = NULL;
1946                         rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
1947                         break;
1948                 }
1949         r1_bio->read_disk = primary;
1950         for (i = 0; i < conf->raid_disks * 2; i++) {
1951                 int j;
1952                 struct bio *pbio = r1_bio->bios[primary];
1953                 struct bio *sbio = r1_bio->bios[i];
1954                 int error = sbio->bi_error;
1955
1956                 if (sbio->bi_end_io != end_sync_read)
1957                         continue;
1958                 /* Now we can 'fixup' the error value */
1959                 sbio->bi_error = 0;
1960
1961                 if (!error) {
1962                         for (j = vcnt; j-- ; ) {
1963                                 struct page *p, *s;
1964                                 p = pbio->bi_io_vec[j].bv_page;
1965                                 s = sbio->bi_io_vec[j].bv_page;
1966                                 if (memcmp(page_address(p),
1967                                            page_address(s),
1968                                            sbio->bi_io_vec[j].bv_len))
1969                                         break;
1970                         }
1971                 } else
1972                         j = 0;
1973                 if (j >= 0)
1974                         atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
1975                 if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
1976                               && !error)) {
1977                         /* No need to write to this device. */
1978                         sbio->bi_end_io = NULL;
1979                         rdev_dec_pending(conf->mirrors[i].rdev, mddev);
1980                         continue;
1981                 }
1982
1983                 bio_copy_data(sbio, pbio);
1984         }
1985 }
1986
1987 static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
1988 {
1989         struct r1conf *conf = mddev->private;
1990         int i;
1991         int disks = conf->raid_disks * 2;
1992         struct bio *bio, *wbio;
1993
1994         bio = r1_bio->bios[r1_bio->read_disk];
1995
1996         if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
1997                 /* ouch - failed to read all of that. */
1998                 if (!fix_sync_read_error(r1_bio))
1999                         return;
2000
2001         if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2002                 process_checks(r1_bio);
2003
2004         /*
2005          * schedule writes
2006          */
2007         atomic_set(&r1_bio->remaining, 1);
2008         for (i = 0; i < disks ; i++) {
2009                 wbio = r1_bio->bios[i];
2010                 if (wbio->bi_end_io == NULL ||
2011                     (wbio->bi_end_io == end_sync_read &&
2012                      (i == r1_bio->read_disk ||
2013                       !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
2014                         continue;
2015
2016                 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
2017                 wbio->bi_end_io = end_sync_write;
2018                 atomic_inc(&r1_bio->remaining);
2019                 md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
2020
2021                 generic_make_request(wbio);
2022         }
2023
2024         if (atomic_dec_and_test(&r1_bio->remaining)) {
2025                 /* if we're here, all write(s) have completed, so clean up */
2026                 int s = r1_bio->sectors;
2027                 if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2028                     test_bit(R1BIO_WriteError, &r1_bio->state))
2029                         reschedule_retry(r1_bio);
2030                 else {
2031                         put_buf(r1_bio);
2032                         md_done_sync(mddev, s, 1);
2033                 }
2034         }
2035 }
2036
2037 /*
2038  * This is a kernel thread which:
2039  *
2040  *      1.      Retries failed read operations on working mirrors.
2041  *      2.      Updates the raid superblock when problems are encountered.
2042  *      3.      Performs writes following reads for array synchronising.
2043  */
2044
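/*
 * fix_read_error() attempts to repair 'sectors' sectors at 'sect' that failed
 * to read on 'read_disk': read the data from a working mirror, write it back
 * over the bad region and re-read it, recording bad blocks (or failing the
 * device) where the rewrite does not succeed.
 */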
2045 static void fix_read_error(struct r1conf *conf, int read_disk,
2046                            sector_t sect, int sectors)
2047 {
2048         struct mddev *mddev = conf->mddev;
2049         while(sectors) {
2050                 int s = sectors;
2051                 int d = read_disk;
2052                 int success = 0;
2053                 int start;
2054                 struct md_rdev *rdev;
2055
2056                 if (s > (PAGE_SIZE>>9))
2057                         s = PAGE_SIZE >> 9;
2058
2059                 do {
2060                         sector_t first_bad;
2061                         int bad_sectors;
2062
2063                         rcu_read_lock();
2064                         rdev = rcu_dereference(conf->mirrors[d].rdev);
2065                         if (rdev &&
2066                             (test_bit(In_sync, &rdev->flags) ||
2067                              (!test_bit(Faulty, &rdev->flags) &&
2068                               rdev->recovery_offset >= sect + s)) &&
2069                             is_badblock(rdev, sect, s,
2070                                         &first_bad, &bad_sectors) == 0) {
2071                                 atomic_inc(&rdev->nr_pending);
2072                                 rcu_read_unlock();
2073                                 if (sync_page_io(rdev, sect, s<<9,
2074                                          conf->tmppage, REQ_OP_READ, 0, false))
2075                                         success = 1;
2076                                 rdev_dec_pending(rdev, mddev);
2077                                 if (success)
2078                                         break;
2079                         } else
2080                                 rcu_read_unlock();
2081                         d++;
2082                         if (d == conf->raid_disks * 2)
2083                                 d = 0;
2084                 } while (!success && d != read_disk);
2085
2086                 if (!success) {
2087                         /* Cannot read from anywhere - mark it bad */
2088                         struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
2089                         if (!rdev_set_badblocks(rdev, sect, s, 0))
2090                                 md_error(mddev, rdev);
2091                         break;
2092                 }
2093                 /* write it back and re-read */
2094                 start = d;
2095                 while (d != read_disk) {
2096                         if (d==0)
2097                                 d = conf->raid_disks * 2;
2098                         d--;
2099                         rcu_read_lock();
2100                         rdev = rcu_dereference(conf->mirrors[d].rdev);
2101                         if (rdev &&
2102                             !test_bit(Faulty, &rdev->flags)) {
2103                                 atomic_inc(&rdev->nr_pending);
2104                                 rcu_read_unlock();
2105                                 r1_sync_page_io(rdev, sect, s,
2106                                                 conf->tmppage, WRITE);
2107                                 rdev_dec_pending(rdev, mddev);
2108                         } else
2109                                 rcu_read_unlock();
2110                 }
2111                 d = start;
2112                 while (d != read_disk) {
2113                         char b[BDEVNAME_SIZE];
2114                         if (d==0)
2115                                 d = conf->raid_disks * 2;
2116                         d--;
2117                         rcu_read_lock();
2118                         rdev = rcu_dereference(conf->mirrors[d].rdev);
2119                         if (rdev &&
2120                             !test_bit(Faulty, &rdev->flags)) {
2121                                 atomic_inc(&rdev->nr_pending);
2122                                 rcu_read_unlock();
2123                                 if (r1_sync_page_io(rdev, sect, s,
2124                                                     conf->tmppage, READ)) {
2125                                         atomic_add(s, &rdev->corrected_errors);
2126                                         printk(KERN_INFO
2127                                                "md/raid1:%s: read error corrected "
2128                                                "(%d sectors at %llu on %s)\n",
2129                                                mdname(mddev), s,
2130                                                (unsigned long long)(sect +
2131                                                                     rdev->data_offset),
2132                                                bdevname(rdev->bdev, b));
2133                                 }
2134                                 rdev_dec_pending(rdev, mddev);
2135                         } else
2136                                 rcu_read_unlock();
2137                 }
2138                 sectors -= s;
2139                 sect += s;
2140         }
2141 }
2142
2143 static int narrow_write_error(struct r1bio *r1_bio, int i)
2144 {
2145         struct mddev *mddev = r1_bio->mddev;
2146         struct r1conf *conf = mddev->private;
2147         struct md_rdev *rdev = conf->mirrors[i].rdev;
2148
2149         /* bio has the data to be written to device 'i' where
2150          * we just recently had a write error.
2151          * We repeatedly clone the bio and trim down to one block,
2152          * then try the write.  Where the write fails we record
2153          * a bad block.
2154          * It is conceivable that the bio doesn't exactly align with
2155          * blocks.  We must handle this somehow.
2156          *
2157          * We currently own a reference on the rdev.
2158          */
2159
2160         int block_sectors;
2161         sector_t sector;
2162         int sectors;
2163         int sect_to_write = r1_bio->sectors;
2164         int ok = 1;
2165
2166         if (rdev->badblocks.shift < 0)
2167                 return 0;
2168
2169         block_sectors = roundup(1 << rdev->badblocks.shift,
2170                                 bdev_logical_block_size(rdev->bdev) >> 9);
2171         sector = r1_bio->sector;
2172         sectors = ((sector + block_sectors)
2173                    & ~(sector_t)(block_sectors - 1))
2174                 - sector;
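        /* 'sectors' is the distance to the next block_sectors boundary,
         * e.g. block_sectors == 8 and sector == 21 gives ((21 + 8) & ~7) - 21 == 3,
         * so the first pass writes 3 sectors and later passes write 8 at a time.
         */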
2175
2176         while (sect_to_write) {
2177                 struct bio *wbio;
2178                 if (sectors > sect_to_write)
2179                         sectors = sect_to_write;
2180                 /* Write at 'sector' for 'sectors'*/
2181
2182                 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
2183                         unsigned vcnt = r1_bio->behind_page_count;
2184                         struct bio_vec *vec = r1_bio->behind_bvecs;
2185
2186                         while (!vec->bv_page) {
2187                                 vec++;
2188                                 vcnt--;
2189                         }
2190
2191                         wbio = bio_alloc_mddev(GFP_NOIO, vcnt, mddev);
2192                         memcpy(wbio->bi_io_vec, vec, vcnt * sizeof(struct bio_vec));
2193
2194                         wbio->bi_vcnt = vcnt;
2195                 } else {
2196                         wbio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
2197                 }
2198
2199                 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
2200                 wbio->bi_iter.bi_sector = r1_bio->sector;
2201                 wbio->bi_iter.bi_size = r1_bio->sectors << 9;
2202
2203                 bio_trim(wbio, sector - r1_bio->sector, sectors);
2204                 wbio->bi_iter.bi_sector += rdev->data_offset;
2205                 wbio->bi_bdev = rdev->bdev;
2206
2207                 if (submit_bio_wait(wbio) < 0)
2208                         /* failure! */
2209                         ok = rdev_set_badblocks(rdev, sector,
2210                                                 sectors, 0)
2211                                 && ok;
2212
2213                 bio_put(wbio);
2214                 sect_to_write -= sectors;
2215                 sector += sectors;
2216                 sectors = block_sectors;
2217         }
2218         return ok;
2219 }
2220
2221 static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2222 {
2223         int m;
2224         int s = r1_bio->sectors;
2225         for (m = 0; m < conf->raid_disks * 2 ; m++) {
2226                 struct md_rdev *rdev = conf->mirrors[m].rdev;
2227                 struct bio *bio = r1_bio->bios[m];
2228                 if (bio->bi_end_io == NULL)
2229                         continue;
2230                 if (!bio->bi_error &&
2231                     test_bit(R1BIO_MadeGood, &r1_bio->state)) {
2232                         rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
2233                 }
2234                 if (bio->bi_error &&
2235                     test_bit(R1BIO_WriteError, &r1_bio->state)) {
2236                         if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
2237                                 md_error(conf->mddev, rdev);
2238                 }
2239         }
2240         put_buf(r1_bio);
2241         md_done_sync(conf->mddev, s, 1);
2242 }
2243
2244 static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2245 {
2246         int m;
2247         bool fail = false;
2248         for (m = 0; m < conf->raid_disks * 2 ; m++)
2249                 if (r1_bio->bios[m] == IO_MADE_GOOD) {
2250                         struct md_rdev *rdev = conf->mirrors[m].rdev;
2251                         rdev_clear_badblocks(rdev,
2252                                              r1_bio->sector,
2253                                              r1_bio->sectors, 0);
2254                         rdev_dec_pending(rdev, conf->mddev);
2255                 } else if (r1_bio->bios[m] != NULL) {
2256                         /* This drive got a write error.  We need to
2257                          * narrow down and record precise write
2258                          * errors.
2259                          */
2260                         fail = true;
2261                         if (!narrow_write_error(r1_bio, m)) {
2262                                 md_error(conf->mddev,
2263                                          conf->mirrors[m].rdev);
2264                                 /* an I/O failed, we can't clear the bitmap */
2265                                 set_bit(R1BIO_Degraded, &r1_bio->state);
2266                         }
2267                         rdev_dec_pending(conf->mirrors[m].rdev,
2268                                          conf->mddev);
2269                 }
2270         if (fail) {
2271                 spin_lock_irq(&conf->device_lock);
2272                 list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
2273                 conf->nr_queued++;
2274                 spin_unlock_irq(&conf->device_lock);
2275                 md_wakeup_thread(conf->mddev->thread);
2276         } else {
2277                 if (test_bit(R1BIO_WriteError, &r1_bio->state))
2278                         close_write(r1_bio);
2279                 raid_end_bio_io(r1_bio);
2280         }
2281 }
2282
2283 static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
2284 {
2285         int disk;
2286         int max_sectors;
2287         struct mddev *mddev = conf->mddev;
2288         struct bio *bio;
2289         char b[BDEVNAME_SIZE];
2290         struct md_rdev *rdev;
2291
2292         clear_bit(R1BIO_ReadError, &r1_bio->state);
2293         /* we got a read error. Maybe the drive is bad.  Maybe just
2294          * the block and we can fix it.
2295          * We freeze all other IO, and try reading the block from
2296          * other devices.  When we find one, we re-write
2297          * it, and check whether that fixes the read error.
2298          * This is all done synchronously while the array is
2299          * frozen
2300          */
2301         if (mddev->ro == 0) {
2302                 freeze_array(conf, 1);
2303                 fix_read_error(conf, r1_bio->read_disk,
2304                                r1_bio->sector, r1_bio->sectors);
2305                 unfreeze_array(conf);
2306         } else
2307                 md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
2308         rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev);
2309
2310         bio = r1_bio->bios[r1_bio->read_disk];
2311         bdevname(bio->bi_bdev, b);
2312 read_more:
2313         disk = read_balance(conf, r1_bio, &max_sectors);
2314         if (disk == -1) {
2315                 printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
2316                        " read error for block %llu\n",
2317                        mdname(mddev), b, (unsigned long long)r1_bio->sector);
2318                 raid_end_bio_io(r1_bio);
2319         } else {
2320                 const unsigned long do_sync
2321                         = r1_bio->master_bio->bi_rw & REQ_SYNC;
2322                 if (bio) {
2323                         r1_bio->bios[r1_bio->read_disk] =
2324                                 mddev->ro ? IO_BLOCKED : NULL;
2325                         bio_put(bio);
2326                 }
2327                 r1_bio->read_disk = disk;
2328                 bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
2329                 bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector,
2330                          max_sectors);
2331                 r1_bio->bios[r1_bio->read_disk] = bio;
2332                 rdev = conf->mirrors[disk].rdev;
2333                 printk_ratelimited(KERN_ERR
2334                                    "md/raid1:%s: redirecting sector %llu"
2335                                    " to other mirror: %s\n",
2336                                    mdname(mddev),
2337                                    (unsigned long long)r1_bio->sector,
2338                                    bdevname(rdev->bdev, b));
2339                 bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;
2340                 bio->bi_bdev = rdev->bdev;
2341                 bio->bi_end_io = raid1_end_read_request;
2342                 bio_set_op_attrs(bio, REQ_OP_READ, do_sync);
2343                 bio->bi_private = r1_bio;
2344                 if (max_sectors < r1_bio->sectors) {
2345                         /* Drat - have to split this up more */
2346                         struct bio *mbio = r1_bio->master_bio;
2347                         int sectors_handled = (r1_bio->sector + max_sectors
2348                                                - mbio->bi_iter.bi_sector);
2349                         r1_bio->sectors = max_sectors;
2350                         spin_lock_irq(&conf->device_lock);
2351                         if (mbio->bi_phys_segments == 0)
2352                                 mbio->bi_phys_segments = 2;
2353                         else
2354                                 mbio->bi_phys_segments++;
2355                         spin_unlock_irq(&conf->device_lock);
2356                         generic_make_request(bio);
2357                         bio = NULL;
2358
2359                         r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
2360
2361                         r1_bio->master_bio = mbio;
2362                         r1_bio->sectors = bio_sectors(mbio) - sectors_handled;
2363                         r1_bio->state = 0;
2364                         set_bit(R1BIO_ReadError, &r1_bio->state);
2365                         r1_bio->mddev = mddev;
2366                         r1_bio->sector = mbio->bi_iter.bi_sector +
2367                                 sectors_handled;
2368
2369                         goto read_more;
2370                 } else
2371                         generic_make_request(bio);
2372         }
2373 }
2374
2375 static void raid1d(struct md_thread *thread)
2376 {
2377         struct mddev *mddev = thread->mddev;
2378         struct r1bio *r1_bio;
2379         unsigned long flags;
2380         struct r1conf *conf = mddev->private;
2381         struct list_head *head = &conf->retry_list;
2382         struct blk_plug plug;
2383
2384         md_check_recovery(mddev);
2385
2386         if (!list_empty_careful(&conf->bio_end_io_list) &&
2387             !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
2388                 LIST_HEAD(tmp);
2389                 spin_lock_irqsave(&conf->device_lock, flags);
2390                 if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
2391                         while (!list_empty(&conf->bio_end_io_list)) {
2392                                 list_move(conf->bio_end_io_list.prev, &tmp);
2393                                 conf->nr_queued--;
2394                         }
2395                 }
2396                 spin_unlock_irqrestore(&conf->device_lock, flags);
2397                 while (!list_empty(&tmp)) {
2398                         r1_bio = list_first_entry(&tmp, struct r1bio,
2399                                                   retry_list);
2400                         list_del(&r1_bio->retry_list);
2401                         if (mddev->degraded)
2402                                 set_bit(R1BIO_Degraded, &r1_bio->state);
2403                         if (test_bit(R1BIO_WriteError, &r1_bio->state))
2404                                 close_write(r1_bio);
2405                         raid_end_bio_io(r1_bio);
2406                 }
2407         }
2408
2409         blk_start_plug(&plug);
2410         for (;;) {
2411
2412                 flush_pending_writes(conf);
2413
2414                 spin_lock_irqsave(&conf->device_lock, flags);
2415                 if (list_empty(head)) {
2416                         spin_unlock_irqrestore(&conf->device_lock, flags);
2417                         break;
2418                 }
2419                 r1_bio = list_entry(head->prev, struct r1bio, retry_list);
2420                 list_del(head->prev);
2421                 conf->nr_queued--;
2422                 spin_unlock_irqrestore(&conf->device_lock, flags);
2423
2424                 mddev = r1_bio->mddev;
2425                 conf = mddev->private;
2426                 if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
2427                         if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2428                             test_bit(R1BIO_WriteError, &r1_bio->state))
2429                                 handle_sync_write_finished(conf, r1_bio);
2430                         else
2431                                 sync_request_write(mddev, r1_bio);
2432                 } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2433                            test_bit(R1BIO_WriteError, &r1_bio->state))
2434                         handle_write_finished(conf, r1_bio);
2435                 else if (test_bit(R1BIO_ReadError, &r1_bio->state))
2436                         handle_read_error(conf, r1_bio);
2437                 else
2438                         /* just a partial read to be scheduled from a separate
2439                          * context
2440                          */
2441                         generic_make_request(r1_bio->bios[r1_bio->read_disk]);
2442
2443                 cond_resched();
2444                 if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
2445                         md_check_recovery(mddev);
2446         }
2447         blk_finish_plug(&plug);
2448 }
2449
2450 static int init_resync(struct r1conf *conf)
2451 {
2452         int buffs;
2453
2454         buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
2455         BUG_ON(conf->r1buf_pool);
2456         conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
2457                                           conf->poolinfo);
2458         if (!conf->r1buf_pool)
2459                 return -ENOMEM;
2460         conf->next_resync = 0;
2461         return 0;
2462 }
2463
2464 /*
2465  * perform a "sync" on one "block"
2466  *
2467  * We need to make sure that no normal I/O request - particularly write
2468  * requests - conflict with active sync requests.
2469  *
2470  * This is achieved by tracking pending requests and a 'barrier' concept
2471  * that can be installed to exclude normal IO requests.
2472  */
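/*
 * In outline: raid1_sync_request() calls raise_barrier() for the window it
 * is about to resync, normal writes go through wait_barrier()/allow_barrier(),
 * and close_sync() tears the window state down once the sync completes or
 * is aborted.
 */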
2473
2474 static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
2475                                    int *skipped)
2476 {
2477         struct r1conf *conf = mddev->private;
2478         struct r1bio *r1_bio;
2479         struct bio *bio;
2480         sector_t max_sector, nr_sectors;
2481         int disk = -1;
2482         int i;
2483         int wonly = -1;
2484         int write_targets = 0, read_targets = 0;
2485         sector_t sync_blocks;
2486         int still_degraded = 0;
2487         int good_sectors = RESYNC_SECTORS;
2488         int min_bad = 0; /* number of sectors that are bad in all devices */
2489
2490         if (!conf->r1buf_pool)
2491                 if (init_resync(conf))
2492                         return 0;
2493
2494         max_sector = mddev->dev_sectors;
2495         if (sector_nr >= max_sector) {
2496                 /* If we aborted, we need to abort the
2497                  * sync on the 'current' bitmap chunk (there will
2498                  * only be one in raid1 resync).
2499                  * We can find the current address in mddev->curr_resync
2500                  */
2501                 if (mddev->curr_resync < max_sector) /* aborted */
2502                         bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2503                                                 &sync_blocks, 1);
2504                 else /* completed sync */
2505                         conf->fullsync = 0;
2506
2507                 bitmap_close_sync(mddev->bitmap);
2508                 close_sync(conf);
2509
2510                 if (mddev_is_clustered(mddev)) {
2511                         conf->cluster_sync_low = 0;
2512                         conf->cluster_sync_high = 0;
2513                 }
2514                 return 0;
2515         }
2516
2517         if (mddev->bitmap == NULL &&
2518             mddev->recovery_cp == MaxSector &&
2519             !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
2520             conf->fullsync == 0) {
2521                 *skipped = 1;
2522                 return max_sector - sector_nr;
2523         }
2524         /* before building a request, check if we can skip these blocks.
2525          * This call to bitmap_start_sync doesn't actually record anything
2526          */
2527         if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
2528             !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2529                 /* We can skip this block, and probably several more */
2530                 *skipped = 1;
2531                 return sync_blocks;
2532         }
2533
2534         /*
2535          * If there is non-resync activity waiting for a turn, then let it
2536          * through before starting on this new sync request.
2537          */
2538         if (conf->nr_waiting)
2539                 schedule_timeout_uninterruptible(1);
2540
2541         /* we are incrementing sector_nr below. To be safe, we check against
2542          * sector_nr + two times RESYNC_SECTORS
2543          */
2544
2545         bitmap_cond_end_sync(mddev->bitmap, sector_nr,
2546                 mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
2547         r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
2548
2549         raise_barrier(conf, sector_nr);
2550
2551         rcu_read_lock();
2552         /*
2553          * If we get a correctably read error during resync or recovery,
2554          * we might want to read from a different device.  So we
2555          * flag all drives that could conceivably be read from for READ,
2556          * and any others (which will be non-In_sync devices) for WRITE.
2557          * If a read fails, we try reading from something else for which READ
2558          * is OK.
2559          */
2560
2561         r1_bio->mddev = mddev;
2562         r1_bio->sector = sector_nr;
2563         r1_bio->state = 0;
2564         set_bit(R1BIO_IsSync, &r1_bio->state);
2565
2566         for (i = 0; i < conf->raid_disks * 2; i++) {
2567                 struct md_rdev *rdev;
2568                 bio = r1_bio->bios[i];
2569                 bio_reset(bio);
2570
2571                 rdev = rcu_dereference(conf->mirrors[i].rdev);
2572                 if (rdev == NULL ||
2573                     test_bit(Faulty, &rdev->flags)) {
2574                         if (i < conf->raid_disks)
2575                                 still_degraded = 1;
2576                 } else if (!test_bit(In_sync, &rdev->flags)) {
2577                         bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
2578                         bio->bi_end_io = end_sync_write;
2579                         write_targets++;
2580                 } else {
2581                         /* may need to read from here */
2582                         sector_t first_bad = MaxSector;
2583                         int bad_sectors;
2584
2585                         if (is_badblock(rdev, sector_nr, good_sectors,
2586                                         &first_bad, &bad_sectors)) {
2587                                 if (first_bad > sector_nr)
2588                                         good_sectors = first_bad - sector_nr;
2589                                 else {
2590                                         bad_sectors -= (sector_nr - first_bad);
2591                                         if (min_bad == 0 ||
2592                                             min_bad > bad_sectors)
2593                                                 min_bad = bad_sectors;
2594                                 }
2595                         }
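                             /* This device is readable at sector_nr; note it
                              * as a read candidate, preferring devices that
                              * are not write-mostly.
                              */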
2596                         if (sector_nr < first_bad) {
2597                                 if (test_bit(WriteMostly, &rdev->flags)) {
2598                                         if (wonly < 0)
2599                                                 wonly = i;
2600                                 } else {
2601                                         if (disk < 0)
2602                                                 disk = i;
2603                                 }
2604                                 bio_set_op_attrs(bio, REQ_OP_READ, 0);
2605                                 bio->bi_end_io = end_sync_read;
2606                                 read_targets++;
2607                         } else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
2608                                 test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
2609                                 !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
2610                                 /*
2611                                  * The device is suitable for reading (InSync),
2612                                  * but has bad block(s) here. Let's try to correct them,
2613                                  * if we are doing resync or repair. Otherwise, leave
2614                                  * this device alone for this sync request.
2615                                  */
2616                                 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
2617                                 bio->bi_end_io = end_sync_write;
2618                                 write_targets++;
2619                         }
2620                 }
2621                 if (bio->bi_end_io) {
2622                         atomic_inc(&rdev->nr_pending);
2623                         bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
2624                         bio->bi_bdev = rdev->bdev;
2625                         bio->bi_private = r1_bio;
2626                 }
2627         }
2628         rcu_read_unlock();
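             /* No regular read candidate was found - fall back to a
              * write-mostly device, if we saw one.
              */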
2629         if (disk < 0)
2630                 disk = wonly;
2631         r1_bio->read_disk = disk;
2632
2633         if (read_targets == 0 && min_bad > 0) {
2634                 /* These sectors are bad on all InSync devices, so we
2635                  * need to mark them bad on all write targets
2636                  */
2637                 int ok = 1;
2638                 for (i = 0 ; i < conf->raid_disks * 2 ; i++)
2639                         if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
2640                                 struct md_rdev *rdev = conf->mirrors[i].rdev;
2641                                 ok = rdev_set_badblocks(rdev, sector_nr,
2642                                                         min_bad, 0
2643                                         ) && ok;
2644                         }
2645                 set_bit(MD_CHANGE_DEVS, &mddev->flags);
2646                 *skipped = 1;
2647                 put_buf(r1_bio);
2648
2649                 if (!ok) {
2650                         /* Cannot record the badblocks, so need to
2651                          * abort the resync.
2652                          * If there are multiple read targets, could just
2653                          * fail the really bad ones ???
2654                          */
2655                         conf->recovery_disabled = mddev->recovery_disabled;
2656                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2657                         return 0;
2658                 } else
2659                         return min_bad;
2660
2661         }
2662         if (min_bad > 0 && min_bad < good_sectors) {
2663                 /* only resync enough to reach the next bad->good
2664                  * transition */
2665                 good_sectors = min_bad;
2666         }
2667
2668         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
2669                 /* extra read targets are also write targets */
2670                 write_targets += read_targets-1;
2671
2672         if (write_targets == 0 || read_targets == 0) {
2673                 /* There is nowhere to write, so all non-sync
2674          * drives must have been failed, so we are finished
2675                  */
2676                 sector_t rv;
2677                 if (min_bad > 0)
2678                         max_sector = sector_nr + min_bad;
2679                 rv = max_sector - sector_nr;
2680                 *skipped = 1;
2681                 put_buf(r1_bio);
2682                 return rv;
2683         }
2684
2685         if (max_sector > mddev->resync_max)
2686                 max_sector = mddev->resync_max; /* Don't do IO beyond here */
2687         if (max_sector > sector_nr + good_sectors)
2688                 max_sector = sector_nr + good_sectors;
2689         nr_sectors = 0;
2690         sync_blocks = 0;
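             /* Build the request: attach one page per iteration to every
              * active bio until the resync buffer is full (RESYNC_PAGES),
              * we reach max_sector, or the bitmap says the following blocks
              * need no sync.
              */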
2691         do {
2692                 struct page *page;
2693                 int len = PAGE_SIZE;
2694                 if (sector_nr + (len>>9) > max_sector)
2695                         len = (max_sector - sector_nr) << 9;
2696                 if (len == 0)
2697                         break;
2698                 if (sync_blocks == 0) {
2699                         if (!bitmap_start_sync(mddev->bitmap, sector_nr,
2700                                                &sync_blocks, still_degraded) &&
2701                             !conf->fullsync &&
2702                             !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2703                                 break;
2704                         if ((len >> 9) > sync_blocks)
2705                                 len = sync_blocks<<9;
2706                 }
2707
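                     /* The resync pages were preallocated with the r1_bio
                      * from conf->r1buf_pool and still sit in each bio's
                      * bi_io_vec; bio_add_page() re-installs them with the
                      * correct length.  If one bio fills up, restore the
                      * page pointer, strip the page just added from the
                      * earlier bios so they all stay the same size, and
                      * finish this request.
                      */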
2708                 for (i = 0 ; i < conf->raid_disks * 2; i++) {
2709                         bio = r1_bio->bios[i];
2710                         if (bio->bi_end_io) {
2711                                 page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
2712                                 if (bio_add_page(bio, page, len, 0) == 0) {
2713                                         /* stop here */
2714                                         bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
2715                                         while (i > 0) {
2716                                                 i--;
2717                                                 bio = r1_bio->bios[i];
2718                                                 if (bio->bi_end_io == NULL)
2719                                                         continue;
2720                                                 /* remove last page from this bio */
2721                                                 bio->bi_vcnt--;
2722                                                 bio->bi_iter.bi_size -= len;
2723                                                 bio_clear_flag(bio, BIO_SEG_VALID);
2724                                         }
2725                                         goto bio_full;
2726                                 }
2727                         }
2728                 }
2729                 nr_sectors += len>>9;
2730                 sector_nr += len>>9;
2731                 sync_blocks -= (len>>9);
2732         } while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES);
2733  bio_full:
2734         r1_bio->sectors = nr_sectors;
2735
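             /* For a clustered array, advance the shared resync window and
              * tell the other nodes which region is being resynced so they
              * can avoid I/O that would conflict with it.
              */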
2736         if (mddev_is_clustered(mddev) &&
2737                         conf->cluster_sync_high < sector_nr + nr_sectors) {
2738                 conf->cluster_sync_low = mddev->curr_resync_completed;
2739                 conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS;
2740                 /* Send resync message */
2741                 md_cluster_ops->resync_info_update(mddev,
2742                                 conf->cluster_sync_low,
2743                                 conf->cluster_sync_high);
2744         }
2745
2746         /* For a user-requested sync, we read all readable devices and do a
2747          * compare
2748          */
2749         if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2750                 atomic_set(&r1_bio->remaining, read_targets);
2751                 for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
2752                         bio = r1_bio->bios[i];
2753                         if (bio->bi_end_io == end_sync_read) {
2754                                 read_targets--;
2755                                 md_sync_acct(bio->bi_bdev, nr_sectors);
2756                                 generic_make_request(bio);
2757                         }
2758                 }
2759         } else {
2760                 atomic_set(&r1_bio->remaining, 1);
2761                 bio = r1_bio->bios[r1_bio->read_disk];
2762                 md_sync_acct(bio->bi_bdev, nr_sectors);
2763                 generic_make_request(bio);
2764
2765         }
2766         return nr_sectors;
2767 }
2768
2769 static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
2770 {
2771         if (sectors)
2772                 return sectors;
2773
2774         return mddev->dev_sectors;
2775 }
2776
2777 static struct r1conf *setup_conf(struct mddev *mddev)
2778 {
2779         struct r1conf *conf;
2780         int i;
2781         struct raid1_info *disk;
2782         struct md_rdev *rdev;
2783         int err = -ENOMEM;
2784
2785         conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
2786         if (!conf)
2787                 goto abort;
2788
2789         conf->mirrors = kzalloc(sizeof(struct raid1_info)
2790                                 * mddev->raid_disks * 2,
2791                                  GFP_KERNEL);
2792         if (!conf->mirrors)
2793                 goto abort;
2794
2795         conf->tmppage = alloc_page(GFP_KERNEL);
2796         if (!conf->tmppage)
2797                 goto abort;
2798
2799         conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
2800         if (!conf->poolinfo)
2801                 goto abort;
2802         conf->poolinfo->raid_disks = mddev->raid_disks * 2;
2803         conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
2804                                           r1bio_pool_free,
2805                                           conf->poolinfo);
2806         if (!conf->r1bio_pool)
2807                 goto abort;
2808
2809         conf->poolinfo->mddev = mddev;
2810
2811         err = -EINVAL;
2812         spin_lock_init(&conf->device_lock);
2813         rdev_for_each(rdev, mddev) {
2814                 struct request_queue *q;
2815                 int disk_idx = rdev->raid_disk;
2816                 if (disk_idx >= mddev->raid_disks
2817                     || disk_idx < 0)
2818                         continue;
2819                 if (test_bit(Replacement, &rdev->flags))
2820                         disk = conf->mirrors + mddev->raid_disks + disk_idx;
2821                 else
2822                         disk = conf->mirrors + disk_idx;
2823
2824                 if (disk->rdev)
2825                         goto abort;
2826                 disk->rdev = rdev;
2827                 q = bdev_get_queue(rdev->bdev);
2828
2829                 disk->head_position = 0;
2830                 disk->seq_start = MaxSector;
2831         }
2832         conf->raid_disks = mddev->raid_disks;
2833         conf->mddev = mddev;
2834         INIT_LIST_HEAD(&conf->retry_list);
2835         INIT_LIST_HEAD(&conf->bio_end_io_list);
2836
2837         spin_lock_init(&conf->resync_lock);
2838         init_waitqueue_head(&conf->wait_barrier);
2839
2840         bio_list_init(&conf->pending_bio_list);
2841         conf->pending_count = 0;
2842         conf->recovery_disabled = mddev->recovery_disabled - 1;
2843
2844         conf->start_next_window = MaxSector;
2845         conf->current_window_requests = conf->next_window_requests = 0;
2846
2847         err = -EIO;
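             /* Slots [0..raid_disks-1] hold the regular devices and slots
              * [raid_disks..2*raid_disks-1] hold their replacements; promote
              * a replacement into the regular slot if there is no original.
              */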
2848         for (i = 0; i < conf->raid_disks * 2; i++) {
2849
2850                 disk = conf->mirrors + i;
2851
2852                 if (i < conf->raid_disks &&
2853                     disk[conf->raid_disks].rdev) {
2854                         /* This slot has a replacement. */
2855                         if (!disk->rdev) {
2856                                 /* No original, just make the replacement
2857                                  * a recovering spare
2858                                  */
2859                                 disk->rdev =
2860                                         disk[conf->raid_disks].rdev;
2861                                 disk[conf->raid_disks].rdev = NULL;
2862                         } else if (!test_bit(In_sync, &disk->rdev->flags))
2863                                 /* Original is not in_sync - bad */
2864                                 goto abort;
2865                 }
2866
2867                 if (!disk->rdev ||
2868                     !test_bit(In_sync, &disk->rdev->flags)) {
2869                         disk->head_position = 0;
2870                         if (disk->rdev &&
2871                             (disk->rdev->saved_raid_disk < 0))
2872                                 conf->fullsync = 1;
2873                 }
2874         }
2875
2876         err = -ENOMEM;
2877         conf->thread = md_register_thread(raid1d, mddev, "raid1");
2878         if (!conf->thread) {
2879                 printk(KERN_ERR
2880                        "md/raid1:%s: couldn't allocate thread\n",
2881                        mdname(mddev));
2882                 goto abort;
2883         }
2884
2885         return conf;
2886
2887  abort:
2888         if (conf) {
2889                 mempool_destroy(conf->r1bio_pool);
2890                 kfree(conf->mirrors);
2891                 safe_put_page(conf->tmppage);
2892                 kfree(conf->poolinfo);
2893                 kfree(conf);
2894         }
2895         return ERR_PTR(err);
2896 }
2897
2898 static void raid1_free(struct mddev *mddev, void *priv);
2899 static int raid1_run(struct mddev *mddev)
2900 {
2901         struct r1conf *conf;
2902         int i;
2903         struct md_rdev *rdev;
2904         int ret;
2905         bool discard_supported = false;
2906
2907         if (mddev->level != 1) {
2908                 printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n",
2909                        mdname(mddev), mddev->level);
2910                 return -EIO;
2911         }
2912         if (mddev->reshape_position != MaxSector) {
2913                 printk(KERN_ERR "md/raid1:%s: reshape_position set but not supported\n",
2914                        mdname(mddev));
2915                 return -EIO;
2916         }
2917         /*
2918          * copy the already verified devices into our private RAID1
2919          * bookkeeping area. [whatever we allocate in run()
2920          * should be freed in raid1_free()]
2921          */
2922         if (mddev->private == NULL)
2923                 conf = setup_conf(mddev);
2924         else
2925                 conf = mddev->private;
2926
2927         if (IS_ERR(conf))
2928                 return PTR_ERR(conf);
2929
2930         if (mddev->queue)
2931                 blk_queue_max_write_same_sectors(mddev->queue, 0);
2932
2933         rdev_for_each(rdev, mddev) {
2934                 if (!mddev->gendisk)
2935                         continue;
2936                 disk_stack_limits(mddev->gendisk, rdev->bdev,
2937                                   rdev->data_offset << 9);
2938                 if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
2939                         discard_supported = true;
2940         }
2941
2942         mddev->degraded = 0;
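             /* Count the missing or not-in-sync mirrors to work out how
              * degraded the array is to begin with.
              */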
2943         for (i = 0; i < conf->raid_disks; i++)
2944                 if (conf->mirrors[i].rdev == NULL ||
2945                     !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
2946                     test_bit(Faulty, &conf->mirrors[i].rdev->flags))
2947                         mddev->degraded++;
2948
2949         if (conf->raid_disks - mddev->degraded == 1)
2950                 mddev->recovery_cp = MaxSector;
2951
2952         if (mddev->recovery_cp != MaxSector)
2953                 printk(KERN_NOTICE "md/raid1:%s: not clean"
2954                        " -- starting background reconstruction\n",
2955                        mdname(mddev));
2956         printk(KERN_INFO
2957                 "md/raid1:%s: active with %d out of %d mirrors\n",
2958                 mdname(mddev), mddev->raid_disks - mddev->degraded,
2959                 mddev->raid_disks);
2960
2961         /*
2962          * Ok, everything is just fine now
2963          */
2964         mddev->thread = conf->thread;
2965         conf->thread = NULL;
2966         mddev->private = conf;
2967
2968         md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
2969
2970         if (mddev->queue) {
2971                 if (discard_supported)
2972                         queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
2973                                                 mddev->queue);
2974                 else
2975                         queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
2976                                                   mddev->queue);
2977         }
2978
2979         ret = md_integrity_register(mddev);
2980         if (ret) {
2981                 md_unregister_thread(&mddev->thread);
2982                 raid1_free(mddev, conf);
2983         }
2984         return ret;
2985 }
2986
2987 static void raid1_free(struct mddev *mddev, void *priv)
2988 {
2989         struct r1conf *conf = priv;
2990
2991         mempool_destroy(conf->r1bio_pool);
2992         kfree(conf->mirrors);
2993         safe_put_page(conf->tmppage);
2994         kfree(conf->poolinfo);
2995         kfree(conf);
2996 }
2997
2998 static int raid1_resize(struct mddev *mddev, sector_t sectors)
2999 {
3000         /* no resync is happening, and there is enough space
3001          * on all devices, so we can resize.
3002          * We need to make sure resync covers any new space.
3003          * If the array is shrinking we should possibly wait until
3004          * any io in the removed space completes, but it hardly seems
3005          * worth it.
3006          */
3007         sector_t newsize = raid1_size(mddev, sectors, 0);
3008         if (mddev->external_size &&
3009             mddev->array_sectors > newsize)
3010                 return -EINVAL;
3011         if (mddev->bitmap) {
3012                 int ret = bitmap_resize(mddev->bitmap, newsize, 0, 0);
3013                 if (ret)
3014                         return ret;
3015         }
3016         md_set_array_sectors(mddev, newsize);
3017         set_capacity(mddev->gendisk, mddev->array_sectors);
3018         revalidate_disk(mddev->gendisk);
3019         if (sectors > mddev->dev_sectors &&
3020             mddev->recovery_cp > mddev->dev_sectors) {
3021                 mddev->recovery_cp = mddev->dev_sectors;
3022                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3023         }
3024         mddev->dev_sectors = sectors;
3025         mddev->resync_max_sectors = sectors;
3026         return 0;
3027 }
3028
3029 static int raid1_reshape(struct mddev *mddev)
3030 {
3031         /* We need to:
3032          * 1/ resize the r1bio_pool
3033          * 2/ resize conf->mirrors
3034          *
3035          * We allocate a new r1bio_pool if we can.
3036          * Then raise a device barrier and wait until all IO stops.
3037          * Then resize conf->mirrors and swap in the new r1bio pool.
3038          *
3039          * At the same time, we "pack" the devices so that all the missing
3040          * devices have the higher raid_disk numbers.
3041          */
3042         mempool_t *newpool, *oldpool;
3043         struct pool_info *newpoolinfo;
3044         struct raid1_info *newmirrors;
3045         struct r1conf *conf = mddev->private;
3046         int cnt, raid_disks;
3047         unsigned long flags;
3048         int d, d2, err;
3049
3050         /* Cannot change chunk_size, layout, or level */
3051         if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
3052             mddev->layout != mddev->new_layout ||
3053             mddev->level != mddev->new_level) {
3054                 mddev->new_chunk_sectors = mddev->chunk_sectors;
3055                 mddev->new_layout = mddev->layout;
3056                 mddev->new_level = mddev->level;
3057                 return -EINVAL;
3058         }
3059
3060         if (!mddev_is_clustered(mddev)) {
3061                 err = md_allow_write(mddev);
3062                 if (err)
3063                         return err;
3064         }
3065
3066         raid_disks = mddev->raid_disks + mddev->delta_disks;
3067
3068         if (raid_disks < conf->raid_disks) {
3069                 cnt = 0;
3070                 for (d = 0; d < conf->raid_disks; d++)
3071                         if (conf->mirrors[d].rdev)
3072                                 cnt++;
3073                 if (cnt > raid_disks)
3074                         return -EBUSY;
3075         }
3076
3077         newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
3078         if (!newpoolinfo)
3079                 return -ENOMEM;
3080         newpoolinfo->mddev = mddev;
3081         newpoolinfo->raid_disks = raid_disks * 2;
3082
3083         newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
3084                                  r1bio_pool_free, newpoolinfo);
3085         if (!newpool) {
3086                 kfree(newpoolinfo);
3087                 return -ENOMEM;
3088         }
3089         newmirrors = kzalloc(sizeof(struct raid1_info) * raid_disks * 2,
3090                              GFP_KERNEL);
3091         if (!newmirrors) {
3092                 kfree(newpoolinfo);
3093                 mempool_destroy(newpool);
3094                 return -ENOMEM;
3095         }
3096
3097         freeze_array(conf, 0);
3098
3099         /* ok, everything is stopped */
3100         oldpool = conf->r1bio_pool;
3101         conf->r1bio_pool = newpool;
3102
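             /* Pack the surviving rdevs into the lowest slots of the new
              * mirrors array, renaming their sysfs links to match the new
              * raid_disk numbers.
              */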
3103         for (d = d2 = 0; d < conf->raid_disks; d++) {
3104                 struct md_rdev *rdev = conf->mirrors[d].rdev;
3105                 if (rdev && rdev->raid_disk != d2) {
3106                         sysfs_unlink_rdev(mddev, rdev);
3107                         rdev->raid_disk = d2;
3108                         sysfs_unlink_rdev(mddev, rdev);
3109                         if (sysfs_link_rdev(mddev, rdev))
3110                                 printk(KERN_WARNING
3111                                        "md/raid1:%s: cannot register rd%d\n",
3112                                        mdname(mddev), rdev->raid_disk);
3113                 }
3114                 if (rdev)
3115                         newmirrors[d2++].rdev = rdev;
3116         }
3117         kfree(conf->mirrors);
3118         conf->mirrors = newmirrors;
3119         kfree(conf->poolinfo);
3120         conf->poolinfo = newpoolinfo;
3121
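             /* Adjust the degraded count for the new number of slots; slots
              * added by a grow have no device yet and stay degraded until
              * recovery fills them.
              */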
3122         spin_lock_irqsave(&conf->device_lock, flags);
3123         mddev->degraded += (raid_disks - conf->raid_disks);
3124         spin_unlock_irqrestore(&conf->device_lock, flags);
3125         conf->raid_disks = mddev->raid_disks = raid_disks;
3126         mddev->delta_disks = 0;
3127
3128         unfreeze_array(conf);
3129
3130         set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
3131         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3132         md_wakeup_thread(mddev->thread);
3133
3134         mempool_destroy(oldpool);
3135         return 0;
3136 }
3137
3138 static void raid1_quiesce(struct mddev *mddev, int state)
3139 {
3140         struct r1conf *conf = mddev->private;
3141
3142         switch(state) {
3143         case 2: /* wake for suspend */
3144                 wake_up(&conf->wait_barrier);
3145                 break;
3146         case 1:
3147                 freeze_array(conf, 0);
3148                 break;
3149         case 0:
3150                 unfreeze_array(conf);
3151                 break;
3152         }
3153 }
3154
3155 static void *raid1_takeover(struct mddev *mddev)
3156 {
3157         /* raid1 can take over:
3158          *  raid5 with 2 devices, any layout or chunk size
3159          */
3160         if (mddev->level == 5 && mddev->raid_disks == 2) {
3161                 struct r1conf *conf;
3162                 mddev->new_level = 1;
3163                 mddev->new_layout = 0;
3164                 mddev->new_chunk_sectors = 0;
3165                 conf = setup_conf(mddev);
3166                 if (!IS_ERR(conf))
3167                         /* Array must appear to be quiesced */
3168                         conf->array_frozen = 1;
3169                 return conf;
3170         }
3171         return ERR_PTR(-EINVAL);
3172 }
3173
3174 static struct md_personality raid1_personality =
3175 {
3176         .name           = "raid1",
3177         .level          = 1,
3178         .owner          = THIS_MODULE,
3179         .make_request   = raid1_make_request,
3180         .run            = raid1_run,
3181         .free           = raid1_free,
3182         .status         = raid1_status,
3183         .error_handler  = raid1_error,
3184         .hot_add_disk   = raid1_add_disk,
3185         .hot_remove_disk= raid1_remove_disk,
3186         .spare_active   = raid1_spare_active,
3187         .sync_request   = raid1_sync_request,
3188         .resize         = raid1_resize,
3189         .size           = raid1_size,
3190         .check_reshape  = raid1_reshape,
3191         .quiesce        = raid1_quiesce,
3192         .takeover       = raid1_takeover,
3193         .congested      = raid1_congested,
3194 };
3195
3196 static int __init raid_init(void)
3197 {
3198         return register_md_personality(&raid1_personality);
3199 }
3200
3201 static void raid_exit(void)
3202 {
3203         unregister_md_personality(&raid1_personality);
3204 }
3205
3206 module_init(raid_init);
3207 module_exit(raid_exit);
3208 MODULE_LICENSE("GPL");
3209 MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
3210 MODULE_ALIAS("md-personality-3"); /* RAID1 */
3211 MODULE_ALIAS("md-raid1");
3212 MODULE_ALIAS("md-level-1");
3213
3214 module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);