block: manipulate bio->bi_flags through helpers
drivers/md/raid1.c
1 /*
2  * raid1.c : Multiple Devices driver for Linux
3  *
4  * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
5  *
6  * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
7  *
8  * RAID-1 management functions.
9  *
10  * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
11  *
12  * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
13  * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
14  *
15  * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
16  * bitmapped intelligence in resync:
17  *
18  *      - bitmap marked during normal i/o
19  *      - bitmap used to skip nondirty blocks during sync
20  *
21  * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
22  * - persistent bitmap code
23  *
24  * This program is free software; you can redistribute it and/or modify
25  * it under the terms of the GNU General Public License as published by
26  * the Free Software Foundation; either version 2, or (at your option)
27  * any later version.
28  *
29  * You should have received a copy of the GNU General Public License
30  * (for example /usr/src/linux/COPYING); if not, write to the Free
31  * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
32  */
33
34 #include <linux/slab.h>
35 #include <linux/delay.h>
36 #include <linux/blkdev.h>
37 #include <linux/module.h>
38 #include <linux/seq_file.h>
39 #include <linux/ratelimit.h>
40 #include "md.h"
41 #include "raid1.h"
42 #include "bitmap.h"
43
44 /*
45  * Number of guaranteed r1bios in case of extreme VM load:
46  */
47 #define NR_RAID1_BIOS 256
48
49 /* when we get a read error on a read-only array, we redirect to another
50  * device without failing the first device, or trying to over-write to
51  * correct the read error.  To keep track of bad blocks on a per-bio
52  * level, we store IO_BLOCKED in the appropriate 'bios' pointer
53  */
54 #define IO_BLOCKED ((struct bio *)1)
55 /* When we successfully write to a known bad-block, we need to remove the
56  * bad-block marking which must be done from process context.  So we record
57  * the success by setting devs[n].bio to IO_MADE_GOOD
58  */
59 #define IO_MADE_GOOD ((struct bio *)2)
60
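/* true for NULL and for the IO_BLOCKED/IO_MADE_GOOD sentinels above */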
61 #define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
62
63 /* When there are this many requests queued to be written by
64  * the raid1 thread, we become 'congested' to provide back-pressure
65  * for writeback.
66  */
67 static int max_queued_requests = 1024;
68
69 static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
70                           sector_t bi_sector);
71 static void lower_barrier(struct r1conf *conf);
72
73 static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
74 {
75         struct pool_info *pi = data;
76         int size = offsetof(struct r1bio, bios[pi->raid_disks]);
77
78         /* allocate a r1bio with room for raid_disks entries in the bios array */
79         return kzalloc(size, gfp_flags);
80 }
81
82 static void r1bio_pool_free(void *r1_bio, void *data)
83 {
84         kfree(r1_bio);
85 }
86
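/*
 * Resync/recovery is done in RESYNC_BLOCK_SIZE (64KiB) chunks, with at
 * most RESYNC_DEPTH of them in flight at once; the constants below are
 * the same quantities expressed in sectors and pages.  The sliding
 * window that keeps normal write IO away from the resync point advances
 * in steps of NEXT_NORMALIO_DISTANCE sectors (three resync windows).
 */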
87 #define RESYNC_BLOCK_SIZE (64*1024)
88 #define RESYNC_DEPTH 32
89 #define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
90 #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
91 #define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
92 #define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
93 #define NEXT_NORMALIO_DISTANCE (3 * RESYNC_WINDOW_SECTORS)
94
95 static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
96 {
97         struct pool_info *pi = data;
98         struct r1bio *r1_bio;
99         struct bio *bio;
100         int need_pages;
101         int i, j;
102
103         r1_bio = r1bio_pool_alloc(gfp_flags, pi);
104         if (!r1_bio)
105                 return NULL;
106
107         /*
108          * Allocate bios : 1 for reading, n-1 for writing
109          */
110         for (j = pi->raid_disks ; j-- ; ) {
111                 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
112                 if (!bio)
113                         goto out_free_bio;
114                 r1_bio->bios[j] = bio;
115         }
116         /*
117          * Allocate RESYNC_PAGES data pages and attach them to
118          * the first bio.
119          * If this is a user-requested check/repair, allocate
120          * RESYNC_PAGES for each bio.
121          */
122         if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
123                 need_pages = pi->raid_disks;
124         else
125                 need_pages = 1;
126         for (j = 0; j < need_pages; j++) {
127                 bio = r1_bio->bios[j];
128                 bio->bi_vcnt = RESYNC_PAGES;
129
130                 if (bio_alloc_pages(bio, gfp_flags))
131                         goto out_free_pages;
132         }
133         /* If not user-requested, copy the page pointers to all bios */
134         if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
135                 for (i=0; i<RESYNC_PAGES ; i++)
136                         for (j=1; j<pi->raid_disks; j++)
137                                 r1_bio->bios[j]->bi_io_vec[i].bv_page =
138                                         r1_bio->bios[0]->bi_io_vec[i].bv_page;
139         }
140
141         r1_bio->master_bio = NULL;
142
143         return r1_bio;
144
145 out_free_pages:
146         while (--j >= 0) {
147                 struct bio_vec *bv;
148
149                 bio_for_each_segment_all(bv, r1_bio->bios[j], i)
150                         __free_page(bv->bv_page);
151         }
152
153 out_free_bio:
154         while (++j < pi->raid_disks)
155                 bio_put(r1_bio->bios[j]);
156         r1bio_pool_free(r1_bio, data);
157         return NULL;
158 }
159
160 static void r1buf_pool_free(void *__r1_bio, void *data)
161 {
162         struct pool_info *pi = data;
163         int i,j;
164         struct r1bio *r1bio = __r1_bio;
165
166         for (i = 0; i < RESYNC_PAGES; i++)
167                 for (j = pi->raid_disks; j-- ;) {
168                         if (j == 0 ||
169                             r1bio->bios[j]->bi_io_vec[i].bv_page !=
170                             r1bio->bios[0]->bi_io_vec[i].bv_page)
171                                 safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
172                 }
173         for (i=0 ; i < pi->raid_disks; i++)
174                 bio_put(r1bio->bios[i]);
175
176         r1bio_pool_free(r1bio, data);
177 }
178
179 static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
180 {
181         int i;
182
183         for (i = 0; i < conf->raid_disks * 2; i++) {
184                 struct bio **bio = r1_bio->bios + i;
185                 if (!BIO_SPECIAL(*bio))
186                         bio_put(*bio);
187                 *bio = NULL;
188         }
189 }
190
191 static void free_r1bio(struct r1bio *r1_bio)
192 {
193         struct r1conf *conf = r1_bio->mddev->private;
194
195         put_all_bios(conf, r1_bio);
196         mempool_free(r1_bio, conf->r1bio_pool);
197 }
198
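/*
 * Return a resync/recovery r1bio to the r1buf pool, dropping the rdev
 * references it holds and lowering the resync barrier.
 */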
199 static void put_buf(struct r1bio *r1_bio)
200 {
201         struct r1conf *conf = r1_bio->mddev->private;
202         int i;
203
204         for (i = 0; i < conf->raid_disks * 2; i++) {
205                 struct bio *bio = r1_bio->bios[i];
206                 if (bio->bi_end_io)
207                         rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
208         }
209
210         mempool_free(r1_bio, conf->r1buf_pool);
211
212         lower_barrier(conf);
213 }
214
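/*
 * Queue an r1bio for raid1d so that retries and other work that needs
 * process context can be done there.
 */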
215 static void reschedule_retry(struct r1bio *r1_bio)
216 {
217         unsigned long flags;
218         struct mddev *mddev = r1_bio->mddev;
219         struct r1conf *conf = mddev->private;
220
221         spin_lock_irqsave(&conf->device_lock, flags);
222         list_add(&r1_bio->retry_list, &conf->retry_list);
223         conf->nr_queued ++;
224         spin_unlock_irqrestore(&conf->device_lock, flags);
225
226         wake_up(&conf->wait_barrier);
227         md_wakeup_thread(mddev->thread);
228 }
229
230 /*
231  * raid_end_bio_io() is called when we have finished servicing a mirrored
232  * operation and are ready to return a success/failure code to the buffer
233  * cache layer.
234  */
235 static void call_bio_endio(struct r1bio *r1_bio)
236 {
237         struct bio *bio = r1_bio->master_bio;
238         int done;
239         struct r1conf *conf = r1_bio->mddev->private;
240         sector_t start_next_window = r1_bio->start_next_window;
241         sector_t bi_sector = bio->bi_iter.bi_sector;
242
243         if (bio->bi_phys_segments) {
244                 unsigned long flags;
245                 spin_lock_irqsave(&conf->device_lock, flags);
246                 bio->bi_phys_segments--;
247                 done = (bio->bi_phys_segments == 0);
248                 spin_unlock_irqrestore(&conf->device_lock, flags);
249                 /*
250                  * make_request() might be waiting for
251                  * bi_phys_segments to decrease
252                  */
253                 wake_up(&conf->wait_barrier);
254         } else
255                 done = 1;
256
257         if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
258                 bio->bi_error = -EIO;
259
260         if (done) {
261                 bio_endio(bio);
262                 /*
263                  * Wake up any possible resync thread that waits for the device
264                  * to go idle.
265                  */
266                 allow_barrier(conf, start_next_window, bi_sector);
267         }
268 }
269
270 static void raid_end_bio_io(struct r1bio *r1_bio)
271 {
272         struct bio *bio = r1_bio->master_bio;
273
274         /* if nobody has done the final endio yet, do it now */
275         if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
276                 pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
277                          (bio_data_dir(bio) == WRITE) ? "write" : "read",
278                          (unsigned long long) bio->bi_iter.bi_sector,
279                          (unsigned long long) bio_end_sector(bio) - 1);
280
281                 call_bio_endio(r1_bio);
282         }
283         free_r1bio(r1_bio);
284 }
285
286 /*
287  * Update disk head position estimator based on IRQ completion info.
288  */
289 static inline void update_head_pos(int disk, struct r1bio *r1_bio)
290 {
291         struct r1conf *conf = r1_bio->mddev->private;
292
293         conf->mirrors[disk].head_position =
294                 r1_bio->sector + (r1_bio->sectors);
295 }
296
297 /*
298  * Find the disk number which triggered the given bio
299  */
300 static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
301 {
302         int mirror;
303         struct r1conf *conf = r1_bio->mddev->private;
304         int raid_disks = conf->raid_disks;
305
306         for (mirror = 0; mirror < raid_disks * 2; mirror++)
307                 if (r1_bio->bios[mirror] == bio)
308                         break;
309
310         BUG_ON(mirror == raid_disks * 2);
311         update_head_pos(mirror, r1_bio);
312
313         return mirror;
314 }
315
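/* bi_end_io callback for read requests submitted to a mirror */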
316 static void raid1_end_read_request(struct bio *bio)
317 {
318         int uptodate = !bio->bi_error;
319         struct r1bio *r1_bio = bio->bi_private;
320         int mirror;
321         struct r1conf *conf = r1_bio->mddev->private;
322
323         mirror = r1_bio->read_disk;
324         /*
325          * this branch is our 'one mirror IO has finished' event handler:
326          */
327         update_head_pos(mirror, r1_bio);
328
329         if (uptodate)
330                 set_bit(R1BIO_Uptodate, &r1_bio->state);
331         else {
332                 /* If all other devices have failed, we want to return
333                  * the error upwards rather than fail the last device.
334                  * Here we redefine "uptodate" to mean "Don't want to retry"
335                  */
336                 unsigned long flags;
337                 spin_lock_irqsave(&conf->device_lock, flags);
338                 if (r1_bio->mddev->degraded == conf->raid_disks ||
339                     (r1_bio->mddev->degraded == conf->raid_disks-1 &&
340                      !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
341                         uptodate = 1;
342                 spin_unlock_irqrestore(&conf->device_lock, flags);
343         }
344
345         if (uptodate) {
346                 raid_end_bio_io(r1_bio);
347                 rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
348         } else {
349                 /*
350                  * oops, read error:
351                  */
352                 char b[BDEVNAME_SIZE];
353                 printk_ratelimited(
354                         KERN_ERR "md/raid1:%s: %s: "
355                         "rescheduling sector %llu\n",
356                         mdname(conf->mddev),
357                         bdevname(conf->mirrors[mirror].rdev->bdev,
358                                  b),
359                         (unsigned long long)r1_bio->sector);
360                 set_bit(R1BIO_ReadError, &r1_bio->state);
361                 reschedule_retry(r1_bio);
362                 /* don't drop the reference on read_disk yet */
363         }
364 }
365
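/*
 * Final bookkeeping for a write: free any write-behind page copies,
 * clear the bitmap bits and tell md the write has ended.
 */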
366 static void close_write(struct r1bio *r1_bio)
367 {
368         /* it really is the end of this request */
369         if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
370                 /* free extra copy of the data pages */
371                 int i = r1_bio->behind_page_count;
372                 while (i--)
373                         safe_put_page(r1_bio->behind_bvecs[i].bv_page);
374                 kfree(r1_bio->behind_bvecs);
375                 r1_bio->behind_bvecs = NULL;
376         }
377         /* clear the bitmap if all writes complete successfully */
378         bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
379                         r1_bio->sectors,
380                         !test_bit(R1BIO_Degraded, &r1_bio->state),
381                         test_bit(R1BIO_BehindIO, &r1_bio->state));
382         md_write_end(r1_bio->mddev);
383 }
384
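/*
 * Called for every completed mirrored write; when the last one finishes,
 * either end the master bio or pass the r1bio to raid1d if a write error
 * or a now-fixable bad block needs process-context handling.
 */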
385 static void r1_bio_write_done(struct r1bio *r1_bio)
386 {
387         if (!atomic_dec_and_test(&r1_bio->remaining))
388                 return;
389
390         if (test_bit(R1BIO_WriteError, &r1_bio->state))
391                 reschedule_retry(r1_bio);
392         else {
393                 close_write(r1_bio);
394                 if (test_bit(R1BIO_MadeGood, &r1_bio->state))
395                         reschedule_retry(r1_bio);
396                 else
397                         raid_end_bio_io(r1_bio);
398         }
399 }
400
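/* bi_end_io callback for write requests submitted to a mirror */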
401 static void raid1_end_write_request(struct bio *bio)
402 {
403         struct r1bio *r1_bio = bio->bi_private;
404         int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
405         struct r1conf *conf = r1_bio->mddev->private;
406         struct bio *to_put = NULL;
407
408         mirror = find_bio_disk(r1_bio, bio);
409
410         /*
411          * 'one mirror IO has finished' event handler:
412          */
413         if (bio->bi_error) {
414                 set_bit(WriteErrorSeen,
415                         &conf->mirrors[mirror].rdev->flags);
416                 if (!test_and_set_bit(WantReplacement,
417                                       &conf->mirrors[mirror].rdev->flags))
418                         set_bit(MD_RECOVERY_NEEDED, &
419                                 conf->mddev->recovery);
420
421                 set_bit(R1BIO_WriteError, &r1_bio->state);
422         } else {
423                 /*
424                  * Set R1BIO_Uptodate in our master bio, so that we
425                  * will return a good error code to the higher
426                  * levels even if IO on some other mirrored buffer
427                  * fails.
428                  *
429                  * The 'master' represents the composite IO operation
430                  * to user-side. So if something waits for IO, then it
431                  * will wait for the 'master' bio.
432                  */
433                 sector_t first_bad;
434                 int bad_sectors;
435
436                 r1_bio->bios[mirror] = NULL;
437                 to_put = bio;
438                 /*
439                  * Do not set R1BIO_Uptodate if the current device is
440                  * rebuilding or Faulty. This is because we cannot use
441                  * such a device for properly reading the data back (we could
442                  * potentially use it, if the current write had fallen
443                  * before rdev->recovery_offset, but for simplicity we don't
444                  * check this here).
445                  */
446                 if (test_bit(In_sync, &conf->mirrors[mirror].rdev->flags) &&
447                     !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags))
448                         set_bit(R1BIO_Uptodate, &r1_bio->state);
449
450                 /* Maybe we can clear some bad blocks. */
451                 if (is_badblock(conf->mirrors[mirror].rdev,
452                                 r1_bio->sector, r1_bio->sectors,
453                                 &first_bad, &bad_sectors)) {
454                         r1_bio->bios[mirror] = IO_MADE_GOOD;
455                         set_bit(R1BIO_MadeGood, &r1_bio->state);
456                 }
457         }
458
459         if (behind) {
460                 if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags))
461                         atomic_dec(&r1_bio->behind_remaining);
462
463                 /*
464                  * In behind mode, we ACK the master bio once the I/O
465                  * has safely reached all non-writemostly
466                  * disks. Setting the Returned bit ensures that this
467                  * gets done only once -- we don't ever want to return
468                  * -EIO here, instead we'll wait
469                  */
470                 if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
471                     test_bit(R1BIO_Uptodate, &r1_bio->state)) {
472                         /* Maybe we can return now */
473                         if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
474                                 struct bio *mbio = r1_bio->master_bio;
475                                 pr_debug("raid1: behind end write sectors"
476                                          " %llu-%llu\n",
477                                          (unsigned long long) mbio->bi_iter.bi_sector,
478                                          (unsigned long long) bio_end_sector(mbio) - 1);
479                                 call_bio_endio(r1_bio);
480                         }
481                 }
482         }
483         if (r1_bio->bios[mirror] == NULL)
484                 rdev_dec_pending(conf->mirrors[mirror].rdev,
485                                  conf->mddev);
486
487         /*
488          * Let's see if all mirrored write operations have finished
489          * already.
490          */
491         r1_bio_write_done(r1_bio);
492
493         if (to_put)
494                 bio_put(to_put);
495 }
496
497 /*
498  * This routine returns the disk from which the requested read should
499  * be done. There is a per-array 'next expected sequential IO' sector
500  * number - if this matches on the next IO then we use the last disk.
501  * There is also a per-disk 'last known head position' sector that is
502  * maintained from IRQ contexts, both the normal and the resync IO
503  * completion handlers update this position correctly. If there is no
504  * perfect sequential match then we pick the disk whose head is closest.
505  *
506  * If there are 2 mirrors in the same 2 devices, performance degrades
507  * because the head position is tracked per mirror, not per device.
508  *
509  * The rdev for the device selected will have nr_pending incremented.
510  */
511 static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
512 {
513         const sector_t this_sector = r1_bio->sector;
514         int sectors;
515         int best_good_sectors;
516         int best_disk, best_dist_disk, best_pending_disk;
517         int has_nonrot_disk;
518         int disk;
519         sector_t best_dist;
520         unsigned int min_pending;
521         struct md_rdev *rdev;
522         int choose_first;
523         int choose_next_idle;
524
525         rcu_read_lock();
526         /*
527          * Check if we can balance. We can balance on the whole
528          * device if no resync is going on, or below the resync window.
529          * We take the first readable disk when above the resync window.
530          */
531  retry:
532         sectors = r1_bio->sectors;
533         best_disk = -1;
534         best_dist_disk = -1;
535         best_dist = MaxSector;
536         best_pending_disk = -1;
537         min_pending = UINT_MAX;
538         best_good_sectors = 0;
539         has_nonrot_disk = 0;
540         choose_next_idle = 0;
541
542         if ((conf->mddev->recovery_cp < this_sector + sectors) ||
543             (mddev_is_clustered(conf->mddev) &&
544             md_cluster_ops->area_resyncing(conf->mddev, this_sector,
545                     this_sector + sectors)))
546                 choose_first = 1;
547         else
548                 choose_first = 0;
549
550         for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
551                 sector_t dist;
552                 sector_t first_bad;
553                 int bad_sectors;
554                 unsigned int pending;
555                 bool nonrot;
556
557                 rdev = rcu_dereference(conf->mirrors[disk].rdev);
558                 if (r1_bio->bios[disk] == IO_BLOCKED
559                     || rdev == NULL
560                     || test_bit(Unmerged, &rdev->flags)
561                     || test_bit(Faulty, &rdev->flags))
562                         continue;
563                 if (!test_bit(In_sync, &rdev->flags) &&
564                     rdev->recovery_offset < this_sector + sectors)
565                         continue;
566                 if (test_bit(WriteMostly, &rdev->flags)) {
567                         /* Don't balance among write-mostly, just
568                          * use the first as a last resort */
569                         if (best_dist_disk < 0) {
570                                 if (is_badblock(rdev, this_sector, sectors,
571                                                 &first_bad, &bad_sectors)) {
572                                         if (first_bad < this_sector)
573                                                 /* Cannot use this */
574                                                 continue;
575                                         best_good_sectors = first_bad - this_sector;
576                                 } else
577                                         best_good_sectors = sectors;
578                                 best_dist_disk = disk;
579                                 best_pending_disk = disk;
580                         }
581                         continue;
582                 }
583                 /* This is a reasonable device to use.  It might
584                  * even be best.
585                  */
586                 if (is_badblock(rdev, this_sector, sectors,
587                                 &first_bad, &bad_sectors)) {
588                         if (best_dist < MaxSector)
589                                 /* already have a better device */
590                                 continue;
591                         if (first_bad <= this_sector) {
592                                 /* cannot read here. If this is the 'primary'
593                                  * device, then we must not read beyond
594                                  * bad_sectors from another device..
595                                  */
596                                 bad_sectors -= (this_sector - first_bad);
597                                 if (choose_first && sectors > bad_sectors)
598                                         sectors = bad_sectors;
599                                 if (best_good_sectors > sectors)
600                                         best_good_sectors = sectors;
601
602                         } else {
603                                 sector_t good_sectors = first_bad - this_sector;
604                                 if (good_sectors > best_good_sectors) {
605                                         best_good_sectors = good_sectors;
606                                         best_disk = disk;
607                                 }
608                                 if (choose_first)
609                                         break;
610                         }
611                         continue;
612                 } else
613                         best_good_sectors = sectors;
614
615                 nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
616                 has_nonrot_disk |= nonrot;
617                 pending = atomic_read(&rdev->nr_pending);
618                 dist = abs(this_sector - conf->mirrors[disk].head_position);
619                 if (choose_first) {
620                         best_disk = disk;
621                         break;
622                 }
623                 /* Don't change to another disk for sequential reads */
624                 if (conf->mirrors[disk].next_seq_sect == this_sector
625                     || dist == 0) {
626                         int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
627                         struct raid1_info *mirror = &conf->mirrors[disk];
628
629                         best_disk = disk;
630                         /*
631                          * If the buffered sequential IO size exceeds the
632                          * optimal iosize, check whether there is an idle disk
633                          * and, if so, choose it. read_balance could already
634                          * have chosen an idle disk before noticing that this
635                          * disk is serving sequential IO. That doesn't matter:
636                          * this disk will go idle and be used again once the
637                          * first disk's IO size exceeds the optimal iosize. This
638                          * way the first disk's IO size is at least the optimal
639                          * iosize; the second disk's iosize might be small, but
640                          * that is not a big deal because when the second disk
641                          * starts IO, the first disk is likely still busy.
642                          */
643                         if (nonrot && opt_iosize > 0 &&
644                             mirror->seq_start != MaxSector &&
645                             mirror->next_seq_sect > opt_iosize &&
646                             mirror->next_seq_sect - opt_iosize >=
647                             mirror->seq_start) {
648                                 choose_next_idle = 1;
649                                 continue;
650                         }
651                         break;
652                 }
653                 /* If device is idle, use it */
654                 if (pending == 0) {
655                         best_disk = disk;
656                         break;
657                 }
658
659                 if (choose_next_idle)
660                         continue;
661
662                 if (min_pending > pending) {
663                         min_pending = pending;
664                         best_pending_disk = disk;
665                 }
666
667                 if (dist < best_dist) {
668                         best_dist = dist;
669                         best_dist_disk = disk;
670                 }
671         }
672
673         /*
674          * If all disks are rotational, choose the closest disk. If any disk is
675          * non-rotational, choose the disk with the fewest pending requests even
676          * if that disk is rotational, which may or may not be optimal for arrays
677          * with mixed rotational/non-rotational disks depending on the workload.
678          */
679         if (best_disk == -1) {
680                 if (has_nonrot_disk)
681                         best_disk = best_pending_disk;
682                 else
683                         best_disk = best_dist_disk;
684         }
685
686         if (best_disk >= 0) {
687                 rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
688                 if (!rdev)
689                         goto retry;
690                 atomic_inc(&rdev->nr_pending);
691                 if (test_bit(Faulty, &rdev->flags)) {
692                         /* cannot risk returning a device that failed
693                          * before we inc'ed nr_pending
694                          */
695                         rdev_dec_pending(rdev, conf->mddev);
696                         goto retry;
697                 }
698                 sectors = best_good_sectors;
699
700                 if (conf->mirrors[best_disk].next_seq_sect != this_sector)
701                         conf->mirrors[best_disk].seq_start = this_sector;
702
703                 conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
704         }
705         rcu_read_unlock();
706         *max_sectors = sectors;
707
708         return best_disk;
709 }
710
711 static int raid1_mergeable_bvec(struct mddev *mddev,
712                                 struct bvec_merge_data *bvm,
713                                 struct bio_vec *biovec)
714 {
715         struct r1conf *conf = mddev->private;
716         sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
717         int max = biovec->bv_len;
718
719         if (mddev->merge_check_needed) {
720                 int disk;
721                 rcu_read_lock();
722                 for (disk = 0; disk < conf->raid_disks * 2; disk++) {
723                         struct md_rdev *rdev = rcu_dereference(
724                                 conf->mirrors[disk].rdev);
725                         if (rdev && !test_bit(Faulty, &rdev->flags)) {
726                                 struct request_queue *q =
727                                         bdev_get_queue(rdev->bdev);
728                                 if (q->merge_bvec_fn) {
729                                         bvm->bi_sector = sector +
730                                                 rdev->data_offset;
731                                         bvm->bi_bdev = rdev->bdev;
732                                         max = min(max, q->merge_bvec_fn(
733                                                           q, bvm, biovec));
734                                 }
735                         }
736                 }
737                 rcu_read_unlock();
738         }
739         return max;
740
741 }
742
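/*
 * Report congestion when too many writes are queued for raid1d or when
 * any member device's backing device is congested.
 */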
743 static int raid1_congested(struct mddev *mddev, int bits)
744 {
745         struct r1conf *conf = mddev->private;
746         int i, ret = 0;
747
748         if ((bits & (1 << WB_async_congested)) &&
749             conf->pending_count >= max_queued_requests)
750                 return 1;
751
752         rcu_read_lock();
753         for (i = 0; i < conf->raid_disks * 2; i++) {
754                 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
755                 if (rdev && !test_bit(Faulty, &rdev->flags)) {
756                         struct request_queue *q = bdev_get_queue(rdev->bdev);
757
758                         BUG_ON(!q);
759
760                         /* Note the '|| 1' - when read_balance prefers
761                          * non-congested targets, it can be removed
762                          */
763                         if ((bits & (1 << WB_async_congested)) || 1)
764                                 ret |= bdi_congested(&q->backing_dev_info, bits);
765                         else
766                                 ret &= bdi_congested(&q->backing_dev_info, bits);
767                 }
768         }
769         rcu_read_unlock();
770         return ret;
771 }
772
773 static void flush_pending_writes(struct r1conf *conf)
774 {
775         /* Any writes that have been queued but are awaiting
776          * bitmap updates get flushed here.
777          */
778         spin_lock_irq(&conf->device_lock);
779
780         if (conf->pending_bio_list.head) {
781                 struct bio *bio;
782                 bio = bio_list_get(&conf->pending_bio_list);
783                 conf->pending_count = 0;
784                 spin_unlock_irq(&conf->device_lock);
785                 /* flush any pending bitmap writes to
786                  * disk before proceeding w/ I/O */
787                 bitmap_unplug(conf->mddev->bitmap);
788                 wake_up(&conf->wait_barrier);
789
790                 while (bio) { /* submit pending writes */
791                         struct bio *next = bio->bi_next;
792                         bio->bi_next = NULL;
793                         if (unlikely((bio->bi_rw & REQ_DISCARD) &&
794                             !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
795                                 /* Just ignore it */
796                                 bio_endio(bio);
797                         else
798                                 generic_make_request(bio);
799                         bio = next;
800                 }
801         } else
802                 spin_unlock_irq(&conf->device_lock);
803 }
804
805 /* Barriers....
806  * Sometimes we need to suspend IO while we do something else,
807  * either some resync/recovery, or reconfigure the array.
808  * To do this we raise a 'barrier'.
809  * The 'barrier' is a counter that can be raised multiple times
810  * to count how many activities are happening which preclude
811  * normal IO.
812  * We can only raise the barrier if there is no pending IO.
813  * i.e. if nr_pending == 0.
814  * We choose only to raise the barrier if no-one is waiting for the
815  * barrier to go down.  This means that as soon as an IO request
816  * is ready, no other operations which require a barrier will start
817  * until the IO request has had a chance.
818  *
819  * So: regular IO calls 'wait_barrier'.  When that returns there
820  *    is no background IO happening.  It must arrange to call
821  *    allow_barrier when it has finished its IO.
822  * background IO calls must call raise_barrier.  Once that returns
823  *    there is no normal IO happening.  It must arrange to call
824  *    lower_barrier when the particular background IO completes.
825  */
826 static void raise_barrier(struct r1conf *conf, sector_t sector_nr)
827 {
828         spin_lock_irq(&conf->resync_lock);
829
830         /* Wait until no block IO is waiting */
831         wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
832                             conf->resync_lock);
833
834         /* block any new IO from starting */
835         conf->barrier++;
836         conf->next_resync = sector_nr;
837
838         /* For these conditions we must wait:
839          * A: while the array is in frozen state
840          * B: while barrier >= RESYNC_DEPTH, meaning resync has reached
841          *    the maximum count allowed.
842          * C: next_resync + RESYNC_SECTORS > start_next_window, meaning
843          *    the next resync will reach into the window which normal bios
844          *    are handling.
845          * D: while there are any active requests in the current window.
846          */
847         wait_event_lock_irq(conf->wait_barrier,
848                             !conf->array_frozen &&
849                             conf->barrier < RESYNC_DEPTH &&
850                             conf->current_window_requests == 0 &&
851                             (conf->start_next_window >=
852                              conf->next_resync + RESYNC_SECTORS),
853                             conf->resync_lock);
854
855         conf->nr_pending++;
856         spin_unlock_irq(&conf->resync_lock);
857 }
858
859 static void lower_barrier(struct r1conf *conf)
860 {
861         unsigned long flags;
862         BUG_ON(conf->barrier <= 0);
863         spin_lock_irqsave(&conf->resync_lock, flags);
864         conf->barrier--;
865         conf->nr_pending--;
866         spin_unlock_irqrestore(&conf->resync_lock, flags);
867         wake_up(&conf->wait_barrier);
868 }
869
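/*
 * Decide whether a normal bio must wait for the resync barrier: always
 * when the array is frozen, and for writes unless the bio lies entirely
 * below the completed resync point or well beyond the current resync
 * window.
 */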
870 static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
871 {
872         bool wait = false;
873
874         if (conf->array_frozen || !bio)
875                 wait = true;
876         else if (conf->barrier && bio_data_dir(bio) == WRITE) {
877                 if ((conf->mddev->curr_resync_completed
878                      >= bio_end_sector(bio)) ||
879                     (conf->next_resync + NEXT_NORMALIO_DISTANCE
880                      <= bio->bi_iter.bi_sector))
881                         wait = false;
882                 else
883                         wait = true;
884         }
885
886         return wait;
887 }
888
889 static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
890 {
891         sector_t sector = 0;
892
893         spin_lock_irq(&conf->resync_lock);
894         if (need_to_wait_for_sync(conf, bio)) {
895                 conf->nr_waiting++;
896                 /* Wait for the barrier to drop.
897                  * However if there are already pending
898                  * requests (preventing the barrier from
899                  * rising completely), and the
900                  * per-process bio queue isn't empty,
901                  * then don't wait, as we need to empty
902                  * that queue to allow conf->start_next_window
903                  * to increase.
904                  */
905                 wait_event_lock_irq(conf->wait_barrier,
906                                     !conf->array_frozen &&
907                                     (!conf->barrier ||
908                                      ((conf->start_next_window <
909                                        conf->next_resync + RESYNC_SECTORS) &&
910                                       current->bio_list &&
911                                       !bio_list_empty(current->bio_list))),
912                                     conf->resync_lock);
913                 conf->nr_waiting--;
914         }
915
916         if (bio && bio_data_dir(bio) == WRITE) {
917                 if (bio->bi_iter.bi_sector >=
918                     conf->mddev->curr_resync_completed) {
919                         if (conf->start_next_window == MaxSector)
920                                 conf->start_next_window =
921                                         conf->next_resync +
922                                         NEXT_NORMALIO_DISTANCE;
923
924                         if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE)
925                             <= bio->bi_iter.bi_sector)
926                                 conf->next_window_requests++;
927                         else
928                                 conf->current_window_requests++;
929                         sector = conf->start_next_window;
930                 }
931         }
932
933         conf->nr_pending++;
934         spin_unlock_irq(&conf->resync_lock);
935         return sector;
936 }
937
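/*
 * Drop the reference taken in wait_barrier() and update the sliding
 * window accounting, advancing start_next_window once the current
 * window has drained.
 */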
938 static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
939                           sector_t bi_sector)
940 {
941         unsigned long flags;
942
943         spin_lock_irqsave(&conf->resync_lock, flags);
944         conf->nr_pending--;
945         if (start_next_window) {
946                 if (start_next_window == conf->start_next_window) {
947                         if (conf->start_next_window + NEXT_NORMALIO_DISTANCE
948                             <= bi_sector)
949                                 conf->next_window_requests--;
950                         else
951                                 conf->current_window_requests--;
952                 } else
953                         conf->current_window_requests--;
954
955                 if (!conf->current_window_requests) {
956                         if (conf->next_window_requests) {
957                                 conf->current_window_requests =
958                                         conf->next_window_requests;
959                                 conf->next_window_requests = 0;
960                                 conf->start_next_window +=
961                                         NEXT_NORMALIO_DISTANCE;
962                         } else
963                                 conf->start_next_window = MaxSector;
964                 }
965         }
966         spin_unlock_irqrestore(&conf->resync_lock, flags);
967         wake_up(&conf->wait_barrier);
968 }
969
970 static void freeze_array(struct r1conf *conf, int extra)
971 {
972         /* stop sync IO and normal IO and wait for everything to
973          * go quiet.
974          * We wait until nr_pending matches nr_queued+extra.
975          * This is called in the context of one normal IO request
976          * that has failed. Thus any sync request that might be pending
977          * will be blocked by nr_pending, and we need to wait for
978          * pending IO requests to complete or be queued for re-try.
979          * Thus the number queued (nr_queued) plus this request (extra)
980          * must match the number of pending IOs (nr_pending) before
981          * we continue.
982          */
983         spin_lock_irq(&conf->resync_lock);
984         conf->array_frozen = 1;
985         wait_event_lock_irq_cmd(conf->wait_barrier,
986                                 conf->nr_pending == conf->nr_queued+extra,
987                                 conf->resync_lock,
988                                 flush_pending_writes(conf));
989         spin_unlock_irq(&conf->resync_lock);
990 }
991 static void unfreeze_array(struct r1conf *conf)
992 {
993         /* reverse the effect of the freeze */
994         spin_lock_irq(&conf->resync_lock);
995         conf->array_frozen = 0;
996         wake_up(&conf->wait_barrier);
997         spin_unlock_irq(&conf->resync_lock);
998 }
999
1000 /* duplicate the data pages for behind I/O
1001  */
1002 static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio)
1003 {
1004         int i;
1005         struct bio_vec *bvec;
1006         struct bio_vec *bvecs = kzalloc(bio->bi_vcnt * sizeof(struct bio_vec),
1007                                         GFP_NOIO);
1008         if (unlikely(!bvecs))
1009                 return;
1010
1011         bio_for_each_segment_all(bvec, bio, i) {
1012                 bvecs[i] = *bvec;
1013                 bvecs[i].bv_page = alloc_page(GFP_NOIO);
1014                 if (unlikely(!bvecs[i].bv_page))
1015                         goto do_sync_io;
1016                 memcpy(kmap(bvecs[i].bv_page) + bvec->bv_offset,
1017                        kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
1018                 kunmap(bvecs[i].bv_page);
1019                 kunmap(bvec->bv_page);
1020         }
1021         r1_bio->behind_bvecs = bvecs;
1022         r1_bio->behind_page_count = bio->bi_vcnt;
1023         set_bit(R1BIO_BehindIO, &r1_bio->state);
1024         return;
1025
1026 do_sync_io:
1027         for (i = 0; i < bio->bi_vcnt; i++)
1028                 if (bvecs[i].bv_page)
1029                         put_page(bvecs[i].bv_page);
1030         kfree(bvecs);
1031         pr_debug("%dB behind alloc failed, doing sync I/O\n",
1032                  bio->bi_iter.bi_size);
1033 }
1034
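/* per-task plugging context used to batch writes before submission */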
1035 struct raid1_plug_cb {
1036         struct blk_plug_cb      cb;
1037         struct bio_list         pending;
1038         int                     pending_cnt;
1039 };
1040
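/*
 * Unplug callback: hand the batched writes to raid1d when unplugging
 * from the scheduler (or while bios are still queued on current->bio_list),
 * otherwise submit them directly.
 */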
1041 static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
1042 {
1043         struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
1044                                                   cb);
1045         struct mddev *mddev = plug->cb.data;
1046         struct r1conf *conf = mddev->private;
1047         struct bio *bio;
1048
1049         if (from_schedule || current->bio_list) {
1050                 spin_lock_irq(&conf->device_lock);
1051                 bio_list_merge(&conf->pending_bio_list, &plug->pending);
1052                 conf->pending_count += plug->pending_cnt;
1053                 spin_unlock_irq(&conf->device_lock);
1054                 wake_up(&conf->wait_barrier);
1055                 md_wakeup_thread(mddev->thread);
1056                 kfree(plug);
1057                 return;
1058         }
1059
1060         /* we aren't scheduling, so we can do the write-out directly. */
1061         bio = bio_list_get(&plug->pending);
1062         bitmap_unplug(mddev->bitmap);
1063         wake_up(&conf->wait_barrier);
1064
1065         while (bio) { /* submit pending writes */
1066                 struct bio *next = bio->bi_next;
1067                 bio->bi_next = NULL;
1068                 if (unlikely((bio->bi_rw & REQ_DISCARD) &&
1069                     !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
1070                         /* Just ignore it */
1071                         bio_endio(bio);
1072                 else
1073                         generic_make_request(bio);
1074                 bio = next;
1075         }
1076         kfree(plug);
1077 }
1078
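/*
 * Main entry point for normal IO.  Reads are balanced across the mirrors
 * and may be split around bad blocks; writes are cloned to every active
 * mirror, possibly as write-behind, with the bitmap updated accordingly.
 */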
1079 static void make_request(struct mddev *mddev, struct bio * bio)
1080 {
1081         struct r1conf *conf = mddev->private;
1082         struct raid1_info *mirror;
1083         struct r1bio *r1_bio;
1084         struct bio *read_bio;
1085         int i, disks;
1086         struct bitmap *bitmap;
1087         unsigned long flags;
1088         const int rw = bio_data_dir(bio);
1089         const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
1090         const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
1091         const unsigned long do_discard = (bio->bi_rw
1092                                           & (REQ_DISCARD | REQ_SECURE));
1093         const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
1094         struct md_rdev *blocked_rdev;
1095         struct blk_plug_cb *cb;
1096         struct raid1_plug_cb *plug = NULL;
1097         int first_clone;
1098         int sectors_handled;
1099         int max_sectors;
1100         sector_t start_next_window;
1101
1102         /*
1103          * Register the new request and wait if the reconstruction
1104          * thread has put up a bar for new requests.
1105          * Continue immediately if no resync is active currently.
1106          */
1107
1108         md_write_start(mddev, bio); /* wait on superblock update early */
1109
1110         if (bio_data_dir(bio) == WRITE &&
1111             ((bio_end_sector(bio) > mddev->suspend_lo &&
1112             bio->bi_iter.bi_sector < mddev->suspend_hi) ||
1113             (mddev_is_clustered(mddev) &&
1114              md_cluster_ops->area_resyncing(mddev, bio->bi_iter.bi_sector, bio_end_sector(bio))))) {
1115                 /* As the suspend_* range is controlled by
1116                  * userspace, we want an interruptible
1117                  * wait.
1118                  */
1119                 DEFINE_WAIT(w);
1120                 for (;;) {
1121                         flush_signals(current);
1122                         prepare_to_wait(&conf->wait_barrier,
1123                                         &w, TASK_INTERRUPTIBLE);
1124                         if (bio_end_sector(bio) <= mddev->suspend_lo ||
1125                             bio->bi_iter.bi_sector >= mddev->suspend_hi ||
1126                             (mddev_is_clustered(mddev) &&
1127                              !md_cluster_ops->area_resyncing(mddev,
1128                                      bio->bi_iter.bi_sector, bio_end_sector(bio))))
1129                                 break;
1130                         schedule();
1131                 }
1132                 finish_wait(&conf->wait_barrier, &w);
1133         }
1134
1135         start_next_window = wait_barrier(conf, bio);
1136
1137         bitmap = mddev->bitmap;
1138
1139         /*
1140          * make_request() can abort the operation when READA is being
1141          * used and no empty request is available.
1142          *
1143          */
1144         r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
1145
1146         r1_bio->master_bio = bio;
1147         r1_bio->sectors = bio_sectors(bio);
1148         r1_bio->state = 0;
1149         r1_bio->mddev = mddev;
1150         r1_bio->sector = bio->bi_iter.bi_sector;
1151
1152         /* We might need to issue multiple reads to different
1153          * devices if there are bad blocks around, so we keep
1154          * track of the number of reads in bio->bi_phys_segments.
1155          * If this is 0, there is only one r1_bio and no locking
1156          * will be needed when requests complete.  If it is
1157          * non-zero, then it is the number of not-completed requests.
1158          */
1159         bio->bi_phys_segments = 0;
1160         bio_clear_flag(bio, BIO_SEG_VALID);
1161
1162         if (rw == READ) {
1163                 /*
1164                  * read balancing logic:
1165                  */
1166                 int rdisk;
1167
1168 read_again:
1169                 rdisk = read_balance(conf, r1_bio, &max_sectors);
1170
1171                 if (rdisk < 0) {
1172                         /* couldn't find anywhere to read from */
1173                         raid_end_bio_io(r1_bio);
1174                         return;
1175                 }
1176                 mirror = conf->mirrors + rdisk;
1177
1178                 if (test_bit(WriteMostly, &mirror->rdev->flags) &&
1179                     bitmap) {
1180                         /* Reading from a write-mostly device must
1181                          * take care not to over-take any writes
1182                          * that are 'behind'
1183                          */
1184                         wait_event(bitmap->behind_wait,
1185                                    atomic_read(&bitmap->behind_writes) == 0);
1186                 }
1187                 r1_bio->read_disk = rdisk;
1188                 r1_bio->start_next_window = 0;
1189
1190                 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1191                 bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
1192                          max_sectors);
1193
1194                 r1_bio->bios[rdisk] = read_bio;
1195
1196                 read_bio->bi_iter.bi_sector = r1_bio->sector +
1197                         mirror->rdev->data_offset;
1198                 read_bio->bi_bdev = mirror->rdev->bdev;
1199                 read_bio->bi_end_io = raid1_end_read_request;
1200                 read_bio->bi_rw = READ | do_sync;
1201                 read_bio->bi_private = r1_bio;
1202
1203                 if (max_sectors < r1_bio->sectors) {
1204                         /* could not read all from this device, so we will
1205                          * need another r1_bio.
1206                          */
1207
1208                         sectors_handled = (r1_bio->sector + max_sectors
1209                                            - bio->bi_iter.bi_sector);
1210                         r1_bio->sectors = max_sectors;
1211                         spin_lock_irq(&conf->device_lock);
1212                         if (bio->bi_phys_segments == 0)
1213                                 bio->bi_phys_segments = 2;
1214                         else
1215                                 bio->bi_phys_segments++;
1216                         spin_unlock_irq(&conf->device_lock);
1217                         /* Cannot call generic_make_request directly
1218                          * as that will be queued in __make_request
1219                          * and subsequent mempool_alloc might block waiting
1220                          * for it.  So hand bio over to raid1d.
1221                          */
1222                         reschedule_retry(r1_bio);
1223
1224                         r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
1225
1226                         r1_bio->master_bio = bio;
1227                         r1_bio->sectors = bio_sectors(bio) - sectors_handled;
1228                         r1_bio->state = 0;
1229                         r1_bio->mddev = mddev;
1230                         r1_bio->sector = bio->bi_iter.bi_sector +
1231                                 sectors_handled;
1232                         goto read_again;
1233                 } else
1234                         generic_make_request(read_bio);
1235                 return;
1236         }
1237
1238         /*
1239          * WRITE:
1240          */
1241         if (conf->pending_count >= max_queued_requests) {
1242                 md_wakeup_thread(mddev->thread);
1243                 wait_event(conf->wait_barrier,
1244                            conf->pending_count < max_queued_requests);
1245         }
1246         /* first select target devices under rcu_lock and
1247          * inc refcount on their rdev.  Record them by setting
1248          * bios[x] to bio
1249          * If there are known/acknowledged bad blocks on any device on
1250          * which we have seen a write error, we want to avoid writing those
1251          * blocks.
1252          * This potentially requires several writes to write around
1253          * the bad blocks.  Each set of writes gets its own r1bio
1254          * with a set of bios attached.
1255          */
1256
1257         disks = conf->raid_disks * 2;
1258  retry_write:
1259         r1_bio->start_next_window = start_next_window;
1260         blocked_rdev = NULL;
1261         rcu_read_lock();
1262         max_sectors = r1_bio->sectors;
1263         for (i = 0;  i < disks; i++) {
1264                 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1265                 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
1266                         atomic_inc(&rdev->nr_pending);
1267                         blocked_rdev = rdev;
1268                         break;
1269                 }
1270                 r1_bio->bios[i] = NULL;
1271                 if (!rdev || test_bit(Faulty, &rdev->flags)
1272                     || test_bit(Unmerged, &rdev->flags)) {
1273                         if (i < conf->raid_disks)
1274                                 set_bit(R1BIO_Degraded, &r1_bio->state);
1275                         continue;
1276                 }
1277
1278                 atomic_inc(&rdev->nr_pending);
1279                 if (test_bit(WriteErrorSeen, &rdev->flags)) {
1280                         sector_t first_bad;
1281                         int bad_sectors;
1282                         int is_bad;
1283
1284                         is_bad = is_badblock(rdev, r1_bio->sector,
1285                                              max_sectors,
1286                                              &first_bad, &bad_sectors);
1287                         if (is_bad < 0) {
1288                                 /* mustn't write here until the bad block is
1289                                  * acknowledged */
1290                                 set_bit(BlockedBadBlocks, &rdev->flags);
1291                                 blocked_rdev = rdev;
1292                                 break;
1293                         }
1294                         if (is_bad && first_bad <= r1_bio->sector) {
1295                                 /* Cannot write here at all */
1296                                 bad_sectors -= (r1_bio->sector - first_bad);
1297                                 if (bad_sectors < max_sectors)
1298                                         /* mustn't write more than bad_sectors
1299                                          * to other devices yet
1300                                          */
1301                                         max_sectors = bad_sectors;
1302                                 rdev_dec_pending(rdev, mddev);
1303                                 /* We don't set R1BIO_Degraded as that
1304                                  * only applies if the disk is
1305                                  * missing, so it might be re-added,
1306                                  * and we want to know to recover this
1307                                  * chunk.
1308                                  * In this case the device is here,
1309                                  * and the fact that this chunk is not
1310                                  * in-sync is recorded in the bad
1311                                  * block log
1312                                  */
1313                                 continue;
1314                         }
1315                         if (is_bad) {
1316                                 int good_sectors = first_bad - r1_bio->sector;
1317                                 if (good_sectors < max_sectors)
1318                                         max_sectors = good_sectors;
1319                         }
1320                 }
1321                 r1_bio->bios[i] = bio;
1322         }
1323         rcu_read_unlock();
1324
1325         if (unlikely(blocked_rdev)) {
1326                 /* Wait for this device to become unblocked */
1327                 int j;
1328                 sector_t old = start_next_window;
1329
1330                 for (j = 0; j < i; j++)
1331                         if (r1_bio->bios[j])
1332                                 rdev_dec_pending(conf->mirrors[j].rdev, mddev);
1333                 r1_bio->state = 0;
1334                 allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector);
1335                 md_wait_for_blocked_rdev(blocked_rdev, mddev);
1336                 start_next_window = wait_barrier(conf, bio);
1337                 /*
1338                  * We must make sure the multi r1bios of bio have
1339                  * the same value of bi_phys_segments
1340                  */
1341                 if (bio->bi_phys_segments && old &&
1342                     old != start_next_window)
1343                         /* Wait for the former r1bio(s) to complete */
1344                         wait_event(conf->wait_barrier,
1345                                    bio->bi_phys_segments == 1);
1346                 goto retry_write;
1347         }
1348
1349         if (max_sectors < r1_bio->sectors) {
1350                 /* We are splitting this write into multiple parts, so
1351                  * we need to prepare for allocating another r1_bio.
1352                  */
1353                 r1_bio->sectors = max_sectors;
1354                 spin_lock_irq(&conf->device_lock);
1355                 if (bio->bi_phys_segments == 0)
1356                         bio->bi_phys_segments = 2;
1357                 else
1358                         bio->bi_phys_segments++;
1359                 spin_unlock_irq(&conf->device_lock);
1360         }
1361         sectors_handled = r1_bio->sector + max_sectors - bio->bi_iter.bi_sector;
1362
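             /* ->remaining starts at 1 so the r1_bio cannot complete until every
              * clone queued below has been counted; that extra reference is
              * dropped by r1_bio_write_done() at the end of this function.
              */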
1363         atomic_set(&r1_bio->remaining, 1);
1364         atomic_set(&r1_bio->behind_remaining, 0);
1365
1366         first_clone = 1;
1367         for (i = 0; i < disks; i++) {
1368                 struct bio *mbio;
1369                 if (!r1_bio->bios[i])
1370                         continue;
1371
1372                 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1373                 bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors);
1374
1375                 if (first_clone) {
1376                         /* do behind I/O?
1377                          * Not if there are too many, or we cannot
1378                          * allocate memory, or a reader on a WriteMostly
1379                          * device is waiting for behind writes to flush */
1380                         if (bitmap &&
1381                             (atomic_read(&bitmap->behind_writes)
1382                              < mddev->bitmap_info.max_write_behind) &&
1383                             !waitqueue_active(&bitmap->behind_wait))
1384                                 alloc_behind_pages(mbio, r1_bio);
1385
1386                         bitmap_startwrite(bitmap, r1_bio->sector,
1387                                           r1_bio->sectors,
1388                                           test_bit(R1BIO_BehindIO,
1389                                                    &r1_bio->state));
1390                         first_clone = 0;
1391                 }
1392                 if (r1_bio->behind_bvecs) {
1393                         struct bio_vec *bvec;
1394                         int j;
1395
1396                         /*
1397                          * We trimmed the bio, so bio_for_each_segment_all() is legit here
1398                          */
1399                         bio_for_each_segment_all(bvec, mbio, j)
1400                                 bvec->bv_page = r1_bio->behind_bvecs[j].bv_page;
1401                         if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
1402                                 atomic_inc(&r1_bio->behind_remaining);
1403                 }
1404
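                     /* Record the clone so the write-completion handler can
                      * map it back to this mirror via r1_bio->bios[].
                      */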
1405                 r1_bio->bios[i] = mbio;
1406
1407                 mbio->bi_iter.bi_sector = (r1_bio->sector +
1408                                    conf->mirrors[i].rdev->data_offset);
1409                 mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
1410                 mbio->bi_end_io = raid1_end_write_request;
1411                 mbio->bi_rw =
1412                         WRITE | do_flush_fua | do_sync | do_discard | do_same;
1413                 mbio->bi_private = r1_bio;
1414
1415                 atomic_inc(&r1_bio->remaining);
1416
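                     /* Queue the clone on the caller's plug when one is active;
                      * otherwise add it to conf->pending_bio_list and wake the
                      * raid1d thread to submit it.
                      */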
1417                 cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
1418                 if (cb)
1419                         plug = container_of(cb, struct raid1_plug_cb, cb);
1420                 else
1421                         plug = NULL;
1422                 spin_lock_irqsave(&conf->device_lock, flags);
1423                 if (plug) {
1424                         bio_list_add(&plug->pending, mbio);
1425                         plug->pending_cnt++;
1426                 } else {
1427                         bio_list_add(&conf->pending_bio_list, mbio);
1428                         conf->pending_count++;
1429                 }
1430                 spin_unlock_irqrestore(&conf->device_lock, flags);
1431                 if (!plug)
1432                         md_wakeup_thread(mddev->thread);
1433         }
1434         /* Mustn't call r1_bio_write_done before this next test,
1435          * as it could result in the bio being freed.
1436          */
1437         if (sectors_handled < bio_sectors(bio)) {
1438                 r1_bio_write_done(r1_bio);
1439                 /* We need another r1_bio.  It has already been counted
1440                  * in bio->bi_phys_segments
1441                  */
1442                 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
1443                 r1_bio->master_bio = bio;
1444                 r1_bio->sectors = bio_sectors(bio) - sectors_handled;
1445                 r1_bio->state = 0;
1446                 r1_bio->mddev = mddev;
1447                 r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
1448                 goto retry_write;
1449         }
1450
1451         r1_bio_write_done(r1_bio);
1452
1453         /* In case raid1d snuck in to freeze_array */
1454         wake_up(&conf->wait_barrier);
1455 }
1456
1457 static void status(struct seq_file *seq, struct mddev *mddev)
1458 {
1459         struct r1conf *conf = mddev->private;
1460         int i;
1461
1462         seq_printf(seq, " [%d/%d] [", conf->raid_disks,
1463                    conf->raid_disks - mddev->degraded);
1464         rcu_read_lock();
1465         for (i = 0; i < conf->raid_disks; i++) {
1466                 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1467                 seq_printf(seq, "%s",
1468                            rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1469         }
1470         rcu_read_unlock();
1471         seq_printf(seq, "]");
1472 }
1473
1474 static void error(struct mddev *mddev, struct md_rdev *rdev)
1475 {
1476         char b[BDEVNAME_SIZE];
1477         struct r1conf *conf = mddev->private;
1478
1479         /*
1480          * If it is not operational, then we have already marked it as dead
1481          * else if it is the last working disk, ignore the error and let the
1482          * next level up know.
1483          * else mark the drive as failed
1484          */
1485         if (test_bit(In_sync, &rdev->flags)
1486             && (conf->raid_disks - mddev->degraded) == 1) {
1487                 /*
1488                  * Don't fail the drive, act as though we were just a
1489                  * normal single drive.
1490                  * However don't try a recovery from this drive as
1491                  * it is very likely to fail.
1492                  */
1493                 conf->recovery_disabled = mddev->recovery_disabled;
1494                 return;
1495         }
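             /* Block further writes to this device until the failure has been
              * acknowledged in the array metadata.
              */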
1496         set_bit(Blocked, &rdev->flags);
1497         if (test_and_clear_bit(In_sync, &rdev->flags)) {
1498                 unsigned long flags;
1499                 spin_lock_irqsave(&conf->device_lock, flags);
1500                 mddev->degraded++;
1501                 set_bit(Faulty, &rdev->flags);
1502                 spin_unlock_irqrestore(&conf->device_lock, flags);
1503         } else
1504                 set_bit(Faulty, &rdev->flags);
1505         /*
1506          * if recovery is running, make sure it aborts.
1507          */
1508         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1509         set_bit(MD_CHANGE_DEVS, &mddev->flags);
1510         printk(KERN_ALERT
1511                "md/raid1:%s: Disk failure on %s, disabling device.\n"
1512                "md/raid1:%s: Operation continuing on %d devices.\n",
1513                mdname(mddev), bdevname(rdev->bdev, b),
1514                mdname(mddev), conf->raid_disks - mddev->degraded);
1515 }
1516
1517 static void print_conf(struct r1conf *conf)
1518 {
1519         int i;
1520
1521         printk(KERN_DEBUG "RAID1 conf printout:\n");
1522         if (!conf) {
1523                 printk(KERN_DEBUG "(!conf)\n");
1524                 return;
1525         }
1526         printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
1527                 conf->raid_disks);
1528
1529         rcu_read_lock();
1530         for (i = 0; i < conf->raid_disks; i++) {
1531                 char b[BDEVNAME_SIZE];
1532                 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1533                 if (rdev)
1534                         printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
1535                                i, !test_bit(In_sync, &rdev->flags),
1536                                !test_bit(Faulty, &rdev->flags),
1537                                bdevname(rdev->bdev,b));
1538         }
1539         rcu_read_unlock();
1540 }
1541
1542 static void close_sync(struct r1conf *conf)
1543 {
1544         wait_barrier(conf, NULL);
1545         allow_barrier(conf, 0, 0);
1546
1547         mempool_destroy(conf->r1buf_pool);
1548         conf->r1buf_pool = NULL;
1549
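             /* Reset the sliding resync window accounting now that the
              * resync has finished.
              */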
1550         spin_lock_irq(&conf->resync_lock);
1551         conf->next_resync = 0;
1552         conf->start_next_window = MaxSector;
1553         conf->current_window_requests +=
1554                 conf->next_window_requests;
1555         conf->next_window_requests = 0;
1556         spin_unlock_irq(&conf->resync_lock);
1557 }
1558
1559 static int raid1_spare_active(struct mddev *mddev)
1560 {
1561         int i;
1562         struct r1conf *conf = mddev->private;
1563         int count = 0;
1564         unsigned long flags;
1565
1566         /*
1567          * Find all failed disks within the RAID1 configuration
1568          * and mark them readable.
1569          * Called under mddev lock, so rcu protection not needed.
1570          */
1571         for (i = 0; i < conf->raid_disks; i++) {
1572                 struct md_rdev *rdev = conf->mirrors[i].rdev;
1573                 struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
1574                 if (repl
1575                     && !test_bit(Candidate, &repl->flags)
1576                     && repl->recovery_offset == MaxSector
1577                     && !test_bit(Faulty, &repl->flags)
1578                     && !test_and_set_bit(In_sync, &repl->flags)) {
1579                         /* replacement has just become active */
1580                         if (!rdev ||
1581                             !test_and_clear_bit(In_sync, &rdev->flags))
1582                                 count++;
1583                         if (rdev) {
1584                                 /* Replaced device not technically
1585                                  * faulty, but we need to be sure
1586                                  * it gets removed and never re-added
1587                                  */
1588                                 set_bit(Faulty, &rdev->flags);
1589                                 sysfs_notify_dirent_safe(
1590                                         rdev->sysfs_state);
1591                         }
1592                 }
1593                 if (rdev
1594                     && rdev->recovery_offset == MaxSector
1595                     && !test_bit(Faulty, &rdev->flags)
1596                     && !test_and_set_bit(In_sync, &rdev->flags)) {
1597                         count++;
1598                         sysfs_notify_dirent_safe(rdev->sysfs_state);
1599                 }
1600         }
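             /* 'count' devices have just become In_sync, so the array is that
              * much less degraded.
              */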
1601         spin_lock_irqsave(&conf->device_lock, flags);
1602         mddev->degraded -= count;
1603         spin_unlock_irqrestore(&conf->device_lock, flags);
1604
1605         print_conf(conf);
1606         return count;
1607 }
1608
1609 static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1610 {
1611         struct r1conf *conf = mddev->private;
1612         int err = -EEXIST;
1613         int mirror = 0;
1614         struct raid1_info *p;
1615         int first = 0;
1616         int last = conf->raid_disks - 1;
1617         struct request_queue *q = bdev_get_queue(rdev->bdev);
1618
1619         if (mddev->recovery_disabled == conf->recovery_disabled)
1620                 return -EBUSY;
1621
1622         if (rdev->raid_disk >= 0)
1623                 first = last = rdev->raid_disk;
1624
1625         if (q->merge_bvec_fn) {
1626                 set_bit(Unmerged, &rdev->flags);
1627                 mddev->merge_check_needed = 1;
1628         }
1629
1630         for (mirror = first; mirror <= last; mirror++) {
1631                 p = conf->mirrors+mirror;
1632                 if (!p->rdev) {
1633
1634                         if (mddev->gendisk)
1635                                 disk_stack_limits(mddev->gendisk, rdev->bdev,
1636                                                   rdev->data_offset << 9);
1637
1638                         p->head_position = 0;
1639                         rdev->raid_disk = mirror;
1640                         err = 0;
1641                         /* As all devices are equivalent, we don't need a full recovery
1642                          * if this device was recently a member of the array
1643                          */
1644                         if (rdev->saved_raid_disk < 0)
1645                                 conf->fullsync = 1;
1646                         rcu_assign_pointer(p->rdev, rdev);
1647                         break;
1648                 }
1649                 if (test_bit(WantReplacement, &p->rdev->flags) &&
1650                     p[conf->raid_disks].rdev == NULL) {
1651                         /* Add this device as a replacement */
1652                         clear_bit(In_sync, &rdev->flags);
1653                         set_bit(Replacement, &rdev->flags);
1654                         rdev->raid_disk = mirror;
1655                         err = 0;
1656                         conf->fullsync = 1;
1657                         rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
1658                         break;
1659                 }
1660         }
1661         if (err == 0 && test_bit(Unmerged, &rdev->flags)) {
1662                 /* Some requests might not have seen this new
1663                  * merge_bvec_fn.  We must wait for them to complete
1664                  * before merging the device fully.
1665                  * First we make sure any code which has tested
1666                  * our function has submitted the request, then
1667                  * we wait for all outstanding requests to complete.
1668                  */
1669                 synchronize_sched();
1670                 freeze_array(conf, 0);
1671                 unfreeze_array(conf);
1672                 clear_bit(Unmerged, &rdev->flags);
1673         }
1674         md_integrity_add_rdev(rdev, mddev);
1675         if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
1676                 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
1677         print_conf(conf);
1678         return err;
1679 }
1680
1681 static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1682 {
1683         struct r1conf *conf = mddev->private;
1684         int err = 0;
1685         int number = rdev->raid_disk;
1686         struct raid1_info *p = conf->mirrors + number;
1687
1688         if (rdev != p->rdev)
1689                 p = conf->mirrors + conf->raid_disks + number;
1690
1691         print_conf(conf);
1692         if (rdev == p->rdev) {
1693                 if (test_bit(In_sync, &rdev->flags) ||
1694                     atomic_read(&rdev->nr_pending)) {
1695                         err = -EBUSY;
1696                         goto abort;
1697                 }
1698                 /* Only remove non-faulty devices if recovery
1699                  * is not possible.
1700                  */
1701                 if (!test_bit(Faulty, &rdev->flags) &&
1702                     mddev->recovery_disabled != conf->recovery_disabled &&
1703                     mddev->degraded < conf->raid_disks) {
1704                         err = -EBUSY;
1705                         goto abort;
1706                 }
1707                 p->rdev = NULL;
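                     /* Wait for any RCU readers that may still see the old
                      * pointer before re-checking nr_pending.
                      */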
1708                 synchronize_rcu();
1709                 if (atomic_read(&rdev->nr_pending)) {
1710                         /* lost the race, try later */
1711                         err = -EBUSY;
1712                         p->rdev = rdev;
1713                         goto abort;
1714                 } else if (conf->mirrors[conf->raid_disks + number].rdev) {
1715                         /* We just removed a device that is being replaced.
1716                          * Move down the replacement.  We drain all IO before
1717                          * doing this to avoid confusion.
1718                          */
1719                         struct md_rdev *repl =
1720                                 conf->mirrors[conf->raid_disks + number].rdev;
1721                         freeze_array(conf, 0);
1722                         clear_bit(Replacement, &repl->flags);
1723                         p->rdev = repl;
1724                         conf->mirrors[conf->raid_disks + number].rdev = NULL;
1725                         unfreeze_array(conf);
1726                         clear_bit(WantReplacement, &rdev->flags);
1727                 } else
1728                         clear_bit(WantReplacement, &rdev->flags);
1729                 err = md_integrity_register(mddev);
1730         }
1731 abort:
1732
1733         print_conf(conf);
1734         return err;
1735 }
1736
1737 static void end_sync_read(struct bio *bio)
1738 {
1739         struct r1bio *r1_bio = bio->bi_private;
1740
1741         update_head_pos(r1_bio->read_disk, r1_bio);
1742
1743         /*
1744          * we have read a block, now it needs to be re-written,
1745          * or re-read if the read failed.
1746          * We don't do much here, just schedule handling by raid1d
1747          */
1748         if (!bio->bi_error)
1749                 set_bit(R1BIO_Uptodate, &r1_bio->state);
1750
1751         if (atomic_dec_and_test(&r1_bio->remaining))
1752                 reschedule_retry(r1_bio);
1753 }
1754
1755 static void end_sync_write(struct bio *bio)
1756 {
1757         int uptodate = !bio->bi_error;
1758         struct r1bio *r1_bio = bio->bi_private;
1759         struct mddev *mddev = r1_bio->mddev;
1760         struct r1conf *conf = mddev->private;
1761         int mirror = 0;
1762         sector_t first_bad;
1763         int bad_sectors;
1764
1765         mirror = find_bio_disk(r1_bio, bio);
1766
1767         if (!uptodate) {
1768                 sector_t sync_blocks = 0;
1769                 sector_t s = r1_bio->sector;
1770                 long sectors_to_go = r1_bio->sectors;
1771                 /* make sure these bits don't get cleared. */
1772                 do {
1773                         bitmap_end_sync(mddev->bitmap, s,
1774                                         &sync_blocks, 1);
1775                         s += sync_blocks;
1776                         sectors_to_go -= sync_blocks;
1777                 } while (sectors_to_go > 0);
1778                 set_bit(WriteErrorSeen,
1779                         &conf->mirrors[mirror].rdev->flags);
1780                 if (!test_and_set_bit(WantReplacement,
1781                                       &conf->mirrors[mirror].rdev->flags))
1782                         set_bit(MD_RECOVERY_NEEDED, &
1783                                 mddev->recovery);
1784                 set_bit(R1BIO_WriteError, &r1_bio->state);
1785         } else if (is_badblock(conf->mirrors[mirror].rdev,
1786                                r1_bio->sector,
1787                                r1_bio->sectors,
1788                                &first_bad, &bad_sectors) &&
1789                    !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
1790                                 r1_bio->sector,
1791                                 r1_bio->sectors,
1792                                 &first_bad, &bad_sectors)
1793                 )
1794                 set_bit(R1BIO_MadeGood, &r1_bio->state);
1795
1796         if (atomic_dec_and_test(&r1_bio->remaining)) {
1797                 int s = r1_bio->sectors;
1798                 if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
1799                     test_bit(R1BIO_WriteError, &r1_bio->state))
1800                         reschedule_retry(r1_bio);
1801                 else {
1802                         put_buf(r1_bio);
1803                         md_done_sync(mddev, s, uptodate);
1804                 }
1805         }
1806 }
1807
1808 static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
1809                             int sectors, struct page *page, int rw)
1810 {
1811         if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
1812                 /* success */
1813                 return 1;
1814         if (rw == WRITE) {
1815                 set_bit(WriteErrorSeen, &rdev->flags);
1816                 if (!test_and_set_bit(WantReplacement,
1817                                       &rdev->flags))
1818                         set_bit(MD_RECOVERY_NEEDED, &
1819                                 rdev->mddev->recovery);
1820         }
1821         /* need to record an error - either for the block or the device */
1822         if (!rdev_set_badblocks(rdev, sector, sectors, 0))
1823                 md_error(rdev->mddev, rdev);
1824         return 0;
1825 }
1826
1827 static int fix_sync_read_error(struct r1bio *r1_bio)
1828 {
1829         /* Try some synchronous reads of other devices to get
1830          * good data, much like with normal read errors.  Only
1831          * read into the pages we already have so we don't
1832          * need to re-issue the read request.
1833          * We don't need to freeze the array, because being in an
1834          * active sync request, there is no normal IO, and
1835          * no overlapping syncs.
1836          * We don't need to check is_badblock() again as we
1837          * made sure that anything with a bad block in range
1838          * will have bi_end_io clear.
1839          */
1840         struct mddev *mddev = r1_bio->mddev;
1841         struct r1conf *conf = mddev->private;
1842         struct bio *bio = r1_bio->bios[r1_bio->read_disk];
1843         sector_t sect = r1_bio->sector;
1844         int sectors = r1_bio->sectors;
1845         int idx = 0;
1846
1847         while(sectors) {
1848                 int s = sectors;
1849                 int d = r1_bio->read_disk;
1850                 int success = 0;
1851                 struct md_rdev *rdev;
1852                 int start;
1853
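                     /* Work on at most one page worth of sectors per iteration. */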
1854                 if (s > (PAGE_SIZE>>9))
1855                         s = PAGE_SIZE >> 9;
1856                 do {
1857                         if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
1858                                 /* No rcu protection needed here; devices
1859                                  * can only be removed when no resync is
1860                                  * active, and resync is currently active
1861                                  */
1862                                 rdev = conf->mirrors[d].rdev;
1863                                 if (sync_page_io(rdev, sect, s<<9,
1864                                                  bio->bi_io_vec[idx].bv_page,
1865                                                  READ, false)) {
1866                                         success = 1;
1867                                         break;
1868                                 }
1869                         }
1870                         d++;
1871                         if (d == conf->raid_disks * 2)
1872                                 d = 0;
1873                 } while (!success && d != r1_bio->read_disk);
1874
1875                 if (!success) {
1876                         char b[BDEVNAME_SIZE];
1877                         int abort = 0;
1878                         /* Cannot read from anywhere, this block is lost.
1879                          * Record a bad block on each device.  If that doesn't
1880                          * work just disable and interrupt the recovery.
1881                          * Don't fail devices as that won't really help.
1882                          */
1883                         printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error"
1884                                " for block %llu\n",
1885                                mdname(mddev),
1886                                bdevname(bio->bi_bdev, b),
1887                                (unsigned long long)r1_bio->sector);
1888                         for (d = 0; d < conf->raid_disks * 2; d++) {
1889                                 rdev = conf->mirrors[d].rdev;
1890                                 if (!rdev || test_bit(Faulty, &rdev->flags))
1891                                         continue;
1892                                 if (!rdev_set_badblocks(rdev, sect, s, 0))
1893                                         abort = 1;
1894                         }
1895                         if (abort) {
1896                                 conf->recovery_disabled =
1897                                         mddev->recovery_disabled;
1898                                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1899                                 md_done_sync(mddev, r1_bio->sectors, 0);
1900                                 put_buf(r1_bio);
1901                                 return 0;
1902                         }
1903                         /* Try next page */
1904                         sectors -= s;
1905                         sect += s;
1906                         idx++;
1907                         continue;
1908                 }
1909
1910                 start = d;
1911                 /* write it back and re-read */
1912                 while (d != r1_bio->read_disk) {
1913                         if (d == 0)
1914                                 d = conf->raid_disks * 2;
1915                         d--;
1916                         if (r1_bio->bios[d]->bi_end_io != end_sync_read)
1917                                 continue;
1918                         rdev = conf->mirrors[d].rdev;
1919                         if (r1_sync_page_io(rdev, sect, s,
1920                                             bio->bi_io_vec[idx].bv_page,
1921                                             WRITE) == 0) {
1922                                 r1_bio->bios[d]->bi_end_io = NULL;
1923                                 rdev_dec_pending(rdev, mddev);
1924                         }
1925                 }
1926                 d = start;
1927                 while (d != r1_bio->read_disk) {
1928                         if (d == 0)
1929                                 d = conf->raid_disks * 2;
1930                         d--;
1931                         if (r1_bio->bios[d]->bi_end_io != end_sync_read)
1932                                 continue;
1933                         rdev = conf->mirrors[d].rdev;
1934                         if (r1_sync_page_io(rdev, sect, s,
1935                                             bio->bi_io_vec[idx].bv_page,
1936                                             READ) != 0)
1937                                 atomic_add(s, &rdev->corrected_errors);
1938                 }
1939                 sectors -= s;
1940                 sect += s;
1941                 idx++;
1942         }
1943         set_bit(R1BIO_Uptodate, &r1_bio->state);
1944         bio->bi_error = 0;
1945         return 1;
1946 }
1947
1948 static void process_checks(struct r1bio *r1_bio)
1949 {
1950         /* We have read all readable devices.  If we haven't
1951          * got the block, then there is no hope left.
1952          * If we have, then we want to do a comparison
1953          * and skip the write if everything is the same.
1954          * If any blocks failed to read, then we need to
1955          * attempt an over-write
1956          */
1957         struct mddev *mddev = r1_bio->mddev;
1958         struct r1conf *conf = mddev->private;
1959         int primary;
1960         int i;
1961         int vcnt;
1962
1963         /* Fix variable parts of all bios */
1964         vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
1965         for (i = 0; i < conf->raid_disks * 2; i++) {
1966                 int j;
1967                 int size;
1968                 int error;
1969                 struct bio *b = r1_bio->bios[i];
1970                 if (b->bi_end_io != end_sync_read)
1971                         continue;
1972                 /* fixup the bio for reuse, but preserve errno */
1973                 error = b->bi_error;
1974                 bio_reset(b);
1975                 b->bi_error = error;
1976                 b->bi_vcnt = vcnt;
1977                 b->bi_iter.bi_size = r1_bio->sectors << 9;
1978                 b->bi_iter.bi_sector = r1_bio->sector +
1979                         conf->mirrors[i].rdev->data_offset;
1980                 b->bi_bdev = conf->mirrors[i].rdev->bdev;
1981                 b->bi_end_io = end_sync_read;
1982                 b->bi_private = r1_bio;
1983
1984                 size = b->bi_iter.bi_size;
1985                 for (j = 0; j < vcnt ; j++) {
1986                         struct bio_vec *bi;
1987                         bi = &b->bi_io_vec[j];
1988                         bi->bv_offset = 0;
1989                         if (size > PAGE_SIZE)
1990                                 bi->bv_len = PAGE_SIZE;
1991                         else
1992                                 bi->bv_len = size;
1993                         size -= PAGE_SIZE;
1994                 }
1995         }
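             /* Use the first device whose read completed without error as the
              * reference copy.
              */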
1996         for (primary = 0; primary < conf->raid_disks * 2; primary++)
1997                 if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
1998                     !r1_bio->bios[primary]->bi_error) {
1999                         r1_bio->bios[primary]->bi_end_io = NULL;
2000                         rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
2001                         break;
2002                 }
2003         r1_bio->read_disk = primary;
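             /* Compare every other successful read against the primary, page by
              * page; unless this is only a 'check' pass, any copy that differs
              * or failed to read is refreshed from the primary before writing.
              */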
2004         for (i = 0; i < conf->raid_disks * 2; i++) {
2005                 int j;
2006                 struct bio *pbio = r1_bio->bios[primary];
2007                 struct bio *sbio = r1_bio->bios[i];
2008                 int error = sbio->bi_error;
2009
2010                 if (sbio->bi_end_io != end_sync_read)
2011                         continue;
2012                 /* Now we can 'fixup' the error value */
2013                 sbio->bi_error = 0;
2014
2015                 if (!error) {
2016                         for (j = vcnt; j-- ; ) {
2017                                 struct page *p, *s;
2018                                 p = pbio->bi_io_vec[j].bv_page;
2019                                 s = sbio->bi_io_vec[j].bv_page;
2020                                 if (memcmp(page_address(p),
2021                                            page_address(s),
2022                                            sbio->bi_io_vec[j].bv_len))
2023                                         break;
2024                         }
2025                 } else
2026                         j = 0;
2027                 if (j >= 0)
2028                         atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
2029                 if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
2030                               && !error)) {
2031                         /* No need to write to this device. */
2032                         sbio->bi_end_io = NULL;
2033                         rdev_dec_pending(conf->mirrors[i].rdev, mddev);
2034                         continue;
2035                 }
2036
2037                 bio_copy_data(sbio, pbio);
2038         }
2039 }
2040
2041 static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
2042 {
2043         struct r1conf *conf = mddev->private;
2044         int i;
2045         int disks = conf->raid_disks * 2;
2046         struct bio *bio, *wbio;
2047
2048         bio = r1_bio->bios[r1_bio->read_disk];
2049
2050         if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
2051                 /* ouch - failed to read all of that. */
2052                 if (!fix_sync_read_error(r1_bio))
2053                         return;
2054
2055         if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2056                 process_checks(r1_bio);
2057
2058         /*
2059          * schedule writes
2060          */
2061         atomic_set(&r1_bio->remaining, 1);
2062         for (i = 0; i < disks ; i++) {
2063                 wbio = r1_bio->bios[i];
2064                 if (wbio->bi_end_io == NULL ||
2065                     (wbio->bi_end_io == end_sync_read &&
2066                      (i == r1_bio->read_disk ||
2067                       !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
2068                         continue;
2069
2070                 wbio->bi_rw = WRITE;
2071                 wbio->bi_end_io = end_sync_write;
2072                 atomic_inc(&r1_bio->remaining);
2073                 md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
2074
2075                 generic_make_request(wbio);
2076         }
2077
2078         if (atomic_dec_and_test(&r1_bio->remaining)) {
2079                 /* if we're here, all write(s) have completed, so clean up */
2080                 int s = r1_bio->sectors;
2081                 if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2082                     test_bit(R1BIO_WriteError, &r1_bio->state))
2083                         reschedule_retry(r1_bio);
2084                 else {
2085                         put_buf(r1_bio);
2086                         md_done_sync(mddev, s, 1);
2087                 }
2088         }
2089 }
2090
2091 /*
2092  * This is a kernel thread which:
2093  *
2094  *      1.      Retries failed read operations on working mirrors.
2095  *      2.      Updates the raid superblock when problems are encountered.
2096  *      3.      Performs writes following reads for array synchronising.
2097  */
2098
2099 static void fix_read_error(struct r1conf *conf, int read_disk,
2100                            sector_t sect, int sectors)
2101 {
2102         struct mddev *mddev = conf->mddev;
2103         while(sectors) {
2104                 int s = sectors;
2105                 int d = read_disk;
2106                 int success = 0;
2107                 int start;
2108                 struct md_rdev *rdev;
2109
2110                 if (s > (PAGE_SIZE>>9))
2111                         s = PAGE_SIZE >> 9;
2112
2113                 do {
2114                         /* Note: no rcu protection needed here
2115                          * as this is synchronous in the raid1d thread
2116                          * which is the thread that might remove
2117                          * a device.  If raid1d ever becomes multi-threaded....
2118                          */
2119                         sector_t first_bad;
2120                         int bad_sectors;
2121
2122                         rdev = conf->mirrors[d].rdev;
2123                         if (rdev &&
2124                             (test_bit(In_sync, &rdev->flags) ||
2125                              (!test_bit(Faulty, &rdev->flags) &&
2126                               rdev->recovery_offset >= sect + s)) &&
2127                             is_badblock(rdev, sect, s,
2128                                         &first_bad, &bad_sectors) == 0 &&
2129                             sync_page_io(rdev, sect, s<<9,
2130                                          conf->tmppage, READ, false))
2131                                 success = 1;
2132                         else {
2133                                 d++;
2134                                 if (d == conf->raid_disks * 2)
2135                                         d = 0;
2136                         }
2137                 } while (!success && d != read_disk);
2138
2139                 if (!success) {
2140                         /* Cannot read from anywhere - mark it bad */
2141                         struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
2142                         if (!rdev_set_badblocks(rdev, sect, s, 0))
2143                                 md_error(mddev, rdev);
2144                         break;
2145                 }
2146                 /* write it back and re-read */
2147                 start = d;
2148                 while (d != read_disk) {
2149                         if (d==0)
2150                                 d = conf->raid_disks * 2;
2151                         d--;
2152                         rdev = conf->mirrors[d].rdev;
2153                         if (rdev &&
2154                             !test_bit(Faulty, &rdev->flags))
2155                                 r1_sync_page_io(rdev, sect, s,
2156                                                 conf->tmppage, WRITE);
2157                 }
2158                 d = start;
2159                 while (d != read_disk) {
2160                         char b[BDEVNAME_SIZE];
2161                         if (d==0)
2162                                 d = conf->raid_disks * 2;
2163                         d--;
2164                         rdev = conf->mirrors[d].rdev;
2165                         if (rdev &&
2166                             !test_bit(Faulty, &rdev->flags)) {
2167                                 if (r1_sync_page_io(rdev, sect, s,
2168                                                     conf->tmppage, READ)) {
2169                                         atomic_add(s, &rdev->corrected_errors);
2170                                         printk(KERN_INFO
2171                                                "md/raid1:%s: read error corrected "
2172                                                "(%d sectors at %llu on %s)\n",
2173                                                mdname(mddev), s,
2174                                                (unsigned long long)(sect +
2175                                                    rdev->data_offset),
2176                                                bdevname(rdev->bdev, b));
2177                                 }
2178                         }
2179                 }
2180                 sectors -= s;
2181                 sect += s;
2182         }
2183 }
2184
2185 static int narrow_write_error(struct r1bio *r1_bio, int i)
2186 {
2187         struct mddev *mddev = r1_bio->mddev;
2188         struct r1conf *conf = mddev->private;
2189         struct md_rdev *rdev = conf->mirrors[i].rdev;
2190
2191         /* bio has the data to be written to device 'i' where
2192          * we just recently had a write error.
2193          * We repeatedly clone the bio and trim down to one block,
2194          * then try the write.  Where the write fails we record
2195          * a bad block.
2196          * It is conceivable that the bio doesn't exactly align with
2197          * blocks.  We must handle this somehow.
2198          *
2199          * We currently own a reference on the rdev.
2200          */
2201
2202         int block_sectors;
2203         sector_t sector;
2204         int sectors;
2205         int sect_to_write = r1_bio->sectors;
2206         int ok = 1;
2207
2208         if (rdev->badblocks.shift < 0)
2209                 return 0;
2210
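             /* Round the bad-block granularity up to the device's logical block
              * size so that each retried write stays aligned.
              */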
2211         block_sectors = roundup(1 << rdev->badblocks.shift,
2212                                 bdev_logical_block_size(rdev->bdev) >> 9);
2213         sector = r1_bio->sector;
2214         sectors = ((sector + block_sectors)
2215                    & ~(sector_t)(block_sectors - 1))
2216                 - sector;
2217
2218         while (sect_to_write) {
2219                 struct bio *wbio;
2220                 if (sectors > sect_to_write)
2221                         sectors = sect_to_write;
2222                 /* Write at 'sector' for 'sectors' */
2223
2224                 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
2225                         unsigned vcnt = r1_bio->behind_page_count;
2226                         struct bio_vec *vec = r1_bio->behind_bvecs;
2227
2228                         while (!vec->bv_page) {
2229                                 vec++;
2230                                 vcnt--;
2231                         }
2232
2233                         wbio = bio_alloc_mddev(GFP_NOIO, vcnt, mddev);
2234                         memcpy(wbio->bi_io_vec, vec, vcnt * sizeof(struct bio_vec));
2235
2236                         wbio->bi_vcnt = vcnt;
2237                 } else {
2238                         wbio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
2239                 }
2240
2241                 wbio->bi_rw = WRITE;
2242                 wbio->bi_iter.bi_sector = r1_bio->sector;
2243                 wbio->bi_iter.bi_size = r1_bio->sectors << 9;
2244
2245                 bio_trim(wbio, sector - r1_bio->sector, sectors);
2246                 wbio->bi_iter.bi_sector += rdev->data_offset;
2247                 wbio->bi_bdev = rdev->bdev;
2248                 if (submit_bio_wait(WRITE, wbio) == 0)
2249                         /* failure! */
2250                         ok = rdev_set_badblocks(rdev, sector,
2251                                                 sectors, 0)
2252                                 && ok;
2253
2254                 bio_put(wbio);
2255                 sect_to_write -= sectors;
2256                 sector += sectors;
2257                 sectors = block_sectors;
2258         }
2259         return ok;
2260 }
2261
2262 static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2263 {
2264         int m;
2265         int s = r1_bio->sectors;
2266         for (m = 0; m < conf->raid_disks * 2 ; m++) {
2267                 struct md_rdev *rdev = conf->mirrors[m].rdev;
2268                 struct bio *bio = r1_bio->bios[m];
2269                 if (bio->bi_end_io == NULL)
2270                         continue;
2271                 if (!bio->bi_error &&
2272                     test_bit(R1BIO_MadeGood, &r1_bio->state)) {
2273                         rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
2274                 }
2275                 if (bio->bi_error &&
2276                     test_bit(R1BIO_WriteError, &r1_bio->state)) {
2277                         if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
2278                                 md_error(conf->mddev, rdev);
2279                 }
2280         }
2281         put_buf(r1_bio);
2282         md_done_sync(conf->mddev, s, 1);
2283 }
2284
2285 static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2286 {
2287         int m;
2288         for (m = 0; m < conf->raid_disks * 2 ; m++)
2289                 if (r1_bio->bios[m] == IO_MADE_GOOD) {
2290                         struct md_rdev *rdev = conf->mirrors[m].rdev;
2291                         rdev_clear_badblocks(rdev,
2292                                              r1_bio->sector,
2293                                              r1_bio->sectors, 0);
2294                         rdev_dec_pending(rdev, conf->mddev);
2295                 } else if (r1_bio->bios[m] != NULL) {
2296                         /* This drive got a write error.  We need to
2297                          * narrow down and record precise write
2298                          * errors.
2299                          */
2300                         if (!narrow_write_error(r1_bio, m)) {
2301                                 md_error(conf->mddev,
2302                                          conf->mirrors[m].rdev);
2303                                 /* an I/O failed, we can't clear the bitmap */
2304                                 set_bit(R1BIO_Degraded, &r1_bio->state);
2305                         }
2306                         rdev_dec_pending(conf->mirrors[m].rdev,
2307                                          conf->mddev);
2308                 }
2309         if (test_bit(R1BIO_WriteError, &r1_bio->state))
2310                 close_write(r1_bio);
2311         raid_end_bio_io(r1_bio);
2312 }
2313
2314 static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
2315 {
2316         int disk;
2317         int max_sectors;
2318         struct mddev *mddev = conf->mddev;
2319         struct bio *bio;
2320         char b[BDEVNAME_SIZE];
2321         struct md_rdev *rdev;
2322
2323         clear_bit(R1BIO_ReadError, &r1_bio->state);
2324         /* we got a read error. Maybe the drive is bad.  Maybe just
2325          * the block and we can fix it.
2326          * We freeze all other IO, and try reading the block from
2327          * other devices.  When we find one, we re-write
2328          * and check whether that fixes the read error.
2329          * This is all done synchronously while the array is
2330          * frozen
2331          */
2332         if (mddev->ro == 0) {
2333                 freeze_array(conf, 1);
2334                 fix_read_error(conf, r1_bio->read_disk,
2335                                r1_bio->sector, r1_bio->sectors);
2336                 unfreeze_array(conf);
2337         } else
2338                 md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
2339         rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev);
2340
2341         bio = r1_bio->bios[r1_bio->read_disk];
2342         bdevname(bio->bi_bdev, b);
2343 read_more:
2344         disk = read_balance(conf, r1_bio, &max_sectors);
2345         if (disk == -1) {
2346                 printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
2347                        " read error for block %llu\n",
2348                        mdname(mddev), b, (unsigned long long)r1_bio->sector);
2349                 raid_end_bio_io(r1_bio);
2350         } else {
2351                 const unsigned long do_sync
2352                         = r1_bio->master_bio->bi_rw & REQ_SYNC;
2353                 if (bio) {
2354                         r1_bio->bios[r1_bio->read_disk] =
2355                                 mddev->ro ? IO_BLOCKED : NULL;
2356                         bio_put(bio);
2357                 }
2358                 r1_bio->read_disk = disk;
2359                 bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
2360                 bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector,
2361                          max_sectors);
2362                 r1_bio->bios[r1_bio->read_disk] = bio;
2363                 rdev = conf->mirrors[disk].rdev;
2364                 printk_ratelimited(KERN_ERR
2365                                    "md/raid1:%s: redirecting sector %llu"
2366                                    " to other mirror: %s\n",
2367                                    mdname(mddev),
2368                                    (unsigned long long)r1_bio->sector,
2369                                    bdevname(rdev->bdev, b));
2370                 bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;
2371                 bio->bi_bdev = rdev->bdev;
2372                 bio->bi_end_io = raid1_end_read_request;
2373                 bio->bi_rw = READ | do_sync;
2374                 bio->bi_private = r1_bio;
2375                 if (max_sectors < r1_bio->sectors) {
2376                         /* Drat - have to split this up more */
2377                         struct bio *mbio = r1_bio->master_bio;
2378                         int sectors_handled = (r1_bio->sector + max_sectors
2379                                                - mbio->bi_iter.bi_sector);
2380                         r1_bio->sectors = max_sectors;
2381                         spin_lock_irq(&conf->device_lock);
2382                         if (mbio->bi_phys_segments == 0)
2383                                 mbio->bi_phys_segments = 2;
2384                         else
2385                                 mbio->bi_phys_segments++;
2386                         spin_unlock_irq(&conf->device_lock);
2387                         generic_make_request(bio);
2388                         bio = NULL;
2389
2390                         r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
2391
2392                         r1_bio->master_bio = mbio;
2393                         r1_bio->sectors = bio_sectors(mbio) - sectors_handled;
2394                         r1_bio->state = 0;
2395                         set_bit(R1BIO_ReadError, &r1_bio->state);
2396                         r1_bio->mddev = mddev;
2397                         r1_bio->sector = mbio->bi_iter.bi_sector +
2398                                 sectors_handled;
2399
2400                         goto read_more;
2401                 } else
2402                         generic_make_request(bio);
2403         }
2404 }
2405
2406 static void raid1d(struct md_thread *thread)
2407 {
2408         struct mddev *mddev = thread->mddev;
2409         struct r1bio *r1_bio;
2410         unsigned long flags;
2411         struct r1conf *conf = mddev->private;
2412         struct list_head *head = &conf->retry_list;
2413         struct blk_plug plug;
2414
2415         md_check_recovery(mddev);
2416
2417         blk_start_plug(&plug);
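             /* Main service loop: flush any queued writes, then handle one
              * r1_bio from the retry list per iteration.
              */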
2418         for (;;) {
2419
2420                 flush_pending_writes(conf);
2421
2422                 spin_lock_irqsave(&conf->device_lock, flags);
2423                 if (list_empty(head)) {
2424                         spin_unlock_irqrestore(&conf->device_lock, flags);
2425                         break;
2426                 }
2427                 r1_bio = list_entry(head->prev, struct r1bio, retry_list);
2428                 list_del(head->prev);
2429                 conf->nr_queued--;
2430                 spin_unlock_irqrestore(&conf->device_lock, flags);
2431
2432                 mddev = r1_bio->mddev;
2433                 conf = mddev->private;
2434                 if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
2435                         if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2436                             test_bit(R1BIO_WriteError, &r1_bio->state))
2437                                 handle_sync_write_finished(conf, r1_bio);
2438                         else
2439                                 sync_request_write(mddev, r1_bio);
2440                 } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2441                            test_bit(R1BIO_WriteError, &r1_bio->state))
2442                         handle_write_finished(conf, r1_bio);
2443                 else if (test_bit(R1BIO_ReadError, &r1_bio->state))
2444                         handle_read_error(conf, r1_bio);
2445                 else
2446                         /* just a partial read to be scheduled from a separate
2447                          * context
2448                          */
2449                         generic_make_request(r1_bio->bios[r1_bio->read_disk]);
2450
2451                 cond_resched();
2452                 if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
2453                         md_check_recovery(mddev);
2454         }
2455         blk_finish_plug(&plug);
2456 }
2457
2458 static int init_resync(struct r1conf *conf)
2459 {
2460         int buffs;
2461
2462         buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
2463         BUG_ON(conf->r1buf_pool);
2464         conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
2465                                           conf->poolinfo);
2466         if (!conf->r1buf_pool)
2467                 return -ENOMEM;
2468         conf->next_resync = 0;
2469         return 0;
2470 }
2471
2472 /*
2473  * perform a "sync" on one "block"
2474  *
2475  * We need to make sure that no normal I/O request - particularly write
2476  * requests - conflict with active sync requests.
2477  *
2478  * This is achieved by tracking pending requests and a 'barrier' concept
2479  * that can be installed to exclude normal IO requests.
2480  */
2481
2482 static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
2483 {
2484         struct r1conf *conf = mddev->private;
2485         struct r1bio *r1_bio;
2486         struct bio *bio;
2487         sector_t max_sector, nr_sectors;
2488         int disk = -1;
2489         int i;
2490         int wonly = -1;
2491         int write_targets = 0, read_targets = 0;
2492         sector_t sync_blocks;
2493         int still_degraded = 0;
2494         int good_sectors = RESYNC_SECTORS;
2495         int min_bad = 0; /* number of sectors that are bad in all devices */
2496
2497         if (!conf->r1buf_pool)
2498                 if (init_resync(conf))
2499                         return 0;
2500
2501         max_sector = mddev->dev_sectors;
2502         if (sector_nr >= max_sector) {
2503                 /* If we aborted, we need to abort the
2504                  * sync on the 'current' bitmap chunk (there will
2505                  * only be one in raid1 resync).
2506                  * We can find the current address in mddev->curr_resync
2507                  */
2508                 if (mddev->curr_resync < max_sector) /* aborted */
2509                         bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2510                                                 &sync_blocks, 1);
2511                 else /* completed sync */
2512                         conf->fullsync = 0;
2513
2514                 bitmap_close_sync(mddev->bitmap);
2515                 close_sync(conf);
2516                 return 0;
2517         }
2518
2519         if (mddev->bitmap == NULL &&
2520             mddev->recovery_cp == MaxSector &&
2521             !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
2522             conf->fullsync == 0) {
2523                 *skipped = 1;
2524                 return max_sector - sector_nr;
2525         }
2526         /* before building a request, check if we can skip these blocks.
2527          * This call to bitmap_start_sync doesn't actually record anything
2528          */
2529         if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
2530             !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2531                 /* We can skip this block, and probably several more */
2532                 *skipped = 1;
2533                 return sync_blocks;
2534         }
2535
2536         bitmap_cond_end_sync(mddev->bitmap, sector_nr);
2537         r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
2538
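             /* Exclude normal I/O from the resync window before any sync reads
              * are issued.
              */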
2539         raise_barrier(conf, sector_nr);
2540
2541         rcu_read_lock();
2542         /*
2543          * If we get a correctably read error during resync or recovery,
2544          * we might want to read from a different device.  So we
2545          * flag all drives that could conceivably be read from for READ,
2546          * and any others (which will be non-In_sync devices) for WRITE.
2547          * If a read fails, we try reading from something else for which READ
2548          * is OK.
2549          */
2550
2551         r1_bio->mddev = mddev;
2552         r1_bio->sector = sector_nr;
2553         r1_bio->state = 0;
2554         set_bit(R1BIO_IsSync, &r1_bio->state);
2555
2556         for (i = 0; i < conf->raid_disks * 2; i++) {
2557                 struct md_rdev *rdev;
2558                 bio = r1_bio->bios[i];
2559                 bio_reset(bio);
2560
2561                 rdev = rcu_dereference(conf->mirrors[i].rdev);
2562                 if (rdev == NULL ||
2563                     test_bit(Faulty, &rdev->flags)) {
2564                         if (i < conf->raid_disks)
2565                                 still_degraded = 1;
2566                 } else if (!test_bit(In_sync, &rdev->flags)) {
2567                         bio->bi_rw = WRITE;
2568                         bio->bi_end_io = end_sync_write;
2569                         write_targets++;
2570                 } else {
2571                         /* may need to read from here */
2572                         sector_t first_bad = MaxSector;
2573                         int bad_sectors;
2574
2575                         if (is_badblock(rdev, sector_nr, good_sectors,
2576                                         &first_bad, &bad_sectors)) {
2577                                 if (first_bad > sector_nr)
2578                                         good_sectors = first_bad - sector_nr;
2579                                 else {
2580                                         bad_sectors -= (sector_nr - first_bad);
2581                                         if (min_bad == 0 ||
2582                                             min_bad > bad_sectors)
2583                                                 min_bad = bad_sectors;
2584                                 }
2585                         }
2586                         if (sector_nr < first_bad) {
2587                                 if (test_bit(WriteMostly, &rdev->flags)) {
2588                                         if (wonly < 0)
2589                                                 wonly = i;
2590                                 } else {
2591                                         if (disk < 0)
2592                                                 disk = i;
2593                                 }
2594                                 bio->bi_rw = READ;
2595                                 bio->bi_end_io = end_sync_read;
2596                                 read_targets++;
2597                         } else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
2598                                 test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
2599                                 !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
2600                                 /*
2601                                  * The device is suitable for reading (InSync),
2602                                  * but has bad block(s) here. Let's try to correct them,
2603                                  * if we are doing resync or repair. Otherwise, leave
2604                                  * this device alone for this sync request.
2605                                  */
2606                                 bio->bi_rw = WRITE;
2607                                 bio->bi_end_io = end_sync_write;
2608                                 write_targets++;
2609                         }
2610                 }
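                /* If this pass chose to use the bio (for read or write),
                 * account the pending request on the rdev and aim the bio at
                 * this device's data area (array sector + per-device
                 * data_offset).
                 */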
2611                 if (bio->bi_end_io) {
2612                         atomic_inc(&rdev->nr_pending);
2613                         bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
2614                         bio->bi_bdev = rdev->bdev;
2615                         bio->bi_private = r1_bio;
2616                 }
2617         }
2618         rcu_read_unlock();
2619         if (disk < 0)
2620                 disk = wonly;
2621         r1_bio->read_disk = disk;
2622
2623         if (read_targets == 0 && min_bad > 0) {
2624                 /* These sectors are bad on all InSync devices, so we
2625                  * need to mark them bad on all write targets
2626                  */
2627                 int ok = 1;
2628                 for (i = 0 ; i < conf->raid_disks * 2 ; i++)
2629                         if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
2630                                 struct md_rdev *rdev = conf->mirrors[i].rdev;
2631                                 ok = rdev_set_badblocks(rdev, sector_nr,
2632                                                         min_bad, 0
2633                                         ) && ok;
2634                         }
2635                 set_bit(MD_CHANGE_DEVS, &mddev->flags);
2636                 *skipped = 1;
2637                 put_buf(r1_bio);
2638
2639                 if (!ok) {
2640                         /* Cannot record the badblocks, so need to
2641                          * abort the resync.
2642                          * If there are multiple read targets, could just
2643                          * fail the really bad ones ???
2644                          */
2645                         conf->recovery_disabled = mddev->recovery_disabled;
2646                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2647                         return 0;
2648                 } else
2649                         return min_bad;
2650
2651         }
2652         if (min_bad > 0 && min_bad < good_sectors) {
2653                 /* only resync enough to reach the next bad->good
2654                  * transition */
2655                 good_sectors = min_bad;
2656         }
2657
2658         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
2659                 /* extra read targets are also write targets */
2660                 write_targets += read_targets-1;
2661
2662         if (write_targets == 0 || read_targets == 0) {
2663                 /* There is nowhere to write, so all non-sync
2664                  * drives must be failed - so we are finished
2665                  */
2666                 sector_t rv;
2667                 if (min_bad > 0)
2668                         max_sector = sector_nr + min_bad;
2669                 rv = max_sector - sector_nr;
2670                 *skipped = 1;
2671                 put_buf(r1_bio);
2672                 return rv;
2673         }
2674
2675         if (max_sector > mddev->resync_max)
2676                 max_sector = mddev->resync_max; /* Don't do IO beyond here */
2677         if (max_sector > sector_nr + good_sectors)
2678                 max_sector = sector_nr + good_sectors;
2679         nr_sectors = 0;
2680         sync_blocks = 0;
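        /* Fill pages into every bio we decided to use, one PAGE_SIZE chunk at
         * a time, stopping at max_sector, at a bitmap boundary where the sync
         * can be skipped, or when a bio cannot accept another page.
         */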
2681         do {
2682                 struct page *page;
2683                 int len = PAGE_SIZE;
2684                 if (sector_nr + (len>>9) > max_sector)
2685                         len = (max_sector - sector_nr) << 9;
2686                 if (len == 0)
2687                         break;
2688                 if (sync_blocks == 0) {
2689                         if (!bitmap_start_sync(mddev->bitmap, sector_nr,
2690                                                &sync_blocks, still_degraded) &&
2691                             !conf->fullsync &&
2692                             !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2693                                 break;
2694                         BUG_ON(sync_blocks < (PAGE_SIZE>>9));
2695                         if ((len >> 9) > sync_blocks)
2696                                 len = sync_blocks<<9;
2697                 }
2698
2699                 for (i = 0 ; i < conf->raid_disks * 2; i++) {
2700                         bio = r1_bio->bios[i];
2701                         if (bio->bi_end_io) {
2702                                 page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
2703                                 if (bio_add_page(bio, page, len, 0) == 0) {
2704                                         /* stop here */
2705                                         bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
2706                                         while (i > 0) {
2707                                                 i--;
2708                                                 bio = r1_bio->bios[i];
2709                                                 if (bio->bi_end_io==NULL)
2710                                                         continue;
2711                                                 /* remove last page from this bio */
2712                                                 bio->bi_vcnt--;
2713                                                 bio->bi_iter.bi_size -= len;
2714                                                 bio_clear_flag(bio, BIO_SEG_VALID);
2715                                         }
2716                                         goto bio_full;
2717                                 }
2718                         }
2719                 }
2720                 nr_sectors += len>>9;
2721                 sector_nr += len>>9;
2722                 sync_blocks -= (len>>9);
2723         } while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES);
2724  bio_full:
2725         r1_bio->sectors = nr_sectors;
2726
2727         /* For a user-requested sync, we read all readable devices and do a
2728          * compare
2729          */
2730         if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2731                 atomic_set(&r1_bio->remaining, read_targets);
2732                 for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
2733                         bio = r1_bio->bios[i];
2734                         if (bio->bi_end_io == end_sync_read) {
2735                                 read_targets--;
2736                                 md_sync_acct(bio->bi_bdev, nr_sectors);
2737                                 generic_make_request(bio);
2738                         }
2739                 }
2740         } else {
2741                 atomic_set(&r1_bio->remaining, 1);
2742                 bio = r1_bio->bios[r1_bio->read_disk];
2743                 md_sync_acct(bio->bi_bdev, nr_sectors);
2744                 generic_make_request(bio);
2745
2746         }
2747         return nr_sectors;
2748 }
2749
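/*
 * raid1 does no striping, so the usable array size is simply the per-device
 * size (or the explicitly requested size).
 */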
2750 static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
2751 {
2752         if (sectors)
2753                 return sectors;
2754
2755         return mddev->dev_sectors;
2756 }
2757
2758 static struct r1conf *setup_conf(struct mddev *mddev)
2759 {
2760         struct r1conf *conf;
2761         int i;
2762         struct raid1_info *disk;
2763         struct md_rdev *rdev;
2764         int err = -ENOMEM;
2765
2766         conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
2767         if (!conf)
2768                 goto abort;
2769
2770         conf->mirrors = kzalloc(sizeof(struct raid1_info)
2771                                 * mddev->raid_disks * 2,
2772                                  GFP_KERNEL);
2773         if (!conf->mirrors)
2774                 goto abort;
2775
2776         conf->tmppage = alloc_page(GFP_KERNEL);
2777         if (!conf->tmppage)
2778                 goto abort;
2779
2780         conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
2781         if (!conf->poolinfo)
2782                 goto abort;
2783         conf->poolinfo->raid_disks = mddev->raid_disks * 2;
2784         conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
2785                                           r1bio_pool_free,
2786                                           conf->poolinfo);
2787         if (!conf->r1bio_pool)
2788                 goto abort;
2789
2790         conf->poolinfo->mddev = mddev;
2791
2792         err = -EINVAL;
2793         spin_lock_init(&conf->device_lock);
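        /* conf->mirrors has 2 * raid_disks slots: slot i holds the primary
         * device for role i, and slot raid_disks + i holds its replacement,
         * if any.
         */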
2794         rdev_for_each(rdev, mddev) {
2795                 struct request_queue *q;
2796                 int disk_idx = rdev->raid_disk;
2797                 if (disk_idx >= mddev->raid_disks
2798                     || disk_idx < 0)
2799                         continue;
2800                 if (test_bit(Replacement, &rdev->flags))
2801                         disk = conf->mirrors + mddev->raid_disks + disk_idx;
2802                 else
2803                         disk = conf->mirrors + disk_idx;
2804
2805                 if (disk->rdev)
2806                         goto abort;
2807                 disk->rdev = rdev;
2808                 q = bdev_get_queue(rdev->bdev);
2809                 if (q->merge_bvec_fn)
2810                         mddev->merge_check_needed = 1;
2811
2812                 disk->head_position = 0;
2813                 disk->seq_start = MaxSector;
2814         }
2815         conf->raid_disks = mddev->raid_disks;
2816         conf->mddev = mddev;
2817         INIT_LIST_HEAD(&conf->retry_list);
2818
2819         spin_lock_init(&conf->resync_lock);
2820         init_waitqueue_head(&conf->wait_barrier);
2821
2822         bio_list_init(&conf->pending_bio_list);
2823         conf->pending_count = 0;
2824         conf->recovery_disabled = mddev->recovery_disabled - 1;
2825
2826         conf->start_next_window = MaxSector;
2827         conf->current_window_requests = conf->next_window_requests = 0;
2828
2829         err = -EIO;
2830         for (i = 0; i < conf->raid_disks * 2; i++) {
2831
2832                 disk = conf->mirrors + i;
2833
2834                 if (i < conf->raid_disks &&
2835                     disk[conf->raid_disks].rdev) {
2836                         /* This slot has a replacement. */
2837                         if (!disk->rdev) {
2838                                 /* No original, just make the replacement
2839                                  * a recovering spare
2840                                  */
2841                                 disk->rdev =
2842                                         disk[conf->raid_disks].rdev;
2843                                 disk[conf->raid_disks].rdev = NULL;
2844                         } else if (!test_bit(In_sync, &disk->rdev->flags))
2845                                 /* Original is not in_sync - bad */
2846                                 goto abort;
2847                 }
2848
2849                 if (!disk->rdev ||
2850                     !test_bit(In_sync, &disk->rdev->flags)) {
2851                         disk->head_position = 0;
2852                         if (disk->rdev &&
2853                             (disk->rdev->saved_raid_disk < 0))
2854                                 conf->fullsync = 1;
2855                 }
2856         }
2857
2858         err = -ENOMEM;
2859         conf->thread = md_register_thread(raid1d, mddev, "raid1");
2860         if (!conf->thread) {
2861                 printk(KERN_ERR
2862                        "md/raid1:%s: couldn't allocate thread\n",
2863                        mdname(mddev));
2864                 goto abort;
2865         }
2866
2867         return conf;
2868
2869  abort:
2870         if (conf) {
2871                 if (conf->r1bio_pool)
2872                         mempool_destroy(conf->r1bio_pool);
2873                 kfree(conf->mirrors);
2874                 safe_put_page(conf->tmppage);
2875                 kfree(conf->poolinfo);
2876                 kfree(conf);
2877         }
2878         return ERR_PTR(err);
2879 }
2880
2881 static void raid1_free(struct mddev *mddev, void *priv);
2882 static int run(struct mddev *mddev)
2883 {
2884         struct r1conf *conf;
2885         int i;
2886         struct md_rdev *rdev;
2887         int ret;
2888         bool discard_supported = false;
2889
2890         if (mddev->level != 1) {
2891                 printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n",
2892                        mdname(mddev), mddev->level);
2893                 return -EIO;
2894         }
2895         if (mddev->reshape_position != MaxSector) {
2896                 printk(KERN_ERR "md/raid1:%s: reshape_position set but not supported\n",
2897                        mdname(mddev));
2898                 return -EIO;
2899         }
2900         /*
2901          * copy the already verified devices into our private RAID1
2902          * bookkeeping area. [whatever we allocate in run()
2903          * should be freed in raid1_free()]
2904          */
2905         if (mddev->private == NULL)
2906                 conf = setup_conf(mddev);
2907         else
2908                 conf = mddev->private;
2909
2910         if (IS_ERR(conf))
2911                 return PTR_ERR(conf);
2912
2913         if (mddev->queue)
2914                 blk_queue_max_write_same_sectors(mddev->queue, 0);
2915
2916         rdev_for_each(rdev, mddev) {
2917                 if (!mddev->gendisk)
2918                         continue;
2919                 disk_stack_limits(mddev->gendisk, rdev->bdev,
2920                                   rdev->data_offset << 9);
2921                 if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
2922                         discard_supported = true;
2923         }
2924
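        /* Count the degraded slots: any role whose device is missing, not yet
         * in sync, or marked Faulty.
         */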
2925         mddev->degraded = 0;
2926         for (i=0; i < conf->raid_disks; i++)
2927                 if (conf->mirrors[i].rdev == NULL ||
2928                     !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
2929                     test_bit(Faulty, &conf->mirrors[i].rdev->flags))
2930                         mddev->degraded++;
2931
2932         if (conf->raid_disks - mddev->degraded == 1)
2933                 mddev->recovery_cp = MaxSector;
2934
2935         if (mddev->recovery_cp != MaxSector)
2936                 printk(KERN_NOTICE "md/raid1:%s: not clean"
2937                        " -- starting background reconstruction\n",
2938                        mdname(mddev));
2939         printk(KERN_INFO
2940                 "md/raid1:%s: active with %d out of %d mirrors\n",
2941                 mdname(mddev), mddev->raid_disks - mddev->degraded,
2942                 mddev->raid_disks);
2943
2944         /*
2945          * Ok, everything is just fine now
2946          */
2947         mddev->thread = conf->thread;
2948         conf->thread = NULL;
2949         mddev->private = conf;
2950
2951         md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
2952
2953         if (mddev->queue) {
2954                 if (discard_supported)
2955                         queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
2956                                                 mddev->queue);
2957                 else
2958                         queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
2959                                                   mddev->queue);
2960         }
2961
2962         ret = md_integrity_register(mddev);
2963         if (ret) {
2964                 md_unregister_thread(&mddev->thread);
2965                 raid1_free(mddev, conf);
2966         }
2967         return ret;
2968 }
2969
2970 static void raid1_free(struct mddev *mddev, void *priv)
2971 {
2972         struct r1conf *conf = priv;
2973
2974         if (conf->r1bio_pool)
2975                 mempool_destroy(conf->r1bio_pool);
2976         kfree(conf->mirrors);
2977         safe_put_page(conf->tmppage);
2978         kfree(conf->poolinfo);
2979         kfree(conf);
2980 }
2981
2982 static int raid1_resize(struct mddev *mddev, sector_t sectors)
2983 {
2984         /* no resync is happening, and there is enough space
2985          * on all devices, so we can resize.
2986          * We need to make sure resync covers any new space.
2987          * If the array is shrinking we should possibly wait until
2988          * any io in the removed space completes, but it hardly seems
2989          * worth it.
2990          */
2991         sector_t newsize = raid1_size(mddev, sectors, 0);
2992         if (mddev->external_size &&
2993             mddev->array_sectors > newsize)
2994                 return -EINVAL;
2995         if (mddev->bitmap) {
2996                 int ret = bitmap_resize(mddev->bitmap, newsize, 0, 0);
2997                 if (ret)
2998                         return ret;
2999         }
3000         md_set_array_sectors(mddev, newsize);
3001         set_capacity(mddev->gendisk, mddev->array_sectors);
3002         revalidate_disk(mddev->gendisk);
3003         if (sectors > mddev->dev_sectors &&
3004             mddev->recovery_cp > mddev->dev_sectors) {
3005                 mddev->recovery_cp = mddev->dev_sectors;
3006                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3007         }
3008         mddev->dev_sectors = sectors;
3009         mddev->resync_max_sectors = sectors;
3010         return 0;
3011 }
3012
3013 static int raid1_reshape(struct mddev *mddev)
3014 {
3015         /* We need to:
3016          * 1/ resize the r1bio_pool
3017          * 2/ resize conf->mirrors
3018          *
3019          * We allocate a new r1bio_pool if we can.
3020          * Then raise a device barrier and wait until all IO stops.
3021          * Then resize conf->mirrors and swap in the new r1bio pool.
3022          *
3023          * At the same time, we "pack" the devices so that all the missing
3024          * devices have the higher raid_disk numbers.
3025          */
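        /* Illustrative example of the "packing" below: shrinking an array
         * whose slots are {rdev0, missing, rdev2} to two devices ends up
         * with {rdev0, rdev2} in slots 0 and 1.
         */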
3026         mempool_t *newpool, *oldpool;
3027         struct pool_info *newpoolinfo;
3028         struct raid1_info *newmirrors;
3029         struct r1conf *conf = mddev->private;
3030         int cnt, raid_disks;
3031         unsigned long flags;
3032         int d, d2, err;
3033
3034         /* Cannot change chunk_size, layout, or level */
3035         if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
3036             mddev->layout != mddev->new_layout ||
3037             mddev->level != mddev->new_level) {
3038                 mddev->new_chunk_sectors = mddev->chunk_sectors;
3039                 mddev->new_layout = mddev->layout;
3040                 mddev->new_level = mddev->level;
3041                 return -EINVAL;
3042         }
3043
3044         err = md_allow_write(mddev);
3045         if (err)
3046                 return err;
3047
3048         raid_disks = mddev->raid_disks + mddev->delta_disks;
3049
3050         if (raid_disks < conf->raid_disks) {
3051                 cnt = 0;
3052                 for (d = 0; d < conf->raid_disks; d++)
3053                         if (conf->mirrors[d].rdev)
3054                                 cnt++;
3055                 if (cnt > raid_disks)
3056                         return -EBUSY;
3057         }
3058
3059         newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
3060         if (!newpoolinfo)
3061                 return -ENOMEM;
3062         newpoolinfo->mddev = mddev;
3063         newpoolinfo->raid_disks = raid_disks * 2;
3064
3065         newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
3066                                  r1bio_pool_free, newpoolinfo);
3067         if (!newpool) {
3068                 kfree(newpoolinfo);
3069                 return -ENOMEM;
3070         }
3071         newmirrors = kzalloc(sizeof(struct raid1_info) * raid_disks * 2,
3072                              GFP_KERNEL);
3073         if (!newmirrors) {
3074                 kfree(newpoolinfo);
3075                 mempool_destroy(newpool);
3076                 return -ENOMEM;
3077         }
3078
3079         freeze_array(conf, 0);
3080
3081         /* ok, everything is stopped */
3082         oldpool = conf->r1bio_pool;
3083         conf->r1bio_pool = newpool;
3084
3085         for (d = d2 = 0; d < conf->raid_disks; d++) {
3086                 struct md_rdev *rdev = conf->mirrors[d].rdev;
3087                 if (rdev && rdev->raid_disk != d2) {
3088                         sysfs_unlink_rdev(mddev, rdev);
3089                         rdev->raid_disk = d2;
3090                         sysfs_unlink_rdev(mddev, rdev);
3091                         if (sysfs_link_rdev(mddev, rdev))
3092                                 printk(KERN_WARNING
3093                                        "md/raid1:%s: cannot register rd%d\n",
3094                                        mdname(mddev), rdev->raid_disk);
3095                 }
3096                 if (rdev)
3097                         newmirrors[d2++].rdev = rdev;
3098         }
3099         kfree(conf->mirrors);
3100         conf->mirrors = newmirrors;
3101         kfree(conf->poolinfo);
3102         conf->poolinfo = newpoolinfo;
3103
3104         spin_lock_irqsave(&conf->device_lock, flags);
3105         mddev->degraded += (raid_disks - conf->raid_disks);
3106         spin_unlock_irqrestore(&conf->device_lock, flags);
3107         conf->raid_disks = mddev->raid_disks = raid_disks;
3108         mddev->delta_disks = 0;
3109
3110         unfreeze_array(conf);
3111
3112         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3113         md_wakeup_thread(mddev->thread);
3114
3115         mempool_destroy(oldpool);
3116         return 0;
3117 }
3118
3119 static void raid1_quiesce(struct mddev *mddev, int state)
3120 {
3121         struct r1conf *conf = mddev->private;
3122
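        /* state 1 freezes the array (drain and block all IO), state 0 thaws
         * it again, and state 2 only wakes up waiters so that a pending
         * suspend can make progress.
         */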
3123         switch(state) {
3124         case 2: /* wake for suspend */
3125                 wake_up(&conf->wait_barrier);
3126                 break;
3127         case 1:
3128                 freeze_array(conf, 0);
3129                 break;
3130         case 0:
3131                 unfreeze_array(conf);
3132                 break;
3133         }
3134 }
3135
3136 static void *raid1_takeover(struct mddev *mddev)
3137 {
3138         /* raid1 can take over:
3139          *  raid5 with 2 devices, any layout or chunk size
3140          */
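        /* Typically reached through a level-change request on a 2-device
         * raid5 (for example "mdadm --grow --level=1", though the exact
         * tooling is outside this driver).
         */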
3141         if (mddev->level == 5 && mddev->raid_disks == 2) {
3142                 struct r1conf *conf;
3143                 mddev->new_level = 1;
3144                 mddev->new_layout = 0;
3145                 mddev->new_chunk_sectors = 0;
3146                 conf = setup_conf(mddev);
3147                 if (!IS_ERR(conf))
3148                         /* Array must appear to be quiesced */
3149                         conf->array_frozen = 1;
3150                 return conf;
3151         }
3152         return ERR_PTR(-EINVAL);
3153 }
3154
3155 static struct md_personality raid1_personality =
3156 {
3157         .name           = "raid1",
3158         .level          = 1,
3159         .owner          = THIS_MODULE,
3160         .make_request   = make_request,
3161         .run            = run,
3162         .free           = raid1_free,
3163         .status         = status,
3164         .error_handler  = error,
3165         .hot_add_disk   = raid1_add_disk,
3166         .hot_remove_disk= raid1_remove_disk,
3167         .spare_active   = raid1_spare_active,
3168         .sync_request   = sync_request,
3169         .resize         = raid1_resize,
3170         .size           = raid1_size,
3171         .check_reshape  = raid1_reshape,
3172         .quiesce        = raid1_quiesce,
3173         .takeover       = raid1_takeover,
3174         .congested      = raid1_congested,
3175         .mergeable_bvec = raid1_mergeable_bvec,
3176 };
3177
3178 static int __init raid_init(void)
3179 {
3180         return register_md_personality(&raid1_personality);
3181 }
3182
3183 static void raid_exit(void)
3184 {
3185         unregister_md_personality(&raid1_personality);
3186 }
3187
3188 module_init(raid_init);
3189 module_exit(raid_exit);
3190 MODULE_LICENSE("GPL");
3191 MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
3192 MODULE_ALIAS("md-personality-3"); /* RAID1 */
3193 MODULE_ALIAS("md-raid1");
3194 MODULE_ALIAS("md-level-1");
3195
3196 module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);