1 /*
2    md.c : Multiple Devices driver for Linux
3      Copyright (C) 1998, 1999, 2000 Ingo Molnar
4
5      completely rewritten, based on the MD driver code from Marc Zyngier
6
7    Changes:
8
9    - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
10    - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
11    - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
12    - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
13    - kmod support by: Cyrus Durgin
14    - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
15    - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
16
17    - lots of fixes and improvements to the RAID1/RAID5 and generic
18      RAID code (such as request based resynchronization):
19
20      Neil Brown <neilb@cse.unsw.edu.au>.
21
22    - persistent bitmap code
23      Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
24
25    This program is free software; you can redistribute it and/or modify
26    it under the terms of the GNU General Public License as published by
27    the Free Software Foundation; either version 2, or (at your option)
28    any later version.
29
30    You should have received a copy of the GNU General Public License
31    (for example /usr/src/linux/COPYING); if not, write to the Free
32    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
33 */
34
35 #include <linux/kthread.h>
36 #include <linux/blkdev.h>
37 #include <linux/badblocks.h>
38 #include <linux/sysctl.h>
39 #include <linux/seq_file.h>
40 #include <linux/fs.h>
41 #include <linux/poll.h>
42 #include <linux/ctype.h>
43 #include <linux/string.h>
44 #include <linux/hdreg.h>
45 #include <linux/proc_fs.h>
46 #include <linux/random.h>
47 #include <linux/module.h>
48 #include <linux/reboot.h>
49 #include <linux/file.h>
50 #include <linux/compat.h>
51 #include <linux/delay.h>
52 #include <linux/raid/md_p.h>
53 #include <linux/raid/md_u.h>
54 #include <linux/slab.h>
55 #include "md.h"
56 #include "bitmap.h"
57 #include "md-cluster.h"
58
59 #ifndef MODULE
60 static void autostart_arrays(int part);
61 #endif
62
63 /* pers_list is a list of registered personalities protected
64  * by pers_lock.
65  * pers_lock additionally protects accesses to
66  * mddev->thread when the reconfig mutex cannot be held.
67  */
68 static LIST_HEAD(pers_list);
69 static DEFINE_SPINLOCK(pers_lock);
70
71 struct md_cluster_operations *md_cluster_ops;
72 EXPORT_SYMBOL(md_cluster_ops);
73 struct module *md_cluster_mod;
74 EXPORT_SYMBOL(md_cluster_mod);
75
76 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
77 static struct workqueue_struct *md_wq;
78 static struct workqueue_struct *md_misc_wq;
79
80 static int remove_and_add_spares(struct mddev *mddev,
81                                  struct md_rdev *this);
82 static void mddev_detach(struct mddev *mddev);
83
84 /*
85  * Default number of read corrections we'll attempt on an rdev
86  * before ejecting it from the array. We divide the read error
87  * count by 2 for every hour elapsed between read errors.
88  */
89 #define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
90 /*
91  * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
92  * is 1000 KB/sec, so the extra system load does not show up that much.
93  * Increase it if you want to have more _guaranteed_ speed. Note that
94  * the RAID driver will use the maximum available bandwidth if the IO
95  * subsystem is idle. There is also an 'absolute maximum' reconstruction
96  * speed limit - in case reconstruction slows down your system despite
97  * idle IO detection.
98  *
99  * You can change it via /proc/sys/dev/raid/speed_limit_min and _max,
100  * or /sys/block/mdX/md/sync_speed_{min,max}.
101  */
102
103 static int sysctl_speed_limit_min = 1000;
104 static int sysctl_speed_limit_max = 200000;
105 static inline int speed_min(struct mddev *mddev)
106 {
107         return mddev->sync_speed_min ?
108                 mddev->sync_speed_min : sysctl_speed_limit_min;
109 }
110
111 static inline int speed_max(struct mddev *mddev)
112 {
113         return mddev->sync_speed_max ?
114                 mddev->sync_speed_max : sysctl_speed_limit_max;
115 }
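
/*
 * For illustration, the limits above are in KB/sec, so raising the
 * guaranteed floor to ~50 MB/sec for a hypothetical array md0 would be:
 *
 *   echo 50000 > /proc/sys/dev/raid/speed_limit_min
 * or, for just that one array:
 *   echo 50000 > /sys/block/md0/md/sync_speed_min
 */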
116
117 static struct ctl_table_header *raid_table_header;
118
119 static struct ctl_table raid_table[] = {
120         {
121                 .procname       = "speed_limit_min",
122                 .data           = &sysctl_speed_limit_min,
123                 .maxlen         = sizeof(int),
124                 .mode           = S_IRUGO|S_IWUSR,
125                 .proc_handler   = proc_dointvec,
126         },
127         {
128                 .procname       = "speed_limit_max",
129                 .data           = &sysctl_speed_limit_max,
130                 .maxlen         = sizeof(int),
131                 .mode           = S_IRUGO|S_IWUSR,
132                 .proc_handler   = proc_dointvec,
133         },
134         { }
135 };
136
137 static struct ctl_table raid_dir_table[] = {
138         {
139                 .procname       = "raid",
140                 .maxlen         = 0,
141                 .mode           = S_IRUGO|S_IXUGO,
142                 .child          = raid_table,
143         },
144         { }
145 };
146
147 static struct ctl_table raid_root_table[] = {
148         {
149                 .procname       = "dev",
150                 .maxlen         = 0,
151                 .mode           = 0555,
152                 .child          = raid_dir_table,
153         },
154         {  }
155 };
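
/*
 * These three tables chain through .child to build the
 * /proc/sys/dev/raid/ hierarchy; raid_root_table is the table that is
 * eventually handed to register_sysctl_table() when the module
 * initialises, later in this file.
 */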
156
157 static const struct block_device_operations md_fops;
158
159 static int start_readonly;
160
161 /* bio_alloc_mddev and bio_clone_mddev:
162  * like bio_alloc/bio_clone, but use the mddev's local bio set when available
163  */
164
165 struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
166                             struct mddev *mddev)
167 {
168         struct bio *b;
169
170         if (!mddev || !mddev->bio_set)
171                 return bio_alloc(gfp_mask, nr_iovecs);
172
173         b = bio_alloc_bioset(gfp_mask, nr_iovecs, mddev->bio_set);
174         if (!b)
175                 return NULL;
176         return b;
177 }
178 EXPORT_SYMBOL_GPL(bio_alloc_mddev);
179
180 struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
181                             struct mddev *mddev)
182 {
183         if (!mddev || !mddev->bio_set)
184                 return bio_clone(bio, gfp_mask);
185
186         return bio_clone_bioset(bio, gfp_mask, mddev->bio_set);
187 }
188 EXPORT_SYMBOL_GPL(bio_clone_mddev);
189
190 /*
191  * We have a system wide 'event count' that is incremented
192  * on any 'interesting' event, and readers of /proc/mdstat
193  * can use 'poll' or 'select' to find out when the event
194  * count increases.
195  *
196  * Events are:
197  *  start array, stop array, error, add device, remove device,
198  *  start build, activate spare
199  */
200 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
201 static atomic_t md_event_count;
202 void md_new_event(struct mddev *mddev)
203 {
204         atomic_inc(&md_event_count);
205         wake_up(&md_event_waiters);
206 }
207 EXPORT_SYMBOL_GPL(md_new_event);
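
/*
 * A userspace watcher of these events would, for example, open
 * /proc/mdstat and poll() it; the mdstat poll method (later in this
 * file) reports POLLERR|POLLPRI once md_event_count has moved on,
 * after which the file can simply be re-read.
 */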
208
209 /*
210  * Enables iteration over all existing md arrays.
211  * all_mddevs_lock protects this list.
212  */
213 static LIST_HEAD(all_mddevs);
214 static DEFINE_SPINLOCK(all_mddevs_lock);
215
216 /*
217  * Iterates through all used mddevs in the system.
218  * We take care to grab the all_mddevs_lock whenever navigating
219  * the list, and to always hold a refcount when unlocked.
220  * Any code which breaks out of this loop while owning
221  * a reference to the current mddev must mddev_put it.
222  */
223 #define for_each_mddev(_mddev,_tmp)                                     \
224                                                                         \
225         for (({ spin_lock(&all_mddevs_lock);                            \
226                 _tmp = all_mddevs.next;                                 \
227                 _mddev = NULL;});                                       \
228              ({ if (_tmp != &all_mddevs)                                \
229                         mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
230                 spin_unlock(&all_mddevs_lock);                          \
231                 if (_mddev) mddev_put(_mddev);                          \
232                 _mddev = list_entry(_tmp, struct mddev, all_mddevs);    \
233                 _tmp != &all_mddevs;});                                 \
234              ({ spin_lock(&all_mddevs_lock);                            \
235                 _tmp = _tmp->next;})                                    \
236                 )
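
/*
 * Illustrative use (a sketch; the variable names are the caller's own):
 *
 *	struct mddev *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp)
 *		pr_info("%s\n", mdname(mddev));
 *
 * On normal termination the macro has dropped its reference; a caller
 * that breaks out early still holds one and must call mddev_put().
 */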
237
238 /* Rather than calling directly into the personality make_request function,
239  * IO requests come here first so that we can check if the device is
240  * being suspended pending a reconfiguration.
241  * We hold a refcount over the call to ->make_request.  By the time that
242  * call has finished, the bio has been linked into some internal structure
243  * and so is visible to ->quiesce(), so we don't need the refcount any more.
244  */
245 static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
246 {
247         const int rw = bio_data_dir(bio);
248         struct mddev *mddev = q->queuedata;
249         unsigned int sectors;
250         int cpu;
251
252         blk_queue_split(q, &bio, q->bio_split);
253
254         if (mddev == NULL || mddev->pers == NULL) {
255                 bio_io_error(bio);
256                 return BLK_QC_T_NONE;
257         }
258         if (mddev->ro == 1 && unlikely(rw == WRITE)) {
259                 if (bio_sectors(bio) != 0)
260                         bio->bi_error = -EROFS;
261                 bio_endio(bio);
262                 return BLK_QC_T_NONE;
263         }
264         smp_rmb(); /* Ensure implications of 'active' are visible */
265         rcu_read_lock();
266         if (mddev->suspended) {
267                 DEFINE_WAIT(__wait);
268                 for (;;) {
269                         prepare_to_wait(&mddev->sb_wait, &__wait,
270                                         TASK_UNINTERRUPTIBLE);
271                         if (!mddev->suspended)
272                                 break;
273                         rcu_read_unlock();
274                         schedule();
275                         rcu_read_lock();
276                 }
277                 finish_wait(&mddev->sb_wait, &__wait);
278         }
279         atomic_inc(&mddev->active_io);
280         rcu_read_unlock();
281
282         /*
283          * save the sectors now since our bio can
284          * go away inside make_request
285          */
286         sectors = bio_sectors(bio);
287         mddev->pers->make_request(mddev, bio);
288
289         cpu = part_stat_lock();
290         part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
291         part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
292         part_stat_unlock();
293
294         if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
295                 wake_up(&mddev->sb_wait);
296
297         return BLK_QC_T_NONE;
298 }
299
300 /* mddev_suspend makes sure no new requests are submitted
301  * to the device, and that any requests that have been submitted
302  * are completely handled.
303  * Once mddev_detach() is called and completes, the module will be
304  * completely unused.
305  */
306 void mddev_suspend(struct mddev *mddev)
307 {
308         WARN_ON_ONCE(current == mddev->thread->tsk);
309         if (mddev->suspended++)
310                 return;
311         synchronize_rcu();
312         wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
313         mddev->pers->quiesce(mddev, 1);
314
315         del_timer_sync(&mddev->safemode_timer);
316 }
317 EXPORT_SYMBOL_GPL(mddev_suspend);
318
319 void mddev_resume(struct mddev *mddev)
320 {
321         if (--mddev->suspended)
322                 return;
323         wake_up(&mddev->sb_wait);
324         mddev->pers->quiesce(mddev, 0);
325
326         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
327         md_wakeup_thread(mddev->thread);
328         md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
329 }
330 EXPORT_SYMBOL_GPL(mddev_resume);
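
/*
 * The suspend/resume pair nests (note the ++/-- of ->suspended above),
 * so a typical caller simply brackets a reconfiguration, e.g.:
 *
 *	mddev_suspend(mddev);
 *	... alter internal state that ->make_request must not see ...
 *	mddev_resume(mddev);
 */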
331
332 int mddev_congested(struct mddev *mddev, int bits)
333 {
334         struct md_personality *pers = mddev->pers;
335         int ret = 0;
336
337         rcu_read_lock();
338         if (mddev->suspended)
339                 ret = 1;
340         else if (pers && pers->congested)
341                 ret = pers->congested(mddev, bits);
342         rcu_read_unlock();
343         return ret;
344 }
345 EXPORT_SYMBOL_GPL(mddev_congested);
346 static int md_congested(void *data, int bits)
347 {
348         struct mddev *mddev = data;
349         return mddev_congested(mddev, bits);
350 }
351
352 /*
353  * Generic flush handling for md
354  */
355
356 static void md_end_flush(struct bio *bio)
357 {
358         struct md_rdev *rdev = bio->bi_private;
359         struct mddev *mddev = rdev->mddev;
360
361         rdev_dec_pending(rdev, mddev);
362
363         if (atomic_dec_and_test(&mddev->flush_pending)) {
364                 /* The pre-request flush has finished */
365                 queue_work(md_wq, &mddev->flush_work);
366         }
367         bio_put(bio);
368 }
369
370 static void md_submit_flush_data(struct work_struct *ws);
371
372 static void submit_flushes(struct work_struct *ws)
373 {
374         struct mddev *mddev = container_of(ws, struct mddev, flush_work);
375         struct md_rdev *rdev;
376
377         INIT_WORK(&mddev->flush_work, md_submit_flush_data);
378         atomic_set(&mddev->flush_pending, 1);
379         rcu_read_lock();
380         rdev_for_each_rcu(rdev, mddev)
381                 if (rdev->raid_disk >= 0 &&
382                     !test_bit(Faulty, &rdev->flags)) {
383                         /* Take two references: one is dropped
384                          * when the request finishes, the other after
385                          * we re-acquire rcu_read_lock below
386                          */
387                         struct bio *bi;
388                         atomic_inc(&rdev->nr_pending);
389                         atomic_inc(&rdev->nr_pending);
390                         rcu_read_unlock();
391                         bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
392                         bi->bi_end_io = md_end_flush;
393                         bi->bi_private = rdev;
394                         bi->bi_bdev = rdev->bdev;
395                         atomic_inc(&mddev->flush_pending);
396                         submit_bio(WRITE_FLUSH, bi);
397                         rcu_read_lock();
398                         rdev_dec_pending(rdev, mddev);
399                 }
400         rcu_read_unlock();
401         if (atomic_dec_and_test(&mddev->flush_pending))
402                 queue_work(md_wq, &mddev->flush_work);
403 }
404
405 static void md_submit_flush_data(struct work_struct *ws)
406 {
407         struct mddev *mddev = container_of(ws, struct mddev, flush_work);
408         struct bio *bio = mddev->flush_bio;
409
410         if (bio->bi_iter.bi_size == 0)
411                 /* an empty barrier - all done */
412                 bio_endio(bio);
413         else {
414                 bio->bi_rw &= ~REQ_FLUSH;
415                 mddev->pers->make_request(mddev, bio);
416         }
417
418         mddev->flush_bio = NULL;
419         wake_up(&mddev->sb_wait);
420 }
421
422 void md_flush_request(struct mddev *mddev, struct bio *bio)
423 {
424         spin_lock_irq(&mddev->lock);
425         wait_event_lock_irq(mddev->sb_wait,
426                             !mddev->flush_bio,
427                             mddev->lock);
428         mddev->flush_bio = bio;
429         spin_unlock_irq(&mddev->lock);
430
431         INIT_WORK(&mddev->flush_work, submit_flushes);
432         queue_work(md_wq, &mddev->flush_work);
433 }
434 EXPORT_SYMBOL(md_flush_request);
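
/*
 * To summarise the flush path above: md_flush_request() parks the
 * incoming bio in ->flush_bio, submit_flushes() sends an empty
 * WRITE_FLUSH bio to every active rdev, and once the last of those
 * completes md_submit_flush_data() re-issues the original bio with
 * REQ_FLUSH cleared (or ends it immediately if it carried no data).
 */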
435
436 void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
437 {
438         struct mddev *mddev = cb->data;
439         md_wakeup_thread(mddev->thread);
440         kfree(cb);
441 }
442 EXPORT_SYMBOL(md_unplug);
443
444 static inline struct mddev *mddev_get(struct mddev *mddev)
445 {
446         atomic_inc(&mddev->active);
447         return mddev;
448 }
449
450 static void mddev_delayed_delete(struct work_struct *ws);
451
452 static void mddev_put(struct mddev *mddev)
453 {
454         struct bio_set *bs = NULL;
455
456         if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
457                 return;
458         if (!mddev->raid_disks && list_empty(&mddev->disks) &&
459             mddev->ctime == 0 && !mddev->hold_active) {
460                 /* Array is not configured at all, and not held active,
461                  * so destroy it */
462                 list_del_init(&mddev->all_mddevs);
463                 bs = mddev->bio_set;
464                 mddev->bio_set = NULL;
465                 if (mddev->gendisk) {
466                         /* We did a probe so we need to clean up.  Call
467                          * queue_work inside the spinlock so that
468                          * flush_workqueue() after mddev_find will
469                          * succeed in waiting for the work to be done.
470                          */
471                         INIT_WORK(&mddev->del_work, mddev_delayed_delete);
472                         queue_work(md_misc_wq, &mddev->del_work);
473                 } else
474                         kfree(mddev);
475         }
476         spin_unlock(&all_mddevs_lock);
477         if (bs)
478                 bioset_free(bs);
479 }
480
481 static void md_safemode_timeout(unsigned long data);
482
483 void mddev_init(struct mddev *mddev)
484 {
485         mutex_init(&mddev->open_mutex);
486         mutex_init(&mddev->reconfig_mutex);
487         mutex_init(&mddev->bitmap_info.mutex);
488         INIT_LIST_HEAD(&mddev->disks);
489         INIT_LIST_HEAD(&mddev->all_mddevs);
490         setup_timer(&mddev->safemode_timer, md_safemode_timeout,
491                     (unsigned long) mddev);
492         atomic_set(&mddev->active, 1);
493         atomic_set(&mddev->openers, 0);
494         atomic_set(&mddev->active_io, 0);
495         spin_lock_init(&mddev->lock);
496         atomic_set(&mddev->flush_pending, 0);
497         init_waitqueue_head(&mddev->sb_wait);
498         init_waitqueue_head(&mddev->recovery_wait);
499         mddev->reshape_position = MaxSector;
500         mddev->reshape_backwards = 0;
501         mddev->last_sync_action = "none";
502         mddev->resync_min = 0;
503         mddev->resync_max = MaxSector;
504         mddev->level = LEVEL_NONE;
505 }
506 EXPORT_SYMBOL_GPL(mddev_init);
507
508 static struct mddev *mddev_find(dev_t unit)
509 {
510         struct mddev *mddev, *new = NULL;
511
512         if (unit && MAJOR(unit) != MD_MAJOR)
513                 unit &= ~((1<<MdpMinorShift)-1);
514
515  retry:
516         spin_lock(&all_mddevs_lock);
517
518         if (unit) {
519                 list_for_each_entry(mddev, &all_mddevs, all_mddevs)
520                         if (mddev->unit == unit) {
521                                 mddev_get(mddev);
522                                 spin_unlock(&all_mddevs_lock);
523                                 kfree(new);
524                                 return mddev;
525                         }
526
527                 if (new) {
528                         list_add(&new->all_mddevs, &all_mddevs);
529                         spin_unlock(&all_mddevs_lock);
530                         new->hold_active = UNTIL_IOCTL;
531                         return new;
532                 }
533         } else if (new) {
534                 /* find an unused unit number */
535                 static int next_minor = 512;
536                 int start = next_minor;
537                 int is_free = 0;
538                 int dev = 0;
539                 while (!is_free) {
540                         dev = MKDEV(MD_MAJOR, next_minor);
541                         next_minor++;
542                         if (next_minor > MINORMASK)
543                                 next_minor = 0;
544                         if (next_minor == start) {
545                                 /* Oh dear, all in use. */
546                                 spin_unlock(&all_mddevs_lock);
547                                 kfree(new);
548                                 return NULL;
549                         }
550
551                         is_free = 1;
552                         list_for_each_entry(mddev, &all_mddevs, all_mddevs)
553                                 if (mddev->unit == dev) {
554                                         is_free = 0;
555                                         break;
556                                 }
557                 }
558                 new->unit = dev;
559                 new->md_minor = MINOR(dev);
560                 new->hold_active = UNTIL_STOP;
561                 list_add(&new->all_mddevs, &all_mddevs);
562                 spin_unlock(&all_mddevs_lock);
563                 return new;
564         }
565         spin_unlock(&all_mddevs_lock);
566
567         new = kzalloc(sizeof(*new), GFP_KERNEL);
568         if (!new)
569                 return NULL;
570
571         new->unit = unit;
572         if (MAJOR(unit) == MD_MAJOR)
573                 new->md_minor = MINOR(unit);
574         else
575                 new->md_minor = MINOR(unit) >> MdpMinorShift;
576
577         mddev_init(new);
578
579         goto retry;
580 }
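
/*
 * Note the allocate-then-retry pattern above: if no matching mddev is
 * found on the first pass, all_mddevs_lock is dropped, a new mddev is
 * kzalloc'ed, and we "goto retry" so the list is re-checked under the
 * lock before the new entry is linked in.
 */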
581
582 static struct attribute_group md_redundancy_group;
583
584 void mddev_unlock(struct mddev *mddev)
585 {
586         if (mddev->to_remove) {
587                 /* These cannot be removed under reconfig_mutex as
588                  * an access to the files will try to take reconfig_mutex
589                  * while holding the file unremovable, which leads to
590                  * a deadlock.
591          * So we set sysfs_active while the remove is happening,
592          * and anything else which might set ->to_remove or may
593                  * otherwise change the sysfs namespace will fail with
594                  * -EBUSY if sysfs_active is still set.
595                  * We set sysfs_active under reconfig_mutex and elsewhere
596                  * test it under the same mutex to ensure its correct value
597                  * is seen.
598                  */
599                 struct attribute_group *to_remove = mddev->to_remove;
600                 mddev->to_remove = NULL;
601                 mddev->sysfs_active = 1;
602                 mutex_unlock(&mddev->reconfig_mutex);
603
604                 if (mddev->kobj.sd) {
605                         if (to_remove != &md_redundancy_group)
606                                 sysfs_remove_group(&mddev->kobj, to_remove);
607                         if (mddev->pers == NULL ||
608                             mddev->pers->sync_request == NULL) {
609                                 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
610                                 if (mddev->sysfs_action)
611                                         sysfs_put(mddev->sysfs_action);
612                                 mddev->sysfs_action = NULL;
613                         }
614                 }
615                 mddev->sysfs_active = 0;
616         } else
617                 mutex_unlock(&mddev->reconfig_mutex);
618
619         /* As we've dropped the mutex we need a spinlock to
620          * make sure the thread doesn't disappear
621          */
622         spin_lock(&pers_lock);
623         md_wakeup_thread(mddev->thread);
624         spin_unlock(&pers_lock);
625 }
626 EXPORT_SYMBOL_GPL(mddev_unlock);
627
628 struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
629 {
630         struct md_rdev *rdev;
631
632         rdev_for_each_rcu(rdev, mddev)
633                 if (rdev->desc_nr == nr)
634                         return rdev;
635
636         return NULL;
637 }
638 EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);
639
640 static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
641 {
642         struct md_rdev *rdev;
643
644         rdev_for_each(rdev, mddev)
645                 if (rdev->bdev->bd_dev == dev)
646                         return rdev;
647
648         return NULL;
649 }
650
651 static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev)
652 {
653         struct md_rdev *rdev;
654
655         rdev_for_each_rcu(rdev, mddev)
656                 if (rdev->bdev->bd_dev == dev)
657                         return rdev;
658
659         return NULL;
660 }
661
662 static struct md_personality *find_pers(int level, char *clevel)
663 {
664         struct md_personality *pers;
665         list_for_each_entry(pers, &pers_list, list) {
666                 if (level != LEVEL_NONE && pers->level == level)
667                         return pers;
668                 if (strcmp(pers->name, clevel)==0)
669                         return pers;
670         }
671         return NULL;
672 }
673
674 /* return the offset of the super block in 512-byte sectors */
675 static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
676 {
677         sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
678         return MD_NEW_SIZE_SECTORS(num_sectors);
679 }
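
/*
 * MD_NEW_SIZE_SECTORS (from md_p.h) rounds the device size down to a
 * 64KB boundary and then steps back one further 64KB block, so the
 * 0.90 superblock occupies the last 64KB-aligned 64KB of the device.
 */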
680
681 static int alloc_disk_sb(struct md_rdev *rdev)
682 {
683         rdev->sb_page = alloc_page(GFP_KERNEL);
684         if (!rdev->sb_page) {
685                 printk(KERN_ALERT "md: out of memory.\n");
686                 return -ENOMEM;
687         }
688
689         return 0;
690 }
691
692 void md_rdev_clear(struct md_rdev *rdev)
693 {
694         if (rdev->sb_page) {
695                 put_page(rdev->sb_page);
696                 rdev->sb_loaded = 0;
697                 rdev->sb_page = NULL;
698                 rdev->sb_start = 0;
699                 rdev->sectors = 0;
700         }
701         if (rdev->bb_page) {
702                 put_page(rdev->bb_page);
703                 rdev->bb_page = NULL;
704         }
705         badblocks_exit(&rdev->badblocks);
706 }
707 EXPORT_SYMBOL_GPL(md_rdev_clear);
708
709 static void super_written(struct bio *bio)
710 {
711         struct md_rdev *rdev = bio->bi_private;
712         struct mddev *mddev = rdev->mddev;
713
714         if (bio->bi_error) {
715                 printk("md: super_written gets error=%d\n", bio->bi_error);
716                 md_error(mddev, rdev);
717         }
718
719         if (atomic_dec_and_test(&mddev->pending_writes))
720                 wake_up(&mddev->sb_wait);
721         bio_put(bio);
722 }
723
724 void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
725                    sector_t sector, int size, struct page *page)
726 {
727         /* Write the first 'size' bytes of 'page' to 'sector' of rdev.
728          * Increment mddev->pending_writes before returning
729          * and decrement it on completion, waking up sb_wait
730          * if zero is reached.
731          * If an error occurred, call md_error
732          */
733         struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
734
735         bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
736         bio->bi_iter.bi_sector = sector;
737         bio_add_page(bio, page, size, 0);
738         bio->bi_private = rdev;
739         bio->bi_end_io = super_written;
740
741         atomic_inc(&mddev->pending_writes);
742         submit_bio(WRITE_FLUSH_FUA, bio);
743 }
744
745 void md_super_wait(struct mddev *mddev)
746 {
747         /* wait for all superblock writes that were scheduled to complete */
748         wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
749 }
750
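/*
 * Synchronously read or write one page-sized region of an rdev.  For
 * non-metadata I/O during a reshape, the sector is mapped through
 * new_data_offset or data_offset depending on which side of
 * reshape_position it falls, matching the direction of the reshape.
 */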
751 int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
752                  struct page *page, int rw, bool metadata_op)
753 {
754         struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
755         int ret;
756
757         bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
758                 rdev->meta_bdev : rdev->bdev;
759         if (metadata_op)
760                 bio->bi_iter.bi_sector = sector + rdev->sb_start;
761         else if (rdev->mddev->reshape_position != MaxSector &&
762                  (rdev->mddev->reshape_backwards ==
763                   (sector >= rdev->mddev->reshape_position)))
764                 bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
765         else
766                 bio->bi_iter.bi_sector = sector + rdev->data_offset;
767         bio_add_page(bio, page, size, 0);
768         submit_bio_wait(rw, bio);
769
770         ret = !bio->bi_error;
771         bio_put(bio);
772         return ret;
773 }
774 EXPORT_SYMBOL_GPL(sync_page_io);
775
776 static int read_disk_sb(struct md_rdev *rdev, int size)
777 {
778         char b[BDEVNAME_SIZE];
779
780         if (rdev->sb_loaded)
781                 return 0;
782
783         if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true))
784                 goto fail;
785         rdev->sb_loaded = 1;
786         return 0;
787
788 fail:
789         printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
790                 bdevname(rdev->bdev,b));
791         return -EINVAL;
792 }
793
794 static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
795 {
796         return  sb1->set_uuid0 == sb2->set_uuid0 &&
797                 sb1->set_uuid1 == sb2->set_uuid1 &&
798                 sb1->set_uuid2 == sb2->set_uuid2 &&
799                 sb1->set_uuid3 == sb2->set_uuid3;
800 }
801
802 static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
803 {
804         int ret;
805         mdp_super_t *tmp1, *tmp2;
806
807         tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
808         tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
809
810         if (!tmp1 || !tmp2) {
811                 ret = 0;
812                 printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
813                 goto abort;
814         }
815
816         *tmp1 = *sb1;
817         *tmp2 = *sb2;
818
819         /*
820          * nr_disks is not constant
821          */
822         tmp1->nr_disks = 0;
823         tmp2->nr_disks = 0;
824
825         ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
826 abort:
827         kfree(tmp1);
828         kfree(tmp2);
829         return ret;
830 }
831
832 static u32 md_csum_fold(u32 csum)
833 {
834         csum = (csum & 0xffff) + (csum >> 16);
835         return (csum & 0xffff) + (csum >> 16);
836 }
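
/*
 * md_csum_fold() folds a 32-bit sum down to 16 bits, e.g.
 * 0x12345678 -> 0x5678 + 0x1234 = 0x68ac; the second round absorbs
 * any carry out of the first addition.
 */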
837
838 static unsigned int calc_sb_csum(mdp_super_t *sb)
839 {
840         u64 newcsum = 0;
841         u32 *sb32 = (u32*)sb;
842         int i;
843         unsigned int disk_csum, csum;
844
845         disk_csum = sb->sb_csum;
846         sb->sb_csum = 0;
847
848         for (i = 0; i < MD_SB_BYTES/4 ; i++)
849                 newcsum += sb32[i];
850         csum = (newcsum & 0xffffffff) + (newcsum>>32);
851
852 #ifdef CONFIG_ALPHA
853         /* This used to use csum_partial, which was wrong for several
854          * reasons including that different results are returned on
855          * different architectures.  It isn't critical that we get exactly
856          * the same return value as before (we always csum_fold before
857          * testing, and that removes any differences).  However as we
858          * know that csum_partial always returned a 16bit value on
859          * alphas, do a fold to maximise conformity to previous behaviour.
860          */
861         sb->sb_csum = md_csum_fold(disk_csum);
862 #else
863         sb->sb_csum = disk_csum;
864 #endif
865         return csum;
866 }
867
868 /*
869  * Handle superblock details.
870  * We want to be able to handle multiple superblock formats
871  * so we have a common interface to them all, and an array of
872  * different handlers.
873  * We rely on user-space to write the initial superblock, and support
874  * reading and updating of superblocks.
875  * Interface methods are:
876  *   int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
877  *      loads and validates a superblock on dev.
878  *      if refdev != NULL, compare superblocks on both devices
879  *    Return:
880  *      0 - dev has a superblock that is compatible with refdev
881  *      1 - dev has a superblock that is compatible and newer than refdev
882  *          so dev should be used as the refdev in future
883  *     -EINVAL superblock incompatible or invalid
884  *     -othererror e.g. -EIO
885  *
886  *   int validate_super(struct mddev *mddev, struct md_rdev *dev)
887  *      Verify that dev is acceptable into mddev.
888  *       The first time, mddev->raid_disks will be 0, and data from
889  *       dev should be merged in.  Subsequent calls check that dev
890  *       is new enough.  Return 0 or -EINVAL
891  *
892  *   void sync_super(struct mddev *mddev, struct md_rdev *dev)
893  *     Update the superblock for rdev with data in mddev.
894  *     This does not write to disc.
895  *
896  */
897
898 struct super_type  {
899         char                *name;
900         struct module       *owner;
901         int                 (*load_super)(struct md_rdev *rdev,
902                                           struct md_rdev *refdev,
903                                           int minor_version);
904         int                 (*validate_super)(struct mddev *mddev,
905                                               struct md_rdev *rdev);
906         void                (*sync_super)(struct mddev *mddev,
907                                           struct md_rdev *rdev);
908         unsigned long long  (*rdev_size_change)(struct md_rdev *rdev,
909                                                 sector_t num_sectors);
910         int                 (*allow_new_offset)(struct md_rdev *rdev,
911                                                 unsigned long long new_offset);
912 };
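
/*
 * One instance of these ops exists per on-disk format (0.90 and
 * version-1); they are gathered into the super_types[] array further
 * down in this file.
 */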
913
914 /*
915  * Check that the given mddev has no bitmap.
916  *
917  * This function is called from the run method of all personalities that do not
918  * support bitmaps. It prints an error message and returns non-zero if mddev
919  * has a bitmap. Otherwise, it returns 0.
920  *
921  */
922 int md_check_no_bitmap(struct mddev *mddev)
923 {
924         if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
925                 return 0;
926         printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
927                 mdname(mddev), mddev->pers->name);
928         return 1;
929 }
930 EXPORT_SYMBOL(md_check_no_bitmap);
931
932 /*
933  * load_super for 0.90.0
934  */
935 static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
936 {
937         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
938         mdp_super_t *sb;
939         int ret;
940
941         /*
942          * Calculate the position of the superblock (in 512-byte sectors);
943          * it's at the end of the disk.
944          *
945          * It also happens to be a multiple of 4KB.
946          */
947         rdev->sb_start = calc_dev_sboffset(rdev);
948
949         ret = read_disk_sb(rdev, MD_SB_BYTES);
950         if (ret) return ret;
951
952         ret = -EINVAL;
953
954         bdevname(rdev->bdev, b);
955         sb = page_address(rdev->sb_page);
956
957         if (sb->md_magic != MD_SB_MAGIC) {
958                 printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
959                        b);
960                 goto abort;
961         }
962
963         if (sb->major_version != 0 ||
964             sb->minor_version < 90 ||
965             sb->minor_version > 91) {
966                 printk(KERN_WARNING "Bad version number %d.%d on %s\n",
967                         sb->major_version, sb->minor_version,
968                         b);
969                 goto abort;
970         }
971
972         if (sb->raid_disks <= 0)
973                 goto abort;
974
975         if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
976                 printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
977                         b);
978                 goto abort;
979         }
980
981         rdev->preferred_minor = sb->md_minor;
982         rdev->data_offset = 0;
983         rdev->new_data_offset = 0;
984         rdev->sb_size = MD_SB_BYTES;
985         rdev->badblocks.shift = -1;
986
987         if (sb->level == LEVEL_MULTIPATH)
988                 rdev->desc_nr = -1;
989         else
990                 rdev->desc_nr = sb->this_disk.number;
991
992         if (!refdev) {
993                 ret = 1;
994         } else {
995                 __u64 ev1, ev2;
996                 mdp_super_t *refsb = page_address(refdev->sb_page);
997                 if (!uuid_equal(refsb, sb)) {
998                         printk(KERN_WARNING "md: %s has different UUID to %s\n",
999                                 b, bdevname(refdev->bdev,b2));
1000                         goto abort;
1001                 }
1002                 if (!sb_equal(refsb, sb)) {
1003                         printk(KERN_WARNING "md: %s has same UUID"
1004                                " but different superblock to %s\n",
1005                                b, bdevname(refdev->bdev, b2));
1006                         goto abort;
1007                 }
1008                 ev1 = md_event(sb);
1009                 ev2 = md_event(refsb);
1010                 if (ev1 > ev2)
1011                         ret = 1;
1012                 else
1013                         ret = 0;
1014         }
1015         rdev->sectors = rdev->sb_start;
1016         /* Limit to 4TB as metadata cannot record more than that.
1017          * (not needed for Linear and RAID0 as metadata doesn't
1018          * record this size)
1019          */
1020         if (IS_ENABLED(CONFIG_LBDAF) && (u64)rdev->sectors >= (2ULL << 32) &&
1021             sb->level >= 1)
1022                 rdev->sectors = (sector_t)(2ULL << 32) - 2;
1023
1024         if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
1025                 /* "this cannot possibly happen" ... */
1026                 ret = -EINVAL;
1027
1028  abort:
1029         return ret;
1030 }
1031
1032 /*
1033  * validate_super for 0.90.0
1034  */
1035 static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
1036 {
1037         mdp_disk_t *desc;
1038         mdp_super_t *sb = page_address(rdev->sb_page);
1039         __u64 ev1 = md_event(sb);
1040
1041         rdev->raid_disk = -1;
1042         clear_bit(Faulty, &rdev->flags);
1043         clear_bit(In_sync, &rdev->flags);
1044         clear_bit(Bitmap_sync, &rdev->flags);
1045         clear_bit(WriteMostly, &rdev->flags);
1046
1047         if (mddev->raid_disks == 0) {
1048                 mddev->major_version = 0;
1049                 mddev->minor_version = sb->minor_version;
1050                 mddev->patch_version = sb->patch_version;
1051                 mddev->external = 0;
1052                 mddev->chunk_sectors = sb->chunk_size >> 9;
1053                 mddev->ctime = sb->ctime;
1054                 mddev->utime = sb->utime;
1055                 mddev->level = sb->level;
1056                 mddev->clevel[0] = 0;
1057                 mddev->layout = sb->layout;
1058                 mddev->raid_disks = sb->raid_disks;
1059                 mddev->dev_sectors = ((sector_t)sb->size) * 2;
1060                 mddev->events = ev1;
1061                 mddev->bitmap_info.offset = 0;
1062                 mddev->bitmap_info.space = 0;
1063                 /* bitmap can use 60 K after the 4K superblocks */
1064                 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
1065                 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
1066                 mddev->reshape_backwards = 0;
1067
1068                 if (mddev->minor_version >= 91) {
1069                         mddev->reshape_position = sb->reshape_position;
1070                         mddev->delta_disks = sb->delta_disks;
1071                         mddev->new_level = sb->new_level;
1072                         mddev->new_layout = sb->new_layout;
1073                         mddev->new_chunk_sectors = sb->new_chunk >> 9;
1074                         if (mddev->delta_disks < 0)
1075                                 mddev->reshape_backwards = 1;
1076                 } else {
1077                         mddev->reshape_position = MaxSector;
1078                         mddev->delta_disks = 0;
1079                         mddev->new_level = mddev->level;
1080                         mddev->new_layout = mddev->layout;
1081                         mddev->new_chunk_sectors = mddev->chunk_sectors;
1082                 }
1083
1084                 if (sb->state & (1<<MD_SB_CLEAN))
1085                         mddev->recovery_cp = MaxSector;
1086                 else {
1087                         if (sb->events_hi == sb->cp_events_hi &&
1088                                 sb->events_lo == sb->cp_events_lo) {
1089                                 mddev->recovery_cp = sb->recovery_cp;
1090                         } else
1091                                 mddev->recovery_cp = 0;
1092                 }
1093
1094                 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
1095                 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
1096                 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
1097                 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
1098
1099                 mddev->max_disks = MD_SB_DISKS;
1100
1101                 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
1102                     mddev->bitmap_info.file == NULL) {
1103                         mddev->bitmap_info.offset =
1104                                 mddev->bitmap_info.default_offset;
1105                         mddev->bitmap_info.space =
1106                                 mddev->bitmap_info.default_space;
1107                 }
1108
1109         } else if (mddev->pers == NULL) {
1110                 /* Insist on good event counter while assembling, except
1111                  * for spares (which don't need an event count) */
1112                 ++ev1;
1113                 if (sb->disks[rdev->desc_nr].state & (
1114                             (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
1115                         if (ev1 < mddev->events)
1116                                 return -EINVAL;
1117         } else if (mddev->bitmap) {
1118                 /* if adding to array with a bitmap, then we can accept an
1119                  * older device ... but not too old.
1120                  */
1121                 if (ev1 < mddev->bitmap->events_cleared)
1122                         return 0;
1123                 if (ev1 < mddev->events)
1124                         set_bit(Bitmap_sync, &rdev->flags);
1125         } else {
1126                 if (ev1 < mddev->events)
1127                         /* just a hot-add of a new device, leave raid_disk at -1 */
1128                         return 0;
1129         }
1130
1131         if (mddev->level != LEVEL_MULTIPATH) {
1132                 desc = sb->disks + rdev->desc_nr;
1133
1134                 if (desc->state & (1<<MD_DISK_FAULTY))
1135                         set_bit(Faulty, &rdev->flags);
1136                 else if (desc->state & (1<<MD_DISK_SYNC) /* &&
1137                             desc->raid_disk < mddev->raid_disks */) {
1138                         set_bit(In_sync, &rdev->flags);
1139                         rdev->raid_disk = desc->raid_disk;
1140                         rdev->saved_raid_disk = desc->raid_disk;
1141                 } else if (desc->state & (1<<MD_DISK_ACTIVE)) {
1142                         /* active but not in sync implies recovery up to
1143                          * reshape position.  We don't know exactly where
1144                          * that is, so set to zero for now */
1145                         if (mddev->minor_version >= 91) {
1146                                 rdev->recovery_offset = 0;
1147                                 rdev->raid_disk = desc->raid_disk;
1148                         }
1149                 }
1150                 if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
1151                         set_bit(WriteMostly, &rdev->flags);
1152         } else /* MULTIPATH are always insync */
1153                 set_bit(In_sync, &rdev->flags);
1154         return 0;
1155 }
1156
1157 /*
1158  * sync_super for 0.90.0
1159  */
1160 static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
1161 {
1162         mdp_super_t *sb;
1163         struct md_rdev *rdev2;
1164         int next_spare = mddev->raid_disks;
1165
1166         /* make rdev->sb match mddev data..
1167          *
1168          * 1/ zero out disks
1169          * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
1170          * 3/ any empty disks < next_spare become removed
1171          *
1172          * disks[0] gets initialised to REMOVED because
1173          * we cannot be sure from other fields if it has
1174          * been initialised or not.
1175          */
1176         int i;
1177         int active=0, working=0,failed=0,spare=0,nr_disks=0;
1178
1179         rdev->sb_size = MD_SB_BYTES;
1180
1181         sb = page_address(rdev->sb_page);
1182
1183         memset(sb, 0, sizeof(*sb));
1184
1185         sb->md_magic = MD_SB_MAGIC;
1186         sb->major_version = mddev->major_version;
1187         sb->patch_version = mddev->patch_version;
1188         sb->gvalid_words  = 0; /* ignored */
1189         memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
1190         memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
1191         memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
1192         memcpy(&sb->set_uuid3, mddev->uuid+12,4);
1193
1194         sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
1195         sb->level = mddev->level;
1196         sb->size = mddev->dev_sectors / 2;
1197         sb->raid_disks = mddev->raid_disks;
1198         sb->md_minor = mddev->md_minor;
1199         sb->not_persistent = 0;
1200         sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
1201         sb->state = 0;
1202         sb->events_hi = (mddev->events>>32);
1203         sb->events_lo = (u32)mddev->events;
1204
1205         if (mddev->reshape_position == MaxSector)
1206                 sb->minor_version = 90;
1207         else {
1208                 sb->minor_version = 91;
1209                 sb->reshape_position = mddev->reshape_position;
1210                 sb->new_level = mddev->new_level;
1211                 sb->delta_disks = mddev->delta_disks;
1212                 sb->new_layout = mddev->new_layout;
1213                 sb->new_chunk = mddev->new_chunk_sectors << 9;
1214         }
1215         mddev->minor_version = sb->minor_version;
1216         if (mddev->in_sync)
1217         {
1218                 sb->recovery_cp = mddev->recovery_cp;
1219                 sb->cp_events_hi = (mddev->events>>32);
1220                 sb->cp_events_lo = (u32)mddev->events;
1221                 if (mddev->recovery_cp == MaxSector)
1222                         sb->state = (1<< MD_SB_CLEAN);
1223         } else
1224                 sb->recovery_cp = 0;
1225
1226         sb->layout = mddev->layout;
1227         sb->chunk_size = mddev->chunk_sectors << 9;
1228
1229         if (mddev->bitmap && mddev->bitmap_info.file == NULL)
1230                 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
1231
1232         sb->disks[0].state = (1<<MD_DISK_REMOVED);
1233         rdev_for_each(rdev2, mddev) {
1234                 mdp_disk_t *d;
1235                 int desc_nr;
1236                 int is_active = test_bit(In_sync, &rdev2->flags);
1237
1238                 if (rdev2->raid_disk >= 0 &&
1239                     sb->minor_version >= 91)
1240                         /* we have nowhere to store the recovery_offset,
1241                          * but if it is not below the reshape_position,
1242                          * we can piggy-back on that.
1243                          */
1244                         is_active = 1;
1245                 if (rdev2->raid_disk < 0 ||
1246                     test_bit(Faulty, &rdev2->flags))
1247                         is_active = 0;
1248                 if (is_active)
1249                         desc_nr = rdev2->raid_disk;
1250                 else
1251                         desc_nr = next_spare++;
1252                 rdev2->desc_nr = desc_nr;
1253                 d = &sb->disks[rdev2->desc_nr];
1254                 nr_disks++;
1255                 d->number = rdev2->desc_nr;
1256                 d->major = MAJOR(rdev2->bdev->bd_dev);
1257                 d->minor = MINOR(rdev2->bdev->bd_dev);
1258                 if (is_active)
1259                         d->raid_disk = rdev2->raid_disk;
1260                 else
1261                         d->raid_disk = rdev2->desc_nr; /* compatibility */
1262                 if (test_bit(Faulty, &rdev2->flags))
1263                         d->state = (1<<MD_DISK_FAULTY);
1264                 else if (is_active) {
1265                         d->state = (1<<MD_DISK_ACTIVE);
1266                         if (test_bit(In_sync, &rdev2->flags))
1267                                 d->state |= (1<<MD_DISK_SYNC);
1268                         active++;
1269                         working++;
1270                 } else {
1271                         d->state = 0;
1272                         spare++;
1273                         working++;
1274                 }
1275                 if (test_bit(WriteMostly, &rdev2->flags))
1276                         d->state |= (1<<MD_DISK_WRITEMOSTLY);
1277         }
1278         /* now set the "removed" and "faulty" bits on any missing devices */
1279         for (i=0 ; i < mddev->raid_disks ; i++) {
1280                 mdp_disk_t *d = &sb->disks[i];
1281                 if (d->state == 0 && d->number == 0) {
1282                         d->number = i;
1283                         d->raid_disk = i;
1284                         d->state = (1<<MD_DISK_REMOVED);
1285                         d->state |= (1<<MD_DISK_FAULTY);
1286                         failed++;
1287                 }
1288         }
1289         sb->nr_disks = nr_disks;
1290         sb->active_disks = active;
1291         sb->working_disks = working;
1292         sb->failed_disks = failed;
1293         sb->spare_disks = spare;
1294
1295         sb->this_disk = sb->disks[rdev->desc_nr];
1296         sb->sb_csum = calc_sb_csum(sb);
1297 }
1298
1299 /*
1300  * rdev_size_change for 0.90.0
1301  */
1302 static unsigned long long
1303 super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
1304 {
1305         if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1306                 return 0; /* component must fit device */
1307         if (rdev->mddev->bitmap_info.offset)
1308                 return 0; /* can't move bitmap */
1309         rdev->sb_start = calc_dev_sboffset(rdev);
1310         if (!num_sectors || num_sectors > rdev->sb_start)
1311                 num_sectors = rdev->sb_start;
1312         /* Limit to 4TB as metadata cannot record more than that.
1313          * 4TB == 2^32 KB, or 2*2^32 sectors.
1314          */
1315         if (IS_ENABLED(CONFIG_LBDAF) && (u64)num_sectors >= (2ULL << 32) &&
1316             rdev->mddev->level >= 1)
1317                 num_sectors = (sector_t)(2ULL << 32) - 2;
1318         md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1319                        rdev->sb_page);
1320         md_super_wait(rdev->mddev);
1321         return num_sectors;
1322 }
1323
1324 static int
1325 super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
1326 {
1327         /* non-zero offset changes not possible with v0.90 */
1328         return new_offset == 0;
1329 }
1330
1331 /*
1332  * version 1 superblock
1333  */
1334
1335 static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
1336 {
1337         __le32 disk_csum;
1338         u32 csum;
1339         unsigned long long newcsum;
1340         int size = 256 + le32_to_cpu(sb->max_dev)*2;
1341         __le32 *isuper = (__le32*)sb;
1342
1343         disk_csum = sb->sb_csum;
1344         sb->sb_csum = 0;
1345         newcsum = 0;
1346         for (; size >= 4; size -= 4)
1347                 newcsum += le32_to_cpu(*isuper++);
1348
1349         if (size == 2)
1350                 newcsum += le16_to_cpu(*(__le16*) isuper);
1351
1352         csum = (newcsum & 0xffffffff) + (newcsum >> 32);
1353         sb->sb_csum = disk_csum;
1354         return cpu_to_le32(csum);
1355 }
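
/*
 * The size computed above is the 256-byte fixed header plus one __le16
 * dev_roles slot per possible device; the trailing "size == 2" case
 * folds in the final half-word when max_dev is odd.
 */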
1356
1357 static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
1358 {
1359         struct mdp_superblock_1 *sb;
1360         int ret;
1361         sector_t sb_start;
1362         sector_t sectors;
1363         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1364         int bmask;
1365
1366         /*
1367          * Calculate the position of the superblock in 512-byte sectors.
1368          * It is always aligned to a 4K boundary and
1369          * depending on minor_version, it can be:
1370          * 0: At least 8K, but less than 12K, from end of device
1371          * 1: At start of device
1372          * 2: 4K from start of device.
1373          */
1374         switch(minor_version) {
1375         case 0:
1376                 sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
1377                 sb_start -= 8*2;
1378                 sb_start &= ~(sector_t)(4*2-1);
1379                 break;
1380         case 1:
1381                 sb_start = 0;
1382                 break;
1383         case 2:
1384                 sb_start = 8;
1385                 break;
1386         default:
1387                 return -EINVAL;
1388         }
1389         rdev->sb_start = sb_start;
1390
1391         /* The superblock is rarely larger than 1K, but it can be larger,
1392          * and it is safe to read 4K, so we do that.
1393          */
1394         ret = read_disk_sb(rdev, 4096);
1395         if (ret) return ret;
1396
1397         sb = page_address(rdev->sb_page);
1398
1399         if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1400             sb->major_version != cpu_to_le32(1) ||
1401             le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
1402             le64_to_cpu(sb->super_offset) != rdev->sb_start ||
1403             (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
1404                 return -EINVAL;
1405
1406         if (calc_sb_1_csum(sb) != sb->sb_csum) {
1407                 printk("md: invalid superblock checksum on %s\n",
1408                         bdevname(rdev->bdev,b));
1409                 return -EINVAL;
1410         }
1411         if (le64_to_cpu(sb->data_size) < 10) {
1412                 printk("md: data_size too small on %s\n",
1413                        bdevname(rdev->bdev,b));
1414                 return -EINVAL;
1415         }
1416         if (sb->pad0 ||
1417             sb->pad3[0] ||
1418             memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
1419                 /* Some padding is non-zero, might be a new feature */
1420                 return -EINVAL;
1421
1422         rdev->preferred_minor = 0xffff;
1423         rdev->data_offset = le64_to_cpu(sb->data_offset);
1424         rdev->new_data_offset = rdev->data_offset;
1425         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
1426             (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
1427                 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
1428         atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
1429
1430         rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1431         bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1432         if (rdev->sb_size & bmask)
1433                 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1434
1435         if (minor_version
1436             && rdev->data_offset < sb_start + (rdev->sb_size/512))
1437                 return -EINVAL;
1438         if (minor_version
1439             && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
1440                 return -EINVAL;
1441
1442         if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
1443                 rdev->desc_nr = -1;
1444         else
1445                 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1446
1447         if (!rdev->bb_page) {
1448                 rdev->bb_page = alloc_page(GFP_KERNEL);
1449                 if (!rdev->bb_page)
1450                         return -ENOMEM;
1451         }
1452         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
1453             rdev->badblocks.count == 0) {
1454                 /* need to load the bad block list.
1455                  * Currently we limit it to one page.
1456                  */
1457                 s32 offset;
1458                 sector_t bb_sector;
1459                 u64 *bbp;
1460                 int i;
1461                 int sectors = le16_to_cpu(sb->bblog_size);
1462                 if (sectors > (PAGE_SIZE / 512))
1463                         return -EINVAL;
1464                 offset = le32_to_cpu(sb->bblog_offset);
1465                 if (offset == 0)
1466                         return -EINVAL;
1467                 bb_sector = (long long)offset;
1468                 if (!sync_page_io(rdev, bb_sector, sectors << 9,
1469                                   rdev->bb_page, READ, true))
1470                         return -EIO;
1471                 bbp = (u64 *)page_address(rdev->bb_page);
1472                 rdev->badblocks.shift = sb->bblog_shift;
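                /*
                 * On disk each 64-bit entry packs the start sector in the
                 * high 54 bits and the length in the low 10 bits, both
                 * scaled by bblog_shift; an all-ones entry terminates the
                 * list (the "bb + 1 == 0" test below).
                 */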
1473                 for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
1474                         u64 bb = le64_to_cpu(*bbp);
1475                         int count = bb & (0x3ff);
1476                         u64 sector = bb >> 10;
1477                         sector <<= sb->bblog_shift;
1478                         count <<= sb->bblog_shift;
1479                         if (bb + 1 == 0)
1480                                 break;
1481                         if (badblocks_set(&rdev->badblocks, sector, count, 1))
1482                                 return -EINVAL;
1483                 }
1484         } else if (sb->bblog_offset != 0)
1485                 rdev->badblocks.shift = 0;
1486
1487         if (!refdev) {
1488                 ret = 1;
1489         } else {
1490                 __u64 ev1, ev2;
1491                 struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
1492
1493                 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1494                     sb->level != refsb->level ||
1495                     sb->layout != refsb->layout ||
1496                     sb->chunksize != refsb->chunksize) {
1497                         printk(KERN_WARNING "md: %s has strangely different"
1498                                 " superblock to %s\n",
1499                                 bdevname(rdev->bdev,b),
1500                                 bdevname(refdev->bdev,b2));
1501                         return -EINVAL;
1502                 }
1503                 ev1 = le64_to_cpu(sb->events);
1504                 ev2 = le64_to_cpu(refsb->events);
1505
1506                 if (ev1 > ev2)
1507                         ret = 1;
1508                 else
1509                         ret = 0;
1510         }
1511         if (minor_version) {
1512                 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
1513                 sectors -= rdev->data_offset;
1514         } else
1515                 sectors = rdev->sb_start;
1516         if (sectors < le64_to_cpu(sb->data_size))
1517                 return -EINVAL;
1518         rdev->sectors = le64_to_cpu(sb->data_size);
1519         return ret;
1520 }
1521
1522 static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
1523 {
1524         struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
1525         __u64 ev1 = le64_to_cpu(sb->events);
1526
1527         rdev->raid_disk = -1;
1528         clear_bit(Faulty, &rdev->flags);
1529         clear_bit(In_sync, &rdev->flags);
1530         clear_bit(Bitmap_sync, &rdev->flags);
1531         clear_bit(WriteMostly, &rdev->flags);
1532
1533         if (mddev->raid_disks == 0) {
1534                 mddev->major_version = 1;
1535                 mddev->patch_version = 0;
1536                 mddev->external = 0;
1537                 mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
1538                 mddev->ctime = le64_to_cpu(sb->ctime);
1539                 mddev->utime = le64_to_cpu(sb->utime);
1540                 mddev->level = le32_to_cpu(sb->level);
1541                 mddev->clevel[0] = 0;
1542                 mddev->layout = le32_to_cpu(sb->layout);
1543                 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
1544                 mddev->dev_sectors = le64_to_cpu(sb->size);
1545                 mddev->events = ev1;
1546                 mddev->bitmap_info.offset = 0;
1547                 mddev->bitmap_info.space = 0;
1548         /* Default location for bitmap is 1K after superblock,
1549          * using 3K - total of 4K. Both fields are in 512-byte sectors.
1550                  */
1551                 mddev->bitmap_info.default_offset = 1024 >> 9;
1552                 mddev->bitmap_info.default_space = (4096-1024) >> 9;
1553                 mddev->reshape_backwards = 0;
1554
1555                 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1556                 memcpy(mddev->uuid, sb->set_uuid, 16);
1557
1558                 mddev->max_disks =  (4096-256)/2;
1559
1560                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1561                     mddev->bitmap_info.file == NULL) {
1562                         mddev->bitmap_info.offset =
1563                                 (__s32)le32_to_cpu(sb->bitmap_offset);
1564                         /* Metadata doesn't record how much space is available.
1565                          * For 1.0, we assume the bitmap may extend up to the
1566                          * superblock if placed before it, else to 4K beyond it.
1567                          * For others, assume no change is possible.
1568                          */
1569                         if (mddev->minor_version > 0)
1570                                 mddev->bitmap_info.space = 0;
1571                         else if (mddev->bitmap_info.offset > 0)
1572                                 mddev->bitmap_info.space =
1573                                         8 - mddev->bitmap_info.offset;
1574                         else
1575                                 mddev->bitmap_info.space =
1576                                         -mddev->bitmap_info.offset;
1577                 }
1578
1579                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1580                         mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1581                         mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1582                         mddev->new_level = le32_to_cpu(sb->new_level);
1583                         mddev->new_layout = le32_to_cpu(sb->new_layout);
1584                         mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
1585                         if (mddev->delta_disks < 0 ||
1586                             (mddev->delta_disks == 0 &&
1587                              (le32_to_cpu(sb->feature_map)
1588                               & MD_FEATURE_RESHAPE_BACKWARDS)))
1589                                 mddev->reshape_backwards = 1;
1590                 } else {
1591                         mddev->reshape_position = MaxSector;
1592                         mddev->delta_disks = 0;
1593                         mddev->new_level = mddev->level;
1594                         mddev->new_layout = mddev->layout;
1595                         mddev->new_chunk_sectors = mddev->chunk_sectors;
1596                 }
1597
1598                 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL) {
1599                         set_bit(MD_HAS_JOURNAL, &mddev->flags);
1600                         if (mddev->recovery_cp == MaxSector)
1601                                 set_bit(MD_JOURNAL_CLEAN, &mddev->flags);
1602                 }
1603         } else if (mddev->pers == NULL) {
1604                 /* Insist on a good event counter while assembling, except for
1605                  * spares (which don't need an event count) */
1606                 ++ev1;
1607                 if (rdev->desc_nr >= 0 &&
1608                     rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1609                     (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1610                      le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))
1611                         if (ev1 < mddev->events)
1612                                 return -EINVAL;
1613         } else if (mddev->bitmap) {
1614                 /* If adding to an array with a bitmap, then we can accept an
1615                  * older device, but not too old.
1616                  */
1617                 if (ev1 < mddev->bitmap->events_cleared)
1618                         return 0;
1619                 if (ev1 < mddev->events)
1620                         set_bit(Bitmap_sync, &rdev->flags);
1621         } else {
1622                 if (ev1 < mddev->events)
1623                         /* just a hot-add of a new device, leave raid_disk at -1 */
1624                         return 0;
1625         }
1626         if (mddev->level != LEVEL_MULTIPATH) {
1627                 int role;
1628                 if (rdev->desc_nr < 0 ||
1629                     rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
1630                         role = MD_DISK_ROLE_SPARE;
1631                         rdev->desc_nr = -1;
1632                 } else
1633                         role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
1634                 switch (role) {
1635                 case MD_DISK_ROLE_SPARE: /* spare */
1636                         break;
1637                 case MD_DISK_ROLE_FAULTY: /* faulty */
1638                         set_bit(Faulty, &rdev->flags);
1639                         break;
1640                 case MD_DISK_ROLE_JOURNAL: /* journal device */
1641                         if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) {
1642                                 /* journal device without journal feature */
1643                                 printk(KERN_WARNING
1644                                   "md: journal device provided without journal feature, ignoring the device\n");
1645                                 return -EINVAL;
1646                         }
1647                         set_bit(Journal, &rdev->flags);
1648                         rdev->journal_tail = le64_to_cpu(sb->journal_tail);
1649                         rdev->raid_disk = 0;
1650                         break;
1651                 default:
1652                         rdev->saved_raid_disk = role;
1653                         if ((le32_to_cpu(sb->feature_map) &
1654                              MD_FEATURE_RECOVERY_OFFSET)) {
1655                                 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
1656                                 if (!(le32_to_cpu(sb->feature_map) &
1657                                       MD_FEATURE_RECOVERY_BITMAP))
1658                                         rdev->saved_raid_disk = -1;
1659                         } else
1660                                 set_bit(In_sync, &rdev->flags);
1661                         rdev->raid_disk = role;
1662                         break;
1663                 }
1664                 if (sb->devflags & WriteMostly1)
1665                         set_bit(WriteMostly, &rdev->flags);
1666                 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
1667                         set_bit(Replacement, &rdev->flags);
1668         } else /* MULTIPATH are always insync */
1669                 set_bit(In_sync, &rdev->flags);
1670
1671         return 0;
1672 }
1673
1674 static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
1675 {
1676         struct mdp_superblock_1 *sb;
1677         struct md_rdev *rdev2;
1678         int max_dev, i;
1679         /* make rdev->sb match mddev and rdev data. */
1680
1681         sb = page_address(rdev->sb_page);
1682
1683         sb->feature_map = 0;
1684         sb->pad0 = 0;
1685         sb->recovery_offset = cpu_to_le64(0);
1686         memset(sb->pad3, 0, sizeof(sb->pad3));
1687
1688         sb->utime = cpu_to_le64((__u64)mddev->utime);
1689         sb->events = cpu_to_le64(mddev->events);
1690         if (mddev->in_sync)
1691                 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
1692         else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags))
1693                 sb->resync_offset = cpu_to_le64(MaxSector);
1694         else
1695                 sb->resync_offset = cpu_to_le64(0);
1696
1697         sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
1698
1699         sb->raid_disks = cpu_to_le32(mddev->raid_disks);
1700         sb->size = cpu_to_le64(mddev->dev_sectors);
1701         sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
1702         sb->level = cpu_to_le32(mddev->level);
1703         sb->layout = cpu_to_le32(mddev->layout);
1704
1705         if (test_bit(WriteMostly, &rdev->flags))
1706                 sb->devflags |= WriteMostly1;
1707         else
1708                 sb->devflags &= ~WriteMostly1;
1709         sb->data_offset = cpu_to_le64(rdev->data_offset);
1710         sb->data_size = cpu_to_le64(rdev->sectors);
1711
1712         if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
1713                 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
1714                 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
1715         }
1716
1717         if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) &&
1718             !test_bit(In_sync, &rdev->flags)) {
1719                 sb->feature_map |=
1720                         cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
1721                 sb->recovery_offset =
1722                         cpu_to_le64(rdev->recovery_offset);
1723                 if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
1724                         sb->feature_map |=
1725                                 cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
1726         }
1727         /* Note: recovery_offset and journal_tail share space  */
1728         if (test_bit(Journal, &rdev->flags))
1729                 sb->journal_tail = cpu_to_le64(rdev->journal_tail);
1730         if (test_bit(Replacement, &rdev->flags))
1731                 sb->feature_map |=
1732                         cpu_to_le32(MD_FEATURE_REPLACEMENT);
1733
1734         if (mddev->reshape_position != MaxSector) {
1735                 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
1736                 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
1737                 sb->new_layout = cpu_to_le32(mddev->new_layout);
1738                 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
1739                 sb->new_level = cpu_to_le32(mddev->new_level);
1740                 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
1741                 if (mddev->delta_disks == 0 &&
1742                     mddev->reshape_backwards)
1743                         sb->feature_map
1744                                 |= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
1745                 if (rdev->new_data_offset != rdev->data_offset) {
1746                         sb->feature_map
1747                                 |= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
1748                         sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
1749                                                              - rdev->data_offset));
1750                 }
1751         }
1752
1753         if (mddev_is_clustered(mddev))
1754                 sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED);
1755
1756         if (rdev->badblocks.count == 0)
1757                 /* Nothing to do for bad blocks*/ ;
1758         else if (sb->bblog_offset == 0)
1759                 /* Cannot record bad blocks on this device */
1760                 md_error(mddev, rdev);
1761         else {
1762                 struct badblocks *bb = &rdev->badblocks;
1763                 u64 *bbp = (u64 *)page_address(rdev->bb_page);
1764                 u64 *p = bb->page;
1765                 sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
1766                 if (bb->changed) {
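                        /*
                         * Snapshot the list inside a seqlock read section,
                         * retrying if a writer changed it while we copied.
                         */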
1767                         unsigned seq;
1768
1769 retry:
1770                         seq = read_seqbegin(&bb->lock);
1771
1772                         memset(bbp, 0xff, PAGE_SIZE);
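                        /*
                         * Entries beyond bb->count stay all-ones, which the
                         * loading side treats as an end-of-list marker.
                         */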
1773
1774                         for (i = 0 ; i < bb->count ; i++) {
1775                                 u64 internal_bb = p[i];
1776                                 u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
1777                                                 | BB_LEN(internal_bb));
1778                                 bbp[i] = cpu_to_le64(store_bb);
1779                         }
1780                         bb->changed = 0;
1781                         if (read_seqretry(&bb->lock, seq))
1782                                 goto retry;
1783
1784                         bb->sector = (rdev->sb_start +
1785                                       (int)le32_to_cpu(sb->bblog_offset));
1786                         bb->size = le16_to_cpu(sb->bblog_size);
1787                 }
1788         }
1789
1790         max_dev = 0;
1791         rdev_for_each(rdev2, mddev)
1792                 if (rdev2->desc_nr+1 > max_dev)
1793                         max_dev = rdev2->desc_nr+1;
1794
1795         if (max_dev > le32_to_cpu(sb->max_dev)) {
1796                 int bmask;
1797                 sb->max_dev = cpu_to_le32(max_dev);
1798                 rdev->sb_size = max_dev * 2 + 256;
1799                 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1800                 if (rdev->sb_size & bmask)
1801                         rdev->sb_size = (rdev->sb_size | bmask) + 1;
1802         } else
1803                 max_dev = le32_to_cpu(sb->max_dev);
1804
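        /*
         * Mark every slot faulty by default; slots with a bound rdev are
         * overwritten with their real role in the rdev_for_each() below.
         */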
1805         for (i = 0; i < max_dev; i++)
1806                 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
1807
1808         if (test_bit(MD_HAS_JOURNAL, &mddev->flags))
1809                 sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL);
1810
1811         rdev_for_each(rdev2, mddev) {
1812                 i = rdev2->desc_nr;
1813                 if (test_bit(Faulty, &rdev2->flags))
1814                         sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
1815                 else if (test_bit(In_sync, &rdev2->flags))
1816                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1817                 else if (test_bit(Journal, &rdev2->flags))
1818                         sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL);
1819                 else if (rdev2->raid_disk >= 0)
1820                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1821                 else
1822                         sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
1823         }
1824
1825         sb->sb_csum = calc_sb_1_csum(sb);
1826 }
1827
1828 static unsigned long long
1829 super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
1830 {
1831         struct mdp_superblock_1 *sb;
1832         sector_t max_sectors;
1833         if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1834                 return 0; /* component must fit device */
1835         if (rdev->data_offset != rdev->new_data_offset)
1836                 return 0; /* too confusing */
1837         if (rdev->sb_start < rdev->data_offset) {
1838                 /* minor versions 1 and 2; superblock before data */
1839                 max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
1840                 max_sectors -= rdev->data_offset;
1841                 if (!num_sectors || num_sectors > max_sectors)
1842                         num_sectors = max_sectors;
1843         } else if (rdev->mddev->bitmap_info.offset) {
1844                 /* minor version 0 with bitmap we can't move */
1845                 return 0;
1846         } else {
1847                 /* minor version 0; superblock after data */
1848                 sector_t sb_start;
1849                 sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
1850                 sb_start &= ~(sector_t)(4*2 - 1);
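                /* Same placement rule as super_1_load(): 16 sectors (8K)
                 * back from the end of the device, rounded down to 4K.
                 */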
1851                 max_sectors = rdev->sectors + sb_start - rdev->sb_start;
1852                 if (!num_sectors || num_sectors > max_sectors)
1853                         num_sectors = max_sectors;
1854                 rdev->sb_start = sb_start;
1855         }
1856         sb = page_address(rdev->sb_page);
1857         sb->data_size = cpu_to_le64(num_sectors);
1858         sb->super_offset = rdev->sb_start;
1859         sb->sb_csum = calc_sb_1_csum(sb);
1860         md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1861                        rdev->sb_page);
1862         md_super_wait(rdev->mddev);
1863         return num_sectors;
1864
1865 }
1866
1867 static int
1868 super_1_allow_new_offset(struct md_rdev *rdev,
1869                          unsigned long long new_offset)
1870 {
1871         /* All necessary checks on new >= old have been done */
1872         struct bitmap *bitmap;
1873         if (new_offset >= rdev->data_offset)
1874                 return 1;
1875
1876         /* with 1.0 metadata, there is no metadata to tread on
1877          * so we can always move back */
1878         if (rdev->mddev->minor_version == 0)
1879                 return 1;
1880
1881         /* otherwise we must be sure not to step on
1882          * any metadata, so stay:
1883          * 36K beyond start of superblock
1884          * beyond end of badblocks
1885          * beyond write-intent bitmap
1886          */
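        /* (32+4)*2 sectors == 36K */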
1887         if (rdev->sb_start + (32+4)*2 > new_offset)
1888                 return 0;
1889         bitmap = rdev->mddev->bitmap;
1890         if (bitmap && !rdev->mddev->bitmap_info.file &&
1891             rdev->sb_start + rdev->mddev->bitmap_info.offset +
1892             bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
1893                 return 0;
1894         if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
1895                 return 0;
1896
1897         return 1;
1898 }
1899
1900 static struct super_type super_types[] = {
1901         [0] = {
1902                 .name   = "0.90.0",
1903                 .owner  = THIS_MODULE,
1904                 .load_super         = super_90_load,
1905                 .validate_super     = super_90_validate,
1906                 .sync_super         = super_90_sync,
1907                 .rdev_size_change   = super_90_rdev_size_change,
1908                 .allow_new_offset   = super_90_allow_new_offset,
1909         },
1910         [1] = {
1911                 .name   = "md-1",
1912                 .owner  = THIS_MODULE,
1913                 .load_super         = super_1_load,
1914                 .validate_super     = super_1_validate,
1915                 .sync_super         = super_1_sync,
1916                 .rdev_size_change   = super_1_rdev_size_change,
1917                 .allow_new_offset   = super_1_allow_new_offset,
1918         },
1919 };
1920
1921 static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
1922 {
1923         if (mddev->sync_super) {
1924                 mddev->sync_super(mddev, rdev);
1925                 return;
1926         }
1927
1928         BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
1929
1930         super_types[mddev->major_version].sync_super(mddev, rdev);
1931 }
1932
1933 static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
1934 {
1935         struct md_rdev *rdev, *rdev2;
1936
1937         rcu_read_lock();
1938         rdev_for_each_rcu(rdev, mddev1) {
1939                 if (test_bit(Faulty, &rdev->flags) ||
1940                     test_bit(Journal, &rdev->flags) ||
1941                     rdev->raid_disk == -1)
1942                         continue;
1943                 rdev_for_each_rcu(rdev2, mddev2) {
1944                         if (test_bit(Faulty, &rdev2->flags) ||
1945                             test_bit(Journal, &rdev2->flags) ||
1946                             rdev2->raid_disk == -1)
1947                                 continue;
1948                         if (rdev->bdev->bd_contains ==
1949                             rdev2->bdev->bd_contains) {
1950                                 rcu_read_unlock();
1951                                 return 1;
1952                         }
1953                 }
1954         }
1955         rcu_read_unlock();
1956         return 0;
1957 }
1958
1959 static LIST_HEAD(pending_raid_disks);
1960
1961 /*
1962  * Try to register data integrity profile for an mddev
1963  *
1964  * This is called when an array is started and after a disk has been kicked
1965  * from the array. It only succeeds if all working and active component devices
1966  * are integrity capable with matching profiles.
1967  */
1968 int md_integrity_register(struct mddev *mddev)
1969 {
1970         struct md_rdev *rdev, *reference = NULL;
1971
1972         if (list_empty(&mddev->disks))
1973                 return 0; /* nothing to do */
1974         if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
1975                 return 0; /* shouldn't register, or already is */
1976         rdev_for_each(rdev, mddev) {
1977                 /* skip spares and non-functional disks */
1978                 if (test_bit(Faulty, &rdev->flags))
1979                         continue;
1980                 if (rdev->raid_disk < 0)
1981                         continue;
1982                 if (!reference) {
1983                         /* Use the first rdev as the reference */
1984                         reference = rdev;
1985                         continue;
1986                 }
1987                 /* does this rdev's profile match the reference profile? */
1988                 if (blk_integrity_compare(reference->bdev->bd_disk,
1989                                 rdev->bdev->bd_disk) < 0)
1990                         return -EINVAL;
1991         }
1992         if (!reference || !bdev_get_integrity(reference->bdev))
1993                 return 0;
1994         /*
1995          * All component devices are integrity capable and have matching
1996          * profiles, register the common profile for the md device.
1997          */
1998         blk_integrity_register(mddev->gendisk,
1999                                bdev_get_integrity(reference->bdev));
2000
2001         printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev));
2002         if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
2003                 printk(KERN_ERR "md: failed to create integrity pool for %s\n",
2004                        mdname(mddev));
2005                 return -EINVAL;
2006         }
2007         return 0;
2008 }
2009 EXPORT_SYMBOL(md_integrity_register);
2010
2011 /*
2012  * Attempt to add an rdev, but only if it is consistent with the current
2013  * integrity profile
2014  */
2015 int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
2016 {
2017         struct blk_integrity *bi_rdev;
2018         struct blk_integrity *bi_mddev;
2019         char name[BDEVNAME_SIZE];
2020
2021         if (!mddev->gendisk)
2022                 return 0;
2023
2024         bi_rdev = bdev_get_integrity(rdev->bdev);
2025         bi_mddev = blk_get_integrity(mddev->gendisk);
2026
2027         if (!bi_mddev) /* nothing to do */
2028                 return 0;
2029
2030         if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
2031                 printk(KERN_NOTICE "%s: incompatible integrity profile for %s\n",
2032                                 mdname(mddev), bdevname(rdev->bdev, name));
2033                 return -ENXIO;
2034         }
2035
2036         return 0;
2037 }
2038 EXPORT_SYMBOL(md_integrity_add_rdev);
2039
2040 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
2041 {
2042         char b[BDEVNAME_SIZE];
2043         struct kobject *ko;
2044         int err;
2045
2046         /* prevent duplicates */
2047         if (find_rdev(mddev, rdev->bdev->bd_dev))
2048                 return -EEXIST;
2049
2050         /* make sure rdev->sectors exceeds mddev->dev_sectors */
2051         if (!test_bit(Journal, &rdev->flags) &&
2052             rdev->sectors &&
2053             (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) {
2054                 if (mddev->pers) {
2055                         /* Cannot change size, so fail.
2056                          * If mddev->level <= 0, then we don't care
2057                          * about aligning sizes (e.g. linear)
2058                          */
2059                         if (mddev->level > 0)
2060                                 return -ENOSPC;
2061                 } else
2062                         mddev->dev_sectors = rdev->sectors;
2063         }
2064
2065         /* Verify rdev->desc_nr is unique.
2066          * If it is -1, assign a free number, else
2067          * check number is not in use
2068          */
2069         rcu_read_lock();
2070         if (rdev->desc_nr < 0) {
2071                 int choice = 0;
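                /* on a running array, start searching above the active slots */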
2072                 if (mddev->pers)
2073                         choice = mddev->raid_disks;
2074                 while (md_find_rdev_nr_rcu(mddev, choice))
2075                         choice++;
2076                 rdev->desc_nr = choice;
2077         } else {
2078                 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
2079                         rcu_read_unlock();
2080                         return -EBUSY;
2081                 }
2082         }
2083         rcu_read_unlock();
2084         if (!test_bit(Journal, &rdev->flags) &&
2085             mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
2086                 printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
2087                        mdname(mddev), mddev->max_disks);
2088                 return -EBUSY;
2089         }
2090         bdevname(rdev->bdev,b);
2091         strreplace(b, '/', '!');
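        /* sysfs names cannot contain '/', which device names such as
         * "cciss/c0d0" do, so turn any '/' into '!'. */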
2092
2093         rdev->mddev = mddev;
2094         printk(KERN_INFO "md: bind<%s>\n", b);
2095
2096         if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
2097                 goto fail;
2098
2099         ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
2100         if (sysfs_create_link(&rdev->kobj, ko, "block"))
2101                 /* failure here is OK */;
2102         rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
2103
2104         list_add_rcu(&rdev->same_set, &mddev->disks);
2105         bd_link_disk_holder(rdev->bdev, mddev->gendisk);
2106
2107         /* May as well allow recovery to be retried once */
2108         mddev->recovery_disabled++;
2109
2110         return 0;
2111
2112  fail:
2113         printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
2114                b, mdname(mddev));
2115         return err;
2116 }
2117
2118 static void md_delayed_delete(struct work_struct *ws)
2119 {
2120         struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
2121         kobject_del(&rdev->kobj);
2122         kobject_put(&rdev->kobj);
2123 }
2124
2125 static void unbind_rdev_from_array(struct md_rdev *rdev)
2126 {
2127         char b[BDEVNAME_SIZE];
2128
2129         bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
2130         list_del_rcu(&rdev->same_set);
2131         printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
2132         rdev->mddev = NULL;
2133         sysfs_remove_link(&rdev->kobj, "block");
2134         sysfs_put(rdev->sysfs_state);
2135         rdev->sysfs_state = NULL;
2136         rdev->badblocks.count = 0;
2137         /* We need to delay this, otherwise we can deadlock when
2138          * writing 'remove' to "dev/state".  We also need
2139          * to delay it due to rcu usage.
2140          */
2141         synchronize_rcu();
2142         INIT_WORK(&rdev->del_work, md_delayed_delete);
2143         kobject_get(&rdev->kobj);
2144         queue_work(md_misc_wq, &rdev->del_work);
2145 }
2146
2147 /*
2148  * prevent the device from being mounted, repartitioned or
2149  * otherwise reused by a RAID array (or any other kernel
2150  * subsystem), by bd_claiming the device.
2151  */
2152 static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
2153 {
2154         int err = 0;
2155         struct block_device *bdev;
2156         char b[BDEVNAME_SIZE];
2157
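        /*
         * Claim the device exclusively.  For a shared claim every caller
         * passes the same holder token (the address of lock_rdev), letting
         * several arrays hold the same device; otherwise the rdev itself
         * is the holder.
         */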
2158         bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
2159                                  shared ? (struct md_rdev *)lock_rdev : rdev);
2160         if (IS_ERR(bdev)) {
2161                 printk(KERN_ERR "md: could not open %s.\n",
2162                         __bdevname(dev, b));
2163                 return PTR_ERR(bdev);
2164         }
2165         rdev->bdev = bdev;
2166         return err;
2167 }
2168
2169 static void unlock_rdev(struct md_rdev *rdev)
2170 {
2171         struct block_device *bdev = rdev->bdev;
2172         rdev->bdev = NULL;
2173         blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
2174 }
2175
2176 void md_autodetect_dev(dev_t dev);
2177
2178 static void export_rdev(struct md_rdev *rdev)
2179 {
2180         char b[BDEVNAME_SIZE];
2181
2182         printk(KERN_INFO "md: export_rdev(%s)\n",
2183                 bdevname(rdev->bdev,b));
2184         md_rdev_clear(rdev);
2185 #ifndef MODULE
2186         if (test_bit(AutoDetected, &rdev->flags))
2187                 md_autodetect_dev(rdev->bdev->bd_dev);
2188 #endif
2189         unlock_rdev(rdev);
2190         kobject_put(&rdev->kobj);
2191 }
2192
2193 void md_kick_rdev_from_array(struct md_rdev *rdev)
2194 {
2195         unbind_rdev_from_array(rdev);
2196         export_rdev(rdev);
2197 }
2198 EXPORT_SYMBOL_GPL(md_kick_rdev_from_array);
2199
2200 static void export_array(struct mddev *mddev)
2201 {
2202         struct md_rdev *rdev;
2203
2204         while (!list_empty(&mddev->disks)) {
2205                 rdev = list_first_entry(&mddev->disks, struct md_rdev,
2206                                         same_set);
2207                 md_kick_rdev_from_array(rdev);
2208         }
2209         mddev->raid_disks = 0;
2210         mddev->major_version = 0;
2211 }
2212
2213 static void sync_sbs(struct mddev *mddev, int nospares)
2214 {
2215         /* Update each superblock (in-memory image), but
2216          * if we are allowed to, skip spares which already
2217          * have the right event counter, or have one earlier
2218          * (which would mean they aren't being marked as dirty
2219          * with the rest of the array)
2220          */
2221         struct md_rdev *rdev;
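        /* sb_loaded == 1 means "write this superblock out";
         * sb_loaded == 2 means it is already up to date, skip it. */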
2222         rdev_for_each(rdev, mddev) {
2223                 if (rdev->sb_events == mddev->events ||
2224                     (nospares &&
2225                      rdev->raid_disk < 0 &&
2226                      rdev->sb_events+1 == mddev->events)) {
2227                         /* Don't update this superblock */
2228                         rdev->sb_loaded = 2;
2229                 } else {
2230                         sync_super(mddev, rdev);
2231                         rdev->sb_loaded = 1;
2232                 }
2233         }
2234 }
2235
2236 static bool does_sb_need_changing(struct mddev *mddev)
2237 {
2238         struct md_rdev *rdev;
2239         struct mdp_superblock_1 *sb;
2240         int role;
2241
2242         /* Find a good rdev */
2243         rdev_for_each(rdev, mddev)
2244                 if ((rdev->raid_disk >= 0) && !test_bit(Faulty, &rdev->flags))
2245                         break;
2246
2247         /* No good device found. */
2248         if (!rdev)
2249                 return false;
2250
2251         sb = page_address(rdev->sb_page);
2252         /* Check if a device has become faulty or a spare has become active */
2253         rdev_for_each(rdev, mddev) {
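                /*
                 * dev_roles uses 0xffff (MD_DISK_ROLE_SPARE), 0xfffe
                 * (MD_DISK_ROLE_FAULTY) and 0xfffd (MD_DISK_ROLE_JOURNAL)
                 * as special values; anything below 0xfffd is treated as
                 * an assigned data slot here.
                 */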
2254                 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
2255                 /* Device activated? */
2256                 if (role == 0xffff && rdev->raid_disk >= 0 &&
2257                     !test_bit(Faulty, &rdev->flags))
2258                         return true;
2259                 /* Device turned faulty? */
2260                 if (test_bit(Faulty, &rdev->flags) && (role < 0xfffd))
2261                         return true;
2262         }
2263
2264         /* Check if any mddev parameters have changed */
2265         if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
2266             (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
2267             (mddev->layout != le32_to_cpu(sb->layout)) ||
2268             (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
2269             (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
2270                 return true;
2271
2272         return false;
2273 }
2274
2275 void md_update_sb(struct mddev *mddev, int force_change)
2276 {
2277         struct md_rdev *rdev;
2278         int sync_req;
2279         int nospares = 0;
2280         int any_badblocks_changed = 0;
2281         int ret = -1;
2282
2283         if (mddev->ro) {
2284                 if (force_change)
2285                         set_bit(MD_CHANGE_DEVS, &mddev->flags);
2286                 return;
2287         }
2288
2289         if (mddev_is_clustered(mddev)) {
2290                 if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
2291                         force_change = 1;
2292                 ret = md_cluster_ops->metadata_update_start(mddev);
2293                 /* Has someone else updated the sb? */
2294                 if (!does_sb_need_changing(mddev)) {
2295                         if (ret == 0)
2296                                 md_cluster_ops->metadata_update_cancel(mddev);
2297                         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
2298                         return;
2299                 }
2300         }
2301 repeat:
2302         /* First make sure individual recovery_offsets are correct */
2303         rdev_for_each(rdev, mddev) {
2304                 if (rdev->raid_disk >= 0 &&
2305                     mddev->delta_disks >= 0 &&
2306                     !test_bit(Journal, &rdev->flags) &&
2307                     !test_bit(In_sync, &rdev->flags) &&
2308                     mddev->curr_resync_completed > rdev->recovery_offset)
2309                                 rdev->recovery_offset = mddev->curr_resync_completed;
2310
2311         }
2312         if (!mddev->persistent) {
2313                 clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
2314                 clear_bit(MD_CHANGE_DEVS, &mddev->flags);
2315                 if (!mddev->external) {
2316                         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
2317                         rdev_for_each(rdev, mddev) {
2318                                 if (rdev->badblocks.changed) {
2319                                         rdev->badblocks.changed = 0;
2320                                         ack_all_badblocks(&rdev->badblocks);
2321                                         md_error(mddev, rdev);
2322                                 }
2323                                 clear_bit(Blocked, &rdev->flags);
2324                                 clear_bit(BlockedBadBlocks, &rdev->flags);
2325                                 wake_up(&rdev->blocked_wait);
2326                         }
2327                 }
2328                 wake_up(&mddev->sb_wait);
2329                 return;
2330         }
2331
2332         spin_lock(&mddev->lock);
2333
2334         mddev->utime = ktime_get_real_seconds();
2335
2336         if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
2337                 force_change = 1;
2338         if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
2339                 /* just a clean <-> dirty transition; possibly leave spares alone,
2340                  * though if the event count isn't the right even/odd, we will
2341                  * have to update the spares after all
2342                  */
2343                 nospares = 1;
2344         if (force_change)
2345                 nospares = 0;
2346         if (mddev->degraded)
2347                 /* If the array is degraded, then skipping spares is both
2348                  * dangerous and fairly pointless.
2349                  * Dangerous because a device that was removed from the array
2350          * might have an event_count that still looks up-to-date,
2351                  * so it can be re-added without a resync.
2352                  * Pointless because if there are any spares to skip,
2353                  * then a recovery will happen and soon that array won't
2354                  * be degraded any more and the spare can go back to sleep then.
2355                  */
2356                 nospares = 0;
2357
2358         sync_req = mddev->in_sync;
2359
2360         /* If this is just a dirty<->clean transition, and the array is clean
2361          * and 'events' is odd, we can roll back to the previous clean state */
2362         if (nospares
2363             && (mddev->in_sync && mddev->recovery_cp == MaxSector)
2364             && mddev->can_decrease_events
2365             && mddev->events != 1) {
2366                 mddev->events--;
2367                 mddev->can_decrease_events = 0;
2368         } else {
2369                 /* otherwise we have to go forward and ... */
2370                 mddev->events++;
2371                 mddev->can_decrease_events = nospares;
2372         }
2373
2374         /*
2375          * This 64-bit counter should never wrap.
2376          * Either we are in around ~1 trillion A.C., assuming
2377          * 1 reboot per second, or we have a bug...
2378          */
2379         WARN_ON(mddev->events == 0);
2380
2381         rdev_for_each(rdev, mddev) {
2382                 if (rdev->badblocks.changed)
2383                         any_badblocks_changed++;
2384                 if (test_bit(Faulty, &rdev->flags))
2385                         set_bit(FaultRecorded, &rdev->flags);
2386         }
2387
2388         sync_sbs(mddev, nospares);
2389         spin_unlock(&mddev->lock);
2390
2391         pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
2392                  mdname(mddev), mddev->in_sync);
2393
2394         bitmap_update_sb(mddev->bitmap);
2395         rdev_for_each(rdev, mddev) {
2396                 char b[BDEVNAME_SIZE];
2397
2398                 if (rdev->sb_loaded != 1)
2399                         continue; /* no noise on spare devices */
2400
2401                 if (!test_bit(Faulty, &rdev->flags)) {
2402                         md_super_write(mddev,rdev,
2403                                        rdev->sb_start, rdev->sb_size,
2404                                        rdev->sb_page);
2405                         pr_debug("md: (write) %s's sb offset: %llu\n",
2406                                  bdevname(rdev->bdev, b),
2407                                  (unsigned long long)rdev->sb_start);
2408                         rdev->sb_events = mddev->events;
2409                         if (rdev->badblocks.size) {
2410                                 md_super_write(mddev, rdev,
2411                                                rdev->badblocks.sector,
2412                                                rdev->badblocks.size << 9,
2413                                                rdev->bb_page);
2414                                 rdev->badblocks.size = 0;
2415                         }
2416
2417                 } else
2418                         pr_debug("md: %s (skipping faulty)\n",
2419                                  bdevname(rdev->bdev, b));
2420
2421                 if (mddev->level == LEVEL_MULTIPATH)
2422                         /* only need to write one superblock... */
2423                         break;
2424         }
2425         md_super_wait(mddev);
2426         /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
2427
2428         spin_lock(&mddev->lock);
2429         if (mddev->in_sync != sync_req ||
2430             test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
2431                 /* have to write it out again */
2432                 spin_unlock(&mddev->lock);
2433                 goto repeat;
2434         }
2435         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
2436         spin_unlock(&mddev->lock);
2437         wake_up(&mddev->sb_wait);
2438         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
2439                 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
2440
2441         rdev_for_each(rdev, mddev) {
2442                 if (test_and_clear_bit(FaultRecorded, &rdev->flags))
2443                         clear_bit(Blocked, &rdev->flags);
2444
2445                 if (any_badblocks_changed)
2446                         ack_all_badblocks(&rdev->badblocks);
2447                 clear_bit(BlockedBadBlocks, &rdev->flags);
2448                 wake_up(&rdev->blocked_wait);
2449         }
2450
2451         if (mddev_is_clustered(mddev) && ret == 0)
2452                 md_cluster_ops->metadata_update_finish(mddev);
2453 }
2454 EXPORT_SYMBOL(md_update_sb);
2455
2456 static int add_bound_rdev(struct md_rdev *rdev)
2457 {
2458         struct mddev *mddev = rdev->mddev;
2459         int err = 0;
2460         bool add_journal = test_bit(Journal, &rdev->flags);
2461
2462         if (!mddev->pers->hot_remove_disk || add_journal) {
2463                 /* If there is hot_add_disk but no hot_remove_disk,
2464                  * then added disks are for geometry changes
2465                  * and should be added immediately.
2466                  */
2467                 super_types[mddev->major_version].
2468                         validate_super(mddev, rdev);
2469                 if (add_journal)
2470                         mddev_suspend(mddev);
2471                 err = mddev->pers->hot_add_disk(mddev, rdev);
2472                 if (add_journal)
2473                         mddev_resume(mddev);
2474                 if (err) {
2475                         unbind_rdev_from_array(rdev);
2476                         export_rdev(rdev);
2477                         return err;
2478                 }
2479         }
2480         sysfs_notify_dirent_safe(rdev->sysfs_state);
2481
2482         set_bit(MD_CHANGE_DEVS, &mddev->flags);
2483         if (mddev->degraded)
2484                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
2485         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2486         md_new_event(mddev);
2487         md_wakeup_thread(mddev->thread);
2488         return 0;
2489 }
2490
2491 /* words written to sysfs files may, or may not, be \n terminated.
2492  * We want to accept either case. For this we use cmd_match.
2493  */
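/*
 * e.g. cmd_match("faulty\n", "faulty") and cmd_match("faulty", "faulty")
 * both match, while cmd_match("fault", "faulty") and
 * cmd_match("faulty2", "faulty") do not.
 */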
2494 static int cmd_match(const char *cmd, const char *str)
2495 {
2496         /* See if cmd, written into a sysfs file, matches
2497          * str.  They must either be the same, or cmd can
2498          * have a trailing newline
2499          */
2500         while (*cmd && *str && *cmd == *str) {
2501                 cmd++;
2502                 str++;
2503         }
2504         if (*cmd == '\n')
2505                 cmd++;
2506         if (*str || *cmd)
2507                 return 0;
2508         return 1;
2509 }
2510
2511 struct rdev_sysfs_entry {
2512         struct attribute attr;
2513         ssize_t (*show)(struct md_rdev *, char *);
2514         ssize_t (*store)(struct md_rdev *, const char *, size_t);
2515 };
2516
2517 static ssize_t
2518 state_show(struct md_rdev *rdev, char *page)
2519 {
2520         char *sep = "";
2521         size_t len = 0;
2522         unsigned long flags = ACCESS_ONCE(rdev->flags);
2523
2524         if (test_bit(Faulty, &flags) ||
2525             rdev->badblocks.unacked_exist) {
2526                 len += sprintf(page+len, "%sfaulty", sep);
2527                 sep = ",";
2528         }
2529         if (test_bit(In_sync, &flags)) {
2530                 len += sprintf(page+len, "%sin_sync", sep);
2531                 sep = ",";
2532         }
2533         if (test_bit(Journal, &flags)) {
2534                 len += sprintf(page+len, "%sjournal", sep);
2535                 sep = ",";
2536         }
2537         if (test_bit(WriteMostly, &flags)) {
2538                 len += sprintf(page+len, "%swrite_mostly", sep);
2539                 sep = ",";
2540         }
2541         if (test_bit(Blocked, &flags) ||
2542             (rdev->badblocks.unacked_exist
2543              && !test_bit(Faulty, &flags))) {
2544                 len += sprintf(page+len, "%sblocked", sep);
2545                 sep = ",";
2546         }
2547         if (!test_bit(Faulty, &flags) &&
2548             !test_bit(Journal, &flags) &&
2549             !test_bit(In_sync, &flags)) {
2550                 len += sprintf(page+len, "%sspare", sep);
2551                 sep = ",";
2552         }
2553         if (test_bit(WriteErrorSeen, &flags)) {
2554                 len += sprintf(page+len, "%swrite_error", sep);
2555                 sep = ",";
2556         }
2557         if (test_bit(WantReplacement, &flags)) {
2558                 len += sprintf(page+len, "%swant_replacement", sep);
2559                 sep = ",";
2560         }
2561         if (test_bit(Replacement, &flags)) {
2562                 len += sprintf(page+len, "%sreplacement", sep);
2563                 sep = ",";
2564         }
2565
2566         return len+sprintf(page+len, "\n");
2567 }
2568
2569 static ssize_t
2570 state_store(struct md_rdev *rdev, const char *buf, size_t len)
2571 {
2572         /* can write
2573          *  faulty  - simulates an error
2574          *  remove  - disconnects the device
2575          *  writemostly - sets write_mostly
2576          *  -writemostly - clears write_mostly
2577          *  blocked - sets the Blocked flags
2578          *  -blocked - clears the Blocked and possibly simulates an error
2579          *  insync - sets In_sync provided the device isn't active
2580          *  -insync - clears In_sync for a device with a slot assigned,
2581          *            so that it gets rebuilt based on bitmap
2582          *  write_error - sets WriteErrorSeen
2583          *  -write_error - clears WriteErrorSeen
2584          */
2585         int err = -EINVAL;
2586         if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
2587                 md_error(rdev->mddev, rdev);
2588                 if (test_bit(Faulty, &rdev->flags))
2589                         err = 0;
2590                 else
2591                         err = -EBUSY;
2592         } else if (cmd_match(buf, "remove")) {
2593                 if (rdev->raid_disk >= 0)
2594                         err = -EBUSY;
2595                 else {
2596                         struct mddev *mddev = rdev->mddev;
2597                         err = 0;
2598                         if (mddev_is_clustered(mddev))
2599                                 err = md_cluster_ops->remove_disk(mddev, rdev);
2600
2601                         if (err == 0) {
2602                                 md_kick_rdev_from_array(rdev);
2603                                 if (mddev->pers)
2604                                         md_update_sb(mddev, 1);
2605                                 md_new_event(mddev);
2606                         }
2607                 }
2608         } else if (cmd_match(buf, "writemostly")) {
2609                 set_bit(WriteMostly, &rdev->flags);
2610                 err = 0;
2611         } else if (cmd_match(buf, "-writemostly")) {
2612                 clear_bit(WriteMostly, &rdev->flags);
2613                 err = 0;
2614         } else if (cmd_match(buf, "blocked")) {
2615                 set_bit(Blocked, &rdev->flags);
2616                 err = 0;
2617         } else if (cmd_match(buf, "-blocked")) {
2618                 if (!test_bit(Faulty, &rdev->flags) &&
2619                     rdev->badblocks.unacked_exist) {
2620                         /* metadata handler doesn't understand badblocks,
2621                          * so we need to fail the device
2622                          */
2623                         md_error(rdev->mddev, rdev);
2624                 }
2625                 clear_bit(Blocked, &rdev->flags);
2626                 clear_bit(BlockedBadBlocks, &rdev->flags);
2627                 wake_up(&rdev->blocked_wait);
2628                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2629                 md_wakeup_thread(rdev->mddev->thread);
2630
2631                 err = 0;
2632         } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
2633                 set_bit(In_sync, &rdev->flags);
2634                 err = 0;
2635         } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
2636                    !test_bit(Journal, &rdev->flags)) {
2637                 if (rdev->mddev->pers == NULL) {
2638                         clear_bit(In_sync, &rdev->flags);
2639                         rdev->saved_raid_disk = rdev->raid_disk;
2640                         rdev->raid_disk = -1;
2641                         err = 0;
2642                 }
2643         } else if (cmd_match(buf, "write_error")) {
2644                 set_bit(WriteErrorSeen, &rdev->flags);
2645                 err = 0;
2646         } else if (cmd_match(buf, "-write_error")) {
2647                 clear_bit(WriteErrorSeen, &rdev->flags);
2648                 err = 0;
2649         } else if (cmd_match(buf, "want_replacement")) {
2650                 /* Any non-spare device that is not a replacement can
2651                  * become want_replacement at any time, but we then need to
2652                  * check if recovery is needed.
2653                  */
2654                 if (rdev->raid_disk >= 0 &&
2655                     !test_bit(Journal, &rdev->flags) &&
2656                     !test_bit(Replacement, &rdev->flags))
2657                         set_bit(WantReplacement, &rdev->flags);
2658                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2659                 md_wakeup_thread(rdev->mddev->thread);
2660                 err = 0;
2661         } else if (cmd_match(buf, "-want_replacement")) {
2662                 /* Clearing 'want_replacement' is always allowed.
2663          * Once replacement starts it is too late though.
2664                  */
2665                 err = 0;
2666                 clear_bit(WantReplacement, &rdev->flags);
2667         } else if (cmd_match(buf, "replacement")) {
2668                 /* Can only set a device as a replacement when array has not
2669                  * yet been started.  Once running, replacement is automatic
2670                  * from spares, or by assigning 'slot'.
2671                  */
2672                 if (rdev->mddev->pers)
2673                         err = -EBUSY;
2674                 else {
2675                         set_bit(Replacement, &rdev->flags);
2676                         err = 0;
2677                 }
2678         } else if (cmd_match(buf, "-replacement")) {
2679                 /* Similarly, can only clear Replacement before start */
2680                 if (rdev->mddev->pers)
2681                         err = -EBUSY;
2682                 else {
2683                         clear_bit(Replacement, &rdev->flags);
2684                         err = 0;
2685                 }
2686         } else if (cmd_match(buf, "re-add")) {
2687                 if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1)) {
2688                         /* clear_bit is performed _after_ all the devices
2689                          * have their local Faulty bit cleared. If any writes
2690                          * happen in the meantime in the local node, they
2691                          * will land in the local bitmap, which will be synced
2692                          * by this node eventually
2693                          */
2694                         if (!mddev_is_clustered(rdev->mddev) ||
2695                             (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) {
2696                                 clear_bit(Faulty, &rdev->flags);
2697                                 err = add_bound_rdev(rdev);
2698                         }
2699                 } else
2700                         err = -EBUSY;
2701         }
2702         if (!err)
2703                 sysfs_notify_dirent_safe(rdev->sysfs_state);
2704         return err ? err : len;
2705 }
2706 static struct rdev_sysfs_entry rdev_state =
2707 __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
2708
2709 static ssize_t
2710 errors_show(struct md_rdev *rdev, char *page)
2711 {
2712         return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
2713 }
2714
2715 static ssize_t
2716 errors_store(struct md_rdev *rdev, const char *buf, size_t len)
2717 {
2718         unsigned int n;
2719         int rv;
2720
2721         rv = kstrtouint(buf, 10, &n);
2722         if (rv < 0)
2723                 return rv;
2724         atomic_set(&rdev->corrected_errors, n);
2725         return len;
2726 }
2727 static struct rdev_sysfs_entry rdev_errors =
2728 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
2729
2730 static ssize_t
2731 slot_show(struct md_rdev *rdev, char *page)
2732 {
2733         if (test_bit(Journal, &rdev->flags))
2734                 return sprintf(page, "journal\n");
2735         else if (rdev->raid_disk < 0)
2736                 return sprintf(page, "none\n");
2737         else
2738                 return sprintf(page, "%d\n", rdev->raid_disk);
2739 }
2740
2741 static ssize_t
2742 slot_store(struct md_rdev *rdev, const char *buf, size_t len)
2743 {
2744         int slot;
2745         int err;
2746
2747         if (test_bit(Journal, &rdev->flags))
2748                 return -EBUSY;
2749         if (strncmp(buf, "none", 4)==0)
2750                 slot = -1;
2751         else {
2752                 err = kstrtouint(buf, 10, (unsigned int *)&slot);
2753                 if (err < 0)
2754                         return err;
2755         }
2756         if (rdev->mddev->pers && slot == -1) {
2757                 /* Setting 'slot' on an active array also requires
2758                  * updating the 'rd%d' link and communicating
2759                  * with the personality via ->hot_*_disk.
2760                  * For now we only support removing
2761                  * failed/spare devices.  This normally happens automatically,
2762                  * but not when the metadata is externally managed.
2763                  */
2764                 if (rdev->raid_disk == -1)
2765                         return -EEXIST;
2766                 /* personality does all needed checks */
2767                 if (rdev->mddev->pers->hot_remove_disk == NULL)
2768                         return -EINVAL;
2769                 clear_bit(Blocked, &rdev->flags);
2770                 remove_and_add_spares(rdev->mddev, rdev);
2771                 if (rdev->raid_disk >= 0)
2772                         return -EBUSY;
2773                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2774                 md_wakeup_thread(rdev->mddev->thread);
2775         } else if (rdev->mddev->pers) {
2776                 /* Activating a spare ... or possibly reactivating
2777                  * if we ever get bitmaps working here.
2778                  */
2779                 int err;
2780
2781                 if (rdev->raid_disk != -1)
2782                         return -EBUSY;
2783
2784                 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
2785                         return -EBUSY;
2786
2787                 if (rdev->mddev->pers->hot_add_disk == NULL)
2788                         return -EINVAL;
2789
2790                 if (slot >= rdev->mddev->raid_disks &&
2791                     slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
2792                         return -ENOSPC;
2793
2794                 rdev->raid_disk = slot;
2795                 if (test_bit(In_sync, &rdev->flags))
2796                         rdev->saved_raid_disk = slot;
2797                 else
2798                         rdev->saved_raid_disk = -1;
2799                 clear_bit(In_sync, &rdev->flags);
2800                 clear_bit(Bitmap_sync, &rdev->flags);
2801                 err = rdev->mddev->pers->
2802                         hot_add_disk(rdev->mddev, rdev);
2803                 if (err) {
2804                         rdev->raid_disk = -1;
2805                         return err;
2806                 } else
2807                         sysfs_notify_dirent_safe(rdev->sysfs_state);
2808                 if (sysfs_link_rdev(rdev->mddev, rdev))
2809                         /* failure here is OK */;
2810                 /* don't wakeup anyone, leave that to userspace. */
2811         } else {
2812                 if (slot >= rdev->mddev->raid_disks &&
2813                     slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
2814                         return -ENOSPC;
2815                 rdev->raid_disk = slot;
2816                 /* assume it is working */
2817                 clear_bit(Faulty, &rdev->flags);
2818                 clear_bit(WriteMostly, &rdev->flags);
2819                 set_bit(In_sync, &rdev->flags);
2820                 sysfs_notify_dirent_safe(rdev->sysfs_state);
2821         }
2822         return len;
2823 }
2824
2825 static struct rdev_sysfs_entry rdev_slot =
2826 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
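
/* Usage sketch (editorial, paths illustrative): 'slot' reads back
 * "none", "journal" or a raid-disk number, and accepts a number or
 * "none":
 *
 *   cat /sys/block/md0/md/dev-sdb/slot           # e.g. "1" or "none"
 *   echo none > /sys/block/md0/md/dev-sdb/slot   # remove from its slot
 *   echo 2 > /sys/block/md0/md/dev-sdb/slot      # activate in slot 2
 */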
2827
2828 static ssize_t
2829 offset_show(struct md_rdev *rdev, char *page)
2830 {
2831         return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
2832 }
2833
2834 static ssize_t
2835 offset_store(struct md_rdev *rdev, const char *buf, size_t len)
2836 {
2837         unsigned long long offset;
2838         if (kstrtoull(buf, 10, &offset) < 0)
2839                 return -EINVAL;
2840         if (rdev->mddev->pers && rdev->raid_disk >= 0)
2841                 return -EBUSY;
2842         if (rdev->sectors && rdev->mddev->external)
2843                 /* Must set offset before size, so overlap checks
2844                  * can be sane */
2845                 return -EBUSY;
2846         rdev->data_offset = offset;
2847         rdev->new_data_offset = offset;
2848         return len;
2849 }
2850
2851 static struct rdev_sysfs_entry rdev_offset =
2852 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
2853
2854 static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
2855 {
2856         return sprintf(page, "%llu\n",
2857                        (unsigned long long)rdev->new_data_offset);
2858 }
2859
2860 static ssize_t new_offset_store(struct md_rdev *rdev,
2861                                 const char *buf, size_t len)
2862 {
2863         unsigned long long new_offset;
2864         struct mddev *mddev = rdev->mddev;
2865
2866         if (kstrtoull(buf, 10, &new_offset) < 0)
2867                 return -EINVAL;
2868
2869         if (mddev->sync_thread ||
2870             test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
2871                 return -EBUSY;
2872         if (new_offset == rdev->data_offset)
2873                 /* reset is always permitted */
2874                 ;
2875         else if (new_offset > rdev->data_offset) {
2876                 /* must not push array size beyond rdev->sectors */
2877                 if (new_offset - rdev->data_offset
2878                     + mddev->dev_sectors > rdev->sectors)
2879                         return -E2BIG;
2880         }
2881         /* Metadata worries about other space details. */
2882
2883         /* decreasing the offset is inconsistent with a backwards
2884          * reshape.
2885          */
2886         if (new_offset < rdev->data_offset &&
2887             mddev->reshape_backwards)
2888                 return -EINVAL;
2889         /* Increasing offset is inconsistent with forwards
2890          * reshape.  reshape_direction should be set to
2891          * 'backwards' first.
2892          */
2893         if (new_offset > rdev->data_offset &&
2894             !mddev->reshape_backwards)
2895                 return -EINVAL;
2896
2897         if (mddev->pers && mddev->persistent &&
2898             !super_types[mddev->major_version]
2899             .allow_new_offset(rdev, new_offset))
2900                 return -E2BIG;
2901         rdev->new_data_offset = new_offset;
2902         if (new_offset > rdev->data_offset)
2903                 mddev->reshape_backwards = 1;
2904         else if (new_offset < rdev->data_offset)
2905                 mddev->reshape_backwards = 0;
2906
2907         return len;
2908 }
2909 static struct rdev_sysfs_entry rdev_new_offset =
2910 __ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);
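
/* Usage sketch (editorial, values illustrative): writing a
 * 'new_offset' different from 'offset' also selects the reshape
 * direction, as implemented above -- a larger value implies a
 * backwards reshape:
 *
 *   cat /sys/block/md0/md/dev-sdb/offset        # e.g. 2048
 *   echo 4096 > /sys/block/md0/md/dev-sdb/new_offset
 */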
2911
2912 static ssize_t
2913 rdev_size_show(struct md_rdev *rdev, char *page)
2914 {
2915         return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
2916 }
2917
2918 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
2919 {
2920         /* check if two start/length pairs overlap */
2921         if (s1+l1 <= s2)
2922                 return 0;
2923         if (s2+l2 <= s1)
2924                 return 0;
2925         return 1;
2926 }
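
/* For example, overlaps(0, 100, 50, 10) is true while
 * overlaps(0, 100, 100, 10) is false: ranges are half-open, [s, s+l).
 */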
2927
2928 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
2929 {
2930         unsigned long long blocks;
2931         sector_t new;
2932
2933         if (kstrtoull(buf, 10, &blocks) < 0)
2934                 return -EINVAL;
2935
2936         if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
2937                 return -EINVAL; /* sector conversion overflow */
2938
2939         new = blocks * 2;
2940         if (new != blocks * 2)
2941                 return -EINVAL; /* unsigned long long to sector_t overflow */
2942
2943         *sectors = new;
2944         return 0;
2945 }
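
/* Worked example (editorial): "1024" parses as 1024 1K-blocks and
 * yields *sectors == 2048 (512-byte sectors, i.e. 1MiB); a value with
 * the top bit set, or one whose doubling does not fit in sector_t,
 * returns -EINVAL instead.
 */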
2946
2947 static ssize_t
2948 rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
2949 {
2950         struct mddev *my_mddev = rdev->mddev;
2951         sector_t oldsectors = rdev->sectors;
2952         sector_t sectors;
2953
2954         if (test_bit(Journal, &rdev->flags))
2955                 return -EBUSY;
2956         if (strict_blocks_to_sectors(buf, &sectors) < 0)
2957                 return -EINVAL;
2958         if (rdev->data_offset != rdev->new_data_offset)
2959                 return -EINVAL; /* too confusing */
2960         if (my_mddev->pers && rdev->raid_disk >= 0) {
2961                 if (my_mddev->persistent) {
2962                         sectors = super_types[my_mddev->major_version].
2963                                 rdev_size_change(rdev, sectors);
2964                         if (!sectors)
2965                                 return -EBUSY;
2966                 } else if (!sectors)
2967                         sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
2968                                 rdev->data_offset;
2969                 if (!my_mddev->pers->resize)
2970                         /* Cannot change size for RAID0 or Linear etc */
2971                         return -EINVAL;
2972         }
2973         if (sectors < my_mddev->dev_sectors)
2974                 return -EINVAL; /* component must fit device */
2975
2976         rdev->sectors = sectors;
2977         if (sectors > oldsectors && my_mddev->external) {
2978                 /* Need to check that all other rdevs with the same
2979                  * ->bdev do not overlap.  'rcu' is sufficient to walk
2980                  * the rdev lists safely.
2981                  * This check does not provide a hard guarantee, it
2982                  * just helps avoid dangerous mistakes.
2983                  */
2984                 struct mddev *mddev;
2985                 int overlap = 0;
2986                 struct list_head *tmp;
2987
2988                 rcu_read_lock();
2989                 for_each_mddev(mddev, tmp) {
2990                         struct md_rdev *rdev2;
2991
2992                         rdev_for_each(rdev2, mddev)
2993                                 if (rdev->bdev == rdev2->bdev &&
2994                                     rdev != rdev2 &&
2995                                     overlaps(rdev->data_offset, rdev->sectors,
2996                                              rdev2->data_offset,
2997                                              rdev2->sectors)) {
2998                                         overlap = 1;
2999                                         break;
3000                                 }
3001                         if (overlap) {
3002                                 mddev_put(mddev);
3003                                 break;
3004                         }
3005                 }
3006                 rcu_read_unlock();
3007                 if (overlap) {
3008                         /* Someone else could have slipped in a size
3009                          * change here, but doing so is just silly.
3010                          * We put oldsectors back because we *know* it is
3011                          * safe, and trust userspace not to race with
3012                          * itself
3013                          */
3014                         rdev->sectors = oldsectors;
3015                         return -EBUSY;
3016                 }
3017         }
3018         return len;
3019 }
3020
3021 static struct rdev_sysfs_entry rdev_size =
3022 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
3023
3024 static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
3025 {
3026         unsigned long long recovery_start = rdev->recovery_offset;
3027
3028         if (test_bit(In_sync, &rdev->flags) ||
3029             recovery_start == MaxSector)
3030                 return sprintf(page, "none\n");
3031
3032         return sprintf(page, "%llu\n", recovery_start);
3033 }
3034
3035 static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
3036 {
3037         unsigned long long recovery_start;
3038
3039         if (cmd_match(buf, "none"))
3040                 recovery_start = MaxSector;
3041         else if (kstrtoull(buf, 10, &recovery_start))
3042                 return -EINVAL;
3043
3044         if (rdev->mddev->pers &&
3045             rdev->raid_disk >= 0)
3046                 return -EBUSY;
3047
3048         rdev->recovery_offset = recovery_start;
3049         if (recovery_start == MaxSector)
3050                 set_bit(In_sync, &rdev->flags);
3051         else
3052                 clear_bit(In_sync, &rdev->flags);
3053         return len;
3054 }
3055
3056 static struct rdev_sysfs_entry rdev_recovery_start =
3057 __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
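
/* Usage sketch (editorial, paths illustrative): 'recovery_start' can
 * only be changed while the device is not active in the array:
 *
 *   echo 12345 > /sys/block/md0/md/dev-sdb/recovery_start
 *   echo none > /sys/block/md0/md/dev-sdb/recovery_start  # fully in-sync
 */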
3058
3059 /* sysfs access to bad-blocks list.
3060  * We present two files.
3061  * 'bad_blocks' lists sector numbers and lengths of ranges that
3062  *    are recorded as bad.  The list is truncated to fit within
3063  *    the one-page limit of sysfs.
3064  *    Writing "sector length" to this file adds an acknowledged
3065  *    bad block to the list.
3066  * 'unacknowledged_bad_blocks' lists bad blocks that have not yet
3067  *    been acknowledged.  Writing to this file adds bad blocks
3068  *    without acknowledging them.  This is largely for testing.
3069  */
3070 static ssize_t bb_show(struct md_rdev *rdev, char *page)
3071 {
3072         return badblocks_show(&rdev->badblocks, page, 0);
3073 }
3074 static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
3075 {
3076         int rv = badblocks_store(&rdev->badblocks, page, len, 0);
3077         /* Maybe that ack was all we needed */
3078         if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
3079                 wake_up(&rdev->blocked_wait);
3080         return rv;
3081 }
3082 static struct rdev_sysfs_entry rdev_bad_blocks =
3083 __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
3084
3085 static ssize_t ubb_show(struct md_rdev *rdev, char *page)
3086 {
3087         return badblocks_show(&rdev->badblocks, page, 1);
3088 }
3089 static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
3090 {
3091         return badblocks_store(&rdev->badblocks, page, len, 1);
3092 }
3093 static struct rdev_sysfs_entry rdev_unack_bad_blocks =
3094 __ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
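
/* Usage sketch (editorial, values illustrative): both files take a
 * "sector length" pair; reading shows the recorded ranges:
 *
 *   echo "4096 8" > /sys/block/md0/md/dev-sdb/bad_blocks
 *   cat /sys/block/md0/md/dev-sdb/bad_blocks
 */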
3095
3096 static struct attribute *rdev_default_attrs[] = {
3097         &rdev_state.attr,
3098         &rdev_errors.attr,
3099         &rdev_slot.attr,
3100         &rdev_offset.attr,
3101         &rdev_new_offset.attr,
3102         &rdev_size.attr,
3103         &rdev_recovery_start.attr,
3104         &rdev_bad_blocks.attr,
3105         &rdev_unack_bad_blocks.attr,
3106         NULL,
3107 };
3108 static ssize_t
3109 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3110 {
3111         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3112         struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3113
3114         if (!entry->show)
3115                 return -EIO;
3116         if (!rdev->mddev)
3117                 return -EBUSY;
3118         return entry->show(rdev, page);
3119 }
3120
3121 static ssize_t
3122 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
3123               const char *page, size_t length)
3124 {
3125         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3126         struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3127         ssize_t rv;
3128         struct mddev *mddev = rdev->mddev;
3129
3130         if (!entry->store)
3131                 return -EIO;
3132         if (!capable(CAP_SYS_ADMIN))
3133                 return -EACCES;
3134         rv = mddev ? mddev_lock(mddev) : -EBUSY;
3135         if (!rv) {
3136                 if (rdev->mddev == NULL)
3137                         rv = -EBUSY;
3138                 else
3139                         rv = entry->store(rdev, page, length);
3140                 mddev_unlock(mddev);
3141         }
3142         return rv;
3143 }
3144
3145 static void rdev_free(struct kobject *ko)
3146 {
3147         struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
3148         kfree(rdev);
3149 }
3150 static const struct sysfs_ops rdev_sysfs_ops = {
3151         .show           = rdev_attr_show,
3152         .store          = rdev_attr_store,
3153 };
3154 static struct kobj_type rdev_ktype = {
3155         .release        = rdev_free,
3156         .sysfs_ops      = &rdev_sysfs_ops,
3157         .default_attrs  = rdev_default_attrs,
3158 };
3159
3160 int md_rdev_init(struct md_rdev *rdev)
3161 {
3162         rdev->desc_nr = -1;
3163         rdev->saved_raid_disk = -1;
3164         rdev->raid_disk = -1;
3165         rdev->flags = 0;
3166         rdev->data_offset = 0;
3167         rdev->new_data_offset = 0;
3168         rdev->sb_events = 0;
3169         rdev->last_read_error.tv_sec  = 0;
3170         rdev->last_read_error.tv_nsec = 0;
3171         rdev->sb_loaded = 0;
3172         rdev->bb_page = NULL;
3173         atomic_set(&rdev->nr_pending, 0);
3174         atomic_set(&rdev->read_errors, 0);
3175         atomic_set(&rdev->corrected_errors, 0);
3176
3177         INIT_LIST_HEAD(&rdev->same_set);
3178         init_waitqueue_head(&rdev->blocked_wait);
3179
3180         /* Add space to store bad block list.
3181          * This reserves the space even on arrays where it cannot
3182          * be used - I wonder if that matters
3183          */
3184         return badblocks_init(&rdev->badblocks, 0);
3185 }
3186 EXPORT_SYMBOL_GPL(md_rdev_init);
3187 /*
3188  * Import a device. If 'super_format' >= 0, then sanity check the superblock
3189  *
3190  * mark the device faulty if:
3191  *
3192  *   - the device is nonexistent (zero size)
3193  *   - the device has no valid superblock
3194  *
3195  * a faulty rdev _never_ has rdev->sb set.
3196  */
3197 static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
3198 {
3199         char b[BDEVNAME_SIZE];
3200         int err;
3201         struct md_rdev *rdev;
3202         sector_t size;
3203
3204         rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
3205         if (!rdev) {
3206                 printk(KERN_ERR "md: could not alloc mem for new device!\n");
3207                 return ERR_PTR(-ENOMEM);
3208         }
3209
3210         err = md_rdev_init(rdev);
3211         if (err)
3212                 goto abort_free;
3213         err = alloc_disk_sb(rdev);
3214         if (err)
3215                 goto abort_free;
3216
3217         err = lock_rdev(rdev, newdev, super_format == -2);
3218         if (err)
3219                 goto abort_free;
3220
3221         kobject_init(&rdev->kobj, &rdev_ktype);
3222
3223         size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
3224         if (!size) {
3225                 printk(KERN_WARNING
3226                         "md: %s has zero or unknown size, marking faulty!\n",
3227                         bdevname(rdev->bdev,b));
3228                 err = -EINVAL;
3229                 goto abort_free;
3230         }
3231
3232         if (super_format >= 0) {
3233                 err = super_types[super_format].
3234                         load_super(rdev, NULL, super_minor);
3235                 if (err == -EINVAL) {
3236                         printk(KERN_WARNING
3237                                 "md: %s does not have a valid v%d.%d "
3238                                "superblock, not importing!\n",
3239                                 bdevname(rdev->bdev,b),
3240                                super_format, super_minor);
3241                         goto abort_free;
3242                 }
3243                 if (err < 0) {
3244                         printk(KERN_WARNING
3245                                 "md: could not read %s's sb, not importing!\n",
3246                                 bdevname(rdev->bdev,b));
3247                         goto abort_free;
3248                 }
3249         }
3250
3251         return rdev;
3252
3253 abort_free:
3254         if (rdev->bdev)
3255                 unlock_rdev(rdev);
3256         md_rdev_clear(rdev);
3257         kfree(rdev);
3258         return ERR_PTR(err);
3259 }
3260
3261 /*
3262  * Check a full RAID array for plausibility
3263  */
3264
3265 static void analyze_sbs(struct mddev *mddev)
3266 {
3267         int i;
3268         struct md_rdev *rdev, *freshest, *tmp;
3269         char b[BDEVNAME_SIZE];
3270
3271         freshest = NULL;
3272         rdev_for_each_safe(rdev, tmp, mddev)
3273                 switch (super_types[mddev->major_version].
3274                         load_super(rdev, freshest, mddev->minor_version)) {
3275                 case 1:
3276                         freshest = rdev;
3277                         break;
3278                 case 0:
3279                         break;
3280                 default:
3281                         printk(KERN_ERR
3282                                 "md: fatal superblock inconsistency in %s"
3283                                 " -- removing from array\n",
3284                                 bdevname(rdev->bdev,b));
3285                         md_kick_rdev_from_array(rdev);
3286                 }
3287
3288         super_types[mddev->major_version].
3289                 validate_super(mddev, freshest);
3290
3291         i = 0;
3292         rdev_for_each_safe(rdev, tmp, mddev) {
3293                 if (mddev->max_disks &&
3294                     (rdev->desc_nr >= mddev->max_disks ||
3295                      i > mddev->max_disks)) {
3296                         printk(KERN_WARNING
3297                                "md: %s: %s: only %d devices permitted\n",
3298                                mdname(mddev), bdevname(rdev->bdev, b),
3299                                mddev->max_disks);
3300                         md_kick_rdev_from_array(rdev);
3301                         continue;
3302                 }
3303                 if (rdev != freshest) {
3304                         if (super_types[mddev->major_version].
3305                             validate_super(mddev, rdev)) {
3306                                 printk(KERN_WARNING "md: kicking non-fresh %s"
3307                                         " from array!\n",
3308                                         bdevname(rdev->bdev,b));
3309                                 md_kick_rdev_from_array(rdev);
3310                                 continue;
3311                         }
3312                 }
3313                 if (mddev->level == LEVEL_MULTIPATH) {
3314                         rdev->desc_nr = i++;
3315                         rdev->raid_disk = rdev->desc_nr;
3316                         set_bit(In_sync, &rdev->flags);
3317                 } else if (rdev->raid_disk >=
3318                             (mddev->raid_disks - min(0, mddev->delta_disks)) &&
3319                            !test_bit(Journal, &rdev->flags)) {
3320                         rdev->raid_disk = -1;
3321                         clear_bit(In_sync, &rdev->flags);
3322                 }
3323         }
3324 }
3325
3326 /* Read a fixed-point number.
3327  * Numbers in sysfs attributes should be in "standard" units where
3328  * possible, so time should be in seconds.
3329  * However we internally use a much smaller unit such as
3330  * milliseconds or jiffies.
3331  * This function takes a decimal number with a possible fractional
3332  * component, and produces an integer which is the result of
3333  * multiplying that number by 10^'scale', all without any
3334  * floating-point arithmetic.
3335  */
3336 int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
3337 {
3338         unsigned long result = 0;
3339         long decimals = -1;
3340         while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
3341                 if (*cp == '.')
3342                         decimals = 0;
3343                 else if (decimals < scale) {
3344                         unsigned int value;
3345                         value = *cp - '0';
3346                         result = result * 10 + value;
3347                         if (decimals >= 0)
3348                                 decimals++;
3349                 }
3350                 cp++;
3351         }
3352         if (*cp == '\n')
3353                 cp++;
3354         if (*cp)
3355                 return -EINVAL;
3356         if (decimals < 0)
3357                 decimals = 0;
3358         while (decimals < scale) {
3359                 result *= 10;
3360                 decimals++;
3361         }
3362         *res = result;
3363         return 0;
3364 }
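
/* Worked examples (editorial): with scale == 3,
 *   "0.200" -> 200   (parsed digit by digit, no rounding)
 *   "1.5"   -> 1500  (padded with trailing zeros up to 'scale')
 *   "2"     -> 2000
 */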
3365
3366 static ssize_t
3367 safe_delay_show(struct mddev *mddev, char *page)
3368 {
3369         int msec = (mddev->safemode_delay*1000)/HZ;
3370         return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
3371 }
3372 static ssize_t
3373 safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
3374 {
3375         unsigned long msec;
3376
3377         if (mddev_is_clustered(mddev)) {
3378                 pr_info("md: Safemode is disabled for clustered mode\n");
3379                 return -EINVAL;
3380         }
3381
3382         if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
3383                 return -EINVAL;
3384         if (msec == 0)
3385                 mddev->safemode_delay = 0;
3386         else {
3387                 unsigned long old_delay = mddev->safemode_delay;
3388                 unsigned long new_delay = (msec*HZ)/1000;
3389
3390                 if (new_delay == 0)
3391                         new_delay = 1;
3392                 mddev->safemode_delay = new_delay;
3393                 if (new_delay < old_delay || old_delay == 0)
3394                         mod_timer(&mddev->safemode_timer, jiffies+1);
3395         }
3396         return len;
3397 }
3398 static struct md_sysfs_entry md_safe_delay =
3399 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
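
/* Usage sketch (editorial, path illustrative): the delay is given in
 * seconds with up to millisecond resolution, parsed by
 * strict_strtoul_scaled() above:
 *
 *   echo 0.200 > /sys/block/md0/md/safe_mode_delay
 */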
3400
3401 static ssize_t
3402 level_show(struct mddev *mddev, char *page)
3403 {
3404         struct md_personality *p;
3405         int ret;
3406         spin_lock(&mddev->lock);
3407         p = mddev->pers;
3408         if (p)
3409                 ret = sprintf(page, "%s\n", p->name);
3410         else if (mddev->clevel[0])
3411                 ret = sprintf(page, "%s\n", mddev->clevel);
3412         else if (mddev->level != LEVEL_NONE)
3413                 ret = sprintf(page, "%d\n", mddev->level);
3414         else
3415                 ret = 0;
3416         spin_unlock(&mddev->lock);
3417         return ret;
3418 }
3419
3420 static ssize_t
3421 level_store(struct mddev *mddev, const char *buf, size_t len)
3422 {
3423         char clevel[16];
3424         ssize_t rv;
3425         size_t slen = len;
3426         struct md_personality *pers, *oldpers;
3427         long level;
3428         void *priv, *oldpriv;
3429         struct md_rdev *rdev;
3430
3431         if (slen == 0 || slen >= sizeof(clevel))
3432                 return -EINVAL;
3433
3434         rv = mddev_lock(mddev);
3435         if (rv)
3436                 return rv;
3437
3438         if (mddev->pers == NULL) {
3439                 strncpy(mddev->clevel, buf, slen);
3440                 if (mddev->clevel[slen-1] == '\n')
3441                         slen--;
3442                 mddev->clevel[slen] = 0;
3443                 mddev->level = LEVEL_NONE;
3444                 rv = len;
3445                 goto out_unlock;
3446         }
3447         rv = -EROFS;
3448         if (mddev->ro)
3449                 goto out_unlock;
3450
3451         /* request to change the personality.  Need to ensure:
3452          *  - array is not engaged in resync/recovery/reshape
3453          *  - old personality can be suspended
3454          *  - new personality will accept the array (->takeover succeeds).
3455          */
3456
3457         rv = -EBUSY;
3458         if (mddev->sync_thread ||
3459             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3460             mddev->reshape_position != MaxSector ||
3461             mddev->sysfs_active)
3462                 goto out_unlock;
3463
3464         rv = -EINVAL;
3465         if (!mddev->pers->quiesce) {
3466                 printk(KERN_WARNING "md: %s: %s does not support online personality change\n",
3467                        mdname(mddev), mddev->pers->name);
3468                 goto out_unlock;
3469         }
3470
3471         /* Now find the new personality */
3472         strncpy(clevel, buf, slen);
3473         if (clevel[slen-1] == '\n')
3474                 slen--;
3475         clevel[slen] = 0;
3476         if (kstrtol(clevel, 10, &level))
3477                 level = LEVEL_NONE;
3478
3479         if (request_module("md-%s", clevel) != 0)
3480                 request_module("md-level-%s", clevel);
3481         spin_lock(&pers_lock);
3482         pers = find_pers(level, clevel);
3483         if (!pers || !try_module_get(pers->owner)) {
3484                 spin_unlock(&pers_lock);
3485                 printk(KERN_WARNING "md: personality %s not loaded\n", clevel);
3486                 rv = -EINVAL;
3487                 goto out_unlock;
3488         }
3489         spin_unlock(&pers_lock);
3490
3491         if (pers == mddev->pers) {
3492                 /* Nothing to do! */
3493                 module_put(pers->owner);
3494                 rv = len;
3495                 goto out_unlock;
3496         }
3497         if (!pers->takeover) {
3498                 module_put(pers->owner);
3499                 printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
3500                        mdname(mddev), clevel);
3501                 rv = -EINVAL;
3502                 goto out_unlock;
3503         }
3504
3505         rdev_for_each(rdev, mddev)
3506                 rdev->new_raid_disk = rdev->raid_disk;
3507
3508         /* ->takeover must set new_* and/or delta_disks
3509          * if it succeeds, and may set them when it fails.
3510          */
3511         priv = pers->takeover(mddev);
3512         if (IS_ERR(priv)) {
3513                 mddev->new_level = mddev->level;
3514                 mddev->new_layout = mddev->layout;
3515                 mddev->new_chunk_sectors = mddev->chunk_sectors;
3516                 mddev->raid_disks -= mddev->delta_disks;
3517                 mddev->delta_disks = 0;
3518                 mddev->reshape_backwards = 0;
3519                 module_put(pers->owner);
3520                 printk(KERN_WARNING "md: %s: %s would not accept array\n",
3521                        mdname(mddev), clevel);
3522                 rv = PTR_ERR(priv);
3523                 goto out_unlock;
3524         }
3525
3526         /* Looks like we have a winner */
3527         mddev_suspend(mddev);
3528         mddev_detach(mddev);
3529
3530         spin_lock(&mddev->lock);
3531         oldpers = mddev->pers;
3532         oldpriv = mddev->private;
3533         mddev->pers = pers;
3534         mddev->private = priv;
3535         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
3536         mddev->level = mddev->new_level;
3537         mddev->layout = mddev->new_layout;
3538         mddev->chunk_sectors = mddev->new_chunk_sectors;
3539         mddev->delta_disks = 0;
3540         mddev->reshape_backwards = 0;
3541         mddev->degraded = 0;
3542         spin_unlock(&mddev->lock);
3543
3544         if (oldpers->sync_request == NULL &&
3545             mddev->external) {
3546                 /* We are converting from a no-redundancy array
3547                  * to a redundancy array, and metadata is managed
3548                  * externally, so we need to be sure that writes
3549                  * won't block due to a need to transition
3550                  *      clean->dirty
3551                  * until external management is started.
3552                  */
3553                 mddev->in_sync = 0;
3554                 mddev->safemode_delay = 0;
3555                 mddev->safemode = 0;
3556         }
3557
3558         oldpers->free(mddev, oldpriv);
3559
3560         if (oldpers->sync_request == NULL &&
3561             pers->sync_request != NULL) {
3562                 /* need to add the md_redundancy_group */
3563                 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
3564                         printk(KERN_WARNING
3565                                "md: cannot register extra attributes for %s\n",
3566                                mdname(mddev));
3567                 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
3568         }
3569         if (oldpers->sync_request != NULL &&
3570             pers->sync_request == NULL) {
3571                 /* need to remove the md_redundancy_group */
3572                 if (mddev->to_remove == NULL)
3573                         mddev->to_remove = &md_redundancy_group;
3574         }
3575
3576         rdev_for_each(rdev, mddev) {
3577                 if (rdev->raid_disk < 0)
3578                         continue;
3579                 if (rdev->new_raid_disk >= mddev->raid_disks)
3580                         rdev->new_raid_disk = -1;
3581                 if (rdev->new_raid_disk == rdev->raid_disk)
3582                         continue;
3583                 sysfs_unlink_rdev(mddev, rdev);
3584         }
3585         rdev_for_each(rdev, mddev) {
3586                 if (rdev->raid_disk < 0)
3587                         continue;
3588                 if (rdev->new_raid_disk == rdev->raid_disk)
3589                         continue;
3590                 rdev->raid_disk = rdev->new_raid_disk;
3591                 if (rdev->raid_disk < 0)
3592                         clear_bit(In_sync, &rdev->flags);
3593                 else {
3594                         if (sysfs_link_rdev(mddev, rdev))
3595                                 printk(KERN_WARNING "md: cannot register rd%d"
3596                                        " for %s after level change\n",
3597                                        rdev->raid_disk, mdname(mddev));
3598                 }
3599         }
3600
3601         if (pers->sync_request == NULL) {
3602                 /* this is now an array without redundancy, so
3603                  * it must always be in_sync
3604                  */
3605                 mddev->in_sync = 1;
3606                 del_timer_sync(&mddev->safemode_timer);
3607         }
3608         blk_set_stacking_limits(&mddev->queue->limits);
3609         pers->run(mddev);
3610         set_bit(MD_CHANGE_DEVS, &mddev->flags);
3611         mddev_resume(mddev);
3612         if (!mddev->thread)
3613                 md_update_sb(mddev, 1);
3614         sysfs_notify(&mddev->kobj, NULL, "level");
3615         md_new_event(mddev);
3616         rv = len;
3617 out_unlock:
3618         mddev_unlock(mddev);
3619         return rv;
3620 }
3621
3622 static struct md_sysfs_entry md_level =
3623 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
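
/* Usage sketch (editorial, names illustrative): on a running array the
 * write only succeeds if the new personality's ->takeover accepts the
 * current layout, e.g. converting a 2-drive raid1 to raid5:
 *
 *   echo raid5 > /sys/block/md0/md/level
 */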
3624
3625 static ssize_t
3626 layout_show(struct mddev *mddev, char *page)
3627 {
3628         /* just a number, not meaningful for all levels */
3629         if (mddev->reshape_position != MaxSector &&
3630             mddev->layout != mddev->new_layout)
3631                 return sprintf(page, "%d (%d)\n",
3632                                mddev->new_layout, mddev->layout);
3633         return sprintf(page, "%d\n", mddev->layout);
3634 }
3635
3636 static ssize_t
3637 layout_store(struct mddev *mddev, const char *buf, size_t len)
3638 {
3639         unsigned int n;
3640         int err;
3641
3642         err = kstrtouint(buf, 10, &n);
3643         if (err < 0)
3644                 return err;
3645         err = mddev_lock(mddev);
3646         if (err)
3647                 return err;
3648
3649         if (mddev->pers) {
3650                 if (mddev->pers->check_reshape == NULL)
3651                         err = -EBUSY;
3652                 else if (mddev->ro)
3653                         err = -EROFS;
3654                 else {
3655                         mddev->new_layout = n;
3656                         err = mddev->pers->check_reshape(mddev);
3657                         if (err)
3658                                 mddev->new_layout = mddev->layout;
3659                 }
3660         } else {
3661                 mddev->new_layout = n;
3662                 if (mddev->reshape_position == MaxSector)
3663                         mddev->layout = n;
3664         }
3665         mddev_unlock(mddev);
3666         return err ?: len;
3667 }
3668 static struct md_sysfs_entry md_layout =
3669 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
3670
3671 static ssize_t
3672 raid_disks_show(struct mddev *mddev, char *page)
3673 {
3674         if (mddev->raid_disks == 0)
3675                 return 0;
3676         if (mddev->reshape_position != MaxSector &&
3677             mddev->delta_disks != 0)
3678                 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
3679                                mddev->raid_disks - mddev->delta_disks);
3680         return sprintf(page, "%d\n", mddev->raid_disks);
3681 }
3682
3683 static int update_raid_disks(struct mddev *mddev, int raid_disks);
3684
3685 static ssize_t
3686 raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
3687 {
3688         unsigned int n;
3689         int err;
3690
3691         err = kstrtouint(buf, 10, &n);
3692         if (err < 0)
3693                 return err;
3694
3695         err = mddev_lock(mddev);
3696         if (err)
3697                 return err;
3698         if (mddev->pers)
3699                 err = update_raid_disks(mddev, n);
3700         else if (mddev->reshape_position != MaxSector) {
3701                 struct md_rdev *rdev;
3702                 int olddisks = mddev->raid_disks - mddev->delta_disks;
3703
3704                 err = -EINVAL;
3705                 rdev_for_each(rdev, mddev) {
3706                         if (olddisks < n &&
3707                             rdev->data_offset < rdev->new_data_offset)
3708                                 goto out_unlock;
3709                         if (olddisks > n &&
3710                             rdev->data_offset > rdev->new_data_offset)
3711                                 goto out_unlock;
3712                 }
3713                 err = 0;
3714                 mddev->delta_disks = n - olddisks;
3715                 mddev->raid_disks = n;
3716                 mddev->reshape_backwards = (mddev->delta_disks < 0);
3717         } else
3718                 mddev->raid_disks = n;
3719 out_unlock:
3720         mddev_unlock(mddev);
3721         return err ? err : len;
3722 }
3723 static struct md_sysfs_entry md_raid_disks =
3724 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
3725
3726 static ssize_t
3727 chunk_size_show(struct mddev *mddev, char *page)
3728 {
3729         if (mddev->reshape_position != MaxSector &&
3730             mddev->chunk_sectors != mddev->new_chunk_sectors)
3731                 return sprintf(page, "%d (%d)\n",
3732                                mddev->new_chunk_sectors << 9,
3733                                mddev->chunk_sectors << 9);
3734         return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
3735 }
3736
3737 static ssize_t
3738 chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
3739 {
3740         unsigned long n;
3741         int err;
3742
3743         err = kstrtoul(buf, 10, &n);
3744         if (err < 0)
3745                 return err;
3746
3747         err = mddev_lock(mddev);
3748         if (err)
3749                 return err;
3750         if (mddev->pers) {
3751                 if (mddev->pers->check_reshape == NULL)
3752                         err = -EBUSY;
3753                 else if (mddev->ro)
3754                         err = -EROFS;
3755                 else {
3756                         mddev->new_chunk_sectors = n >> 9;
3757                         err = mddev->pers->check_reshape(mddev);
3758                         if (err)
3759                                 mddev->new_chunk_sectors = mddev->chunk_sectors;
3760                 }
3761         } else {
3762                 mddev->new_chunk_sectors = n >> 9;
3763                 if (mddev->reshape_position == MaxSector)
3764                         mddev->chunk_sectors = n >> 9;
3765         }
3766         mddev_unlock(mddev);
3767         return err ?: len;
3768 }
3769 static struct md_sysfs_entry md_chunk_size =
3770 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
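
/* Usage sketch (editorial, value illustrative): chunk_size is read and
 * written in bytes (the code above shifts by 9 to convert to 512-byte
 * sectors), e.g. for 512KiB chunks:
 *
 *   echo 524288 > /sys/block/md0/md/chunk_size
 */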
3771
3772 static ssize_t
3773 resync_start_show(struct mddev *mddev, char *page)
3774 {
3775         if (mddev->recovery_cp == MaxSector)
3776                 return sprintf(page, "none\n");
3777         return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
3778 }
3779
3780 static ssize_t
3781 resync_start_store(struct mddev *mddev, const char *buf, size_t len)
3782 {
3783         unsigned long long n;
3784         int err;
3785
3786         if (cmd_match(buf, "none"))
3787                 n = MaxSector;
3788         else {
3789                 err = kstrtoull(buf, 10, &n);
3790                 if (err < 0)
3791                         return err;
3792                 if (n != (sector_t)n)
3793                         return -EINVAL;
3794         }
3795
3796         err = mddev_lock(mddev);
3797         if (err)
3798                 return err;
3799         if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
3800                 err = -EBUSY;
3801
3802         if (!err) {
3803                 mddev->recovery_cp = n;
3804                 if (mddev->pers)
3805                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
3806         }
3807         mddev_unlock(mddev);
3808         return err ?: len;
3809 }
3810 static struct md_sysfs_entry md_resync_start =
3811 __ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
3812                 resync_start_show, resync_start_store);
3813
3814 /*
3815  * The array state can be:
3816  *
3817  * clear
3818  *     No devices, no size, no level
3819  *     Equivalent to STOP_ARRAY ioctl
3820  * inactive
3821  *     May have some settings, but array is not active;
3822  *     all IO results in error.
3823  *     When written, doesn't tear down array, but just stops it.
3824  * suspended (not supported yet)
3825  *     All IO requests will block. The array can be reconfigured.
3826  *     Writing this, if accepted, will block until array is quiescent
3827  * readonly
3828  *     no resync can happen.  no superblocks get written.
3829  *     write requests fail.
3830  * read-auto
3831  *     like readonly, but behaves like 'clean' on a write request.
3832  *
3833  * clean
3834  *     no pending writes, but otherwise active.
3835  *     When written to an inactive array, starts without resync.
3836  *     If a write request arrives then:
3837  *       if metadata is known, mark 'dirty' and switch to 'active';
3838  *       if not known, block and switch to write-pending.
3839  *     If written to an active array that has pending writes, it fails.
3840  * active
3841  *     fully active: IO and resync can be happening.
3842  *     When written to an inactive array, starts with resync.
3843  *
3844  * write-pending
3845  *     clean, but writes are blocked waiting for 'active' to be written.
3846  *
3847  * active-idle
3848  *     like active, but no writes have been seen for a while (100msec).
3849  */
3850 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
3851                    write_pending, active_idle, bad_word};
3852 static char *array_states[] = {
3853         "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
3854         "write-pending", "active-idle", NULL };
3855
3856 static int match_word(const char *word, char **list)
3857 {
3858         int n;
3859         for (n=0; list[n]; n++)
3860                 if (cmd_match(word, list[n]))
3861                         break;
3862         return n;
3863 }
3864
3865 static ssize_t
3866 array_state_show(struct mddev *mddev, char *page)
3867 {
3868         enum array_state st = inactive;
3869
3870         if (mddev->pers)
3871                 switch(mddev->ro) {
3872                 case 1:
3873                         st = readonly;
3874                         break;
3875                 case 2:
3876                         st = read_auto;
3877                         break;
3878                 case 0:
3879                         if (mddev->in_sync)
3880                                 st = clean;
3881                         else if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
3882                                 st = write_pending;
3883                         else if (mddev->safemode)
3884                                 st = active_idle;
3885                         else
3886                                 st = active;
3887                 }
3888         else {
3889                 if (list_empty(&mddev->disks) &&
3890                     mddev->raid_disks == 0 &&
3891                     mddev->dev_sectors == 0)
3892                         st = clear;
3893                 else
3894                         st = inactive;
3895         }
3896         return sprintf(page, "%s\n", array_states[st]);
3897 }
3898
3899 static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
3900 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
3901 static int do_md_run(struct mddev *mddev);
3902 static int restart_array(struct mddev *mddev);
3903
3904 static ssize_t
3905 array_state_store(struct mddev *mddev, const char *buf, size_t len)
3906 {
3907         int err;
3908         enum array_state st = match_word(buf, array_states);
3909
3910         if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) {
3911                 /* don't take reconfig_mutex when toggling between
3912                  * clean and active
3913                  */
3914                 spin_lock(&mddev->lock);
3915                 if (st == active) {
3916                         restart_array(mddev);
3917                         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
3918                         wake_up(&mddev->sb_wait);
3919                         err = 0;
3920                 } else /* st == clean */ {
3921                         restart_array(mddev);
3922                         if (atomic_read(&mddev->writes_pending) == 0) {
3923                                 if (mddev->in_sync == 0) {
3924                                         mddev->in_sync = 1;
3925                                         if (mddev->safemode == 1)
3926                                                 mddev->safemode = 0;
3927                                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
3928                                 }
3929                                 err = 0;
3930                         } else
3931                                 err = -EBUSY;
3932                 }
3933                 spin_unlock(&mddev->lock);
3934                 return err ?: len;
3935         }
3936         err = mddev_lock(mddev);
3937         if (err)
3938                 return err;
3939         err = -EINVAL;
3940         switch(st) {
3941         case bad_word:
3942                 break;
3943         case clear:
3944                 /* stopping an active array */
3945                 err = do_md_stop(mddev, 0, NULL);
3946                 break;
3947         case inactive:
3948                 /* stopping an active array */
3949                 if (mddev->pers)
3950                         err = do_md_stop(mddev, 2, NULL);
3951                 else
3952                         err = 0; /* already inactive */
3953                 break;
3954         case suspended:
3955                 break; /* not supported yet */
3956         case readonly:
3957                 if (mddev->pers)
3958                         err = md_set_readonly(mddev, NULL);
3959                 else {
3960                         mddev->ro = 1;
3961                         set_disk_ro(mddev->gendisk, 1);
3962                         err = do_md_run(mddev);
3963                 }
3964                 break;
3965         case read_auto:
3966                 if (mddev->pers) {
3967                         if (mddev->ro == 0)
3968                                 err = md_set_readonly(mddev, NULL);
3969                         else if (mddev->ro == 1)
3970                                 err = restart_array(mddev);
3971                         if (err == 0) {
3972                                 mddev->ro = 2;
3973                                 set_disk_ro(mddev->gendisk, 0);
3974                         }
3975                 } else {
3976                         mddev->ro = 2;
3977                         err = do_md_run(mddev);
3978                 }
3979                 break;
3980         case clean:
3981                 if (mddev->pers) {
3982                         err = restart_array(mddev);
3983                         if (err)
3984                                 break;
3985                         spin_lock(&mddev->lock);
3986                         if (atomic_read(&mddev->writes_pending) == 0) {
3987                                 if (mddev->in_sync == 0) {
3988                                         mddev->in_sync = 1;
3989                                         if (mddev->safemode == 1)
3990                                                 mddev->safemode = 0;
3991                                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
3992                                 }
3993                                 err = 0;
3994                         } else
3995                                 err = -EBUSY;
3996                         spin_unlock(&mddev->lock);
3997                 } else
3998                         err = -EINVAL;
3999                 break;
4000         case active:
4001                 if (mddev->pers) {
4002                         err = restart_array(mddev);
4003                         if (err)
4004                                 break;
4005                         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
4006                         wake_up(&mddev->sb_wait);
4007                         err = 0;
4008                 } else {
4009                         mddev->ro = 0;
4010                         set_disk_ro(mddev->gendisk, 0);
4011                         err = do_md_run(mddev);
4012                 }
4013                 break;
4014         case write_pending:
4015         case active_idle:
4016                 /* these cannot be set */
4017                 break;
4018         }
4019
4020         if (!err) {
4021                 if (mddev->hold_active == UNTIL_IOCTL)
4022                         mddev->hold_active = 0;
4023                 sysfs_notify_dirent_safe(mddev->sysfs_state);
4024         }
4025         mddev_unlock(mddev);
4026         return err ?: len;
4027 }
4028 static struct md_sysfs_entry md_array_state =
4029 __ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
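
/* Usage sketch (editorial, path illustrative): the words accepted are
 * those in array_states[] above, e.g.:
 *
 *   cat /sys/block/md0/md/array_state        # e.g. "clean"
 *   echo readonly > /sys/block/md0/md/array_state
 *   echo active > /sys/block/md0/md/array_state
 */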
4030
4031 static ssize_t
4032 max_corrected_read_errors_show(struct mddev *mddev, char *page) {
4033         return sprintf(page, "%d\n",
4034                        atomic_read(&mddev->max_corr_read_errors));
4035 }
4036
4037 static ssize_t
4038 max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
4039 {
4040         unsigned int n;
4041         int rv;
4042
4043         rv = kstrtouint(buf, 10, &n);
4044         if (rv < 0)
4045                 return rv;
4046         atomic_set(&mddev->max_corr_read_errors, n);
4047         return len;
4048 }
4049
4050 static struct md_sysfs_entry max_corr_read_errors =
4051 __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
4052         max_corrected_read_errors_store);
4053
4054 static ssize_t
4055 null_show(struct mddev *mddev, char *page)
4056 {
4057         return -EINVAL;
4058 }
4059
4060 static ssize_t
4061 new_dev_store(struct mddev *mddev, const char *buf, size_t len)
4062 {
4063         /* buf must be "%d:%d" (with optional trailing newline), giving major and minor numbers */
4064         /* The new device is added to the array.
4065          * If the array has a persistent superblock, we read the
4066          * superblock to initialise info and check validity.
4067          * Otherwise, the only checking done is that in bind_rdev_to_array,
4068          * which mainly checks size.
4069          */
4070         char *e;
4071         int major = simple_strtoul(buf, &e, 10);
4072         int minor;
4073         dev_t dev;
4074         struct md_rdev *rdev;
4075         int err;
4076
4077         if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
4078                 return -EINVAL;
4079         minor = simple_strtoul(e+1, &e, 10);
4080         if (*e && *e != '\n')
4081                 return -EINVAL;
4082         dev = MKDEV(major, minor);
4083         if (major != MAJOR(dev) ||
4084             minor != MINOR(dev))
4085                 return -EOVERFLOW;
4086
4087         flush_workqueue(md_misc_wq);
4088
4089         err = mddev_lock(mddev);
4090         if (err)
4091                 return err;
4092         if (mddev->persistent) {
4093                 rdev = md_import_device(dev, mddev->major_version,
4094                                         mddev->minor_version);
4095                 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
4096                         struct md_rdev *rdev0
4097                                 = list_entry(mddev->disks.next,
4098                                              struct md_rdev, same_set);
4099                         err = super_types[mddev->major_version]
4100                                 .load_super(rdev, rdev0, mddev->minor_version);
4101                         if (err < 0)
4102                                 goto out;
4103                 }
4104         } else if (mddev->external)
4105                 rdev = md_import_device(dev, -2, -1);
4106         else
4107                 rdev = md_import_device(dev, -1, -1);
4108
4109         if (IS_ERR(rdev)) {
4110                 mddev_unlock(mddev);
4111                 return PTR_ERR(rdev);
4112         }
4113         err = bind_rdev_to_array(rdev, mddev);
4114  out:
4115         if (err)
4116                 export_rdev(rdev);
4117         mddev_unlock(mddev);
4118         return err ? err : len;
4119 }
4120
4121 static struct md_sysfs_entry md_new_device =
4122 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
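
/* Usage sketch (editorial, numbers illustrative): 'new_dev' takes a
 * "major:minor" pair identifying the block device to add, e.g. 8:16
 * for /dev/sdb:
 *
 *   echo 8:16 > /sys/block/md0/md/new_dev
 */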
4123
4124 static ssize_t
4125 bitmap_store(struct mddev *mddev, const char *buf, size_t len)
4126 {
4127         char *end;
4128         unsigned long chunk, end_chunk;
4129         int err;
4130
4131         err = mddev_lock(mddev);
4132         if (err)
4133                 return err;
4134         if (!mddev->bitmap)
4135                 goto out;
4136         /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
4137         while (*buf) {
4138                 chunk = end_chunk = simple_strtoul(buf, &end, 0);
4139                 if (buf == end) break;
4140                 if (*end == '-') { /* range */
4141                         buf = end + 1;
4142                         end_chunk = simple_strtoul(buf, &end, 0);
4143                         if (buf == end) break;
4144                 }
4145                 if (*end && !isspace(*end)) break;
4146                 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
4147                 buf = skip_spaces(end);
4148         }
4149         bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
4150 out:
4151         mddev_unlock(mddev);
4152         return len;
4153 }
4154
4155 static struct md_sysfs_entry md_bitmap =
4156 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
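
/* Illustrative sketch (editor's example, not part of this driver): on
 * an array with an active bitmap, bitmap_set_bits accepts space-
 * separated chunk numbers and first-last ranges.  Marking chunks
 * 100-200 and 250 dirty could look like this (path assumed):
 *
 *      int fd = open("/sys/block/md0/md/bitmap_set_bits", O_WRONLY);
 *      if (fd >= 0) {
 *              write(fd, "100-200 250", 11);
 *              close(fd);
 *      }
 */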
4157
4158 static ssize_t
4159 size_show(struct mddev *mddev, char *page)
4160 {
4161         return sprintf(page, "%llu\n",
4162                 (unsigned long long)mddev->dev_sectors / 2);
4163 }
4164
4165 static int update_size(struct mddev *mddev, sector_t num_sectors);
4166
4167 static ssize_t
4168 size_store(struct mddev *mddev, const char *buf, size_t len)
4169 {
4170         /* If array is inactive, we can reduce the component size, but
4171          * not increase it (except from 0).
4172          * If array is active, we can try an on-line resize
4173          */
4174         sector_t sectors;
4175         int err = strict_blocks_to_sectors(buf, &sectors);
4176
4177         if (err < 0)
4178                 return err;
4179         err = mddev_lock(mddev);
4180         if (err)
4181                 return err;
4182         if (mddev->pers) {
4183                 err = update_size(mddev, sectors);
4184                 md_update_sb(mddev, 1);
4185         } else {
4186                 if (mddev->dev_sectors == 0 ||
4187                     mddev->dev_sectors > sectors)
4188                         mddev->dev_sectors = sectors;
4189                 else
4190                         err = -ENOSPC;
4191         }
4192         mddev_unlock(mddev);
4193         return err ? err : len;
4194 }
4195
4196 static struct md_sysfs_entry md_size =
4197 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
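
/* Illustrative sketch (editor's example): component_size is read and
 * written in KiB - one KiB is two 512-byte sectors, hence the
 * dev_sectors / 2 above.  Growing each component to 1 GiB might look
 * like this (path assumed):
 *
 *      int fd = open("/sys/block/md0/md/component_size", O_WRONLY);
 *      if (fd >= 0) {
 *              write(fd, "1048576", 7);        // 1048576 KiB == 1 GiB
 *              close(fd);
 *      }
 */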
4198
4199 /* Metadata version.
4200  * This is one of
4201  *   'none' for arrays with no metadata (good luck...)
4202  *   'external' for arrays with externally managed metadata,
4203  * or N.M for internally known formats
4204  */
4205 static ssize_t
4206 metadata_show(struct mddev *mddev, char *page)
4207 {
4208         if (mddev->persistent)
4209                 return sprintf(page, "%d.%d\n",
4210                                mddev->major_version, mddev->minor_version);
4211         else if (mddev->external)
4212                 return sprintf(page, "external:%s\n", mddev->metadata_type);
4213         else
4214                 return sprintf(page, "none\n");
4215 }
4216
4217 static ssize_t
4218 metadata_store(struct mddev *mddev, const char *buf, size_t len)
4219 {
4220         int major, minor;
4221         char *e;
4222         int err;
4223         /* Changing the details of 'external' metadata is
4224          * always permitted.  Otherwise there must be
4225          * no devices attached to the array.
4226          */
4227
4228         err = mddev_lock(mddev);
4229         if (err)
4230                 return err;
4231         err = -EBUSY;
4232         if (mddev->external && strncmp(buf, "external:", 9) == 0)
4233                 ;
4234         else if (!list_empty(&mddev->disks))
4235                 goto out_unlock;
4236
4237         err = 0;
4238         if (cmd_match(buf, "none")) {
4239                 mddev->persistent = 0;
4240                 mddev->external = 0;
4241                 mddev->major_version = 0;
4242                 mddev->minor_version = 90;
4243                 goto out_unlock;
4244         }
4245         if (strncmp(buf, "external:", 9) == 0) {
4246                 size_t namelen = len-9;
4247                 if (namelen >= sizeof(mddev->metadata_type))
4248                         namelen = sizeof(mddev->metadata_type)-1;
4249                 strncpy(mddev->metadata_type, buf+9, namelen);
4250                 mddev->metadata_type[namelen] = 0;
4251                 if (namelen && mddev->metadata_type[namelen-1] == '\n')
4252                         mddev->metadata_type[--namelen] = 0;
4253                 mddev->persistent = 0;
4254                 mddev->external = 1;
4255                 mddev->major_version = 0;
4256                 mddev->minor_version = 90;
4257                 goto out_unlock;
4258         }
4259         major = simple_strtoul(buf, &e, 10);
4260         err = -EINVAL;
4261         if (e==buf || *e != '.')
4262                 goto out_unlock;
4263         buf = e+1;
4264         minor = simple_strtoul(buf, &e, 10);
4265         if (e==buf || (*e && *e != '\n') )
4266                 goto out_unlock;
4267         err = -ENOENT;
4268         if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
4269                 goto out_unlock;
4270         mddev->major_version = major;
4271         mddev->minor_version = minor;
4272         mddev->persistent = 1;
4273         mddev->external = 0;
4274         err = 0;
4275 out_unlock:
4276         mddev_unlock(mddev);
4277         return err ?: len;
4278 }
4279
4280 static struct md_sysfs_entry md_metadata =
4281 __ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
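
/* Illustrative sketch (editor's example): the three accepted forms of
 * metadata_version, written while the array has no member devices.
 * "imsm" is just an example of an externally managed format name:
 *
 *      write(fd, "1.2", 3);            // internal v1.2 superblocks
 *      write(fd, "external:imsm", 13); // externally managed metadata
 *      write(fd, "none", 4);           // no metadata at all
 */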
4282
4283 static ssize_t
4284 action_show(struct mddev *mddev, char *page)
4285 {
4286         char *type = "idle";
4287         unsigned long recovery = mddev->recovery;
4288         if (test_bit(MD_RECOVERY_FROZEN, &recovery))
4289                 type = "frozen";
4290         else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
4291             (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
4292                 if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
4293                         type = "reshape";
4294                 else if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
4295                         if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
4296                                 type = "resync";
4297                         else if (test_bit(MD_RECOVERY_CHECK, &recovery))
4298                                 type = "check";
4299                         else
4300                                 type = "repair";
4301                 } else if (test_bit(MD_RECOVERY_RECOVER, &recovery))
4302                         type = "recover";
4303                 else if (mddev->reshape_position != MaxSector)
4304                         type = "reshape";
4305         }
4306         return sprintf(page, "%s\n", type);
4307 }
4308
4309 static ssize_t
4310 action_store(struct mddev *mddev, const char *page, size_t len)
4311 {
4312         if (!mddev->pers || !mddev->pers->sync_request)
4313                 return -EINVAL;
4314
4315
4316         if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
4317                 if (cmd_match(page, "frozen"))
4318                         set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4319                 else
4320                         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4321                 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
4322                     mddev_lock(mddev) == 0) {
4323                         flush_workqueue(md_misc_wq);
4324                         if (mddev->sync_thread) {
4325                                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4326                                 md_reap_sync_thread(mddev);
4327                         }
4328                         mddev_unlock(mddev);
4329                 }
4330         } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4331                 return -EBUSY;
4332         else if (cmd_match(page, "resync"))
4333                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4334         else if (cmd_match(page, "recover")) {
4335                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4336                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
4337         } else if (cmd_match(page, "reshape")) {
4338                 int err;
4339                 if (mddev->pers->start_reshape == NULL)
4340                         return -EINVAL;
4341                 err = mddev_lock(mddev);
4342                 if (!err) {
4343                         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4344                                 err =  -EBUSY;
4345                         else {
4346                                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4347                                 err = mddev->pers->start_reshape(mddev);
4348                         }
4349                         mddev_unlock(mddev);
4350                 }
4351                 if (err)
4352                         return err;
4353                 sysfs_notify(&mddev->kobj, NULL, "degraded");
4354         } else {
4355                 if (cmd_match(page, "check"))
4356                         set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4357                 else if (!cmd_match(page, "repair"))
4358                         return -EINVAL;
4359                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4360                 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
4361                 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4362         }
4363         if (mddev->ro == 2) {
4364                 /* A write to sync_action is enough to justify
4365                  * canceling read-auto mode
4366                  */
4367                 mddev->ro = 0;
4368                 md_wakeup_thread(mddev->sync_thread);
4369         }
4370         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4371         md_wakeup_thread(mddev->thread);
4372         sysfs_notify_dirent_safe(mddev->sysfs_action);
4373         return len;
4374 }
4375
4376 static struct md_sysfs_entry md_scan_mode =
4377 __ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
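
/* Illustrative sketch (editor's example): starting a read-only
 * consistency scan and then pausing all recovery activity via
 * sync_action (path assumed):
 *
 *      int fd = open("/sys/block/md0/md/sync_action", O_WRONLY);
 *      if (fd >= 0) {
 *              write(fd, "check", 5);  // scrub, counting mismatches
 *              write(fd, "frozen", 6); // freeze recovery/resync
 *              close(fd);
 *      }
 */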
4378
4379 static ssize_t
4380 last_sync_action_show(struct mddev *mddev, char *page)
4381 {
4382         return sprintf(page, "%s\n", mddev->last_sync_action);
4383 }
4384
4385 static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);
4386
4387 static ssize_t
4388 mismatch_cnt_show(struct mddev *mddev, char *page)
4389 {
4390         return sprintf(page, "%llu\n",
4391                        (unsigned long long)
4392                        atomic64_read(&mddev->resync_mismatches));
4393 }
4394
4395 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
4396
4397 static ssize_t
4398 sync_min_show(struct mddev *mddev, char *page)
4399 {
4400         return sprintf(page, "%d (%s)\n", speed_min(mddev),
4401                        mddev->sync_speed_min ? "local": "system");
4402 }
4403
4404 static ssize_t
4405 sync_min_store(struct mddev *mddev, const char *buf, size_t len)
4406 {
4407         unsigned int min;
4408         int rv;
4409
4410         if (strncmp(buf, "system", 6)==0) {
4411                 min = 0;
4412         } else {
4413                 rv = kstrtouint(buf, 10, &min);
4414                 if (rv < 0)
4415                         return rv;
4416                 if (min == 0)
4417                         return -EINVAL;
4418         }
4419         mddev->sync_speed_min = min;
4420         return len;
4421 }
4422
4423 static struct md_sysfs_entry md_sync_min =
4424 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
4425
4426 static ssize_t
4427 sync_max_show(struct mddev *mddev, char *page)
4428 {
4429         return sprintf(page, "%d (%s)\n", speed_max(mddev),
4430                        mddev->sync_speed_max ? "local": "system");
4431 }
4432
4433 static ssize_t
4434 sync_max_store(struct mddev *mddev, const char *buf, size_t len)
4435 {
4436         unsigned int max;
4437         int rv;
4438
4439         if (strncmp(buf, "system", 6)==0) {
4440                 max = 0;
4441         } else {
4442                 rv = kstrtouint(buf, 10, &max);
4443                 if (rv < 0)
4444                         return rv;
4445                 if (max == 0)
4446                         return -EINVAL;
4447         }
4448         mddev->sync_speed_max = max;
4449         return len;
4450 }
4451
4452 static struct md_sysfs_entry md_sync_max =
4453 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
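
/* Illustrative sketch (editor's example): sync_speed_min and
 * sync_speed_max each take a per-array limit in KB/sec, or the literal
 * string "system" to fall back to the system-wide default (fds on the
 * respective attributes assumed):
 *
 *      write(min_fd, "50000", 5);      // guarantee at least 50 MB/sec
 *      write(max_fd, "system", 6);     // keep the global ceiling
 */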
4454
4455 static ssize_t
4456 degraded_show(struct mddev *mddev, char *page)
4457 {
4458         return sprintf(page, "%d\n", mddev->degraded);
4459 }
4460 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
4461
4462 static ssize_t
4463 sync_force_parallel_show(struct mddev *mddev, char *page)
4464 {
4465         return sprintf(page, "%d\n", mddev->parallel_resync);
4466 }
4467
4468 static ssize_t
4469 sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
4470 {
4471         long n;
4472
4473         if (kstrtol(buf, 10, &n))
4474                 return -EINVAL;
4475
4476         if (n != 0 && n != 1)
4477                 return -EINVAL;
4478
4479         mddev->parallel_resync = n;
4480
4481         if (mddev->sync_thread)
4482                 wake_up(&resync_wait);
4483
4484         return len;
4485 }
4486
4487 /* force parallel resync, even with shared block devices */
4488 static struct md_sysfs_entry md_sync_force_parallel =
4489 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
4490        sync_force_parallel_show, sync_force_parallel_store);
4491
4492 static ssize_t
4493 sync_speed_show(struct mddev *mddev, char *page)
4494 {
4495         unsigned long resync, dt, db;
4496         if (mddev->curr_resync == 0)
4497                 return sprintf(page, "none\n");
4498         resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
4499         dt = (jiffies - mddev->resync_mark) / HZ;
4500         if (!dt) dt++;
4501         db = resync - mddev->resync_mark_cnt;
4502         return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
4503 }
4504
4505 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
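
/* Editor's worked example (illustrative): db above is in 512-byte
 * sectors and dt in seconds, so db/dt is sectors per second and the
 * final /2 converts to KiB/sec.  If 409600 sectors were covered in the
 * last 10 seconds, sync_speed reports 409600 / 10 / 2 == 20480 K/sec.
 */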
4506
4507 static ssize_t
4508 sync_completed_show(struct mddev *mddev, char *page)
4509 {
4510         unsigned long long max_sectors, resync;
4511
4512         if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4513                 return sprintf(page, "none\n");
4514
4515         if (mddev->curr_resync == 1 ||
4516             mddev->curr_resync == 2)
4517                 return sprintf(page, "delayed\n");
4518
4519         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
4520             test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
4521                 max_sectors = mddev->resync_max_sectors;
4522         else
4523                 max_sectors = mddev->dev_sectors;
4524
4525         resync = mddev->curr_resync_completed;
4526         return sprintf(page, "%llu / %llu\n", resync, max_sectors);
4527 }
4528
4529 static struct md_sysfs_entry md_sync_completed =
4530         __ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);
4531
4532 static ssize_t
4533 min_sync_show(struct mddev *mddev, char *page)
4534 {
4535         return sprintf(page, "%llu\n",
4536                        (unsigned long long)mddev->resync_min);
4537 }
4538 static ssize_t
4539 min_sync_store(struct mddev *mddev, const char *buf, size_t len)
4540 {
4541         unsigned long long min;
4542         int err;
4543
4544         if (kstrtoull(buf, 10, &min))
4545                 return -EINVAL;
4546
4547         spin_lock(&mddev->lock);
4548         err = -EINVAL;
4549         if (min > mddev->resync_max)
4550                 goto out_unlock;
4551
4552         err = -EBUSY;
4553         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4554                 goto out_unlock;
4555
4556         /* Round down to multiple of 4K for safety */
4557         mddev->resync_min = round_down(min, 8);
4558         err = 0;
4559
4560 out_unlock:
4561         spin_unlock(&mddev->lock);
4562         return err ?: len;
4563 }
4564
4565 static struct md_sysfs_entry md_min_sync =
4566 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
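
/* Editor's worked example (illustrative): resync_min is kept in
 * 512-byte sectors, so round_down(min, 8) aligns it to 8 sectors ==
 * 4096 bytes.  A write of "1003" is therefore stored as 1000.
 */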
4567
4568 static ssize_t
4569 max_sync_show(struct mddev *mddev, char *page)
4570 {
4571         if (mddev->resync_max == MaxSector)
4572                 return sprintf(page, "max\n");
4573         else
4574                 return sprintf(page, "%llu\n",
4575                                (unsigned long long)mddev->resync_max);
4576 }
4577 static ssize_t
4578 max_sync_store(struct mddev *mddev, const char *buf, size_t len)
4579 {
4580         int err;
4581         spin_lock(&mddev->lock);
4582         if (strncmp(buf, "max", 3) == 0)
4583                 mddev->resync_max = MaxSector;
4584         else {
4585                 unsigned long long max;
4586                 int chunk;
4587
4588                 err = -EINVAL;
4589                 if (kstrtoull(buf, 10, &max))
4590                         goto out_unlock;
4591                 if (max < mddev->resync_min)
4592                         goto out_unlock;
4593
4594                 err = -EBUSY;
4595                 if (max < mddev->resync_max &&
4596                     mddev->ro == 0 &&
4597                     test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4598                         goto out_unlock;
4599
4600                 /* Must be a multiple of chunk_size */
4601                 chunk = mddev->chunk_sectors;
4602                 if (chunk) {
4603                         sector_t temp = max;
4604
4605                         err = -EINVAL;
4606                         if (sector_div(temp, chunk))
4607                                 goto out_unlock;
4608                 }
4609                 mddev->resync_max = max;
4610         }
4611         wake_up(&mddev->recovery_wait);
4612         err = 0;
4613 out_unlock:
4614         spin_unlock(&mddev->lock);
4615         return err ?: len;
4616 }
4617
4618 static struct md_sysfs_entry md_max_sync =
4619 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
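
/* Editor's worked example (illustrative): sync_max must be a whole
 * number of chunks.  With 64 KiB chunks (chunk_sectors == 128),
 * "1280" is accepted while "1000" fails with -EINVAL, because
 * 1000 % 128 == 104.
 */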
4620
4621 static ssize_t
4622 suspend_lo_show(struct mddev *mddev, char *page)
4623 {
4624         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
4625 }
4626
4627 static ssize_t
4628 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
4629 {
4630         unsigned long long old, new;
4631         int err;
4632
4633         err = kstrtoull(buf, 10, &new);
4634         if (err < 0)
4635                 return err;
4636         if (new != (sector_t)new)
4637                 return -EINVAL;
4638
4639         err = mddev_lock(mddev);
4640         if (err)
4641                 return err;
4642         err = -EINVAL;
4643         if (mddev->pers == NULL ||
4644             mddev->pers->quiesce == NULL)
4645                 goto unlock;
4646         old = mddev->suspend_lo;
4647         mddev->suspend_lo = new;
4648         if (new >= old)
4649                 /* Shrinking suspended region */
4650                 mddev->pers->quiesce(mddev, 2);
4651         else {
4652                 /* Expanding suspended region - need to wait */
4653                 mddev->pers->quiesce(mddev, 1);
4654                 mddev->pers->quiesce(mddev, 0);
4655         }
4656         err = 0;
4657 unlock:
4658         mddev_unlock(mddev);
4659         return err ?: len;
4660 }
4661 static struct md_sysfs_entry md_suspend_lo =
4662 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
4663
4664 static ssize_t
4665 suspend_hi_show(struct mddev *mddev, char *page)
4666 {
4667         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
4668 }
4669
4670 static ssize_t
4671 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
4672 {
4673         unsigned long long old, new;
4674         int err;
4675
4676         err = kstrtoull(buf, 10, &new);
4677         if (err < 0)
4678                 return err;
4679         if (new != (sector_t)new)
4680                 return -EINVAL;
4681
4682         err = mddev_lock(mddev);
4683         if (err)
4684                 return err;
4685         err = -EINVAL;
4686         if (mddev->pers == NULL ||
4687             mddev->pers->quiesce == NULL)
4688                 goto unlock;
4689         old = mddev->suspend_hi;
4690         mddev->suspend_hi = new;
4691         if (new <= old)
4692                 /* Shrinking suspended region */
4693                 mddev->pers->quiesce(mddev, 2);
4694         else {
4695                 /* Expanding suspended region - need to wait */
4696                 mddev->pers->quiesce(mddev, 1);
4697                 mddev->pers->quiesce(mddev, 0);
4698         }
4699         err = 0;
4700 unlock:
4701         mddev_unlock(mddev);
4702         return err ?: len;
4703 }
4704 static struct md_sysfs_entry md_suspend_hi =
4705 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
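
/* Illustrative sketch (editor's example): suspend_lo and suspend_hi
 * bracket a sector range in which the personality holds off I/O, e.g.
 * so userspace can rewrite that region.  Suspending the first 8192
 * sectors might look like this (fds on the two attributes assumed):
 *
 *      write(hi_fd, "8192", 4);        // expanding: quiesces and waits
 *      write(lo_fd, "0", 1);
 */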
4706
4707 static ssize_t
4708 reshape_position_show(struct mddev *mddev, char *page)
4709 {
4710         if (mddev->reshape_position != MaxSector)
4711                 return sprintf(page, "%llu\n",
4712                                (unsigned long long)mddev->reshape_position);
4713         strcpy(page, "none\n");
4714         return 5;
4715 }
4716
4717 static ssize_t
4718 reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
4719 {
4720         struct md_rdev *rdev;
4721         unsigned long long new;
4722         int err;
4723
4724         err = kstrtoull(buf, 10, &new);
4725         if (err < 0)
4726                 return err;
4727         if (new != (sector_t)new)
4728                 return -EINVAL;
4729         err = mddev_lock(mddev);
4730         if (err)
4731                 return err;
4732         err = -EBUSY;
4733         if (mddev->pers)
4734                 goto unlock;
4735         mddev->reshape_position = new;
4736         mddev->delta_disks = 0;
4737         mddev->reshape_backwards = 0;
4738         mddev->new_level = mddev->level;
4739         mddev->new_layout = mddev->layout;
4740         mddev->new_chunk_sectors = mddev->chunk_sectors;
4741         rdev_for_each(rdev, mddev)
4742                 rdev->new_data_offset = rdev->data_offset;
4743         err = 0;
4744 unlock:
4745         mddev_unlock(mddev);
4746         return err ?: len;
4747 }
4748
4749 static struct md_sysfs_entry md_reshape_position =
4750 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
4751        reshape_position_store);
4752
4753 static ssize_t
4754 reshape_direction_show(struct mddev *mddev, char *page)
4755 {
4756         return sprintf(page, "%s\n",
4757                        mddev->reshape_backwards ? "backwards" : "forwards");
4758 }
4759
4760 static ssize_t
4761 reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
4762 {
4763         int backwards = 0;
4764         int err;
4765
4766         if (cmd_match(buf, "forwards"))
4767                 backwards = 0;
4768         else if (cmd_match(buf, "backwards"))
4769                 backwards = 1;
4770         else
4771                 return -EINVAL;
4772         if (mddev->reshape_backwards == backwards)
4773                 return len;
4774
4775         err = mddev_lock(mddev);
4776         if (err)
4777                 return err;
4778         /* check if we are allowed to change */
4779         if (mddev->delta_disks)
4780                 err = -EBUSY;
4781         else if (mddev->persistent &&
4782             mddev->major_version == 0)
4783                 err =  -EINVAL;
4784         else
4785                 mddev->reshape_backwards = backwards;
4786         mddev_unlock(mddev);
4787         return err ?: len;
4788 }
4789
4790 static struct md_sysfs_entry md_reshape_direction =
4791 __ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
4792        reshape_direction_store);
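
/* Illustrative sketch (editor's example): reshape_position and
 * reshape_direction can only be written while the array is inactive
 * (mddev->pers == NULL), e.g. when re-assembling an array whose
 * reshape was interrupted (fds assumed):
 *
 *      write(pos_fd, "0", 1);          // reshape considered at sector 0
 *      write(dir_fd, "backwards", 9);
 */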
4793
4794 static ssize_t
4795 array_size_show(struct mddev *mddev, char *page)
4796 {
4797         if (mddev->external_size)
4798                 return sprintf(page, "%llu\n",
4799                                (unsigned long long)mddev->array_sectors/2);
4800         else
4801                 return sprintf(page, "default\n");
4802 }
4803
4804 static ssize_t
4805 array_size_store(struct mddev *mddev, const char *buf, size_t len)
4806 {
4807         sector_t sectors;
4808         int err;
4809
4810         err = mddev_lock(mddev);
4811         if (err)
4812                 return err;
4813
4814         if (strncmp(buf, "default", 7) == 0) {
4815                 if (mddev->pers)
4816                         sectors = mddev->pers->size(mddev, 0, 0);
4817                 else
4818                         sectors = mddev->array_sectors;
4819
4820                 mddev->external_size = 0;
4821         } else {
4822                 if (strict_blocks_to_sectors(buf, &sectors) < 0)
4823                         err = -EINVAL;
4824                 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
4825                         err = -E2BIG;
4826                 else
4827                         mddev->external_size = 1;
4828         }
4829
4830         if (!err) {
4831                 mddev->array_sectors = sectors;
4832                 if (mddev->pers) {
4833                         set_capacity(mddev->gendisk, mddev->array_sectors);
4834                         revalidate_disk(mddev->gendisk);
4835                 }
4836         }
4837         mddev_unlock(mddev);
4838         return err ?: len;
4839 }
4840
4841 static struct md_sysfs_entry md_array_size =
4842 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
4843        array_size_store);
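
/* Illustrative sketch (editor's example): array_size accepts either an
 * explicit size in KiB, capped at what the personality can provide, or
 * the word "default" to return control of the size to the driver:
 *
 *      write(fd, "default", 7);        // fd on .../md/array_size (assumed)
 */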
4844
4845 static struct attribute *md_default_attrs[] = {
4846         &md_level.attr,
4847         &md_layout.attr,
4848         &md_raid_disks.attr,
4849         &md_chunk_size.attr,
4850         &md_size.attr,
4851         &md_resync_start.attr,
4852         &md_metadata.attr,
4853         &md_new_device.attr,
4854         &md_safe_delay.attr,
4855         &md_array_state.attr,
4856         &md_reshape_position.attr,
4857         &md_reshape_direction.attr,
4858         &md_array_size.attr,
4859         &max_corr_read_errors.attr,
4860         NULL,
4861 };
4862
4863 static struct attribute *md_redundancy_attrs[] = {
4864         &md_scan_mode.attr,
4865         &md_last_scan_mode.attr,
4866         &md_mismatches.attr,
4867         &md_sync_min.attr,
4868         &md_sync_max.attr,
4869         &md_sync_speed.attr,
4870         &md_sync_force_parallel.attr,
4871         &md_sync_completed.attr,
4872         &md_min_sync.attr,
4873         &md_max_sync.attr,
4874         &md_suspend_lo.attr,
4875         &md_suspend_hi.attr,
4876         &md_bitmap.attr,
4877         &md_degraded.attr,
4878         NULL,
4879 };
4880 static struct attribute_group md_redundancy_group = {
4881         .name = NULL,
4882         .attrs = md_redundancy_attrs,
4883 };
4884
4885 static ssize_t
4886 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
4887 {
4888         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
4889         struct mddev *mddev = container_of(kobj, struct mddev, kobj);
4890         ssize_t rv;
4891
4892         if (!entry->show)
4893                 return -EIO;
4894         spin_lock(&all_mddevs_lock);
4895         if (list_empty(&mddev->all_mddevs)) {
4896                 spin_unlock(&all_mddevs_lock);
4897                 return -EBUSY;
4898         }
4899         mddev_get(mddev);
4900         spin_unlock(&all_mddevs_lock);
4901
4902         rv = entry->show(mddev, page);
4903         mddev_put(mddev);
4904         return rv;
4905 }
4906
4907 static ssize_t
4908 md_attr_store(struct kobject *kobj, struct attribute *attr,
4909               const char *page, size_t length)
4910 {
4911         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
4912         struct mddev *mddev = container_of(kobj, struct mddev, kobj);
4913         ssize_t rv;
4914
4915         if (!entry->store)
4916                 return -EIO;
4917         if (!capable(CAP_SYS_ADMIN))
4918                 return -EACCES;
4919         spin_lock(&all_mddevs_lock);
4920         if (list_empty(&mddev->all_mddevs)) {
4921                 spin_unlock(&all_mddevs_lock);
4922                 return -EBUSY;
4923         }
4924         mddev_get(mddev);
4925         spin_unlock(&all_mddevs_lock);
4926         rv = entry->store(mddev, page, length);
4927         mddev_put(mddev);
4928         return rv;
4929 }
4930
4931 static void md_free(struct kobject *ko)
4932 {
4933         struct mddev *mddev = container_of(ko, struct mddev, kobj);
4934
4935         if (mddev->sysfs_state)
4936                 sysfs_put(mddev->sysfs_state);
4937
4938         if (mddev->queue)
4939                 blk_cleanup_queue(mddev->queue);
4940         if (mddev->gendisk) {
4941                 del_gendisk(mddev->gendisk);
4942                 put_disk(mddev->gendisk);
4943         }
4944
4945         kfree(mddev);
4946 }
4947
4948 static const struct sysfs_ops md_sysfs_ops = {
4949         .show   = md_attr_show,
4950         .store  = md_attr_store,
4951 };
4952 static struct kobj_type md_ktype = {
4953         .release        = md_free,
4954         .sysfs_ops      = &md_sysfs_ops,
4955         .default_attrs  = md_default_attrs,
4956 };
4957
4958 int mdp_major = 0;
4959
4960 static void mddev_delayed_delete(struct work_struct *ws)
4961 {
4962         struct mddev *mddev = container_of(ws, struct mddev, del_work);
4963
4964         sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
4965         kobject_del(&mddev->kobj);
4966         kobject_put(&mddev->kobj);
4967 }
4968
4969 static int md_alloc(dev_t dev, char *name)
4970 {
4971         static DEFINE_MUTEX(disks_mutex);
4972         struct mddev *mddev = mddev_find(dev);
4973         struct gendisk *disk;
4974         int partitioned;
4975         int shift;
4976         int unit;
4977         int error;
4978
4979         if (!mddev)
4980                 return -ENODEV;
4981
4982         partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
4983         shift = partitioned ? MdpMinorShift : 0;
4984         unit = MINOR(mddev->unit) >> shift;
4985
4986         /* wait for any previous instance of this device to be
4987          * completely removed (mddev_delayed_delete).
4988          */
4989         flush_workqueue(md_misc_wq);
4990
4991         mutex_lock(&disks_mutex);
4992         error = -EEXIST;
4993         if (mddev->gendisk)
4994                 goto abort;
4995
4996         if (name) {
4997                 /* Need to ensure that 'name' is not a duplicate.
4998                  */
4999                 struct mddev *mddev2;
5000                 spin_lock(&all_mddevs_lock);
5001
5002                 list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
5003                         if (mddev2->gendisk &&
5004                             strcmp(mddev2->gendisk->disk_name, name) == 0) {
5005                                 spin_unlock(&all_mddevs_lock);
5006                                 goto abort;
5007                         }
5008                 spin_unlock(&all_mddevs_lock);
5009         }
5010
5011         error = -ENOMEM;
5012         mddev->queue = blk_alloc_queue(GFP_KERNEL);
5013         if (!mddev->queue)
5014                 goto abort;
5015         mddev->queue->queuedata = mddev;
5016
5017         blk_queue_make_request(mddev->queue, md_make_request);
5018         blk_set_stacking_limits(&mddev->queue->limits);
5019
5020         disk = alloc_disk(1 << shift);
5021         if (!disk) {
5022                 blk_cleanup_queue(mddev->queue);
5023                 mddev->queue = NULL;
5024                 goto abort;
5025         }
5026         disk->major = MAJOR(mddev->unit);
5027         disk->first_minor = unit << shift;
5028         if (name)
5029                 strcpy(disk->disk_name, name);
5030         else if (partitioned)
5031                 sprintf(disk->disk_name, "md_d%d", unit);
5032         else
5033                 sprintf(disk->disk_name, "md%d", unit);
5034         disk->fops = &md_fops;
5035         disk->private_data = mddev;
5036         disk->queue = mddev->queue;
5037         blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA);
5038         /* Allow extended partitions.  This makes the
5039          * 'mdp' device redundant, but we can't really
5040          * remove it now.
5041          */
5042         disk->flags |= GENHD_FL_EXT_DEVT;
5043         mddev->gendisk = disk;
5044         /* As soon as we call add_disk(), another thread could get
5045          * through to md_open, so make sure it doesn't get too far
5046          */
5047         mutex_lock(&mddev->open_mutex);
5048         add_disk(disk);
5049
5050         error = kobject_init_and_add(&mddev->kobj, &md_ktype,
5051                                      &disk_to_dev(disk)->kobj, "%s", "md");
5052         if (error) {
5053                 /* This isn't possible, but as kobject_init_and_add is marked
5054                  * __must_check, we must do something with the result
5055                  */
5056                 printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
5057                        disk->disk_name);
5058                 error = 0;
5059         }
5060         if (mddev->kobj.sd &&
5061             sysfs_create_group(&mddev->kobj, &md_bitmap_group))
5062                 printk(KERN_DEBUG "pointless warning\n");
5063         mutex_unlock(&mddev->open_mutex);
5064  abort:
5065         mutex_unlock(&disks_mutex);
5066         if (!error && mddev->kobj.sd) {
5067                 kobject_uevent(&mddev->kobj, KOBJ_ADD);
5068                 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
5069         }
5070         mddev_put(mddev);
5071         return error;
5072 }
5073
5074 static struct kobject *md_probe(dev_t dev, int *part, void *data)
5075 {
5076         md_alloc(dev, NULL);
5077         return NULL;
5078 }
5079
5080 static int add_named_array(const char *val, struct kernel_param *kp)
5081 {
5082         /* val must be "md_*" where * is not all digits.
5083          * We allocate an array with a large free minor number, and
5084          * set the name to val.  val must not already be an active name.
5085          */
5086         int len = strlen(val);
5087         char buf[DISK_NAME_LEN];
5088
5089         while (len && val[len-1] == '\n')
5090                 len--;
5091         if (len >= DISK_NAME_LEN)
5092                 return -E2BIG;
5093         strlcpy(buf, val, len+1);
5094         if (strncmp(buf, "md_", 3) != 0)
5095                 return -EINVAL;
5096         return md_alloc(0, buf);
5097 }
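
/* Illustrative sketch (editor's example): add_named_array is hooked up
 * as a writable module parameter, so a named array can be created from
 * userspace with something like the following (the parameter path is
 * an assumption):
 *
 *      int fd = open("/sys/module/md_mod/parameters/new_array", O_WRONLY);
 *      if (fd >= 0) {
 *              write(fd, "md_home", 7);        // creates disk "md_home"
 *              close(fd);
 *      }
 */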
5098
5099 static void md_safemode_timeout(unsigned long data)
5100 {
5101         struct mddev *mddev = (struct mddev *) data;
5102
5103         if (!atomic_read(&mddev->writes_pending)) {
5104                 mddev->safemode = 1;
5105                 if (mddev->external)
5106                         sysfs_notify_dirent_safe(mddev->sysfs_state);
5107         }
5108         md_wakeup_thread(mddev->thread);
5109 }
5110
5111 static int start_dirty_degraded;
5112
5113 int md_run(struct mddev *mddev)
5114 {
5115         int err;
5116         struct md_rdev *rdev;
5117         struct md_personality *pers;
5118
5119         if (list_empty(&mddev->disks))
5120                 /* cannot run an array with no devices... */
5121                 return -EINVAL;
5122
5123         if (mddev->pers)
5124                 return -EBUSY;
5125         /* Cannot run until previous stop completes properly */
5126         if (mddev->sysfs_active)
5127                 return -EBUSY;
5128
5129         /*
5130          * Analyze all RAID superblock(s)
5131          */
5132         if (!mddev->raid_disks) {
5133                 if (!mddev->persistent)
5134                         return -EINVAL;
5135                 analyze_sbs(mddev);
5136         }
5137
5138         if (mddev->level != LEVEL_NONE)
5139                 request_module("md-level-%d", mddev->level);
5140         else if (mddev->clevel[0])
5141                 request_module("md-%s", mddev->clevel);
5142
5143         /*
5144          * Drop all container device buffers, from now on
5145          * the only valid external interface is through the md
5146          * device.
5147          */
5148         rdev_for_each(rdev, mddev) {
5149                 if (test_bit(Faulty, &rdev->flags))
5150                         continue;
5151                 sync_blockdev(rdev->bdev);
5152                 invalidate_bdev(rdev->bdev);
5153
5154                 /* Perform some consistency tests on the device.
5155                  * We don't want the data to overlap the metadata.
5156                  * Internal bitmap issues have been handled elsewhere.
5157                  */
5158                 if (rdev->meta_bdev) {
5159                         /* Nothing to check */;
5160                 } else if (rdev->data_offset < rdev->sb_start) {
5161                         if (mddev->dev_sectors &&
5162                             rdev->data_offset + mddev->dev_sectors
5163                             > rdev->sb_start) {
5164                                 printk("md: %s: data overlaps metadata\n",
5165                                        mdname(mddev));
5166                                 return -EINVAL;
5167                         }
5168                 } else {
5169                         if (rdev->sb_start + rdev->sb_size/512
5170                             > rdev->data_offset) {
5171                                 printk("md: %s: metadata overlaps data\n",
5172                                        mdname(mddev));
5173                                 return -EINVAL;
5174                         }
5175                 }
5176                 sysfs_notify_dirent_safe(rdev->sysfs_state);
5177         }
5178
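        /* Editor's worked example (illustrative): with a 0.90-style
         * superblock, sb_start sits near the end of the device and
         * data_offset is 0, so the first branch above rejects any
         * dev_sectors large enough to let the data run into the
         * metadata.
         */
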
5179         if (mddev->bio_set == NULL)
5180                 mddev->bio_set = bioset_create(BIO_POOL_SIZE, 0);
5181
5182         spin_lock(&pers_lock);
5183         pers = find_pers(mddev->level, mddev->clevel);
5184         if (!pers || !try_module_get(pers->owner)) {
5185                 spin_unlock(&pers_lock);
5186                 if (mddev->level != LEVEL_NONE)
5187                         printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
5188                                mddev->level);
5189                 else
5190                         printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
5191                                mddev->clevel);
5192                 return -EINVAL;
5193         }
5194         spin_unlock(&pers_lock);
5195         if (mddev->level != pers->level) {
5196                 mddev->level = pers->level;
5197                 mddev->new_level = pers->level;
5198         }
5199         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
5200
5201         if (mddev->reshape_position != MaxSector &&
5202             pers->start_reshape == NULL) {
5203                 /* This personality cannot handle reshaping... */
5204                 module_put(pers->owner);
5205                 return -EINVAL;
5206         }
5207
5208         if (pers->sync_request) {
5209                 /* Warn if this is a potentially silly
5210                  * configuration.
5211                  */
5212                 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
5213                 struct md_rdev *rdev2;
5214                 int warned = 0;
5215
5216                 rdev_for_each(rdev, mddev)
5217                         rdev_for_each(rdev2, mddev) {
5218                                 if (rdev < rdev2 &&
5219                                     rdev->bdev->bd_contains ==
5220                                     rdev2->bdev->bd_contains) {
5221                                         printk(KERN_WARNING
5222                                                "%s: WARNING: %s appears to be"
5223                                                " on the same physical disk as"
5224                                                " %s.\n",
5225                                                mdname(mddev),
5226                                                bdevname(rdev->bdev,b),
5227                                                bdevname(rdev2->bdev,b2));
5228                                         warned = 1;
5229                                 }
5230                         }
5231
5232                 if (warned)
5233                         printk(KERN_WARNING
5234                                "True protection against single-disk"
5235                                " failure might be compromised.\n");
5236         }
5237
5238         mddev->recovery = 0;
5239         /* may be overridden by personality */
5240         mddev->resync_max_sectors = mddev->dev_sectors;
5241
5242         mddev->ok_start_degraded = start_dirty_degraded;
5243
5244         if (start_readonly && mddev->ro == 0)
5245                 mddev->ro = 2; /* read-only, but switch on first write */
5246
5247         err = pers->run(mddev);
5248         if (err)
5249                 printk(KERN_ERR "md: pers->run() failed ...\n");
5250         else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
5251                 WARN_ONCE(!mddev->external_size, "%s: default size too small,"
5252                           " but 'external_size' not in effect?\n", __func__);
5253                 printk(KERN_ERR
5254                        "md: invalid array_size %llu > default size %llu\n",
5255                        (unsigned long long)mddev->array_sectors / 2,
5256                        (unsigned long long)pers->size(mddev, 0, 0) / 2);
5257                 err = -EINVAL;
5258         }
5259         if (err == 0 && pers->sync_request &&
5260             (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
5261                 struct bitmap *bitmap;
5262
5263                 bitmap = bitmap_create(mddev, -1);
5264                 if (IS_ERR(bitmap)) {
5265                         err = PTR_ERR(bitmap);
5266                         printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
5267                                mdname(mddev), err);
5268                 } else
5269                         mddev->bitmap = bitmap;
5270
5271         }
5272         if (err) {
5273                 mddev_detach(mddev);
5274                 if (mddev->private)
5275                         pers->free(mddev, mddev->private);
5276                 mddev->private = NULL;
5277                 module_put(pers->owner);
5278                 bitmap_destroy(mddev);
5279                 return err;
5280         }
5281         if (mddev->queue) {
5282                 mddev->queue->backing_dev_info.congested_data = mddev;
5283                 mddev->queue->backing_dev_info.congested_fn = md_congested;
5284         }
5285         if (pers->sync_request) {
5286                 if (mddev->kobj.sd &&
5287                     sysfs_create_group(&mddev->kobj, &md_redundancy_group))
5288                         printk(KERN_WARNING
5289                                "md: cannot register extra attributes for %s\n",
5290                                mdname(mddev));
5291                 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
5292         } else if (mddev->ro == 2) /* auto-readonly not meaningful */
5293                 mddev->ro = 0;
5294
5295         atomic_set(&mddev->writes_pending,0);
5296         atomic_set(&mddev->max_corr_read_errors,
5297                    MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
5298         mddev->safemode = 0;
5299         if (mddev_is_clustered(mddev))
5300                 mddev->safemode_delay = 0;
5301         else
5302                 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
5303         mddev->in_sync = 1;
5304         smp_wmb();
5305         spin_lock(&mddev->lock);
5306         mddev->pers = pers;
5307         spin_unlock(&mddev->lock);
5308         rdev_for_each(rdev, mddev)
5309                 if (rdev->raid_disk >= 0)
5310                         if (sysfs_link_rdev(mddev, rdev))
5311                                 /* failure here is OK */;
5312
5313         if (mddev->degraded && !mddev->ro)
5314                 /* This ensures that recovering status is reported immediately
5315                  * via sysfs - until a lack of spares is confirmed.
5316                  */
5317                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
5318         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5319
5320         if (mddev->flags & MD_UPDATE_SB_FLAGS)
5321                 md_update_sb(mddev, 0);
5322
5323         md_new_event(mddev);
5324         sysfs_notify_dirent_safe(mddev->sysfs_state);
5325         sysfs_notify_dirent_safe(mddev->sysfs_action);
5326         sysfs_notify(&mddev->kobj, NULL, "degraded");
5327         return 0;
5328 }
5329 EXPORT_SYMBOL_GPL(md_run);
5330
5331 static int do_md_run(struct mddev *mddev)
5332 {
5333         int err;
5334
5335         err = md_run(mddev);
5336         if (err)
5337                 goto out;
5338         err = bitmap_load(mddev);
5339         if (err) {
5340                 bitmap_destroy(mddev);
5341                 goto out;
5342         }
5343
5344         if (mddev_is_clustered(mddev))
5345                 md_allow_write(mddev);
5346
5347         md_wakeup_thread(mddev->thread);
5348         md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
5349
5350         set_capacity(mddev->gendisk, mddev->array_sectors);
5351         revalidate_disk(mddev->gendisk);
5352         mddev->changed = 1;
5353         kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
5354 out:
5355         return err;
5356 }
5357
5358 static int restart_array(struct mddev *mddev)
5359 {
5360         struct gendisk *disk = mddev->gendisk;
5361
5362         /* Complain if it has no devices */
5363         if (list_empty(&mddev->disks))
5364                 return -ENXIO;
5365         if (!mddev->pers)
5366                 return -EINVAL;
5367         if (!mddev->ro)
5368                 return -EBUSY;
5369         if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
5370                 struct md_rdev *rdev;
5371                 bool has_journal = false;
5372
5373                 rcu_read_lock();
5374                 rdev_for_each_rcu(rdev, mddev) {
5375                         if (test_bit(Journal, &rdev->flags) &&
5376                             !test_bit(Faulty, &rdev->flags)) {
5377                                 has_journal = true;
5378                                 break;
5379                         }
5380                 }
5381                 rcu_read_unlock();
5382
5383                 /* Don't restart rw with journal missing/faulty */
5384                 if (!has_journal)
5385                         return -EINVAL;
5386         }
5387
5388         mddev->safemode = 0;
5389         mddev->ro = 0;
5390         set_disk_ro(disk, 0);
5391         printk(KERN_INFO "md: %s switched to read-write mode.\n",
5392                 mdname(mddev));
5393         /* Kick recovery or resync if necessary */
5394         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5395         md_wakeup_thread(mddev->thread);
5396         md_wakeup_thread(mddev->sync_thread);
5397         sysfs_notify_dirent_safe(mddev->sysfs_state);
5398         return 0;
5399 }
5400
5401 static void md_clean(struct mddev *mddev)
5402 {
5403         mddev->array_sectors = 0;
5404         mddev->external_size = 0;
5405         mddev->dev_sectors = 0;
5406         mddev->raid_disks = 0;
5407         mddev->recovery_cp = 0;
5408         mddev->resync_min = 0;
5409         mddev->resync_max = MaxSector;
5410         mddev->reshape_position = MaxSector;
5411         mddev->external = 0;
5412         mddev->persistent = 0;
5413         mddev->level = LEVEL_NONE;
5414         mddev->clevel[0] = 0;
5415         mddev->flags = 0;
5416         mddev->ro = 0;
5417         mddev->metadata_type[0] = 0;
5418         mddev->chunk_sectors = 0;
5419         mddev->ctime = mddev->utime = 0;
5420         mddev->layout = 0;
5421         mddev->max_disks = 0;
5422         mddev->events = 0;
5423         mddev->can_decrease_events = 0;
5424         mddev->delta_disks = 0;
5425         mddev->reshape_backwards = 0;
5426         mddev->new_level = LEVEL_NONE;
5427         mddev->new_layout = 0;
5428         mddev->new_chunk_sectors = 0;
5429         mddev->curr_resync = 0;
5430         atomic64_set(&mddev->resync_mismatches, 0);
5431         mddev->suspend_lo = mddev->suspend_hi = 0;
5432         mddev->sync_speed_min = mddev->sync_speed_max = 0;
5433         mddev->recovery = 0;
5434         mddev->in_sync = 0;
5435         mddev->changed = 0;
5436         mddev->degraded = 0;
5437         mddev->safemode = 0;
5438         mddev->private = NULL;
5439         mddev->bitmap_info.offset = 0;
5440         mddev->bitmap_info.default_offset = 0;
5441         mddev->bitmap_info.default_space = 0;
5442         mddev->bitmap_info.chunksize = 0;
5443         mddev->bitmap_info.daemon_sleep = 0;
5444         mddev->bitmap_info.max_write_behind = 0;
5445 }
5446
5447 static void __md_stop_writes(struct mddev *mddev)
5448 {
5449         set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5450         flush_workqueue(md_misc_wq);
5451         if (mddev->sync_thread) {
5452                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5453                 md_reap_sync_thread(mddev);
5454         }
5455
5456         del_timer_sync(&mddev->safemode_timer);
5457
5458         bitmap_flush(mddev);
5459         md_super_wait(mddev);
5460
5461         if (mddev->ro == 0 &&
5462             ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
5463              (mddev->flags & MD_UPDATE_SB_FLAGS))) {
5464                 /* mark array as shutdown cleanly */
5465                 if (!mddev_is_clustered(mddev))
5466                         mddev->in_sync = 1;
5467                 md_update_sb(mddev, 1);
5468         }
5469 }
5470
5471 void md_stop_writes(struct mddev *mddev)
5472 {
5473         mddev_lock_nointr(mddev);
5474         __md_stop_writes(mddev);
5475         mddev_unlock(mddev);
5476 }
5477 EXPORT_SYMBOL_GPL(md_stop_writes);
5478
5479 static void mddev_detach(struct mddev *mddev)
5480 {
5481         struct bitmap *bitmap = mddev->bitmap;
5482         /* wait for behind writes to complete */
5483         if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
5484                 printk(KERN_INFO "md:%s: behind writes in progress - waiting to stop.\n",
5485                        mdname(mddev));
5486                 /* need to kick something here to make sure I/O goes? */
5487                 wait_event(bitmap->behind_wait,
5488                            atomic_read(&bitmap->behind_writes) == 0);
5489         }
5490         if (mddev->pers && mddev->pers->quiesce) {
5491                 mddev->pers->quiesce(mddev, 1);
5492                 mddev->pers->quiesce(mddev, 0);
5493         }
5494         md_unregister_thread(&mddev->thread);
5495         if (mddev->queue)
5496                 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
5497 }
5498
5499 static void __md_stop(struct mddev *mddev)
5500 {
5501         struct md_personality *pers = mddev->pers;
5502         mddev_detach(mddev);
5503         /* Ensure ->event_work is done */
5504         flush_workqueue(md_misc_wq);
5505         spin_lock(&mddev->lock);
5506         mddev->pers = NULL;
5507         spin_unlock(&mddev->lock);
5508         pers->free(mddev, mddev->private);
5509         mddev->private = NULL;
5510         if (pers->sync_request && mddev->to_remove == NULL)
5511                 mddev->to_remove = &md_redundancy_group;
5512         module_put(pers->owner);
5513         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5514 }
5515
5516 void md_stop(struct mddev *mddev)
5517 {
5518         /* Stop the array and free any attached data structures.
5519          * This is called from dm-raid.
5520          */
5521         __md_stop(mddev);
5522         bitmap_destroy(mddev);
5523         if (mddev->bio_set)
5524                 bioset_free(mddev->bio_set);
5525 }
5526
5527 EXPORT_SYMBOL_GPL(md_stop);
5528
5529 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
5530 {
5531         int err = 0;
5532         int did_freeze = 0;
5533
5534         if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
5535                 did_freeze = 1;
5536                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5537                 md_wakeup_thread(mddev->thread);
5538         }
5539         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5540                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5541         if (mddev->sync_thread)
5542                 /* Thread might be blocked waiting for metadata update
5543                  * which will now never happen */
5544                 wake_up_process(mddev->sync_thread->tsk);
5545
5546         if (mddev->external && test_bit(MD_CHANGE_PENDING, &mddev->flags))
5547                 return -EBUSY;
5548         mddev_unlock(mddev);
5549         wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
5550                                           &mddev->recovery));
5551         wait_event(mddev->sb_wait,
5552                    !test_bit(MD_CHANGE_PENDING, &mddev->flags));
5553         mddev_lock_nointr(mddev);
5554
5555         mutex_lock(&mddev->open_mutex);
5556         if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
5557             mddev->sync_thread ||
5558             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
5559             (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
5560                 printk("md: %s still in use.\n",mdname(mddev));
5561                 if (did_freeze) {
5562                         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5563                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5564                         md_wakeup_thread(mddev->thread);
5565                 }
5566                 err = -EBUSY;
5567                 goto out;
5568         }
5569         if (mddev->pers) {
5570                 __md_stop_writes(mddev);
5571
5572                 err  = -ENXIO;
5573                 if (mddev->ro==1)
5574                         goto out;
5575                 mddev->ro = 1;
5576                 set_disk_ro(mddev->gendisk, 1);
5577                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5578                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5579                 md_wakeup_thread(mddev->thread);
5580                 sysfs_notify_dirent_safe(mddev->sysfs_state);
5581                 err = 0;
5582         }
5583 out:
5584         mutex_unlock(&mddev->open_mutex);
5585         return err;
5586 }
5587
5588 /* mode:
5589  *   0 - completely stop and disassemble the array
5590  *   2 - stop but do not disassemble the array
5591  */
5592 static int do_md_stop(struct mddev *mddev, int mode,
5593                       struct block_device *bdev)
5594 {
5595         struct gendisk *disk = mddev->gendisk;
5596         struct md_rdev *rdev;
5597         int did_freeze = 0;
5598
5599         if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
5600                 did_freeze = 1;
5601                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5602                 md_wakeup_thread(mddev->thread);
5603         }
5604         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5605                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5606         if (mddev->sync_thread)
5607                 /* Thread might be blocked waiting for metadata update
5608                  * which will now never happen */
5609                 wake_up_process(mddev->sync_thread->tsk);
5610
5611         mddev_unlock(mddev);
5612         wait_event(resync_wait, (mddev->sync_thread == NULL &&
5613                                  !test_bit(MD_RECOVERY_RUNNING,
5614                                            &mddev->recovery)));
5615         mddev_lock_nointr(mddev);
5616
5617         mutex_lock(&mddev->open_mutex);
5618         if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
5619             mddev->sysfs_active ||
5620             mddev->sync_thread ||
5621             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
5622             (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
5623                 printk("md: %s still in use.\n",mdname(mddev));
5624                 mutex_unlock(&mddev->open_mutex);
5625                 if (did_freeze) {
5626                         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5627                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5628                         md_wakeup_thread(mddev->thread);
5629                 }
5630                 return -EBUSY;
5631         }
5632         if (mddev->pers) {
5633                 if (mddev->ro)
5634                         set_disk_ro(disk, 0);
5635
5636                 __md_stop_writes(mddev);
5637                 __md_stop(mddev);
5638                 mddev->queue->backing_dev_info.congested_fn = NULL;
5639
5640                 /* tell userspace to handle 'inactive' */
5641                 sysfs_notify_dirent_safe(mddev->sysfs_state);
5642
5643                 rdev_for_each(rdev, mddev)
5644                         if (rdev->raid_disk >= 0)
5645                                 sysfs_unlink_rdev(mddev, rdev);
5646
5647                 set_capacity(disk, 0);
5648                 mutex_unlock(&mddev->open_mutex);
5649                 mddev->changed = 1;
5650                 revalidate_disk(disk);
5651
5652                 if (mddev->ro)
5653                         mddev->ro = 0;
5654         } else
5655                 mutex_unlock(&mddev->open_mutex);
5656         /*
5657          * Free resources if final stop
5658          */
5659         if (mode == 0) {
5660                 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
5661
5662                 bitmap_destroy(mddev);
5663                 if (mddev->bitmap_info.file) {
5664                         struct file *f = mddev->bitmap_info.file;
5665                         spin_lock(&mddev->lock);
5666                         mddev->bitmap_info.file = NULL;
5667                         spin_unlock(&mddev->lock);
5668                         fput(f);
5669                 }
5670                 mddev->bitmap_info.offset = 0;
5671
5672                 export_array(mddev);
5673
5674                 md_clean(mddev);
5675                 if (mddev->hold_active == UNTIL_STOP)
5676                         mddev->hold_active = 0;
5677         }
5678         md_new_event(mddev);
5679         sysfs_notify_dirent_safe(mddev->sysfs_state);
5680         return 0;
5681 }
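/*
 * A minimal userspace sketch (illustration, not driver code) of how the
 * STOP_ARRAY ioctl serviced by do_md_stop() above is typically driven;
 * this is roughly what "mdadm --stop" does.  The device path is an
 * assumption - any md node works, provided nothing else holds it open.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/raid/md_u.h>
 *
 *	int stop_array(const char *path)	// e.g. "/dev/md0" (assumed)
 *	{
 *		int fd = open(path, O_RDONLY);
 *		int err;
 *
 *		if (fd < 0)
 *			return -1;
 *		// mode 0: stop and disassemble; -EBUSY if opened elsewhere
 *		err = ioctl(fd, STOP_ARRAY, NULL);
 *		close(fd);
 *		return err;
 *	}
 */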
5682
5683 #ifndef MODULE
5684 static void autorun_array(struct mddev *mddev)
5685 {
5686         struct md_rdev *rdev;
5687         int err;
5688
5689         if (list_empty(&mddev->disks))
5690                 return;
5691
5692         printk(KERN_INFO "md: running: ");
5693
5694         rdev_for_each(rdev, mddev) {
5695                 char b[BDEVNAME_SIZE];
5696                 printk("<%s>", bdevname(rdev->bdev,b));
5697         }
5698         printk("\n");
5699
5700         err = do_md_run(mddev);
5701         if (err) {
5702                 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
5703                 do_md_stop(mddev, 0, NULL);
5704         }
5705 }
5706
5707 /*
5708  * Let's try to run arrays based on all disks that have arrived
5709  * until now.  (Those are in pending_raid_disks.)
5710  *
5711  * The method: pick the first pending disk, collect all disks with
5712  * the same UUID, remove all from the pending list and put them into
5713  * the 'same_array' list.  Then order this list based on superblock
5714  * update time (freshest comes first), kick out 'old' disks and
5715  * compare superblocks.  If everything's fine then run it.
5716  *
5717  * If "unit" is allocated, then bump its reference count.
5718  */
5719 static void autorun_devices(int part)
5720 {
5721         struct md_rdev *rdev0, *rdev, *tmp;
5722         struct mddev *mddev;
5723         char b[BDEVNAME_SIZE];
5724
5725         printk(KERN_INFO "md: autorun ...\n");
5726         while (!list_empty(&pending_raid_disks)) {
5727                 int unit;
5728                 dev_t dev;
5729                 LIST_HEAD(candidates);
5730                 rdev0 = list_entry(pending_raid_disks.next,
5731                                          struct md_rdev, same_set);
5732
5733                 printk(KERN_INFO "md: considering %s ...\n",
5734                         bdevname(rdev0->bdev,b));
5735                 INIT_LIST_HEAD(&candidates);
5736                 rdev_for_each_list(rdev, tmp, &pending_raid_disks)
5737                         if (super_90_load(rdev, rdev0, 0) >= 0) {
5738                                 printk(KERN_INFO "md:  adding %s ...\n",
5739                                         bdevname(rdev->bdev,b));
5740                                 list_move(&rdev->same_set, &candidates);
5741                         }
5742                 /*
5743                  * now we have a set of devices, with all of them having
5744                  * mostly sane superblocks. It's time to allocate the
5745                  * mddev.
5746                  */
5747                 if (part) {
5748                         dev = MKDEV(mdp_major,
5749                                     rdev0->preferred_minor << MdpMinorShift);
5750                         unit = MINOR(dev) >> MdpMinorShift;
5751                 } else {
5752                         dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
5753                         unit = MINOR(dev);
5754                 }
5755                 if (rdev0->preferred_minor != unit) {
5756                         printk(KERN_INFO "md: unit number in %s is bad: %d\n",
5757                                bdevname(rdev0->bdev, b), rdev0->preferred_minor);
5758                         break;
5759                 }
5760
5761                 md_probe(dev, NULL, NULL);
5762                 mddev = mddev_find(dev);
5763                 if (!mddev || !mddev->gendisk) {
5764                         if (mddev)
5765                                 mddev_put(mddev);
5766                         printk(KERN_ERR
5767                                 "md: cannot allocate memory for md drive.\n");
5768                         break;
5769                 }
5770                 if (mddev_lock(mddev))
5771                         printk(KERN_WARNING "md: %s locked, cannot run\n",
5772                                mdname(mddev));
5773                 else if (mddev->raid_disks || mddev->major_version
5774                          || !list_empty(&mddev->disks)) {
5775                         printk(KERN_WARNING
5776                                 "md: %s already running, cannot run %s\n",
5777                                 mdname(mddev), bdevname(rdev0->bdev,b));
5778                         mddev_unlock(mddev);
5779                 } else {
5780                         printk(KERN_INFO "md: created %s\n", mdname(mddev));
5781                         mddev->persistent = 1;
5782                         rdev_for_each_list(rdev, tmp, &candidates) {
5783                                 list_del_init(&rdev->same_set);
5784                                 if (bind_rdev_to_array(rdev, mddev))
5785                                         export_rdev(rdev);
5786                         }
5787                         autorun_array(mddev);
5788                         mddev_unlock(mddev);
5789                 }
5790                 /* on success, candidates will be empty, on error
5791                  * it won't...
5792                  */
5793                 rdev_for_each_list(rdev, tmp, &candidates) {
5794                         list_del_init(&rdev->same_set);
5795                         export_rdev(rdev);
5796                 }
5797                 mddev_put(mddev);
5798         }
5799         printk(KERN_INFO "md: ... autorun DONE.\n");
5800 }
5801 #endif /* !MODULE */
5802
5803 static int get_version(void __user *arg)
5804 {
5805         mdu_version_t ver;
5806
5807         ver.major = MD_MAJOR_VERSION;
5808         ver.minor = MD_MINOR_VERSION;
5809         ver.patchlevel = MD_PATCHLEVEL_VERSION;
5810
5811         if (copy_to_user(arg, &ver, sizeof(ver)))
5812                 return -EFAULT;
5813
5814         return 0;
5815 }
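/*
 * The matching userspace side of get_version(), as a hedged sketch;
 * "fd" is assumed to be an open md device node.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/raid/md_u.h>
 *
 *	// Returns 0 and fills *ver (major/minor/patchlevel) on success.
 *	static inline int md_get_version(int fd, mdu_version_t *ver)
 *	{
 *		return ioctl(fd, RAID_VERSION, ver);
 *	}
 */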
5816
5817 static int get_array_info(struct mddev *mddev, void __user *arg)
5818 {
5819         mdu_array_info_t info;
5820         int nr, working, insync, failed, spare;
5821         struct md_rdev *rdev;
5822
5823         nr = working = insync = failed = spare = 0;
5824         rcu_read_lock();
5825         rdev_for_each_rcu(rdev, mddev) {
5826                 nr++;
5827                 if (test_bit(Faulty, &rdev->flags))
5828                         failed++;
5829                 else {
5830                         working++;
5831                         if (test_bit(In_sync, &rdev->flags))
5832                                 insync++;
5833                         else
5834                                 spare++;
5835                 }
5836         }
5837         rcu_read_unlock();
5838
5839         info.major_version = mddev->major_version;
5840         info.minor_version = mddev->minor_version;
5841         info.patch_version = MD_PATCHLEVEL_VERSION;
5842         info.ctime         = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
5843         info.level         = mddev->level;
5844         info.size          = mddev->dev_sectors / 2;
5845         if (info.size != mddev->dev_sectors / 2) /* overflow */
5846                 info.size = -1;
5847         info.nr_disks      = nr;
5848         info.raid_disks    = mddev->raid_disks;
5849         info.md_minor      = mddev->md_minor;
5850         info.not_persistent= !mddev->persistent;
5851
5852         info.utime         = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
5853         info.state         = 0;
5854         if (mddev->in_sync)
5855                 info.state = (1<<MD_SB_CLEAN);
5856         if (mddev->bitmap && mddev->bitmap_info.offset)
5857                 info.state |= (1<<MD_SB_BITMAP_PRESENT);
5858         if (mddev_is_clustered(mddev))
5859                 info.state |= (1<<MD_SB_CLUSTERED);
5860         info.active_disks  = insync;
5861         info.working_disks = working;
5862         info.failed_disks  = failed;
5863         info.spare_disks   = spare;
5864
5865         info.layout        = mddev->layout;
5866         info.chunk_size    = mddev->chunk_sectors << 9;
5867
5868         if (copy_to_user(arg, &info, sizeof(info)))
5869                 return -EFAULT;
5870
5871         return 0;
5872 }
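/*
 * A userspace sketch (illustration only) of consuming get_array_info().
 * Note the counts computed above: active_disks is the in-sync subset of
 * working_disks.  Assumes fd refers to a started array.
 *
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/raid/md_u.h>
 *
 *	static int print_array_info(int fd)
 *	{
 *		mdu_array_info_t info;
 *
 *		if (ioctl(fd, GET_ARRAY_INFO, &info) < 0)
 *			return -1;
 *		printf("level %d, %d raid disks (%d active, %d working, %d failed, %d spare)\n",
 *		       info.level, info.raid_disks, info.active_disks,
 *		       info.working_disks, info.failed_disks, info.spare_disks);
 *		return 0;
 *	}
 */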
5873
5874 static int get_bitmap_file(struct mddev *mddev, void __user * arg)
5875 {
5876         mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
5877         char *ptr;
5878         int err;
5879
5880         file = kzalloc(sizeof(*file), GFP_NOIO);
5881         if (!file)
5882                 return -ENOMEM;
5883
5884         err = 0;
5885         spin_lock(&mddev->lock);
5886         /* bitmap enabled */
5887         if (mddev->bitmap_info.file) {
5888                 ptr = file_path(mddev->bitmap_info.file, file->pathname,
5889                                 sizeof(file->pathname));
5890                 if (IS_ERR(ptr))
5891                         err = PTR_ERR(ptr);
5892                 else
5893                         memmove(file->pathname, ptr,
5894                                 sizeof(file->pathname)-(ptr-file->pathname));
5895         }
5896         spin_unlock(&mddev->lock);
5897
5898         if (err == 0 &&
5899             copy_to_user(arg, file, sizeof(*file)))
5900                 err = -EFAULT;
5901
5902         kfree(file);
5903         return err;
5904 }
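/*
 * Reading back the external bitmap path that get_bitmap_file() reports,
 * as an illustrative sketch.  The pathname stays empty when the bitmap,
 * if any, is internal; fd is assumed to be an md node.
 *
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/raid/md_u.h>
 *
 *	static int print_bitmap_file(int fd)
 *	{
 *		mdu_bitmap_file_t bmf;
 *
 *		if (ioctl(fd, GET_BITMAP_FILE, &bmf) < 0)
 *			return -1;
 *		printf("bitmap file: %s\n", bmf.pathname[0] ? bmf.pathname : "(none)");
 *		return 0;
 *	}
 */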
5905
5906 static int get_disk_info(struct mddev *mddev, void __user * arg)
5907 {
5908         mdu_disk_info_t info;
5909         struct md_rdev *rdev;
5910
5911         if (copy_from_user(&info, arg, sizeof(info)))
5912                 return -EFAULT;
5913
5914         rcu_read_lock();
5915         rdev = md_find_rdev_nr_rcu(mddev, info.number);
5916         if (rdev) {
5917                 info.major = MAJOR(rdev->bdev->bd_dev);
5918                 info.minor = MINOR(rdev->bdev->bd_dev);
5919                 info.raid_disk = rdev->raid_disk;
5920                 info.state = 0;
5921                 if (test_bit(Faulty, &rdev->flags))
5922                         info.state |= (1<<MD_DISK_FAULTY);
5923                 else if (test_bit(In_sync, &rdev->flags)) {
5924                         info.state |= (1<<MD_DISK_ACTIVE);
5925                         info.state |= (1<<MD_DISK_SYNC);
5926                 }
5927                 if (test_bit(Journal, &rdev->flags))
5928                         info.state |= (1<<MD_DISK_JOURNAL);
5929                 if (test_bit(WriteMostly, &rdev->flags))
5930                         info.state |= (1<<MD_DISK_WRITEMOSTLY);
5931         } else {
5932                 info.major = info.minor = 0;
5933                 info.raid_disk = -1;
5934                 info.state = (1<<MD_DISK_REMOVED);
5935         }
5936         rcu_read_unlock();
5937
5938         if (copy_to_user(arg, &info, sizeof(info)))
5939                 return -EFAULT;
5940
5941         return 0;
5942 }
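/*
 * GET_DISK_INFO is a query-by-example interface: the caller fills in
 * info.number and the kernel overwrites the rest, reporting an absent
 * slot as MD_DISK_REMOVED.  A hedged userspace sketch:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/raid/md_p.h>
 *	#include <linux/raid/md_u.h>
 *
 *	static int disk_is_faulty(int fd, int number)
 *	{
 *		mdu_disk_info_t info = { .number = number };
 *
 *		if (ioctl(fd, GET_DISK_INFO, &info) < 0)
 *			return -1;
 *		return !!(info.state & (1 << MD_DISK_FAULTY));
 *	}
 */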
5943
5944 static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
5945 {
5946         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
5947         struct md_rdev *rdev;
5948         dev_t dev = MKDEV(info->major,info->minor);
5949
5950         if (mddev_is_clustered(mddev) &&
5951                 !(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) {
5952                 pr_err("%s: Cannot add to clustered mddev.\n",
5953                                mdname(mddev));
5954                 return -EINVAL;
5955         }
5956
5957         if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
5958                 return -EOVERFLOW;
5959
5960         if (!mddev->raid_disks) {
5961                 int err;
5962                 /* expecting a device which has a superblock */
5963                 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
5964                 if (IS_ERR(rdev)) {
5965                         printk(KERN_WARNING
5966                                 "md: md_import_device returned %ld\n",
5967                                 PTR_ERR(rdev));
5968                         return PTR_ERR(rdev);
5969                 }
5970                 if (!list_empty(&mddev->disks)) {
5971                         struct md_rdev *rdev0
5972                                 = list_entry(mddev->disks.next,
5973                                              struct md_rdev, same_set);
5974                         err = super_types[mddev->major_version]
5975                                 .load_super(rdev, rdev0, mddev->minor_version);
5976                         if (err < 0) {
5977                                 printk(KERN_WARNING
5978                                         "md: %s has different UUID to %s\n",
5979                                         bdevname(rdev->bdev,b),
5980                                         bdevname(rdev0->bdev,b2));
5981                                 export_rdev(rdev);
5982                                 return -EINVAL;
5983                         }
5984                 }
5985                 err = bind_rdev_to_array(rdev, mddev);
5986                 if (err)
5987                         export_rdev(rdev);
5988                 return err;
5989         }
5990
5991         /*
5992          * add_new_disk can be used once the array is assembled
5993          * to add "hot spares".  They must already have a superblock
5994          * written
5995          */
5996         if (mddev->pers) {
5997                 int err;
5998                 if (!mddev->pers->hot_add_disk) {
5999                         printk(KERN_WARNING
6000                                 "%s: personality does not support diskops!\n",
6001                                mdname(mddev));
6002                         return -EINVAL;
6003                 }
6004                 if (mddev->persistent)
6005                         rdev = md_import_device(dev, mddev->major_version,
6006                                                 mddev->minor_version);
6007                 else
6008                         rdev = md_import_device(dev, -1, -1);
6009                 if (IS_ERR(rdev)) {
6010                         printk(KERN_WARNING
6011                                 "md: md_import_device returned %ld\n",
6012                                 PTR_ERR(rdev));
6013                         return PTR_ERR(rdev);
6014                 }
6015                 /* set saved_raid_disk if appropriate */
6016                 if (!mddev->persistent) {
6017                         if (info->state & (1<<MD_DISK_SYNC)  &&
6018                             info->raid_disk < mddev->raid_disks) {
6019                                 rdev->raid_disk = info->raid_disk;
6020                                 set_bit(In_sync, &rdev->flags);
6021                                 clear_bit(Bitmap_sync, &rdev->flags);
6022                         } else
6023                                 rdev->raid_disk = -1;
6024                         rdev->saved_raid_disk = rdev->raid_disk;
6025                 } else
6026                         super_types[mddev->major_version].
6027                                 validate_super(mddev, rdev);
6028                 if ((info->state & (1<<MD_DISK_SYNC)) &&
6029                      rdev->raid_disk != info->raid_disk) {
6030                         /* This was a hot-add request, but the event
6031                          * counts don't match, so reject it.
6032                          */
6033                         export_rdev(rdev);
6034                         return -EINVAL;
6035                 }
6036
6037                 clear_bit(In_sync, &rdev->flags); /* just to be sure */
6038                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6039                         set_bit(WriteMostly, &rdev->flags);
6040                 else
6041                         clear_bit(WriteMostly, &rdev->flags);
6042
6043                 if (info->state & (1<<MD_DISK_JOURNAL)) {
6044                         struct md_rdev *rdev2;
6045                         bool has_journal = false;
6046
6047                         /* make sure no existing journal disk */
6048                         rdev_for_each(rdev2, mddev) {
6049                                 if (test_bit(Journal, &rdev2->flags)) {
6050                                         has_journal = true;
6051                                         break;
6052                                 }
6053                         }
6054                         if (has_journal) {
6055                                 export_rdev(rdev);
6056                                 return -EBUSY;
6057                         }
6058                         set_bit(Journal, &rdev->flags);
6059                 }
6060                 /*
6061                  * check whether the device shows up in other nodes
6062                  */
6063                 if (mddev_is_clustered(mddev)) {
6064                         if (info->state & (1 << MD_DISK_CANDIDATE))
6065                                 set_bit(Candidate, &rdev->flags);
6066                         else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
6067                                 /* --add initiated by this node */
6068                                 err = md_cluster_ops->add_new_disk(mddev, rdev);
6069                                 if (err) {
6070                                         export_rdev(rdev);
6071                                         return err;
6072                                 }
6073                         }
6074                 }
6075
6076                 rdev->raid_disk = -1;
6077                 err = bind_rdev_to_array(rdev, mddev);
6078
6079                 if (err)
6080                         export_rdev(rdev);
6081
6082                 if (mddev_is_clustered(mddev)) {
6083                         if (info->state & (1 << MD_DISK_CANDIDATE))
6084                                 md_cluster_ops->new_disk_ack(mddev, (err == 0));
6085                         else {
6086                                 if (err)
6087                                         md_cluster_ops->add_new_disk_cancel(mddev);
6088                                 else
6089                                         err = add_bound_rdev(rdev);
6090                         }
6091
6092                 } else if (!err)
6093                         err = add_bound_rdev(rdev);
6094
6095                 return err;
6096         }
6097
6098         /* otherwise, add_new_disk is only allowed
6099          * for major_version==0 superblocks
6100          */
6101         if (mddev->major_version != 0) {
6102                 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
6103                        mdname(mddev));
6104                 return -EINVAL;
6105         }
6106
6107         if (!(info->state & (1<<MD_DISK_FAULTY))) {
6108                 int err;
6109                 rdev = md_import_device(dev, -1, 0);
6110                 if (IS_ERR(rdev)) {
6111                         printk(KERN_WARNING
6112                                 "md: error, md_import_device() returned %ld\n",
6113                                 PTR_ERR(rdev));
6114                         return PTR_ERR(rdev);
6115                 }
6116                 rdev->desc_nr = info->number;
6117                 if (info->raid_disk < mddev->raid_disks)
6118                         rdev->raid_disk = info->raid_disk;
6119                 else
6120                         rdev->raid_disk = -1;
6121
6122                 if (rdev->raid_disk < mddev->raid_disks)
6123                         if (info->state & (1<<MD_DISK_SYNC))
6124                                 set_bit(In_sync, &rdev->flags);
6125
6126                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6127                         set_bit(WriteMostly, &rdev->flags);
6128
6129                 if (!mddev->persistent) {
6130                         printk(KERN_INFO "md: nonpersistent superblock ...\n");
6131                         rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
6132                 } else
6133                         rdev->sb_start = calc_dev_sboffset(rdev);
6134                 rdev->sectors = rdev->sb_start;
6135
6136                 err = bind_rdev_to_array(rdev, mddev);
6137                 if (err) {
6138                         export_rdev(rdev);
6139                         return err;
6140                 }
6141         }
6142
6143         return 0;
6144 }
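/*
 * Illustration only: adding a hot spare to a running, persistent array
 * through the add_new_disk() path above.  The major/minor pair names
 * the component device; state 0 makes it a plain spare.  The device
 * number is a caller-supplied assumption in this sketch.
 *
 *	#include <sys/types.h>
 *	#include <sys/sysmacros.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/raid/md_u.h>
 *
 *	static int add_spare(int fd, dev_t dev)
 *	{
 *		mdu_disk_info_t info = {
 *			.major = major(dev),
 *			.minor = minor(dev),
 *			.raid_disk = -1,	// not an active slot yet
 *			.state = 0,		// plain spare
 *		};
 *
 *		return ioctl(fd, ADD_NEW_DISK, &info);
 *	}
 */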
6145
6146 static int hot_remove_disk(struct mddev *mddev, dev_t dev)
6147 {
6148         char b[BDEVNAME_SIZE];
6149         struct md_rdev *rdev;
6150
6151         rdev = find_rdev(mddev, dev);
6152         if (!rdev)
6153                 return -ENXIO;
6154
6155         if (rdev->raid_disk < 0)
6156                 goto kick_rdev;
6157
6158         clear_bit(Blocked, &rdev->flags);
6159         remove_and_add_spares(mddev, rdev);
6160
6161         if (rdev->raid_disk >= 0)
6162                 goto busy;
6163
6164 kick_rdev:
6165         if (mddev_is_clustered(mddev))
6166                 md_cluster_ops->remove_disk(mddev, rdev);
6167
6168         md_kick_rdev_from_array(rdev);
6169         md_update_sb(mddev, 1);
6170         md_new_event(mddev);
6171
6172         return 0;
6173 busy:
6174         printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
6175                 bdevname(rdev->bdev,b), mdname(mddev));
6176         return -EBUSY;
6177 }
6178
6179 static int hot_add_disk(struct mddev *mddev, dev_t dev)
6180 {
6181         char b[BDEVNAME_SIZE];
6182         int err;
6183         struct md_rdev *rdev;
6184
6185         if (!mddev->pers)
6186                 return -ENODEV;
6187
6188         if (mddev->major_version != 0) {
6189                 printk(KERN_WARNING "%s: HOT_ADD may only be used with"
6190                         " version-0 superblocks.\n",
6191                         mdname(mddev));
6192                 return -EINVAL;
6193         }
6194         if (!mddev->pers->hot_add_disk) {
6195                 printk(KERN_WARNING
6196                         "%s: personality does not support diskops!\n",
6197                         mdname(mddev));
6198                 return -EINVAL;
6199         }
6200
6201         rdev = md_import_device(dev, -1, 0);
6202         if (IS_ERR(rdev)) {
6203                 printk(KERN_WARNING
6204                         "md: error, md_import_device() returned %ld\n",
6205                         PTR_ERR(rdev));
6206                 return -EINVAL;
6207         }
6208
6209         if (mddev->persistent)
6210                 rdev->sb_start = calc_dev_sboffset(rdev);
6211         else
6212                 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
6213
6214         rdev->sectors = rdev->sb_start;
6215
6216         if (test_bit(Faulty, &rdev->flags)) {
6217                 printk(KERN_WARNING
6218                         "md: cannot hot-add faulty %s disk to %s!\n",
6219                         bdevname(rdev->bdev,b), mdname(mddev));
6220                 err = -EINVAL;
6221                 goto abort_export;
6222         }
6223
6224         clear_bit(In_sync, &rdev->flags);
6225         rdev->desc_nr = -1;
6226         rdev->saved_raid_disk = -1;
6227         err = bind_rdev_to_array(rdev, mddev);
6228         if (err)
6229                 goto abort_export;
6230
6231         /*
6232          * The rest had better be atomic: disk failures can be
6233          * noticed in interrupt context ...
6234          */
6235
6236         rdev->raid_disk = -1;
6237
6238         md_update_sb(mddev, 1);
6239         /*
6240          * Kick recovery, maybe this spare has to be added to the
6241          * array immediately.
6242          */
6243         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6244         md_wakeup_thread(mddev->thread);
6245         md_new_event(mddev);
6246         return 0;
6247
6248 abort_export:
6249         export_rdev(rdev);
6250         return err;
6251 }
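/*
 * Unlike ADD_NEW_DISK, the HOT_ADD_DISK and HOT_REMOVE_DISK ioctls take
 * the component's device number directly in the argument word (decoded
 * above via new_decode_dev()), not a pointer.  A hedged sketch, assuming
 * the major and minor numbers both fit the old 8:8 encoding:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/raid/md_u.h>
 *
 *	static int hot_add(int fd, unsigned major, unsigned minor)
 *	{
 *		return ioctl(fd, HOT_ADD_DISK, (unsigned long)((major << 8) | minor));
 *	}
 *
 *	static int hot_remove(int fd, unsigned major, unsigned minor)
 *	{
 *		return ioctl(fd, HOT_REMOVE_DISK, (unsigned long)((major << 8) | minor));
 *	}
 */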
6252
6253 static int set_bitmap_file(struct mddev *mddev, int fd)
6254 {
6255         int err = 0;
6256
6257         if (mddev->pers) {
6258                 if (!mddev->pers->quiesce || !mddev->thread)
6259                         return -EBUSY;
6260                 if (mddev->recovery || mddev->sync_thread)
6261                         return -EBUSY;
6262                 /* we should be able to change the bitmap. */
6263         }
6264
6265         if (fd >= 0) {
6266                 struct inode *inode;
6267                 struct file *f;
6268
6269                 if (mddev->bitmap || mddev->bitmap_info.file)
6270                         return -EEXIST; /* cannot add when bitmap is present */
6271                 f = fget(fd);
6272
6273                 if (f == NULL) {
6274                         printk(KERN_ERR "%s: error: failed to get bitmap file\n",
6275                                mdname(mddev));
6276                         return -EBADF;
6277                 }
6278
6279                 inode = f->f_mapping->host;
6280                 if (!S_ISREG(inode->i_mode)) {
6281                         printk(KERN_ERR "%s: error: bitmap file must be a regular file\n",
6282                                mdname(mddev));
6283                         err = -EBADF;
6284                 } else if (!(f->f_mode & FMODE_WRITE)) {
6285                         printk(KERN_ERR "%s: error: bitmap file must be opened for write\n",
6286                                mdname(mddev));
6287                         err = -EBADF;
6288                 } else if (atomic_read(&inode->i_writecount) != 1) {
6289                         printk(KERN_ERR "%s: error: bitmap file is already in use\n",
6290                                mdname(mddev));
6291                         err = -EBUSY;
6292                 }
6293                 if (err) {
6294                         fput(f);
6295                         return err;
6296                 }
6297                 mddev->bitmap_info.file = f;
6298                 mddev->bitmap_info.offset = 0; /* file overrides offset */
6299         } else if (mddev->bitmap == NULL)
6300                 return -ENOENT; /* cannot remove what isn't there */
6301         err = 0;
6302         if (mddev->pers) {
6303                 mddev->pers->quiesce(mddev, 1);
6304                 if (fd >= 0) {
6305                         struct bitmap *bitmap;
6306
6307                         bitmap = bitmap_create(mddev, -1);
6308                         if (!IS_ERR(bitmap)) {
6309                                 mddev->bitmap = bitmap;
6310                                 err = bitmap_load(mddev);
6311                         } else
6312                                 err = PTR_ERR(bitmap);
6313                 }
6314                 if (fd < 0 || err) {
6315                         bitmap_destroy(mddev);
6316                         fd = -1; /* make sure to put the file */
6317                 }
6318                 mddev->pers->quiesce(mddev, 0);
6319         }
6320         if (fd < 0) {
6321                 struct file *f = mddev->bitmap_info.file;
6322                 if (f) {
6323                         spin_lock(&mddev->lock);
6324                         mddev->bitmap_info.file = NULL;
6325                         spin_unlock(&mddev->lock);
6326                         fput(f);
6327                 }
6328         }
6329
6330         return err;
6331 }
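/*
 * The argument to SET_BITMAP_FILE is a plain file descriptor (or -1 to
 * drop the bitmap), passed by value - see the (int)arg cast in
 * md_ioctl() below.  A sketch under the constraints checked above: the
 * bitmap must be a regular file, opened for write, not otherwise in use.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/raid/md_u.h>
 *
 *	static int attach_bitmap(int md_fd, const char *bitmap_path)
 *	{
 *		int bfd = open(bitmap_path, O_RDWR);
 *		int err;
 *
 *		if (bfd < 0)
 *			return -1;
 *		err = ioctl(md_fd, SET_BITMAP_FILE, bfd);
 *		close(bfd);	// the driver keeps its own reference on success
 *		return err;
 *	}
 */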
6332
6333 /*
6334  * set_array_info is used in two different ways.
6335  * The original usage is when creating a new array.
6336  * In this usage, raid_disks is > 0 and, together with
6337  *  level, size, not_persistent, layout and chunk_size, it determines
6338  *  the shape of the array.
6339  *  This will always create an array with a type-0.90.0 superblock.
6340  * The newer usage is when assembling an array.
6341  *  In this case raid_disks will be 0, and the major_version field is
6342  *  used to determine which style of superblocks are to be found on the devices.
6343  *  The minor and patch _version numbers are also kept in case the
6344  *  super_block handler wishes to interpret them.
6345  */
6346 static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
6347 {
6348
6349         if (info->raid_disks == 0) {
6350                 /* just setting version number for superblock loading */
6351                 if (info->major_version < 0 ||
6352                     info->major_version >= ARRAY_SIZE(super_types) ||
6353                     super_types[info->major_version].name == NULL) {
6354                         /* maybe try to auto-load a module? */
6355                         printk(KERN_INFO
6356                                 "md: superblock version %d not known\n",
6357                                 info->major_version);
6358                         return -EINVAL;
6359                 }
6360                 mddev->major_version = info->major_version;
6361                 mddev->minor_version = info->minor_version;
6362                 mddev->patch_version = info->patch_version;
6363                 mddev->persistent = !info->not_persistent;
6364                 /* ensure mddev_put doesn't delete this now that there
6365                  * is some minimal configuration.
6366                  */
6367                 mddev->ctime         = ktime_get_real_seconds();
6368                 return 0;
6369         }
6370         mddev->major_version = MD_MAJOR_VERSION;
6371         mddev->minor_version = MD_MINOR_VERSION;
6372         mddev->patch_version = MD_PATCHLEVEL_VERSION;
6373         mddev->ctime         = ktime_get_real_seconds();
6374
6375         mddev->level         = info->level;
6376         mddev->clevel[0]     = 0;
6377         mddev->dev_sectors   = 2 * (sector_t)info->size;
6378         mddev->raid_disks    = info->raid_disks;
6379         /* don't set md_minor, it is determined by which /dev/md* was
6380          * opened
6381          */
6382         if (info->state & (1<<MD_SB_CLEAN))
6383                 mddev->recovery_cp = MaxSector;
6384         else
6385                 mddev->recovery_cp = 0;
6386         mddev->persistent    = !info->not_persistent;
6387         mddev->external      = 0;
6388
6389         mddev->layout        = info->layout;
6390         mddev->chunk_sectors = info->chunk_size >> 9;
6391
6392         mddev->max_disks     = MD_SB_DISKS;
6393
6394         if (mddev->persistent)
6395                 mddev->flags         = 0;
6396         set_bit(MD_CHANGE_DEVS, &mddev->flags);
6397
6398         mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
6399         mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
6400         mddev->bitmap_info.offset = 0;
6401
6402         mddev->reshape_position = MaxSector;
6403
6404         /*
6405          * Generate a 128 bit UUID
6406          */
6407         get_random_bytes(mddev->uuid, 16);
6408
6409         mddev->new_level = mddev->level;
6410         mddev->new_chunk_sectors = mddev->chunk_sectors;
6411         mddev->new_layout = mddev->layout;
6412         mddev->delta_disks = 0;
6413         mddev->reshape_backwards = 0;
6414
6415         return 0;
6416 }
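/*
 * The two usages described above, seen from the caller's side as an
 * illustrative sketch; the level, disk count and component size are
 * assumptions.
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/raid/md_u.h>
 *
 *	// Usage 1: create a new 0.90 array shape (raid_disks > 0).
 *	static int create_raid1(int fd)
 *	{
 *		mdu_array_info_t info;
 *
 *		memset(&info, 0, sizeof(info));
 *		info.level = 1;			// RAID1 (assumed)
 *		info.raid_disks = 2;
 *		info.size = 1024 * 1024;	// component size in KiB (assumed)
 *		return ioctl(fd, SET_ARRAY_INFO, &info);
 *	}
 *
 *	// Usage 2: assemble - only pick the superblock format (raid_disks == 0).
 *	static int prepare_assemble_v1(int fd)
 *	{
 *		mdu_array_info_t info;
 *
 *		memset(&info, 0, sizeof(info));
 *		info.major_version = 1;	// look for v1.x superblocks on the devices
 *		return ioctl(fd, SET_ARRAY_INFO, &info);
 *	}
 */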
6417
6418 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
6419 {
6420         WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);
6421
6422         if (mddev->external_size)
6423                 return;
6424
6425         mddev->array_sectors = array_sectors;
6426 }
6427 EXPORT_SYMBOL(md_set_array_sectors);
6428
6429 static int update_size(struct mddev *mddev, sector_t num_sectors)
6430 {
6431         struct md_rdev *rdev;
6432         int rv;
6433         int fit = (num_sectors == 0);
6434
6435         if (mddev->pers->resize == NULL)
6436                 return -EINVAL;
6437         /* The "num_sectors" is the number of sectors of each device that
6438          * is used.  This can only make sense for arrays with redundancy.
6439          * linear and raid0 always use whatever space is available. We can only
6440          * consider changing this number if no resync or reconstruction is
6441          * happening, and if the new size is acceptable. It must fit before the
6442          * sb_start or, if that is <data_offset, it must fit before the size
6443          * of each device.  If num_sectors is zero, we find the largest size
6444          * that fits.
6445          */
6446         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
6447             mddev->sync_thread)
6448                 return -EBUSY;
6449         if (mddev->ro)
6450                 return -EROFS;
6451
6452         rdev_for_each(rdev, mddev) {
6453                 sector_t avail = rdev->sectors;
6454
6455                 if (fit && (num_sectors == 0 || num_sectors > avail))
6456                         num_sectors = avail;
6457                 if (avail < num_sectors)
6458                         return -ENOSPC;
6459         }
6460         rv = mddev->pers->resize(mddev, num_sectors);
6461         if (!rv)
6462                 revalidate_disk(mddev->gendisk);
6463         return rv;
6464 }
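/*
 * update_size() is reached from update_array_info() below when
 * SET_ARRAY_INFO is issued on a running array with only the size field
 * changed; info.size is in KiB and 0 means "find the largest size that
 * fits" (the fit case above).  A hedged sketch of growing to maximum:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/raid/md_u.h>
 *
 *	static int grow_to_max(int fd)
 *	{
 *		mdu_array_info_t info;
 *
 *		if (ioctl(fd, GET_ARRAY_INFO, &info) < 0)
 *			return -1;
 *		info.size = 0;	// num_sectors == 0: use the largest fitting size
 *		return ioctl(fd, SET_ARRAY_INFO, &info);
 *	}
 */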
6465
6466 static int update_raid_disks(struct mddev *mddev, int raid_disks)
6467 {
6468         int rv;
6469         struct md_rdev *rdev;
6470         /* change the number of raid disks */
6471         if (mddev->pers->check_reshape == NULL)
6472                 return -EINVAL;
6473         if (mddev->ro)
6474                 return -EROFS;
6475         if (raid_disks <= 0 ||
6476             (mddev->max_disks && raid_disks >= mddev->max_disks))
6477                 return -EINVAL;
6478         if (mddev->sync_thread ||
6479             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
6480             mddev->reshape_position != MaxSector)
6481                 return -EBUSY;
6482
6483         rdev_for_each(rdev, mddev) {
6484                 if (mddev->raid_disks < raid_disks &&
6485                     rdev->data_offset < rdev->new_data_offset)
6486                         return -EINVAL;
6487                 if (mddev->raid_disks > raid_disks &&
6488                     rdev->data_offset > rdev->new_data_offset)
6489                         return -EINVAL;
6490         }
6491
6492         mddev->delta_disks = raid_disks - mddev->raid_disks;
6493         if (mddev->delta_disks < 0)
6494                 mddev->reshape_backwards = 1;
6495         else if (mddev->delta_disks > 0)
6496                 mddev->reshape_backwards = 0;
6497
6498         rv = mddev->pers->check_reshape(mddev);
6499         if (rv < 0) {
6500                 mddev->delta_disks = 0;
6501                 mddev->reshape_backwards = 0;
6502         }
6503         return rv;
6504 }
6505
6506 /*
6507  * update_array_info is used to change the configuration of an
6508  * on-line array.
6509  * The version, ctime, level, size, raid_disks, not_persistent, layout and chunk_size
6510  * fields in the info are checked against the array.
6511  * Any differences that cannot be handled will cause an error.
6512  * Normally, only one change can be managed at a time.
6513  */
6514 static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
6515 {
6516         int rv = 0;
6517         int cnt = 0;
6518         int state = 0;
6519
6520         /* calculate expected state, ignoring low bits */
6521         if (mddev->bitmap && mddev->bitmap_info.offset)
6522                 state |= (1 << MD_SB_BITMAP_PRESENT);
6523
6524         if (mddev->major_version != info->major_version ||
6525             mddev->minor_version != info->minor_version ||
6526 /*          mddev->patch_version != info->patch_version || */
6527             mddev->ctime         != info->ctime         ||
6528             mddev->level         != info->level         ||
6529 /*          mddev->layout        != info->layout        || */
6530             mddev->persistent    != !info->not_persistent ||
6531             mddev->chunk_sectors != info->chunk_size >> 9 ||
6532             /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
6533             ((state^info->state) & 0xfffffe00)
6534                 )
6535                 return -EINVAL;
6536         /* Check there is only one change */
6537         if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
6538                 cnt++;
6539         if (mddev->raid_disks != info->raid_disks)
6540                 cnt++;
6541         if (mddev->layout != info->layout)
6542                 cnt++;
6543         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
6544                 cnt++;
6545         if (cnt == 0)
6546                 return 0;
6547         if (cnt > 1)
6548                 return -EINVAL;
6549
6550         if (mddev->layout != info->layout) {
6551                 /* Change layout
6552                  * we don't need to do anything at the md level, the
6553                  * personality will take care of it all.
6554                  */
6555                 if (mddev->pers->check_reshape == NULL)
6556                         return -EINVAL;
6557                 else {
6558                         mddev->new_layout = info->layout;
6559                         rv = mddev->pers->check_reshape(mddev);
6560                         if (rv)
6561                                 mddev->new_layout = mddev->layout;
6562                         return rv;
6563                 }
6564         }
6565         if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
6566                 rv = update_size(mddev, (sector_t)info->size * 2);
6567
6568         if (mddev->raid_disks    != info->raid_disks)
6569                 rv = update_raid_disks(mddev, info->raid_disks);
6570
6571         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
6572                 if (mddev->pers->quiesce == NULL || mddev->thread == NULL) {
6573                         rv = -EINVAL;
6574                         goto err;
6575                 }
6576                 if (mddev->recovery || mddev->sync_thread) {
6577                         rv = -EBUSY;
6578                         goto err;
6579                 }
6580                 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
6581                         struct bitmap *bitmap;
6582                         /* add the bitmap */
6583                         if (mddev->bitmap) {
6584                                 rv = -EEXIST;
6585                                 goto err;
6586                         }
6587                         if (mddev->bitmap_info.default_offset == 0) {
6588                                 rv = -EINVAL;
6589                                 goto err;
6590                         }
6591                         mddev->bitmap_info.offset =
6592                                 mddev->bitmap_info.default_offset;
6593                         mddev->bitmap_info.space =
6594                                 mddev->bitmap_info.default_space;
6595                         mddev->pers->quiesce(mddev, 1);
6596                         bitmap = bitmap_create(mddev, -1);
6597                         if (!IS_ERR(bitmap)) {
6598                                 mddev->bitmap = bitmap;
6599                                 rv = bitmap_load(mddev);
6600                         } else
6601                                 rv = PTR_ERR(bitmap);
6602                         if (rv)
6603                                 bitmap_destroy(mddev);
6604                         mddev->pers->quiesce(mddev, 0);
6605                 } else {
6606                         /* remove the bitmap */
6607                         if (!mddev->bitmap) {
6608                                 rv = -ENOENT;
6609                                 goto err;
6610                         }
6611                         if (mddev->bitmap->storage.file) {
6612                                 rv = -EINVAL;
6613                                 goto err;
6614                         }
6615                         if (mddev->bitmap_info.nodes) {
6616                                 /* take the cluster's PW lock on all the bitmaps */
6617                                 if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) {
6618                                         printk(KERN_ERR "md: can't change bitmap to none since the"
6619                                                " array is in use by more than one node\n");
6620                                         rv = -EPERM;
6621                                         md_cluster_ops->unlock_all_bitmaps(mddev);
6622                                         goto err;
6623                                 }
6624
6625                                 mddev->bitmap_info.nodes = 0;
6626                                 md_cluster_ops->leave(mddev);
6627                         }
6628                         mddev->pers->quiesce(mddev, 1);
6629                         bitmap_destroy(mddev);
6630                         mddev->pers->quiesce(mddev, 0);
6631                         mddev->bitmap_info.offset = 0;
6632                 }
6633         }
6634         md_update_sb(mddev, 1);
6635         return rv;
6636 err:
6637         return rv;
6638 }
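/*
 * Because update_array_info() accepts exactly one change per call,
 * userspace reads the current info first and flips a single field.
 * E.g. switching on an internal bitmap (MD_SB_BITMAP_PRESENT is within
 * the low bits the state check above allows to change), as a sketch:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/raid/md_p.h>
 *	#include <linux/raid/md_u.h>
 *
 *	static int enable_internal_bitmap(int fd)
 *	{
 *		mdu_array_info_t info;
 *
 *		if (ioctl(fd, GET_ARRAY_INFO, &info) < 0)
 *			return -1;
 *		info.state |= (1 << MD_SB_BITMAP_PRESENT);	// the one change
 *		return ioctl(fd, SET_ARRAY_INFO, &info);
 *	}
 */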
6639
6640 static int set_disk_faulty(struct mddev *mddev, dev_t dev)
6641 {
6642         struct md_rdev *rdev;
6643         int err = 0;
6644
6645         if (mddev->pers == NULL)
6646                 return -ENODEV;
6647
6648         rcu_read_lock();
6649         rdev = find_rdev_rcu(mddev, dev);
6650         if (!rdev)
6651                 err =  -ENODEV;
6652         else {
6653                 md_error(mddev, rdev);
6654                 if (!test_bit(Faulty, &rdev->flags))
6655                         err = -EBUSY;
6656         }
6657         rcu_read_unlock();
6658         return err;
6659 }
6660
6661 /*
6662  * We have a problem here: there is no easy way to give a CHS
6663  * virtual geometry.  We currently pretend that we have 2 heads and
6664  * 4 sectors (with a BIG number of cylinders...).  This drives
6665  * dosfs just mad... ;-)
6666  */
6667 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
6668 {
6669         struct mddev *mddev = bdev->bd_disk->private_data;
6670
6671         geo->heads = 2;
6672         geo->sectors = 4;
6673         geo->cylinders = mddev->array_sectors / 8;
6674         return 0;
6675 }
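/*
 * With the fake 2-head / 4-sector geometry above, the cylinder count
 * that HDIO_GETGEO reports is simply array_sectors / 8.  Illustrative
 * userspace sketch, assuming fd is an open md node:
 *
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/hdreg.h>
 *
 *	static int print_geometry(int fd)
 *	{
 *		struct hd_geometry geo;
 *
 *		if (ioctl(fd, HDIO_GETGEO, &geo) < 0)
 *			return -1;
 *		printf("%d heads, %d sectors, %d cylinders\n",
 *		       geo.heads, geo.sectors, geo.cylinders);
 *		return 0;
 *	}
 */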
6676
6677 static inline bool md_ioctl_valid(unsigned int cmd)
6678 {
6679         switch (cmd) {
6680         case ADD_NEW_DISK:
6681         case BLKROSET:
6682         case GET_ARRAY_INFO:
6683         case GET_BITMAP_FILE:
6684         case GET_DISK_INFO:
6685         case HOT_ADD_DISK:
6686         case HOT_REMOVE_DISK:
6687         case RAID_AUTORUN:
6688         case RAID_VERSION:
6689         case RESTART_ARRAY_RW:
6690         case RUN_ARRAY:
6691         case SET_ARRAY_INFO:
6692         case SET_BITMAP_FILE:
6693         case SET_DISK_FAULTY:
6694         case STOP_ARRAY:
6695         case STOP_ARRAY_RO:
6696         case CLUSTERED_DISK_NACK:
6697                 return true;
6698         default:
6699                 return false;
6700         }
6701 }
6702
6703 static int md_ioctl(struct block_device *bdev, fmode_t mode,
6704                         unsigned int cmd, unsigned long arg)
6705 {
6706         int err = 0;
6707         void __user *argp = (void __user *)arg;
6708         struct mddev *mddev = NULL;
6709         int ro;
6710
6711         if (!md_ioctl_valid(cmd))
6712                 return -ENOTTY;
6713
6714         switch (cmd) {
6715         case RAID_VERSION:
6716         case GET_ARRAY_INFO:
6717         case GET_DISK_INFO:
6718                 break;
6719         default:
6720                 if (!capable(CAP_SYS_ADMIN))
6721                         return -EACCES;
6722         }
6723
6724         /*
6725          * Commands dealing with the RAID driver but not any
6726          * particular array:
6727          */
6728         switch (cmd) {
6729         case RAID_VERSION:
6730                 err = get_version(argp);
6731                 goto out;
6732
6733 #ifndef MODULE
6734         case RAID_AUTORUN:
6735                 err = 0;
6736                 autostart_arrays(arg);
6737                 goto out;
6738 #endif
6739         default:;
6740         }
6741
6742         /*
6743          * Commands creating/starting a new array:
6744          */
6745
6746         mddev = bdev->bd_disk->private_data;
6747
6748         if (!mddev) {
6749                 BUG();
6750                 goto out;
6751         }
6752
6753         /* Some actions do not require the mutex */
6754         switch (cmd) {
6755         case GET_ARRAY_INFO:
6756                 if (!mddev->raid_disks && !mddev->external)
6757                         err = -ENODEV;
6758                 else
6759                         err = get_array_info(mddev, argp);
6760                 goto out;
6761
6762         case GET_DISK_INFO:
6763                 if (!mddev->raid_disks && !mddev->external)
6764                         err = -ENODEV;
6765                 else
6766                         err = get_disk_info(mddev, argp);
6767                 goto out;
6768
6769         case SET_DISK_FAULTY:
6770                 err = set_disk_faulty(mddev, new_decode_dev(arg));
6771                 goto out;
6772
6773         case GET_BITMAP_FILE:
6774                 err = get_bitmap_file(mddev, argp);
6775                 goto out;
6776
6777         }
6778
6779         if (cmd == ADD_NEW_DISK)
6780                 /* need to ensure md_delayed_delete() has completed */
6781                 flush_workqueue(md_misc_wq);
6782
6783         if (cmd == HOT_REMOVE_DISK)
6784                 /* need to ensure recovery thread has run */
6785                 wait_event_interruptible_timeout(mddev->sb_wait,
6786                                                  !test_bit(MD_RECOVERY_NEEDED,
6787                                                            &mddev->recovery),
6788                                                  msecs_to_jiffies(5000));
6789         if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
6790                 /* Need to flush page cache, and ensure no-one else opens
6791                  * and writes
6792                  */
6793                 mutex_lock(&mddev->open_mutex);
6794                 if (mddev->pers && atomic_read(&mddev->openers) > 1) {
6795                         mutex_unlock(&mddev->open_mutex);
6796                         err = -EBUSY;
6797                         goto out;
6798                 }
6799                 set_bit(MD_STILL_CLOSED, &mddev->flags);
6800                 mutex_unlock(&mddev->open_mutex);
6801                 sync_blockdev(bdev);
6802         }
6803         err = mddev_lock(mddev);
6804         if (err) {
6805                 printk(KERN_INFO
6806                         "md: ioctl lock interrupted, reason %d, cmd %d\n",
6807                         err, cmd);
6808                 goto out;
6809         }
6810
6811         if (cmd == SET_ARRAY_INFO) {
6812                 mdu_array_info_t info;
6813                 if (!arg)
6814                         memset(&info, 0, sizeof(info));
6815                 else if (copy_from_user(&info, argp, sizeof(info))) {
6816                         err = -EFAULT;
6817                         goto unlock;
6818                 }
6819                 if (mddev->pers) {
6820                         err = update_array_info(mddev, &info);
6821                         if (err) {
6822                                 printk(KERN_WARNING "md: couldn't update"
6823                                        " array info. %d\n", err);
6824                                 goto unlock;
6825                         }
6826                         goto unlock;
6827                 }
6828                 if (!list_empty(&mddev->disks)) {
6829                         printk(KERN_WARNING
6830                                "md: array %s already has disks!\n",
6831                                mdname(mddev));
6832                         err = -EBUSY;
6833                         goto unlock;
6834                 }
6835                 if (mddev->raid_disks) {
6836                         printk(KERN_WARNING
6837                                "md: array %s already initialised!\n",
6838                                mdname(mddev));
6839                         err = -EBUSY;
6840                         goto unlock;
6841                 }
6842                 err = set_array_info(mddev, &info);
6843                 if (err) {
6844                         printk(KERN_WARNING "md: couldn't set"
6845                                " array info. %d\n", err);
6846                         goto unlock;
6847                 }
6848                 goto unlock;
6849         }
6850
6851         /*
6852          * Commands querying/configuring an existing array:
6853          */
6854         /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
6855          * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
6856         if ((!mddev->raid_disks && !mddev->external)
6857             && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
6858             && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
6859             && cmd != GET_BITMAP_FILE) {
6860                 err = -ENODEV;
6861                 goto unlock;
6862         }
6863
6864         /*
6865          * Commands even a read-only array can execute:
6866          */
6867         switch (cmd) {
6868         case RESTART_ARRAY_RW:
6869                 err = restart_array(mddev);
6870                 goto unlock;
6871
6872         case STOP_ARRAY:
6873                 err = do_md_stop(mddev, 0, bdev);
6874                 goto unlock;
6875
6876         case STOP_ARRAY_RO:
6877                 err = md_set_readonly(mddev, bdev);
6878                 goto unlock;
6879
6880         case HOT_REMOVE_DISK:
6881                 err = hot_remove_disk(mddev, new_decode_dev(arg));
6882                 goto unlock;
6883
6884         case ADD_NEW_DISK:
6885                 /* We can support ADD_NEW_DISK on read-only arrays
6886                  * only if we are re-adding a preexisting device.
6887                  * So require mddev->pers and MD_DISK_SYNC.
6888                  */
6889                 if (mddev->pers) {
6890                         mdu_disk_info_t info;
6891                         if (copy_from_user(&info, argp, sizeof(info)))
6892                                 err = -EFAULT;
6893                         else if (!(info.state & (1<<MD_DISK_SYNC)))
6894                                 /* Need to clear read-only for this */
6895                                 break;
6896                         else
6897                                 err = add_new_disk(mddev, &info);
6898                         goto unlock;
6899                 }
6900                 break;
6901
6902         case BLKROSET:
6903                 if (get_user(ro, (int __user *)(arg))) {
6904                         err = -EFAULT;
6905                         goto unlock;
6906                 }
6907                 err = -EINVAL;
6908
6909                 /* if the bdev is going readonly the value of mddev->ro
6910                  * does not matter, no writes are coming
6911                  */
6912                 if (ro)
6913                         goto unlock;
6914
6915                 /* are we already prepared for writes? */
6916                 if (mddev->ro != 1)
6917                         goto unlock;
6918
6919                 /* transitioning to readauto need only happen for
6920                  * arrays that call md_write_start
6921                  */
6922                 if (mddev->pers) {
6923                         err = restart_array(mddev);
6924                         if (err == 0) {
6925                                 mddev->ro = 2;
6926                                 set_disk_ro(mddev->gendisk, 0);
6927                         }
6928                 }
6929                 goto unlock;
6930         }
6931
6932         /*
6933          * The remaining ioctls are changing the state of the
6934          * superblock, so we do not allow them on read-only arrays.
6935          */
6936         if (mddev->ro && mddev->pers) {
6937                 if (mddev->ro == 2) {
6938                         mddev->ro = 0;
6939                         sysfs_notify_dirent_safe(mddev->sysfs_state);
6940                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6941                         /* mddev_unlock will wake thread */
6942                         /* If a device failed while we were read-only, we
6943                          * need to make sure the metadata is updated now.
6944                          */
6945                         if (test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
6946                                 mddev_unlock(mddev);
6947                                 wait_event(mddev->sb_wait,
6948                                            !test_bit(MD_CHANGE_DEVS, &mddev->flags) &&
6949                                            !test_bit(MD_CHANGE_PENDING, &mddev->flags));
6950                                 mddev_lock_nointr(mddev);
6951                         }
6952                 } else {
6953                         err = -EROFS;
6954                         goto unlock;
6955                 }
6956         }
6957
6958         switch (cmd) {
6959         case ADD_NEW_DISK:
6960         {
6961                 mdu_disk_info_t info;
6962                 if (copy_from_user(&info, argp, sizeof(info)))
6963                         err = -EFAULT;
6964                 else
6965                         err = add_new_disk(mddev, &info);
6966                 goto unlock;
6967         }
6968
6969         case CLUSTERED_DISK_NACK:
6970                 if (mddev_is_clustered(mddev))
6971                         md_cluster_ops->new_disk_ack(mddev, false);
6972                 else
6973                         err = -EINVAL;
6974                 goto unlock;
6975
6976         case HOT_ADD_DISK:
6977                 err = hot_add_disk(mddev, new_decode_dev(arg));
6978                 goto unlock;
6979
6980         case RUN_ARRAY:
6981                 err = do_md_run(mddev);
6982                 goto unlock;
6983
6984         case SET_BITMAP_FILE:
6985                 err = set_bitmap_file(mddev, (int)arg);
6986                 goto unlock;
6987
6988         default:
6989                 err = -EINVAL;
6990                 goto unlock;
6991         }
6992
6993 unlock:
6994         if (mddev->hold_active == UNTIL_IOCTL &&
6995             err != -EINVAL)
6996                 mddev->hold_active = 0;
6997         mddev_unlock(mddev);
6998 out:
6999         return err;
7000 }
7001 #ifdef CONFIG_COMPAT
7002 static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
7003                     unsigned int cmd, unsigned long arg)
7004 {
7005         switch (cmd) {
7006         case HOT_REMOVE_DISK:
7007         case HOT_ADD_DISK:
7008         case SET_DISK_FAULTY:
7009         case SET_BITMAP_FILE:
7010                 /* These take in integer arg, do not convert */
7011                 break;
7012         default:
7013                 arg = (unsigned long)compat_ptr(arg);
7014                 break;
7015         }
7016
7017         return md_ioctl(bdev, mode, cmd, arg);
7018 }
7019 #endif /* CONFIG_COMPAT */
7020
7021 static int md_open(struct block_device *bdev, fmode_t mode)
7022 {
7023         /*
7024          * Succeed if we can lock the mddev, which confirms that
7025          * it isn't being stopped right now.
7026          */
7027         struct mddev *mddev = mddev_find(bdev->bd_dev);
7028         int err;
7029
7030         if (!mddev)
7031                 return -ENODEV;
7032
7033         if (mddev->gendisk != bdev->bd_disk) {
7034                 /* we are racing with mddev_put which is discarding this
7035                  * bd_disk.
7036                  */
7037                 mddev_put(mddev);
7038                 /* Wait until bdev->bd_disk is definitely gone */
7039                 flush_workqueue(md_misc_wq);
7040                 /* Then retry the open from the top */
7041                 return -ERESTARTSYS;
7042         }
7043         BUG_ON(mddev != bdev->bd_disk->private_data);
7044
7045         if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
7046                 goto out;
7047
7048         err = 0;
7049         atomic_inc(&mddev->openers);
7050         clear_bit(MD_STILL_CLOSED, &mddev->flags);
7051         mutex_unlock(&mddev->open_mutex);
7052
7053         check_disk_change(bdev);
7054  out:
7055         return err;
7056 }
7057
7058 static void md_release(struct gendisk *disk, fmode_t mode)
7059 {
7060         struct mddev *mddev = disk->private_data;
7061
7062         BUG_ON(!mddev);
7063         atomic_dec(&mddev->openers);
7064         mddev_put(mddev);
7065 }
7066
7067 static int md_media_changed(struct gendisk *disk)
7068 {
7069         struct mddev *mddev = disk->private_data;
7070
7071         return mddev->changed;
7072 }
7073
7074 static int md_revalidate(struct gendisk *disk)
7075 {
7076         struct mddev *mddev = disk->private_data;
7077
7078         mddev->changed = 0;
7079         return 0;
7080 }
7081 static const struct block_device_operations md_fops =
7082 {
7083         .owner          = THIS_MODULE,
7084         .open           = md_open,
7085         .release        = md_release,
7086         .ioctl          = md_ioctl,
7087 #ifdef CONFIG_COMPAT
7088         .compat_ioctl   = md_compat_ioctl,
7089 #endif
7090         .getgeo         = md_getgeo,
7091         .media_changed  = md_media_changed,
7092         .revalidate_disk = md_revalidate,
7093 };
7094
7095 static int md_thread(void *arg)
7096 {
7097         struct md_thread *thread = arg;
7098
7099         /*
7100          * md_thread is a 'system-thread'; its priority should be very
7101          * high. We avoid resource deadlocks individually in each
7102          * raid personality. (RAID5 does preallocation.) We also use RR and
7103          * the very same RT priority as kswapd, thus we will never get
7104          * into a priority inversion deadlock.
7105          *
7106          * We definitely have to have equal or higher priority than
7107          * bdflush, otherwise bdflush will deadlock if there are too
7108          * many dirty RAID5 blocks.
7109          */
7110
7111         allow_signal(SIGKILL);
7112         while (!kthread_should_stop()) {
7113
7114                 /* We need to wait INTERRUPTIBLE so that
7115                  * we don't add to the load-average.
7116                  * That means we need to be sure no signals are
7117                  * pending
7118                  */
7119                 if (signal_pending(current))
7120                         flush_signals(current);
7121
7122                 wait_event_interruptible_timeout
7123                         (thread->wqueue,
7124                          test_bit(THREAD_WAKEUP, &thread->flags)
7125                          || kthread_should_stop(),
7126                          thread->timeout);
7127
7128                 clear_bit(THREAD_WAKEUP, &thread->flags);
7129                 if (!kthread_should_stop())
7130                         thread->run(thread);
7131         }
7132
7133         return 0;
7134 }
7135
7136 void md_wakeup_thread(struct md_thread *thread)
7137 {
7138         if (thread) {
7139                 pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
7140                 set_bit(THREAD_WAKEUP, &thread->flags);
7141                 wake_up(&thread->wqueue);
7142         }
7143 }
7144 EXPORT_SYMBOL(md_wakeup_thread);
7145
7146 struct md_thread *md_register_thread(void (*run) (struct md_thread *),
7147                 struct mddev *mddev, const char *name)
7148 {
7149         struct md_thread *thread;
7150
7151         thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL);
7152         if (!thread)
7153                 return NULL;
7154
7155         init_waitqueue_head(&thread->wqueue);
7156
7157         thread->run = run;
7158         thread->mddev = mddev;
7159         thread->timeout = MAX_SCHEDULE_TIMEOUT;
7160         thread->tsk = kthread_run(md_thread, thread,
7161                                   "%s_%s",
7162                                   mdname(thread->mddev),
7163                                   name);
7164         if (IS_ERR(thread->tsk)) {
7165                 kfree(thread);
7166                 return NULL;
7167         }
7168         return thread;
7169 }
7170 EXPORT_SYMBOL(md_register_thread);
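/*
 * Illustrative usage (a sketch, not code from this driver): a RAID
 * personality typically owns one such thread, created when the array
 * starts and torn down when it stops.  'myraid_daemon' and 'conf' are
 * hypothetical names.
 *
 *	static void myraid_daemon(struct md_thread *thread)
 *	{
 *		struct mddev *mddev = thread->mddev;
 *		(drain this array's pending work, then return;
 *		 md_thread() loops and sleeps until the next wakeup)
 *	}
 *
 *	conf->thread = md_register_thread(myraid_daemon, mddev, "myraid");
 *	...
 *	md_wakeup_thread(conf->thread);
 *	...
 *	md_unregister_thread(&conf->thread);
 */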
7171
7172 void md_unregister_thread(struct md_thread **threadp)
7173 {
7174         struct md_thread *thread = *threadp;
7175         if (!thread)
7176                 return;
7177         pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
7178         /* Locking ensures that mddev_unlock does not wake_up a
7179          * non-existent thread
7180          */
7181         spin_lock(&pers_lock);
7182         *threadp = NULL;
7183         spin_unlock(&pers_lock);
7184
7185         kthread_stop(thread->tsk);
7186         kfree(thread);
7187 }
7188 EXPORT_SYMBOL(md_unregister_thread);
7189
7190 void md_error(struct mddev *mddev, struct md_rdev *rdev)
7191 {
7192         if (!rdev || test_bit(Faulty, &rdev->flags))
7193                 return;
7194
7195         if (!mddev->pers || !mddev->pers->error_handler)
7196                 return;
7197         mddev->pers->error_handler(mddev, rdev);
7198         if (mddev->degraded)
7199                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
7200         sysfs_notify_dirent_safe(rdev->sysfs_state);
7201         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7202         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7203         md_wakeup_thread(mddev->thread);
7204         if (mddev->event_work.func)
7205                 queue_work(md_misc_wq, &mddev->event_work);
7206         md_new_event(mddev);
7207 }
7208 EXPORT_SYMBOL(md_error);
7209
7210 /* seq_file implementation for /proc/mdstat */
7211
7212 static void status_unused(struct seq_file *seq)
7213 {
7214         int i = 0;
7215         struct md_rdev *rdev;
7216
7217         seq_printf(seq, "unused devices: ");
7218
7219         list_for_each_entry(rdev, &pending_raid_disks, same_set) {
7220                 char b[BDEVNAME_SIZE];
7221                 i++;
7222                 seq_printf(seq, "%s ",
7223                               bdevname(rdev->bdev,b));
7224         }
7225         if (!i)
7226                 seq_printf(seq, "<none>");
7227
7228         seq_printf(seq, "\n");
7229 }
7230
7231 static int status_resync(struct seq_file *seq, struct mddev *mddev)
7232 {
7233         sector_t max_sectors, resync, res;
7234         unsigned long dt, db;
7235         sector_t rt;
7236         int scale;
7237         unsigned int per_milli;
7238
7239         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
7240             test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
7241                 max_sectors = mddev->resync_max_sectors;
7242         else
7243                 max_sectors = mddev->dev_sectors;
7244
7245         resync = mddev->curr_resync;
7246         if (resync <= 3) {
7247                 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
7248                         /* Still cleaning up */
7249                         resync = max_sectors;
7250         } else
7251                 resync -= atomic_read(&mddev->recovery_active);
7252
7253         if (resync == 0) {
7254                 if (mddev->recovery_cp < MaxSector) {
7255                         seq_printf(seq, "\tresync=PENDING");
7256                         return 1;
7257                 }
7258                 return 0;
7259         }
7260         if (resync < 3) {
7261                 seq_printf(seq, "\tresync=DELAYED");
7262                 return 1;
7263         }
7264
7265         WARN_ON(max_sectors == 0);
7266         /* Pick 'scale' such that (resync>>scale)*1000 will fit
7267          * in a sector_t, and (max_sectors>>scale) will fit in a
7268          * u32, as those are the requirements for sector_div.
7269          * Thus 'scale' must be at least 10
7270          */
7271         scale = 10;
7272         if (sizeof(sector_t) > sizeof(unsigned long)) {
7273                 while ( max_sectors/2 > (1ULL<<(scale+32)))
7274                         scale++;
7275         }
7276         res = (resync>>scale)*1000;
7277         sector_div(res, (u32)((max_sectors>>scale)+1));
7278
7279         per_milli = res;
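        /*
         * Worked example (illustrative): for a 16TiB array, max_sectors
         * is 2^35, so max_sectors/2 == 2^34 never exceeds 1ULL<<42 and
         * 'scale' stays at 10.  Halfway through, resync == 2^34 and
         *   res = (2^34 >> 10) * 1000 / ((2^35 >> 10) + 1) ~= 500,
         * so per_milli == 500 and the format below prints " 50.0%".
         */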
7280         {
7281                 int i, x = per_milli/50, y = 20-x;
7282                 seq_printf(seq, "[");
7283                 for (i = 0; i < x; i++)
7284                         seq_printf(seq, "=");
7285                 seq_printf(seq, ">");
7286                 for (i = 0; i < y; i++)
7287                         seq_printf(seq, ".");
7288                 seq_printf(seq, "] ");
7289         }
7290         seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
7291                    (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
7292                     "reshape" :
7293                     (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
7294                      "check" :
7295                      (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
7296                       "resync" : "recovery"))),
7297                    per_milli/10, per_milli % 10,
7298                    (unsigned long long) resync/2,
7299                    (unsigned long long) max_sectors/2);
7300
7301         /*
7302          * dt: time from mark until now
7303          * db: blocks written from mark until now
7304          * rt: remaining time
7305          *
7306          * rt is a sector_t, so could be 32bit or 64bit.
7307          * So we divide before multiply in case it is 32bit and close
7308          * to the limit.
7309          * We scale the divisor (db) by 32 to avoid losing precision
7310          * near the end of resync when the number of remaining sectors
7311          * is close to 'db'.
7312          * We then divide rt by 32 after multiplying by db to compensate.
7313          * The '+1' avoids division by zero if db is very small.
7314          */
7315         dt = ((jiffies - mddev->resync_mark) / HZ);
7316         if (!dt) dt++;
7317         db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
7318                 - mddev->resync_mark_cnt;
7319
7320         rt = max_sectors - resync;    /* number of remaining sectors */
7321         sector_div(rt, db/32+1);
7322         rt *= dt;
7323         rt >>= 5;
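        /*
         * Worked example (illustrative): with 1,000,000 sectors still to
         * go, and db == 100,000 sectors moved in dt == 50 seconds:
         *   rt = 1000000 / (100000/32 + 1) * 50 >> 5 == 498 seconds,
         * close to the exact remaining/rate = 1000000 / 2000 == 500s
         * (the gap is integer truncation); printed as "finish=8.3min".
         */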
7324
7325         seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
7326                    ((unsigned long)rt % 60)/6);
7327
7328         seq_printf(seq, " speed=%ldK/sec", db/2/dt);
7329         return 1;
7330 }
7331
7332 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
7333 {
7334         struct list_head *tmp;
7335         loff_t l = *pos;
7336         struct mddev *mddev;
7337
7338         if (l >= 0x10000)
7339                 return NULL;
7340         if (!l--)
7341                 /* header */
7342                 return (void*)1;
7343
7344         spin_lock(&all_mddevs_lock);
7345         list_for_each(tmp,&all_mddevs)
7346                 if (!l--) {
7347                         mddev = list_entry(tmp, struct mddev, all_mddevs);
7348                         mddev_get(mddev);
7349                         spin_unlock(&all_mddevs_lock);
7350                         return mddev;
7351                 }
7352         spin_unlock(&all_mddevs_lock);
7353         if (!l--)
7354                 return (void*)2; /* tail */
7355         return NULL;
7356 }
7357
7358 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
7359 {
7360         struct list_head *tmp;
7361         struct mddev *next_mddev, *mddev = v;
7362
7363         ++*pos;
7364         if (v == (void*)2)
7365                 return NULL;
7366
7367         spin_lock(&all_mddevs_lock);
7368         if (v == (void*)1)
7369                 tmp = all_mddevs.next;
7370         else
7371                 tmp = mddev->all_mddevs.next;
7372         if (tmp != &all_mddevs)
7373                 next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs));
7374         else {
7375                 next_mddev = (void*)2;
7376                 *pos = 0x10000;
7377         }
7378         spin_unlock(&all_mddevs_lock);
7379
7380         if (v != (void*)1)
7381                 mddev_put(mddev);
7382         return next_mddev;
7383
7384 }
7385
7386 static void md_seq_stop(struct seq_file *seq, void *v)
7387 {
7388         struct mddev *mddev = v;
7389
7390         if (mddev && v != (void*)1 && v != (void*)2)
7391                 mddev_put(mddev);
7392 }
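/*
 * The iterator above hands md_seq_show() three kinds of cookie: the
 * literal (void*)1 for the "Personalities" header line, a refcounted
 * struct mddev for each array, and (void*)2 for the trailing unused
 * devices line.  A full pass would thus emit, for example:
 *
 *	Personalities : [raid1] [raid6]
 *	md0 : active raid1 sdb1[1] sda1[0]
 *	      ...
 *	unused devices: <none>
 */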
7393
7394 static int md_seq_show(struct seq_file *seq, void *v)
7395 {
7396         struct mddev *mddev = v;
7397         sector_t sectors;
7398         struct md_rdev *rdev;
7399
7400         if (v == (void*)1) {
7401                 struct md_personality *pers;
7402                 seq_printf(seq, "Personalities : ");
7403                 spin_lock(&pers_lock);
7404                 list_for_each_entry(pers, &pers_list, list)
7405                         seq_printf(seq, "[%s] ", pers->name);
7406
7407                 spin_unlock(&pers_lock);
7408                 seq_printf(seq, "\n");
7409                 seq->poll_event = atomic_read(&md_event_count);
7410                 return 0;
7411         }
7412         if (v == (void*)2) {
7413                 status_unused(seq);
7414                 return 0;
7415         }
7416
7417         spin_lock(&mddev->lock);
7418         if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
7419                 seq_printf(seq, "%s : %sactive", mdname(mddev),
7420                                                 mddev->pers ? "" : "in");
7421                 if (mddev->pers) {
7422                         if (mddev->ro==1)
7423                                 seq_printf(seq, " (read-only)");
7424                         if (mddev->ro==2)
7425                                 seq_printf(seq, " (auto-read-only)");
7426                         seq_printf(seq, " %s", mddev->pers->name);
7427                 }
7428
7429                 sectors = 0;
7430                 rcu_read_lock();
7431                 rdev_for_each_rcu(rdev, mddev) {
7432                         char b[BDEVNAME_SIZE];
7433                         seq_printf(seq, " %s[%d]",
7434                                 bdevname(rdev->bdev,b), rdev->desc_nr);
7435                         if (test_bit(WriteMostly, &rdev->flags))
7436                                 seq_printf(seq, "(W)");
7437                         if (test_bit(Journal, &rdev->flags))
7438                                 seq_printf(seq, "(J)");
7439                         if (test_bit(Faulty, &rdev->flags)) {
7440                                 seq_printf(seq, "(F)");
7441                                 continue;
7442                         }
7443                         if (rdev->raid_disk < 0)
7444                                 seq_printf(seq, "(S)"); /* spare */
7445                         if (test_bit(Replacement, &rdev->flags))
7446                                 seq_printf(seq, "(R)");
7447                         sectors += rdev->sectors;
7448                 }
7449                 rcu_read_unlock();
7450
7451                 if (!list_empty(&mddev->disks)) {
7452                         if (mddev->pers)
7453                                 seq_printf(seq, "\n      %llu blocks",
7454                                            (unsigned long long)
7455                                            mddev->array_sectors / 2);
7456                         else
7457                                 seq_printf(seq, "\n      %llu blocks",
7458                                            (unsigned long long)sectors / 2);
7459                 }
7460                 if (mddev->persistent) {
7461                         if (mddev->major_version != 0 ||
7462                             mddev->minor_version != 90) {
7463                                 seq_printf(seq," super %d.%d",
7464                                            mddev->major_version,
7465                                            mddev->minor_version);
7466                         }
7467                 } else if (mddev->external)
7468                         seq_printf(seq, " super external:%s",
7469                                    mddev->metadata_type);
7470                 else
7471                         seq_printf(seq, " super non-persistent");
7472
7473                 if (mddev->pers) {
7474                         mddev->pers->status(seq, mddev);
7475                         seq_printf(seq, "\n      ");
7476                         if (mddev->pers->sync_request) {
7477                                 if (status_resync(seq, mddev))
7478                                         seq_printf(seq, "\n      ");
7479                         }
7480                 } else
7481                         seq_printf(seq, "\n       ");
7482
7483                 bitmap_status(seq, mddev->bitmap);
7484
7485                 seq_printf(seq, "\n");
7486         }
7487         spin_unlock(&mddev->lock);
7488
7489         return 0;
7490 }
7491
7492 static const struct seq_operations md_seq_ops = {
7493         .start  = md_seq_start,
7494         .next   = md_seq_next,
7495         .stop   = md_seq_stop,
7496         .show   = md_seq_show,
7497 };
7498
7499 static int md_seq_open(struct inode *inode, struct file *file)
7500 {
7501         struct seq_file *seq;
7502         int error;
7503
7504         error = seq_open(file, &md_seq_ops);
7505         if (error)
7506                 return error;
7507
7508         seq = file->private_data;
7509         seq->poll_event = atomic_read(&md_event_count);
7510         return error;
7511 }
7512
7513 static int md_unloading;
7514 static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
7515 {
7516         struct seq_file *seq = filp->private_data;
7517         int mask;
7518
7519         if (md_unloading)
7520                 return POLLIN|POLLRDNORM|POLLERR|POLLPRI;
7521         poll_wait(filp, &md_event_waiters, wait);
7522
7523         /* always allow read */
7524         mask = POLLIN | POLLRDNORM;
7525
7526         if (seq->poll_event != atomic_read(&md_event_count))
7527                 mask |= POLLERR | POLLPRI;
7528         return mask;
7529 }
7530
7531 static const struct file_operations md_seq_fops = {
7532         .owner          = THIS_MODULE,
7533         .open           = md_seq_open,
7534         .read           = seq_read,
7535         .llseek         = seq_lseek,
7536         .release        = seq_release_private,
7537         .poll           = mdstat_poll,
7538 };
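/*
 * Userspace sketch (illustrative, not kernel code): the poll support
 * above lets a monitor sleep until the event count moves instead of
 * re-reading /proc/mdstat on a timer.  The exceptional-condition bits
 * are the "something changed" signal:
 *
 *	char buf[4096];
 *	int fd = open("/proc/mdstat", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *
 *	for (;;) {
 *		poll(&pfd, 1, -1);
 *		lseek(fd, 0, SEEK_SET);
 *		read(fd, buf, sizeof(buf));
 *		(parse the new state, then loop to re-arm)
 *	}
 *
 * mdadm's --monitor mode works along these lines.
 */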
7539
7540 int register_md_personality(struct md_personality *p)
7541 {
7542         printk(KERN_INFO "md: %s personality registered for level %d\n",
7543                                                 p->name, p->level);
7544         spin_lock(&pers_lock);
7545         list_add_tail(&p->list, &pers_list);
7546         spin_unlock(&pers_lock);
7547         return 0;
7548 }
7549 EXPORT_SYMBOL(register_md_personality);
7550
7551 int unregister_md_personality(struct md_personality *p)
7552 {
7553         printk(KERN_INFO "md: %s personality unregistered\n", p->name);
7554         spin_lock(&pers_lock);
7555         list_del_init(&p->list);
7556         spin_unlock(&pers_lock);
7557         return 0;
7558 }
7559 EXPORT_SYMBOL(unregister_md_personality);
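/*
 * Registration sketch (illustrative): a personality module fills in a
 * struct md_personality and registers it from its init routine; the
 * required request/run hooks are omitted here.
 *
 *	static struct md_personality myraid_personality = {
 *		.name	= "myraid",
 *		.level	= 42,
 *		.owner	= THIS_MODULE,
 *		(make_request, run, ... as the personality requires)
 *	};
 *
 *	static int __init myraid_init(void)
 *	{
 *		return register_md_personality(&myraid_personality);
 *	}
 */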
7560
7561 int register_md_cluster_operations(struct md_cluster_operations *ops,
7562                                    struct module *module)
7563 {
7564         int ret = 0;
7565         spin_lock(&pers_lock);
7566         if (md_cluster_ops != NULL)
7567                 ret = -EALREADY;
7568         else {
7569                 md_cluster_ops = ops;
7570                 md_cluster_mod = module;
7571         }
7572         spin_unlock(&pers_lock);
7573         return ret;
7574 }
7575 EXPORT_SYMBOL(register_md_cluster_operations);
7576
7577 int unregister_md_cluster_operations(void)
7578 {
7579         spin_lock(&pers_lock);
7580         md_cluster_ops = NULL;
7581         spin_unlock(&pers_lock);
7582         return 0;
7583 }
7584 EXPORT_SYMBOL(unregister_md_cluster_operations);
7585
7586 int md_setup_cluster(struct mddev *mddev, int nodes)
7587 {
7588         int err;
7589
7590         err = request_module("md-cluster");
7591         if (err) {
7592                 pr_err("md-cluster module not found.\n");
7593                 return -ENOENT;
7594         }
7595
7596         spin_lock(&pers_lock);
7597         if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
7598                 spin_unlock(&pers_lock);
7599                 return -ENOENT;
7600         }
7601         spin_unlock(&pers_lock);
7602
7603         return md_cluster_ops->join(mddev, nodes);
7604 }
7605
7606 void md_cluster_stop(struct mddev *mddev)
7607 {
7608         if (!md_cluster_ops)
7609                 return;
7610         md_cluster_ops->leave(mddev);
7611         module_put(md_cluster_mod);
7612 }
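/*
 * The md-cluster module plugs in the same way (a sketch; only the
 * operations referenced in this file are shown, the real table has
 * more):
 *
 *	static struct md_cluster_operations cluster_ops = {
 *		.join		= join,
 *		.leave		= leave,
 *		.resync_start	= resync_start,
 *		.resync_finish	= resync_finish,
 *		.new_disk_ack	= new_disk_ack,
 *		...
 *	};
 *
 *	register_md_cluster_operations(&cluster_ops, THIS_MODULE);
 *
 * md_setup_cluster() then loads it on demand with request_module()
 * and pins it with try_module_get() before calling ->join().
 */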
7613
7614 static int is_mddev_idle(struct mddev *mddev, int init)
7615 {
7616         struct md_rdev *rdev;
7617         int idle;
7618         int curr_events;
7619
7620         idle = 1;
7621         rcu_read_lock();
7622         rdev_for_each_rcu(rdev, mddev) {
7623                 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
7624                 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
7625                               (int)part_stat_read(&disk->part0, sectors[1]) -
7626                               atomic_read(&disk->sync_io);
7627                 /* sync IO will cause sync_io to increase before the disk_stats
7628                  * as sync_io is counted when a request starts, and
7629                  * disk_stats is counted when it completes.
7630                  * So resync activity will cause curr_events to be smaller than
7631                  * when there was no such activity.
7632                  * non-sync IO will cause disk_stats to increase without
7633                  * increasing sync_io so curr_events will (eventually)
7634                  * be larger than it was before.  Once it becomes
7635                  * substantially larger, the test below will cause
7636                  * the array to appear non-idle, and resync will slow
7637                  * down.
7638                  * If there is a lot of outstanding resync activity when
7639                  * we set last_events to curr_events, then all that activity
7640                  * completing might cause the array to appear non-idle
7641                  * and resync will be slowed down even though there might
7642                  * not have been non-resync activity.  This will only
7643                  * happen once though.  'last_events' will soon reflect
7644                  * the state where there are few or no outstanding
7645                  * resync requests, and further resync activity will
7646                  * always make curr_events less than last_events.
7647                  *
7648                  */
7649                 if (init || curr_events - rdev->last_events > 64) {
7650                         rdev->last_events = curr_events;
7651                         idle = 0;
7652                 }
7653         }
7654         rcu_read_unlock();
7655         return idle;
7656 }
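/*
 * Numeric sketch of the heuristic above (illustrative): suppose that
 * since the last check a member disk completed 10,000 sectors of
 * resync IO and 200 sectors of filesystem IO.  The part0 counters
 * grow by 10,200 while sync_io grows by 10,000, so curr_events
 * advances by only 200.  That is still above the 64-sector fuzz, so
 * the array is reported non-idle and md_do_sync() will throttle
 * itself back towards speed_min.
 */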
7657
7658 void md_done_sync(struct mddev *mddev, int blocks, int ok)
7659 {
7660         /* another "blocks" 512-byte blocks have been synced */
7661         atomic_sub(blocks, &mddev->recovery_active);
7662         wake_up(&mddev->recovery_wait);
7663         if (!ok) {
7664                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7665                 set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
7666                 md_wakeup_thread(mddev->thread);
7667                 /* stop recovery, signal do_sync ... */
7668         }
7669 }
7670 EXPORT_SYMBOL(md_done_sync);
7671
7672 /* md_write_start(mddev, bi)
7673  * If we need to update some array metadata (e.g. 'active' flag
7674  * in superblock) before writing, schedule a superblock update
7675  * and wait for it to complete.
7676  */
7677 void md_write_start(struct mddev *mddev, struct bio *bi)
7678 {
7679         int did_change = 0;
7680         if (bio_data_dir(bi) != WRITE)
7681                 return;
7682
7683         BUG_ON(mddev->ro == 1);
7684         if (mddev->ro == 2) {
7685                 /* need to switch to read/write */
7686                 mddev->ro = 0;
7687                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7688                 md_wakeup_thread(mddev->thread);
7689                 md_wakeup_thread(mddev->sync_thread);
7690                 did_change = 1;
7691         }
7692         atomic_inc(&mddev->writes_pending);
7693         if (mddev->safemode == 1)
7694                 mddev->safemode = 0;
7695         if (mddev->in_sync) {
7696                 spin_lock(&mddev->lock);
7697                 if (mddev->in_sync) {
7698                         mddev->in_sync = 0;
7699                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
7700                         set_bit(MD_CHANGE_PENDING, &mddev->flags);
7701                         md_wakeup_thread(mddev->thread);
7702                         did_change = 1;
7703                 }
7704                 spin_unlock(&mddev->lock);
7705         }
7706         if (did_change)
7707                 sysfs_notify_dirent_safe(mddev->sysfs_state);
7708         wait_event(mddev->sb_wait,
7709                    !test_bit(MD_CHANGE_PENDING, &mddev->flags));
7710 }
7711 EXPORT_SYMBOL(md_write_start);
7712
7713 void md_write_end(struct mddev *mddev)
7714 {
7715         if (atomic_dec_and_test(&mddev->writes_pending)) {
7716                 if (mddev->safemode == 2)
7717                         md_wakeup_thread(mddev->thread);
7718                 else if (mddev->safemode_delay)
7719                         mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
7720         }
7721 }
7722 EXPORT_SYMBOL(md_write_end);
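/*
 * Pairing sketch (illustrative): a personality brackets every array
 * write with these two calls from its request path, keeping the
 * superblock's 'active' state and writes_pending accurate:
 *
 *	md_write_start(mddev, bio);	(may block on a sb update)
 *	... map and submit the write to member devices ...
 *	md_write_end(mddev);		(called from the completion path)
 */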
7723
7724 /* md_allow_write(mddev)
7725  * Calling this ensures that the array is marked 'active' so that writes
7726  * may proceed without blocking.  It is important to call this before
7727  * attempting a GFP_KERNEL allocation while holding the mddev lock.
7728  * Must be called with mddev_lock held.
7729  *
7730  * In the ->external case MD_CHANGE_PENDING cannot be cleared until mddev->lock
7731  * is dropped, so return -EAGAIN after notifying userspace.
7732  */
7733 int md_allow_write(struct mddev *mddev)
7734 {
7735         if (!mddev->pers)
7736                 return 0;
7737         if (mddev->ro)
7738                 return 0;
7739         if (!mddev->pers->sync_request)
7740                 return 0;
7741
7742         spin_lock(&mddev->lock);
7743         if (mddev->in_sync) {
7744                 mddev->in_sync = 0;
7745                 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
7746                 set_bit(MD_CHANGE_PENDING, &mddev->flags);
7747                 if (mddev->safemode_delay &&
7748                     mddev->safemode == 0)
7749                         mddev->safemode = 1;
7750                 spin_unlock(&mddev->lock);
7751                 md_update_sb(mddev, 0);
7752                 sysfs_notify_dirent_safe(mddev->sysfs_state);
7753         } else
7754                 spin_unlock(&mddev->lock);
7755
7756         if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
7757                 return -EAGAIN;
7758         else
7759                 return 0;
7760 }
7761 EXPORT_SYMBOL_GPL(md_allow_write);
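/*
 * Usage sketch (illustrative): a caller that must do a GFP_KERNEL
 * allocation while holding the mddev lock (e.g. a personality growing
 * an internal cache) first marks the array active, so that memory
 * reclaim writing to this array cannot block in md_write_start()
 * waiting for a superblock update that needs the lock we hold:
 *
 *	err = md_allow_write(mddev);
 *	if (err)
 *		return err;
 *	new = kzalloc(size, GFP_KERNEL);
 */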
7762
7763 #define SYNC_MARKS      10
7764 #define SYNC_MARK_STEP  (3*HZ)
7765 #define UPDATE_FREQUENCY (5*60*HZ)
7766 void md_do_sync(struct md_thread *thread)
7767 {
7768         struct mddev *mddev = thread->mddev;
7769         struct mddev *mddev2;
7770         unsigned int currspeed = 0,
7771                  window;
7772         sector_t max_sectors,j, io_sectors, recovery_done;
7773         unsigned long mark[SYNC_MARKS];
7774         unsigned long update_time;
7775         sector_t mark_cnt[SYNC_MARKS];
7776         int last_mark,m;
7777         struct list_head *tmp;
7778         sector_t last_check;
7779         int skipped = 0;
7780         struct md_rdev *rdev;
7781         char *desc, *action = NULL;
7782         struct blk_plug plug;
7783         bool cluster_resync_finished = false;
7784
7785         /* just in case the thread restarts... */
7786         if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
7787                 return;
7788         if (mddev->ro) {/* never try to sync a read-only array */
7789                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7790                 return;
7791         }
7792
7793         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
7794                 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
7795                         desc = "data-check";
7796                         action = "check";
7797                 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
7798                         desc = "requested-resync";
7799                         action = "repair";
7800                 } else
7801                         desc = "resync";
7802         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
7803                 desc = "reshape";
7804         else
7805                 desc = "recovery";
7806
7807         mddev->last_sync_action = action ?: desc;
7808
7809         /* we overload curr_resync somewhat here.
7810          * 0 == not engaged in resync at all
7811          * 2 == checking that there is no conflict with another sync
7812          * 1 == like 2, but have yielded to allow conflicting resync to
7813          *              commense
7814          *              commence
7815          *
7816          * Before starting a resync we must have set curr_resync to
7817          * 2, and then checked that every "conflicting" array has curr_resync
7818          * less than ours.  When we find one that is the same or higher
7819          * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
7820          * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
7821          * This will mean we have to start checking from the beginning again.
7822          *
7823          */
7824
7825         do {
7826                 mddev->curr_resync = 2;
7827
7828         try_again:
7829                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
7830                         goto skip;
7831                 for_each_mddev(mddev2, tmp) {
7832                         if (mddev2 == mddev)
7833                                 continue;
7834                         if (!mddev->parallel_resync
7835                         &&  mddev2->curr_resync
7836                         &&  match_mddev_units(mddev, mddev2)) {
7837                                 DEFINE_WAIT(wq);
7838                                 if (mddev < mddev2 && mddev->curr_resync == 2) {
7839                                         /* arbitrarily yield */
7840                                         mddev->curr_resync = 1;
7841                                         wake_up(&resync_wait);
7842                                 }
7843                                 if (mddev > mddev2 && mddev->curr_resync == 1)
7844                                         /* no need to wait here, we can wait the next
7845                                          * time 'round when curr_resync == 2
7846                                          */
7847                                         continue;
7848                                 /* We need to wait 'interruptible' so as not to
7849                                  * contribute to the load average, and not to
7850                                  * be caught by 'softlockup'
7851                                  */
7852                                 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
7853                                 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
7854                                     mddev2->curr_resync >= mddev->curr_resync) {
7855                                         printk(KERN_INFO "md: delaying %s of %s"
7856                                                " until %s has finished (they"
7857                                                " share one or more physical units)\n",
7858                                                desc, mdname(mddev), mdname(mddev2));
7859                                         mddev_put(mddev2);
7860                                         if (signal_pending(current))
7861                                                 flush_signals(current);
7862                                         schedule();
7863                                         finish_wait(&resync_wait, &wq);
7864                                         goto try_again;
7865                                 }
7866                                 finish_wait(&resync_wait, &wq);
7867                         }
7868                 }
7869         } while (mddev->curr_resync < 2);
7870
7871         j = 0;
7872         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
7873                 /* resync follows the size requested by the personality,
7874                  * which defaults to physical size, but can be virtual size
7875                  */
7876                 max_sectors = mddev->resync_max_sectors;
7877                 atomic64_set(&mddev->resync_mismatches, 0);
7878                 /* we don't use the checkpoint if there's a bitmap */
7879                 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
7880                         j = mddev->resync_min;
7881                 else if (!mddev->bitmap)
7882                         j = mddev->recovery_cp;
7883
7884         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
7885                 max_sectors = mddev->resync_max_sectors;
7886         else {
7887                 /* recovery follows the physical size of devices */
7888                 max_sectors = mddev->dev_sectors;
7889                 j = MaxSector;
7890                 rcu_read_lock();
7891                 rdev_for_each_rcu(rdev, mddev)
7892                         if (rdev->raid_disk >= 0 &&
7893                             !test_bit(Journal, &rdev->flags) &&
7894                             !test_bit(Faulty, &rdev->flags) &&
7895                             !test_bit(In_sync, &rdev->flags) &&
7896                             rdev->recovery_offset < j)
7897                                 j = rdev->recovery_offset;
7898                 rcu_read_unlock();
7899
7900                 /* If there is a bitmap, we need to make sure all
7901                  * writes that started before we added a spare
7902                  * complete before we start doing a recovery.
7903                  * Otherwise the write might complete and (via
7904                  * bitmap_endwrite) set a bit in the bitmap after the
7905                  * recovery has checked that bit and skipped that
7906                  * region.
7907                  */
7908                 if (mddev->bitmap) {
7909                         mddev->pers->quiesce(mddev, 1);
7910                         mddev->pers->quiesce(mddev, 0);
7911                 }
7912         }
7913
7914         printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
7915         printk(KERN_INFO "md: minimum _guaranteed_  speed:"
7916                 " %d KB/sec/disk.\n", speed_min(mddev));
7917         printk(KERN_INFO "md: using maximum available idle IO bandwidth "
7918                "(but not more than %d KB/sec) for %s.\n",
7919                speed_max(mddev), desc);
7920
7921         is_mddev_idle(mddev, 1); /* this initializes IO event counters */
7922
7923         io_sectors = 0;
7924         for (m = 0; m < SYNC_MARKS; m++) {
7925                 mark[m] = jiffies;
7926                 mark_cnt[m] = io_sectors;
7927         }
7928         last_mark = 0;
7929         mddev->resync_mark = mark[last_mark];
7930         mddev->resync_mark_cnt = mark_cnt[last_mark];
7931
7932         /*
7933          * Tune reconstruction:
7934          */
7935         window = 32*(PAGE_SIZE/512);
7936         printk(KERN_INFO "md: using %dk window, over a total of %lluk.\n",
7937                 window/2, (unsigned long long)max_sectors/2);
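        /*
         * With 4KiB pages, window is 32*8 == 256 sectors, so the message
         * above reports a 128k window: the speed checks further down run
         * at most once per 128KiB of completed IO (see the last_check
         * test below).
         */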
7938
7939         atomic_set(&mddev->recovery_active, 0);
7940         last_check = 0;
7941
7942         if (j>2) {
7943                 printk(KERN_INFO
7944                        "md: resuming %s of %s from checkpoint.\n",
7945                        desc, mdname(mddev));
7946                 mddev->curr_resync = j;
7947         } else
7948                 mddev->curr_resync = 3; /* no longer delayed */
7949         mddev->curr_resync_completed = j;
7950         sysfs_notify(&mddev->kobj, NULL, "sync_completed");
7951         md_new_event(mddev);
7952         update_time = jiffies;
7953
7954         blk_start_plug(&plug);
7955         while (j < max_sectors) {
7956                 sector_t sectors;
7957
7958                 skipped = 0;
7959
7960                 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
7961                     ((mddev->curr_resync > mddev->curr_resync_completed &&
7962                       (mddev->curr_resync - mddev->curr_resync_completed)
7963                       > (max_sectors >> 4)) ||
7964                      time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
7965                      (j - mddev->curr_resync_completed)*2
7966                      >= mddev->resync_max - mddev->curr_resync_completed ||
7967                      mddev->curr_resync_completed > mddev->resync_max
7968                             )) {
7969                         /* time to update curr_resync_completed */
7970                         wait_event(mddev->recovery_wait,
7971                                    atomic_read(&mddev->recovery_active) == 0);
7972                         mddev->curr_resync_completed = j;
7973                         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
7974                             j > mddev->recovery_cp)
7975                                 mddev->recovery_cp = j;
7976                         update_time = jiffies;
7977                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
7978                         sysfs_notify(&mddev->kobj, NULL, "sync_completed");
7979                 }
7980
7981                 while (j >= mddev->resync_max &&
7982                        !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
7983                         /* As this condition is controlled by user-space,
7984                          * we can block indefinitely, so use '_interruptible'
7985                          * to avoid triggering warnings.
7986                          */
7987                         flush_signals(current); /* just in case */
7988                         wait_event_interruptible(mddev->recovery_wait,
7989                                                  mddev->resync_max > j
7990                                                  || test_bit(MD_RECOVERY_INTR,
7991                                                              &mddev->recovery));
7992                 }
7993
7994                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
7995                         break;
7996
7997                 sectors = mddev->pers->sync_request(mddev, j, &skipped);
7998                 if (sectors == 0) {
7999                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8000                         break;
8001                 }
8002
8003                 if (!skipped) { /* actual IO requested */
8004                         io_sectors += sectors;
8005                         atomic_add(sectors, &mddev->recovery_active);
8006                 }
8007
8008                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8009                         break;
8010
8011                 j += sectors;
8012                 if (j > max_sectors)
8013                         /* when skipping, extra large numbers can be returned. */
8014                         j = max_sectors;
8015                 if (j > 2)
8016                         mddev->curr_resync = j;
8017                 mddev->curr_mark_cnt = io_sectors;
8018                 if (last_check == 0)
8019                         /* this is the earliest that rebuild will be
8020                          * visible in /proc/mdstat
8021                          */
8022                         md_new_event(mddev);
8023
8024                 if (last_check + window > io_sectors || j == max_sectors)
8025                         continue;
8026
8027                 last_check = io_sectors;
8028         repeat:
8029                 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
8030                         /* step marks */
8031                         int next = (last_mark+1) % SYNC_MARKS;
8032
8033                         mddev->resync_mark = mark[next];
8034                         mddev->resync_mark_cnt = mark_cnt[next];
8035                         mark[next] = jiffies;
8036                         mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
8037                         last_mark = next;
8038                 }
8039
8040                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8041                         break;
8042
8043                 /*
8044                  * this loop only exits when either we are slower than
8045                  * the 'hard' speed limit, or the system was IO-idle for
8046                  * a jiffy.
8047                  * the system might be non-idle CPU-wise, but we only care
8048                  * about not overloading the IO subsystem. (things like an
8049                  * e2fsck being done on the RAID array should execute fast)
8050                  */
8051                 cond_resched();
8052
8053                 recovery_done = io_sectors - atomic_read(&mddev->recovery_active);
8054                 currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2
8055                         /((jiffies-mddev->resync_mark)/HZ +1) +1;
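                /*
                 * Example (illustrative): 61,440 sectors completed since
                 * the resync mark, 29 seconds after it, gives
                 *   currspeed = 61440/2 / (29+1) + 1 == 1025 KB/sec,
                 * which the checks below keep between speed_min() and
                 * speed_max().
                 */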
8056
8057                 if (currspeed > speed_min(mddev)) {
8058                         if (currspeed > speed_max(mddev)) {
8059                                 msleep(500);
8060                                 goto repeat;
8061                         }
8062                         if (!is_mddev_idle(mddev, 0)) {
8063                                 /*
8064                                  * Give other IO more of a chance.
8065                                  * The faster the devices, the less we wait.
8066                                  */
8067                                 wait_event(mddev->recovery_wait,
8068                                            !atomic_read(&mddev->recovery_active));
8069                         }
8070                 }
8071         }
8072         printk(KERN_INFO "md: %s: %s %s.\n",mdname(mddev), desc,
8073                test_bit(MD_RECOVERY_INTR, &mddev->recovery)
8074                ? "interrupted" : "done");
8075         /*
8076          * this also signals 'finished resyncing' to md_stop
8077          */
8078         blk_finish_plug(&plug);
8079         wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
8080
8081         if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8082             !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
8083             mddev->curr_resync > 2) {
8084                 mddev->curr_resync_completed = mddev->curr_resync;
8085                 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
8086         }
8087         /* tell personality and other nodes that we are finished */
8088         if (mddev_is_clustered(mddev)) {
8089                 md_cluster_ops->resync_finish(mddev);
8090                 cluster_resync_finished = true;
8091         }
8092         mddev->pers->sync_request(mddev, max_sectors, &skipped);
8093
8094         if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
8095             mddev->curr_resync > 2) {
8096                 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
8097                         if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8098                                 if (mddev->curr_resync >= mddev->recovery_cp) {
8099                                         printk(KERN_INFO
8100                                                "md: checkpointing %s of %s.\n",
8101                                                desc, mdname(mddev));
8102                                         if (test_bit(MD_RECOVERY_ERROR,
8103                                                 &mddev->recovery))
8104                                                 mddev->recovery_cp =
8105                                                         mddev->curr_resync_completed;
8106                                         else
8107                                                 mddev->recovery_cp =
8108                                                         mddev->curr_resync;
8109                                 }
8110                         } else
8111                                 mddev->recovery_cp = MaxSector;
8112                 } else {
8113                         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8114                                 mddev->curr_resync = MaxSector;
8115                         rcu_read_lock();
8116                         rdev_for_each_rcu(rdev, mddev)
8117                                 if (rdev->raid_disk >= 0 &&
8118                                     mddev->delta_disks >= 0 &&
8119                                     !test_bit(Journal, &rdev->flags) &&
8120                                     !test_bit(Faulty, &rdev->flags) &&
8121                                     !test_bit(In_sync, &rdev->flags) &&
8122                                     rdev->recovery_offset < mddev->curr_resync)
8123                                         rdev->recovery_offset = mddev->curr_resync;
8124                         rcu_read_unlock();
8125                 }
8126         }
8127  skip:
8128         set_bit(MD_CHANGE_DEVS, &mddev->flags);
8129
8130         if (mddev_is_clustered(mddev) &&
8131             test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
8132             !cluster_resync_finished)
8133                 md_cluster_ops->resync_finish(mddev);
8134
8135         spin_lock(&mddev->lock);
8136         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8137                 /* We completed so min/max setting can be forgotten if used. */
8138                 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
8139                         mddev->resync_min = 0;
8140                 mddev->resync_max = MaxSector;
8141         } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
8142                 mddev->resync_min = mddev->curr_resync_completed;
8143         set_bit(MD_RECOVERY_DONE, &mddev->recovery);
8144         mddev->curr_resync = 0;
8145         spin_unlock(&mddev->lock);
8146
8147         wake_up(&resync_wait);
8148         md_wakeup_thread(mddev->thread);
8149         return;
8150 }
8151 EXPORT_SYMBOL_GPL(md_do_sync);
8152
8153 static int remove_and_add_spares(struct mddev *mddev,
8154                                  struct md_rdev *this)
8155 {
8156         struct md_rdev *rdev;
8157         int spares = 0;
8158         int removed = 0;
8159
8160         rdev_for_each(rdev, mddev)
8161                 if ((this == NULL || rdev == this) &&
8162                     rdev->raid_disk >= 0 &&
8163                     !test_bit(Blocked, &rdev->flags) &&
8164                     (test_bit(Faulty, &rdev->flags) ||
8165                      (!test_bit(In_sync, &rdev->flags) &&
8166                       !test_bit(Journal, &rdev->flags))) &&
8167                     atomic_read(&rdev->nr_pending)==0) {
8168                         if (mddev->pers->hot_remove_disk(
8169                                     mddev, rdev) == 0) {
8170                                 sysfs_unlink_rdev(mddev, rdev);
8171                                 rdev->raid_disk = -1;
8172                                 removed++;
8173                         }
8174                 }
8175         if (removed && mddev->kobj.sd)
8176                 sysfs_notify(&mddev->kobj, NULL, "degraded");
8177
8178         if (this && removed)
8179                 goto no_add;
8180
8181         rdev_for_each(rdev, mddev) {
8182                 if (this && this != rdev)
8183                         continue;
8184                 if (test_bit(Candidate, &rdev->flags))
8185                         continue;
8186                 if (rdev->raid_disk >= 0 &&
8187                     !test_bit(In_sync, &rdev->flags) &&
8188                     !test_bit(Journal, &rdev->flags) &&
8189                     !test_bit(Faulty, &rdev->flags))
8190                         spares++;
8191                 if (rdev->raid_disk >= 0)
8192                         continue;
8193                 if (test_bit(Faulty, &rdev->flags))
8194                         continue;
8195                 if (!test_bit(Journal, &rdev->flags)) {
8196                         if (mddev->ro &&
8197                             ! (rdev->saved_raid_disk >= 0 &&
8198                                !test_bit(Bitmap_sync, &rdev->flags)))
8199                                 continue;
8200
8201                         rdev->recovery_offset = 0;
8202                 }
8203                 if (mddev->pers->
8204                     hot_add_disk(mddev, rdev) == 0) {
8205                         if (sysfs_link_rdev(mddev, rdev))
8206                                 /* failure here is OK */;
8207                         if (!test_bit(Journal, &rdev->flags))
8208                                 spares++;
8209                         md_new_event(mddev);
8210                         set_bit(MD_CHANGE_DEVS, &mddev->flags);
8211                 }
8212         }
8213 no_add:
8214         if (removed)
8215                 set_bit(MD_CHANGE_DEVS, &mddev->flags);
8216         return spares;
8217 }
8218
8219 static void md_start_sync(struct work_struct *ws)
8220 {
8221         struct mddev *mddev = container_of(ws, struct mddev, del_work);
8222         int ret = 0;
8223
8224         if (mddev_is_clustered(mddev)) {
8225                 ret = md_cluster_ops->resync_start(mddev);
8226                 if (ret) {
8227                         mddev->sync_thread = NULL;
8228                         goto out;
8229                 }
8230         }
8231
8232         mddev->sync_thread = md_register_thread(md_do_sync,
8233                                                 mddev,
8234                                                 "resync");
8235 out:
8236         if (!mddev->sync_thread) {
8237                 if (!(mddev_is_clustered(mddev) && ret == -EAGAIN))
8238                         printk(KERN_ERR "%s: could not start resync"
8239                                " thread...\n",
8240                                mdname(mddev));
8241                 /* leave the spares where they are; it shouldn't hurt */
8242                 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8243                 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
8244                 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
8245                 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
8246                 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8247                 wake_up(&resync_wait);
8248                 if (test_and_clear_bit(MD_RECOVERY_RECOVER,
8249                                        &mddev->recovery))
8250                         if (mddev->sysfs_action)
8251                                 sysfs_notify_dirent_safe(mddev->sysfs_action);
8252         } else
8253                 md_wakeup_thread(mddev->sync_thread);
8254         sysfs_notify_dirent_safe(mddev->sysfs_action);
8255         md_new_event(mddev);
8256 }
8257
8258 /*
8259  * This routine is regularly called by all per-raid-array threads to
8260  * deal with generic issues like resync and super-block update.
8261  * Raid personalities that don't have a thread (linear/raid0) do not
8262  * need this as they never do any recovery or update the superblock.
8263  *
8264  * It does not do any resync itself, but rather "forks" off other threads
8265  * to do that as needed.
8266  * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
8267  * "->recovery" and create a thread at ->sync_thread.
8268  * When the thread finishes it sets MD_RECOVERY_DONE
8269  * and wakes up this thread, which will reap it and finish up.
8270  * This thread also removes any faulty devices (with nr_pending == 0).
8271  *
8272  * The overall approach is:
8273  *  1/ if the superblock needs updating, update it.
8274  *  2/ If a recovery thread is running, don't do anything else.
8275  *  3/ If recovery has finished, clean up, possibly marking spares active.
8276  *  4/ If there are any faulty devices, remove them.
8277  *  5/ If array is degraded, try to add spare devices
8278  *  6/ If array has spares or is not in-sync, start a resync thread.
8279  */
8280 void md_check_recovery(struct mddev *mddev)
8281 {
8282         if (mddev->suspended)
8283                 return;
8284
8285         if (mddev->bitmap)
8286                 bitmap_daemon_work(mddev);
8287
8288         if (signal_pending(current)) {
8289                 if (mddev->pers->sync_request && !mddev->external) {
8290                         printk(KERN_INFO "md: %s in immediate safe mode\n",
8291                                mdname(mddev));
8292                         mddev->safemode = 2;
8293                 }
8294                 flush_signals(current);
8295         }
8296
8297         if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
8298                 return;
8299         if ( ! (
8300                 (mddev->flags & MD_UPDATE_SB_FLAGS & ~ (1<<MD_CHANGE_PENDING)) ||
8301                 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
8302                 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
8303                 test_bit(MD_RELOAD_SB, &mddev->flags) ||
8304                 (mddev->external == 0 && mddev->safemode == 1) ||
8305                 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
8306                  && !mddev->in_sync && mddev->recovery_cp == MaxSector)
8307                 ))
8308                 return;
8309
        if (mddev_trylock(mddev)) {
                int spares = 0;

                if (mddev->ro) {
                        struct md_rdev *rdev;
                        if (!mddev->external && mddev->in_sync)
                                /* 'Blocked' flag not needed as failed devices
                                 * will be recorded if array switched to read/write.
                                 * Leaving it set will prevent the device
                                 * from being removed.
                                 */
                                rdev_for_each(rdev, mddev)
                                        clear_bit(Blocked, &rdev->flags);
                        /* On a read-only array we can:
                         * - remove failed devices
                         * - add already-in_sync devices if the array itself
                         *   is in-sync.
                         * As we only add devices that are already in-sync,
                         * we can activate the spares immediately.
                         */
                        remove_and_add_spares(mddev, NULL);
                        /* There is no thread, but we need to call
                         * ->spare_active and clear saved_raid_disk
                         */
                        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
                        md_reap_sync_thread(mddev);
                        clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
                        clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                        clear_bit(MD_CHANGE_PENDING, &mddev->flags);
                        goto unlock;
                }

                if (mddev_is_clustered(mddev)) {
                        struct md_rdev *rdev;
                        /* kick the device if another node issued a
                         * remove disk.
                         */
                        rdev_for_each(rdev, mddev) {
                                if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
                                                rdev->raid_disk < 0)
                                        md_kick_rdev_from_array(rdev);
                        }

                        if (test_and_clear_bit(MD_RELOAD_SB, &mddev->flags))
                                md_reload_sb(mddev, mddev->good_device_nr);
                }

                if (!mddev->external) {
                        int did_change = 0;
                        spin_lock(&mddev->lock);
                        if (mddev->safemode &&
                            !atomic_read(&mddev->writes_pending) &&
                            !mddev->in_sync &&
                            mddev->recovery_cp == MaxSector) {
                                mddev->in_sync = 1;
                                did_change = 1;
                                set_bit(MD_CHANGE_CLEAN, &mddev->flags);
                        }
                        if (mddev->safemode == 1)
                                mddev->safemode = 0;
                        spin_unlock(&mddev->lock);
                        if (did_change)
                                sysfs_notify_dirent_safe(mddev->sysfs_state);
                }

                if (mddev->flags & MD_UPDATE_SB_FLAGS)
                        md_update_sb(mddev, 0);

                if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
                    !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
                        /* resync/recovery still happening */
                        clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                        goto unlock;
                }
                if (mddev->sync_thread) {
                        md_reap_sync_thread(mddev);
                        goto unlock;
                }
                /* Set RUNNING before clearing NEEDED to avoid
                 * any transients in the value of "sync_action".
                 */
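                /*
                 * ("sync_action" here is the md/sync_action sysfs
                 * attribute, which is derived from these recovery bits;
                 * setting RUNNING before clearing NEEDED keeps a
                 * concurrent reader from briefly seeing "idle".)
                 */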
                mddev->curr_resync_completed = 0;
                spin_lock(&mddev->lock);
                set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
                spin_unlock(&mddev->lock);
                /* Clear some bits that don't mean anything, but
                 * might be left set
                 */
                clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
                clear_bit(MD_RECOVERY_DONE, &mddev->recovery);

                if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
                    test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
                        goto not_running;
                /* no recovery is running.
                 * remove any failed drives, then
                 * add spares if possible.
                 * Spares are also removed and re-added, to allow
                 * the personality to fail the re-add.
                 */

                if (mddev->reshape_position != MaxSector) {
                        if (mddev->pers->check_reshape == NULL ||
                            mddev->pers->check_reshape(mddev) != 0)
                                /* Cannot proceed */
                                goto not_running;
                        set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
                        clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
                } else if ((spares = remove_and_add_spares(mddev, NULL))) {
                        clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
                        clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
                        clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
                        set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
                } else if (mddev->recovery_cp < MaxSector) {
                        set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
                        clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
                } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
                        /* nothing to be done ... */
                        goto not_running;

                if (mddev->pers->sync_request) {
                        if (spares) {
                                /* We are adding a device or devices to an array
                                 * which has the bitmap stored on all devices.
                                 * So make sure all bitmap pages get written
                                 */
                                bitmap_write_all(mddev->bitmap);
                        }
                        INIT_WORK(&mddev->del_work, md_start_sync);
                        queue_work(md_misc_wq, &mddev->del_work);
                        goto unlock;
                }
        not_running:
                if (!mddev->sync_thread) {
                        clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
                        wake_up(&resync_wait);
                        if (test_and_clear_bit(MD_RECOVERY_RECOVER,
                                               &mddev->recovery))
                                if (mddev->sysfs_action)
                                        sysfs_notify_dirent_safe(mddev->sysfs_action);
                }
        unlock:
                wake_up(&mddev->sb_wait);
                mddev_unlock(mddev);
        }
}
EXPORT_SYMBOL(md_check_recovery);

void md_reap_sync_thread(struct mddev *mddev)
{
        struct md_rdev *rdev;

        /* resync has finished, collect result */
        md_unregister_thread(&mddev->sync_thread);
        if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
            !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
                /* success...*/
                /* activate any spares */
                if (mddev->pers->spare_active(mddev)) {
                        sysfs_notify(&mddev->kobj, NULL, "degraded");
                        set_bit(MD_CHANGE_DEVS, &mddev->flags);
                }
        }
        if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
            mddev->pers->finish_reshape)
                mddev->pers->finish_reshape(mddev);

        /* If array is no longer degraded, then any saved_raid_disk
         * information must be scrapped.
         */
        if (!mddev->degraded)
                rdev_for_each(rdev, mddev)
                        rdev->saved_raid_disk = -1;

        md_update_sb(mddev, 1);
        clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
        clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
        clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
        clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
        clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
        clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
        wake_up(&resync_wait);
        /* flag recovery needed just to double check */
        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        sysfs_notify_dirent_safe(mddev->sysfs_action);
        md_new_event(mddev);
        if (mddev->event_work.func)
                queue_work(md_misc_wq, &mddev->event_work);
}
EXPORT_SYMBOL(md_reap_sync_thread);

void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
        sysfs_notify_dirent_safe(rdev->sysfs_state);
        wait_event_timeout(rdev->blocked_wait,
                           !test_bit(Blocked, &rdev->flags) &&
                           !test_bit(BlockedBadBlocks, &rdev->flags),
                           msecs_to_jiffies(5000));
        rdev_dec_pending(rdev, mddev);
}
EXPORT_SYMBOL(md_wait_for_blocked_rdev);
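
/*
 * Illustrative caller pattern for md_wait_for_blocked_rdev() (a sketch;
 * the real call sites live in the personalities): the caller takes a
 * reference first, because the function drops one via rdev_dec_pending()
 * before returning:
 *
 *	atomic_inc(&rdev->nr_pending);
 *	md_wait_for_blocked_rdev(rdev, mddev);
 */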

void md_finish_reshape(struct mddev *mddev)
{
        /* called by personality module when reshape completes. */
        struct md_rdev *rdev;

        rdev_for_each(rdev, mddev) {
                if (rdev->data_offset > rdev->new_data_offset)
                        rdev->sectors += rdev->data_offset - rdev->new_data_offset;
                else
                        rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
                rdev->data_offset = rdev->new_data_offset;
        }
}
EXPORT_SYMBOL(md_finish_reshape);

/* Bad block management */

/* Returns 1 on success, 0 on failure */
int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
                       int is_new)
{
        int rv;
        if (is_new)
                s += rdev->new_data_offset;
        else
                s += rdev->data_offset;
        rv = badblocks_set(&rdev->badblocks, s, sectors, 0);
        if (rv == 0) {
                /* Make sure they get written out promptly */
                sysfs_notify_dirent_safe(rdev->sysfs_state);
                set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
                set_bit(MD_CHANGE_PENDING, &rdev->mddev->flags);
                md_wakeup_thread(rdev->mddev->thread);
                return 1;
        } else
                return 0;
}
EXPORT_SYMBOL_GPL(rdev_set_badblocks);
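
/*
 * Typical rdev_set_badblocks() usage (a sketch, following the pattern
 * in the RAID personalities): try to record the bad block, and only
 * fail the whole device when the bad-block table cannot take it:
 *
 *	if (!rdev_set_badblocks(rdev, sector, nr_sectors, 0))
 *		md_error(mddev, rdev);
 */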

int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
                         int is_new)
{
        if (is_new)
                s += rdev->new_data_offset;
        else
                s += rdev->data_offset;
        return badblocks_clear(&rdev->badblocks, s, sectors);
}
EXPORT_SYMBOL_GPL(rdev_clear_badblocks);

static int md_notify_reboot(struct notifier_block *this,
                            unsigned long code, void *x)
{
        struct list_head *tmp;
        struct mddev *mddev;
        int need_delay = 0;

        for_each_mddev(mddev, tmp) {
                if (mddev_trylock(mddev)) {
                        if (mddev->pers)
                                __md_stop_writes(mddev);
                        if (mddev->persistent)
                                mddev->safemode = 2;
                        mddev_unlock(mddev);
                }
                need_delay = 1;
        }
        /*
         * Certain more exotic SCSI devices are known to be
         * volatile wrt too early system reboots. While the
         * right place to handle this issue is the given
         * driver, we do want to have a safe RAID driver ...
         */
        if (need_delay)
                mdelay(1000*1);

        return NOTIFY_DONE;
}

static struct notifier_block md_notifier = {
        .notifier_call  = md_notify_reboot,
        .next           = NULL,
        .priority       = INT_MAX, /* before any real devices */
};

static void md_geninit(void)
{
        pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

        proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
}

static int __init md_init(void)
{
        int ret = -ENOMEM;

        md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
        if (!md_wq)
                goto err_wq;

        md_misc_wq = alloc_workqueue("md_misc", 0, 0);
        if (!md_misc_wq)
                goto err_misc_wq;

        if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
                goto err_md;

        if ((ret = register_blkdev(0, "mdp")) < 0)
                goto err_mdp;
        mdp_major = ret;

        blk_register_region(MKDEV(MD_MAJOR, 0), 512, THIS_MODULE,
                            md_probe, NULL, NULL);
        blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
                            md_probe, NULL, NULL);

        register_reboot_notifier(&md_notifier);
        raid_table_header = register_sysctl_table(raid_root_table);

        md_geninit();
        return 0;

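/*
 * The error labels unwind in reverse order of the setup above; each
 * one undoes only the steps that had already succeeded.
 */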
err_mdp:
        unregister_blkdev(MD_MAJOR, "md");
err_md:
        destroy_workqueue(md_misc_wq);
err_misc_wq:
        destroy_workqueue(md_wq);
err_wq:
        return ret;
}

static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
{
        struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
        struct md_rdev *rdev2;
        int role, ret;
        char b[BDEVNAME_SIZE];

        /* Check for change of roles in the active devices */
        rdev_for_each(rdev2, mddev) {
                if (test_bit(Faulty, &rdev2->flags))
                        continue;

                /* Check if the roles changed */
                role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);

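                /*
                 * Role values use the v1.x superblock encoding (see
                 * md_p.h): 0xffff is a spare, 0xfffe faulty, 0xfffd a
                 * journal device; anything else is a raid-disk slot.
                 */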
                if (test_bit(Candidate, &rdev2->flags)) {
                        if (role == 0xfffe) {
                                pr_info("md: Removing Candidate device %s because add failed\n", bdevname(rdev2->bdev,b));
                                md_kick_rdev_from_array(rdev2);
                                continue;
                        } else
                                clear_bit(Candidate, &rdev2->flags);
                }

                if (role != rdev2->raid_disk) {
                        /* got activated */
                        if (rdev2->raid_disk == -1 && role != 0xffff) {
                                rdev2->saved_raid_disk = role;
                                ret = remove_and_add_spares(mddev, rdev2);
                                pr_info("Activated spare: %s\n",
                                                bdevname(rdev2->bdev,b));
                        }
                        /* device faulty
                         * We just want to do the minimum to mark the disk
                         * as faulty. The recovery is performed by the
                         * one who initiated the error.
                         */
                        if ((role == 0xfffe) || (role == 0xfffd)) {
                                md_error(mddev, rdev2);
                                clear_bit(Blocked, &rdev2->flags);
                        }
                }
        }

        if (mddev->raid_disks != le32_to_cpu(sb->raid_disks))
                update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));

        /* Finally set the event to be up to date */
        mddev->events = le64_to_cpu(sb->events);
}

static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
        int err;
        struct page *swapout = rdev->sb_page;
        struct mdp_superblock_1 *sb;

        /* Park the rdev's current sb page in 'swapout' so it can be
         * restored if the reload below fails.
         */
        rdev->sb_page = NULL;
        alloc_disk_sb(rdev);
        ClearPageUptodate(rdev->sb_page);
        rdev->sb_loaded = 0;
        err = super_types[mddev->major_version].load_super(rdev, NULL, mddev->minor_version);

        if (err < 0) {
                pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
                                __func__, __LINE__, rdev->desc_nr, err);
                put_page(rdev->sb_page);
                rdev->sb_page = swapout;
                rdev->sb_loaded = 1;
                return err;
        }

        sb = page_address(rdev->sb_page);
        /* Only pick up recovery_offset if the superblock advertises
         * MD_FEATURE_RECOVERY_OFFSET; otherwise leave the existing
         * value alone.
         */
        if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET))
                rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);

        /* The other node finished recovery, call spare_active to set
         * device In_sync and mddev->degraded
         */
        if (rdev->recovery_offset == MaxSector &&
            !test_bit(In_sync, &rdev->flags) &&
            mddev->pers->spare_active(mddev))
                sysfs_notify(&mddev->kobj, NULL, "degraded");

        put_page(swapout);
        return 0;
}

void md_reload_sb(struct mddev *mddev, int nr)
{
        struct md_rdev *rdev;
        int err;

        /* Find the rdev */
        rdev_for_each_rcu(rdev, mddev) {
                if (rdev->desc_nr == nr)
                        break;
        }

        if (!rdev || rdev->desc_nr != nr) {
                pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr);
                return;
        }

        err = read_rdev(mddev, rdev);
        if (err < 0)
                return;

        check_sb_changes(mddev, rdev);

        /* Read all rdevs to update recovery_offset */
        rdev_for_each_rcu(rdev, mddev)
                read_rdev(mddev, rdev);
}
EXPORT_SYMBOL(md_reload_sb);
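
/*
 * Sketch of the clustered flow this serves: when another node updates a
 * superblock it sends a metadata-update message; the receiving node sets
 * MD_RELOAD_SB and wakes mddev->thread, and md_check_recovery() then
 * calls md_reload_sb() to pick up the new metadata.
 */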

#ifndef MODULE

/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
 */

static LIST_HEAD(all_detected_devices);
struct detected_devices_node {
        struct list_head list;
        dev_t dev;
};

void md_autodetect_dev(dev_t dev)
{
        struct detected_devices_node *node_detected_dev;

        node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
        if (node_detected_dev) {
                node_detected_dev->dev = dev;
                list_add_tail(&node_detected_dev->list, &all_detected_devices);
        } else {
                printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed, skipping dev(%d,%d)\n",
                       MAJOR(dev), MINOR(dev));
        }
}

static void autostart_arrays(int part)
{
        struct md_rdev *rdev;
        struct detected_devices_node *node_detected_dev;
        dev_t dev;
        int i_scanned, i_passed;

        i_scanned = 0;
        i_passed = 0;

        printk(KERN_INFO "md: Autodetecting RAID arrays.\n");

        while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
                i_scanned++;
                node_detected_dev = list_entry(all_detected_devices.next,
                                        struct detected_devices_node, list);
                list_del(&node_detected_dev->list);
                dev = node_detected_dev->dev;
                kfree(node_detected_dev);
                rdev = md_import_device(dev, 0, 90);
                if (IS_ERR(rdev))
                        continue;

                if (test_bit(Faulty, &rdev->flags))
                        continue;

                set_bit(AutoDetected, &rdev->flags);
                list_add(&rdev->same_set, &pending_raid_disks);
                i_passed++;
        }

        printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
                                                i_scanned, i_passed);

        autorun_devices(part);
}

#endif /* !MODULE */

static __exit void md_exit(void)
{
        struct mddev *mddev;
        struct list_head *tmp;
        int delay = 1;

        blk_unregister_region(MKDEV(MD_MAJOR,0), 512);
        blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);

        unregister_blkdev(MD_MAJOR,"md");
        unregister_blkdev(mdp_major, "mdp");
        unregister_reboot_notifier(&md_notifier);
        unregister_sysctl_table(raid_table_header);

        /* We cannot unload the module while some process is
         * waiting for us in select() or poll() - wake them up
         */
        md_unloading = 1;
        while (waitqueue_active(&md_event_waiters)) {
                /* not safe to leave yet */
                wake_up(&md_event_waiters);
                msleep(delay);
                delay += delay;
        }
        remove_proc_entry("mdstat", NULL);

        for_each_mddev(mddev, tmp) {
                export_array(mddev);
                mddev->hold_active = 0;
        }
        destroy_workqueue(md_misc_wq);
        destroy_workqueue(md_wq);
}

subsys_initcall(md_init);
module_exit(md_exit);

static int get_ro(char *buffer, struct kernel_param *kp)
{
        return sprintf(buffer, "%d", start_readonly);
}
static int set_ro(const char *val, struct kernel_param *kp)
{
        return kstrtouint(val, 10, (unsigned int *)&start_readonly);
}

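/*
 * Example invocation (hypothetical command line): loading the module
 * with
 *
 *	modprobe md_mod start_ro=1
 *
 * makes new arrays start in auto-read-only mode until first written;
 * the value can also be changed at runtime through
 * /sys/module/md_mod/parameters/start_ro.
 */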
module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD RAID framework");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);