/*
 * Copyright (C) 2010-2011 Neil Brown
 * Copyright (C) 2010-2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/slab.h>
#include <linux/module.h>

#include "md.h"
#include "raid1.h"
#include "raid5.h"
#include "raid10.h"
#include "bitmap.h"

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "raid"
#define MAX_RAID_DEVICES	253 /* md-raid kernel limit */

static bool devices_handle_discard_safely = false;
/*
 * The following flags are used by dm-raid.c to set up the array state.
 * They must be cleared before md_run is called.
 */
#define FirstUse 10	/* rdev flag */
struct raid_dev {
	/*
	 * Two DM devices, one to hold metadata and one to hold the
	 * actual data/parity. The reason for this is to not confuse
	 * ti->len and give more flexibility in altering size and
	 * characteristics.
	 *
	 * While it is possible for this device to be associated
	 * with a different physical device than the data_dev, it
	 * is intended for it to be the same.
	 *    |--------- Physical Device ---------|
	 *    |- meta_dev -|------ data_dev ------|
	 */
	struct dm_dev *meta_dev;
	struct dm_dev *data_dev;
	struct md_rdev rdev;
};
/*
 * Flags for rs->ctr_flags field.
 */
#define CTR_FLAG_SYNC			0x1   /* 1 */ /* Not with raid0! */
#define CTR_FLAG_NOSYNC			0x2   /* 1 */ /* Not with raid0! */
#define CTR_FLAG_REBUILD		0x4   /* 2 */ /* Not with raid0! */
#define CTR_FLAG_DAEMON_SLEEP		0x8   /* 2 */ /* Not with raid0! */
#define CTR_FLAG_MIN_RECOVERY_RATE	0x10  /* 2 */ /* Not with raid0! */
#define CTR_FLAG_MAX_RECOVERY_RATE	0x20  /* 2 */ /* Not with raid0! */
#define CTR_FLAG_MAX_WRITE_BEHIND	0x40  /* 2 */ /* Only with raid1! */
#define CTR_FLAG_WRITE_MOSTLY		0x80  /* 2 */ /* Only with raid1! */
#define CTR_FLAG_STRIPE_CACHE		0x100 /* 2 */ /* Only with raid4/5/6! */
#define CTR_FLAG_REGION_SIZE		0x200 /* 2 */ /* Not with raid0! */
#define CTR_FLAG_RAID10_COPIES		0x400 /* 2 */ /* Only with raid10 */
#define CTR_FLAG_RAID10_FORMAT		0x800 /* 2 */ /* Only with raid10 */
/*
 * Definitions of various constructor flags to
 * be used in checks of valid / invalid flags
 * per raid level.
 */

/* Define all sync flags */
#define CTR_FLAGS_ANY_SYNC		(CTR_FLAG_SYNC | CTR_FLAG_NOSYNC)

/* Define flags for options without argument (e.g. 'nosync') */
#define CTR_FLAG_OPTIONS_NO_ARGS	CTR_FLAGS_ANY_SYNC
/* Define flags for options with one argument (e.g. 'delta_disks +2') */
#define CTR_FLAG_OPTIONS_ONE_ARG (CTR_FLAG_REBUILD | \
				  CTR_FLAG_WRITE_MOSTLY | \
				  CTR_FLAG_DAEMON_SLEEP | \
				  CTR_FLAG_MIN_RECOVERY_RATE | \
				  CTR_FLAG_MAX_RECOVERY_RATE | \
				  CTR_FLAG_MAX_WRITE_BEHIND | \
				  CTR_FLAG_STRIPE_CACHE | \
				  CTR_FLAG_REGION_SIZE | \
				  CTR_FLAG_RAID10_COPIES | \
				  CTR_FLAG_RAID10_FORMAT)

/* All ctr optional arguments */
#define ALL_CTR_FLAGS		(CTR_FLAG_OPTIONS_NO_ARGS | \
				 CTR_FLAG_OPTIONS_ONE_ARG)
/* Invalid options definitions per raid level... */

/* "raid0" does not accept any options */
#define RAID0_INVALID_FLAGS ALL_CTR_FLAGS

/* "raid1" does not accept stripe cache or any raid10 options */
#define RAID1_INVALID_FLAGS	(CTR_FLAG_STRIPE_CACHE | \
				 CTR_FLAG_RAID10_COPIES | \
				 CTR_FLAG_RAID10_FORMAT)

/* "raid10" does not accept any raid1 or stripe cache options */
#define RAID10_INVALID_FLAGS	(CTR_FLAG_WRITE_MOSTLY | \
				 CTR_FLAG_MAX_WRITE_BEHIND | \
				 CTR_FLAG_STRIPE_CACHE)

/*
 * "raid4/5/6" do not accept any raid1 or raid10 specific options
 *
 * "raid6" does not accept "nosync", because it is not guaranteed
 * that both parity and q-syndrome are being written properly with
 * any writes
 */
#define RAID45_INVALID_FLAGS	(CTR_FLAG_WRITE_MOSTLY | \
				 CTR_FLAG_MAX_WRITE_BEHIND | \
				 CTR_FLAG_RAID10_FORMAT | \
				 CTR_FLAG_RAID10_COPIES)
#define RAID6_INVALID_FLAGS	(CTR_FLAG_NOSYNC | RAID45_INVALID_FLAGS)
/* ...invalid options definitions per raid level */
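/*
 * Illustrative example (editorial, not part of the original source):
 * a "raid0" table carrying "sync" is rejected, because
 *
 *	CTR_FLAG_SYNC & RAID0_INVALID_FLAGS != 0
 *
 * whereas "region_size" on "raid1" passes, since CTR_FLAG_REGION_SIZE
 * is not part of RAID1_INVALID_FLAGS.  See rs_check_for_invalid_flags().
 */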
struct raid_set {
	struct dm_target *ti;

	uint32_t bitmap_loaded;
	uint32_t ctr_flags;

	struct mddev md;
	struct raid_type *raid_type;
	struct dm_target_callbacks callbacks;

	struct raid_dev dev[0];
};
/* Supported raid types and properties. */
static struct raid_type {
	const char *name;		/* RAID algorithm. */
	const char *descr;		/* Descriptor text for logging. */
	const unsigned parity_devs;	/* # of parity devices. */
	const unsigned minimal_devs;	/* minimal # of devices in set. */
	const unsigned level;		/* RAID level. */
	const unsigned algorithm;	/* RAID algorithm. */
} raid_types[] = {
	{"raid0",    "RAID0 (striping)",		0, 2, 0,  0 /* NONE */},
	{"raid1",    "RAID1 (mirroring)",		0, 2, 1,  0 /* NONE */},
	{"raid10",   "RAID10 (striped mirrors)",	0, 2, 10, UINT_MAX /* Varies */},
	{"raid4",    "RAID4 (dedicated parity disk)",	1, 2, 5,  ALGORITHM_PARITY_0},
	{"raid5_la", "RAID5 (left asymmetric)",		1, 2, 5,  ALGORITHM_LEFT_ASYMMETRIC},
	{"raid5_ra", "RAID5 (right asymmetric)",	1, 2, 5,  ALGORITHM_RIGHT_ASYMMETRIC},
	{"raid5_ls", "RAID5 (left symmetric)",		1, 2, 5,  ALGORITHM_LEFT_SYMMETRIC},
	{"raid5_rs", "RAID5 (right symmetric)",		1, 2, 5,  ALGORITHM_RIGHT_SYMMETRIC},
	{"raid6_zr", "RAID6 (zero restart)",		2, 4, 6,  ALGORITHM_ROTATING_ZERO_RESTART},
	{"raid6_nr", "RAID6 (N restart)",		2, 4, 6,  ALGORITHM_ROTATING_N_RESTART},
	{"raid6_nc", "RAID6 (N continue)",		2, 4, 6,  ALGORITHM_ROTATING_N_CONTINUE}
};
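/*
 * Editorial note: "raid4" is implemented via the MD raid5 personality
 * (level 5 with ALGORITHM_PARITY_0, i.e. a dedicated parity disk),
 * which is why its table entry carries level 5 rather than 4.
 */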
/* True, if @v is in inclusive range [@min, @max] */
static bool _in_range(long v, long min, long max)
{
	return v >= min && v <= max;
}

/* ctr flag bit manipulation... */
/* Set single @flag in @flags */
static void _set_flag(uint32_t flag, uint32_t *flags)
{
	WARN_ON_ONCE(hweight32(flag) != 1);
	*flags |= flag;
}

/* Test single @flag in @flags */
static bool _test_flag(uint32_t flag, uint32_t flags)
{
	WARN_ON_ONCE(hweight32(flag) != 1);
	return (flag & flags) ? true : false;
}

/* Test multiple @flags in @all_flags */
static bool _test_flags(uint32_t flags, uint32_t all_flags)
{
	return (flags & all_flags) ? true : false;
}

/* Return true if single @flag is set in @*flags, else set it and return false */
static bool _test_and_set_flag(uint32_t flag, uint32_t *flags)
{
	if (_test_flag(flag, *flags))
		return true;

	_set_flag(flag, flags);
	return false;
}
/* ...ctr and runtime flag bit manipulation */
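/*
 * Illustrative usage (editorial): the ctr uses _test_and_set_flag() to
 * catch duplicate argument pairs, e.g.
 *
 *	_test_and_set_flag(CTR_FLAG_REGION_SIZE, &rs->ctr_flags)
 *
 * is false the first time (and sets the flag) and true on any repeat,
 * which parse_raid_params() turns into -EINVAL.
 */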
/* All table line arguments are defined here */
static struct arg_name_flag {
	const uint32_t flag;
	const char *name;
} _arg_name_flags[] = {
	{ CTR_FLAG_SYNC, "sync"},
	{ CTR_FLAG_NOSYNC, "nosync"},
	{ CTR_FLAG_REBUILD, "rebuild"},
	{ CTR_FLAG_DAEMON_SLEEP, "daemon_sleep"},
	{ CTR_FLAG_MIN_RECOVERY_RATE, "min_recovery_rate"},
	{ CTR_FLAG_MAX_RECOVERY_RATE, "max_recovery_rate"},
	{ CTR_FLAG_MAX_WRITE_BEHIND, "max_write_behind"},
	{ CTR_FLAG_WRITE_MOSTLY, "write_mostly"},
	{ CTR_FLAG_STRIPE_CACHE, "stripe_cache"},
	{ CTR_FLAG_REGION_SIZE, "region_size"},
	{ CTR_FLAG_RAID10_COPIES, "raid10_copies"},
	{ CTR_FLAG_RAID10_FORMAT, "raid10_format"},
};
/* Return argument name string for given @flag */
static const char *_argname_by_flag(const uint32_t flag)
{
	if (hweight32(flag) == 1) {
		struct arg_name_flag *anf = _arg_name_flags + ARRAY_SIZE(_arg_name_flags);

		while (anf-- > _arg_name_flags)
			if (_test_flag(flag, anf->flag))
				return anf->name;
	} else
		DMERR("%s called with more than one flag!", __func__);

	return NULL;
}
/*
 * bool helpers to test for various raid levels of a raid type
 */

/* Return true, if raid type in @rt is raid0 */
static bool rt_is_raid0(struct raid_type *rt)
{
	return !rt->level;
}

/* Return true, if raid type in @rt is raid1 */
static bool rt_is_raid1(struct raid_type *rt)
{
	return rt->level == 1;
}

/* Return true, if raid type in @rt is raid10 */
static bool rt_is_raid10(struct raid_type *rt)
{
	return rt->level == 10;
}

/* Return true, if raid type in @rt is raid4/5 */
static bool rt_is_raid45(struct raid_type *rt)
{
	return _in_range(rt->level, 4, 5);
}

/* Return true, if raid type in @rt is raid6 */
static bool rt_is_raid6(struct raid_type *rt)
{
	return rt->level == 6;
}

/* Return true, if raid type in @rt is raid4/5/6 */
static bool rt_is_raid456(struct raid_type *rt)
{
	return _in_range(rt->level, 4, 6);
}
/* END: raid level bools */
/*
 * Convenience functions to set ti->error to @errmsg and
 * return @r in order to shorten code in a lot of places
 */
static int ti_error_ret(struct dm_target *ti, const char *errmsg, int r)
{
	ti->error = (char *) errmsg;
	return r;
}

static int ti_error_einval(struct dm_target *ti, const char *errmsg)
{
	return ti_error_ret(ti, errmsg, -EINVAL);
}
/* END: convenience functions to set ti->error to @errmsg... */
/* Return invalid ctr flags for the raid level of @rs */
static uint32_t _invalid_flags(struct raid_set *rs)
{
	if (rt_is_raid0(rs->raid_type))
		return RAID0_INVALID_FLAGS;
	else if (rt_is_raid1(rs->raid_type))
		return RAID1_INVALID_FLAGS;
	else if (rt_is_raid10(rs->raid_type))
		return RAID10_INVALID_FLAGS;
	else if (rt_is_raid45(rs->raid_type))
		return RAID45_INVALID_FLAGS;
	else if (rt_is_raid6(rs->raid_type))
		return RAID6_INVALID_FLAGS;

	return ~0;
}
/*
 * Check for any invalid flags set on @rs defined by bitset @invalid_flags
 *
 * Has to be called after parsing of the ctr flags!
 */
static int rs_check_for_invalid_flags(struct raid_set *rs)
{
	if (_test_flags(rs->ctr_flags, _invalid_flags(rs)))
		return ti_error_einval(rs->ti, "Invalid flag combination");

	return 0;
}
/* MD raid10 layout helpers */
static const char *raid10_md_layout_to_format(int layout)
{
	/*
	 * Bit 16 and 17 stand for "offset" and "use_far_sets"
	 * Refer to MD's raid10.c for details
	 */
	if ((layout & 0x10000) && (layout & 0x20000))
		return "offset";

	if ((layout & 0xFF) > 1)
		return "near";

	return "far";
}

static unsigned raid10_md_layout_to_copies(int layout)
{
	if ((layout & 0xFF) > 1)
		return layout & 0xFF;

	return (layout >> 8) & 0xFF;
}
static int raid10_format_to_md_layout(const char *format, unsigned copies)
{
	unsigned n = 1, f = 1;

	if (!strcasecmp("near", format))
		n = copies;
	else
		f = copies;

	if (!strcasecmp("offset", format))
		return 0x30000 | (f << 8) | n;

	if (!strcasecmp("far", format))
		return 0x20000 | (f << 8) | n;

	return (f << 8) | n;
}
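/*
 * Worked examples of the layout encoding (editorial, derived from the
 * helpers above; low byte = near copies, next byte = far copies,
 * bit 16 = "offset", bit 17 = "use_far_sets"):
 *
 *	raid10_format_to_md_layout("near", 2)	-> 0x102
 *	raid10_format_to_md_layout("far", 2)	-> 0x20201
 *	raid10_format_to_md_layout("offset", 2)	-> 0x30201
 *
 * and raid10_md_layout_to_copies() recovers 2 from each of them.
 */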
static struct raid_type *get_raid_type(const char *name)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(raid_types); i++)
		if (!strcmp(raid_types[i].name, name))
			return &raid_types[i];

	return NULL;
}
static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *raid_type, unsigned raid_devs)
{
	unsigned i;
	struct raid_set *rs;

	if (raid_devs <= raid_type->parity_devs)
		return ERR_PTR(ti_error_einval(ti, "Insufficient number of devices"));

	rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL);
	if (!rs)
		return ERR_PTR(ti_error_ret(ti, "Cannot allocate raid context", -ENOMEM));

	mddev_init(&rs->md);

	rs->ti = ti;
	rs->raid_type = raid_type;
	rs->md.raid_disks = raid_devs;
	rs->md.level = raid_type->level;
	rs->md.new_level = rs->md.level;
	rs->md.layout = raid_type->algorithm;
	rs->md.new_layout = rs->md.layout;
	rs->md.delta_disks = 0;
	rs->md.recovery_cp = 0;

	for (i = 0; i < raid_devs; i++)
		md_rdev_init(&rs->dev[i].rdev);

	/*
	 * Remaining items to be initialized by further RAID params:
	 *  rs->md.persistent
	 *  rs->md.external
	 *  rs->md.chunk_sectors
	 *  rs->md.new_chunk_sectors
	 *  rs->md.dev_sectors
	 */

	return rs;
}
static void context_free(struct raid_set *rs)
{
	int i;

	for (i = 0; i < rs->md.raid_disks; i++) {
		if (rs->dev[i].meta_dev)
			dm_put_device(rs->ti, rs->dev[i].meta_dev);
		md_rdev_clear(&rs->dev[i].rdev);
		if (rs->dev[i].data_dev)
			dm_put_device(rs->ti, rs->dev[i].data_dev);
	}

	kfree(rs);
}
/*
 * For every device we have two words
 *  <meta_dev>: meta device name or '-' if missing
 *  <data_dev>: data device name or '-' if missing
 *
 * The following are permitted:
 *    - -
 *    - <data_dev>
 *    <meta_dev> <data_dev>
 *
 * The following is not allowed:
 *    <meta_dev> -
 *
 * This code parses those words. If there is a failure,
 * the caller must use context_free to unwind the operations.
 */
static int parse_dev_params(struct raid_set *rs, struct dm_arg_set *as)
{
	int i;
	int rebuild = 0;
	int metadata_available = 0;
	int r = 0;
	const char *arg;

	/* Put off the number of raid devices argument to get to dev pairs */
	arg = dm_shift_arg(as);
	if (!arg)
		return -EINVAL;

	for (i = 0; i < rs->md.raid_disks; i++) {
		rs->dev[i].rdev.raid_disk = i;

		rs->dev[i].meta_dev = NULL;
		rs->dev[i].data_dev = NULL;

		/*
		 * There are no offsets, since there is a separate device
		 * for data and metadata.
		 */
		rs->dev[i].rdev.data_offset = 0;
		rs->dev[i].rdev.mddev = &rs->md;

		arg = dm_shift_arg(as);
		if (!arg)
			return -EINVAL;

		if (strcmp(arg, "-")) {
			r = dm_get_device(rs->ti, arg,
					  dm_table_get_mode(rs->ti->table),
					  &rs->dev[i].meta_dev);
			if (r)
				return ti_error_ret(rs->ti, "RAID metadata device lookup failure", r);

			rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL);
			if (!rs->dev[i].rdev.sb_page)
				return ti_error_ret(rs->ti, "Failed to allocate superblock page", -ENOMEM);
		}

		arg = dm_shift_arg(as);
		if (!arg)
			return -EINVAL;

		if (!strcmp(arg, "-")) {
			if (!test_bit(In_sync, &rs->dev[i].rdev.flags) &&
			    (!rs->dev[i].rdev.recovery_offset))
				return ti_error_einval(rs->ti, "Drive designated for rebuild not specified");

			if (rs->dev[i].meta_dev)
				return ti_error_einval(rs->ti, "No data device supplied with metadata device");

			continue;
		}

		r = dm_get_device(rs->ti, arg,
				  dm_table_get_mode(rs->ti->table),
				  &rs->dev[i].data_dev);
		if (r)
			return ti_error_ret(rs->ti, "RAID device lookup failure", r);

		if (rs->dev[i].meta_dev) {
			metadata_available = 1;
			rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev;
		}
		rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
		list_add(&rs->dev[i].rdev.same_set, &rs->md.disks);
		if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
			rebuild++;
	}

	if (metadata_available) {
		rs->md.external = 0;
		rs->md.persistent = 1;
		rs->md.major_version = 2;
	} else if (rebuild && !rs->md.recovery_cp) {
		/*
		 * Without metadata, we will not be able to tell if the array
		 * is in-sync or not - we must assume it is not.  Therefore,
		 * it is impossible to rebuild a drive.
		 *
		 * Even if there is metadata, the on-disk information may
		 * indicate that the array is not in-sync and it will then
		 * fail at that time.
		 *
		 * User could specify 'nosync' option if desperate.
		 */
		DMERR("Unable to rebuild drive while array is not in-sync");
		return ti_error_einval(rs->ti, "Unable to rebuild drive while array is not in-sync");
	}

	return 0;
}
/*
 * validate_region_size
 * @rs
 * @region_size: region size in sectors.  If 0, pick a size (4MiB default).
 *
 * Set rs->md.bitmap_info.chunksize (which really refers to 'region size').
 * Ensure that (ti->len/region_size < 2^21) - required by MD bitmap.
 *
 * Returns: 0 on success, -EINVAL on failure.
 */
static int validate_region_size(struct raid_set *rs, unsigned long region_size)
{
	unsigned long min_region_size = rs->ti->len / (1 << 21);

	if (!region_size) {
		/*
		 * Choose a reasonable default.  All figures in sectors.
		 */
		if (min_region_size > (1 << 13)) {
			/* If not a power of 2, make it the next power of 2 */
			region_size = roundup_pow_of_two(min_region_size);
			DMINFO("Choosing default region size of %lu sectors",
			       region_size);
		} else {
			DMINFO("Choosing default region size of 4MiB");
			region_size = 1 << 13; /* sectors */
		}
	} else {
		/*
		 * Validate user-supplied value.
		 */
		if (region_size > rs->ti->len)
			return ti_error_einval(rs->ti, "Supplied region size is too large");

		if (region_size < min_region_size) {
			DMERR("Supplied region_size (%lu sectors) below minimum (%lu)",
			      region_size, min_region_size);
			return ti_error_einval(rs->ti, "Supplied region size is too small");
		}

		if (!is_power_of_2(region_size))
			return ti_error_einval(rs->ti, "Region size is not a power of 2");

		if (region_size < rs->md.chunk_sectors)
			return ti_error_einval(rs->ti, "Region size is smaller than the chunk size");
	}

	/*
	 * Convert sectors to bytes.
	 */
	rs->md.bitmap_info.chunksize = (region_size << 9);

	return 0;
}
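/*
 * Worked example (editorial): for a 1 TiB target, ti->len is 2^31
 * sectors, so min_region_size = 2^31 / 2^21 = 1024 sectors.  That is
 * below the 2^13 threshold, so the default of 8192 sectors (4MiB) is
 * chosen; bitmap_info.chunksize then becomes 8192 << 9 = 4194304 bytes.
 */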
/*
 * validate_raid_redundancy
 * @rs
 *
 * Determine if there are enough devices in the array that haven't
 * failed (or are being rebuilt) to form a usable array.
 *
 * Returns: 0 on success, -EINVAL on failure.
 */
static int validate_raid_redundancy(struct raid_set *rs)
{
	unsigned i, rebuild_cnt = 0;
	unsigned rebuilds_per_group = 0, copies, d;
	unsigned group_size, last_group_start;

	for (i = 0; i < rs->md.raid_disks; i++)
		if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
		    !rs->dev[i].rdev.sb_page)
			rebuild_cnt++;

	switch (rs->raid_type->level) {
	case 1:
		if (rebuild_cnt >= rs->md.raid_disks)
			goto too_many;
		break;
	case 4:
	case 5:
	case 6:
		if (rebuild_cnt > rs->raid_type->parity_devs)
			goto too_many;
		break;
	case 10:
		copies = raid10_md_layout_to_copies(rs->md.layout);
		if (rebuild_cnt < copies)
			break;

		/*
		 * It is possible to have a higher rebuild count for RAID10,
		 * as long as the failed devices occur in different mirror
		 * groups (i.e. different stripes).
		 *
		 * When checking "near" format, make sure no adjacent devices
		 * have failed beyond what can be handled.  In addition to the
		 * simple case where the number of devices is a multiple of the
		 * number of copies, we must also handle cases where the number
		 * of devices is not a multiple of the number of copies.
		 * E.g. dev1 dev2 dev3 dev4 dev5
		 *          A    A    B    B    C
		 *          C    D    D    E    E
		 */
		if (!strcmp("near", raid10_md_layout_to_format(rs->md.layout))) {
			for (i = 0; i < rs->md.raid_disks * copies; i++) {
				if (!(i % copies))
					rebuilds_per_group = 0;
				d = i % rs->md.raid_disks;
				if ((!rs->dev[d].rdev.sb_page ||
				     !test_bit(In_sync, &rs->dev[d].rdev.flags)) &&
				    (++rebuilds_per_group >= copies))
					goto too_many;
			}
			break;
		}

		/*
		 * When checking "far" and "offset" formats, we need to ensure
		 * that the device that holds its copy is not also dead or
		 * being rebuilt.  (Note that "far" and "offset" formats only
		 * support two copies right now.  These formats also only ever
		 * use the 'use_far_sets' variant.)
		 *
		 * This check is somewhat complicated by the need to account
		 * for arrays that are not a multiple of (far) copies.  This
		 * results in the need to treat the last (potentially larger)
		 * set differently.
		 */
		group_size = (rs->md.raid_disks / copies);
		last_group_start = (rs->md.raid_disks / group_size) - 1;
		last_group_start *= group_size;
		for (i = 0; i < rs->md.raid_disks; i++) {
			if (!(i % copies) && !(i > last_group_start))
				rebuilds_per_group = 0;
			if ((!rs->dev[i].rdev.sb_page ||
			     !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
			    (++rebuilds_per_group >= copies))
				goto too_many;
		}
		break;
	default:
		if (rebuild_cnt)
			return -EINVAL;
	}

	return 0;

too_many:
	return -EINVAL;
}
/*
 * Possible arguments are...
 *	<chunk_size> [optional_args]
 *
 * Argument definitions
 *    <chunk_size>			The number of sectors per disk that
 *					will form the "stripe"
 *    [[no]sync]			Force or prevent recovery of the
 *					array
 *    [rebuild <idx>]			Rebuild the drive indicated by the index
 *    [daemon_sleep <ms>]		Time between bitmap daemon work to
 *					clear bits
 *    [min_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
 *    [max_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
 *    [write_mostly <idx>]		Indicate a write mostly drive via index
 *    [max_write_behind <sectors>]	See '--write-behind=' (man mdadm)
 *    [stripe_cache <sectors>]		Stripe cache size for higher RAIDs
 *    [region_size <sectors>]		Defines granularity of bitmap
 *
 * RAID10-only options:
 *    [raid10_copies <# copies>]	Number of copies.  (Default: 2)
 *    [raid10_format <near|far|offset>]	Layout algorithm.  (Default: near)
 */
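/*
 * Illustrative parameter fragment (editorial, values are made up): a
 * raid10 set with 64KiB chunks, two "near" copies and a capped recovery
 * rate could pass
 *
 *	raid10 7 128 raid10_copies 2 raid10_format near max_recovery_rate 8192 ...
 *
 * where 128 is the chunk size in sectors and 7 counts the raid
 * parameter words that follow, including the chunk size itself.
 */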
static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
			     unsigned num_raid_params)
{
	const char *raid10_format = "near";
	unsigned raid10_copies = 2;
	unsigned i;
	unsigned value, region_size = 0;
	sector_t sectors_per_dev = rs->ti->len;
	sector_t max_io_len;
	const char *arg, *key;
	struct raid_dev *rd;

	arg = dm_shift_arg(as);
	num_raid_params--; /* Account for chunk_size argument */

	if (kstrtouint(arg, 10, &value) < 0)
		return ti_error_einval(rs->ti, "Bad numerical argument given for chunk_size");

	/*
	 * First, parse the in-order required arguments
	 * "chunk_size" is the only argument of this type.
	 */
	if (rt_is_raid1(rs->raid_type)) {
		if (value)
			DMERR("Ignoring chunk size parameter for RAID 1");
		value = 0;
	} else if (!is_power_of_2(value))
		return ti_error_einval(rs->ti, "Chunk size must be a power of 2");
	else if (value < 8)
		return ti_error_einval(rs->ti, "Chunk size value is too small");

	rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;

	/*
	 * We set each individual device as In_sync with a completed
	 * 'recovery_offset'.  If there has been a device failure or
	 * replacement then one of the following cases applies:
	 *
	 *   1) User specifies 'rebuild'.
	 *      - Device is reset when param is read.
	 *   2) A new device is supplied.
	 *      - No matching superblock found, resets device.
	 *   3) Device failure was transient and returns on reload.
	 *      - Failure noticed, resets device for bitmap replay.
	 *   4) Device hadn't completed recovery after previous failure.
	 *      - Superblock is read and overrides recovery_offset.
	 *
	 * What is found in the superblocks of the devices is always
	 * authoritative, unless 'rebuild' or '[no]sync' was specified.
	 */
	for (i = 0; i < rs->md.raid_disks; i++) {
		set_bit(In_sync, &rs->dev[i].rdev.flags);
		rs->dev[i].rdev.recovery_offset = MaxSector;
	}

	/*
	 * Second, parse the unordered optional arguments
	 */
	for (i = 0; i < num_raid_params; i++) {
		arg = dm_shift_arg(as);
		if (!arg)
			return ti_error_einval(rs->ti, "Not enough raid parameters given");

		if (!strcasecmp(arg, "nosync")) {
			rs->md.recovery_cp = MaxSector;
			_set_flag(CTR_FLAG_NOSYNC, &rs->ctr_flags);
			continue;
		}
		if (!strcasecmp(arg, "sync")) {
			rs->md.recovery_cp = 0;
			_set_flag(CTR_FLAG_SYNC, &rs->ctr_flags);
			continue;
		}

		key = arg;
		arg = dm_shift_arg(as);
		i++; /* Account for the argument pairs */
		if (!arg)
			return ti_error_einval(rs->ti, "Wrong number of raid parameters given");

		/*
		 * Parameters that take a string value are checked here.
		 */
		if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_RAID10_FORMAT))) {
			if (_test_and_set_flag(CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags))
				return ti_error_einval(rs->ti, "Only one raid10_format argument pair allowed");
			if (!rt_is_raid10(rs->raid_type))
				return ti_error_einval(rs->ti, "'raid10_format' is an invalid parameter for this RAID type");
			if (strcmp("near", arg) &&
			    strcmp("far", arg) &&
			    strcmp("offset", arg))
				return ti_error_einval(rs->ti, "Invalid 'raid10_format' value given");

			raid10_format = arg;
			continue;
		}

		if (kstrtouint(arg, 10, &value) < 0)
			return ti_error_einval(rs->ti, "Bad numerical argument given in raid params");

		if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_REBUILD))) {
			/*
			 * "rebuild" is being passed in by userspace to provide
			 * indexes of replaced devices and to set up additional
			 * devices on raid level takeover.
			 */
			if (!_in_range(value, 0, rs->md.raid_disks - 1))
				return ti_error_einval(rs->ti, "Invalid rebuild index given");

			rd = rs->dev + value;
			clear_bit(In_sync, &rd->rdev.flags);
			clear_bit(Faulty, &rd->rdev.flags);
			rd->rdev.recovery_offset = 0;
			_set_flag(CTR_FLAG_REBUILD, &rs->ctr_flags);
		} else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_WRITE_MOSTLY))) {
			if (!rt_is_raid1(rs->raid_type))
				return ti_error_einval(rs->ti, "write_mostly option is only valid for RAID1");

			if (!_in_range(value, 0, rs->md.raid_disks - 1))
				return ti_error_einval(rs->ti, "Invalid write_mostly index given");

			set_bit(WriteMostly, &rs->dev[value].rdev.flags);
			_set_flag(CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags);
		} else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_MAX_WRITE_BEHIND))) {
			if (!rt_is_raid1(rs->raid_type))
				return ti_error_einval(rs->ti, "max_write_behind option is only valid for RAID1");

			if (_test_and_set_flag(CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags))
				return ti_error_einval(rs->ti, "Only one max_write_behind argument pair allowed");

			/*
			 * In device-mapper, we specify things in sectors, but
			 * MD records this value in kB
			 */
			value /= 2;
			if (value > COUNTER_MAX)
				return ti_error_einval(rs->ti, "Max write-behind limit out of range");

			rs->md.bitmap_info.max_write_behind = value;
		} else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_DAEMON_SLEEP))) {
			if (_test_and_set_flag(CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags))
				return ti_error_einval(rs->ti, "Only one daemon_sleep argument pair allowed");
			if (!value || (value > MAX_SCHEDULE_TIMEOUT))
				return ti_error_einval(rs->ti, "daemon sleep period out of range");
			rs->md.bitmap_info.daemon_sleep = value;
		} else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_STRIPE_CACHE))) {
			if (_test_and_set_flag(CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags))
				return ti_error_einval(rs->ti, "Only one stripe_cache argument pair allowed");
			/*
			 * In device-mapper, we specify things in sectors, but
			 * MD records this value in kB
			 */
			value /= 2;

			if (!rt_is_raid456(rs->raid_type))
				return ti_error_einval(rs->ti, "Inappropriate argument: stripe_cache");
			if (raid5_set_cache_size(&rs->md, (int)value))
				return ti_error_einval(rs->ti, "Bad stripe_cache size");

		} else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_MIN_RECOVERY_RATE))) {
			if (_test_and_set_flag(CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags))
				return ti_error_einval(rs->ti, "Only one min_recovery_rate argument pair allowed");
			if (value > INT_MAX)
				return ti_error_einval(rs->ti, "min_recovery_rate out of range");
			rs->md.sync_speed_min = (int)value;
		} else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_MAX_RECOVERY_RATE))) {
			if (_test_and_set_flag(CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags))
				return ti_error_einval(rs->ti, "Only one max_recovery_rate argument pair allowed");
			if (value > INT_MAX)
				return ti_error_einval(rs->ti, "max_recovery_rate out of range");
			rs->md.sync_speed_max = (int)value;
		} else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_REGION_SIZE))) {
			if (_test_and_set_flag(CTR_FLAG_REGION_SIZE, &rs->ctr_flags))
				return ti_error_einval(rs->ti, "Only one region_size argument pair allowed");

			region_size = value;
		} else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_RAID10_COPIES))) {
			if (_test_and_set_flag(CTR_FLAG_RAID10_COPIES, &rs->ctr_flags))
				return ti_error_einval(rs->ti, "Only one raid10_copies argument pair allowed");

			if (!_in_range(value, 2, rs->md.raid_disks))
				return ti_error_einval(rs->ti, "Bad value for 'raid10_copies'");

			raid10_copies = value;
		} else {
			DMERR("Unable to parse RAID parameter: %s", key);
			return ti_error_einval(rs->ti, "Unable to parse RAID parameters");
		}
	}

	if (validate_region_size(rs, region_size))
		return -EINVAL;

	if (rs->md.chunk_sectors)
		max_io_len = rs->md.chunk_sectors;
	else
		max_io_len = region_size;

	if (dm_set_target_max_io_len(rs->ti, max_io_len))
		return -EINVAL;

	if (rt_is_raid10(rs->raid_type)) {
		if (raid10_copies > rs->md.raid_disks)
			return ti_error_einval(rs->ti, "Not enough devices to satisfy specification");

		/*
		 * If the format is not "near", we only support
		 * two copies at the moment.
		 */
		if (strcmp("near", raid10_format) && (raid10_copies > 2))
			return ti_error_einval(rs->ti, "Too many copies for given RAID10 format.");

		/* (Len * #mirrors) / #devices */
		sectors_per_dev = rs->ti->len * raid10_copies;
		sector_div(sectors_per_dev, rs->md.raid_disks);

		rs->md.layout = raid10_format_to_md_layout(raid10_format,
							   raid10_copies);
		rs->md.new_layout = rs->md.layout;
	} else if (!rt_is_raid1(rs->raid_type) &&
		   sector_div(sectors_per_dev,
			      (rs->md.raid_disks - rs->raid_type->parity_devs)))
		return ti_error_einval(rs->ti, "Target length not divisible by number of data devices");

	rs->md.dev_sectors = sectors_per_dev;

	/* Assume there are no metadata devices until the drives are parsed */
	rs->md.persistent = 0;
	rs->md.external = 1;

	/* Check, if any invalid ctr arguments have been passed in for the raid level */
	return rs_check_for_invalid_flags(rs);
}
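/*
 * Worked size example (editorial): a raid5 set (one parity device)
 * across 4 devices with ti->len = 3145728 sectors yields
 * sectors_per_dev = 3145728 / (4 - 1) = 1048576 sectors per member;
 * raid10 with 2 copies over 4 devices at the same length gives
 * (3145728 * 2) / 4 = 1572864 sectors per member.
 */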
static void do_table_event(struct work_struct *ws)
{
	struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);

	dm_table_event(rs->ti->table);
}

static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
{
	struct raid_set *rs = container_of(cb, struct raid_set, callbacks);

	return mddev_congested(&rs->md, bits);
}
/*
 * This structure is never routinely used by userspace, unlike md superblocks.
 * Devices with this superblock should only ever be accessed via device-mapper.
 */
#define DM_RAID_MAGIC 0x64526D44
struct dm_raid_superblock {
	__le32 magic;		/* "DmRd" */
	__le32 features;	/* Used to indicate possible future changes */

	__le32 num_devices;	/* Number of devices in this array. (Max 64) */
	__le32 array_position;	/* The position of this drive in the array */

	__le64 events;		/* Incremented by md when superblock updated */
	__le64 failed_devices;	/* Bit field of devices to indicate failures */

	/*
	 * This offset tracks the progress of the repair or replacement of
	 * an individual drive.
	 */
	__le64 disk_recovery_offset;

	/*
	 * This offset tracks the progress of the initial array
	 * synchronisation/parity calculation.
	 */
	__le64 array_resync_offset;

	/*
	 * RAID characteristics
	 */
	__le32 level;
	__le32 layout;
	__le32 stripe_sectors;

	/* Remainder of a logical block is zero-filled when writing (see super_sync()). */
} __packed;
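/*
 * Editorial sketch: a compile-time guard along these lines (hypothetical,
 * not in the original source) would catch this structure ever outgrowing
 * the smallest supported logical block:
 *
 *	BUILD_BUG_ON(sizeof(struct dm_raid_superblock) > 512);
 */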
static int read_disk_sb(struct md_rdev *rdev, int size)
{
	BUG_ON(!rdev->sb_page);

	if (rdev->sb_loaded)
		return 0;

	if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, 1)) {
		DMERR("Failed to read superblock of device at position %d",
		      rdev->raid_disk);
		md_error(rdev->mddev, rdev);
		return -EINVAL;
	}

	rdev->sb_loaded = 1;

	return 0;
}
static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	int i;
	uint64_t failed_devices;
	struct dm_raid_superblock *sb;
	struct raid_set *rs = container_of(mddev, struct raid_set, md);

	sb = page_address(rdev->sb_page);
	failed_devices = le64_to_cpu(sb->failed_devices);

	for (i = 0; i < mddev->raid_disks; i++)
		if (!rs->dev[i].data_dev ||
		    test_bit(Faulty, &(rs->dev[i].rdev.flags)))
			failed_devices |= (1ULL << i);

	memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));

	sb->magic = cpu_to_le32(DM_RAID_MAGIC);
	sb->features = cpu_to_le32(0);	/* No features yet */

	sb->num_devices = cpu_to_le32(mddev->raid_disks);
	sb->array_position = cpu_to_le32(rdev->raid_disk);

	sb->events = cpu_to_le64(mddev->events);
	sb->failed_devices = cpu_to_le64(failed_devices);

	sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset);
	sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp);

	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);
	sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);
}
/*
 * This function creates a superblock if one is not found on the device
 * and will decide which superblock to use if there's a choice.
 *
 * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise
 */
static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
{
	int r;
	struct dm_raid_superblock *sb;
	struct dm_raid_superblock *refsb;
	uint64_t events_sb, events_refsb;

	rdev->sb_start = 0;
	rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
	if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) {
		DMERR("superblock size of a logical block is no longer valid");
		return -EINVAL;
	}

	r = read_disk_sb(rdev, rdev->sb_size);
	if (r)
		return r;

	sb = page_address(rdev->sb_page);

	/*
	 * Two cases that we want to write new superblocks and rebuild:
	 * 1) New device (no matching magic number)
	 * 2) Device specified for rebuild (!In_sync w/ offset == 0)
	 */
	if ((sb->magic != cpu_to_le32(DM_RAID_MAGIC)) ||
	    (!test_bit(In_sync, &rdev->flags) && !rdev->recovery_offset)) {
		super_sync(rdev->mddev, rdev);

		set_bit(FirstUse, &rdev->flags);

		/* Force writing of superblocks to disk */
		set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags);

		/* Any superblock is better than none, choose that if given */
		return refdev ? 0 : 1;
	}

	if (!refdev)
		return 1;

	events_sb = le64_to_cpu(sb->events);

	refsb = page_address(refdev->sb_page);
	events_refsb = le64_to_cpu(refsb->events);

	return (events_sb > events_refsb) ? 1 : 0;
}
static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev)
{
	int role;
	struct raid_set *rs = container_of(mddev, struct raid_set, md);
	uint64_t events_sb;
	uint64_t failed_devices;
	struct dm_raid_superblock *sb;
	uint32_t new_devs = 0;
	uint32_t rebuilds = 0;
	struct md_rdev *r;
	struct dm_raid_superblock *sb2;

	sb = page_address(rdev->sb_page);
	events_sb = le64_to_cpu(sb->events);
	failed_devices = le64_to_cpu(sb->failed_devices);

	/*
	 * Initialise to 1 if this is a new superblock.
	 */
	mddev->events = events_sb ? : 1;

	/*
	 * Reshaping is not currently allowed
	 */
	if (le32_to_cpu(sb->level) != mddev->level) {
		DMERR("Reshaping arrays not yet supported. (RAID level change)");
		return -EINVAL;
	}
	if (le32_to_cpu(sb->layout) != mddev->layout) {
		DMERR("Reshaping arrays not yet supported. (RAID layout change)");
		DMERR("  0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
		DMERR("  Old layout: %s w/ %d copies",
		      raid10_md_layout_to_format(le32_to_cpu(sb->layout)),
		      raid10_md_layout_to_copies(le32_to_cpu(sb->layout)));
		DMERR("  New layout: %s w/ %d copies",
		      raid10_md_layout_to_format(mddev->layout),
		      raid10_md_layout_to_copies(mddev->layout));
		return -EINVAL;
	}
	if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) {
		DMERR("Reshaping arrays not yet supported. (stripe sectors change)");
		return -EINVAL;
	}

	/* We can only change the number of devices in RAID1 right now */
	if (!rt_is_raid1(rs->raid_type) &&
	    (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) {
		DMERR("Reshaping arrays not yet supported. (device count change)");
		return -EINVAL;
	}

	if (!(_test_flags(CTR_FLAGS_ANY_SYNC, rs->ctr_flags)))
		mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);

	/*
	 * During load, we set FirstUse if a new superblock was written.
	 * There are two reasons we might not have a superblock:
	 * 1) The array is brand new - in which case, all of the
	 *    devices must have their In_sync bit set.  Also,
	 *    recovery_cp must be 0, unless forced.
	 * 2) This is a new device being added to an old array
	 *    and the new device needs to be rebuilt - in which
	 *    case the In_sync bit will /not/ be set and
	 *    recovery_cp must be MaxSector.
	 */
	rdev_for_each(r, mddev) {
		if (!test_bit(In_sync, &r->flags)) {
			DMINFO("Device %d specified for rebuild: Clearing superblock",
			       r->raid_disk);
			rebuilds++;
		} else if (test_bit(FirstUse, &r->flags))
			new_devs++;
	}

	if (!rebuilds) {
		if (new_devs == mddev->raid_disks) {
			DMINFO("Superblocks created for new array");
			set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
		} else if (new_devs) {
			DMERR("New device injected into existing array without 'rebuild' parameter specified");
			return -EINVAL;
		}
	} else if (new_devs) {
		DMERR("'rebuild' devices cannot be injected into an array with other first-time devices");
		return -EINVAL;
	} else if (mddev->recovery_cp != MaxSector) {
		DMERR("'rebuild' specified while array is not in-sync");
		return -EINVAL;
	}

	/*
	 * Now we set the Faulty bit for those devices that are
	 * recorded in the superblock as failed.
	 */
	rdev_for_each(r, mddev) {
		if (!r->sb_page)
			continue;
		sb2 = page_address(r->sb_page);
		sb2->failed_devices = 0;

		/*
		 * Check for any device re-ordering.
		 */
		if (!test_bit(FirstUse, &r->flags) && (r->raid_disk >= 0)) {
			role = le32_to_cpu(sb2->array_position);
			if (role != r->raid_disk) {
				if (!rt_is_raid1(rs->raid_type))
					return ti_error_einval(rs->ti, "Cannot change device positions in RAID array");
				DMINFO("RAID1 device #%d now at position #%d",
				       role, r->raid_disk);
			}

			/*
			 * Partial recovery is performed on
			 * returning failed devices.
			 */
			if (failed_devices & (1ULL << role))
				set_bit(Faulty, &r->flags);
		}
	}

	return 0;
}
static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
{
	struct mddev *mddev = &rs->md;
	struct dm_raid_superblock *sb = page_address(rdev->sb_page);

	/*
	 * If mddev->events is not set, we know we have not yet initialized
	 * the array.
	 */
	if (!mddev->events && super_init_validation(mddev, rdev))
		return -EINVAL;

	if (le32_to_cpu(sb->features)) {
		rs->ti->error = "Unable to assemble array: No feature flags supported yet";
		return -EINVAL;
	}

	/* Enable bitmap creation for RAID levels != 0 */
	mddev->bitmap_info.offset = rt_is_raid0(rs->raid_type) ? 0 : to_sector(4096);
	rdev->mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;

	if (!test_bit(FirstUse, &rdev->flags)) {
		rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
		if (rdev->recovery_offset != MaxSector)
			clear_bit(In_sync, &rdev->flags);
	}

	/*
	 * If a device comes back, set it as not In_sync and no longer faulty.
	 */
	if (test_bit(Faulty, &rdev->flags)) {
		clear_bit(Faulty, &rdev->flags);
		clear_bit(In_sync, &rdev->flags);
		rdev->saved_raid_disk = rdev->raid_disk;
		rdev->recovery_offset = 0;
	}

	clear_bit(FirstUse, &rdev->flags);

	return 0;
}
/*
 * Analyse superblocks and select the freshest.
 */
static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
{
	int r;
	struct raid_dev *dev;
	struct md_rdev *rdev, *tmp, *freshest;
	struct mddev *mddev = &rs->md;

	freshest = NULL;
	rdev_for_each_safe(rdev, tmp, mddev) {
		/*
		 * Skipping super_load due to CTR_FLAG_SYNC will cause
		 * the array to undergo initialization again as
		 * though it were new.  This is the intended effect
		 * of the "sync" directive.
		 *
		 * When reshaping capability is added, we must ensure
		 * that the "sync" directive is disallowed during the
		 * reshape.
		 */
		rdev->sectors = to_sector(i_size_read(rdev->bdev->bd_inode));

		if (_test_flag(CTR_FLAG_SYNC, rs->ctr_flags))
			continue;

		if (!rdev->meta_bdev)
			continue;

		r = super_load(rdev, freshest);

		switch (r) {
		case 1:
			freshest = rdev;
			break;
		case 0:
			break;
		default:
			dev = container_of(rdev, struct raid_dev, rdev);
			if (dev->meta_dev)
				dm_put_device(ti, dev->meta_dev);

			dev->meta_dev = NULL;
			rdev->meta_bdev = NULL;

			if (rdev->sb_page)
				put_page(rdev->sb_page);

			rdev->sb_page = NULL;

			rdev->sb_loaded = 0;

			/*
			 * We might be able to salvage the data device
			 * even though the meta device has failed.  For
			 * now, we behave as though '- -' had been
			 * set for this device in the table.
			 */
			if (dev->data_dev)
				dm_put_device(ti, dev->data_dev);

			dev->data_dev = NULL;
			rdev->bdev = NULL;

			list_del(&rdev->same_set);
		}
	}

	if (!freshest)
		return 0;

	if (validate_raid_redundancy(rs))
		return ti_error_einval(rs->ti, "Insufficient redundancy to activate array");

	/*
	 * Validation of the freshest device provides the source of
	 * validation for the remaining devices.
	 */
	if (super_validate(rs, freshest))
		return ti_error_einval(rs->ti, "Unable to assemble array: Invalid superblocks");

	rdev_for_each(rdev, mddev)
		if ((rdev != freshest) && super_validate(rs, rdev))
			return -EINVAL;

	return 0;
}
/*
 * Enable/disable discard support on RAID set depending on
 * RAID level and discard properties of underlying RAID members.
 */
static void configure_discard_support(struct dm_target *ti, struct raid_set *rs)
{
	int i;
	bool raid456;

	/* Assume discards not supported until after checks below. */
	ti->discards_supported = false;

	/* RAID level 4,5,6 require discard_zeroes_data for data integrity! */
	raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6);

	for (i = 0; i < rs->md.raid_disks; i++) {
		struct request_queue *q;

		if (!rs->dev[i].rdev.bdev)
			continue;

		q = bdev_get_queue(rs->dev[i].rdev.bdev);
		if (!q || !blk_queue_discard(q))
			return;

		if (raid456) {
			if (!q->limits.discard_zeroes_data)
				return;
			if (!devices_handle_discard_safely) {
				DMERR("raid456 discard support disabled due to discard_zeroes_data uncertainty.");
				DMERR("Set dm-raid.devices_handle_discard_safely=Y to override.");
				return;
			}
		}
	}

	/* All RAID members properly support discards */
	ti->discards_supported = true;

	/*
	 * RAID1 and RAID10 personalities require bio splitting,
	 * RAID0/4/5/6 don't and process large discard bios properly.
	 */
	ti->split_discard_bios = !!(rs->md.level == 1 || rs->md.level == 10);
	ti->num_discard_bios = 1;
}
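/*
 * Usage note (editorial): the override referenced above can be given at
 * module load time,
 *
 *	modprobe dm-raid devices_handle_discard_safely=Y
 *
 * or toggled at runtime via
 * /sys/module/dm_raid/parameters/devices_handle_discard_safely
 * (the parameter is declared with mode 0644 at the bottom of this file).
 */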
/*
 * Construct a RAID0/1/10/4/5/6 mapping:
 * Args:
 *	<raid_type> <#raid_params> <raid_params>{0,} \
 *	<#raid_devs> [<meta_dev1> <dev1>]{1,}
 *
 * <raid_params> varies by <raid_type>.  See 'parse_raid_params' for
 * details on possible <raid_params>.
 *
 * Userspace is free to initialize the metadata devices, hence the superblocks to
 * enforce recreation based on the passed in table parameters.
 */
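/*
 * Illustrative table line (editorial, sizes and device numbers are made
 * up): a 4-disk raid5 with 64KiB chunks and per-disk metadata devices
 * might be created with
 *
 *	dmsetup create r5 --table \
 *	  "0 3145728 raid raid5_ls 1 128 4 254:0 254:1 254:2 254:3 254:4 254:5 254:6 254:7"
 */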
static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	int r;
	struct raid_type *rt;
	unsigned num_raid_params, num_raid_devs;
	struct raid_set *rs = NULL;
	const char *arg;
	struct dm_arg_set as = { argc, argv }, as_nrd;
	struct dm_arg _args[] = {
		{ 0, as.argc, "Cannot understand number of raid parameters" },
		{ 1, 254, "Cannot understand number of raid devices parameters" }
	};

	/* Must have <raid_type> */
	arg = dm_shift_arg(&as);
	if (!arg)
		return ti_error_einval(ti, "No arguments");

	rt = get_raid_type(arg);
	if (!rt)
		return ti_error_einval(ti, "Unrecognised raid_type");

	/* Must have <#raid_params> */
	if (dm_read_arg_group(_args, &as, &num_raid_params, &ti->error))
		return -EINVAL;

	/* number of raid device tuples <meta_dev data_dev> */
	as_nrd = as;
	dm_consume_args(&as_nrd, num_raid_params);
	_args[1].max = (as_nrd.argc - 1) / 2;
	if (dm_read_arg(_args + 1, &as_nrd, &num_raid_devs, &ti->error))
		return -EINVAL;

	if (!_in_range(num_raid_devs, 1, MAX_RAID_DEVICES))
		return ti_error_einval(ti, "Invalid number of supplied raid devices");

	rs = context_alloc(ti, rt, num_raid_devs);
	if (IS_ERR(rs))
		return PTR_ERR(rs);

	r = parse_raid_params(rs, &as, num_raid_params);
	if (r)
		goto bad;

	r = parse_dev_params(rs, &as);
	if (r)
		goto bad;

	rs->md.sync_super = super_sync;
	r = analyse_superblocks(ti, rs);
	if (r)
		goto bad;

	INIT_WORK(&rs->md.event_work, do_table_event);
	ti->private = rs;
	ti->num_flush_bios = 1;

	/*
	 * Disable/enable discard support on RAID set.
	 */
	configure_discard_support(ti, rs);

	/* Has to be held on running the array */
	mddev_lock_nointr(&rs->md);
	r = md_run(&rs->md);
	rs->md.in_sync = 0; /* Assume already marked dirty */
	mddev_unlock(&rs->md);

	if (r) {
		ti->error = "Failed to run raid array";
		goto bad;
	}

	if (ti->len != rs->md.array_sectors) {
		r = ti_error_einval(ti, "Array size does not match requested target length");
		goto size_mismatch;
	}
	rs->callbacks.congested_fn = raid_is_congested;
	dm_table_add_target_callbacks(ti->table, &rs->callbacks);

	mddev_suspend(&rs->md);
	return 0;

size_mismatch:
	md_stop(&rs->md);
bad:
	context_free(rs);

	return r;
}
static void raid_dtr(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	list_del_init(&rs->callbacks.list);
	md_stop(&rs->md);
	context_free(rs);
}

static int raid_map(struct dm_target *ti, struct bio *bio)
{
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	mddev->pers->make_request(mddev, bio);

	return DM_MAPIO_SUBMITTED;
}
static const char *decipher_sync_action(struct mddev *mddev)
{
	if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
		return "frozen";

	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
	    (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
			return "reshape";

		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
			if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
				return "resync";
			else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
				return "check";
			return "repair";
		}

		if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
			return "recover";
	}

	return "idle";
}
static void raid_status(struct dm_target *ti, status_type_t type,
			unsigned status_flags, char *result, unsigned maxlen)
{
	struct raid_set *rs = ti->private;
	unsigned raid_param_cnt = 1; /* at least 1 for chunksize */
	unsigned sz = 0;
	int i, array_in_sync = 0;
	sector_t sync;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%s %d ", rs->raid_type->name, rs->md.raid_disks);

		if (!rt_is_raid0(rs->raid_type)) {
			if (test_bit(MD_RECOVERY_RUNNING, &rs->md.recovery))
				sync = rs->md.curr_resync_completed;
			else
				sync = rs->md.recovery_cp;

			if (sync >= rs->md.resync_max_sectors) {
				/*
				 * Sync complete.
				 */
				array_in_sync = 1;
				sync = rs->md.resync_max_sectors;
			} else if (test_bit(MD_RECOVERY_REQUESTED, &rs->md.recovery)) {
				/*
				 * If "check" or "repair" is occurring, the array has
				 * undergone an initial sync and the health characters
				 * should not be 'a' anymore.
				 */
				array_in_sync = 1;
			} else {
				/*
				 * The array may be doing an initial sync, or it may
				 * be rebuilding individual components.  If all the
				 * devices are In_sync, then it is the array that is
				 * being initialized.
				 */
				for (i = 0; i < rs->md.raid_disks; i++)
					if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
						array_in_sync = 1;
			}
		} else {
			/* RAID0 */
			array_in_sync = 1;
			sync = rs->md.resync_max_sectors;
		}

		/*
		 * Status characters:
		 *  'D' = Dead/Failed device
		 *  'a' = Alive but not in-sync
		 *  'A' = Alive and in-sync
		 */
		for (i = 0; i < rs->md.raid_disks; i++) {
			if (test_bit(Faulty, &rs->dev[i].rdev.flags))
				DMEMIT("D");
			else if (!array_in_sync ||
				 !test_bit(In_sync, &rs->dev[i].rdev.flags))
				DMEMIT("a");
			else
				DMEMIT("A");
		}

		/*
		 * The in-sync ratio shows the progress of:
		 *  - Initializing the array
		 *  - Rebuilding a subset of devices of the array
		 * The user can distinguish between the two by referring
		 * to the status characters.
		 */
		DMEMIT(" %llu/%llu",
		       (unsigned long long) sync,
		       (unsigned long long) rs->md.resync_max_sectors);

		/*
		 * See Documentation/device-mapper/dm-raid.txt for
		 * information on each of these states.
		 */
		DMEMIT(" %s", decipher_sync_action(&rs->md));

		/*
		 * resync_mismatches/mismatch_cnt
		 * This field shows the number of discrepancies found when
		 * performing a "check" of the array.
		 */
		DMEMIT(" %llu",
		       (strcmp(rs->md.last_sync_action, "check")) ? 0 :
		       (unsigned long long)
		       atomic64_read(&rs->md.resync_mismatches));
		break;
	case STATUSTYPE_TABLE:
		/* The string you would use to construct this array */
		for (i = 0; i < rs->md.raid_disks; i++) {
			if (_test_flag(CTR_FLAG_REBUILD, rs->ctr_flags) &&
			    rs->dev[i].data_dev &&
			    !test_bit(In_sync, &rs->dev[i].rdev.flags))
				raid_param_cnt += 2; /* for rebuilds */
			if (rs->dev[i].data_dev &&
			    test_bit(WriteMostly, &rs->dev[i].rdev.flags))
				raid_param_cnt += 2;
		}

		raid_param_cnt += (hweight32(rs->ctr_flags & ~CTR_FLAG_REBUILD) * 2);
		if (rs->ctr_flags & (CTR_FLAG_SYNC | CTR_FLAG_NOSYNC))
			raid_param_cnt--;

		DMEMIT("%s %u %u", rs->raid_type->name,
		       raid_param_cnt, rs->md.chunk_sectors);

		if (_test_flag(CTR_FLAG_SYNC, rs->ctr_flags) &&
		    rs->md.recovery_cp == MaxSector)
			DMEMIT(" sync");
		if (_test_flag(CTR_FLAG_NOSYNC, rs->ctr_flags))
			DMEMIT(" nosync");

		for (i = 0; i < rs->md.raid_disks; i++)
			if (_test_flag(CTR_FLAG_REBUILD, rs->ctr_flags) &&
			    rs->dev[i].data_dev &&
			    !test_bit(In_sync, &rs->dev[i].rdev.flags))
				DMEMIT(" rebuild %u", i);

		if (_test_flag(CTR_FLAG_DAEMON_SLEEP, rs->ctr_flags))
			DMEMIT(" daemon_sleep %lu",
			       rs->md.bitmap_info.daemon_sleep);

		if (_test_flag(CTR_FLAG_MIN_RECOVERY_RATE, rs->ctr_flags))
			DMEMIT(" min_recovery_rate %d", rs->md.sync_speed_min);

		if (_test_flag(CTR_FLAG_MAX_RECOVERY_RATE, rs->ctr_flags))
			DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max);

		for (i = 0; i < rs->md.raid_disks; i++)
			if (rs->dev[i].data_dev &&
			    test_bit(WriteMostly, &rs->dev[i].rdev.flags))
				DMEMIT(" write_mostly %u", i);

		if (_test_flag(CTR_FLAG_MAX_WRITE_BEHIND, rs->ctr_flags))
			DMEMIT(" max_write_behind %lu",
			       rs->md.bitmap_info.max_write_behind);

		if (_test_flag(CTR_FLAG_STRIPE_CACHE, rs->ctr_flags)) {
			struct r5conf *conf = rs->md.private;

			/* convert from kiB to sectors */
			DMEMIT(" stripe_cache %d",
			       conf ? conf->max_nr_stripes * 2 : 0);
		}

		if (_test_flag(CTR_FLAG_REGION_SIZE, rs->ctr_flags))
			DMEMIT(" region_size %lu",
			       rs->md.bitmap_info.chunksize >> 9);

		if (_test_flag(CTR_FLAG_RAID10_COPIES, rs->ctr_flags))
			DMEMIT(" raid10_copies %u",
			       raid10_md_layout_to_copies(rs->md.layout));

		if (_test_flag(CTR_FLAG_RAID10_FORMAT, rs->ctr_flags))
			DMEMIT(" raid10_format %s",
			       raid10_md_layout_to_format(rs->md.layout));

		DMEMIT(" %d", rs->md.raid_disks);
		for (i = 0; i < rs->md.raid_disks; i++) {
			if (rs->dev[i].meta_dev)
				DMEMIT(" %s", rs->dev[i].meta_dev->name);
			else
				DMEMIT(" -");

			if (rs->dev[i].data_dev)
				DMEMIT(" %s", rs->dev[i].data_dev->name);
			else
				DMEMIT(" -");
		}
	}
}
static int raid_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	if (!strcasecmp(argv[0], "reshape")) {
		DMERR("Reshape not supported.");
		return -EINVAL;
	}

	if (!mddev->pers || !mddev->pers->sync_request)
		return -EINVAL;

	if (!strcasecmp(argv[0], "frozen"))
		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	else
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);

	if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) {
		if (mddev->sync_thread) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			md_reap_sync_thread(mddev);
		}
	} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
		   test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
		return -EBUSY;
	else if (!strcasecmp(argv[0], "resync"))
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	else if (!strcasecmp(argv[0], "recover")) {
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	} else {
		if (!strcasecmp(argv[0], "check"))
			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		else if (strcasecmp(argv[0], "repair"))
			return -EINVAL;
		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	}
	if (mddev->ro == 2) {
		/* A write to sync_action is enough to justify
		 * canceling read-auto mode
		 */
		mddev->ro = 0;
		if (!mddev->suspended)
			md_wakeup_thread(mddev->sync_thread);
	}
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	if (!mddev->suspended)
		md_wakeup_thread(mddev->thread);

	return 0;
}
static int raid_iterate_devices(struct dm_target *ti,
				iterate_devices_callout_fn fn, void *data)
{
	struct raid_set *rs = ti->private;
	unsigned i;
	int r = 0;

	for (i = 0; !r && i < rs->md.raid_disks; i++)
		if (rs->dev[i].data_dev)
			r = fn(ti,
			       rs->dev[i].data_dev,
			       0, /* No offset on data devs */
			       rs->md.dev_sectors,
			       data);

	return r;
}

static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct raid_set *rs = ti->private;
	unsigned chunk_size = rs->md.chunk_sectors << 9;
	struct r5conf *conf = rs->md.private;

	blk_limits_io_min(limits, chunk_size);
	blk_limits_io_opt(limits, chunk_size * (conf->raid_disks - conf->max_degraded));
}
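/*
 * Worked example (editorial): with 64KiB chunks (chunk_sectors = 128)
 * on a 5-disk raid5, io_min is 65536 bytes and io_opt is
 * 65536 * (5 - 1) = 262144 bytes, i.e. one full stripe.
 */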
static void raid_presuspend(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	md_stop_writes(&rs->md);
}

static void raid_postsuspend(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	mddev_suspend(&rs->md);
}
static void attempt_restore_of_faulty_devices(struct raid_set *rs)
{
	int i;
	uint64_t failed_devices, cleared_failed_devices = 0;
	unsigned long flags;
	struct dm_raid_superblock *sb;
	struct md_rdev *r;

	for (i = 0; i < rs->md.raid_disks; i++) {
		r = &rs->dev[i].rdev;
		if (test_bit(Faulty, &r->flags) && r->sb_page &&
		    sync_page_io(r, 0, r->sb_size, r->sb_page, REQ_OP_READ, 0,
				 1)) {
			DMINFO("Faulty %s device #%d has readable super block.  Attempting to revive it.",
			       rs->raid_type->name, i);

			/*
			 * Faulty bit may be set, but sometimes the array can
			 * be suspended before the personalities can respond
			 * by removing the device from the array (i.e. calling
			 * 'hot_remove_disk').  If they haven't yet removed
			 * the failed device, its 'raid_disk' number will be
			 * '>= 0' - meaning we must call this function
			 * ourselves.
			 */
			if ((r->raid_disk >= 0) &&
			    (r->mddev->pers->hot_remove_disk(r->mddev, r) != 0))
				/* Failed to revive this device, try next */
				continue;

			r->raid_disk = i;
			r->saved_raid_disk = i;
			flags = r->flags;
			clear_bit(Faulty, &r->flags);
			clear_bit(WriteErrorSeen, &r->flags);
			clear_bit(In_sync, &r->flags);
			if (r->mddev->pers->hot_add_disk(r->mddev, r)) {
				r->raid_disk = -1;
				r->saved_raid_disk = -1;
				r->flags = flags;
			} else {
				r->recovery_offset = 0;
				cleared_failed_devices |= 1ULL << i;
			}
		}
	}
	if (cleared_failed_devices) {
		rdev_for_each(r, &rs->md) {
			sb = page_address(r->sb_page);
			failed_devices = le64_to_cpu(sb->failed_devices);
			failed_devices &= ~cleared_failed_devices;
			sb->failed_devices = cpu_to_le64(failed_devices);
		}
	}
}
static void raid_resume(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	if (!rt_is_raid0(rs->raid_type)) {
		set_bit(MD_CHANGE_DEVS, &rs->md.flags);

		if (!rs->bitmap_loaded) {
			bitmap_load(&rs->md);
			rs->bitmap_loaded = 1;
		} else {
			/*
			 * A secondary resume while the device is active.
			 * Take this opportunity to check whether any failed
			 * devices are reachable again.
			 */
			attempt_restore_of_faulty_devices(rs);
		}

		clear_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
	}

	mddev_resume(&rs->md);
}
static struct target_type raid_target = {
	.name = "raid",
	.version = {1, 8, 1},
	.module = THIS_MODULE,
	.ctr = raid_ctr,
	.dtr = raid_dtr,
	.map = raid_map,
	.status = raid_status,
	.message = raid_message,
	.iterate_devices = raid_iterate_devices,
	.io_hints = raid_io_hints,
	.presuspend = raid_presuspend,
	.postsuspend = raid_postsuspend,
	.resume = raid_resume,
};
static int __init dm_raid_init(void)
{
	DMINFO("Loading target version %u.%u.%u",
	       raid_target.version[0],
	       raid_target.version[1],
	       raid_target.version[2]);

	return dm_register_target(&raid_target);
}

static void __exit dm_raid_exit(void)
{
	dm_unregister_target(&raid_target);
}

module_init(dm_raid_init);
module_exit(dm_raid_exit);
module_param(devices_handle_discard_safely, bool, 0644);
MODULE_PARM_DESC(devices_handle_discard_safely,
		 "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");

MODULE_DESCRIPTION(DM_NAME " raid0/1/10/4/5/6 target");
MODULE_ALIAS("dm-raid1");
MODULE_ALIAS("dm-raid10");
MODULE_ALIAS("dm-raid4");
MODULE_ALIAS("dm-raid5");
MODULE_ALIAS("dm-raid6");
MODULE_AUTHOR("Neil Brown <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");