/*
 * Copyright (C) 2010-2011 Neil Brown
 * Copyright (C) 2010-2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/slab.h>
#include <linux/module.h>

#include "md.h"
#include "raid1.h"
#include "raid5.h"
#include "raid10.h"
#include "bitmap.h"

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "raid"
#define MAX_RAID_DEVICES 253 /* md-raid kernel limit */

/*
 * Minimum sectors of free reshape space per raid device
 */
#define MIN_FREE_RESHAPE_SPACE to_sector(4*4096)
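/* to_sector(4*4096) evaluates to 32 sectors, i.e. 16 KiB of out-of-place reshape headroom per device */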
static bool devices_handle_discard_safely = false;
/*
 * The following flags are used by dm-raid.c to set up the array state.
 * They must be cleared before md_run is called.
 */
#define FirstUse 10 /* rdev flag */
struct raid_dev {
        /*
         * Two DM devices, one to hold metadata and one to hold the
         * actual data/parity. The reason for this is to not confuse
         * ti->len and give more flexibility in altering size and
         * characteristics.
         *
         * While it is possible for this device to be associated
         * with a different physical device than the data_dev, it
         * is intended for it to be the same.
         *    |--------- Physical Device ---------|
         *    |- meta_dev -|------ data_dev ------|
         */
        struct dm_dev *meta_dev;
        struct dm_dev *data_dev;
        struct md_rdev rdev;
};
/*
 * Bits for establishing rs->ctr_flags
 *
 * 1 = no flag value
 * 2 = flag with value
 */
#define __CTR_FLAG_SYNC                 0  /* 1 */ /* Not with raid0! */
#define __CTR_FLAG_NOSYNC               1  /* 1 */ /* Not with raid0! */
#define __CTR_FLAG_REBUILD              2  /* 2 */ /* Not with raid0! */
#define __CTR_FLAG_DAEMON_SLEEP         3  /* 2 */ /* Not with raid0! */
#define __CTR_FLAG_MIN_RECOVERY_RATE    4  /* 2 */ /* Not with raid0! */
#define __CTR_FLAG_MAX_RECOVERY_RATE    5  /* 2 */ /* Not with raid0! */
#define __CTR_FLAG_MAX_WRITE_BEHIND     6  /* 2 */ /* Only with raid1! */
#define __CTR_FLAG_WRITE_MOSTLY         7  /* 2 */ /* Only with raid1! */
#define __CTR_FLAG_STRIPE_CACHE         8  /* 2 */ /* Only with raid4/5/6! */
#define __CTR_FLAG_REGION_SIZE          9  /* 2 */ /* Not with raid0! */
#define __CTR_FLAG_RAID10_COPIES        10 /* 2 */ /* Only with raid10 */
#define __CTR_FLAG_RAID10_FORMAT        11 /* 2 */ /* Only with raid10 */
/* New for v1.9.0 */
#define __CTR_FLAG_DELTA_DISKS          12 /* 2 */ /* Only with reshapable raid4/5/6/10! */
#define __CTR_FLAG_DATA_OFFSET          13 /* 2 */ /* Only with reshapable raid4/5/6/10! */
#define __CTR_FLAG_RAID10_USE_NEAR_SETS 14 /* 2 */ /* Only with raid10! */
/*
 * Flags for rs->ctr_flags field.
 */
#define CTR_FLAG_SYNC                   (1 << __CTR_FLAG_SYNC)
#define CTR_FLAG_NOSYNC                 (1 << __CTR_FLAG_NOSYNC)
#define CTR_FLAG_REBUILD                (1 << __CTR_FLAG_REBUILD)
#define CTR_FLAG_DAEMON_SLEEP           (1 << __CTR_FLAG_DAEMON_SLEEP)
#define CTR_FLAG_MIN_RECOVERY_RATE      (1 << __CTR_FLAG_MIN_RECOVERY_RATE)
#define CTR_FLAG_MAX_RECOVERY_RATE      (1 << __CTR_FLAG_MAX_RECOVERY_RATE)
#define CTR_FLAG_MAX_WRITE_BEHIND       (1 << __CTR_FLAG_MAX_WRITE_BEHIND)
#define CTR_FLAG_WRITE_MOSTLY           (1 << __CTR_FLAG_WRITE_MOSTLY)
#define CTR_FLAG_STRIPE_CACHE           (1 << __CTR_FLAG_STRIPE_CACHE)
#define CTR_FLAG_REGION_SIZE            (1 << __CTR_FLAG_REGION_SIZE)
#define CTR_FLAG_RAID10_COPIES          (1 << __CTR_FLAG_RAID10_COPIES)
#define CTR_FLAG_RAID10_FORMAT          (1 << __CTR_FLAG_RAID10_FORMAT)
#define CTR_FLAG_DELTA_DISKS            (1 << __CTR_FLAG_DELTA_DISKS)
#define CTR_FLAG_DATA_OFFSET            (1 << __CTR_FLAG_DATA_OFFSET)
#define CTR_FLAG_RAID10_USE_NEAR_SETS   (1 << __CTR_FLAG_RAID10_USE_NEAR_SETS)
/*
 * Definitions of various constructor flags to
 * be used in checks of valid / invalid flags
 * per raid level.
 */

/* Define all "any sync" flags */
#define CTR_FLAGS_ANY_SYNC              (CTR_FLAG_SYNC | CTR_FLAG_NOSYNC)

/* Define flags for options without argument (e.g. 'nosync') */
#define CTR_FLAG_OPTIONS_NO_ARGS        (CTR_FLAGS_ANY_SYNC | \
                                         CTR_FLAG_RAID10_USE_NEAR_SETS)

/* Define flags for options with one argument (e.g. 'delta_disks +2') */
#define CTR_FLAG_OPTIONS_ONE_ARG (CTR_FLAG_REBUILD | \
                                  CTR_FLAG_WRITE_MOSTLY | \
                                  CTR_FLAG_DAEMON_SLEEP | \
                                  CTR_FLAG_MIN_RECOVERY_RATE | \
                                  CTR_FLAG_MAX_RECOVERY_RATE | \
                                  CTR_FLAG_MAX_WRITE_BEHIND | \
                                  CTR_FLAG_STRIPE_CACHE | \
                                  CTR_FLAG_REGION_SIZE | \
                                  CTR_FLAG_RAID10_COPIES | \
                                  CTR_FLAG_RAID10_FORMAT | \
                                  CTR_FLAG_DELTA_DISKS | \
                                  CTR_FLAG_DATA_OFFSET)
/* Valid options definitions per raid level... */

/* "raid0" does only accept data offset */
#define RAID0_VALID_FLAGS       (CTR_FLAG_DATA_OFFSET)

/* "raid1" does not accept stripe cache, delta_disks or any raid10 options */
#define RAID1_VALID_FLAGS       (CTR_FLAGS_ANY_SYNC | \
                                 CTR_FLAG_REBUILD | \
                                 CTR_FLAG_WRITE_MOSTLY | \
                                 CTR_FLAG_DAEMON_SLEEP | \
                                 CTR_FLAG_MIN_RECOVERY_RATE | \
                                 CTR_FLAG_MAX_RECOVERY_RATE | \
                                 CTR_FLAG_MAX_WRITE_BEHIND | \
                                 CTR_FLAG_REGION_SIZE | \
                                 CTR_FLAG_DATA_OFFSET)

/* "raid10" does not accept any raid1 or stripe cache options */
#define RAID10_VALID_FLAGS      (CTR_FLAGS_ANY_SYNC | \
                                 CTR_FLAG_REBUILD | \
                                 CTR_FLAG_DAEMON_SLEEP | \
                                 CTR_FLAG_MIN_RECOVERY_RATE | \
                                 CTR_FLAG_MAX_RECOVERY_RATE | \
                                 CTR_FLAG_REGION_SIZE | \
                                 CTR_FLAG_RAID10_COPIES | \
                                 CTR_FLAG_RAID10_FORMAT | \
                                 CTR_FLAG_DELTA_DISKS | \
                                 CTR_FLAG_DATA_OFFSET | \
                                 CTR_FLAG_RAID10_USE_NEAR_SETS)

/*
 * "raid4/5/6" do not accept any raid1 or raid10 specific options
 *
 * "raid6" does not accept "nosync", because it is not guaranteed
 * that both parity and q-syndrome are being written properly with
 * any writes
 */
#define RAID45_VALID_FLAGS      (CTR_FLAGS_ANY_SYNC | \
                                 CTR_FLAG_REBUILD | \
                                 CTR_FLAG_DAEMON_SLEEP | \
                                 CTR_FLAG_MIN_RECOVERY_RATE | \
                                 CTR_FLAG_MAX_RECOVERY_RATE | \
                                 CTR_FLAG_MAX_WRITE_BEHIND | \
                                 CTR_FLAG_STRIPE_CACHE | \
                                 CTR_FLAG_REGION_SIZE | \
                                 CTR_FLAG_DELTA_DISKS | \
                                 CTR_FLAG_DATA_OFFSET)

#define RAID6_VALID_FLAGS       (CTR_FLAG_SYNC | \
                                 CTR_FLAG_REBUILD | \
                                 CTR_FLAG_DAEMON_SLEEP | \
                                 CTR_FLAG_MIN_RECOVERY_RATE | \
                                 CTR_FLAG_MAX_RECOVERY_RATE | \
                                 CTR_FLAG_MAX_WRITE_BEHIND | \
                                 CTR_FLAG_STRIPE_CACHE | \
                                 CTR_FLAG_REGION_SIZE | \
                                 CTR_FLAG_DELTA_DISKS | \
                                 CTR_FLAG_DATA_OFFSET)
/* ...valid options definitions per raid level */
/*
 * Flags for rs->runtime_flags field
 * (RT_FLAG prefix meaning "runtime flag")
 *
 * These are all internal and used to define runtime state,
 * e.g. to prevent another resume from preresume processing
 * the raid set all over again.
 */
#define RT_FLAG_RS_PRERESUMED           0
#define RT_FLAG_RS_RESUMED              1
#define RT_FLAG_RS_BITMAP_LOADED        2
#define RT_FLAG_UPDATE_SBS              3
#define RT_FLAG_RESHAPE_RS              4
#define RT_FLAG_KEEP_RS_FROZEN          5

/* Array elements of 64 bit needed for rebuild/write_mostly bits */
#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)
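/*
 * With MAX_RAID_DEVICES == 253 this works out to (253 + 63) / 64 == 4
 * 64-bit words, i.e. enough bits to track up to 256 devices.
 */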
/*
 * raid set level, layout and chunk sectors backup/restore
 */
struct rs_layout {
        int new_level;
        int new_layout;
        int new_chunk_sectors;
};

struct raid_set {
        struct dm_target *ti;

        uint32_t bitmap_loaded;
        uint32_t stripe_cache_entries;
        unsigned long ctr_flags;
        unsigned long runtime_flags;

        uint64_t rebuild_disks[DISKS_ARRAY_ELEMS];

        int raid_disks;
        int delta_disks;
        int data_offset;
        int raid10_copies;
        int requested_bitmap_chunk_sectors;

        struct mddev md;
        struct raid_type *raid_type;
        struct dm_target_callbacks callbacks;

        struct raid_dev dev[0];
};
static void rs_config_backup(struct raid_set *rs, struct rs_layout *l)
{
        struct mddev *mddev = &rs->md;

        l->new_level = mddev->new_level;
        l->new_layout = mddev->new_layout;
        l->new_chunk_sectors = mddev->new_chunk_sectors;
}
static void rs_config_restore(struct raid_set *rs, struct rs_layout *l)
{
        struct mddev *mddev = &rs->md;

        mddev->new_level = l->new_level;
        mddev->new_layout = l->new_layout;
        mddev->new_chunk_sectors = l->new_chunk_sectors;
}
/* raid10 algorithms (i.e. formats) */
#define ALGORITHM_RAID10_DEFAULT        0
#define ALGORITHM_RAID10_NEAR           1
#define ALGORITHM_RAID10_OFFSET         2
#define ALGORITHM_RAID10_FAR            3
/* Supported raid types and properties. */
static struct raid_type {
        const char *name;               /* RAID algorithm. */
        const char *descr;              /* Descriptor text for logging. */
        const unsigned parity_devs;     /* # of parity devices. */
        const unsigned minimal_devs;    /* minimal # of devices in set. */
        const unsigned level;           /* RAID level. */
        const unsigned algorithm;       /* RAID algorithm. */
} raid_types[] = {
        {"raid0",         "raid0 (striping)",                       0, 2, 0,  0 /* NONE */},
        {"raid1",         "raid1 (mirroring)",                      0, 2, 1,  0 /* NONE */},
        {"raid10_far",    "raid10 far (striped mirrors)",           0, 2, 10, ALGORITHM_RAID10_FAR},
        {"raid10_offset", "raid10 offset (striped mirrors)",        0, 2, 10, ALGORITHM_RAID10_OFFSET},
        {"raid10_near",   "raid10 near (striped mirrors)",          0, 2, 10, ALGORITHM_RAID10_NEAR},
        {"raid10",        "raid10 (striped mirrors)",               0, 2, 10, ALGORITHM_RAID10_DEFAULT},
        {"raid4",         "raid4 (dedicated last parity disk)",     1, 2, 4,  ALGORITHM_PARITY_N}, /* raid4 layout = raid5_n */
        {"raid5_n",       "raid5 (dedicated last parity disk)",     1, 2, 5,  ALGORITHM_PARITY_N},
        {"raid5_ls",      "raid5 (left symmetric)",                 1, 2, 5,  ALGORITHM_LEFT_SYMMETRIC},
        {"raid5_rs",      "raid5 (right symmetric)",                1, 2, 5,  ALGORITHM_RIGHT_SYMMETRIC},
        {"raid5_la",      "raid5 (left asymmetric)",                1, 2, 5,  ALGORITHM_LEFT_ASYMMETRIC},
        {"raid5_ra",      "raid5 (right asymmetric)",               1, 2, 5,  ALGORITHM_RIGHT_ASYMMETRIC},
        {"raid6_zr",      "raid6 (zero restart)",                   2, 4, 6,  ALGORITHM_ROTATING_ZERO_RESTART},
        {"raid6_nr",      "raid6 (N restart)",                      2, 4, 6,  ALGORITHM_ROTATING_N_RESTART},
        {"raid6_nc",      "raid6 (N continue)",                     2, 4, 6,  ALGORITHM_ROTATING_N_CONTINUE},
        {"raid6_n_6",     "raid6 (dedicated parity/Q n/6)",         2, 4, 6,  ALGORITHM_PARITY_N_6},
        {"raid6_ls_6",    "raid6 (left symmetric dedicated Q 6)",   2, 4, 6,  ALGORITHM_LEFT_SYMMETRIC_6},
        {"raid6_rs_6",    "raid6 (right symmetric dedicated Q 6)",  2, 4, 6,  ALGORITHM_RIGHT_SYMMETRIC_6},
        {"raid6_la_6",    "raid6 (left asymmetric dedicated Q 6)",  2, 4, 6,  ALGORITHM_LEFT_ASYMMETRIC_6},
        {"raid6_ra_6",    "raid6 (right asymmetric dedicated Q 6)", 2, 4, 6,  ALGORITHM_RIGHT_ASYMMETRIC_6}
};
/* True, if @v is in inclusive range [@min, @max] */
static bool __within_range(long v, long min, long max)
{
        return v >= min && v <= max;
}
/* All table line arguments are defined here */
static struct arg_name_flag {
        const unsigned long flag;
        const char *name;
} __arg_name_flags[] = {
        { CTR_FLAG_SYNC, "sync"},
        { CTR_FLAG_NOSYNC, "nosync"},
        { CTR_FLAG_REBUILD, "rebuild"},
        { CTR_FLAG_DAEMON_SLEEP, "daemon_sleep"},
        { CTR_FLAG_MIN_RECOVERY_RATE, "min_recovery_rate"},
        { CTR_FLAG_MAX_RECOVERY_RATE, "max_recovery_rate"},
        { CTR_FLAG_MAX_WRITE_BEHIND, "max_write_behind"},
        { CTR_FLAG_WRITE_MOSTLY, "write_mostly"},
        { CTR_FLAG_STRIPE_CACHE, "stripe_cache"},
        { CTR_FLAG_REGION_SIZE, "region_size"},
        { CTR_FLAG_RAID10_COPIES, "raid10_copies"},
        { CTR_FLAG_RAID10_FORMAT, "raid10_format"},
        { CTR_FLAG_DATA_OFFSET, "data_offset"},
        { CTR_FLAG_DELTA_DISKS, "delta_disks"},
        { CTR_FLAG_RAID10_USE_NEAR_SETS, "raid10_use_near_sets"},
};
/* Return argument name string for given @flag */
static const char *dm_raid_arg_name_by_flag(const uint32_t flag)
{
        if (hweight32(flag) == 1) {
                struct arg_name_flag *anf = __arg_name_flags + ARRAY_SIZE(__arg_name_flags);

                while (anf-- > __arg_name_flags)
                        if (flag & anf->flag)
                                return anf->name;

        } else
                DMERR("%s called with more than one flag!", __func__);

        return NULL;
}
/*
 * bool helpers to test for various raid levels of a raid set,
 * i.e. its level as reported by the superblock rather than
 * the requested raid_type passed to the constructor.
 */
/* Return true, if raid set in @rs is raid0 */
static bool rs_is_raid0(struct raid_set *rs)
{
        return !rs->md.level;
}

/* Return true, if raid set in @rs is raid1 */
static bool rs_is_raid1(struct raid_set *rs)
{
        return rs->md.level == 1;
}

/* Return true, if raid set in @rs is raid10 */
static bool rs_is_raid10(struct raid_set *rs)
{
        return rs->md.level == 10;
}

/* Return true, if raid set in @rs is level 4, 5 or 6 */
static bool rs_is_raid456(struct raid_set *rs)
{
        return __within_range(rs->md.level, 4, 6);
}

/* Return true, if raid set in @rs is reshapable */
static unsigned int __is_raid10_far(int layout);
static bool rs_is_reshapable(struct raid_set *rs)
{
        return rs_is_raid456(rs) ||
               (rs_is_raid10(rs) && !__is_raid10_far(rs->md.new_layout));
}

/* Return true, if raid set in @rs is recovering */
static bool rs_is_recovering(struct raid_set *rs)
{
        return rs->md.recovery_cp != MaxSector;
}

/* Return true, if raid set in @rs is reshaping */
static bool rs_is_reshaping(struct raid_set *rs)
{
        return rs->md.reshape_position != MaxSector;
}
/*
 * bool helpers to test for various raid levels of a raid type
 */

/* Return true, if raid type in @rt is raid0 */
static bool rt_is_raid0(struct raid_type *rt)
{
        return !rt->level;
}

/* Return true, if raid type in @rt is raid1 */
static bool rt_is_raid1(struct raid_type *rt)
{
        return rt->level == 1;
}

/* Return true, if raid type in @rt is raid10 */
static bool rt_is_raid10(struct raid_type *rt)
{
        return rt->level == 10;
}

/* Return true, if raid type in @rt is raid4/5 */
static bool rt_is_raid45(struct raid_type *rt)
{
        return __within_range(rt->level, 4, 5);
}

/* Return true, if raid type in @rt is raid6 */
static bool rt_is_raid6(struct raid_type *rt)
{
        return rt->level == 6;
}

/* Return true, if raid type in @rt is raid4/5/6 */
static bool rt_is_raid456(struct raid_type *rt)
{
        return __within_range(rt->level, 4, 6);
}
/* END: raid level bools */
/* Return valid ctr flags for the raid level of @rs */
static unsigned long __valid_flags(struct raid_set *rs)
{
        if (rt_is_raid0(rs->raid_type))
                return RAID0_VALID_FLAGS;
        else if (rt_is_raid1(rs->raid_type))
                return RAID1_VALID_FLAGS;
        else if (rt_is_raid10(rs->raid_type))
                return RAID10_VALID_FLAGS;
        else if (rt_is_raid45(rs->raid_type))
                return RAID45_VALID_FLAGS;
        else if (rt_is_raid6(rs->raid_type))
                return RAID6_VALID_FLAGS;

        return ~0;
}
/*
 * Check for valid flags set on @rs
 *
 * Has to be called after parsing of the ctr flags!
 */
static int rs_check_for_valid_flags(struct raid_set *rs)
{
        if (rs->ctr_flags & ~__valid_flags(rs)) {
                rs->ti->error = "Invalid flags combination";
                return -EINVAL;
        }

        return 0;
}
/* MD raid10 bit definitions and helpers */
#define RAID10_OFFSET                   (1 << 16) /* stripes with data copies are adjacent on devices */
#define RAID10_BROCKEN_USE_FAR_SETS     (1 << 17) /* Broken in raid10.c: use sets instead of whole stripe rotation */
#define RAID10_USE_FAR_SETS             (1 << 18) /* Use sets instead of whole stripe rotation */
#define RAID10_FAR_COPIES_SHIFT         8         /* raid10 # far copies shift (2nd byte of layout) */
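/*
 * Worked example of the md raid10 layout word as assembled by
 * raid10_format_to_md_layout() below: near copies live in byte 0,
 * far copies in byte 1 and the offset/far-set bits above them.
 * E.g. "near" with 2 copies yields 0x102 (f=1, n=2), while "offset"
 * with 2 copies yields 0x10201 (RAID10_OFFSET, f=2, n=1), plus
 * RAID10_USE_FAR_SETS unless 'raid10_use_near_sets' was requested.
 */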
/* Return md raid10 near copies for @layout */
static unsigned int __raid10_near_copies(int layout)
{
        return layout & 0xFF;
}

/* Return md raid10 far copies for @layout */
static unsigned int __raid10_far_copies(int layout)
{
        return __raid10_near_copies(layout >> RAID10_FAR_COPIES_SHIFT);
}

/* Return true if md raid10 offset for @layout */
static unsigned int __is_raid10_offset(int layout)
{
        return layout & RAID10_OFFSET;
}

/* Return true if md raid10 near for @layout */
static unsigned int __is_raid10_near(int layout)
{
        return !__is_raid10_offset(layout) && __raid10_near_copies(layout) > 1;
}

/* Return true if md raid10 far for @layout */
static unsigned int __is_raid10_far(int layout)
{
        return !__is_raid10_offset(layout) && __raid10_far_copies(layout) > 1;
}
/* Return md raid10 layout string for @layout */
static const char *raid10_md_layout_to_format(int layout)
{
        /*
         * Bit 16 stands for "offset"
         * (i.e. adjacent stripes hold copies)
         *
         * Refer to MD's raid10.c for details
         */
        if (__is_raid10_offset(layout))
                return "offset";

        if (__raid10_near_copies(layout) > 1)
                return "near";

        WARN_ON(__raid10_far_copies(layout) < 2);

        return "far";
}
/* Return md raid10 algorithm for @name */
static int raid10_name_to_format(const char *name)
{
        if (!strcasecmp(name, "near"))
                return ALGORITHM_RAID10_NEAR;
        else if (!strcasecmp(name, "offset"))
                return ALGORITHM_RAID10_OFFSET;
        else if (!strcasecmp(name, "far"))
                return ALGORITHM_RAID10_FAR;

        return -EINVAL;
}
/* Return md raid10 copies for @layout */
static unsigned int raid10_md_layout_to_copies(int layout)
{
        return __raid10_near_copies(layout) > 1 ?
                __raid10_near_copies(layout) : __raid10_far_copies(layout);
}
/* Return md raid10 format id for @format string */
static int raid10_format_to_md_layout(struct raid_set *rs,
                                      unsigned int algorithm,
                                      unsigned int copies)
{
        unsigned int n = 1, f = 1, r = 0;

        /*
         * MD resilience flaw:
         *
         * enabling use_far_sets for far/offset formats causes copies
         * to be colocated on the same devs together with their origins!
         *
         * -> disable it for now in the definition above
         */
        if (algorithm == ALGORITHM_RAID10_DEFAULT ||
            algorithm == ALGORITHM_RAID10_NEAR)
                n = copies;

        else if (algorithm == ALGORITHM_RAID10_OFFSET) {
                f = copies;
                r = RAID10_OFFSET;
                if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags))
                        r |= RAID10_USE_FAR_SETS;

        } else if (algorithm == ALGORITHM_RAID10_FAR) {
                f = copies;
                r = !RAID10_OFFSET;
                if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags))
                        r |= RAID10_USE_FAR_SETS;

        } else
                return -EINVAL;

        return r | (f << RAID10_FAR_COPIES_SHIFT) | n;
}
/* END: MD raid10 bit definitions and helpers */

/* Check for any of the raid10 algorithms */
static int __got_raid10(struct raid_type *rtp, const int layout)
{
        if (rtp->level == 10) {
                switch (rtp->algorithm) {
                case ALGORITHM_RAID10_DEFAULT:
                case ALGORITHM_RAID10_NEAR:
                        return __is_raid10_near(layout);
                case ALGORITHM_RAID10_OFFSET:
                        return __is_raid10_offset(layout);
                case ALGORITHM_RAID10_FAR:
                        return __is_raid10_far(layout);
                default:
                        break;
                }
        }

        return 0;
}
/* Return raid_type for @name */
static struct raid_type *get_raid_type(const char *name)
{
        struct raid_type *rtp = raid_types + ARRAY_SIZE(raid_types);

        while (rtp-- > raid_types)
                if (!strcasecmp(rtp->name, name))
                        return rtp;

        return NULL;
}
/* Return raid_type derived from @level and @layout */
static struct raid_type *get_raid_type_by_ll(const int level, const int layout)
{
        struct raid_type *rtp = raid_types + ARRAY_SIZE(raid_types);

        while (rtp-- > raid_types) {
                /* RAID10 special checks based on @layout flags/properties */
                if (rtp->level == level &&
                    (__got_raid10(rtp, layout) || rtp->algorithm == layout))
                        return rtp;
        }

        return NULL;
}
/*
 * Conditionally change bdev capacity of @rs
 * in case of a disk add/remove reshape
 */
static void rs_set_capacity(struct raid_set *rs)
{
        struct mddev *mddev = &rs->md;

        /* Make sure we access most actual mddev properties */
        smp_rmb();
        if (rs->ti->len != mddev->array_sectors && !rs_is_reshaping(rs)) {
                struct gendisk *gendisk = dm_disk(dm_table_get_md(rs->ti->table));

                set_capacity(gendisk, mddev->array_sectors);
                revalidate_disk(gendisk);
        }
}
/*
 * Set the mddev properties in @rs to the current
 * ones retrieved from the freshest superblock
 */
static void rs_set_cur(struct raid_set *rs)
{
        struct mddev *mddev = &rs->md;

        mddev->new_level = mddev->level;
        mddev->new_layout = mddev->layout;
        mddev->new_chunk_sectors = mddev->chunk_sectors;
}
/*
 * Set the mddev properties in @rs to the new
 * ones requested by the ctr
 */
static void rs_set_new(struct raid_set *rs)
{
        struct mddev *mddev = &rs->md;

        mddev->level = mddev->new_level;
        mddev->layout = mddev->new_layout;
        mddev->chunk_sectors = mddev->new_chunk_sectors;
        mddev->raid_disks = rs->raid_disks;
        mddev->delta_disks = 0;
}
static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *raid_type,
                                       unsigned raid_devs)
{
        unsigned i;
        struct raid_set *rs;

        if (raid_devs <= raid_type->parity_devs) {
                ti->error = "Insufficient number of devices";
                return ERR_PTR(-EINVAL);
        }

        rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL);
        if (!rs) {
                ti->error = "Cannot allocate raid context";
                return ERR_PTR(-ENOMEM);
        }

        mddev_init(&rs->md);

        rs->raid_disks = raid_devs;
        rs->delta_disks = 0;

        rs->ti = ti;
        rs->raid_type = raid_type;
        rs->stripe_cache_entries = 256;
        rs->md.raid_disks = raid_devs;
        rs->md.level = raid_type->level;
        rs->md.new_level = rs->md.level;
        rs->md.layout = raid_type->algorithm;
        rs->md.new_layout = rs->md.layout;
        rs->md.delta_disks = 0;
        rs->md.recovery_cp = rs_is_raid0(rs) ? MaxSector : 0;

        for (i = 0; i < raid_devs; i++)
                md_rdev_init(&rs->dev[i].rdev);

        /*
         * Remaining items to be initialized by further RAID params:
         *  rs->md.persistent
         *  rs->md.external
         *  rs->md.chunk_sectors
         *  rs->md.new_chunk_sectors
         *  rs->md.dev_sectors
         */

        return rs;
}
static void raid_set_free(struct raid_set *rs)
{
        int i;

        for (i = 0; i < rs->md.raid_disks; i++) {
                if (rs->dev[i].meta_dev)
                        dm_put_device(rs->ti, rs->dev[i].meta_dev);
                md_rdev_clear(&rs->dev[i].rdev);
                if (rs->dev[i].data_dev)
                        dm_put_device(rs->ti, rs->dev[i].data_dev);
        }

        kfree(rs);
}
/*
 * For every device we have two words
 *  <meta_dev>: meta device name or '-' if missing
 *  <data_dev>: data device name or '-' if missing
 *
 * The following are permitted:
 *    - -
 *    - <data_dev>
 *    <meta_dev> <data_dev>
 *
 * The following is not allowed:
 *    <meta_dev> -
 *
 * This code parses those words. If there is a failure,
 * the caller must use raid_set_free() to unwind the operations.
 */
static int parse_dev_params(struct raid_set *rs, struct dm_arg_set *as)
{
        int i;
        int rebuild = 0;
        int metadata_available = 0;
        int r = 0;
        const char *arg;

        /* Put off the number of raid devices argument to get to dev pairs */
        arg = dm_shift_arg(as);
        if (!arg)
                return -EINVAL;

        for (i = 0; i < rs->md.raid_disks; i++) {
                rs->dev[i].rdev.raid_disk = i;

                rs->dev[i].meta_dev = NULL;
                rs->dev[i].data_dev = NULL;

                /*
                 * There are no offsets, since there is a separate device
                 * for data and metadata.
                 */
                rs->dev[i].rdev.data_offset = 0;
                rs->dev[i].rdev.mddev = &rs->md;

                arg = dm_shift_arg(as);
                if (!arg)
                        return -EINVAL;

                if (strcmp(arg, "-")) {
                        r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
                                          &rs->dev[i].meta_dev);
                        if (r) {
                                rs->ti->error = "RAID metadata device lookup failure";
                                return r;
                        }

                        rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL);
                        if (!rs->dev[i].rdev.sb_page) {
                                rs->ti->error = "Failed to allocate superblock page";
                                return -ENOMEM;
                        }
                }

                arg = dm_shift_arg(as);
                if (!arg)
                        return -EINVAL;

                if (!strcmp(arg, "-")) {
                        if (!test_bit(In_sync, &rs->dev[i].rdev.flags) &&
                            (!rs->dev[i].rdev.recovery_offset)) {
                                rs->ti->error = "Drive designated for rebuild not specified";
                                return -EINVAL;
                        }

                        if (rs->dev[i].meta_dev) {
                                rs->ti->error = "No data device supplied with metadata device";
                                return -EINVAL;
                        }

                        continue;
                }

                r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
                                  &rs->dev[i].data_dev);
                if (r) {
                        rs->ti->error = "RAID device lookup failure";
                        return r;
                }

                if (rs->dev[i].meta_dev) {
                        metadata_available = 1;
                        rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev;
                }
                rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
                list_add_tail(&rs->dev[i].rdev.same_set, &rs->md.disks);
                if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
                        rebuild++;
        }

        if (metadata_available) {
                rs->md.external = 0;
                rs->md.persistent = 1;
                rs->md.major_version = 2;
        } else if (rebuild && !rs->md.recovery_cp) {
                /*
                 * Without metadata, we will not be able to tell if the array
                 * is in-sync or not - we must assume it is not. Therefore,
                 * it is impossible to rebuild a drive.
                 *
                 * Even if there is metadata, the on-disk information may
                 * indicate that the array is not in-sync and it will then
                 * fail at that time.
                 *
                 * User could specify 'nosync' option if desperate.
                 */
                rs->ti->error = "Unable to rebuild drive while array is not in-sync";
                return -EINVAL;
        }

        return 0;
}
/*
 * validate_region_size
 * @rs
 * @region_size:  region size in sectors.  If 0, pick a size (4MiB default).
 *
 * Set rs->md.bitmap_info.chunksize (which really refers to 'region size').
 * Ensure that (ti->len/region_size < 2^21) - required by MD bitmap.
 *
 * Returns: 0 on success, -EINVAL on failure.
 */
static int validate_region_size(struct raid_set *rs, unsigned long region_size)
{
        unsigned long min_region_size = rs->ti->len / (1 << 21);

        if (!region_size) {
                /*
                 * Choose a reasonable default. All figures in sectors.
                 */
                if (min_region_size > (1 << 13)) {
                        /* If not a power of 2, make it the next power of 2 */
                        region_size = roundup_pow_of_two(min_region_size);
                        DMINFO("Choosing default region size of %lu sectors",
                               region_size);
                } else {
                        DMINFO("Choosing default region size of 4MiB");
                        region_size = 1 << 13; /* sectors */
                }
        } else {
                /*
                 * Validate user-supplied value.
                 */
                if (region_size > rs->ti->len) {
                        rs->ti->error = "Supplied region size is too large";
                        return -EINVAL;
                }

                if (region_size < min_region_size) {
                        DMERR("Supplied region_size (%lu sectors) below minimum (%lu)",
                              region_size, min_region_size);
                        rs->ti->error = "Supplied region size is too small";
                        return -EINVAL;
                }

                if (!is_power_of_2(region_size)) {
                        rs->ti->error = "Region size is not a power of 2";
                        return -EINVAL;
                }

                if (region_size < rs->md.chunk_sectors) {
                        rs->ti->error = "Region size is smaller than the chunk size";
                        return -EINVAL;
                }
        }

        /*
         * Convert sectors to bytes.
         */
        rs->md.bitmap_info.chunksize = (region_size << 9);

        return 0;
}
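/*
 * Example of the sizing math above: a 1 TiB target is 2^31 sectors, so
 * min_region_size = 2^31 / 2^21 = 2^10 sectors; that is below the 2^13
 * threshold, the 4MiB (2^13 sector) default applies and the MD bitmap
 * ends up with 2^31 / 2^13 = 2^18 regions, well under the 2^21 cap.
 */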
/*
 * validate_raid_redundancy
 * @rs
 *
 * Determine if there are enough devices in the array that haven't
 * failed (or are being rebuilt) to form a usable array.
 *
 * Returns: 0 on success, -EINVAL on failure.
 */
static int validate_raid_redundancy(struct raid_set *rs)
{
        unsigned i, rebuild_cnt = 0;
        unsigned rebuilds_per_group = 0, copies;
        unsigned group_size, last_group_start;

        for (i = 0; i < rs->md.raid_disks; i++)
                if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
                    !rs->dev[i].rdev.sb_page)
                        rebuild_cnt++;

        switch (rs->raid_type->level) {
        case 1:
                if (rebuild_cnt >= rs->md.raid_disks)
                        goto too_many;
                break;
        case 4:
        case 5:
        case 6:
                if (rebuild_cnt > rs->raid_type->parity_devs)
                        goto too_many;
                break;
        case 10:
                copies = raid10_md_layout_to_copies(rs->md.new_layout);
                if (rebuild_cnt < copies)
                        break;

                /*
                 * It is possible to have a higher rebuild count for RAID10,
                 * as long as the failed devices occur in different mirror
                 * groups (i.e. different stripes).
                 *
                 * When checking "near" format, make sure no adjacent devices
                 * have failed beyond what can be handled. In addition to the
                 * simple case where the number of devices is a multiple of the
                 * number of copies, we must also handle cases where the number
                 * of devices is not a multiple of the number of copies.
                 * E.g.    dev1 dev2 dev3 dev4 dev5
                 *          A    A    B    B    C
                 *          C    D    D    E    E
                 */
                if (__is_raid10_near(rs->md.new_layout)) {
                        for (i = 0; i < rs->raid_disks; i++) {
                                if (!(i % copies))
                                        rebuilds_per_group = 0;
                                if ((!rs->dev[i].rdev.sb_page ||
                                    !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
                                    (++rebuilds_per_group >= copies))
                                        goto too_many;
                        }
                        break;
                }

                /*
                 * When checking "far" and "offset" formats, we need to ensure
                 * that the device that holds its copy is not also dead or
                 * being rebuilt. (Note that "far" and "offset" formats only
                 * support two copies right now. These formats also only ever
                 * use the 'use_far_sets' variant.)
                 *
                 * This check is somewhat complicated by the need to account
                 * for arrays that are not a multiple of (far) copies. This
                 * results in the need to treat the last (potentially larger)
                 * set to be treated exactly like the rest of the array.
                 */
                group_size = (rs->md.raid_disks / copies);
                last_group_start = (rs->md.raid_disks / group_size) - 1;
                last_group_start *= group_size;
                for (i = 0; i < rs->md.raid_disks; i++) {
                        if (!(i % copies) && !(i > last_group_start))
                                rebuilds_per_group = 0;
                        if ((!rs->dev[i].rdev.sb_page ||
                             !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
                            (++rebuilds_per_group >= copies))
                                goto too_many;
                }
                break;
        default:
                if (rebuild_cnt)
                        return -EINVAL;
        }

        return 0;

too_many:
        return -EINVAL;
}
/*
 * Possible arguments are...
 *      <chunk_size> [optional_args]
 *
 * Argument definitions
 *    <chunk_size>                      The number of sectors per disk that
 *                                      will form the "stripe"
 *    [[no]sync]                        Force or prevent recovery of the
 *                                      entire array
 *    [rebuild <idx>]                   Rebuild the drive indicated by the index
 *    [daemon_sleep <ms>]               Time between bitmap daemon work to
 *                                      clear bits
 *    [min_recovery_rate <kB/sec/disk>] Throttle RAID initialization
 *    [max_recovery_rate <kB/sec/disk>] Throttle RAID initialization
 *    [write_mostly <idx>]              Indicate a write mostly drive via index
 *    [max_write_behind <sectors>]      See '-write-behind=' (man mdadm)
 *    [stripe_cache <sectors>]          Stripe cache size for higher RAIDs
 *    [region_size <sectors>]           Defines granularity of bitmap
 *
 * RAID10-only options:
 *    [raid10_copies <# copies>]        Number of copies.  (Default: 2)
 *    [raid10_format <near|far|offset>] Layout algorithm.  (Default: near)
 */
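/*
 * An illustrative (hypothetical, not taken from an actual table) ctr
 * line using a few of these arguments for a 3-device raid5 without
 * metadata devices:
 *
 *    0 1843200 raid raid5_ls 3 128 region_size 1024 3 - 8:17 - 8:33 - 8:49
 *
 * i.e. chunk_size 128 sectors plus one option pair (3 raid params in
 * total), then the device count and three <meta_dev> <data_dev> pairs
 * with '-' standing in for the absent metadata devices.
 */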
static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
                             unsigned num_raid_params)
{
        int value, raid10_format = ALGORITHM_RAID10_DEFAULT;
        unsigned raid10_copies = 2;
        unsigned i;
        unsigned region_size = 0;
        sector_t max_io_len;
        const char *arg, *key;
        struct raid_dev *rd;
        struct raid_type *rt = rs->raid_type;

        arg = dm_shift_arg(as);
        num_raid_params--; /* Account for chunk_size argument */

        if (kstrtoint(arg, 10, &value) < 0) {
                rs->ti->error = "Bad numerical argument given for chunk_size";
                return -EINVAL;
        }

        /*
         * First, parse the in-order required arguments
         * "chunk_size" is the only argument of this type.
         */
        if (rt_is_raid1(rt)) {
                if (value)
                        DMERR("Ignoring chunk size parameter for RAID 1");
                value = 0;
        } else if (!is_power_of_2(value)) {
                rs->ti->error = "Chunk size must be a power of 2";
                return -EINVAL;
        } else if (value < 8) {
                rs->ti->error = "Chunk size value is too small";
                return -EINVAL;
        }

        rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;

        /*
         * We set each individual device as In_sync with a completed
         * 'recovery_offset'. If there has been a device failure or
         * replacement then one of the following cases applies:
         *
         *   1) User specifies 'rebuild'.
         *      - Device is reset when param is read.
         *   2) A new device is supplied.
         *      - No matching superblock found, resets device.
         *   3) Device failure was transient and returns on reload.
         *      - Failure noticed, resets device for bitmap replay.
         *   4) Device hadn't completed recovery after previous failure.
         *      - Superblock is read and overrides recovery_offset.
         *
         * What is found in the superblocks of the devices is always
         * authoritative, unless 'rebuild' or '[no]sync' was specified.
         */
        for (i = 0; i < rs->md.raid_disks; i++) {
                set_bit(In_sync, &rs->dev[i].rdev.flags);
                rs->dev[i].rdev.recovery_offset = MaxSector;
        }
        /*
         * Second, parse the unordered optional arguments
         */
        for (i = 0; i < num_raid_params; i++) {
                key = dm_shift_arg(as);
                if (!key) {
                        rs->ti->error = "Not enough raid parameters given";
                        return -EINVAL;
                }

                if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_NOSYNC))) {
                        if (test_and_set_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) {
                                rs->ti->error = "Only one 'nosync' argument allowed";
                                return -EINVAL;
                        }
                        rs->md.recovery_cp = MaxSector;
                        continue;
                }
                if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_SYNC))) {
                        if (test_and_set_bit(__CTR_FLAG_SYNC, &rs->ctr_flags)) {
                                rs->ti->error = "Only one 'sync' argument allowed";
                                return -EINVAL;
                        }
                        rs->md.recovery_cp = 0;
                        continue;
                }
                if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_USE_NEAR_SETS))) {
                        if (test_and_set_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) {
                                rs->ti->error = "Only one 'raid10_use_near_sets' argument allowed";
                                return -EINVAL;
                        }
                        continue;
                }

                arg = dm_shift_arg(as);
                i++; /* Account for the argument pairs */
                if (!arg) {
                        rs->ti->error = "Wrong number of raid parameters given";
                        return -EINVAL;
                }

                /*
                 * Parameters that take a string value are checked here.
                 */
                if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_FORMAT))) {
                        if (test_and_set_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags)) {
                                rs->ti->error = "Only one 'raid10_format' argument pair allowed";
                                return -EINVAL;
                        }
                        if (!rt_is_raid10(rt)) {
                                rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type";
                                return -EINVAL;
                        }
                        raid10_format = raid10_name_to_format(arg);
                        if (raid10_format < 0) {
                                rs->ti->error = "Invalid 'raid10_format' value given";
                                return raid10_format;
                        }
                        continue;
                }

                if (kstrtoint(arg, 10, &value) < 0) {
                        rs->ti->error = "Bad numerical argument given in raid params";
                        return -EINVAL;
                }

                if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD))) {
                        /*
                         * "rebuild" is being passed in by userspace to provide
                         * indexes of replaced devices and to set up additional
                         * devices on raid level takeover.
                         */
                        if (!__within_range(value, 0, rs->raid_disks - 1)) {
                                rs->ti->error = "Invalid rebuild index given";
                                return -EINVAL;
                        }

                        if (test_and_set_bit(value, (void *) rs->rebuild_disks)) {
                                rs->ti->error = "rebuild for this index already given";
                                return -EINVAL;
                        }

                        rd = rs->dev + value;
                        clear_bit(In_sync, &rd->rdev.flags);
                        clear_bit(Faulty, &rd->rdev.flags);
                        rd->rdev.recovery_offset = 0;
                        set_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags);
                } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_WRITE_MOSTLY))) {
                        if (!rt_is_raid1(rt)) {
                                rs->ti->error = "write_mostly option is only valid for RAID1";
                                return -EINVAL;
                        }

                        if (!__within_range(value, 0, rs->md.raid_disks - 1)) {
                                rs->ti->error = "Invalid write_mostly index given";
                                return -EINVAL;
                        }

                        set_bit(WriteMostly, &rs->dev[value].rdev.flags);
                        set_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags);
                } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MAX_WRITE_BEHIND))) {
                        if (!rt_is_raid1(rt)) {
                                rs->ti->error = "max_write_behind option is only valid for RAID1";
                                return -EINVAL;
                        }

                        if (test_and_set_bit(__CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags)) {
                                rs->ti->error = "Only one max_write_behind argument pair allowed";
                                return -EINVAL;
                        }

                        /*
                         * In device-mapper, we specify things in sectors, but
                         * MD records this value in kB
                         */
                        value /= 2;
                        if (value > COUNTER_MAX) {
                                rs->ti->error = "Max write-behind limit out of range";
                                return -EINVAL;
                        }

                        rs->md.bitmap_info.max_write_behind = value;
                } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DAEMON_SLEEP))) {
                        if (test_and_set_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags)) {
                                rs->ti->error = "Only one daemon_sleep argument pair allowed";
                                return -EINVAL;
                        }
                        if (!value || (value > MAX_SCHEDULE_TIMEOUT)) {
                                rs->ti->error = "daemon sleep period out of range";
                                return -EINVAL;
                        }
                        rs->md.bitmap_info.daemon_sleep = value;
                } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DATA_OFFSET))) {
                        /* Userspace passes new data_offset after having extended the data image LV */
                        if (test_and_set_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) {
                                rs->ti->error = "Only one data_offset argument pair allowed";
                                return -EINVAL;
                        }
                        /* Ensure sensible data offset */
                        if (value < 0) {
                                rs->ti->error = "Bogus data_offset value";
                                return -EINVAL;
                        }
                        rs->data_offset = value;
                } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DELTA_DISKS))) {
                        /* Define the +/-# of disks to add to/remove from the given raid set */
                        if (test_and_set_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) {
                                rs->ti->error = "Only one delta_disks argument pair allowed";
                                return -EINVAL;
                        }
                        /* Ensure MAX_RAID_DEVICES and raid type minimal_devs! */
                        if (!__within_range(abs(value), 1, MAX_RAID_DEVICES - rt->minimal_devs)) {
                                rs->ti->error = "Too many delta_disk requested";
                                return -EINVAL;
                        }

                        rs->delta_disks = value;
                } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_STRIPE_CACHE))) {
                        if (test_and_set_bit(__CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags)) {
                                rs->ti->error = "Only one stripe_cache argument pair allowed";
                                return -EINVAL;
                        }

                        if (!rt_is_raid456(rt)) {
                                rs->ti->error = "Inappropriate argument: stripe_cache";
                                return -EINVAL;
                        }

                        rs->stripe_cache_entries = value;
                } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MIN_RECOVERY_RATE))) {
                        if (test_and_set_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags)) {
                                rs->ti->error = "Only one min_recovery_rate argument pair allowed";
                                return -EINVAL;
                        }
                        if (value > INT_MAX) {
                                rs->ti->error = "min_recovery_rate out of range";
                                return -EINVAL;
                        }
                        rs->md.sync_speed_min = (int)value;
                } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MAX_RECOVERY_RATE))) {
                        if (test_and_set_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags)) {
                                rs->ti->error = "Only one max_recovery_rate argument pair allowed";
                                return -EINVAL;
                        }
                        if (value > INT_MAX) {
                                rs->ti->error = "max_recovery_rate out of range";
                                return -EINVAL;
                        }
                        rs->md.sync_speed_max = (int)value;
                } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_REGION_SIZE))) {
                        if (test_and_set_bit(__CTR_FLAG_REGION_SIZE, &rs->ctr_flags)) {
                                rs->ti->error = "Only one region_size argument pair allowed";
                                return -EINVAL;
                        }

                        region_size = value;
                        rs->requested_bitmap_chunk_sectors = value;
                } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_COPIES))) {
                        if (test_and_set_bit(__CTR_FLAG_RAID10_COPIES, &rs->ctr_flags)) {
                                rs->ti->error = "Only one raid10_copies argument pair allowed";
                                return -EINVAL;
                        }

                        if (!__within_range(value, 2, rs->md.raid_disks)) {
                                rs->ti->error = "Bad value for 'raid10_copies'";
                                return -EINVAL;
                        }

                        raid10_copies = value;
                } else {
                        DMERR("Unable to parse RAID parameter: %s", key);
                        rs->ti->error = "Unable to parse RAID parameter";
                        return -EINVAL;
                }
        }

        if (validate_region_size(rs, region_size))
                return -EINVAL;

        if (rs->md.chunk_sectors)
                max_io_len = rs->md.chunk_sectors;
        else
                max_io_len = region_size;

        if (dm_set_target_max_io_len(rs->ti, max_io_len))
                return -EINVAL;

        if (rt_is_raid10(rt)) {
                if (raid10_copies > rs->md.raid_disks) {
                        rs->ti->error = "Not enough devices to satisfy specification";
                        return -EINVAL;
                }

                rs->md.new_layout = raid10_format_to_md_layout(rs, raid10_format, raid10_copies);
                if (rs->md.new_layout < 0) {
                        rs->ti->error = "Error getting raid10 format";
                        return rs->md.new_layout;
                }

                rt = get_raid_type_by_ll(10, rs->md.new_layout);
                if (!rt) {
                        rs->ti->error = "Failed to recognize new raid10 layout";
                        return -EINVAL;
                }

                if ((rt->algorithm == ALGORITHM_RAID10_DEFAULT ||
                     rt->algorithm == ALGORITHM_RAID10_NEAR) &&
                    test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) {
                        rs->ti->error = "RAID10 format 'near' and 'raid10_use_near_sets' are incompatible";
                        return -EINVAL;
                }
        }

        rs->raid10_copies = raid10_copies;

        /* Assume there are no metadata devices until the drives are parsed */
        rs->md.persistent = 0;
        rs->md.external = 1;

        /* Check, if any invalid ctr arguments have been passed in for the raid level */
        return rs_check_for_valid_flags(rs);
}
/* Set raid4/5/6 cache size */
static int rs_set_raid456_stripe_cache(struct raid_set *rs)
{
        int r;
        struct r5conf *conf;
        struct mddev *mddev = &rs->md;
        uint32_t min_stripes = max(mddev->chunk_sectors, mddev->new_chunk_sectors) / 2;
        uint32_t nr_stripes = rs->stripe_cache_entries;

        if (!rt_is_raid456(rs->raid_type)) {
                rs->ti->error = "Inappropriate raid level; cannot change stripe_cache size";
                return -EINVAL;
        }

        if (nr_stripes < min_stripes) {
                DMINFO("Adjusting requested %u stripe cache entries to %u to suit stripe size",
                       nr_stripes, min_stripes);
                nr_stripes = min_stripes;
        }

        conf = mddev->private;
        if (!conf) {
                rs->ti->error = "Cannot change stripe_cache size on inactive RAID set";
                return -EINVAL;
        }

        /* Try setting number of stripes in raid456 stripe cache */
        if (conf->min_nr_stripes != nr_stripes) {
                r = raid5_set_cache_size(mddev, nr_stripes);
                if (r) {
                        rs->ti->error = "Failed to set raid4/5/6 stripe cache size";
                        return r;
                }

                DMINFO("%u stripe cache entries", nr_stripes);
        }

        return 0;
}
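/*
 * E.g. a 128 KiB chunk is 256 sectors, so min_stripes above works out
 * to 128 and a smaller ctr-supplied stripe_cache value gets raised.
 */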
/* Return # of data stripes as kept in mddev as of @rs (i.e. as of superblock) */
static unsigned int mddev_data_stripes(struct raid_set *rs)
{
        return rs->md.raid_disks - rs->raid_type->parity_devs;
}

/* Return # of data stripes of @rs (i.e. as of ctr) */
static unsigned int rs_data_stripes(struct raid_set *rs)
{
        return rs->raid_disks - rs->raid_type->parity_devs;
}
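/* E.g. a 7-device raid6 set (parity_devs == 2) has 5 data stripes. */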
/* Calculate the sectors per device and per array used for @rs */
static int rs_set_dev_and_array_sectors(struct raid_set *rs, bool use_mddev)
{
        int delta_disks;
        unsigned int data_stripes;
        struct mddev *mddev = &rs->md;
        struct md_rdev *rdev;
        sector_t array_sectors = rs->ti->len, dev_sectors = rs->ti->len;
        sector_t cur_dev_sectors = rs->dev[0].rdev.sectors;

        if (use_mddev) {
                delta_disks = mddev->delta_disks;
                data_stripes = mddev_data_stripes(rs);
        } else {
                delta_disks = rs->delta_disks;
                data_stripes = rs_data_stripes(rs);
        }

        /* Special raid1 case w/o delta_disks support (yet) */
        if (rt_is_raid1(rs->raid_type))
                ;
        else if (rt_is_raid10(rs->raid_type)) {
                if (rs->raid10_copies < 2 ||
                    delta_disks < 0) {
                        rs->ti->error = "Bogus raid10 data copies or delta disks";
                        return -EINVAL;
                }

                dev_sectors *= rs->raid10_copies;
                if (sector_div(dev_sectors, data_stripes))
                        goto bad;

                array_sectors = (data_stripes + delta_disks) * dev_sectors;
                if (sector_div(array_sectors, rs->raid10_copies))
                        goto bad;

        } else if (sector_div(dev_sectors, data_stripes))
                goto bad;

        else
                /* Striped layouts */
                array_sectors = (data_stripes + delta_disks) * dev_sectors;

        rdev_for_each(rdev, mddev)
                rdev->sectors = dev_sectors;

        mddev->array_sectors = array_sectors;
        mddev->dev_sectors = dev_sectors;

        if (!rs_is_raid0(rs) && dev_sectors > cur_dev_sectors)
                mddev->recovery_cp = dev_sectors;

        return 0;
bad:
        rs->ti->error = "Target length not divisible by number of data devices";
        return -EINVAL;
}
static void do_table_event(struct work_struct *ws)
{
        struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);

        rs_set_capacity(rs);
        dm_table_event(rs->ti->table);
}
static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
{
        struct raid_set *rs = container_of(cb, struct raid_set, callbacks);

        return mddev_congested(&rs->md, bits);
}
/*
 * Make sure a valid takeover (level switch) is being requested on @rs
 *
 * Conversions of raid sets from one MD personality to another
 * have to conform to restrictions which are enforced here.
 *
 * Degradation is already checked for in rs_check_conversion() below.
 */
static int rs_check_takeover(struct raid_set *rs)
{
        struct mddev *mddev = &rs->md;
        unsigned int near_copies;

        if (rs->md.degraded) {
                rs->ti->error = "Can't takeover degraded raid set";
                return -EPERM;
        }

        if (rs_is_reshaping(rs)) {
                rs->ti->error = "Can't takeover reshaping raid set";
                return -EPERM;
        }

        switch (mddev->level) {
        case 0:
                /* raid0 -> raid1/5 with one disk */
                if ((mddev->new_level == 1 || mddev->new_level == 5) &&
                    mddev->raid_disks == 1)
                        return 0;

                /* raid0 -> raid10 */
                if (mddev->new_level == 10 &&
                    !(rs->raid_disks % mddev->raid_disks))
                        return 0;

                /* raid0 with multiple disks -> raid4/5/6 */
                if (__within_range(mddev->new_level, 4, 6) &&
                    mddev->new_layout == ALGORITHM_PARITY_N &&
                    mddev->raid_disks > 1)
                        return 0;

                break;

        case 10:
                /* Can't takeover raid10_offset! */
                if (__is_raid10_offset(mddev->layout))
                        break;

                near_copies = __raid10_near_copies(mddev->layout);

                /* raid10* -> raid0 */
                if (mddev->new_level == 0) {
                        /* Can takeover raid10_near with raid disks divisible by data copies! */
                        if (near_copies > 1 &&
                            !(mddev->raid_disks % near_copies)) {
                                mddev->raid_disks /= near_copies;
                                mddev->delta_disks = mddev->raid_disks;
                                return 0;
                        }

                        /* Can takeover raid10_far */
                        if (near_copies == 1 &&
                            __raid10_far_copies(mddev->layout) > 1)
                                return 0;

                        break;
                }

                /* raid10_{near,far} -> raid1 */
                if (mddev->new_level == 1 &&
                    max(near_copies, __raid10_far_copies(mddev->layout)) == mddev->raid_disks)
                        return 0;

                /* raid10_{near,far} with 2 disks -> raid4/5 */
                if (__within_range(mddev->new_level, 4, 5) &&
                    mddev->raid_disks == 2)
                        return 0;
                break;

        case 1:
                /* raid1 with 2 disks -> raid4/5 */
                if (__within_range(mddev->new_level, 4, 5) &&
                    mddev->raid_disks == 2) {
                        mddev->degraded = 1;
                        return 0;
                }

                /* raid1 -> raid0 */
                if (mddev->new_level == 0 &&
                    mddev->raid_disks == 1)
                        return 0;

                /* raid1 -> raid10 */
                if (mddev->new_level == 10)
                        return 0;
                break;

        case 4:
                /* raid4 -> raid0 */
                if (mddev->new_level == 0)
                        return 0;

                /* raid4 -> raid1/5 with 2 disks */
                if ((mddev->new_level == 1 || mddev->new_level == 5) &&
                    mddev->raid_disks == 2)
                        return 0;

                /* raid4 -> raid5/6 with parity N */
                if (__within_range(mddev->new_level, 5, 6) &&
                    mddev->layout == ALGORITHM_PARITY_N)
                        return 0;
                break;

        case 5:
                /* raid5 with parity N -> raid0 */
                if (mddev->new_level == 0 &&
                    mddev->layout == ALGORITHM_PARITY_N)
                        return 0;

                /* raid5 with parity N -> raid4 */
                if (mddev->new_level == 4 &&
                    mddev->layout == ALGORITHM_PARITY_N)
                        return 0;

                /* raid5 with 2 disks -> raid1/4/10 */
                if ((mddev->new_level == 1 || mddev->new_level == 4 || mddev->new_level == 10) &&
                    mddev->raid_disks == 2)
                        return 0;

                /* raid5 with parity N -> raid6 with parity N */
                if (mddev->new_level == 6 &&
                    ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) ||
                     __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC_6, ALGORITHM_RIGHT_SYMMETRIC_6)))
                        return 0;
                break;

        case 6:
                /* raid6 with parity N -> raid0 */
                if (mddev->new_level == 0 &&
                    mddev->layout == ALGORITHM_PARITY_N)
                        return 0;

                /* raid6 with parity N -> raid4 */
                if (mddev->new_level == 4 &&
                    mddev->layout == ALGORITHM_PARITY_N)
                        return 0;

                /* raid6_*_n with parity N -> raid5_* */
                if (mddev->new_level == 5 &&
                    ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) ||
                     __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC, ALGORITHM_RIGHT_SYMMETRIC)))
                        return 0;
                break;

        default:
                break;
        }

        rs->ti->error = "takeover not possible";
        return -EPERM;
}
/* True if @rs requested to be taken over */
static bool rs_takeover_requested(struct raid_set *rs)
{
        return rs->md.new_level != rs->md.level;
}
/* True if @rs is requested to reshape by ctr */
static bool rs_reshape_requested(struct raid_set *rs)
{
        struct mddev *mddev = &rs->md;

        if (!mddev->level)
                return false;

        return !__is_raid10_far(mddev->new_layout) &&
               mddev->new_level == mddev->level &&
               (mddev->new_layout != mddev->layout ||
                mddev->new_chunk_sectors != mddev->chunk_sectors ||
                rs->raid_disks + rs->delta_disks != mddev->raid_disks);
}
/* Features */
#define FEATURE_FLAG_SUPPORTS_V190      0x1 /* Supports extended superblock */

/* State flags for sb->flags */
#define SB_FLAG_RESHAPE_ACTIVE          0x1
#define SB_FLAG_RESHAPE_BACKWARDS       0x2
/*
 * This structure is never routinely used by userspace, unlike md superblocks.
 * Devices with this superblock should only ever be accessed via device-mapper.
 */
#define DM_RAID_MAGIC 0x64526D44
struct dm_raid_superblock {
        __le32 magic;           /* "DmRd" */
        __le32 compat_features; /* Used to indicate compatible features (like 1.9.0 ondisk metadata extension) */

        __le32 num_devices;     /* Number of devices in this raid set. (Max 64) */
        __le32 array_position;  /* The position of this drive in the raid set */

        __le64 events;          /* Incremented by md when superblock updated */
        __le64 failed_devices;  /* Pre 1.9.0 part of bit field of devices to */
                                /* indicate failures (see extension below) */

        /*
         * This offset tracks the progress of the repair or replacement of
         * an individual drive.
         */
        __le64 disk_recovery_offset;

        /*
         * This offset tracks the progress of the initial raid set
         * synchronisation/parity calculation.
         */
        __le64 array_resync_offset;

        /*
         * raid characteristics
         */
        __le32 level;
        __le32 layout;
        __le32 stripe_sectors;

        /********************************************************************
         * BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!!
         *
         * FEATURE_FLAG_SUPPORTS_V190 in the features member indicates that those exist
         */

        __le32 flags; /* Flags defining array states for reshaping */

        /*
         * This offset tracks the progress of a raid
         * set reshape in order to be able to restart it
         */
        __le64 reshape_position;

        /*
         * These define the properties of the array in case of an interrupted reshape
         */
        __le32 new_level;
        __le32 new_layout;
        __le32 new_stripe_sectors;
        __le32 delta_disks;

        __le64 array_sectors; /* Array size in sectors */

        /*
         * Sector offsets to data on devices (reshaping).
         * Needed to support out of place reshaping, thus
         * not writing over any stripes whilst converting
         * them from old to new layout
         */
        __le64 data_offset;
        __le64 new_data_offset;

        __le64 sectors; /* Used device size in sectors */

        /*
         * Additional Bit field of devices indicating failures to support
         * up to 256 devices with the 1.9.0 on-disk metadata format
         */
        __le64 extended_failed_devices[DISKS_ARRAY_ELEMS - 1];

        __le32 incompat_features;       /* Used to indicate any incompatible features */

        /* Always set rest up to logical block size to 0 when writing (see get_metadata_device() below). */
} __packed;
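/*
 * Failed-device capacity: the legacy failed_devices word carries 64
 * bits and extended_failed_devices[] adds (DISKS_ARRAY_ELEMS - 1) == 3
 * more 64-bit words, covering the 256 devices mentioned above.
 */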
/*
 * Check for reshape constraints on raid set @rs:
 *
 * - reshape function non-existent
 * - degraded set
 * - ongoing recovery
 * - ongoing reshape
 *
 * Returns 0 if none or -EPERM if given constraint
 * and error message reference in @errmsg
 */
static int rs_check_reshape(struct raid_set *rs)
{
        struct mddev *mddev = &rs->md;

        if (!mddev->pers || !mddev->pers->check_reshape)
                rs->ti->error = "Reshape not supported";
        else if (mddev->degraded)
                rs->ti->error = "Can't reshape degraded raid set";
        else if (rs_is_recovering(rs))
                rs->ti->error = "Convert request on recovering raid set prohibited";
        else if (mddev->reshape_position && rs_is_reshaping(rs))
                rs->ti->error = "raid set already reshaping!";
        else if (!(rs_is_raid10(rs) || rs_is_raid456(rs)))
                rs->ti->error = "Reshaping only supported for raid4/5/6/10";
        else
                return 0;

        return -EPERM;
}
static int read_disk_sb(struct md_rdev *rdev, int size)
{
        BUG_ON(!rdev->sb_page);

        if (rdev->sb_loaded)
                return 0;

        if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, 1)) {
                DMERR("Failed to read superblock of device at position %d",
                      rdev->raid_disk);
                md_error(rdev->mddev, rdev);
                return -EINVAL;
        }

        rdev->sb_loaded = 1;

        return 0;
}
static void sb_retrieve_failed_devices(struct dm_raid_superblock *sb, uint64_t *failed_devices)
{
        failed_devices[0] = le64_to_cpu(sb->failed_devices);
        memset(failed_devices + 1, 0, sizeof(sb->extended_failed_devices));

        if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) {
                int i = ARRAY_SIZE(sb->extended_failed_devices);

                while (i--)
                        failed_devices[i+1] = le64_to_cpu(sb->extended_failed_devices[i]);
        }
}
static void sb_update_failed_devices(struct dm_raid_superblock *sb, uint64_t *failed_devices)
{
        int i = ARRAY_SIZE(sb->extended_failed_devices);

        sb->failed_devices = cpu_to_le64(failed_devices[0]);
        while (i--)
                sb->extended_failed_devices[i] = cpu_to_le64(failed_devices[i+1]);
}
/*
 * Synchronize the superblock members with the raid set properties
 *
 * All superblock data is little endian.
 */
static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
{
        bool update_failed_devices = false;
        unsigned int i;
        uint64_t failed_devices[DISKS_ARRAY_ELEMS];
        struct dm_raid_superblock *sb;
        struct raid_set *rs = container_of(mddev, struct raid_set, md);

        /* No metadata device, no superblock */
        if (!rdev->meta_bdev)
                return;

        BUG_ON(!rdev->sb_page);

        sb = page_address(rdev->sb_page);

        sb_retrieve_failed_devices(sb, failed_devices);

        for (i = 0; i < rs->raid_disks; i++)
                if (!rs->dev[i].data_dev || test_bit(Faulty, &rs->dev[i].rdev.flags)) {
                        update_failed_devices = true;
                        set_bit(i, (void *) failed_devices);
                }

        if (update_failed_devices)
                sb_update_failed_devices(sb, failed_devices);

        sb->magic = cpu_to_le32(DM_RAID_MAGIC);
        sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190);

        sb->num_devices = cpu_to_le32(mddev->raid_disks);
        sb->array_position = cpu_to_le32(rdev->raid_disk);

        sb->events = cpu_to_le64(mddev->events);

        sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset);
        sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp);

        sb->level = cpu_to_le32(mddev->level);
        sb->layout = cpu_to_le32(mddev->layout);
        sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);

        sb->new_level = cpu_to_le32(mddev->new_level);
        sb->new_layout = cpu_to_le32(mddev->new_layout);
        sb->new_stripe_sectors = cpu_to_le32(mddev->new_chunk_sectors);

        sb->delta_disks = cpu_to_le32(mddev->delta_disks);

        smp_rmb(); /* Make sure we access most recent reshape position */
        sb->reshape_position = cpu_to_le64(mddev->reshape_position);
        if (le64_to_cpu(sb->reshape_position) != MaxSector) {
                /* Flag ongoing reshape */
                sb->flags |= cpu_to_le32(SB_FLAG_RESHAPE_ACTIVE);

                if (mddev->delta_disks < 0 || mddev->reshape_backwards)
                        sb->flags |= cpu_to_le32(SB_FLAG_RESHAPE_BACKWARDS);
        } else {
                /* Clear reshape flags */
                sb->flags &= ~(cpu_to_le32(SB_FLAG_RESHAPE_ACTIVE|SB_FLAG_RESHAPE_BACKWARDS));
        }

        sb->array_sectors = cpu_to_le64(mddev->array_sectors);
        sb->data_offset = cpu_to_le64(rdev->data_offset);
        sb->new_data_offset = cpu_to_le64(rdev->new_data_offset);
        sb->sectors = cpu_to_le64(rdev->sectors);

        /* Zero out the rest of the payload after the size of the superblock */
        memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));
}
/*
 * super_load
 *
 * This function creates a superblock if one is not found on the device
 * and will decide which superblock to use if there's a choice.
 *
 * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise
 */
static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
{
        int r;
        struct dm_raid_superblock *sb;
        struct dm_raid_superblock *refsb;
        uint64_t events_sb, events_refsb;

        rdev->sb_start = 0;
        rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
        if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) {
                DMERR("superblock size of a logical block is no longer valid");
                return -EINVAL;
        }

        r = read_disk_sb(rdev, rdev->sb_size);
        if (r)
                return r;

        sb = page_address(rdev->sb_page);

        /*
         * Two cases that we want to write new superblocks and rebuild:
         * 1) New device (no matching magic number)
         * 2) Device specified for rebuild (!In_sync w/ offset == 0)
         */
        if ((sb->magic != cpu_to_le32(DM_RAID_MAGIC)) ||
            (!test_bit(In_sync, &rdev->flags) && !rdev->recovery_offset)) {
                super_sync(rdev->mddev, rdev);

                set_bit(FirstUse, &rdev->flags);
                sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190);

                /* Force writing of superblocks to disk */
                set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags);

                /* Any superblock is better than none, choose that if given */
                return refdev ? 0 : 1;
        }

        if (!refdev)
                return 1;

        events_sb = le64_to_cpu(sb->events);

        refsb = page_address(refdev->sb_page);
        events_refsb = le64_to_cpu(refsb->events);

        return (events_sb > events_refsb) ? 1 : 0;
}
1948 static int super_init_validation(struct raid_set
*rs
, struct md_rdev
*rdev
)
1952 struct mddev
*mddev
= &rs
->md
;
1954 uint64_t failed_devices
[DISKS_ARRAY_ELEMS
];
1955 struct dm_raid_superblock
*sb
;
1956 uint32_t new_devs
= 0, rebuild_and_new
= 0, rebuilds
= 0;
1958 struct dm_raid_superblock
*sb2
;
1960 sb
= page_address(rdev
->sb_page
);
1961 events_sb
= le64_to_cpu(sb
->events
);
1964 * Initialise to 1 if this is a new superblock.
1966 mddev
->events
= events_sb
? : 1;
1968 mddev
->reshape_position
= MaxSector
;

	/*
	 * Reshaping is supported, e.g. reshape_position is valid
	 * in superblock and superblock content is authoritative.
	 */
	if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) {
		/* Superblock is authoritative wrt given raid set layout! */
		mddev->raid_disks = le32_to_cpu(sb->num_devices);
		mddev->level = le32_to_cpu(sb->level);
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors);
		mddev->new_level = le32_to_cpu(sb->new_level);
		mddev->new_layout = le32_to_cpu(sb->new_layout);
		mddev->new_chunk_sectors = le32_to_cpu(sb->new_stripe_sectors);
		mddev->delta_disks = le32_to_cpu(sb->delta_disks);
		mddev->array_sectors = le64_to_cpu(sb->array_sectors);

		/* raid was reshaping and got interrupted */
		if (le32_to_cpu(sb->flags) & SB_FLAG_RESHAPE_ACTIVE) {
			if (test_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) {
				DMERR("Reshape requested but raid set is still reshaping");
				return -EINVAL;
			}

			if (mddev->delta_disks < 0 ||
			    (!mddev->delta_disks && (le32_to_cpu(sb->flags) & SB_FLAG_RESHAPE_BACKWARDS)))
				mddev->reshape_backwards = 1;
			else
				mddev->reshape_backwards = 0;

			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
			rs->raid_type = get_raid_type_by_ll(mddev->level, mddev->layout);
		}

	} else {
		/*
		 * No takeover/reshaping, because we don't have the extended v1.9.0 metadata
		 */
		if (le32_to_cpu(sb->level) != mddev->level) {
			DMERR("Reshaping/takeover raid sets not yet supported. (raid level/stripes/size change)");
			return -EINVAL;
		}
		if (le32_to_cpu(sb->layout) != mddev->layout) {
			DMERR("Reshaping raid sets not yet supported. (raid layout change)");
			DMERR("	 0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
			DMERR("	 Old layout: %s w/ %d copies",
			      raid10_md_layout_to_format(le32_to_cpu(sb->layout)),
			      raid10_md_layout_to_copies(le32_to_cpu(sb->layout)));
			DMERR("	 New layout: %s w/ %d copies",
			      raid10_md_layout_to_format(mddev->layout),
			      raid10_md_layout_to_copies(mddev->layout));
			return -EINVAL;
		}
		if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) {
			DMERR("Reshaping raid sets not yet supported. (stripe sectors change)");
			return -EINVAL;
		}

		/* We can only change the number of devices in raid1 with old (i.e. pre 1.0.7) metadata */
		if (!rt_is_raid1(rs->raid_type) &&
		    (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) {
			DMERR("Reshaping raid sets not yet supported. (device count change from %u to %u)",
			      le32_to_cpu(sb->num_devices), mddev->raid_disks);
			return -EINVAL;
		}
	}

	/* Table line is checked vs. authoritative superblock */

	if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
		mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);

	/*
	 * During load, we set FirstUse if a new superblock was written.
	 * There are three reasons we might not have a superblock:
	 * 1) The raid set is brand new - in which case, all of the
	 *    devices must have their In_sync bit set.  Also,
	 *    recovery_cp must be 0, unless forced.
	 * 2) This is a new device being added to an old raid set
	 *    and the new device needs to be rebuilt - in which
	 *    case the In_sync bit will /not/ be set and
	 *    recovery_cp must be MaxSector.
	 * 3) This is/are a new device(s) being added to an old
	 *    raid set during takeover to a higher raid level
	 *    to provide capacity for redundancy or during reshape
	 *    to add capacity to grow the raid set.
	 */
	rdev_for_each(r, mddev) {
		if (test_bit(FirstUse, &r->flags))
			new_devs++;

		if (!test_bit(In_sync, &r->flags)) {
			DMINFO("Device %d specified for rebuild; clearing superblock",
			       r->raid_disk);
			rebuilds++;

			if (test_bit(FirstUse, &r->flags))
				rebuild_and_new++;
		}
	}

	if (new_devs == rs->raid_disks || !rebuilds) {
		/* Replace a broken device */
		if (new_devs == 1 && !rs->delta_disks)
			;
		if (new_devs == rs->raid_disks) {
			DMINFO("Superblocks created for new raid set");
			set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
			mddev->recovery_cp = 0;
		} else if (new_devs != rebuilds &&
			   new_devs != rs->delta_disks) {
			DMERR("New device injected into existing raid set without "
			      "'delta_disks' or 'rebuild' parameter specified");
			return -EINVAL;
		}
	} else if (new_devs && new_devs != rebuilds) {
		DMERR("%u 'rebuild' devices cannot be injected into"
		      " a raid set with %u other first-time devices",
		      rebuilds, new_devs);
		return -EINVAL;
	} else if (rebuilds) {
		if (rebuild_and_new && rebuilds != rebuild_and_new) {
			DMERR("new device%s provided without 'rebuild'",
			      new_devs > 1 ? "s" : "");
			return -EINVAL;
		} else if (rs_is_recovering(rs)) {
			DMERR("'rebuild' specified while raid set is not in-sync (recovery_cp=%llu)",
			      (unsigned long long) mddev->recovery_cp);
			return -EINVAL;
		} else if (rs_is_reshaping(rs)) {
			DMERR("'rebuild' specified while raid set is being reshaped (reshape_position=%llu)",
			      (unsigned long long) mddev->reshape_position);
			return -EINVAL;
		}
	}

	/*
	 * Now we set the Faulty bit for those devices that are
	 * recorded in the superblock as failed.
	 */
	sb_retrieve_failed_devices(sb, failed_devices);
	rdev_for_each(r, mddev) {
		if (!r->sb_page)
			continue;
		sb2 = page_address(r->sb_page);
		sb2->failed_devices = 0;
		memset(sb2->extended_failed_devices, 0, sizeof(sb2->extended_failed_devices));

		/*
		 * Check for any device re-ordering.
		 */
		if (!test_bit(FirstUse, &r->flags) && (r->raid_disk >= 0)) {
			role = le32_to_cpu(sb2->array_position);
			if (role < 0)
				continue;

			if (role != r->raid_disk) {
				if (__is_raid10_near(mddev->layout)) {
					if (mddev->raid_disks % __raid10_near_copies(mddev->layout) ||
					    rs->raid_disks % rs->raid10_copies) {
						rs->ti->error =
							"Cannot change raid10 near set to odd # of devices!";
						return -EINVAL;
					}

					sb2->array_position = cpu_to_le32(r->raid_disk);

				} else if (!(rs_is_raid10(rs) && rt_is_raid0(rs->raid_type)) &&
					   !(rs_is_raid0(rs) && rt_is_raid10(rs->raid_type)) &&
					   !rt_is_raid1(rs->raid_type)) {
					rs->ti->error = "Cannot change device positions in raid set";
					return -EINVAL;
				}

				DMINFO("raid device #%d now at position #%d", role, r->raid_disk);
			}

			/*
			 * Partial recovery is performed on
			 * returning failed devices.
			 */
			if (test_bit(role, (void *) failed_devices))
				set_bit(Faulty, &r->flags);
		}
	}

	return 0;
}
static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
{
	struct mddev *mddev = &rs->md;
	struct dm_raid_superblock *sb;

	if (rs_is_raid0(rs) || !rdev->sb_page)
		return 0;

	sb = page_address(rdev->sb_page);

	/*
	 * If mddev->events is not set, we know we have not yet initialized
	 * the array.
	 */
	if (!mddev->events && super_init_validation(rs, rdev))
		return -EINVAL;

	if (le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) {
		rs->ti->error = "Unable to assemble array: Unknown flag(s) in compatible feature flags";
		return -EINVAL;
	}

	if (sb->incompat_features) {
		rs->ti->error = "Unable to assemble array: No incompatible feature flags supported yet";
		return -EINVAL;
	}

	/* Enable bitmap creation for RAID levels != 0 */
	mddev->bitmap_info.offset = rt_is_raid0(rs->raid_type) ? 0 : to_sector(4096);
	rdev->mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;

	if (!test_and_clear_bit(FirstUse, &rdev->flags)) {
		/* Retrieve device size stored in superblock to be prepared for shrink */
		rdev->sectors = le64_to_cpu(sb->sectors);
		rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
		if (rdev->recovery_offset == MaxSector)
			set_bit(In_sync, &rdev->flags);
		/*
		 * If no reshape in progress -> we're recovering single
		 * disk(s) and have to set the device(s) to out-of-sync
		 */
		else if (!rs_is_reshaping(rs))
			clear_bit(In_sync, &rdev->flags); /* Mandatory for recovery */
	}

	/*
	 * If a device comes back, set it as not In_sync and no longer faulty.
	 */
	if (test_and_clear_bit(Faulty, &rdev->flags)) {
		rdev->recovery_offset = 0;
		clear_bit(In_sync, &rdev->flags);
		rdev->saved_raid_disk = rdev->raid_disk;
	}

	/* Reshape support -> restore respective data offsets */
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	rdev->new_data_offset = le64_to_cpu(sb->new_data_offset);

	return 0;
}
/*
 * Analyse superblocks and select the freshest.
 */
static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
{
	int r;
	struct raid_dev *dev;
	struct md_rdev *rdev, *tmp, *freshest;
	struct mddev *mddev = &rs->md;

	freshest = NULL;
	rdev_for_each_safe(rdev, tmp, mddev) {
		/*
		 * Skipping super_load due to CTR_FLAG_SYNC will cause
		 * the array to undergo initialization again as
		 * though it were new. This is the intended effect
		 * of the "sync" directive.
		 *
		 * When reshaping capability is added, we must ensure
		 * that the "sync" directive is disallowed during the
		 * reshape.
		 */
		if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
			continue;

		if (!rdev->meta_bdev)
			continue;

		r = super_load(rdev, freshest);

		switch (r) {
		case 1:
			freshest = rdev;
			break;
		case 0:
			break;
		default:
			dev = container_of(rdev, struct raid_dev, rdev);
			if (dev->meta_dev)
				dm_put_device(ti, dev->meta_dev);

			dev->meta_dev = NULL;
			rdev->meta_bdev = NULL;

			if (rdev->sb_page)
				put_page(rdev->sb_page);

			rdev->sb_page = NULL;

			rdev->sb_loaded = 0;

			/*
			 * We might be able to salvage the data device
			 * even though the meta device has failed. For
			 * now, we behave as though '- -' had been
			 * set for this device in the table.
			 */
			if (dev->data_dev)
				dm_put_device(ti, dev->data_dev);

			dev->data_dev = NULL;
			rdev->bdev = NULL;

			list_del(&rdev->same_set);
		}
	}

	if (!freshest)
		return 0;

	if (validate_raid_redundancy(rs)) {
		rs->ti->error = "Insufficient redundancy to activate array";
		return -EINVAL;
	}

	/*
	 * Validation of the freshest device provides the source of
	 * validation for the remaining devices.
	 */
	rs->ti->error = "Unable to assemble array: Invalid superblocks";
	if (super_validate(rs, freshest))
		return -EINVAL;

	rdev_for_each(rdev, mddev)
		if ((rdev != freshest) && super_validate(rs, rdev))
			return -EINVAL;

	return 0;
}
/*
 * Adjust data_offset and new_data_offset on all disk members of @rs
 * for out of place reshaping if requested by constructor.
 *
 * We need free space at the beginning of each raid disk for forward
 * and at the end for backward reshapes which userspace has to provide
 * via remapping/reordering of space.
 */
static int rs_adjust_data_offsets(struct raid_set *rs)
{
	sector_t data_offset = 0, new_data_offset = 0;
	struct md_rdev *rdev;

	/* Constructor did not request data offset change */
	if (!test_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) {
		if (!rs_is_reshapable(rs))
			goto out;

		return 0;
	}

	/* HM FIXME: get InSync raid_dev? */
	rdev = &rs->dev[0].rdev;

	if (rs->delta_disks < 0) {
		/*
		 * Removing disks (reshaping backwards):
		 *
		 * - before reshape: data is at offset 0 and free space
		 *		     is at end of each component LV
		 *
		 * - after reshape: data is at offset rs->data_offset != 0 on each component LV
		 */
		data_offset = 0;
		new_data_offset = rs->data_offset;

	} else if (rs->delta_disks > 0) {
		/*
		 * Adding disks (reshaping forwards):
		 *
		 * - before reshape: data is at offset rs->data_offset != 0 and
		 *		     free space is at begin of each component LV
		 *
		 * - after reshape: data is at offset 0 on each component LV
		 */
		data_offset = rs->data_offset;
		new_data_offset = 0;

	} else {
		/*
		 * User space passes in 0 for data offset after having removed reshape space
		 *
		 * - or - (data offset != 0)
		 *
		 * Changing RAID layout or chunk size -> toggle offsets
		 *
		 * - before reshape: data is at offset rs->data_offset 0 and
		 *		     free space is at end of each component LV
		 *		     -or-
		 *		     data is at offset rs->data_offset != 0 and
		 *		     free space is at begin of each component LV
		 *
		 * - after reshape: data is at offset 0 if it was at offset != 0
		 *		    or at offset != 0 if it was at offset 0
		 *		    on each component LV
		 */
		data_offset = rs->data_offset ? rdev->data_offset : 0;
		new_data_offset = data_offset ? 0 : rs->data_offset;
		set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
	}

	/*
	 * Make sure we got a minimum amount of free sectors per device
	 */
	if (rs->data_offset &&
	    to_sector(i_size_read(rdev->bdev->bd_inode)) - rdev->sectors < MIN_FREE_RESHAPE_SPACE) {
		rs->ti->error = data_offset ? "No space for forward reshape" :
					      "No space for backward reshape";
		return -ENOSPC;
	}
out:
	/* Adjust data offsets on all rdevs */
	rdev_for_each(rdev, &rs->md) {
		rdev->data_offset = data_offset;
		rdev->new_data_offset = new_data_offset;
	}

	return 0;
}
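
/*
 * Worked example (hypothetical numbers): for a disk-adding reshape
 * requested with "data_offset 8192" on the table line, userspace leaves
 * 8192 free sectors up front so data starts at sector 8192 on each
 * component LV; the code above then sets data_offset = 8192 and
 * new_data_offset = 0, and the forward reshape writes the widened
 * stripes into the freed leading space, leaving data at offset 0.
 */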
/* Userspace reordered disks -> adjust raid_disk indexes in @rs */
static void __reorder_raid_disk_indexes(struct raid_set *rs)
{
	int i = 0;
	struct md_rdev *rdev;

	rdev_for_each(rdev, &rs->md) {
		rdev->raid_disk = i++;
		rdev->saved_raid_disk = rdev->new_raid_disk = -1;
	}
}

/*
 * Setup @rs for takeover by a different raid level
 */
static int rs_setup_takeover(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;
	struct md_rdev *rdev;
	unsigned int d = mddev->raid_disks = rs->raid_disks;
	sector_t new_data_offset = rs->dev[0].rdev.data_offset ? 0 : rs->data_offset;

	if (rt_is_raid10(rs->raid_type)) {
		if (mddev->level == 0) {
			/* Userspace reordered disks -> adjust raid_disk indexes */
			__reorder_raid_disk_indexes(rs);

			/* raid0 -> raid10_far layout */
			mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_FAR,
								   rs->raid10_copies);
		} else if (mddev->level == 1)
			/* raid1 -> raid10_near layout */
			mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR,
								   rs->raid_disks);
		else
			return -EINVAL;
	}

	clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
	mddev->recovery_cp = MaxSector;

	while (d--) {
		rdev = &rs->dev[d].rdev;

		if (test_bit(d, (void *) rs->rebuild_disks)) {
			clear_bit(In_sync, &rdev->flags);
			clear_bit(Faulty, &rdev->flags);
			mddev->recovery_cp = rdev->recovery_offset = 0;
			/* Bitmap has to be created when we do an "up" takeover */
			set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
		}

		rdev->new_data_offset = new_data_offset;
	}

	return 0;
}
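
/*
 * Illustration: a takeover from 2-legged raid1 selects the raid10 "near"
 * layout above because raid10_near with 2 copies on 2 devices places the
 * identical mirrored data as raid1 - only the superblocks need
 * rewriting, not the data blocks themselves.
 */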

/*
 * Setup @rs for reshape:
 *
 * - change raid layout
 * - change chunk size
 * - add disks
 * - remove disks
 */
static int rs_setup_reshape(struct raid_set *rs)
{
	int r = 0;
	unsigned int cur_raid_devs, d;
	struct mddev *mddev = &rs->md;
	struct md_rdev *rdev;

	mddev->delta_disks = rs->delta_disks;
	cur_raid_devs = mddev->raid_disks;

	/* Ignore impossible layout change whilst adding/removing disks */
	if (mddev->delta_disks &&
	    mddev->layout != mddev->new_layout) {
		DMINFO("Ignoring invalid layout change with delta_disks=%d", rs->delta_disks);
		mddev->new_layout = mddev->layout;
	}

	/*
	 * Adjust array size:
	 *
	 * - in case of adding disks, array size has
	 *   to grow after the disk adding reshape,
	 *   which'll happen in the event handler;
	 *   reshape will happen forward, so space has to
	 *   be available at the beginning of each disk
	 *
	 * - in case of removing disks, array size
	 *   has to shrink before starting the reshape,
	 *   which'll happen here;
	 *   reshape will happen backward, so space has to
	 *   be available at the end of each disk
	 *
	 * - data_offset and new_data_offset are
	 *   adjusted for aforementioned out of place
	 *   reshaping based on userspace passing in
	 *   the "data_offset <sectors>" key/value
	 *   pair via the constructor
	 */

	/* Add disk(s) */
	if (rs->delta_disks > 0) {
		/* Prepare disks for check in raid4/5/6/10 {check|start}_reshape */
		for (d = cur_raid_devs; d < rs->raid_disks; d++) {
			rdev = &rs->dev[d].rdev;
			clear_bit(In_sync, &rdev->flags);

			/*
			 * save_raid_disk needs to be -1, or recovery_offset will be set to 0
			 * by md, which'll store that erroneously in the superblock on reshape
			 */
			rdev->saved_raid_disk = -1;
			rdev->raid_disk = d;

			rdev->sectors = mddev->dev_sectors;
			rdev->recovery_offset = MaxSector;
		}

		mddev->reshape_backwards = 0; /* adding disks -> forward reshape */

	/* Remove disk(s) */
	} else if (rs->delta_disks < 0) {
		r = rs_set_dev_and_array_sectors(rs, true);
		mddev->reshape_backwards = 1; /* removing disk(s) -> backward reshape */

	/* Change layout and/or chunk size */
	} else {
		/*
		 * Reshape layout (e.g. raid5_ls -> raid5_n) and/or chunk size:
		 *
		 * keeping number of disks and do layout change ->
		 *
		 * toggle reshape_backwards depending on data_offset:
		 *
		 * - free space upfront -> reshape forward
		 *
		 * - free space at the end -> reshape backward
		 *
		 * This utilizes free reshape space avoiding the need
		 * for userspace to move (parts of) LV segments in
		 * case of layout/chunksize change (for disk
		 * adding/removing reshape space has to be at
		 * the proper address (see above with delta_disks):
		 *
		 * add disk(s)	 -> begin
		 * remove disk(s)-> end
		 */
		mddev->reshape_backwards = rs->dev[0].rdev.data_offset ? 0 : 1;
	}

	return r;
}

/*
 * Enable/disable discard support on RAID set depending on
 * RAID level and discard properties of underlying RAID members.
 */
static void configure_discard_support(struct raid_set *rs)
{
	int i;
	bool raid456;
	struct dm_target *ti = rs->ti;

	/* Assume discards not supported until after checks below. */
	ti->discards_supported = false;

	/* RAID level 4,5,6 require discard_zeroes_data for data integrity! */
	raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6);

	for (i = 0; i < rs->md.raid_disks; i++) {
		struct request_queue *q;

		if (!rs->dev[i].rdev.bdev)
			continue;

		q = bdev_get_queue(rs->dev[i].rdev.bdev);
		if (!q || !blk_queue_discard(q))
			return;

		if (raid456) {
			if (!q->limits.discard_zeroes_data)
				return;
			if (!devices_handle_discard_safely) {
				DMERR("raid456 discard support disabled due to discard_zeroes_data uncertainty.");
				DMERR("Set dm-raid.devices_handle_discard_safely=Y to override.");
				return;
			}
		}
	}

	/* All RAID members properly support discards */
	ti->discards_supported = true;

	/*
	 * RAID1 and RAID10 personalities require bio splitting,
	 * RAID0/4/5/6 don't and process large discard bios properly.
	 */
	ti->split_discard_bios = !!(rs->md.level == 1 || rs->md.level == 10);
	ti->num_discard_bios = 1;
}
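
/*
 * Usage sketch (hypothetical shell session) for the override referred to
 * in the DMERR() messages above; devices_handle_discard_safely is
 * declared as a writable module parameter at the bottom of this file:
 *
 *   modprobe dm-raid devices_handle_discard_safely=Y
 * or at runtime:
 *   echo Y > /sys/module/dm_raid/parameters/devices_handle_discard_safely
 */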

/*
 * Construct a RAID0/1/10/4/5/6 mapping:
 * Args:
 *	<raid_type> <#raid_params> <raid_params>{0,} \
 *	<#raid_devs> [<meta_dev1> <dev1>]{1,}
 *
 * <raid_params> varies by <raid_type>. See 'parse_raid_params' for
 * details on possible <raid_params>.
 *
 * Userspace is free to initialize the metadata devices, hence the superblocks to
 * enforce recreation based on the passed in table parameters.
 */
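
/*
 * Illustrative table line (hypothetical device numbers, in the style of
 * Documentation/device-mapper/dm-raid.txt): a raid4 set over five
 * metadata/data device pairs with 1MiB (2048-sector) chunks, forced
 * initial sync and a minimum recovery rate of 20 kiB/sec/disk:
 *
 *   0 1960893648 raid \
 *	raid4 4 2048 sync min_recovery_rate 20 \
 *	5 8:17 8:18 8:33 8:34 8:49 8:50 8:65 8:66 8:81 8:82
 */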
static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	int r;
	const char *arg;
	struct raid_type *rt;
	unsigned num_raid_params, num_raid_devs;
	struct raid_set *rs = NULL;
	struct rs_layout rs_layout;
	struct dm_arg_set as = { argc, argv }, as_nrd;
	struct dm_arg _args[] = {
		{ 0, as.argc, "Cannot understand number of raid parameters" },
		{ 1, 254, "Cannot understand number of raid devices parameters" }
	};

	/* Must have <raid_type> */
	arg = dm_shift_arg(&as);
	if (!arg) {
		ti->error = "No arguments";
		return -EINVAL;
	}

	rt = get_raid_type(arg);
	if (!rt) {
		ti->error = "Unrecognised raid_type";
		return -EINVAL;
	}

	/* Must have <#raid_params> */
	if (dm_read_arg_group(_args, &as, &num_raid_params, &ti->error))
		return -EINVAL;

	/* number of raid device tuples <meta_dev data_dev> */
	as_nrd = as;
	dm_consume_args(&as_nrd, num_raid_params);
	_args[1].max = (as_nrd.argc - 1) / 2;
	if (dm_read_arg(_args + 1, &as_nrd, &num_raid_devs, &ti->error))
		return -EINVAL;

	if (!__within_range(num_raid_devs, 1, MAX_RAID_DEVICES)) {
		ti->error = "Invalid number of supplied raid devices";
		return -EINVAL;
	}

	rs = raid_set_alloc(ti, rt, num_raid_devs);
	if (IS_ERR(rs))
		return PTR_ERR(rs);

	r = parse_raid_params(rs, &as, num_raid_params);
	if (r)
		goto bad;

	r = parse_dev_params(rs, &as);
	if (r)
		goto bad;

	rs->md.sync_super = super_sync;

	r = rs_set_dev_and_array_sectors(rs, false);
	if (r)
		goto bad;

	/*
	 * Backup any new raid set level, layout, ...
	 * requested to be able to compare to superblock
	 * members for conversion decisions.
	 */
	rs_config_backup(rs, &rs_layout);

	r = analyse_superblocks(ti, rs);
	if (r)
		goto bad;

	INIT_WORK(&rs->md.event_work, do_table_event);
	ti->private = rs;
	ti->num_flush_bios = 1;

	/* Restore any requested new layout for conversion decision */
	rs_config_restore(rs, &rs_layout);

	if (test_bit(MD_ARRAY_FIRST_USE, &rs->md.flags)) {
		set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
	} else if (rs_is_reshaping(rs))
		; /* skip rs setup */
	else if (rs_takeover_requested(rs)) {
		if (rs_is_reshaping(rs)) {
			ti->error = "Can't takeover a reshaping raid set";
			r = -EPERM;
			goto bad;
		}

		/*
		 * If a takeover is needed, just set the level to
		 * the new requested one and allow the raid set to run.
		 */
		r = rs_check_takeover(rs);
		if (r)
			goto bad;

		r = rs_setup_takeover(rs);
		if (r)
			goto bad;

		set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
		set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags);

	} else if (rs_reshape_requested(rs)) {
		if (rs_is_reshaping(rs)) {
			ti->error = "raid set already reshaping!";
			r = -EPERM;
			goto bad;
		}

		if (rs_is_raid10(rs)) {
			if (rs->raid_disks != rs->md.raid_disks &&
			    __is_raid10_near(rs->md.layout) &&
			    rs->raid10_copies &&
			    rs->raid10_copies != __raid10_near_copies(rs->md.layout)) {
				/*
				 * raid disks have to be a multiple of data copies to allow this conversion.
				 *
				 * This is actually not a reshape it is a
				 * rebuild of any additional mirrors per group
				 */
				if (rs->raid_disks % rs->raid10_copies) {
					ti->error = "Can't reshape raid10 mirror groups";
					r = -EINVAL;
					goto bad;
				}

				/* Userspace reordered disks to add/remove mirrors -> adjust raid_disk indexes */
				__reorder_raid_disk_indexes(rs);
				rs->md.layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR,
									   rs->raid10_copies);
				rs->md.new_layout = rs->md.layout;
			}

			set_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags);

		} else if (rs_is_raid456(rs))
			set_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags);

		/*
		 * HM FIXME: process raid1 via delta_disks as well?
		 * Would cause allocations in raid1->check_reshape
		 * though, thus more issues with potential failures
		 */
		else if (rs_is_raid1(rs)) {
			set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags);
			rs->md.raid_disks = rs->raid_disks;
		}

		if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
			set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
			set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags);
		}

		if (rs->md.raid_disks < rs->raid_disks)
			set_bit(MD_ARRAY_FIRST_USE, &rs->md.flags);
	}

	/* If constructor requested it, change data and new_data offsets */
	r = rs_adjust_data_offsets(rs);
	if (r)
		goto bad;

	/* Start raid set read-only and assumed clean to change in raid_resume() */
	rs->md.ro = 1;
	rs->md.in_sync = 1;
	set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);

	/* Has to be held on running the array */
	mddev_lock_nointr(&rs->md);
	r = md_run(&rs->md);
	rs->md.in_sync = 0; /* Assume already marked dirty */

	if (r) {
		ti->error = "Failed to run raid array";
		mddev_unlock(&rs->md);
		goto bad;
	}

	rs->callbacks.congested_fn = raid_is_congested;
	dm_table_add_target_callbacks(ti->table, &rs->callbacks);

	mddev_suspend(&rs->md);

	/* Try to adjust the raid4/5/6 stripe cache size to the stripe size */
	if (rs_is_raid456(rs)) {
		r = rs_set_raid456_stripe_cache(rs);
		if (r)
			goto bad_stripe_cache;
	}

	/* Now do an early reshape check */
	if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
		r = rs_check_reshape(rs);
		if (r)
			goto bad_check_reshape;

		/* Restore new, ctr requested layout to perform check */
		rs_config_restore(rs, &rs_layout);

		r = rs->md.pers->check_reshape(&rs->md);
		if (r) {
			ti->error = "Reshape check failed";
			goto bad_check_reshape;
		}
	}

	mddev_unlock(&rs->md);
	return 0;

bad_stripe_cache:
bad_check_reshape:
	md_stop(&rs->md);
bad:
	raid_set_free(rs);

	return r;
}
static void raid_dtr(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	list_del_init(&rs->callbacks.list);
	md_stop(&rs->md);
	raid_set_free(rs);
}
static int raid_map(struct dm_target *ti, struct bio *bio)
{
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	/*
	 * If we're reshaping to add disk(s), ti->len and
	 * mddev->array_sectors will differ during the process
	 * (ti->len > mddev->array_sectors), so we have to requeue
	 * bios with addresses > mddev->array_sectors here or
	 * there will occur accesses past EOD of the component
	 * data images thus erroring the raid set.
	 */
	if (unlikely(bio_end_sector(bio) > mddev->array_sectors))
		return DM_MAPIO_REQUEUE;

	mddev->pers->make_request(mddev, bio);

	return DM_MAPIO_SUBMITTED;
}
/* Return string describing the current sync action of @mddev */
static const char *decipher_sync_action(struct mddev *mddev)
{
	if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
		return "frozen";

	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
	    (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
			return "reshape";

		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
			if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
				return "resync";
			else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
				return "check";
			return "repair";
		}

		if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
			return "recover";
	}

	return "idle";
}

/*
 * Return status string for @rdev
 *
 * Status characters:
 *
 *  'D' = Dead/Failed device
 *  'a' = Alive but not in-sync
 *  'A' = Alive and in-sync
 */
static const char *__raid_dev_status(struct md_rdev *rdev, bool array_in_sync)
{
	if (test_bit(Faulty, &rdev->flags))
		return "D";
	else if (!array_in_sync || !test_bit(In_sync, &rdev->flags))
		return "a";
	else
		return "A";
}
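
/*
 * E.g. a 4-device set whose third member is still rebuilding contributes
 * the health string "AAaA" to the STATUSTYPE_INFO output emitted below.
 */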
/* Helper to return resync/reshape progress for @rs and @array_in_sync */
static sector_t rs_get_progress(struct raid_set *rs,
				sector_t resync_max_sectors, bool *array_in_sync)
{
	sector_t r, recovery_cp, curr_resync_completed;
	struct mddev *mddev = &rs->md;

	curr_resync_completed = mddev->curr_resync_completed ?: mddev->recovery_cp;
	recovery_cp = mddev->recovery_cp;
	*array_in_sync = false;

	if (rs_is_raid0(rs)) {
		r = resync_max_sectors;
		*array_in_sync = true;

	} else {
		r = mddev->reshape_position;

		/* Reshape is relative to the array size */
		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
		    r != MaxSector) {
			if (r == MaxSector) {
				*array_in_sync = true;
				r = resync_max_sectors;
			} else {
				/* Got to reverse on backward reshape */
				if (mddev->reshape_backwards)
					r = mddev->array_sectors - r;

				/* Divide by # of data stripes */
				sector_div(r, mddev_data_stripes(rs));
			}

		/* Sync is relative to the component device size */
		} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
			r = curr_resync_completed;
		else
			r = recovery_cp;

		if (r == MaxSector) {
			/* Sync complete */
			*array_in_sync = true;
			r = resync_max_sectors;
		} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
			/*
			 * If "check" or "repair" is occurring, the raid set has
			 * undergone an initial sync and the health characters
			 * should not be 'a' anymore.
			 */
			*array_in_sync = true;
		} else {
			struct md_rdev *rdev;

			/*
			 * The raid set may be doing an initial sync, or it may
			 * be rebuilding individual components. If all the
			 * devices are In_sync, then it is the raid set that is
			 * being initialized.
			 */
			rdev_for_each(rdev, mddev)
				if (!test_bit(In_sync, &rdev->flags))
					*array_in_sync = true;
#if 0
			r = 0; /* HM FIXME: TESTME: https://bugzilla.redhat.com/show_bug.cgi?id=1210637 ? */
#endif
		}
	}

	return r;
}

/* Helper to return @dev name or "-" if !@dev */
static const char *__get_dev_name(struct dm_dev *dev)
{
	return dev ? dev->name : "-";
}
static void raid_status(struct dm_target *ti, status_type_t type,
			unsigned int status_flags, char *result, unsigned int maxlen)
{
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;
	struct r5conf *conf = mddev->private;
	int max_nr_stripes = conf ? conf->max_nr_stripes : 0;
	bool array_in_sync;
	unsigned int raid_param_cnt = 1; /* at least 1 for chunksize */
	unsigned int sz = 0;
	unsigned int write_mostly_params = 0;
	sector_t progress, resync_max_sectors, resync_mismatches;
	const char *sync_action;
	struct raid_type *rt;
	struct md_rdev *rdev;

	switch (type) {
	case STATUSTYPE_INFO:
		/* *Should* always succeed */
		rt = get_raid_type_by_ll(mddev->new_level, mddev->new_layout);
		if (!rt)
			return;

		DMEMIT("%s %d ", rt->name, mddev->raid_disks);

		/* Access most recent mddev properties for status output */
		smp_rmb();
		/* Get sensible max sectors even if raid set not yet started */
		resync_max_sectors = test_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags) ?
				     mddev->resync_max_sectors : mddev->dev_sectors;
		progress = rs_get_progress(rs, resync_max_sectors, &array_in_sync);
		resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ?
				    atomic64_read(&mddev->resync_mismatches) : 0;
		sync_action = decipher_sync_action(&rs->md);

		/* HM FIXME: do we want another state char for raid0? It shows 'D' or 'A' now */
		rdev_for_each(rdev, mddev)
			DMEMIT(__raid_dev_status(rdev, array_in_sync));

		/*
		 * In-sync/Reshape ratio:
		 *  The in-sync ratio shows the progress of:
		 *   - Initializing the raid set
		 *   - Rebuilding a subset of devices of the raid set
		 *  The user can distinguish between the two by referring
		 *  to the status characters.
		 *
		 *  The reshape ratio shows the progress of
		 *  changing the raid layout or the number of
		 *  disks of a raid set
		 */
		DMEMIT(" %llu/%llu", (unsigned long long) progress,
		       (unsigned long long) resync_max_sectors);

		/*
		 * Sync action:
		 *   See Documentation/device-mapper/dm-raid.txt for
		 *   information on each of these states.
		 */
		DMEMIT(" %s", sync_action);

		/*
		 * resync_mismatches/mismatch_cnt
		 *   This field shows the number of discrepancies found when
		 *   performing a "check" of the raid set.
		 */
		DMEMIT(" %llu", (unsigned long long) resync_mismatches);

		/*
		 * data_offset (needed for out of space reshaping)
		 *   This field shows the data offset into the data
		 *   image LV where the first stripes data starts.
		 *
		 * We keep data_offset equal on all raid disks of the set,
		 * so retrieving it from the first raid disk is sufficient.
		 */
		DMEMIT(" %llu", (unsigned long long) rs->dev[0].rdev.data_offset);
		break;

	case STATUSTYPE_TABLE:
		/* Report the table line string you would use to construct this raid set */

		/* Calculate raid parameter count */
		rdev_for_each(rdev, mddev)
			if (test_bit(WriteMostly, &rdev->flags))
				write_mostly_params += 2;
		raid_param_cnt += memweight(rs->rebuild_disks,
					    DISKS_ARRAY_ELEMS * sizeof(*rs->rebuild_disks)) * 2 +
				  write_mostly_params +
				  hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_NO_ARGS) +
				  hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_ONE_ARG) * 2;
		/* Emit table line */
		DMEMIT("%s %u %u", rs->raid_type->name, raid_param_cnt, mddev->new_chunk_sectors);
		if (test_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags))
			DMEMIT(" %s %s", dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_FORMAT),
					 raid10_md_layout_to_format(mddev->layout));
		if (test_bit(__CTR_FLAG_RAID10_COPIES, &rs->ctr_flags))
			DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_COPIES),
					 raid10_md_layout_to_copies(mddev->layout));
		if (test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
			DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_NOSYNC));
		if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
			DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_SYNC));
		if (test_bit(__CTR_FLAG_REGION_SIZE, &rs->ctr_flags))
			DMEMIT(" %s %llu", dm_raid_arg_name_by_flag(CTR_FLAG_REGION_SIZE),
					   (unsigned long long) to_sector(mddev->bitmap_info.chunksize));
		if (test_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags))
			DMEMIT(" %s %llu", dm_raid_arg_name_by_flag(CTR_FLAG_DATA_OFFSET),
					   (unsigned long long) rs->data_offset);
		if (test_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags))
			DMEMIT(" %s %lu", dm_raid_arg_name_by_flag(CTR_FLAG_DAEMON_SLEEP),
					  mddev->bitmap_info.daemon_sleep);
		if (test_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags))
			DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_DELTA_DISKS),
					 mddev->delta_disks);
		if (test_bit(__CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags))
			DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_STRIPE_CACHE),
					 max_nr_stripes);
		rdev_for_each(rdev, mddev)
			if (test_bit(rdev->raid_disk, (void *) rs->rebuild_disks))
				DMEMIT(" %s %u", dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD),
						 rdev->raid_disk);
		rdev_for_each(rdev, mddev)
			if (test_bit(WriteMostly, &rdev->flags))
				DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_WRITE_MOSTLY),
						 rdev->raid_disk);
		if (test_bit(__CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags))
			DMEMIT(" %s %lu", dm_raid_arg_name_by_flag(CTR_FLAG_MAX_WRITE_BEHIND),
					  mddev->bitmap_info.max_write_behind);
		if (test_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags))
			DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_MAX_RECOVERY_RATE),
					 mddev->sync_speed_max);
		if (test_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags))
			DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_MIN_RECOVERY_RATE),
					 mddev->sync_speed_min);
		DMEMIT(" %d", rs->raid_disks);
		rdev_for_each(rdev, mddev) {
			struct raid_dev *rd = container_of(rdev, struct raid_dev, rdev);

			DMEMIT(" %s %s", __get_dev_name(rd->meta_dev),
					 __get_dev_name(rd->data_dev));
		}
	}
}
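
/*
 * Sample STATUSTYPE_INFO line (hypothetical values) as printed by
 * "dmsetup status", matching the DMEMIT() sequence above:
 *
 *   raid10 4 AAAA 8192/8192 idle 0 0
 *
 * i.e. <raid_type> <#devices> <health_chars> <sync_ratio> <sync_action>
 * <mismatch_cnt> <data_offset>.
 */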
static int raid_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	if (!mddev->pers || !mddev->pers->sync_request)
		return -EINVAL;

	if (!strcasecmp(argv[0], "frozen"))
		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	else
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);

	if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) {
		if (mddev->sync_thread) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			md_reap_sync_thread(mddev);
		}
	} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
		   test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
		return -EBUSY;
	else if (!strcasecmp(argv[0], "resync"))
		; /* MD_RECOVERY_NEEDED set below */
	else if (!strcasecmp(argv[0], "recover"))
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
	else {
		if (!strcasecmp(argv[0], "check"))
			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		else if (!!strcasecmp(argv[0], "repair"))
			return -EINVAL;
		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	}
	if (mddev->ro == 2) {
		/* A write to sync_action is enough to justify
		 * canceling read-auto mode
		 */
		mddev->ro = 0;
		if (!mddev->suspended && mddev->sync_thread)
			md_wakeup_thread(mddev->sync_thread);
	}
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	if (!mddev->suspended && mddev->thread)
		md_wakeup_thread(mddev->thread);

	return 0;
}
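
/*
 * Example use of the message interface handled above (hypothetical
 * device name):
 *
 *   dmsetup message my_raid5 0 check	# start a scrub
 *   dmsetup message my_raid5 0 idle	# stop it again
 */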
static int raid_iterate_devices(struct dm_target *ti,
				iterate_devices_callout_fn fn, void *data)
{
	struct raid_set *rs = ti->private;
	unsigned int i;
	int r = 0;

	for (i = 0; !r && i < rs->md.raid_disks; i++)
		if (rs->dev[i].data_dev)
			r = fn(ti,
			       rs->dev[i].data_dev,
			       0, /* No offset on data devs */
			       rs->md.dev_sectors,
			       data);

	return r;
}
static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct raid_set *rs = ti->private;
	unsigned chunk_size = rs->md.chunk_sectors << 9;
	struct r5conf *conf = rs->md.private;

	blk_limits_io_min(limits, chunk_size);
	blk_limits_io_opt(limits, chunk_size * (conf->raid_disks - conf->max_degraded));
}
static void raid_presuspend(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	md_stop_writes(&rs->md);
}
static void raid_postsuspend(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	if (test_and_clear_bit(RT_FLAG_RS_RESUMED, &rs->runtime_flags)) {
		if (!rs->md.suspended)
			mddev_suspend(&rs->md);
		rs->md.ro = 1;
	}
}
static void attempt_restore_of_faulty_devices(struct raid_set *rs)
{
	int i;
	uint64_t failed_devices, cleared_failed_devices = 0;
	unsigned long flags;
	struct dm_raid_superblock *sb;
	struct md_rdev *r;

	for (i = 0; i < rs->md.raid_disks; i++) {
		r = &rs->dev[i].rdev;
		if (test_bit(Faulty, &r->flags) && r->sb_page &&
		    sync_page_io(r, 0, r->sb_size, r->sb_page, REQ_OP_READ, 0,
				 true)) {
			DMINFO("Faulty %s device #%d has readable super block."
			       "  Attempting to revive it.",
			       rs->raid_type->name, i);

			/*
			 * Faulty bit may be set, but sometimes the array can
			 * be suspended before the personalities can respond
			 * by removing the device from the array (i.e. calling
			 * 'hot_remove_disk'). If they haven't yet removed
			 * the failed device, its 'raid_disk' number will be
			 * '>= 0' - meaning we must call this function
			 * ourselves.
			 */
			if ((r->raid_disk >= 0) &&
			    (r->mddev->pers->hot_remove_disk(r->mddev, r) != 0))
				/* Failed to revive this device, try next */
				continue;

			r->raid_disk = i;
			r->saved_raid_disk = i;
			flags = r->flags;
			clear_bit(Faulty, &r->flags);
			clear_bit(WriteErrorSeen, &r->flags);
			clear_bit(In_sync, &r->flags);
			if (r->mddev->pers->hot_add_disk(r->mddev, r)) {
				r->raid_disk = -1;
				r->saved_raid_disk = -1;
				r->flags = flags;
			} else {
				r->recovery_offset = 0;
				cleared_failed_devices |= 1 << i;
			}
		}
	}
	if (cleared_failed_devices) {
		rdev_for_each(r, &rs->md) {
			sb = page_address(r->sb_page);
			failed_devices = le64_to_cpu(sb->failed_devices);
			failed_devices &= ~cleared_failed_devices;
			sb->failed_devices = cpu_to_le64(failed_devices);
		}
	}
}
static int __load_dirty_region_bitmap(struct raid_set *rs)
{
	int r = 0;

	/* Try loading the bitmap unless "raid0", which does not have one */
	if (!rs_is_raid0(rs) &&
	    !test_and_set_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags)) {
		r = bitmap_load(&rs->md);
		if (r)
			DMERR("Failed to load bitmap");
	}

	return r;
}
/* Enforce updating all superblocks */
static void rs_update_sbs(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;
	int ro = mddev->ro;

	set_bit(MD_CHANGE_DEVS, &mddev->flags);
	mddev->ro = 0;
	md_update_sb(mddev, 1);
	mddev->ro = ro;
}

/*
 * Reshape changes raid algorithm of @rs to a new one within personality
 * (e.g. raid6_zr -> raid6_nc), changes stripe size, adds/removes
 * disks from a raid set thus growing/shrinking it or resizes the set
 *
 * Call mddev_lock_nointr() before!
 */
static int rs_start_reshape(struct raid_set *rs)
{
	int r;
	struct mddev *mddev = &rs->md;
	struct md_personality *pers = mddev->pers;

	r = rs_setup_reshape(rs);
	if (r)
		return r;

	/* Need to be resumed to be able to start reshape, recovery is frozen until raid_resume() though */
	if (mddev->suspended)
		mddev_resume(mddev);

	/*
	 * Check any reshape constraints enforced by the personality
	 *
	 * May as well already kick the reshape off so that
	 * pers->start_reshape() becomes optional.
	 */
	r = pers->check_reshape(mddev);
	if (r) {
		rs->ti->error = "pers->check_reshape() failed";
		return r;
	}

	/*
	 * Personality may not provide start reshape method in which
	 * case check_reshape above has already covered everything
	 */
	if (pers->start_reshape) {
		r = pers->start_reshape(mddev);
		if (r) {
			rs->ti->error = "pers->start_reshape() failed";
			return r;
		}
	}

	/* Suspend because a resume will happen in raid_resume() */
	if (!mddev->suspended)
		mddev_suspend(mddev);

	/*
	 * Now reshape got set up, update superblocks to
	 * reflect the fact so that a table reload will
	 * access proper superblock content in the ctr.
	 */
	rs_update_sbs(rs);

	return 0;
}
static int raid_preresume(struct dm_target *ti)
{
	int r;
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	/* This is a resume after a suspend of the set -> it's already started */
	if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags))
		return 0;

	/*
	 * The superblocks need to be updated on disk if the
	 * array is new or new devices got added (thus zeroed
	 * out by userspace) or __load_dirty_region_bitmap
	 * will overwrite them in core with old data or fail.
	 */
	if (test_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags))
		rs_update_sbs(rs);

	/*
	 * Disable/enable discard support on raid set after any
	 * conversion, because devices can have been added
	 */
	configure_discard_support(rs);

	/* Load the bitmap from disk unless raid0 */
	r = __load_dirty_region_bitmap(rs);
	if (r)
		return r;

	/* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) */
	if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) &&
	    mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)) {
		r = bitmap_resize(mddev->bitmap, mddev->dev_sectors,
				  to_bytes(rs->requested_bitmap_chunk_sectors), 0);
		if (r)
			DMERR("Failed to resize bitmap");
	}

	/* Check for any resize/reshape on @rs and adjust/initiate */
	/* Be prepared for mddev_resume() in raid_resume() */
	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	if (mddev->recovery_cp && mddev->recovery_cp < MaxSector) {
		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		mddev->resync_min = mddev->recovery_cp;
	}

	rs_set_capacity(rs);

	/* Check for any reshape request and region size change unless new raid set */
	if (test_and_clear_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
		/* Initiate a reshape. */
		mddev_lock_nointr(mddev);
		r = rs_start_reshape(rs);
		mddev_unlock(mddev);
		if (r)
			DMWARN("Failed to check/start reshape, continuing without change");
		r = 0;
	}

	return r;
}
static void raid_resume(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	if (test_and_set_bit(RT_FLAG_RS_RESUMED, &rs->runtime_flags)) {
		/*
		 * A secondary resume while the device is active.
		 * Take this opportunity to check whether any failed
		 * devices are reachable again.
		 */
		attempt_restore_of_faulty_devices(rs);
	} else {
		mddev->ro = 0;
		mddev->in_sync = 0;

		/*
		 * When passing in flags to the ctr, we expect userspace
		 * to reset them because they made it to the superblocks
		 * and reload the mapping anyway.
		 *
		 * -> only unfreeze recovery in case of a table reload or
		 *    we'll have a bogus recovery/reshape position
		 *    retrieved from the superblock by the ctr because
		 *    the ongoing recovery/reshape will change it after read.
		 */
		if (!test_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags))
			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);

		if (mddev->suspended)
			mddev_resume(mddev);
	}
}
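
/*
 * Note on ordering as driven by the dm core: on a table reload the old
 * table gets raid_presuspend()/raid_postsuspend() and the new one
 * raid_preresume()/raid_resume(), which is why raid_preresume() runs its
 * one-time setup under RT_FLAG_RS_PRERESUMED above.
 */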
static struct target_type raid_target = {
	.name = "raid",
	.version = {1, 9, 0},
	.module = THIS_MODULE,
	.ctr = raid_ctr,
	.dtr = raid_dtr,
	.map = raid_map,
	.status = raid_status,
	.message = raid_message,
	.iterate_devices = raid_iterate_devices,
	.io_hints = raid_io_hints,
	.presuspend = raid_presuspend,
	.postsuspend = raid_postsuspend,
	.preresume = raid_preresume,
	.resume = raid_resume,
};
static int __init dm_raid_init(void)
{
	DMINFO("Loading target version %u.%u.%u",
	       raid_target.version[0],
	       raid_target.version[1],
	       raid_target.version[2]);
	return dm_register_target(&raid_target);
}

static void __exit dm_raid_exit(void)
{
	dm_unregister_target(&raid_target);
}

module_init(dm_raid_init);
module_exit(dm_raid_exit);

module_param(devices_handle_discard_safely, bool, 0644);
MODULE_PARM_DESC(devices_handle_discard_safely,
		 "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");

MODULE_DESCRIPTION(DM_NAME " raid0/1/10/4/5/6 target");
MODULE_ALIAS("dm-raid0");
MODULE_ALIAS("dm-raid1");
MODULE_ALIAS("dm-raid10");
MODULE_ALIAS("dm-raid4");
MODULE_ALIAS("dm-raid5");
MODULE_ALIAS("dm-raid6");
MODULE_AUTHOR("Neil Brown <dm-devel@redhat.com>");
MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");