/*
 * Copyright (C) 2010-2011 Neil Brown
 * Copyright (C) 2010-2011 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/slab.h>
#include <linux/module.h>

#include "md.h"
#include "raid1.h"
#include "raid5.h"
#include "bitmap.h"

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "raid"

/*
 * The following flags are used by dm-raid.c to set up the array state.
 * They must be cleared before md_run is called.
 */
#define FirstUse 10		/* rdev flag */
struct raid_dev {
	/*
	 * Two DM devices, one to hold metadata and one to hold the
	 * actual data/parity.  The reason for this is to not confuse
	 * ti->len and give more flexibility in altering size and
	 * characteristics.
	 *
	 * While it is possible for this device to be associated
	 * with a different physical device than the data_dev, it
	 * is intended for it to be the same.
	 *    |--------- Physical Device ---------|
	 *    |- meta_dev -|------ data_dev ------|
	 */
	struct dm_dev *meta_dev;
	struct dm_dev *data_dev;
	struct md_rdev rdev;
};
/*
 * Flags for rs->print_flags field.
 */
#define DMPF_SYNC              0x1
#define DMPF_NOSYNC            0x2
#define DMPF_REBUILD           0x4
#define DMPF_DAEMON_SLEEP      0x8
#define DMPF_MIN_RECOVERY_RATE 0x10
#define DMPF_MAX_RECOVERY_RATE 0x20
#define DMPF_MAX_WRITE_BEHIND  0x40
#define DMPF_STRIPE_CACHE      0x80
#define DMPF_REGION_SIZE       0x100
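/*
 * Each flag records that the corresponding optional table argument was
 * supplied, so that raid_status(STATUSTYPE_TABLE) can reconstruct the
 * constructor string faithfully.
 */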
struct raid_set {
	struct dm_target *ti;

	uint32_t bitmap_loaded;
	uint64_t print_flags;

	struct mddev md;
	struct raid_type *raid_type;
	struct dm_target_callbacks callbacks;

	struct raid_dev dev[0];
};
/* Supported raid types and properties. */
static struct raid_type {
	const char *name;		/* RAID algorithm. */
	const char *descr;		/* Descriptor text for logging. */
	const unsigned parity_devs;	/* # of parity devices. */
	const unsigned minimal_devs;	/* minimal # of devices in set. */
	const unsigned level;		/* RAID level. */
	const unsigned algorithm;	/* RAID algorithm. */
} raid_types[] = {
	{"raid1",    "RAID1 (mirroring)",		0, 2, 1, 0 /* NONE */},
	{"raid4",    "RAID4 (dedicated parity disk)",	1, 2, 5, ALGORITHM_PARITY_0},
	{"raid5_la", "RAID5 (left asymmetric)",		1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC},
	{"raid5_ra", "RAID5 (right asymmetric)",	1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC},
	{"raid5_ls", "RAID5 (left symmetric)",		1, 2, 5, ALGORITHM_LEFT_SYMMETRIC},
	{"raid5_rs", "RAID5 (right symmetric)",		1, 2, 5, ALGORITHM_RIGHT_SYMMETRIC},
	{"raid6_zr", "RAID6 (zero restart)",		2, 4, 6, ALGORITHM_ROTATING_ZERO_RESTART},
	{"raid6_nr", "RAID6 (N restart)",		2, 4, 6, ALGORITHM_ROTATING_N_RESTART},
	{"raid6_nc", "RAID6 (N continue)",		2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE}
};
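/*
 * Note that "raid4" maps onto MD level 5 with ALGORITHM_PARITY_0 above:
 * MD implements a dedicated-parity-disk array as a RAID5 layout whose
 * parity is pinned to the first device.
 */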
static struct raid_type *get_raid_type(char *name)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(raid_types); i++)
		if (!strcmp(raid_types[i].name, name))
			return &raid_types[i];

	return NULL;
}
static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *raid_type, unsigned raid_devs)
{
	unsigned i;
	struct raid_set *rs;

	if (raid_devs <= raid_type->parity_devs) {
		ti->error = "Insufficient number of devices";
		return ERR_PTR(-EINVAL);
	}

	rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL);
	if (!rs) {
		ti->error = "Cannot allocate raid context";
		return ERR_PTR(-ENOMEM);
	}

	mddev_init(&rs->md);

	rs->ti = ti;
	rs->raid_type = raid_type;
	rs->md.raid_disks = raid_devs;
	rs->md.level = raid_type->level;
	rs->md.new_level = rs->md.level;
	rs->md.layout = raid_type->algorithm;
	rs->md.new_layout = rs->md.layout;
	rs->md.delta_disks = 0;
	rs->md.recovery_cp = 0;

	for (i = 0; i < raid_devs; i++)
		md_rdev_init(&rs->dev[i].rdev);

	/*
	 * Remaining items to be initialized by further RAID params:
	 *  rs->md.persistent
	 *  rs->md.external
	 *  rs->md.chunk_sectors
	 *  rs->md.new_chunk_sectors
	 *  rs->md.dev_sectors
	 */

	return rs;
}
static void context_free(struct raid_set *rs)
{
	int i;

	for (i = 0; i < rs->md.raid_disks; i++) {
		if (rs->dev[i].meta_dev)
			dm_put_device(rs->ti, rs->dev[i].meta_dev);
		md_rdev_clear(&rs->dev[i].rdev);
		if (rs->dev[i].data_dev)
			dm_put_device(rs->ti, rs->dev[i].data_dev);
	}

	kfree(rs);
}
/*
 * For every device we have two words
 *  <meta_dev>: meta device name or '-' if missing
 *  <data_dev>: data device name or '-' if missing
 *
 * The following are permitted:
 *    - -
 *    - <data_dev>
 *    <meta_dev> <data_dev>
 *
 * The following is not allowed:
 *    <meta_dev> -
 *
 * This code parses those words.  If there is a failure,
 * the caller must use context_free to unwind the operations.
 */
static int dev_parms(struct raid_set *rs, char **argv)
{
	int i;
	int rebuild = 0;
	int metadata_available = 0;
	int ret = 0;

	for (i = 0; i < rs->md.raid_disks; i++, argv += 2) {
		rs->dev[i].rdev.raid_disk = i;

		rs->dev[i].meta_dev = NULL;
		rs->dev[i].data_dev = NULL;

		/*
		 * There are no offsets, since there is a separate device
		 * for data and metadata.
		 */
		rs->dev[i].rdev.data_offset = 0;
		rs->dev[i].rdev.mddev = &rs->md;
		if (strcmp(argv[0], "-")) {
			ret = dm_get_device(rs->ti, argv[0],
					    dm_table_get_mode(rs->ti->table),
					    &rs->dev[i].meta_dev);
			rs->ti->error = "RAID metadata device lookup failure";
			if (ret)
				return ret;

			rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL);
			if (!rs->dev[i].rdev.sb_page)
				return -ENOMEM;
		}
		if (!strcmp(argv[1], "-")) {
			if (!test_bit(In_sync, &rs->dev[i].rdev.flags) &&
			    (!rs->dev[i].rdev.recovery_offset)) {
				rs->ti->error = "Drive designated for rebuild not specified";
				return -EINVAL;
			}

			rs->ti->error = "No data device supplied with metadata device";
			if (rs->dev[i].meta_dev)
				return -EINVAL;

			continue;
		}
		ret = dm_get_device(rs->ti, argv[1],
				    dm_table_get_mode(rs->ti->table),
				    &rs->dev[i].data_dev);
		if (ret) {
			rs->ti->error = "RAID device lookup failure";
			return ret;
		}
		if (rs->dev[i].meta_dev) {
			metadata_available = 1;
			rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev;
		}
		rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
		list_add(&rs->dev[i].rdev.same_set, &rs->md.disks);
		if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
			rebuild++;
	}
	if (metadata_available) {
		rs->md.external = 0;
		rs->md.persistent = 1;
		rs->md.major_version = 2;
	} else if (rebuild && !rs->md.recovery_cp) {
		/*
		 * Without metadata, we will not be able to tell if the array
		 * is in-sync or not - we must assume it is not.  Therefore,
		 * it is impossible to rebuild a drive.
		 *
		 * Even if there is metadata, the on-disk information may
		 * indicate that the array is not in-sync and it will then
		 * fail at that time.
		 *
		 * User could specify 'nosync' option if desperate.
		 */
		DMERR("Unable to rebuild drive while array is not in-sync");
		rs->ti->error = "Unable to rebuild drive while array is not in-sync";
		return -EINVAL;
	}

	return 0;
}
/*
 * validate_region_size
 * @rs
 * @region_size:  region size in sectors.  If 0, pick a size (4MiB default).
 *
 * Set rs->md.bitmap_info.chunksize (which really refers to 'region size').
 * Ensure that (ti->len/region_size < 2^21) - required by MD bitmap.
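 * (For example, a 2^31-sector - 1 TiB - target gives min_region_size =
 *  2^31 / 2^21 = 1024 sectors; figures here are illustrative.)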
 *
 * Returns: 0 on success, -EINVAL on failure.
 */
static int validate_region_size(struct raid_set *rs, unsigned long region_size)
{
	unsigned long min_region_size = rs->ti->len / (1 << 21);

	if (!region_size) {
		/*
		 * Choose a reasonable default.  All figures in sectors.
		 */
		if (min_region_size > (1 << 13)) {
			region_size = min_region_size;
			DMINFO("Choosing default region size of %lu sectors",
			       region_size);
		} else {
			DMINFO("Choosing default region size of 4MiB");
			region_size = 1 << 13; /* sectors */
		}
	} else {
		/*
		 * Validate user-supplied value.
		 */
		if (region_size > rs->ti->len) {
			rs->ti->error = "Supplied region size is too large";
			return -EINVAL;
		}

		if (region_size < min_region_size) {
			DMERR("Supplied region_size (%lu sectors) below minimum (%lu)",
			      region_size, min_region_size);
			rs->ti->error = "Supplied region size is too small";
			return -EINVAL;
		}

		if (!is_power_of_2(region_size)) {
			rs->ti->error = "Region size is not a power of 2";
			return -EINVAL;
		}

		if (region_size < rs->md.chunk_sectors) {
			rs->ti->error = "Region size is smaller than the chunk size";
			return -EINVAL;
		}
	}

	/*
	 * Convert sectors to bytes.
	 */
	rs->md.bitmap_info.chunksize = (region_size << 9);

	return 0;
}
/*
 * Possible arguments are...
 *	<chunk_size> [optional_args]
 *
 * Argument definitions
 *    <chunk_size>			The number of sectors per disk that
 *					will form the "stripe"
 *    [[no]sync]			Force or prevent recovery of the
 *					entire array
 *    [rebuild <idx>]			Rebuild the drive indicated by the index
 *    [daemon_sleep <ms>]		Time between bitmap daemon work to
 *					clear bits
 *    [min_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
 *    [max_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
 *    [write_mostly <idx>]		Indicate a write-mostly drive via index
 *    [max_write_behind <sectors>]	See '--write-behind=' (man mdadm)
 *    [stripe_cache <sectors>]		Stripe cache size for higher RAIDs
 *    [region_size <sectors>]		Defines granularity of bitmap
 */
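/*
 * For illustration only (values are hypothetical, not taken from the
 * documentation above): the <raid_params> set "64 region_size 1024"
 * requests 64-sector chunks and a 1024-sector bitmap region, and would
 * be announced to the constructor with <#raid_params> = 3, since the
 * count includes <chunk_size>.
 */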
static int parse_raid_params(struct raid_set *rs, char **argv,
			     unsigned num_raid_params)
{
	unsigned i, rebuild_cnt = 0;
	unsigned long value, region_size = 0;
	sector_t sectors_per_dev = rs->ti->len;
	sector_t max_io_len;
	char *key;

	/*
	 * First, parse the in-order required arguments
	 * "chunk_size" is the only argument of this type.
	 */
	if ((strict_strtoul(argv[0], 10, &value) < 0)) {
		rs->ti->error = "Bad chunk size";
		return -EINVAL;
	} else if (rs->raid_type->level == 1) {
		if (value)
			DMERR("Ignoring chunk size parameter for RAID 1");
		value = 0;
	} else if (!is_power_of_2(value)) {
		rs->ti->error = "Chunk size must be a power of 2";
		return -EINVAL;
	} else if (value < 8) {
		rs->ti->error = "Chunk size value is too small";
		return -EINVAL;
	}

	rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;
	argv++;
	num_raid_params--;

	/*
	 * We set each individual device as In_sync with a completed
	 * 'recovery_offset'.  If there has been a device failure or
	 * replacement then one of the following cases applies:
	 *
	 * 1) User specifies 'rebuild'.
	 *    - Device is reset when param is read.
	 * 2) A new device is supplied.
	 *    - No matching superblock found, resets device.
	 * 3) Device failure was transient and returns on reload.
	 *    - Failure noticed, resets device for bitmap replay.
	 * 4) Device hadn't completed recovery after previous failure.
	 *    - Superblock is read and overrides recovery_offset.
	 *
	 * What is found in the superblocks of the devices is always
	 * authoritative, unless 'rebuild' or '[no]sync' was specified.
	 */
	for (i = 0; i < rs->md.raid_disks; i++) {
		set_bit(In_sync, &rs->dev[i].rdev.flags);
		rs->dev[i].rdev.recovery_offset = MaxSector;
	}
	/*
	 * Second, parse the unordered optional arguments
	 */
	for (i = 0; i < num_raid_params; i++) {
		if (!strcasecmp(argv[i], "nosync")) {
			rs->md.recovery_cp = MaxSector;
			rs->print_flags |= DMPF_NOSYNC;
			continue;
		}
		if (!strcasecmp(argv[i], "sync")) {
			rs->md.recovery_cp = 0;
			rs->print_flags |= DMPF_SYNC;
			continue;
		}
		/* The rest of the optional arguments come in key/value pairs */
		if ((i + 1) >= num_raid_params) {
			rs->ti->error = "Wrong number of raid parameters given";
			return -EINVAL;
		}

		key = argv[i++];
		if (strict_strtoul(argv[i], 10, &value) < 0) {
			rs->ti->error = "Bad numerical argument given in raid params";
			return -EINVAL;
		}
		if (!strcasecmp(key, "rebuild")) {
			rebuild_cnt++;

			switch (rs->raid_type->level) {
			case 1:
				if (rebuild_cnt >= rs->md.raid_disks) {
					rs->ti->error = "Too many rebuild devices specified";
					return -EINVAL;
				}
				break;
			case 4:
			case 5:
			case 6:
				if (rebuild_cnt > rs->raid_type->parity_devs) {
					rs->ti->error = "Too many rebuild devices specified for given RAID type";
					return -EINVAL;
				}
				break;
			default:
				DMERR("The rebuild parameter is not supported for %s", rs->raid_type->name);
				rs->ti->error = "Rebuild not supported for this RAID type";
				return -EINVAL;
			}

			if (value >= rs->md.raid_disks) {
				rs->ti->error = "Invalid rebuild index given";
				return -EINVAL;
			}
			clear_bit(In_sync, &rs->dev[value].rdev.flags);
			rs->dev[value].rdev.recovery_offset = 0;
			rs->print_flags |= DMPF_REBUILD;
		} else if (!strcasecmp(key, "write_mostly")) {
			if (rs->raid_type->level != 1) {
				rs->ti->error = "write_mostly option is only valid for RAID1";
				return -EINVAL;
			}
			if (value >= rs->md.raid_disks) {
				rs->ti->error = "Invalid write_mostly drive index given";
				return -EINVAL;
			}
			set_bit(WriteMostly, &rs->dev[value].rdev.flags);
		} else if (!strcasecmp(key, "max_write_behind")) {
			if (rs->raid_type->level != 1) {
				rs->ti->error = "max_write_behind option is only valid for RAID1";
				return -EINVAL;
			}
			rs->print_flags |= DMPF_MAX_WRITE_BEHIND;

			/*
			 * In device-mapper, we specify things in sectors, but
			 * MD records this value in kB
			 */
			value /= 2;
			if (value > COUNTER_MAX) {
				rs->ti->error = "Max write-behind limit out of range";
				return -EINVAL;
			}
			rs->md.bitmap_info.max_write_behind = value;
		} else if (!strcasecmp(key, "daemon_sleep")) {
			rs->print_flags |= DMPF_DAEMON_SLEEP;
			if (!value || (value > MAX_SCHEDULE_TIMEOUT)) {
				rs->ti->error = "daemon sleep period out of range";
				return -EINVAL;
			}
			rs->md.bitmap_info.daemon_sleep = value;
		} else if (!strcasecmp(key, "stripe_cache")) {
			rs->print_flags |= DMPF_STRIPE_CACHE;

			/*
			 * In device-mapper, we specify things in sectors, but
			 * MD records this value in kB
			 */
			value /= 2;

			if (rs->raid_type->level < 5) {
				rs->ti->error = "Inappropriate argument: stripe_cache";
				return -EINVAL;
			}
			if (raid5_set_cache_size(&rs->md, (int)value)) {
				rs->ti->error = "Bad stripe_cache size";
				return -EINVAL;
			}
		} else if (!strcasecmp(key, "min_recovery_rate")) {
			rs->print_flags |= DMPF_MIN_RECOVERY_RATE;
			if (value > INT_MAX) {
				rs->ti->error = "min_recovery_rate out of range";
				return -EINVAL;
			}
			rs->md.sync_speed_min = (int)value;
		} else if (!strcasecmp(key, "max_recovery_rate")) {
			rs->print_flags |= DMPF_MAX_RECOVERY_RATE;
			if (value > INT_MAX) {
				rs->ti->error = "max_recovery_rate out of range";
				return -EINVAL;
			}
			rs->md.sync_speed_max = (int)value;
		} else if (!strcasecmp(key, "region_size")) {
			rs->print_flags |= DMPF_REGION_SIZE;
			region_size = value;
		} else {
			DMERR("Unable to parse RAID parameter: %s", key);
			rs->ti->error = "Unable to parse RAID parameters";
			return -EINVAL;
		}
	}

	if (validate_region_size(rs, region_size))
		return -EINVAL;
	if (rs->md.chunk_sectors)
		max_io_len = rs->md.chunk_sectors;
	else
		max_io_len = region_size;

	if (dm_set_target_max_io_len(rs->ti, max_io_len))
		return -EINVAL;

	/* sector_div() divides sectors_per_dev in place and returns the remainder */
	if ((rs->raid_type->level > 1) &&
	    sector_div(sectors_per_dev, (rs->md.raid_disks - rs->raid_type->parity_devs))) {
		rs->ti->error = "Target length not divisible by number of data devices";
		return -EINVAL;
	}
	rs->md.dev_sectors = sectors_per_dev;

	/* Assume there are no metadata devices until the drives are parsed */
	rs->md.persistent = 0;
	rs->md.external = 1;

	return 0;
}
static void do_table_event(struct work_struct *ws)
{
	struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);

	dm_table_event(rs->ti->table);
}
static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
{
	struct raid_set *rs = container_of(cb, struct raid_set, callbacks);

	if (rs->raid_type->level == 1)
		return md_raid1_congested(&rs->md, bits);

	return md_raid5_congested(&rs->md, bits);
}
/*
 * This structure is never routinely used by userspace, unlike md superblocks.
 * Devices with this superblock should only ever be accessed via device-mapper.
 */
#define DM_RAID_MAGIC 0x64526D44
struct dm_raid_superblock {
	__le32 magic;		/* "DmRd" */
	__le32 features;	/* Used to indicate possible future changes */

	__le32 num_devices;	/* Number of devices in this array. (Max 64) */
	__le32 array_position;	/* The position of this drive in the array */

	__le64 events;		/* Incremented by md when superblock updated */
	__le64 failed_devices;	/* Bit field of devices to indicate failures */

	/*
	 * This offset tracks the progress of the repair or replacement of
	 * an individual drive.
	 */
	__le64 disk_recovery_offset;

	/*
	 * This offset tracks the progress of the initial array
	 * synchronisation/parity calculation.
	 */
	__le64 array_resync_offset;

	/*
	 * RAID characteristics
	 */
	__le32 level;
	__le32 layout;
	__le32 stripe_sectors;

	__u8 pad[452];		/* Round struct to 512 bytes. */
				/* Always set to 0 when writing. */
} __packed;
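/*
 * Size check: the fields above total 60 bytes (seven __le32 plus four
 * __le64 members), so the 452-byte pad rounds the on-disk superblock
 * to exactly one 512-byte sector.
 */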
static int read_disk_sb(struct md_rdev *rdev, int size)
{
	BUG_ON(!rdev->sb_page);

	if (rdev->sb_loaded)
		return 0;

	if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, 1)) {
		DMERR("Failed to read superblock of device at position %d",
		      rdev->raid_disk);
		md_error(rdev->mddev, rdev);
		return -EINVAL;
	}

	rdev->sb_loaded = 1;

	return 0;
}
static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	int i;
	uint64_t failed_devices;
	struct dm_raid_superblock *sb;
	struct raid_set *rs = container_of(mddev, struct raid_set, md);

	sb = page_address(rdev->sb_page);
	failed_devices = le64_to_cpu(sb->failed_devices);

	for (i = 0; i < mddev->raid_disks; i++)
		if (!rs->dev[i].data_dev ||
		    test_bit(Faulty, &(rs->dev[i].rdev.flags)))
			failed_devices |= (1ULL << i);

	memset(sb, 0, sizeof(*sb));

	sb->magic = cpu_to_le32(DM_RAID_MAGIC);
	sb->features = cpu_to_le32(0);	/* No features yet */

	sb->num_devices = cpu_to_le32(mddev->raid_disks);
	sb->array_position = cpu_to_le32(rdev->raid_disk);

	sb->events = cpu_to_le64(mddev->events);
	sb->failed_devices = cpu_to_le64(failed_devices);

	sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset);
	sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp);

	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);
	sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);
}
/*
 * This function creates a superblock if one is not found on the device
 * and will decide which superblock to use if there's a choice.
 *
 * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise
 */
static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
{
	int ret;
	struct dm_raid_superblock *sb;
	struct dm_raid_superblock *refsb;
	uint64_t events_sb, events_refsb;

	rdev->sb_start = 0;
	rdev->sb_size = sizeof(*sb);

	ret = read_disk_sb(rdev, rdev->sb_size);
	if (ret)
		return ret;

	sb = page_address(rdev->sb_page);
	/*
	 * Two cases that we want to write new superblocks and rebuild:
	 * 1) New device (no matching magic number)
	 * 2) Device specified for rebuild (!In_sync w/ offset == 0)
	 */
	if ((sb->magic != cpu_to_le32(DM_RAID_MAGIC)) ||
	    (!test_bit(In_sync, &rdev->flags) && !rdev->recovery_offset)) {
		super_sync(rdev->mddev, rdev);

		set_bit(FirstUse, &rdev->flags);

		/* Force writing of superblocks to disk */
		set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags);

		/* Any superblock is better than none, choose that if given */
		return refdev ? 0 : 1;
	}

	if (!refdev)
		return 1;
	events_sb = le64_to_cpu(sb->events);

	refsb = page_address(refdev->sb_page);
	events_refsb = le64_to_cpu(refsb->events);

	return (events_sb > events_refsb) ? 1 : 0;
}
static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev)
{
	int role;
	struct raid_set *rs = container_of(mddev, struct raid_set, md);
	uint64_t events_sb;
	uint64_t failed_devices;
	struct dm_raid_superblock *sb;
	uint32_t new_devs = 0;
	uint32_t rebuilds = 0;
	struct md_rdev *r;
	struct dm_raid_superblock *sb2;

	sb = page_address(rdev->sb_page);
	events_sb = le64_to_cpu(sb->events);
	failed_devices = le64_to_cpu(sb->failed_devices);
	/*
	 * Initialise to 1 if this is a new superblock.
	 */
	mddev->events = events_sb ? : 1;

	/*
	 * Reshaping is not currently allowed
	 */
	if ((le32_to_cpu(sb->level) != mddev->level) ||
	    (le32_to_cpu(sb->layout) != mddev->layout) ||
	    (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors)) {
		DMERR("Reshaping arrays not yet supported.");
		return -EINVAL;
	}

	/* We can only change the number of devices in RAID1 right now */
	if ((rs->raid_type->level != 1) &&
	    (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) {
		DMERR("Reshaping arrays not yet supported.");
		return -EINVAL;
	}

	if (!(rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC)))
		mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);
	/*
	 * During load, we set FirstUse if a new superblock was written.
	 * There are two reasons we might not have a superblock:
	 * 1) The array is brand new - in which case, all of the
	 *    devices must have their In_sync bit set.  Also,
	 *    recovery_cp must be 0, unless forced.
	 * 2) This is a new device being added to an old array
	 *    and the new device needs to be rebuilt - in which
	 *    case the In_sync bit will /not/ be set and
	 *    recovery_cp must be MaxSector.
	 */
	rdev_for_each(r, mddev) {
		if (!test_bit(In_sync, &r->flags)) {
			DMINFO("Device %d specified for rebuild: "
			       "Clearing superblock", r->raid_disk);
			rebuilds++;
		} else if (test_bit(FirstUse, &r->flags))
			new_devs++;
	}

	if (!rebuilds) {
		if (new_devs == mddev->raid_disks) {
			DMINFO("Superblocks created for new array");
			set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
		} else if (new_devs) {
			DMERR("New device injected "
			      "into existing array without 'rebuild' "
			      "parameter specified");
			return -EINVAL;
		}
	} else if (new_devs) {
		DMERR("'rebuild' devices cannot be "
		      "injected into an array with other first-time devices");
		return -EINVAL;
	} else if (mddev->recovery_cp != MaxSector) {
		DMERR("'rebuild' specified while array is not in-sync");
		return -EINVAL;
	}
	/*
	 * Now we set the Faulty bit for those devices that are
	 * recorded in the superblock as failed.
	 */
	rdev_for_each(r, mddev) {
		if (!r->sb_page)
			continue;
		sb2 = page_address(r->sb_page);
		sb2->failed_devices = 0;

		/*
		 * Check for any device re-ordering.
		 */
		if (!test_bit(FirstUse, &r->flags) && (r->raid_disk >= 0)) {
			role = le32_to_cpu(sb2->array_position);
			if (role != r->raid_disk) {
				if (rs->raid_type->level != 1) {
					rs->ti->error = "Cannot change device "
							"positions in RAID array";
					return -EINVAL;
				}
				DMINFO("RAID1 device #%d now at position #%d",
				       role, r->raid_disk);
			}

			/*
			 * Partial recovery is performed on
			 * returning failed devices.
			 */
			if (failed_devices & (1ULL << role))
				set_bit(Faulty, &r->flags);
		}
	}

	return 0;
}
static int super_validate(struct mddev *mddev, struct md_rdev *rdev)
{
	struct dm_raid_superblock *sb = page_address(rdev->sb_page);

	/*
	 * If mddev->events is not set, we know we have not yet initialized
	 * the array.
	 */
	if (!mddev->events && super_init_validation(mddev, rdev))
		return -EINVAL;

	mddev->bitmap_info.offset = 4096 >> 9; /* Enable bitmap creation */
	rdev->mddev->bitmap_info.default_offset = 4096 >> 9;
	if (!test_bit(FirstUse, &rdev->flags)) {
		rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
		if (rdev->recovery_offset != MaxSector)
			clear_bit(In_sync, &rdev->flags);
	}

	/*
	 * If a device comes back, set it as not In_sync and no longer faulty.
	 */
	if (test_bit(Faulty, &rdev->flags)) {
		clear_bit(Faulty, &rdev->flags);
		clear_bit(In_sync, &rdev->flags);
		rdev->saved_raid_disk = rdev->raid_disk;
		rdev->recovery_offset = 0;
	}

	clear_bit(FirstUse, &rdev->flags);

	return 0;
}
/*
 * Analyse superblocks and select the freshest.
 */
static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
{
	int ret;
	unsigned redundancy = 0;
	struct raid_dev *dev;
	struct md_rdev *rdev, *tmp, *freshest;
	struct mddev *mddev = &rs->md;

	switch (rs->raid_type->level) {
	case 1:
		redundancy = rs->md.raid_disks - 1;
		break;
	case 4:
	case 5:
	case 6:
		redundancy = rs->raid_type->parity_devs;
		break;
	default:
		ti->error = "Unknown RAID type";
		return -EINVAL;
	}
	freshest = NULL;
	rdev_for_each_safe(rdev, tmp, mddev) {
		if (!rdev->meta_bdev)
			continue;

		ret = super_load(rdev, freshest);

		switch (ret) {
		case 1:
			freshest = rdev;
			break;
		case 0:
			break;
		default:
			dev = container_of(rdev, struct raid_dev, rdev);
			if (redundancy--) {
				if (dev->meta_dev)
					dm_put_device(ti, dev->meta_dev);

				dev->meta_dev = NULL;
				rdev->meta_bdev = NULL;

				if (rdev->sb_page)
					put_page(rdev->sb_page);

				rdev->sb_page = NULL;

				rdev->sb_loaded = 0;

				/*
				 * We might be able to salvage the data device
				 * even though the meta device has failed.  For
				 * now, we behave as though '- -' had been
				 * set for this device in the table.
				 */
				if (dev->data_dev)
					dm_put_device(ti, dev->data_dev);

				dev->data_dev = NULL;
				rdev->bdev = NULL;

				list_del(&rdev->same_set);

				continue;
			}
			ti->error = "Failed to load superblock";
			return ret;
		}
	}

	if (!freshest)
		return 0;
	/*
	 * Validation of the freshest device provides the source of
	 * validation for the remaining devices.
	 */
	ti->error = "Unable to assemble array: Invalid superblocks";
	if (super_validate(mddev, freshest))
		return -EINVAL;

	rdev_for_each(rdev, mddev)
		if ((rdev != freshest) && super_validate(mddev, rdev))
			return -EINVAL;

	return 0;
}
/*
 * Construct a RAID4/5/6 mapping:
 * Args:
 *	<raid_type> <#raid_params> <raid_params> \
 *	<#raid_devs> { <meta_dev1> <dev1> .. <meta_devN> <devN> }
 *
 * <raid_params> varies by <raid_type>.  See 'parse_raid_params' for
 * details on possible <raid_params>.
 */
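/*
 * For example (illustrative only - device names and sizes are made up,
 * not taken from this file):
 *
 *   dmsetup create my_raid --table \
 *     "0 1960893648 raid raid5_ls 3 64 region_size 1024 \
 *      3 - /dev/sdb1 - /dev/sdc1 - /dev/sdd1"
 *
 * builds a 3-device raid5_ls array with 64-sector chunks, a
 * 1024-sector bitmap region size, and no metadata devices ('-').
 */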
static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	int ret;
	struct raid_type *rt;
	unsigned long num_raid_params, num_raid_devs;
	struct raid_set *rs = NULL;

	/* Must have at least <raid_type> <#raid_params> */
	if (argc < 2) {
		ti->error = "Too few arguments";
		return -EINVAL;
	}

	/* raid type */
	rt = get_raid_type(argv[0]);
	if (!rt) {
		ti->error = "Unrecognised raid_type";
		return -EINVAL;
	}
	argc--;
	argv++;

	/* number of RAID parameters */
	if (strict_strtoul(argv[0], 10, &num_raid_params) < 0) {
		ti->error = "Cannot understand number of RAID parameters";
		return -EINVAL;
	}
	argc--;
	argv++;

	/* Skip over RAID params for now and find out # of devices */
	if (num_raid_params + 1 > argc) {
		ti->error = "Arguments do not agree with counts given";
		return -EINVAL;
	}

	if ((strict_strtoul(argv[num_raid_params], 10, &num_raid_devs) < 0) ||
	    (num_raid_devs >= INT_MAX)) {
		ti->error = "Cannot understand number of raid devices";
		return -EINVAL;
	}

	rs = context_alloc(ti, rt, (unsigned)num_raid_devs);
	if (IS_ERR(rs))
		return PTR_ERR(rs);
	ret = parse_raid_params(rs, argv, (unsigned)num_raid_params);
	if (ret)
		goto bad;

	ret = -EINVAL;

	argc -= num_raid_params + 1; /* +1: we already have num_raid_devs */
	argv += num_raid_params + 1;

	if (argc != (num_raid_devs * 2)) {
		ti->error = "Number of supplied RAID devices does not match the count given";
		goto bad;
	}
	ret = dev_parms(rs, argv);
	if (ret)
		goto bad;

	rs->md.sync_super = super_sync;
	ret = analyse_superblocks(ti, rs);
	if (ret)
		goto bad;

	INIT_WORK(&rs->md.event_work, do_table_event);
	ti->private = rs;
	ti->num_flush_requests = 1;

	mutex_lock(&rs->md.reconfig_mutex);
	ret = md_run(&rs->md);
	rs->md.in_sync = 0; /* Assume already marked dirty */
	mutex_unlock(&rs->md.reconfig_mutex);

	if (ret) {
		ti->error = "Failed to run raid array";
		goto bad;
	}

	rs->callbacks.congested_fn = raid_is_congested;
	dm_table_add_target_callbacks(ti->table, &rs->callbacks);

	mddev_suspend(&rs->md);
	return 0;

bad:
	context_free(rs);

	return ret;
}
static void raid_dtr(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	list_del_init(&rs->callbacks.list);
	md_stop(&rs->md);
	context_free(rs);
}
static int raid_map(struct dm_target *ti, struct bio *bio, union map_info *map_context)
{
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	mddev->pers->make_request(mddev, bio);
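
	/*
	 * md's make_request() has taken ownership of the bio, so return
	 * DM_MAPIO_SUBMITTED to tell the dm core not to remap or resubmit it.
	 */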
	return DM_MAPIO_SUBMITTED;
}
static int raid_status(struct dm_target *ti, status_type_t type,
		       unsigned status_flags, char *result, unsigned maxlen)
{
	struct raid_set *rs = ti->private;
	unsigned raid_param_cnt = 1; /* at least 1 for chunksize */
	unsigned sz = 0;
	int i, array_in_sync = 0;
	sector_t sync;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%s %d ", rs->raid_type->name, rs->md.raid_disks);

		if (test_bit(MD_RECOVERY_RUNNING, &rs->md.recovery))
			sync = rs->md.curr_resync_completed;
		else
			sync = rs->md.recovery_cp;

		if (sync >= rs->md.resync_max_sectors) {
			array_in_sync = 1;
			sync = rs->md.resync_max_sectors;
		} else {
			/*
			 * The array may be doing an initial sync, or it may
			 * be rebuilding individual components.  If all the
			 * devices are In_sync, then it is the array that is
			 * being initialized.
			 */
			for (i = 0; i < rs->md.raid_disks; i++)
				if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
					array_in_sync = 1;
		}
		/*
		 * Status characters:
		 *  'D' = Dead/Failed device
		 *  'a' = Alive but not in-sync
		 *  'A' = Alive and in-sync
		 */
		for (i = 0; i < rs->md.raid_disks; i++) {
			if (test_bit(Faulty, &rs->dev[i].rdev.flags))
				DMEMIT("D");
			else if (!array_in_sync ||
				 !test_bit(In_sync, &rs->dev[i].rdev.flags))
				DMEMIT("a");
			else
				DMEMIT("A");
		}

		/*
		 * In-sync ratio:
		 *  The in-sync ratio shows the progress of:
		 *   - Initializing the array
		 *   - Rebuilding a subset of devices of the array
		 *  The user can distinguish between the two by referring
		 *  to the status characters.
		 */
		DMEMIT(" %llu/%llu",
		       (unsigned long long) sync,
		       (unsigned long long) rs->md.resync_max_sectors);

		break;
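
		/*
		 * Example STATUSTYPE_INFO result (values illustrative):
		 *   "raid5_ls 3 AAA 1960893648/1960893648"
		 * i.e. a fully in-sync, fully initialized 3-device array.
		 */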
	case STATUSTYPE_TABLE:
		/* The string you would use to construct this array */
		for (i = 0; i < rs->md.raid_disks; i++) {
			if ((rs->print_flags & DMPF_REBUILD) &&
			    rs->dev[i].data_dev &&
			    !test_bit(In_sync, &rs->dev[i].rdev.flags))
				raid_param_cnt += 2; /* for rebuilds */
			if (rs->dev[i].data_dev &&
			    test_bit(WriteMostly, &rs->dev[i].rdev.flags))
				raid_param_cnt += 2;
		}

		raid_param_cnt += (hweight32(rs->print_flags & ~DMPF_REBUILD) * 2);
		/* 'sync' and 'nosync' are bare keywords, not key/value pairs */
		if (rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC))
			raid_param_cnt--;

		DMEMIT("%s %u %u", rs->raid_type->name,
		       raid_param_cnt, rs->md.chunk_sectors);
		if ((rs->print_flags & DMPF_SYNC) &&
		    (rs->md.recovery_cp == MaxSector))
			DMEMIT(" sync");
		if (rs->print_flags & DMPF_NOSYNC)
			DMEMIT(" nosync");

		for (i = 0; i < rs->md.raid_disks; i++)
			if ((rs->print_flags & DMPF_REBUILD) &&
			    rs->dev[i].data_dev &&
			    !test_bit(In_sync, &rs->dev[i].rdev.flags))
				DMEMIT(" rebuild %u", i);
		if (rs->print_flags & DMPF_DAEMON_SLEEP)
			DMEMIT(" daemon_sleep %lu",
			       rs->md.bitmap_info.daemon_sleep);

		if (rs->print_flags & DMPF_MIN_RECOVERY_RATE)
			DMEMIT(" min_recovery_rate %d", rs->md.sync_speed_min);

		if (rs->print_flags & DMPF_MAX_RECOVERY_RATE)
			DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max);

		for (i = 0; i < rs->md.raid_disks; i++)
			if (rs->dev[i].data_dev &&
			    test_bit(WriteMostly, &rs->dev[i].rdev.flags))
				DMEMIT(" write_mostly %u", i);

		if (rs->print_flags & DMPF_MAX_WRITE_BEHIND)
			DMEMIT(" max_write_behind %lu",
			       rs->md.bitmap_info.max_write_behind);

		if (rs->print_flags & DMPF_STRIPE_CACHE) {
			struct r5conf *conf = rs->md.private;

			/* convert from kiB to sectors */
			DMEMIT(" stripe_cache %d",
			       conf ? conf->max_nr_stripes * 2 : 0);
		}

		if (rs->print_flags & DMPF_REGION_SIZE)
			DMEMIT(" region_size %lu",
			       rs->md.bitmap_info.chunksize >> 9);
		DMEMIT(" %d", rs->md.raid_disks);
		for (i = 0; i < rs->md.raid_disks; i++) {
			if (rs->dev[i].meta_dev)
				DMEMIT(" %s", rs->dev[i].meta_dev->name);
			else
				DMEMIT(" -");

			if (rs->dev[i].data_dev)
				DMEMIT(" %s", rs->dev[i].data_dev->name);
			else
				DMEMIT(" -");
		}
	}

	return 0;
}
static int raid_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
{
	struct raid_set *rs = ti->private;
	unsigned i;
	int ret = 0;

	for (i = 0; !ret && i < rs->md.raid_disks; i++)
		if (rs->dev[i].data_dev)
			ret = fn(ti,
				 rs->dev[i].data_dev,
				 0, /* No offset on data devs */
				 rs->md.dev_sectors,
				 data);

	return ret;
}
static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct raid_set *rs = ti->private;
	unsigned chunk_size = rs->md.chunk_sectors << 9;
	struct r5conf *conf = rs->md.private;

	blk_limits_io_min(limits, chunk_size);
	blk_limits_io_opt(limits, chunk_size * (conf->raid_disks - conf->max_degraded));
}
static void raid_presuspend(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	md_stop_writes(&rs->md);
}

static void raid_postsuspend(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	mddev_suspend(&rs->md);
}

static void raid_resume(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	set_bit(MD_CHANGE_DEVS, &rs->md.flags);
	if (!rs->bitmap_loaded) {
		bitmap_load(&rs->md);
		rs->bitmap_loaded = 1;
	}

	clear_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
	mddev_resume(&rs->md);
}
static struct target_type raid_target = {
	.name = "raid",
	.version = {1, 2, 0},
	.module = THIS_MODULE,
	.ctr = raid_ctr,
	.dtr = raid_dtr,
	.map = raid_map,
	.status = raid_status,
	.iterate_devices = raid_iterate_devices,
	.io_hints = raid_io_hints,
	.presuspend = raid_presuspend,
	.postsuspend = raid_postsuspend,
	.resume = raid_resume,
};
static int __init dm_raid_init(void)
{
	return dm_register_target(&raid_target);
}

static void __exit dm_raid_exit(void)
{
	dm_unregister_target(&raid_target);
}

module_init(dm_raid_init);
module_exit(dm_raid_exit);
MODULE_DESCRIPTION(DM_NAME " raid4/5/6 target");
MODULE_ALIAS("dm-raid4");
MODULE_ALIAS("dm-raid5");
MODULE_ALIAS("dm-raid6");
MODULE_AUTHOR("Neil Brown <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");