/*
 * raid5.c : Multiple Devices driver for Linux
 *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *	   Copyright (C) 1999, 2000 Ingo Molnar
 *	   Copyright (C) 2002, 2003 H. Peter Anvin
 *
 * RAID-4/5/6 management functions.
 * Thanks to Penguin Computing for making the RAID-6 development possible
 * by donating a test server!
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/*
 * BITMAP UNPLUGGING:
 *
 * The sequencing for updating the bitmap reliably is a little
 * subtle (and I got it wrong the first time) so it deserves some
 * explanation.
 *
 * We group bitmap updates into batches.  Each batch has a number.
 * We may write out several batches at once, but that isn't very important.
 * conf->seq_write is the number of the last batch successfully written.
 * conf->seq_flush is the number of the last batch that was closed to
 *    new additions.
 * When we discover that we will need to write to any block in a stripe
 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
 * the number of the batch it will be in. This is seq_flush+1.
 * When we are ready to do a write, if that batch hasn't been written yet,
 *   we plug the array and queue the stripe for later.
 * When an unplug happens, we increment bm_flush, thus closing the current
 *   batch.
 * When we notice that bm_flush > bm_write, we write out all pending updates
 * to the bitmap, and advance bm_write to where bm_flush was.
 * This may occasionally write a bit out twice, but is sure never to
 * miss any bits.
 */
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/module.h>
#include <linux/async.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/nodemask.h>
#include <trace/events/block.h>

#define cpu_to_group(cpu) cpu_to_node(cpu)
#define ANY_GROUP NUMA_NO_NODE
static bool devices_handle_discard_safely = false;
module_param(devices_handle_discard_safely, bool, 0644);
MODULE_PARM_DESC(devices_handle_discard_safely,
		 "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
static struct workqueue_struct *raid5_wq;
#define NR_STRIPES		256
#define STRIPE_SIZE		PAGE_SIZE
#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
#define	IO_THRESHOLD		1
#define BYPASS_THRESHOLD	1
#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK		(NR_HASH - 1)
#define MAX_STRIPE_BATCH	8
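
/* Hash a stripe's starting sector into the stripe_hashtbl bucket list. */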
static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
{
	int hash = (sect >> STRIPE_SHIFT) & HASH_MASK;
	return &conf->stripe_hashtbl[hash];
}

static inline int stripe_hash_locks_hash(sector_t sect)
{
	return (sect >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK;
}

static inline void lock_device_hash_lock(struct r5conf *conf, int hash)
{
	spin_lock_irq(conf->hash_locks + hash);
	spin_lock(&conf->device_lock);
}

static inline void unlock_device_hash_lock(struct r5conf *conf, int hash)
{
	spin_unlock(&conf->device_lock);
	spin_unlock_irq(conf->hash_locks + hash);
}
static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
{
	int i;

	local_irq_disable();
	spin_lock(conf->hash_locks);
	for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
		spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks);
	spin_lock(&conf->device_lock);
}

static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
{
	int i;

	spin_unlock(&conf->device_lock);
	for (i = NR_STRIPE_HASH_LOCKS; i; i--)
		spin_unlock(conf->hash_locks + i - 1);
	local_irq_enable();
}
/* bio's attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap.  There may be several bio's per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
 * This function is used to determine the 'next' bio in the list, given the
 * sector of the current stripe+device.
 */
static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
{
	int sectors = bio_sectors(bio);
	if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
		return bio->bi_next;
	else
		return NULL;
}
/*
 * We maintain a biased count of active stripes in the bottom 16 bits of
 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
 */
static inline int raid5_bi_processed_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	return (atomic_read(segments) >> 16) & 0xffff;
}

static inline int raid5_dec_bi_active_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	return atomic_sub_return(1, segments) & 0xffff;
}

static inline void raid5_inc_bi_active_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	atomic_inc(segments);
}

static inline void raid5_set_bi_processed_stripes(struct bio *bio,
	unsigned int cnt)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	int old, new;

	do {
		old = atomic_read(segments);
		new = (old & 0xffff) | (cnt << 16);
	} while (atomic_cmpxchg(segments, old, new) != old);
}

static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	atomic_set(segments, cnt);
}
/* Find first data disk in a raid6 stripe */
static inline int raid6_d0(struct stripe_head *sh)
{
	if (sh->ddf_layout)
		/* ddf always start from first device */
		return 0;
	/* md starts just after Q block */
	if (sh->qd_idx == sh->disks - 1)
		return 0;
	else
		return sh->qd_idx + 1;
}

static inline int raid6_next_disk(int disk, int raid_disks)
{
	disk++;
	return (disk < raid_disks) ? disk : 0;
}
/* When walking through the disks in a raid5, starting at raid6_d0,
 * we need to map each disk to a 'slot', where the data disks are slot
 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
 * is raid_disks-1.  This helper does that mapping.
 */
static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
			     int *count, int syndrome_disks)
{
	int slot = *count;

	if (sh->ddf_layout)
		(*count)++;
	if (idx == sh->pd_idx)
		return syndrome_disks;
	if (idx == sh->qd_idx)
		return syndrome_disks + 1;
	if (!sh->ddf_layout)
		(*count)++;
	return slot;
}
static void return_io(struct bio *return_bi)
{
	struct bio *bi = return_bi;
	while (bi) {

		return_bi = bi->bi_next;
		bi->bi_next = NULL;
		bi->bi_iter.bi_size = 0;
		trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
					 bi, 0);
		bio_endio(bi, 0);
		bi = return_bi;
	}
}
static void print_raid5_conf(struct r5conf *conf);

static int stripe_operations_active(struct stripe_head *sh)
{
	return sh->check_state || sh->reconstruct_state ||
	       test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
	       test_bit(STRIPE_COMPUTE_RUN, &sh->state);
}
static void raid5_wakeup_stripe_thread(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	struct r5worker_group *group;
	int thread_cnt;
	int i, cpu = sh->cpu;

	if (!cpu_online(cpu)) {
		cpu = cpumask_any(cpu_online_mask);
		sh->cpu = cpu;
	}

	if (list_empty(&sh->lru)) {
		struct r5worker_group *group;
		group = conf->worker_groups + cpu_to_group(cpu);
		list_add_tail(&sh->lru, &group->handle_list);
		group->stripes_cnt++;
		sh->group = group;
	}

	if (conf->worker_cnt_per_group == 0) {
		md_wakeup_thread(conf->mddev->thread);
		return;
	}

	group = conf->worker_groups + cpu_to_group(sh->cpu);

	group->workers[0].working = true;
	/* at least one worker should run to avoid race */
	queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work);

	thread_cnt = group->stripes_cnt / MAX_STRIPE_BATCH - 1;
	/* wakeup more workers */
	for (i = 1; i < conf->worker_cnt_per_group && thread_cnt > 0; i++) {
		if (group->workers[i].working == false) {
			group->workers[i].working = true;
			queue_work_on(sh->cpu, raid5_wq,
				      &group->workers[i].work);
			thread_cnt--;
		}
	}
}
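
/*
 * Drop the last reference to a stripe: requeue it for handling if
 * STRIPE_HANDLE is set, otherwise move it to the temporary inactive list.
 * Called with conf->device_lock held.
 */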
static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
			      struct list_head *temp_inactive_list)
{
	BUG_ON(!list_empty(&sh->lru));
	BUG_ON(atomic_read(&conf->active_stripes) == 0);
	if (test_bit(STRIPE_HANDLE, &sh->state)) {
		if (test_bit(STRIPE_DELAYED, &sh->state) &&
		    !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			list_add_tail(&sh->lru, &conf->delayed_list);
		else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
			 sh->bm_seq - conf->seq_write > 0)
			list_add_tail(&sh->lru, &conf->bitmap_list);
		else {
			clear_bit(STRIPE_DELAYED, &sh->state);
			clear_bit(STRIPE_BIT_DELAY, &sh->state);
			if (conf->worker_cnt_per_group == 0) {
				list_add_tail(&sh->lru, &conf->handle_list);
			} else {
				raid5_wakeup_stripe_thread(sh);
				return;
			}
		}
		md_wakeup_thread(conf->mddev->thread);
	} else {
		BUG_ON(stripe_operations_active(sh));
		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			if (atomic_dec_return(&conf->preread_active_stripes)
			    < IO_THRESHOLD)
				md_wakeup_thread(conf->mddev->thread);
		atomic_dec(&conf->active_stripes);
		if (!test_bit(STRIPE_EXPANDING, &sh->state))
			list_add_tail(&sh->lru, temp_inactive_list);
	}
}
static void __release_stripe(struct r5conf *conf, struct stripe_head *sh,
			     struct list_head *temp_inactive_list)
{
	if (atomic_dec_and_test(&sh->count))
		do_release_stripe(conf, sh, temp_inactive_list);
}
/*
 * @hash could be NR_STRIPE_HASH_LOCKS, then we have a list of inactive_list
 *
 * Be careful: Only one task can add/delete stripes from temp_inactive_list at
 * given time. Adding stripes only takes device lock, while deleting stripes
 * only takes hash lock.
 */
static void release_inactive_stripe_list(struct r5conf *conf,
					 struct list_head *temp_inactive_list,
					 int hash)
{
	int size;
	bool do_wakeup = false;
	unsigned long flags;

	if (hash == NR_STRIPE_HASH_LOCKS) {
		size = NR_STRIPE_HASH_LOCKS;
		hash = NR_STRIPE_HASH_LOCKS - 1;
	} else
		size = 1;
	while (size) {
		struct list_head *list = &temp_inactive_list[size - 1];

		/*
		 * We don't hold any lock here yet, get_active_stripe() might
		 * remove stripes from the list
		 */
		if (!list_empty_careful(list)) {
			spin_lock_irqsave(conf->hash_locks + hash, flags);
			if (list_empty(conf->inactive_list + hash) &&
			    !list_empty(list))
				atomic_dec(&conf->empty_inactive_list_nr);
			list_splice_tail_init(list, conf->inactive_list + hash);
			do_wakeup = true;
			spin_unlock_irqrestore(conf->hash_locks + hash, flags);
		}
		size--;
		hash--;
	}

	if (do_wakeup) {
		wake_up(&conf->wait_for_stripe);
		if (conf->retry_read_aligned)
			md_wakeup_thread(conf->mddev->thread);
	}
}
/* should hold conf->device_lock already */
static int release_stripe_list(struct r5conf *conf,
			       struct list_head *temp_inactive_list)
{
	struct stripe_head *sh;
	int count = 0;
	struct llist_node *head;

	head = llist_del_all(&conf->released_stripes);
	head = llist_reverse_order(head);
	while (head) {
		int hash;

		sh = llist_entry(head, struct stripe_head, release_list);
		head = llist_next(head);
		/* sh could be readded after STRIPE_ON_RELEASE_LIST is cleared */
		smp_mb();
		clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state);
		/*
		 * Don't worry the bit is set here, because if the bit is set
		 * again, the count is always > 1. This is true for
		 * STRIPE_ON_UNPLUG_LIST bit too.
		 */
		hash = sh->hash_lock_index;
		__release_stripe(conf, sh, &temp_inactive_list[hash]);
		count++;
	}

	return count;
}
static void release_stripe(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	unsigned long flags;
	struct list_head list;
	int hash;
	bool wakeup;

	/* Avoid release_list until the last reference.
	 */
	if (atomic_add_unless(&sh->count, -1, 1))
		return;

	if (unlikely(!conf->mddev->thread) ||
	    test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state))
		goto slow_path;
	wakeup = llist_add(&sh->release_list, &conf->released_stripes);
	if (wakeup)
		md_wakeup_thread(conf->mddev->thread);
	return;
slow_path:
	local_irq_save(flags);
	/* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */
	if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
		INIT_LIST_HEAD(&list);
		hash = sh->hash_lock_index;
		do_release_stripe(conf, sh, &list);
		spin_unlock(&conf->device_lock);
		release_inactive_stripe_list(conf, &list, hash);
	}
	local_irq_restore(flags);
}
static inline void remove_hash(struct stripe_head *sh)
{
	pr_debug("remove_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_del_init(&sh->hash);
}

static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh)
{
	struct hlist_head *hp = stripe_hash(conf, sh->sector);

	pr_debug("insert_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_add_head(&sh->hash, hp);
}
/* find an idle stripe, make sure it is unhashed, and return it. */
static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash)
{
	struct stripe_head *sh = NULL;
	struct list_head *first;

	if (list_empty(conf->inactive_list + hash))
		goto out;
	first = (conf->inactive_list + hash)->next;
	sh = list_entry(first, struct stripe_head, lru);
	list_del_init(first);
	remove_hash(sh);
	atomic_inc(&conf->active_stripes);
	BUG_ON(hash != sh->hash_lock_index);
	if (list_empty(conf->inactive_list + hash))
		atomic_inc(&conf->empty_inactive_list_nr);
out:
	return sh;
}
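
/* Free the per-device pages attached to a stripe_head. */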
static void shrink_buffers(struct stripe_head *sh)
{
	struct page *p;
	int i;
	int num = sh->raid_conf->pool_size;

	for (i = 0; i < num ; i++) {
		WARN_ON(sh->dev[i].page != sh->dev[i].orig_page);
		p = sh->dev[i].page;
		if (!p)
			continue;
		sh->dev[i].page = NULL;
		put_page(p);
	}
}

static int grow_buffers(struct stripe_head *sh)
{
	int i;
	int num = sh->raid_conf->pool_size;

	for (i = 0; i < num; i++) {
		struct page *page;

		if (!(page = alloc_page(GFP_KERNEL))) {
			return 1;
		}
		sh->dev[i].page = page;
		sh->dev[i].orig_page = page;
	}
	return 0;
}
static void raid5_build_block(struct stripe_head *sh, int i, int previous);
static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
			    struct stripe_head *sh);
static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
{
	struct r5conf *conf = sh->raid_conf;
	int i, seq;

	BUG_ON(atomic_read(&sh->count) != 0);
	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
	BUG_ON(stripe_operations_active(sh));

	pr_debug("init_stripe called, stripe %llu\n",
		(unsigned long long)sector);
retry:
	seq = read_seqcount_begin(&conf->gen_lock);
	sh->generation = conf->generation - previous;
	sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
	sh->sector = sector;
	stripe_set_idx(sector, conf, previous, sh);
	sh->state = 0;

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->toread || dev->read || dev->towrite || dev->written ||
		    test_bit(R5_LOCKED, &dev->flags)) {
			printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
			       (unsigned long long)sh->sector, i, dev->toread,
			       dev->read, dev->towrite, dev->written,
			       test_bit(R5_LOCKED, &dev->flags));
			WARN_ON(1);
		}
		dev->flags = 0;
		raid5_build_block(sh, i, previous);
	}
	if (read_seqcount_retry(&conf->gen_lock, seq))
		goto retry;
	insert_hash(conf, sh);
	sh->cpu = smp_processor_id();
}
static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
					 short generation)
{
	struct stripe_head *sh;

	pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
	hlist_for_each_entry(sh, stripe_hash(conf, sector), hash)
		if (sh->sector == sector && sh->generation == generation)
			return sh;
	pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
	return NULL;
}
/*
 * Need to check if array has failed when deciding whether to:
 *  - start an array
 *  - remove non-faulty devices
 *  - add a spare
 *  - allow a reshape
 * This determination is simple when no reshape is happening.
 * However if there is a reshape, we need to carefully check
 * both the before and after sections.
 * This is because some failed devices may only affect one
 * of the two sections, and some non-in_sync devices may
 * be insync in the section most affected by failed devices.
 */
static int calc_degraded(struct r5conf *conf)
{
	int degraded, degraded2;
	int i;

	rcu_read_lock();
	degraded = 0;
	for (i = 0; i < conf->previous_raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = rcu_dereference(conf->disks[i].replacement);
		if (!rdev || test_bit(Faulty, &rdev->flags))
			degraded++;
		else if (test_bit(In_sync, &rdev->flags))
			;
		else
			/* not in-sync or faulty.
			 * If the reshape increases the number of devices,
			 * this is being recovered by the reshape, so
			 * this 'previous' section is not in_sync.
			 * If the number of devices is being reduced however,
			 * the device can only be part of the array if
			 * we are reverting a reshape, so this section will
			 * be in_sync.
			 */
			if (conf->raid_disks >= conf->previous_raid_disks)
				degraded++;
	}
	rcu_read_unlock();
	if (conf->raid_disks == conf->previous_raid_disks)
		return degraded;
	rcu_read_lock();
	degraded2 = 0;
	for (i = 0; i < conf->raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = rcu_dereference(conf->disks[i].replacement);
		if (!rdev || test_bit(Faulty, &rdev->flags))
			degraded2++;
		else if (test_bit(In_sync, &rdev->flags))
			;
		else
			/* not in-sync or faulty.
			 * If reshape increases the number of devices, this
			 * section has already been recovered, else it
			 * almost certainly hasn't.
			 */
			if (conf->raid_disks <= conf->previous_raid_disks)
				degraded2++;
	}
	rcu_read_unlock();
	if (degraded2 > degraded)
		return degraded2;
	return degraded;
}
static int has_failed(struct r5conf *conf)
{
	int degraded;

	if (conf->mddev->reshape_position == MaxSector)
		return conf->mddev->degraded > conf->max_degraded;

	degraded = calc_degraded(conf);
	if (degraded > conf->max_degraded)
		return 1;
	return 0;
}
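
/*
 * Find the stripe_head covering 'sector', or take one from the inactive list
 * and initialise it.  May block until a stripe becomes available unless
 * 'noblock' is set.
 */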
static struct stripe_head *
get_active_stripe(struct r5conf *conf, sector_t sector,
		  int previous, int noblock, int noquiesce)
{
	struct stripe_head *sh;
	int hash = stripe_hash_locks_hash(sector);

	pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);

	spin_lock_irq(conf->hash_locks + hash);

	do {
		wait_event_lock_irq(conf->wait_for_stripe,
				    conf->quiesce == 0 || noquiesce,
				    *(conf->hash_locks + hash));
		sh = __find_stripe(conf, sector, conf->generation - previous);
		if (!sh) {
			if (!conf->inactive_blocked)
				sh = get_free_stripe(conf, hash);
			if (noblock && sh == NULL)
				break;
			if (!sh) {
				conf->inactive_blocked = 1;
				wait_event_lock_irq(
					conf->wait_for_stripe,
					!list_empty(conf->inactive_list + hash) &&
					(atomic_read(&conf->active_stripes)
					 < (conf->max_nr_stripes * 3 / 4)
					 || !conf->inactive_blocked),
					*(conf->hash_locks + hash));
				conf->inactive_blocked = 0;
			} else {
				init_stripe(sh, sector, previous);
				atomic_inc(&sh->count);
			}
		} else if (!atomic_inc_not_zero(&sh->count)) {
			spin_lock(&conf->device_lock);
			if (!atomic_read(&sh->count)) {
				if (!test_bit(STRIPE_HANDLE, &sh->state))
					atomic_inc(&conf->active_stripes);
				BUG_ON(list_empty(&sh->lru) &&
				       !test_bit(STRIPE_EXPANDING, &sh->state));
				list_del_init(&sh->lru);
				if (sh->group) {
					sh->group->stripes_cnt--;
					sh->group = NULL;
				}
			}
			atomic_inc(&sh->count);
			spin_unlock(&conf->device_lock);
		}
	} while (sh == NULL);

	spin_unlock_irq(conf->hash_locks + hash);
	return sh;
}
/* Determine if 'data_offset' or 'new_data_offset' should be used
 * in this stripe_head.
 */
static int use_new_offset(struct r5conf *conf, struct stripe_head *sh)
{
	sector_t progress = conf->reshape_progress;
	/* Need a memory barrier to make sure we see the value
	 * of conf->generation, or ->data_offset that was set before
	 * reshape_progress was updated.
	 */
	smp_rmb();
	if (progress == MaxSector)
		return 0;
	if (sh->generation == conf->generation - 1)
		return 0;
	/* We are in a reshape, and this is a new-generation stripe,
	 * so use new_data_offset.
	 */
	return 1;
}
static void
raid5_end_read_request(struct bio *bi, int error);
static void
raid5_end_write_request(struct bio *bi, int error);
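
/*
 * Issue the reads/writes that stripe handling scheduled: one bio per device
 * (plus one to a replacement device when needed), honouring bad-block ranges
 * and the FUA/SYNC/DISCARD flags recorded in each r5dev.
 */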
static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
{
	struct r5conf *conf = sh->raid_conf;
	int i, disks = sh->disks;

	might_sleep();

	for (i = disks; i--; ) {
		int rw;
		int replace_only = 0;
		struct bio *bi, *rbi;
		struct md_rdev *rdev, *rrdev = NULL;
		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
			if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
				rw = WRITE_FUA;
			else
				rw = WRITE;
			if (test_bit(R5_Discard, &sh->dev[i].flags))
				rw |= REQ_DISCARD;
		} else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
			rw = READ;
		else if (test_and_clear_bit(R5_WantReplace,
					    &sh->dev[i].flags)) {
			rw = WRITE;
			replace_only = 1;
		} else
			continue;
		if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags))
			rw |= REQ_SYNC;

		bi = &sh->dev[i].req;
		rbi = &sh->dev[i].rreq; /* For writing to replacement */

		rcu_read_lock();
		rrdev = rcu_dereference(conf->disks[i].replacement);
		smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (!rdev) {
			rdev = rrdev;
			rrdev = NULL;
		}
		if (rw & WRITE) {
			if (replace_only)
				rdev = NULL;
			if (rdev == rrdev)
				/* We raced and saw duplicates */
				rrdev = NULL;
		} else {
			if (test_bit(R5_ReadRepl, &sh->dev[i].flags) && rrdev)
				rdev = rrdev;
			rrdev = NULL;
		}

		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = NULL;
		if (rdev)
			atomic_inc(&rdev->nr_pending);
		if (rrdev && test_bit(Faulty, &rrdev->flags))
			rrdev = NULL;
		if (rrdev)
			atomic_inc(&rrdev->nr_pending);
		rcu_read_unlock();

		/* We have already checked bad blocks for reads.  Now
		 * need to check for writes.  We never accept write errors
		 * on the replacement, so we don't need to check rrdev.
		 */
		while ((rw & WRITE) && rdev &&
		       test_bit(WriteErrorSeen, &rdev->flags)) {
			sector_t first_bad;
			int bad_sectors;
			int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
					      &first_bad, &bad_sectors);
			if (!bad)
				break;

			if (bad < 0) {
				set_bit(BlockedBadBlocks, &rdev->flags);
				if (!conf->mddev->external &&
				    conf->mddev->flags) {
					/* It is very unlikely, but we might
					 * still need to write out the
					 * bad block log - better give it
					 * a chance*/
					md_check_recovery(conf->mddev);
				}
				/*
				 * Because md_wait_for_blocked_rdev
				 * will dec nr_pending, we must
				 * increment it first.
				 */
				atomic_inc(&rdev->nr_pending);
				md_wait_for_blocked_rdev(rdev, conf->mddev);
			} else {
				/* Acknowledged bad block - skip the write */
				rdev_dec_pending(rdev, conf->mddev);
				rdev = NULL;
			}
		}

		if (rdev) {
			if (s->syncing || s->expanding || s->expanded
			    || s->replacing)
				md_sync_acct(rdev->bdev, STRIPE_SECTORS);

			set_bit(STRIPE_IO_STARTED, &sh->state);

			bio_reset(bi);
			bi->bi_bdev = rdev->bdev;
			bi->bi_rw = rw;
			bi->bi_end_io = (rw & WRITE)
				? raid5_end_write_request
				: raid5_end_read_request;
			bi->bi_private = sh;

			pr_debug("%s: for %llu schedule op %ld on disc %d\n",
				__func__, (unsigned long long)sh->sector,
				bi->bi_rw, i);
			atomic_inc(&sh->count);
			if (use_new_offset(conf, sh))
				bi->bi_iter.bi_sector = (sh->sector
						 + rdev->new_data_offset);
			else
				bi->bi_iter.bi_sector = (sh->sector
						 + rdev->data_offset);
			if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
				bi->bi_rw |= REQ_NOMERGE;

			if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
				WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
			sh->dev[i].vec.bv_page = sh->dev[i].page;
			bi->bi_vcnt = 1;
			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
			bi->bi_io_vec[0].bv_offset = 0;
			bi->bi_iter.bi_size = STRIPE_SIZE;
			/*
			 * If this is discard request, set bi_vcnt 0. We don't
			 * want to confuse SCSI because SCSI will replace payload
			 */
			if (rw & REQ_DISCARD)
				bi->bi_vcnt = 0;
			if (rrdev)
				set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);

			if (conf->mddev->gendisk)
				trace_block_bio_remap(bdev_get_queue(bi->bi_bdev),
						      bi, disk_devt(conf->mddev->gendisk),
						      sh->dev[i].sector);
			generic_make_request(bi);
		}
		if (rrdev) {
			if (s->syncing || s->expanding || s->expanded
			    || s->replacing)
				md_sync_acct(rrdev->bdev, STRIPE_SECTORS);

			set_bit(STRIPE_IO_STARTED, &sh->state);

			bio_reset(rbi);
			rbi->bi_bdev = rrdev->bdev;
			rbi->bi_rw = rw;
			BUG_ON(!(rw & WRITE));
			rbi->bi_end_io = raid5_end_write_request;
			rbi->bi_private = sh;

			pr_debug("%s: for %llu schedule op %ld on "
				 "replacement disc %d\n",
				__func__, (unsigned long long)sh->sector,
				rbi->bi_rw, i);
			atomic_inc(&sh->count);
			if (use_new_offset(conf, sh))
				rbi->bi_iter.bi_sector = (sh->sector
						  + rrdev->new_data_offset);
			else
				rbi->bi_iter.bi_sector = (sh->sector
						  + rrdev->data_offset);
			if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
				WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
			sh->dev[i].rvec.bv_page = sh->dev[i].page;
			rbi->bi_vcnt = 1;
			rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
			rbi->bi_io_vec[0].bv_offset = 0;
			rbi->bi_iter.bi_size = STRIPE_SIZE;
			/*
			 * If this is discard request, set bi_vcnt 0. We don't
			 * want to confuse SCSI because SCSI will replace payload
			 */
			if (rw & REQ_DISCARD)
				rbi->bi_vcnt = 0;
			if (conf->mddev->gendisk)
				trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
						      rbi, disk_devt(conf->mddev->gendisk),
						      sh->dev[i].sector);
			generic_make_request(rbi);
		}
		if (!rdev && !rrdev) {
			if (rw & WRITE)
				set_bit(STRIPE_DEGRADED, &sh->state);
			pr_debug("skip op %ld on disc %d for sector %llu\n",
				bi->bi_rw, i, (unsigned long long)sh->sector);
			clear_bit(R5_LOCKED, &sh->dev[i].flags);
			set_bit(STRIPE_HANDLE, &sh->state);
		}
	}
}
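
/*
 * Copy data between a bio and a stripe page using the async_tx API,
 * chaining the copy onto 'tx'.  'frombio' selects the copy direction.
 */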
static struct dma_async_tx_descriptor *
async_copy_data(int frombio, struct bio *bio, struct page **page,
	sector_t sector, struct dma_async_tx_descriptor *tx,
	struct stripe_head *sh)
{
	struct bio_vec bvl;
	struct bvec_iter iter;
	struct page *bio_page;
	int page_offset;
	struct async_submit_ctl submit;
	enum async_tx_flags flags = 0;

	if (bio->bi_iter.bi_sector >= sector)
		page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
	else
		page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;

	if (frombio)
		flags |= ASYNC_TX_FENCE;
	init_async_submit(&submit, flags, tx, NULL, NULL, NULL);

	bio_for_each_segment(bvl, bio, iter) {
		int len = bvl.bv_len;
		int clen;
		int b_offset = 0;

		if (page_offset < 0) {
			b_offset = -page_offset;
			page_offset += b_offset;
			len -= b_offset;
		}

		if (len > 0 && page_offset + len > STRIPE_SIZE)
			clen = STRIPE_SIZE - page_offset;
		else
			clen = len;

		if (clen > 0) {
			b_offset += bvl.bv_offset;
			bio_page = bvl.bv_page;
			if (frombio) {
				if (sh->raid_conf->skip_copy &&
				    b_offset == 0 && page_offset == 0 &&
				    clen == STRIPE_SIZE)
					*page = bio_page;
				else
					tx = async_memcpy(*page, bio_page, page_offset,
						  b_offset, clen, &submit);
			} else
				tx = async_memcpy(bio_page, *page, b_offset,
						  page_offset, clen, &submit);
		}
		/* chain the operations */
		submit.depend_tx = tx;

		if (clen < len) /* hit end of page */
			break;
		page_offset +=  len;
	}

	return tx;
}
static void ops_complete_biofill(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	struct bio *return_bi = NULL;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* clear completed biofills */
	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		/* acknowledge completion of a biofill operation */
		/* and check if we need to reply to a read request,
		 * new R5_Wantfill requests are held off until
		 * !STRIPE_BIOFILL_RUN
		 */
		if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi, *rbi2;

			BUG_ON(!dev->read);
			rbi = dev->read;
			dev->read = NULL;
			while (rbi && rbi->bi_iter.bi_sector <
				dev->sector + STRIPE_SECTORS) {
				rbi2 = r5_next_bio(rbi, dev->sector);
				if (!raid5_dec_bi_active_stripes(rbi)) {
					rbi->bi_next = return_bi;
					return_bi = rbi;
				}
				rbi = rbi2;
			}
		}
	}
	clear_bit(STRIPE_BIOFILL_RUN, &sh->state);

	return_io(return_bi);

	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
static void ops_run_biofill(struct stripe_head *sh)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct async_submit_ctl submit;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		if (test_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi;
			spin_lock_irq(&sh->stripe_lock);
			dev->read = rbi = dev->toread;
			dev->toread = NULL;
			spin_unlock_irq(&sh->stripe_lock);
			while (rbi && rbi->bi_iter.bi_sector <
				dev->sector + STRIPE_SECTORS) {
				tx = async_copy_data(0, rbi, &dev->page,
					dev->sector, tx, sh);
				rbi = r5_next_bio(rbi, dev->sector);
			}
		}
	}

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
	async_trigger_callback(&submit);
}
static void mark_target_uptodate(struct stripe_head *sh, int target)
{
	struct r5dev *tgt;

	if (target < 0)
		return;

	tgt = &sh->dev[target];
	set_bit(R5_UPTODATE, &tgt->flags);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	clear_bit(R5_Wantcompute, &tgt->flags);
}
static void ops_complete_compute(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* mark the computed target(s) as uptodate */
	mark_target_uptodate(sh, sh->ops.target);
	mark_target_uptodate(sh, sh->ops.target2);

	clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
	if (sh->check_state == check_state_compute_run)
		sh->check_state = check_state_compute_result;
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
/* return a pointer to the address conversion region of the scribble buffer */
static addr_conv_t *to_addr_conv(struct stripe_head *sh,
				 struct raid5_percpu *percpu)
{
	return percpu->scribble + sizeof(struct page *) * (sh->disks + 2);
}
static struct dma_async_tx_descriptor *
ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	int target = sh->ops.target;
	struct r5dev *tgt = &sh->dev[target];
	struct page *xor_dest = tgt->page;
	int count = 0;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	int i;

	pr_debug("%s: stripe %llu block: %d\n",
		__func__, (unsigned long long)sh->sector, target);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));

	for (i = disks; i--; )
		if (i != target)
			xor_srcs[count++] = sh->dev[i].page;

	atomic_inc(&sh->count);

	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
			  ops_complete_compute, sh, to_addr_conv(sh, percpu));
	if (unlikely(count == 1))
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
	else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

	return tx;
}
/* set_syndrome_sources - populate source buffers for gen_syndrome
 * @srcs - (struct page *) array of size sh->disks
 * @sh - stripe_head to parse
 *
 * Populates srcs in proper layout order for the stripe and returns the
 * 'count' of sources to be used in a call to async_gen_syndrome.  The P
 * destination buffer is recorded in srcs[count] and the Q destination
 * is recorded in srcs[count+1].
 */
static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
{
	int disks = sh->disks;
	int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
	int d0_idx = raid6_d0(sh);
	int count;
	int i;

	for (i = 0; i < disks; i++)
		srcs[i] = NULL;

	count = 0;
	i = d0_idx;
	do {
		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

		srcs[slot] = sh->dev[i].page;
		i = raid6_next_disk(i, disks);
	} while (i != d0_idx);

	return syndrome_disks;
}
*
1183 ops_run_compute6_1(struct stripe_head
*sh
, struct raid5_percpu
*percpu
)
1185 int disks
= sh
->disks
;
1186 struct page
**blocks
= percpu
->scribble
;
1188 int qd_idx
= sh
->qd_idx
;
1189 struct dma_async_tx_descriptor
*tx
;
1190 struct async_submit_ctl submit
;
1196 if (sh
->ops
.target
< 0)
1197 target
= sh
->ops
.target2
;
1198 else if (sh
->ops
.target2
< 0)
1199 target
= sh
->ops
.target
;
1201 /* we should only have one valid target */
1204 pr_debug("%s: stripe %llu block: %d\n",
1205 __func__
, (unsigned long long)sh
->sector
, target
);
1207 tgt
= &sh
->dev
[target
];
1208 BUG_ON(!test_bit(R5_Wantcompute
, &tgt
->flags
));
1211 atomic_inc(&sh
->count
);
1213 if (target
== qd_idx
) {
1214 count
= set_syndrome_sources(blocks
, sh
);
1215 blocks
[count
] = NULL
; /* regenerating p is not necessary */
1216 BUG_ON(blocks
[count
+1] != dest
); /* q should already be set */
1217 init_async_submit(&submit
, ASYNC_TX_FENCE
, NULL
,
1218 ops_complete_compute
, sh
,
1219 to_addr_conv(sh
, percpu
));
1220 tx
= async_gen_syndrome(blocks
, 0, count
+2, STRIPE_SIZE
, &submit
);
1222 /* Compute any data- or p-drive using XOR */
1224 for (i
= disks
; i
-- ; ) {
1225 if (i
== target
|| i
== qd_idx
)
1227 blocks
[count
++] = sh
->dev
[i
].page
;
1230 init_async_submit(&submit
, ASYNC_TX_FENCE
|ASYNC_TX_XOR_ZERO_DST
,
1231 NULL
, ops_complete_compute
, sh
,
1232 to_addr_conv(sh
, percpu
));
1233 tx
= async_xor(dest
, blocks
, 0, count
, STRIPE_SIZE
, &submit
);
static struct dma_async_tx_descriptor *
ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int i, count, disks = sh->disks;
	int syndrome_disks = sh->ddf_layout ? disks : disks-2;
	int d0_idx = raid6_d0(sh);
	int faila = -1, failb = -1;
	int target = sh->ops.target;
	int target2 = sh->ops.target2;
	struct r5dev *tgt = &sh->dev[target];
	struct r5dev *tgt2 = &sh->dev[target2];
	struct dma_async_tx_descriptor *tx;
	struct page **blocks = percpu->scribble;
	struct async_submit_ctl submit;

	pr_debug("%s: stripe %llu block1: %d block2: %d\n",
		 __func__, (unsigned long long)sh->sector, target, target2);
	BUG_ON(target < 0 || target2 < 0);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags));

	/* we need to open-code set_syndrome_sources to handle the
	 * slot number conversion for 'faila' and 'failb'
	 */
	for (i = 0; i < disks ; i++)
		blocks[i] = NULL;
	count = 0;
	i = d0_idx;
	do {
		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

		blocks[slot] = sh->dev[i].page;

		if (i == target)
			faila = slot;
		if (i == target2)
			failb = slot;
		i = raid6_next_disk(i, disks);
	} while (i != d0_idx);

	BUG_ON(faila == failb);
	if (failb < faila)
		swap(faila, failb);
	pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
		 __func__, (unsigned long long)sh->sector, faila, failb);

	atomic_inc(&sh->count);

	if (failb == syndrome_disks+1) {
		/* Q disk is one of the missing disks */
		if (faila == syndrome_disks) {
			/* Missing P+Q, just recompute */
			init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
					  ops_complete_compute, sh,
					  to_addr_conv(sh, percpu));
			return async_gen_syndrome(blocks, 0, syndrome_disks+2,
						  STRIPE_SIZE, &submit);
		} else {
			struct page *dest;
			int data_target;
			int qd_idx = sh->qd_idx;

			/* Missing D+Q: recompute D from P, then recompute Q */
			if (target == qd_idx)
				data_target = target2;
			else
				data_target = target;

			count = 0;
			for (i = disks; i-- ; ) {
				if (i == data_target || i == qd_idx)
					continue;
				blocks[count++] = sh->dev[i].page;
			}
			dest = sh->dev[data_target].page;
			init_async_submit(&submit,
					  ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
					  NULL, NULL, NULL,
					  to_addr_conv(sh, percpu));
			tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE,
				       &submit);

			count = set_syndrome_sources(blocks, sh);
			init_async_submit(&submit, ASYNC_TX_FENCE, tx,
					  ops_complete_compute, sh,
					  to_addr_conv(sh, percpu));
			return async_gen_syndrome(blocks, 0, count+2,
						  STRIPE_SIZE, &submit);
		}
	} else {
		init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
				  ops_complete_compute, sh,
				  to_addr_conv(sh, percpu));
		if (failb == syndrome_disks) {
			/* We're missing D+P. */
			return async_raid6_datap_recov(syndrome_disks+2,
						       STRIPE_SIZE, faila,
						       blocks, &submit);
		} else {
			/* We're missing D+D. */
			return async_raid6_2data_recov(syndrome_disks+2,
						       STRIPE_SIZE, faila, failb,
						       blocks, &submit);
		}
	}
}
static void ops_complete_prexor(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);
}
static struct dma_async_tx_descriptor *
ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu,
	       struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	int count = 0, pd_idx = sh->pd_idx, i;
	struct async_submit_ctl submit;

	/* existing parity data subtracted */
	struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		/* Only process blocks that are known to be uptodate */
		if (test_bit(R5_Wantdrain, &dev->flags))
			xor_srcs[count++] = dev->page;
	}

	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
			  ops_complete_prexor, sh, to_addr_conv(sh, percpu));
	tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

	return tx;
}
static struct dma_async_tx_descriptor *
ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		struct bio *chosen;

		if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
			struct bio *wbi;

			spin_lock_irq(&sh->stripe_lock);
			chosen = dev->towrite;
			dev->towrite = NULL;
			BUG_ON(dev->written);
			wbi = dev->written = chosen;
			spin_unlock_irq(&sh->stripe_lock);
			WARN_ON(dev->page != dev->orig_page);

			while (wbi && wbi->bi_iter.bi_sector <
				dev->sector + STRIPE_SECTORS) {
				if (wbi->bi_rw & REQ_FUA)
					set_bit(R5_WantFUA, &dev->flags);
				if (wbi->bi_rw & REQ_SYNC)
					set_bit(R5_SyncIO, &dev->flags);
				if (wbi->bi_rw & REQ_DISCARD)
					set_bit(R5_Discard, &dev->flags);
				else {
					tx = async_copy_data(1, wbi, &dev->page,
						dev->sector, tx, sh);
					if (dev->page != dev->orig_page) {
						set_bit(R5_SkipCopy, &dev->flags);
						clear_bit(R5_UPTODATE, &dev->flags);
						clear_bit(R5_OVERWRITE, &dev->flags);
					}
				}
				wbi = r5_next_bio(wbi, dev->sector);
			}
		}
	}

	return tx;
}
static void ops_complete_reconstruct(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	int disks = sh->disks;
	int pd_idx = sh->pd_idx;
	int qd_idx = sh->qd_idx;
	int i;
	bool fua = false, sync = false, discard = false;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		fua |= test_bit(R5_WantFUA, &sh->dev[i].flags);
		sync |= test_bit(R5_SyncIO, &sh->dev[i].flags);
		discard |= test_bit(R5_Discard, &sh->dev[i].flags);
	}

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->written || i == pd_idx || i == qd_idx) {
			if (!discard && !test_bit(R5_SkipCopy, &dev->flags))
				set_bit(R5_UPTODATE, &dev->flags);
			if (fua)
				set_bit(R5_WantFUA, &dev->flags);
			if (sync)
				set_bit(R5_SyncIO, &dev->flags);
		}
	}

	if (sh->reconstruct_state == reconstruct_state_drain_run)
		sh->reconstruct_state = reconstruct_state_drain_result;
	else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
		sh->reconstruct_state = reconstruct_state_prexor_drain_result;
	else {
		BUG_ON(sh->reconstruct_state != reconstruct_state_run);
		sh->reconstruct_state = reconstruct_state_result;
	}

	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
static void
ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
		     struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	struct async_submit_ctl submit;
	int count = 0, pd_idx = sh->pd_idx, i;
	struct page *xor_dest;
	int prexor = 0;
	unsigned long flags;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = 0; i < sh->disks; i++) {
		if (pd_idx == i)
			continue;
		if (!test_bit(R5_Discard, &sh->dev[i].flags))
			break;
	}
	if (i >= sh->disks) {
		atomic_inc(&sh->count);
		set_bit(R5_Discard, &sh->dev[pd_idx].flags);
		ops_complete_reconstruct(sh);
		return;
	}
	/* check if prexor is active which means only process blocks
	 * that are part of a read-modify-write (written)
	 */
	if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
		prexor = 1;
		xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (dev->written)
				xor_srcs[count++] = dev->page;
		}
	} else {
		xor_dest = sh->dev[pd_idx].page;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (i != pd_idx)
				xor_srcs[count++] = dev->page;
		}
	}

	/* 1/ if we prexor'd then the dest is reused as a source
	 * 2/ if we did not prexor then we are redoing the parity
	 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
	 * for the synchronous xor case
	 */
	flags = ASYNC_TX_ACK |
		(prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);

	atomic_inc(&sh->count);

	init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh,
			  to_addr_conv(sh, percpu));
	if (unlikely(count == 1))
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
	else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
}
static void
ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
		     struct dma_async_tx_descriptor *tx)
{
	struct async_submit_ctl submit;
	struct page **blocks = percpu->scribble;
	int count, i;

	pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);

	for (i = 0; i < sh->disks; i++) {
		if (sh->pd_idx == i || sh->qd_idx == i)
			continue;
		if (!test_bit(R5_Discard, &sh->dev[i].flags))
			break;
	}
	if (i >= sh->disks) {
		atomic_inc(&sh->count);
		set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
		set_bit(R5_Discard, &sh->dev[sh->qd_idx].flags);
		ops_complete_reconstruct(sh);
		return;
	}

	count = set_syndrome_sources(blocks, sh);

	atomic_inc(&sh->count);

	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_reconstruct,
			  sh, to_addr_conv(sh, percpu));
	async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
}
static void ops_complete_check(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	sh->check_state = check_state_check_result;
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	int pd_idx = sh->pd_idx;
	int qd_idx = sh->qd_idx;
	struct page *xor_dest;
	struct page **xor_srcs = percpu->scribble;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	int count;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	count = 0;
	xor_dest = sh->dev[pd_idx].page;
	xor_srcs[count++] = xor_dest;
	for (i = disks; i--; ) {
		if (i == pd_idx || i == qd_idx)
			continue;
		xor_srcs[count++] = sh->dev[i].page;
	}

	init_async_submit(&submit, 0, NULL, NULL, NULL,
			  to_addr_conv(sh, percpu));
	tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
			   &sh->ops.zero_sum_result, &submit);

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL);
	tx = async_trigger_callback(&submit);
}
static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
{
	struct page **srcs = percpu->scribble;
	struct async_submit_ctl submit;
	int count;

	pr_debug("%s: stripe %llu checkp: %d\n", __func__,
		(unsigned long long)sh->sector, checkp);

	count = set_syndrome_sources(srcs, sh);
	if (!checkp)
		srcs[count] = NULL;

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
			  sh, to_addr_conv(sh, percpu));
	async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE,
			   &sh->ops.zero_sum_result, percpu->spare_page, &submit);
}
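
/*
 * Run the asynchronous stripe operations requested in 'ops_request'
 * (biofill, compute, prexor, biodrain, reconstruct, check) on this CPU's
 * percpu scribble buffers, chaining them into one async_tx dependency chain.
 */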
static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
{
	int overlap_clear = 0, i, disks = sh->disks;
	struct dma_async_tx_descriptor *tx = NULL;
	struct r5conf *conf = sh->raid_conf;
	int level = conf->level;
	struct raid5_percpu *percpu;
	unsigned long cpu;

	cpu = get_cpu();
	percpu = per_cpu_ptr(conf->percpu, cpu);
	if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
		ops_run_biofill(sh);
		overlap_clear++;
	}

	if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
		if (level < 6)
			tx = ops_run_compute5(sh, percpu);
		else {
			if (sh->ops.target2 < 0 || sh->ops.target < 0)
				tx = ops_run_compute6_1(sh, percpu);
			else
				tx = ops_run_compute6_2(sh, percpu);
		}
		/* terminate the chain if reconstruct is not set to be run */
		if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request))
			async_tx_ack(tx);
	}

	if (test_bit(STRIPE_OP_PREXOR, &ops_request))
		tx = ops_run_prexor(sh, percpu, tx);

	if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
		tx = ops_run_biodrain(sh, tx);
		overlap_clear++;
	}

	if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) {
		if (level < 6)
			ops_run_reconstruct5(sh, percpu, tx);
		else
			ops_run_reconstruct6(sh, percpu, tx);
	}

	if (test_bit(STRIPE_OP_CHECK, &ops_request)) {
		if (sh->check_state == check_state_run)
			ops_run_check_p(sh, percpu);
		else if (sh->check_state == check_state_run_q)
			ops_run_check_pq(sh, percpu, 0);
		else if (sh->check_state == check_state_run_pq)
			ops_run_check_pq(sh, percpu, 1);
		else
			BUG();
	}

	if (overlap_clear)
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (test_and_clear_bit(R5_Overlap, &dev->flags))
				wake_up(&sh->raid_conf->wait_for_overlap);
		}
	put_cpu();
}
static int grow_one_stripe(struct r5conf *conf, int hash)
{
	struct stripe_head *sh;
	sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL);
	if (!sh)
		return 0;

	sh->raid_conf = conf;

	spin_lock_init(&sh->stripe_lock);

	if (grow_buffers(sh)) {
		shrink_buffers(sh);
		kmem_cache_free(conf->slab_cache, sh);
		return 0;
	}
	sh->hash_lock_index = hash;
	/* we just created an active stripe so... */
	atomic_set(&sh->count, 1);
	atomic_inc(&conf->active_stripes);
	INIT_LIST_HEAD(&sh->lru);
	release_stripe(sh);
	return 1;
}
static int grow_stripes(struct r5conf *conf, int num)
{
	struct kmem_cache *sc;
	int devs = max(conf->raid_disks, conf->previous_raid_disks);
	int hash;

	if (conf->mddev->gendisk)
		sprintf(conf->cache_name[0],
			"raid%d-%s", conf->level, mdname(conf->mddev));
	else
		sprintf(conf->cache_name[0],
			"raid%d-%p", conf->level, conf->mddev);
	sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);

	conf->active_name = 0;
	sc = kmem_cache_create(conf->cache_name[conf->active_name],
			       sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
			       0, 0, NULL);
	if (!sc)
		return 1;
	conf->slab_cache = sc;
	conf->pool_size = devs;
	hash = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
	while (num--) {
		if (!grow_one_stripe(conf, hash))
			return 1;
		conf->max_nr_stripes++;
		hash = (hash + 1) % NR_STRIPE_HASH_LOCKS;
	}
	return 0;
}
/**
 * scribble_len - return the required size of the scribble region
 * @num - total number of disks in the array
 *
 * The size must be enough to contain:
 * 1/ a struct page pointer for each device in the array +2
 * 2/ room to convert each entry in (1) to its corresponding dma
 *    (dma_map_page()) or page (page_address()) address.
 *
 * Note: the +2 is for the destination buffers of the ddf/raid6 case where we
 * calculate over all devices (not just the data blocks), using zeros in place
 * of the P and Q blocks.
 */
static size_t scribble_len(int num)
{
	size_t len;

	len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2);

	return len;
}
static int resize_stripes(struct r5conf *conf, int newsize)
{
	/* Make all the stripes able to hold 'newsize' devices.
	 * New slots in each stripe get 'page' set to a new page.
	 *
	 * This happens in stages:
	 * 1/ create a new kmem_cache and allocate the required number of
	 *    stripe_heads.
	 * 2/ gather all the old stripe_heads and transfer the pages across
	 *    to the new stripe_heads.  This will have the side effect of
	 *    freezing the array as once all stripe_heads have been collected,
	 *    no IO will be possible.  Old stripe heads are freed once their
	 *    pages have been transferred over, and the old kmem_cache is
	 *    freed when all stripes are done.
	 * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
	 *    we simply return a failure status - no need to clean anything up.
	 * 4/ allocate new pages for the new slots in the new stripe_heads.
	 *    If this fails, we don't bother trying to shrink the
	 *    stripe_heads down again, we just leave them as they are.
	 *    As each stripe_head is processed the new one is released into
	 *    service.
	 *
	 * Once step2 is started, we cannot afford to wait for a write,
	 * so we use GFP_NOIO allocations.
	 */
	struct stripe_head *osh, *nsh;
	LIST_HEAD(newstripes);
	struct disk_info *ndisks;
	unsigned long cpu;
	int err;
	struct kmem_cache *sc;
	int i;
	int hash, cnt;

	if (newsize <= conf->pool_size)
		return 0; /* never bother to shrink */

	err = md_allow_write(conf->mddev);
	if (err)
		return err;

	/* Step 1 */
	sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
			       sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
			       0, 0, NULL);
	if (!sc)
		return -ENOMEM;

	for (i = conf->max_nr_stripes; i; i--) {
		nsh = kmem_cache_zalloc(sc, GFP_KERNEL);
		if (!nsh)
			break;

		nsh->raid_conf = conf;
		spin_lock_init(&nsh->stripe_lock);

		list_add(&nsh->lru, &newstripes);
	}
	if (i) {
		/* didn't get enough, give up */
		while (!list_empty(&newstripes)) {
			nsh = list_entry(newstripes.next, struct stripe_head, lru);
			list_del(&nsh->lru);
			kmem_cache_free(sc, nsh);
		}
		kmem_cache_destroy(sc);
		return -ENOMEM;
	}
	/* Step 2 - Must use GFP_NOIO now.
	 * OK, we have enough stripes, start collecting inactive
	 * stripes and copying them over
	 */
	hash = 0;
	cnt = 0;
	list_for_each_entry(nsh, &newstripes, lru) {
		lock_device_hash_lock(conf, hash);
		wait_event_cmd(conf->wait_for_stripe,
				    !list_empty(conf->inactive_list + hash),
				    unlock_device_hash_lock(conf, hash),
				    lock_device_hash_lock(conf, hash));
		osh = get_free_stripe(conf, hash);
		unlock_device_hash_lock(conf, hash);
		atomic_set(&nsh->count, 1);
		for(i=0; i<conf->pool_size; i++) {
			nsh->dev[i].page = osh->dev[i].page;
			nsh->dev[i].orig_page = osh->dev[i].page;
		}
		for( ; i<newsize; i++)
			nsh->dev[i].page = NULL;
		nsh->hash_lock_index = hash;
		kmem_cache_free(conf->slab_cache, osh);
		cnt++;
		if (cnt >= conf->max_nr_stripes / NR_STRIPE_HASH_LOCKS +
		    !!((conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS) > hash)) {
			hash++;
			cnt = 0;
		}
	}
	kmem_cache_destroy(conf->slab_cache);

	/* Step 3.
	 * At this point, we are holding all the stripes so the array
	 * is completely stalled, so now is a good time to resize
	 * conf->disks and the scribble region
	 */
	ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
	if (ndisks) {
		for (i=0; i<conf->raid_disks; i++)
			ndisks[i] = conf->disks[i];
		kfree(conf->disks);
		conf->disks = ndisks;
	} else
		err = -ENOMEM;

	get_online_cpus();
	conf->scribble_len = scribble_len(newsize);
	for_each_present_cpu(cpu) {
		struct raid5_percpu *percpu;
		void *scribble;

		percpu = per_cpu_ptr(conf->percpu, cpu);
		scribble = kmalloc(conf->scribble_len, GFP_NOIO);

		if (scribble) {
			kfree(percpu->scribble);
			percpu->scribble = scribble;
		} else {
			err = -ENOMEM;
			break;
		}
	}
	put_online_cpus();

	/* Step 4, return new stripes to service */
	while(!list_empty(&newstripes)) {
		nsh = list_entry(newstripes.next, struct stripe_head, lru);
		list_del_init(&nsh->lru);

		for (i=conf->raid_disks; i < newsize; i++)
			if (nsh->dev[i].page == NULL) {
				struct page *p = alloc_page(GFP_NOIO);
				nsh->dev[i].page = p;
				nsh->dev[i].orig_page = p;
				if (!p)
					err = -ENOMEM;
			}
		release_stripe(nsh);
	}
	/* critical section pass, GFP_NOIO no longer needed */

	conf->slab_cache = sc;
	conf->active_name = 1-conf->active_name;
	conf->pool_size = newsize;
	return err;
}
static int drop_one_stripe(struct r5conf *conf, int hash)
{
	struct stripe_head *sh;

	spin_lock_irq(conf->hash_locks + hash);
	sh = get_free_stripe(conf, hash);
	spin_unlock_irq(conf->hash_locks + hash);
	if (!sh)
		return 0;
	BUG_ON(atomic_read(&sh->count));
	shrink_buffers(sh);
	kmem_cache_free(conf->slab_cache, sh);
	atomic_dec(&conf->active_stripes);
	return 1;
}
static void shrink_stripes(struct r5conf *conf)
{
	int hash;
	for (hash = 0; hash < NR_STRIPE_HASH_LOCKS; hash++)
		while (drop_one_stripe(conf, hash))
			;

	if (conf->slab_cache)
		kmem_cache_destroy(conf->slab_cache);
	conf->slab_cache = NULL;
}
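
/*
 * Completion handler for reads issued by ops_run_io: on success clear any
 * previous read-error state; on failure either schedule a retry/re-write or
 * record a bad block / fail the device.
 */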
static void raid5_end_read_request(struct bio * bi, int error)
{
	struct stripe_head *sh = bi->bi_private;
	struct r5conf *conf = sh->raid_conf;
	int disks = sh->disks, i;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
	char b[BDEVNAME_SIZE];
	struct md_rdev *rdev = NULL;
	sector_t s;

	for (i=0 ; i<disks; i++)
		if (bi == &sh->dev[i].req)
			break;

	pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return;
	}
	if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
		/* If replacement finished while this request was outstanding,
		 * 'replacement' might be NULL already.
		 * In that case it moved down to 'rdev'.
		 * rdev is not removed until all requests are finished.
		 */
		rdev = conf->disks[i].replacement;
	if (!rdev)
		rdev = conf->disks[i].rdev;

	if (use_new_offset(conf, sh))
		s = sh->sector + rdev->new_data_offset;
	else
		s = sh->sector + rdev->data_offset;
	if (uptodate) {
		set_bit(R5_UPTODATE, &sh->dev[i].flags);
		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
			/* Note that this cannot happen on a
			 * replacement device.  We just fail those on
			 * any error
			 */
			printk_ratelimited(
				KERN_INFO
				"md/raid:%s: read error corrected"
				" (%lu sectors at %llu on %s)\n",
				mdname(conf->mddev), STRIPE_SECTORS,
				(unsigned long long)s,
				bdevname(rdev->bdev, b));
			atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
		} else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
			clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);

		if (atomic_read(&rdev->read_errors))
			atomic_set(&rdev->read_errors, 0);
	} else {
		const char *bdn = bdevname(rdev->bdev, b);
		int retry = 0;
		int set_bad = 0;

		clear_bit(R5_UPTODATE, &sh->dev[i].flags);
		atomic_inc(&rdev->read_errors);
		if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
			printk_ratelimited(
				KERN_WARNING
				"md/raid:%s: read error on replacement device "
				"(sector %llu on %s).\n",
				mdname(conf->mddev),
				(unsigned long long)s,
				bdn);
		else if (conf->mddev->degraded >= conf->max_degraded) {
			set_bad = 1;
			printk_ratelimited(
				KERN_WARNING
				"md/raid:%s: read error not correctable "
				"(sector %llu on %s).\n",
				mdname(conf->mddev),
				(unsigned long long)s,
				bdn);
		} else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
			set_bad = 1;
			printk_ratelimited(
				KERN_WARNING
				"md/raid:%s: read error NOT corrected!! "
				"(sector %llu on %s).\n",
				mdname(conf->mddev),
				(unsigned long long)s,
				bdn);
		} else if (atomic_read(&rdev->read_errors)
			 > conf->max_nr_stripes)
			printk(KERN_WARNING
			       "md/raid:%s: Too many read errors, failing device %s.\n",
			       mdname(conf->mddev), bdn);
		else
			retry = 1;
		if (set_bad && test_bit(In_sync, &rdev->flags)
		    && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
			retry = 1;
		if (retry)
			if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
				set_bit(R5_ReadError, &sh->dev[i].flags);
				clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
			} else
				set_bit(R5_ReadNoMerge, &sh->dev[i].flags);
		else {
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
			if (!(set_bad
			      && test_bit(In_sync, &rdev->flags)
			      && rdev_set_badblocks(
				      rdev, sh->sector, STRIPE_SECTORS, 0)))
				md_error(conf->mddev, rdev);
		}
	}
	rdev_dec_pending(rdev, conf->mddev);
	clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
2091 static void raid5_end_write_request(struct bio
*bi
, int error
)
2093 struct stripe_head
*sh
= bi
->bi_private
;
2094 struct r5conf
*conf
= sh
->raid_conf
;
2095 int disks
= sh
->disks
, i
;
2096 struct md_rdev
*uninitialized_var(rdev
);
2097 int uptodate
= test_bit(BIO_UPTODATE
, &bi
->bi_flags
);
2100 int replacement
= 0;
2102 for (i
= 0 ; i
< disks
; i
++) {
2103 if (bi
== &sh
->dev
[i
].req
) {
2104 rdev
= conf
->disks
[i
].rdev
;
2107 if (bi
== &sh
->dev
[i
].rreq
) {
2108 rdev
= conf
->disks
[i
].replacement
;
2112 /* rdev was removed and 'replacement'
2113 * replaced it. rdev is not removed
2114 * until all requests are finished.
2116 rdev
= conf
->disks
[i
].rdev
;
2120 pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
2121 (unsigned long long)sh
->sector
, i
, atomic_read(&sh
->count
),
2130 md_error(conf
->mddev
, rdev
);
2131 else if (is_badblock(rdev
, sh
->sector
,
2133 &first_bad
, &bad_sectors
))
2134 set_bit(R5_MadeGoodRepl
, &sh
->dev
[i
].flags
);
2137 set_bit(STRIPE_DEGRADED
, &sh
->state
);
2138 set_bit(WriteErrorSeen
, &rdev
->flags
);
2139 set_bit(R5_WriteError
, &sh
->dev
[i
].flags
);
2140 if (!test_and_set_bit(WantReplacement
, &rdev
->flags
))
2141 set_bit(MD_RECOVERY_NEEDED
,
2142 &rdev
->mddev
->recovery
);
2143 } else if (is_badblock(rdev
, sh
->sector
,
2145 &first_bad
, &bad_sectors
)) {
2146 set_bit(R5_MadeGood
, &sh
->dev
[i
].flags
);
2147 if (test_bit(R5_ReadError
, &sh
->dev
[i
].flags
))
				/* That was a successful write so make
				 * sure it looks like we already did
				 * a re-write.
				 */
2152 set_bit(R5_ReWrite
, &sh
->dev
[i
].flags
);
2155 rdev_dec_pending(rdev
, conf
->mddev
);
2157 if (!test_and_clear_bit(R5_DOUBLE_LOCKED
, &sh
->dev
[i
].flags
))
2158 clear_bit(R5_LOCKED
, &sh
->dev
[i
].flags
);
2159 set_bit(STRIPE_HANDLE
, &sh
->state
);
static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);

static void raid5_build_block(struct stripe_head *sh, int i, int previous)
{
	struct r5dev *dev = &sh->dev[i];

	bio_init(&dev->req);
	dev->req.bi_io_vec = &dev->vec;
	dev->req.bi_max_vecs = 1;
	dev->req.bi_private = sh;

	bio_init(&dev->rreq);
	dev->rreq.bi_io_vec = &dev->rvec;
	dev->rreq.bi_max_vecs = 1;
	dev->rreq.bi_private = sh;

	dev->flags = 0;
	dev->sector = compute_blocknr(sh, i, previous);
}
static void error(struct mddev *mddev, struct md_rdev *rdev)
{
	char b[BDEVNAME_SIZE];
	struct r5conf *conf = mddev->private;
	unsigned long flags;
	pr_debug("raid456: error called\n");

	spin_lock_irqsave(&conf->device_lock, flags);
	clear_bit(In_sync, &rdev->flags);
	mddev->degraded = calc_degraded(conf);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);

	set_bit(Blocked, &rdev->flags);
	set_bit(Faulty, &rdev->flags);
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
	printk(KERN_ALERT
	       "md/raid:%s: Disk failure on %s, disabling device.\n"
	       "md/raid:%s: Operation continuing on %d devices.\n",
	       mdname(mddev),
	       bdevname(rdev->bdev, b),
	       mdname(mddev),
	       conf->raid_disks - mddev->degraded);
}
/*
 * Input: a 'big' sector number,
 * Output: index of the data and parity disk, and the sector # in them.
 */
2212 static sector_t
raid5_compute_sector(struct r5conf
*conf
, sector_t r_sector
,
2213 int previous
, int *dd_idx
,
2214 struct stripe_head
*sh
)
2216 sector_t stripe
, stripe2
;
2217 sector_t chunk_number
;
2218 unsigned int chunk_offset
;
2221 sector_t new_sector
;
2222 int algorithm
= previous
? conf
->prev_algo
2224 int sectors_per_chunk
= previous
? conf
->prev_chunk_sectors
2225 : conf
->chunk_sectors
;
2226 int raid_disks
= previous
? conf
->previous_raid_disks
2228 int data_disks
= raid_disks
- conf
->max_degraded
;
2230 /* First compute the information on this sector */
2233 * Compute the chunk number and the sector offset inside the chunk
2235 chunk_offset
= sector_div(r_sector
, sectors_per_chunk
);
2236 chunk_number
= r_sector
;
2239 * Compute the stripe number
2241 stripe
= chunk_number
;
2242 *dd_idx
= sector_div(stripe
, data_disks
);
2245 * Select the parity disk based on the user selected algorithm.
2247 pd_idx
= qd_idx
= -1;
2248 switch(conf
->level
) {
2250 pd_idx
= data_disks
;
2253 switch (algorithm
) {
2254 case ALGORITHM_LEFT_ASYMMETRIC
:
2255 pd_idx
= data_disks
- sector_div(stripe2
, raid_disks
);
2256 if (*dd_idx
>= pd_idx
)
2259 case ALGORITHM_RIGHT_ASYMMETRIC
:
2260 pd_idx
= sector_div(stripe2
, raid_disks
);
2261 if (*dd_idx
>= pd_idx
)
2264 case ALGORITHM_LEFT_SYMMETRIC
:
2265 pd_idx
= data_disks
- sector_div(stripe2
, raid_disks
);
2266 *dd_idx
= (pd_idx
+ 1 + *dd_idx
) % raid_disks
;
2268 case ALGORITHM_RIGHT_SYMMETRIC
:
2269 pd_idx
= sector_div(stripe2
, raid_disks
);
2270 *dd_idx
= (pd_idx
+ 1 + *dd_idx
) % raid_disks
;
2272 case ALGORITHM_PARITY_0
:
2276 case ALGORITHM_PARITY_N
:
2277 pd_idx
= data_disks
;
2285 switch (algorithm
) {
2286 case ALGORITHM_LEFT_ASYMMETRIC
:
2287 pd_idx
= raid_disks
- 1 - sector_div(stripe2
, raid_disks
);
2288 qd_idx
= pd_idx
+ 1;
2289 if (pd_idx
== raid_disks
-1) {
2290 (*dd_idx
)++; /* Q D D D P */
2292 } else if (*dd_idx
>= pd_idx
)
2293 (*dd_idx
) += 2; /* D D P Q D */
2295 case ALGORITHM_RIGHT_ASYMMETRIC
:
2296 pd_idx
= sector_div(stripe2
, raid_disks
);
2297 qd_idx
= pd_idx
+ 1;
2298 if (pd_idx
== raid_disks
-1) {
2299 (*dd_idx
)++; /* Q D D D P */
2301 } else if (*dd_idx
>= pd_idx
)
2302 (*dd_idx
) += 2; /* D D P Q D */
2304 case ALGORITHM_LEFT_SYMMETRIC
:
2305 pd_idx
= raid_disks
- 1 - sector_div(stripe2
, raid_disks
);
2306 qd_idx
= (pd_idx
+ 1) % raid_disks
;
2307 *dd_idx
= (pd_idx
+ 2 + *dd_idx
) % raid_disks
;
2309 case ALGORITHM_RIGHT_SYMMETRIC
:
2310 pd_idx
= sector_div(stripe2
, raid_disks
);
2311 qd_idx
= (pd_idx
+ 1) % raid_disks
;
2312 *dd_idx
= (pd_idx
+ 2 + *dd_idx
) % raid_disks
;
2315 case ALGORITHM_PARITY_0
:
2320 case ALGORITHM_PARITY_N
:
2321 pd_idx
= data_disks
;
2322 qd_idx
= data_disks
+ 1;
2325 case ALGORITHM_ROTATING_ZERO_RESTART
:
			/* Exactly the same as RIGHT_ASYMMETRIC, but the
			 * order of blocks for computing Q is different.
			 */
2329 pd_idx
= sector_div(stripe2
, raid_disks
);
2330 qd_idx
= pd_idx
+ 1;
2331 if (pd_idx
== raid_disks
-1) {
2332 (*dd_idx
)++; /* Q D D D P */
2334 } else if (*dd_idx
>= pd_idx
)
2335 (*dd_idx
) += 2; /* D D P Q D */
2339 case ALGORITHM_ROTATING_N_RESTART
:
			/* Same as left_asymmetric, but the first stripe is
			 * D D D P Q rather than
			 * Q D D D P
			 */
2345 pd_idx
= raid_disks
- 1 - sector_div(stripe2
, raid_disks
);
2346 qd_idx
= pd_idx
+ 1;
2347 if (pd_idx
== raid_disks
-1) {
2348 (*dd_idx
)++; /* Q D D D P */
2350 } else if (*dd_idx
>= pd_idx
)
2351 (*dd_idx
) += 2; /* D D P Q D */
2355 case ALGORITHM_ROTATING_N_CONTINUE
:
2356 /* Same as left_symmetric but Q is before P */
2357 pd_idx
= raid_disks
- 1 - sector_div(stripe2
, raid_disks
);
2358 qd_idx
= (pd_idx
+ raid_disks
- 1) % raid_disks
;
2359 *dd_idx
= (pd_idx
+ 1 + *dd_idx
) % raid_disks
;
2363 case ALGORITHM_LEFT_ASYMMETRIC_6
:
2364 /* RAID5 left_asymmetric, with Q on last device */
2365 pd_idx
= data_disks
- sector_div(stripe2
, raid_disks
-1);
2366 if (*dd_idx
>= pd_idx
)
2368 qd_idx
= raid_disks
- 1;
2371 case ALGORITHM_RIGHT_ASYMMETRIC_6
:
2372 pd_idx
= sector_div(stripe2
, raid_disks
-1);
2373 if (*dd_idx
>= pd_idx
)
2375 qd_idx
= raid_disks
- 1;
2378 case ALGORITHM_LEFT_SYMMETRIC_6
:
2379 pd_idx
= data_disks
- sector_div(stripe2
, raid_disks
-1);
2380 *dd_idx
= (pd_idx
+ 1 + *dd_idx
) % (raid_disks
-1);
2381 qd_idx
= raid_disks
- 1;
2384 case ALGORITHM_RIGHT_SYMMETRIC_6
:
2385 pd_idx
= sector_div(stripe2
, raid_disks
-1);
2386 *dd_idx
= (pd_idx
+ 1 + *dd_idx
) % (raid_disks
-1);
2387 qd_idx
= raid_disks
- 1;
2390 case ALGORITHM_PARITY_0_6
:
2393 qd_idx
= raid_disks
- 1;
2403 sh
->pd_idx
= pd_idx
;
2404 sh
->qd_idx
= qd_idx
;
2405 sh
->ddf_layout
= ddf_layout
;
2408 * Finally, compute the new sector number
2410 new_sector
= (sector_t
)stripe
* sectors_per_chunk
+ chunk_offset
;
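
/*
 * The layout switch above is easier to see with a concrete example.  What
 * follows is a minimal, compiled-out user-space sketch (it is not part of
 * this driver and every name in it is invented for the illustration): it
 * mimics only the RAID-5 ALGORITHM_LEFT_SYMMETRIC branch, deriving the
 * parity disk and the mapped data-disk index for a few stripes the same way
 * the code above does.
 */
#if 0
#include <stdio.h>

/* RAID-5 left-symmetric: parity rotates backwards, data wraps around it */
static void left_symmetric(unsigned long stripe, int raid_disks,
			   int logical_disk, int *pd_idx, int *dd_idx)
{
	int data_disks = raid_disks - 1;

	*pd_idx = data_disks - (int)(stripe % raid_disks);
	*dd_idx = (*pd_idx + 1 + logical_disk) % raid_disks;
}

int main(void)
{
	int raid_disks = 4;

	for (unsigned long stripe = 0; stripe < 4; stripe++)
		for (int d = 0; d < raid_disks - 1; d++) {
			int pd, dd;

			left_symmetric(stripe, raid_disks, d, &pd, &dd);
			printf("stripe %lu: data %d -> disk %d, parity on %d\n",
			       stripe, d, dd, pd);
		}
	return 0;
}
#endif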
2414 static sector_t
compute_blocknr(struct stripe_head
*sh
, int i
, int previous
)
2416 struct r5conf
*conf
= sh
->raid_conf
;
2417 int raid_disks
= sh
->disks
;
2418 int data_disks
= raid_disks
- conf
->max_degraded
;
2419 sector_t new_sector
= sh
->sector
, check
;
2420 int sectors_per_chunk
= previous
? conf
->prev_chunk_sectors
2421 : conf
->chunk_sectors
;
2422 int algorithm
= previous
? conf
->prev_algo
2426 sector_t chunk_number
;
2427 int dummy1
, dd_idx
= i
;
2429 struct stripe_head sh2
;
2431 chunk_offset
= sector_div(new_sector
, sectors_per_chunk
);
2432 stripe
= new_sector
;
2434 if (i
== sh
->pd_idx
)
2436 switch(conf
->level
) {
2439 switch (algorithm
) {
2440 case ALGORITHM_LEFT_ASYMMETRIC
:
2441 case ALGORITHM_RIGHT_ASYMMETRIC
:
2445 case ALGORITHM_LEFT_SYMMETRIC
:
2446 case ALGORITHM_RIGHT_SYMMETRIC
:
2449 i
-= (sh
->pd_idx
+ 1);
2451 case ALGORITHM_PARITY_0
:
2454 case ALGORITHM_PARITY_N
:
2461 if (i
== sh
->qd_idx
)
2462 return 0; /* It is the Q disk */
2463 switch (algorithm
) {
2464 case ALGORITHM_LEFT_ASYMMETRIC
:
2465 case ALGORITHM_RIGHT_ASYMMETRIC
:
2466 case ALGORITHM_ROTATING_ZERO_RESTART
:
2467 case ALGORITHM_ROTATING_N_RESTART
:
2468 if (sh
->pd_idx
== raid_disks
-1)
2469 i
--; /* Q D D D P */
2470 else if (i
> sh
->pd_idx
)
2471 i
-= 2; /* D D P Q D */
2473 case ALGORITHM_LEFT_SYMMETRIC
:
2474 case ALGORITHM_RIGHT_SYMMETRIC
:
2475 if (sh
->pd_idx
== raid_disks
-1)
2476 i
--; /* Q D D D P */
2481 i
-= (sh
->pd_idx
+ 2);
2484 case ALGORITHM_PARITY_0
:
2487 case ALGORITHM_PARITY_N
:
2489 case ALGORITHM_ROTATING_N_CONTINUE
:
2490 /* Like left_symmetric, but P is before Q */
2491 if (sh
->pd_idx
== 0)
2492 i
--; /* P D D D Q */
2497 i
-= (sh
->pd_idx
+ 1);
2500 case ALGORITHM_LEFT_ASYMMETRIC_6
:
2501 case ALGORITHM_RIGHT_ASYMMETRIC_6
:
2505 case ALGORITHM_LEFT_SYMMETRIC_6
:
2506 case ALGORITHM_RIGHT_SYMMETRIC_6
:
2508 i
+= data_disks
+ 1;
2509 i
-= (sh
->pd_idx
+ 1);
2511 case ALGORITHM_PARITY_0_6
:
2520 chunk_number
= stripe
* data_disks
+ i
;
2521 r_sector
= chunk_number
* sectors_per_chunk
+ chunk_offset
;
2523 check
= raid5_compute_sector(conf
, r_sector
,
2524 previous
, &dummy1
, &sh2
);
2525 if (check
!= sh
->sector
|| dummy1
!= dd_idx
|| sh2
.pd_idx
!= sh
->pd_idx
2526 || sh2
.qd_idx
!= sh
->qd_idx
) {
2527 printk(KERN_ERR
"md/raid:%s: compute_blocknr: map not correct\n",
2528 mdname(conf
->mddev
));
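
/*
 * compute_blocknr() is the inverse of raid5_compute_sector(), and the check
 * above re-runs the forward mapping to catch a bad inverse ("map not
 * correct").  Below is a compiled-out, purely illustrative user-space sketch
 * of that round-trip idea for a plain RAID-5 left-symmetric layout with one
 * sector per chunk; none of these helpers exist in the driver.
 */
#if 0
#include <assert.h>
#include <stdio.h>

struct mapping { int disk; unsigned long dev_sector; };

static struct mapping forward_map(unsigned long r_sector, int raid_disks)
{
	int data_disks = raid_disks - 1;
	unsigned long stripe = r_sector / data_disks;
	int dd = (int)(r_sector % data_disks);
	int pd = data_disks - (int)(stripe % raid_disks);
	struct mapping m = { (pd + 1 + dd) % raid_disks, stripe };

	return m;
}

static unsigned long backward_map(struct mapping m, int raid_disks)
{
	int data_disks = raid_disks - 1;
	int pd = data_disks - (int)(m.dev_sector % raid_disks);
	int dd = (m.disk + raid_disks - (pd + 1)) % raid_disks;

	return m.dev_sector * data_disks + dd;
}

int main(void)
{
	for (unsigned long s = 0; s < 64; s++) {
		struct mapping m = forward_map(s, 5);

		/* the user-space equivalent of the "map not correct" check */
		assert(backward_map(m, 5) == s);
	}
	printf("round-trip mapping OK\n");
	return 0;
}
#endif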
2535 schedule_reconstruction(struct stripe_head
*sh
, struct stripe_head_state
*s
,
2536 int rcw
, int expand
)
2538 int i
, pd_idx
= sh
->pd_idx
, disks
= sh
->disks
;
2539 struct r5conf
*conf
= sh
->raid_conf
;
2540 int level
= conf
->level
;
2544 for (i
= disks
; i
--; ) {
2545 struct r5dev
*dev
= &sh
->dev
[i
];
2548 set_bit(R5_LOCKED
, &dev
->flags
);
2549 set_bit(R5_Wantdrain
, &dev
->flags
);
2551 clear_bit(R5_UPTODATE
, &dev
->flags
);
	/* if we are not expanding this is a proper write request, and
	 * there will be bios with new data to be drained into the
	 * stripe cache
	 */
2561 /* False alarm, nothing to do */
2563 sh
->reconstruct_state
= reconstruct_state_drain_run
;
2564 set_bit(STRIPE_OP_BIODRAIN
, &s
->ops_request
);
2566 sh
->reconstruct_state
= reconstruct_state_run
;
2568 set_bit(STRIPE_OP_RECONSTRUCT
, &s
->ops_request
);
2570 if (s
->locked
+ conf
->max_degraded
== disks
)
2571 if (!test_and_set_bit(STRIPE_FULL_WRITE
, &sh
->state
))
2572 atomic_inc(&conf
->pending_full_writes
);
2575 BUG_ON(!(test_bit(R5_UPTODATE
, &sh
->dev
[pd_idx
].flags
) ||
2576 test_bit(R5_Wantcompute
, &sh
->dev
[pd_idx
].flags
)));
2578 for (i
= disks
; i
--; ) {
2579 struct r5dev
*dev
= &sh
->dev
[i
];
2584 (test_bit(R5_UPTODATE
, &dev
->flags
) ||
2585 test_bit(R5_Wantcompute
, &dev
->flags
))) {
2586 set_bit(R5_Wantdrain
, &dev
->flags
);
2587 set_bit(R5_LOCKED
, &dev
->flags
);
2588 clear_bit(R5_UPTODATE
, &dev
->flags
);
2593 /* False alarm - nothing to do */
2595 sh
->reconstruct_state
= reconstruct_state_prexor_drain_run
;
2596 set_bit(STRIPE_OP_PREXOR
, &s
->ops_request
);
2597 set_bit(STRIPE_OP_BIODRAIN
, &s
->ops_request
);
2598 set_bit(STRIPE_OP_RECONSTRUCT
, &s
->ops_request
);
	/* keep the parity disk(s) locked while asynchronous operations
	 * are in flight
	 */
2604 set_bit(R5_LOCKED
, &sh
->dev
[pd_idx
].flags
);
2605 clear_bit(R5_UPTODATE
, &sh
->dev
[pd_idx
].flags
);
2609 int qd_idx
= sh
->qd_idx
;
2610 struct r5dev
*dev
= &sh
->dev
[qd_idx
];
2612 set_bit(R5_LOCKED
, &dev
->flags
);
2613 clear_bit(R5_UPTODATE
, &dev
->flags
);
2617 pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
2618 __func__
, (unsigned long long)sh
->sector
,
2619 s
->locked
, s
->ops_request
);
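
/*
 * The prexor branch scheduled above is the read-modify-write parity update:
 * the old contents of the block being rewritten are XORed out of the parity
 * and the new contents are XORed in, so the untouched data blocks never have
 * to be read.  A compiled-out arithmetic sketch of that identity follows
 * (toy byte buffers, not the async_tx pipeline the driver actually uses).
 */
#if 0
#include <stdio.h>
#include <string.h>

#define BLK 8

int main(void)
{
	unsigned char d0[BLK], d1[BLK], d1_new[BLK], parity[BLK], check[BLK];

	for (int i = 0; i < BLK; i++) {
		d0[i] = (unsigned char)(i * 3);
		d1[i] = (unsigned char)(i * 7 + 1);
		d1_new[i] = (unsigned char)(i + 100);
		parity[i] = d0[i] ^ d1[i];	/* parity of the full stripe */
	}

	/* r-m-w update: parity ^= old_data ^ new_data, d0 is never re-read */
	for (int i = 0; i < BLK; i++)
		parity[i] ^= d1[i] ^ d1_new[i];

	/* compare against parity recomputed from scratch */
	for (int i = 0; i < BLK; i++)
		check[i] = d0[i] ^ d1_new[i];
	printf("prexor parity update correct: %d\n",
	       memcmp(parity, check, BLK) == 0);
	return 0;
}
#endif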
/*
 * Each stripe/dev can have one or more bios attached.
 * toread/towrite point to the first in a chain.
 * The bi_next chain must be in order.
 */
2627 static int add_stripe_bio(struct stripe_head
*sh
, struct bio
*bi
, int dd_idx
, int forwrite
)
2630 struct r5conf
*conf
= sh
->raid_conf
;
2633 pr_debug("adding bi b#%llu to stripe s#%llu\n",
2634 (unsigned long long)bi
->bi_iter
.bi_sector
,
2635 (unsigned long long)sh
->sector
);
	/*
	 * Several bios may share a stripe. The bio bi_phys_segments field acts
	 * as a reference count to avoid races. The reference count should
	 * already be increased before this function is called (for example, in
	 * make_request()), so other bios sharing this stripe will not free the
	 * stripe. If a stripe is used by only one bio, the stripe lock will
	 * protect it.
	 */
2645 spin_lock_irq(&sh
->stripe_lock
);
2647 bip
= &sh
->dev
[dd_idx
].towrite
;
2651 bip
= &sh
->dev
[dd_idx
].toread
;
2652 while (*bip
&& (*bip
)->bi_iter
.bi_sector
< bi
->bi_iter
.bi_sector
) {
2653 if (bio_end_sector(*bip
) > bi
->bi_iter
.bi_sector
)
2655 bip
= & (*bip
)->bi_next
;
2657 if (*bip
&& (*bip
)->bi_iter
.bi_sector
< bio_end_sector(bi
))
2660 BUG_ON(*bip
&& bi
->bi_next
&& (*bip
) != bi
->bi_next
);
2664 raid5_inc_bi_active_stripes(bi
);
2667 /* check if page is covered */
2668 sector_t sector
= sh
->dev
[dd_idx
].sector
;
2669 for (bi
=sh
->dev
[dd_idx
].towrite
;
2670 sector
< sh
->dev
[dd_idx
].sector
+ STRIPE_SECTORS
&&
2671 bi
&& bi
->bi_iter
.bi_sector
<= sector
;
2672 bi
= r5_next_bio(bi
, sh
->dev
[dd_idx
].sector
)) {
2673 if (bio_end_sector(bi
) >= sector
)
2674 sector
= bio_end_sector(bi
);
2676 if (sector
>= sh
->dev
[dd_idx
].sector
+ STRIPE_SECTORS
)
2677 set_bit(R5_OVERWRITE
, &sh
->dev
[dd_idx
].flags
);
2680 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
2681 (unsigned long long)(*bip
)->bi_iter
.bi_sector
,
2682 (unsigned long long)sh
->sector
, dd_idx
);
2683 spin_unlock_irq(&sh
->stripe_lock
);
2685 if (conf
->mddev
->bitmap
&& firstwrite
) {
2686 bitmap_startwrite(conf
->mddev
->bitmap
, sh
->sector
,
2688 sh
->bm_seq
= conf
->seq_flush
+1;
2689 set_bit(STRIPE_BIT_DELAY
, &sh
->state
);
2694 set_bit(R5_Overlap
, &sh
->dev
[dd_idx
].flags
);
2695 spin_unlock_irq(&sh
->stripe_lock
);
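
/*
 * add_stripe_bio() above keeps each dev's toread/towrite chain sorted by
 * start sector and refuses an insert that would overlap an existing bio
 * (R5_Overlap).  The compiled-out sketch below shows the same
 * pointer-to-pointer insertion walk on a toy request type; the structure and
 * helper are invented purely for illustration.
 */
#if 0
#include <stddef.h>
#include <stdio.h>

struct toy_req {
	unsigned long start, len;	/* covers [start, start + len) */
	struct toy_req *next;
};

/* returns 1 on success, 0 if the new request overlaps an existing one */
static int add_sorted(struct toy_req **head, struct toy_req *req)
{
	struct toy_req **rp = head;

	while (*rp && (*rp)->start < req->start) {
		if ((*rp)->start + (*rp)->len > req->start)
			return 0;	/* overlaps the entry before us */
		rp = &(*rp)->next;
	}
	if (*rp && (*rp)->start < req->start + req->len)
		return 0;		/* overlaps the entry after us */

	req->next = *rp;
	*rp = req;
	return 1;
}

int main(void)
{
	struct toy_req a = { 0, 8 }, b = { 16, 8 }, c = { 4, 8 };
	struct toy_req *head = NULL;

	printf("add a: %d\n", add_sorted(&head, &a));	/* 1 */
	printf("add b: %d\n", add_sorted(&head, &b));	/* 1 */
	printf("add c: %d\n", add_sorted(&head, &c));	/* 0: overlaps a */
	return 0;
}
#endif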
static void end_reshape(struct r5conf *conf);

static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
			   struct stripe_head *sh)
{
	int sectors_per_chunk =
		previous ? conf->prev_chunk_sectors : conf->chunk_sectors;
	int dd_idx;
	int chunk_offset = sector_div(stripe, sectors_per_chunk);
	int disks = previous ? conf->previous_raid_disks : conf->raid_disks;

	raid5_compute_sector(conf,
			     stripe * (disks - conf->max_degraded)
			     * sectors_per_chunk + chunk_offset,
			     previous, &dd_idx, sh);
}
2718 handle_failed_stripe(struct r5conf
*conf
, struct stripe_head
*sh
,
2719 struct stripe_head_state
*s
, int disks
,
2720 struct bio
**return_bi
)
2723 for (i
= disks
; i
--; ) {
2727 if (test_bit(R5_ReadError
, &sh
->dev
[i
].flags
)) {
2728 struct md_rdev
*rdev
;
2730 rdev
= rcu_dereference(conf
->disks
[i
].rdev
);
2731 if (rdev
&& test_bit(In_sync
, &rdev
->flags
))
2732 atomic_inc(&rdev
->nr_pending
);
2737 if (!rdev_set_badblocks(
2741 md_error(conf
->mddev
, rdev
);
2742 rdev_dec_pending(rdev
, conf
->mddev
);
2745 spin_lock_irq(&sh
->stripe_lock
);
2746 /* fail all writes first */
2747 bi
= sh
->dev
[i
].towrite
;
2748 sh
->dev
[i
].towrite
= NULL
;
2749 spin_unlock_irq(&sh
->stripe_lock
);
2753 if (test_and_clear_bit(R5_Overlap
, &sh
->dev
[i
].flags
))
2754 wake_up(&conf
->wait_for_overlap
);
2756 while (bi
&& bi
->bi_iter
.bi_sector
<
2757 sh
->dev
[i
].sector
+ STRIPE_SECTORS
) {
2758 struct bio
*nextbi
= r5_next_bio(bi
, sh
->dev
[i
].sector
);
2759 clear_bit(BIO_UPTODATE
, &bi
->bi_flags
);
2760 if (!raid5_dec_bi_active_stripes(bi
)) {
2761 md_write_end(conf
->mddev
);
2762 bi
->bi_next
= *return_bi
;
2768 bitmap_endwrite(conf
->mddev
->bitmap
, sh
->sector
,
2769 STRIPE_SECTORS
, 0, 0);
2771 /* and fail all 'written' */
2772 bi
= sh
->dev
[i
].written
;
2773 sh
->dev
[i
].written
= NULL
;
2774 if (test_and_clear_bit(R5_SkipCopy
, &sh
->dev
[i
].flags
)) {
2775 WARN_ON(test_bit(R5_UPTODATE
, &sh
->dev
[i
].flags
));
2776 sh
->dev
[i
].page
= sh
->dev
[i
].orig_page
;
2779 if (bi
) bitmap_end
= 1;
2780 while (bi
&& bi
->bi_iter
.bi_sector
<
2781 sh
->dev
[i
].sector
+ STRIPE_SECTORS
) {
2782 struct bio
*bi2
= r5_next_bio(bi
, sh
->dev
[i
].sector
);
2783 clear_bit(BIO_UPTODATE
, &bi
->bi_flags
);
2784 if (!raid5_dec_bi_active_stripes(bi
)) {
2785 md_write_end(conf
->mddev
);
2786 bi
->bi_next
= *return_bi
;
		/* fail any reads if this device is non-operational and
		 * the data has not reached the cache yet.
		 */
2795 if (!test_bit(R5_Wantfill
, &sh
->dev
[i
].flags
) &&
2796 (!test_bit(R5_Insync
, &sh
->dev
[i
].flags
) ||
2797 test_bit(R5_ReadError
, &sh
->dev
[i
].flags
))) {
2798 spin_lock_irq(&sh
->stripe_lock
);
2799 bi
= sh
->dev
[i
].toread
;
2800 sh
->dev
[i
].toread
= NULL
;
2801 spin_unlock_irq(&sh
->stripe_lock
);
2802 if (test_and_clear_bit(R5_Overlap
, &sh
->dev
[i
].flags
))
2803 wake_up(&conf
->wait_for_overlap
);
2804 while (bi
&& bi
->bi_iter
.bi_sector
<
2805 sh
->dev
[i
].sector
+ STRIPE_SECTORS
) {
2806 struct bio
*nextbi
=
2807 r5_next_bio(bi
, sh
->dev
[i
].sector
);
2808 clear_bit(BIO_UPTODATE
, &bi
->bi_flags
);
2809 if (!raid5_dec_bi_active_stripes(bi
)) {
2810 bi
->bi_next
= *return_bi
;
2817 bitmap_endwrite(conf
->mddev
->bitmap
, sh
->sector
,
2818 STRIPE_SECTORS
, 0, 0);
2819 /* If we were in the middle of a write the parity block might
2820 * still be locked - so just clear all R5_LOCKED flags
2822 clear_bit(R5_LOCKED
, &sh
->dev
[i
].flags
);
2825 if (test_and_clear_bit(STRIPE_FULL_WRITE
, &sh
->state
))
2826 if (atomic_dec_and_test(&conf
->pending_full_writes
))
2827 md_wakeup_thread(conf
->mddev
->thread
);
static void
handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
		   struct stripe_head_state *s)
{
	int abort = 0;
	int i;

	clear_bit(STRIPE_SYNCING, &sh->state);
	if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags))
		wake_up(&conf->wait_for_overlap);
	s->syncing = 0;
	s->replacing = 0;
	/* There is nothing more to do for sync/check/repair.
	 * Don't even need to abort as that is handled elsewhere
	 * if needed, and not always wanted e.g. if there is a known
	 * bad block here.
	 * For recover/replace we need to record a bad block on all
	 * non-sync devices, or abort the recovery
	 */
	if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) {
		/* During recovery devices cannot be removed, so
		 * locking and refcounting of rdevs is not needed
		 */
		for (i = 0; i < conf->raid_disks; i++) {
			struct md_rdev *rdev = conf->disks[i].rdev;

			if (rdev
			    && !test_bit(Faulty, &rdev->flags)
			    && !test_bit(In_sync, &rdev->flags)
			    && !rdev_set_badblocks(rdev, sh->sector,
						   STRIPE_SECTORS, 0))
				abort = 1;
			rdev = conf->disks[i].replacement;
			if (rdev
			    && !test_bit(Faulty, &rdev->flags)
			    && !test_bit(In_sync, &rdev->flags)
			    && !rdev_set_badblocks(rdev, sh->sector,
						   STRIPE_SECTORS, 0))
				abort = 1;
		}
		if (abort)
			conf->recovery_disabled =
				conf->mddev->recovery_disabled;
	}
	md_done_sync(conf->mddev, STRIPE_SECTORS, !abort);
}
static int want_replace(struct stripe_head *sh, int disk_idx)
{
	struct md_rdev *rdev;
	int rv = 0;
	/* Doing recovery so rcu locking not required */
	rdev = sh->raid_conf->disks[disk_idx].replacement;
	if (rdev
	    && !test_bit(Faulty, &rdev->flags)
	    && !test_bit(In_sync, &rdev->flags)
	    && (rdev->recovery_offset <= sh->sector
		|| rdev->mddev->recovery_cp <= sh->sector))
		rv = 1;

	return rv;
}
/* fetch_block - checks the given member device to see if its data needs
 * to be read or computed to satisfy a request.
 *
 * Returns 1 when no more member devices need to be checked, otherwise returns
 * 0 to tell the loop in handle_stripe_fill to continue
 */
2899 static int need_this_block(struct stripe_head
*sh
, struct stripe_head_state
*s
,
2900 int disk_idx
, int disks
)
2902 struct r5dev
*dev
= &sh
->dev
[disk_idx
];
2903 struct r5dev
*fdev
[2] = { &sh
->dev
[s
->failed_num
[0]],
2904 &sh
->dev
[s
->failed_num
[1]] };
2907 if (test_bit(R5_LOCKED
, &dev
->flags
) ||
2908 test_bit(R5_UPTODATE
, &dev
->flags
))
		/* No point reading this as we already have it or have
		 * decided to get it.
		 */
2915 (dev
->towrite
&& !test_bit(R5_OVERWRITE
, &dev
->flags
)))
2916 /* We need this block to directly satisfy a request */
2919 if (s
->syncing
|| s
->expanding
||
2920 (s
->replacing
&& want_replace(sh
, disk_idx
)))
2921 /* When syncing, or expanding we read everything.
2922 * When replacing, we need the replaced block.
2926 if ((s
->failed
>= 1 && fdev
[0]->toread
) ||
2927 (s
->failed
>= 2 && fdev
[1]->toread
))
2928 /* If we want to read from a failed device, then
2929 * we need to actually read every other device.
2934 (sh
->raid_conf
->level
<= 5 && s
->failed
&& fdev
[0]->towrite
&&
2935 (!test_bit(R5_Insync
, &dev
->flags
) || test_bit(STRIPE_PREREAD_ACTIVE
, &sh
->state
)) &&
2936 !test_bit(R5_OVERWRITE
, &fdev
[0]->flags
)) ||
2937 ((sh
->raid_conf
->level
== 6 ||
2938 sh
->sector
>= sh
->raid_conf
->mddev
->recovery_cp
)
2939 && s
->failed
&& s
->to_write
&&
2940 (s
->to_write
- s
->non_overwrite
<
2941 sh
->raid_conf
->raid_disks
- sh
->raid_conf
->max_degraded
) &&
2942 (!test_bit(R5_Insync
, &dev
->flags
) || test_bit(STRIPE_PREREAD_ACTIVE
, &sh
->state
))))
2947 static int fetch_block(struct stripe_head
*sh
, struct stripe_head_state
*s
,
2948 int disk_idx
, int disks
)
2950 struct r5dev
*dev
= &sh
->dev
[disk_idx
];
2952 /* is the data in this block needed, and can we get it? */
2953 if (need_this_block(sh
, s
, disk_idx
, disks
)) {
2954 /* we would like to get this block, possibly by computing it,
2955 * otherwise read it if the backing disk is insync
2957 BUG_ON(test_bit(R5_Wantcompute
, &dev
->flags
));
2958 BUG_ON(test_bit(R5_Wantread
, &dev
->flags
));
2959 if ((s
->uptodate
== disks
- 1) &&
2960 (s
->failed
&& (disk_idx
== s
->failed_num
[0] ||
2961 disk_idx
== s
->failed_num
[1]))) {
2962 /* have disk failed, and we're requested to fetch it;
2965 pr_debug("Computing stripe %llu block %d\n",
2966 (unsigned long long)sh
->sector
, disk_idx
);
2967 set_bit(STRIPE_COMPUTE_RUN
, &sh
->state
);
2968 set_bit(STRIPE_OP_COMPUTE_BLK
, &s
->ops_request
);
2969 set_bit(R5_Wantcompute
, &dev
->flags
);
2970 sh
->ops
.target
= disk_idx
;
2971 sh
->ops
.target2
= -1; /* no 2nd target */
2973 /* Careful: from this point on 'uptodate' is in the eye
2974 * of raid_run_ops which services 'compute' operations
2975 * before writes. R5_Wantcompute flags a block that will
2976 * be R5_UPTODATE by the time it is needed for a
2977 * subsequent operation.
2981 } else if (s
->uptodate
== disks
-2 && s
->failed
>= 2) {
2982 /* Computing 2-failure is *very* expensive; only
2983 * do it if failed >= 2
2986 for (other
= disks
; other
--; ) {
2987 if (other
== disk_idx
)
2989 if (!test_bit(R5_UPTODATE
,
2990 &sh
->dev
[other
].flags
))
2994 pr_debug("Computing stripe %llu blocks %d,%d\n",
2995 (unsigned long long)sh
->sector
,
2997 set_bit(STRIPE_COMPUTE_RUN
, &sh
->state
);
2998 set_bit(STRIPE_OP_COMPUTE_BLK
, &s
->ops_request
);
2999 set_bit(R5_Wantcompute
, &sh
->dev
[disk_idx
].flags
);
3000 set_bit(R5_Wantcompute
, &sh
->dev
[other
].flags
);
3001 sh
->ops
.target
= disk_idx
;
3002 sh
->ops
.target2
= other
;
3006 } else if (test_bit(R5_Insync
, &dev
->flags
)) {
3007 set_bit(R5_LOCKED
, &dev
->flags
);
3008 set_bit(R5_Wantread
, &dev
->flags
);
3010 pr_debug("Reading block %d (sync=%d)\n",
3011 disk_idx
, s
->syncing
);
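
/*
 * When a wanted block sits on a failed device, the fetch logic above reads
 * every surviving block of the stripe so the missing one can be rebuilt from
 * parity.  The compiled-out sketch below shows that recovery step for RAID-5
 * (single parity) as a plain XOR over a toy stripe; block count and sizes are
 * made up for the illustration.
 */
#if 0
#include <stdio.h>
#include <string.h>

#define NDISKS	4	/* 3 data blocks + 1 parity block */
#define BLK	16

int main(void)
{
	unsigned char blk[NDISKS][BLK], rebuilt[BLK];
	int failed = 1;

	/* fill the data blocks and accumulate parity as their XOR */
	memset(blk, 0, sizeof(blk));
	for (int d = 0; d < NDISKS - 1; d++)
		for (int i = 0; i < BLK; i++) {
			blk[d][i] = (unsigned char)(d * 37 + i);
			blk[NDISKS - 1][i] ^= blk[d][i];
		}

	/* "read every other device" and XOR them to rebuild the failed one */
	memset(rebuilt, 0, sizeof(rebuilt));
	for (int d = 0; d < NDISKS; d++)
		if (d != failed)
			for (int i = 0; i < BLK; i++)
				rebuilt[i] ^= blk[d][i];

	printf("recovered block matches original: %d\n",
	       memcmp(rebuilt, blk[failed], BLK) == 0);
	return 0;
}
#endif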
/**
 * handle_stripe_fill - read or compute data to satisfy pending requests.
 */
static void handle_stripe_fill(struct stripe_head *sh,
			       struct stripe_head_state *s,
			       int disks)
{
	int i;

	/* look for blocks to read/compute, skip this if a compute
	 * is already in flight, or if the stripe contents are in the
	 * midst of changing due to a write
	 */
	if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
	    !sh->reconstruct_state)
		for (i = disks; i--; )
			if (fetch_block(sh, s, i, disks))
				break;
	set_bit(STRIPE_HANDLE, &sh->state);
}
/* handle_stripe_clean_event
 * any written block on an uptodate or failed drive can be returned.
 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
 * never LOCKED, so we don't need to test 'failed' directly.
 */
3044 static void handle_stripe_clean_event(struct r5conf
*conf
,
3045 struct stripe_head
*sh
, int disks
, struct bio
**return_bi
)
3049 int discard_pending
= 0;
3051 for (i
= disks
; i
--; )
3052 if (sh
->dev
[i
].written
) {
3054 if (!test_bit(R5_LOCKED
, &dev
->flags
) &&
3055 (test_bit(R5_UPTODATE
, &dev
->flags
) ||
3056 test_bit(R5_Discard
, &dev
->flags
) ||
3057 test_bit(R5_SkipCopy
, &dev
->flags
))) {
3058 /* We can return any write requests */
3059 struct bio
*wbi
, *wbi2
;
3060 pr_debug("Return write for disc %d\n", i
);
3061 if (test_and_clear_bit(R5_Discard
, &dev
->flags
))
3062 clear_bit(R5_UPTODATE
, &dev
->flags
);
3063 if (test_and_clear_bit(R5_SkipCopy
, &dev
->flags
)) {
3064 WARN_ON(test_bit(R5_UPTODATE
, &dev
->flags
));
3065 dev
->page
= dev
->orig_page
;
3068 dev
->written
= NULL
;
3069 while (wbi
&& wbi
->bi_iter
.bi_sector
<
3070 dev
->sector
+ STRIPE_SECTORS
) {
3071 wbi2
= r5_next_bio(wbi
, dev
->sector
);
3072 if (!raid5_dec_bi_active_stripes(wbi
)) {
3073 md_write_end(conf
->mddev
);
3074 wbi
->bi_next
= *return_bi
;
3079 bitmap_endwrite(conf
->mddev
->bitmap
, sh
->sector
,
3081 !test_bit(STRIPE_DEGRADED
, &sh
->state
),
3083 } else if (test_bit(R5_Discard
, &dev
->flags
))
3084 discard_pending
= 1;
3085 WARN_ON(test_bit(R5_SkipCopy
, &dev
->flags
));
3086 WARN_ON(dev
->page
!= dev
->orig_page
);
3088 if (!discard_pending
&&
3089 test_bit(R5_Discard
, &sh
->dev
[sh
->pd_idx
].flags
)) {
3090 clear_bit(R5_Discard
, &sh
->dev
[sh
->pd_idx
].flags
);
3091 clear_bit(R5_UPTODATE
, &sh
->dev
[sh
->pd_idx
].flags
);
3092 if (sh
->qd_idx
>= 0) {
3093 clear_bit(R5_Discard
, &sh
->dev
[sh
->qd_idx
].flags
);
3094 clear_bit(R5_UPTODATE
, &sh
->dev
[sh
->qd_idx
].flags
);
3096 /* now that discard is done we can proceed with any sync */
3097 clear_bit(STRIPE_DISCARD
, &sh
->state
);
3099 * SCSI discard will change some bio fields and the stripe has
3100 * no updated data, so remove it from hash list and the stripe
3101 * will be reinitialized
3103 spin_lock_irq(&conf
->device_lock
);
3105 spin_unlock_irq(&conf
->device_lock
);
3106 if (test_bit(STRIPE_SYNC_REQUESTED
, &sh
->state
))
3107 set_bit(STRIPE_HANDLE
, &sh
->state
);
3111 if (test_and_clear_bit(STRIPE_FULL_WRITE
, &sh
->state
))
3112 if (atomic_dec_and_test(&conf
->pending_full_writes
))
3113 md_wakeup_thread(conf
->mddev
->thread
);
3116 static void handle_stripe_dirtying(struct r5conf
*conf
,
3117 struct stripe_head
*sh
,
3118 struct stripe_head_state
*s
,
3121 int rmw
= 0, rcw
= 0, i
;
3122 sector_t recovery_cp
= conf
->mddev
->recovery_cp
;
3124 /* RAID6 requires 'rcw' in current implementation.
3125 * Otherwise, check whether resync is now happening or should start.
3126 * If yes, then the array is dirty (after unclean shutdown or
3127 * initial creation), so parity in some stripes might be inconsistent.
3128 * In this case, we need to always do reconstruct-write, to ensure
3129 * that in case of drive failure or read-error correction, we
3130 * generate correct data from the parity.
3132 if (conf
->max_degraded
== 2 ||
3133 (recovery_cp
< MaxSector
&& sh
->sector
>= recovery_cp
)) {
3134 /* Calculate the real rcw later - for now make it
3135 * look like rcw is cheaper
3138 pr_debug("force RCW max_degraded=%u, recovery_cp=%llu sh->sector=%llu\n",
3139 conf
->max_degraded
, (unsigned long long)recovery_cp
,
3140 (unsigned long long)sh
->sector
);
3141 } else for (i
= disks
; i
--; ) {
3142 /* would I have to read this buffer for read_modify_write */
3143 struct r5dev
*dev
= &sh
->dev
[i
];
3144 if ((dev
->towrite
|| i
== sh
->pd_idx
) &&
3145 !test_bit(R5_LOCKED
, &dev
->flags
) &&
3146 !(test_bit(R5_UPTODATE
, &dev
->flags
) ||
3147 test_bit(R5_Wantcompute
, &dev
->flags
))) {
3148 if (test_bit(R5_Insync
, &dev
->flags
))
3151 rmw
+= 2*disks
; /* cannot read it */
3153 /* Would I have to read this buffer for reconstruct_write */
3154 if (!test_bit(R5_OVERWRITE
, &dev
->flags
) && i
!= sh
->pd_idx
&&
3155 !test_bit(R5_LOCKED
, &dev
->flags
) &&
3156 !(test_bit(R5_UPTODATE
, &dev
->flags
) ||
3157 test_bit(R5_Wantcompute
, &dev
->flags
))) {
3158 if (test_bit(R5_Insync
, &dev
->flags
))
3164 pr_debug("for sector %llu, rmw=%d rcw=%d\n",
3165 (unsigned long long)sh
->sector
, rmw
, rcw
);
3166 set_bit(STRIPE_HANDLE
, &sh
->state
);
3167 if (rmw
< rcw
&& rmw
> 0) {
3168 /* prefer read-modify-write, but need to get some data */
3169 if (conf
->mddev
->queue
)
3170 blk_add_trace_msg(conf
->mddev
->queue
,
3171 "raid5 rmw %llu %d",
3172 (unsigned long long)sh
->sector
, rmw
);
3173 for (i
= disks
; i
--; ) {
3174 struct r5dev
*dev
= &sh
->dev
[i
];
3175 if ((dev
->towrite
|| i
== sh
->pd_idx
) &&
3176 !test_bit(R5_LOCKED
, &dev
->flags
) &&
3177 !(test_bit(R5_UPTODATE
, &dev
->flags
) ||
3178 test_bit(R5_Wantcompute
, &dev
->flags
)) &&
3179 test_bit(R5_Insync
, &dev
->flags
)) {
3180 if (test_bit(STRIPE_PREREAD_ACTIVE
,
3182 pr_debug("Read_old block %d for r-m-w\n",
3184 set_bit(R5_LOCKED
, &dev
->flags
);
3185 set_bit(R5_Wantread
, &dev
->flags
);
3188 set_bit(STRIPE_DELAYED
, &sh
->state
);
3189 set_bit(STRIPE_HANDLE
, &sh
->state
);
3194 if (rcw
<= rmw
&& rcw
> 0) {
3195 /* want reconstruct write, but need to get some data */
3198 for (i
= disks
; i
--; ) {
3199 struct r5dev
*dev
= &sh
->dev
[i
];
3200 if (!test_bit(R5_OVERWRITE
, &dev
->flags
) &&
3201 i
!= sh
->pd_idx
&& i
!= sh
->qd_idx
&&
3202 !test_bit(R5_LOCKED
, &dev
->flags
) &&
3203 !(test_bit(R5_UPTODATE
, &dev
->flags
) ||
3204 test_bit(R5_Wantcompute
, &dev
->flags
))) {
3206 if (test_bit(R5_Insync
, &dev
->flags
) &&
3207 test_bit(STRIPE_PREREAD_ACTIVE
,
3209 pr_debug("Read_old block "
3210 "%d for Reconstruct\n", i
);
3211 set_bit(R5_LOCKED
, &dev
->flags
);
3212 set_bit(R5_Wantread
, &dev
->flags
);
3216 set_bit(STRIPE_DELAYED
, &sh
->state
);
3217 set_bit(STRIPE_HANDLE
, &sh
->state
);
3221 if (rcw
&& conf
->mddev
->queue
)
3222 blk_add_trace_msg(conf
->mddev
->queue
, "raid5 rcw %llu %d %d %d",
3223 (unsigned long long)sh
->sector
,
3224 rcw
, qread
, test_bit(STRIPE_DELAYED
, &sh
->state
));
3227 if (rcw
> disks
&& rmw
> disks
&&
3228 !test_bit(STRIPE_PREREAD_ACTIVE
, &sh
->state
))
3229 set_bit(STRIPE_DELAYED
, &sh
->state
);
3231 /* now if nothing is locked, and if we have enough data,
3232 * we can start a write request
3234 /* since handle_stripe can be called at any time we need to handle the
3235 * case where a compute block operation has been submitted and then a
3236 * subsequent call wants to start a write request. raid_run_ops only
3237 * handles the case where compute block and reconstruct are requested
3238 * simultaneously. If this is not the case then new writes need to be
3239 * held off until the compute completes.
3241 if ((s
->req_compute
|| !test_bit(STRIPE_COMPUTE_RUN
, &sh
->state
)) &&
3242 (s
->locked
== 0 && (rcw
== 0 || rmw
== 0) &&
3243 !test_bit(STRIPE_BIT_DELAY
, &sh
->state
)))
3244 schedule_reconstruction(sh
, s
, rcw
== 0, 0);
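
/*
 * handle_stripe_dirtying() above chooses between read-modify-write (read the
 * old copies of the blocks being rewritten plus the old parity) and
 * reconstruct-write (read the blocks that are not being rewritten) by
 * counting how many reads each plan would need.  The compiled-out counter
 * below is only the arithmetic behind that choice for a single-parity
 * stripe, not the driver's actual bookkeeping.
 */
#if 0
#include <stdio.h>

/* to_write[d] != 0 means data disk d is fully overwritten by pending writes */
static void count_reads(const int *to_write, int data_disks,
			int *rmw, int *rcw)
{
	*rmw = 1;	/* read-modify-write always needs the old parity */
	*rcw = 0;
	for (int d = 0; d < data_disks; d++) {
		if (to_write[d])
			(*rmw)++;	/* need the old copy of this block */
		else
			(*rcw)++;	/* need the untouched block */
	}
}

int main(void)
{
	int small_write[4] = { 1, 0, 0, 0 };	/* one of four blocks updated */
	int big_write[4]   = { 1, 1, 1, 0 };	/* three of four blocks updated */
	int rmw, rcw;

	count_reads(small_write, 4, &rmw, &rcw);
	printf("small write: rmw=%d rcw=%d -> prefer %s\n",
	       rmw, rcw, rmw < rcw ? "rmw" : "rcw");
	count_reads(big_write, 4, &rmw, &rcw);
	printf("big write:   rmw=%d rcw=%d -> prefer %s\n",
	       rmw, rcw, rmw < rcw ? "rmw" : "rcw");
	return 0;
}
#endif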
3247 static void handle_parity_checks5(struct r5conf
*conf
, struct stripe_head
*sh
,
3248 struct stripe_head_state
*s
, int disks
)
3250 struct r5dev
*dev
= NULL
;
3252 set_bit(STRIPE_HANDLE
, &sh
->state
);
3254 switch (sh
->check_state
) {
3255 case check_state_idle
:
3256 /* start a new check operation if there are no failures */
3257 if (s
->failed
== 0) {
3258 BUG_ON(s
->uptodate
!= disks
);
3259 sh
->check_state
= check_state_run
;
3260 set_bit(STRIPE_OP_CHECK
, &s
->ops_request
);
3261 clear_bit(R5_UPTODATE
, &sh
->dev
[sh
->pd_idx
].flags
);
3265 dev
= &sh
->dev
[s
->failed_num
[0]];
3267 case check_state_compute_result
:
3268 sh
->check_state
= check_state_idle
;
3270 dev
= &sh
->dev
[sh
->pd_idx
];
3272 /* check that a write has not made the stripe insync */
3273 if (test_bit(STRIPE_INSYNC
, &sh
->state
))
3276 /* either failed parity check, or recovery is happening */
3277 BUG_ON(!test_bit(R5_UPTODATE
, &dev
->flags
));
3278 BUG_ON(s
->uptodate
!= disks
);
3280 set_bit(R5_LOCKED
, &dev
->flags
);
3282 set_bit(R5_Wantwrite
, &dev
->flags
);
3284 clear_bit(STRIPE_DEGRADED
, &sh
->state
);
3285 set_bit(STRIPE_INSYNC
, &sh
->state
);
3287 case check_state_run
:
3288 break; /* we will be called again upon completion */
3289 case check_state_check_result
:
3290 sh
->check_state
= check_state_idle
;
3292 /* if a failure occurred during the check operation, leave
3293 * STRIPE_INSYNC not set and let the stripe be handled again
3298 /* handle a successful check operation, if parity is correct
3299 * we are done. Otherwise update the mismatch count and repair
3300 * parity if !MD_RECOVERY_CHECK
3302 if ((sh
->ops
.zero_sum_result
& SUM_CHECK_P_RESULT
) == 0)
3303 /* parity is correct (on disc,
3304 * not in buffer any more)
3306 set_bit(STRIPE_INSYNC
, &sh
->state
);
3308 atomic64_add(STRIPE_SECTORS
, &conf
->mddev
->resync_mismatches
);
3309 if (test_bit(MD_RECOVERY_CHECK
, &conf
->mddev
->recovery
))
3310 /* don't try to repair!! */
3311 set_bit(STRIPE_INSYNC
, &sh
->state
);
3313 sh
->check_state
= check_state_compute_run
;
3314 set_bit(STRIPE_COMPUTE_RUN
, &sh
->state
);
3315 set_bit(STRIPE_OP_COMPUTE_BLK
, &s
->ops_request
);
3316 set_bit(R5_Wantcompute
,
3317 &sh
->dev
[sh
->pd_idx
].flags
);
3318 sh
->ops
.target
= sh
->pd_idx
;
3319 sh
->ops
.target2
= -1;
3324 case check_state_compute_run
:
3327 printk(KERN_ERR
"%s: unknown check_state: %d sector: %llu\n",
3328 __func__
, sh
->check_state
,
3329 (unsigned long long) sh
->sector
);
3334 static void handle_parity_checks6(struct r5conf
*conf
, struct stripe_head
*sh
,
3335 struct stripe_head_state
*s
,
3338 int pd_idx
= sh
->pd_idx
;
3339 int qd_idx
= sh
->qd_idx
;
3342 set_bit(STRIPE_HANDLE
, &sh
->state
);
3344 BUG_ON(s
->failed
> 2);
3346 /* Want to check and possibly repair P and Q.
3347 * However there could be one 'failed' device, in which
3348 * case we can only check one of them, possibly using the
3349 * other to generate missing data
3352 switch (sh
->check_state
) {
3353 case check_state_idle
:
3354 /* start a new check operation if there are < 2 failures */
3355 if (s
->failed
== s
->q_failed
) {
3356 /* The only possible failed device holds Q, so it
3357 * makes sense to check P (If anything else were failed,
3358 * we would have used P to recreate it).
3360 sh
->check_state
= check_state_run
;
3362 if (!s
->q_failed
&& s
->failed
< 2) {
3363 /* Q is not failed, and we didn't use it to generate
3364 * anything, so it makes sense to check it
3366 if (sh
->check_state
== check_state_run
)
3367 sh
->check_state
= check_state_run_pq
;
3369 sh
->check_state
= check_state_run_q
;
3372 /* discard potentially stale zero_sum_result */
3373 sh
->ops
.zero_sum_result
= 0;
3375 if (sh
->check_state
== check_state_run
) {
3376 /* async_xor_zero_sum destroys the contents of P */
3377 clear_bit(R5_UPTODATE
, &sh
->dev
[pd_idx
].flags
);
3380 if (sh
->check_state
>= check_state_run
&&
3381 sh
->check_state
<= check_state_run_pq
) {
3382 /* async_syndrome_zero_sum preserves P and Q, so
3383 * no need to mark them !uptodate here
3385 set_bit(STRIPE_OP_CHECK
, &s
->ops_request
);
3389 /* we have 2-disk failure */
3390 BUG_ON(s
->failed
!= 2);
3392 case check_state_compute_result
:
3393 sh
->check_state
= check_state_idle
;
3395 /* check that a write has not made the stripe insync */
3396 if (test_bit(STRIPE_INSYNC
, &sh
->state
))
3399 /* now write out any block on a failed drive,
3400 * or P or Q if they were recomputed
3402 BUG_ON(s
->uptodate
< disks
- 1); /* We don't need Q to recover */
3403 if (s
->failed
== 2) {
3404 dev
= &sh
->dev
[s
->failed_num
[1]];
3406 set_bit(R5_LOCKED
, &dev
->flags
);
3407 set_bit(R5_Wantwrite
, &dev
->flags
);
3409 if (s
->failed
>= 1) {
3410 dev
= &sh
->dev
[s
->failed_num
[0]];
3412 set_bit(R5_LOCKED
, &dev
->flags
);
3413 set_bit(R5_Wantwrite
, &dev
->flags
);
3415 if (sh
->ops
.zero_sum_result
& SUM_CHECK_P_RESULT
) {
3416 dev
= &sh
->dev
[pd_idx
];
3418 set_bit(R5_LOCKED
, &dev
->flags
);
3419 set_bit(R5_Wantwrite
, &dev
->flags
);
3421 if (sh
->ops
.zero_sum_result
& SUM_CHECK_Q_RESULT
) {
3422 dev
= &sh
->dev
[qd_idx
];
3424 set_bit(R5_LOCKED
, &dev
->flags
);
3425 set_bit(R5_Wantwrite
, &dev
->flags
);
3427 clear_bit(STRIPE_DEGRADED
, &sh
->state
);
3429 set_bit(STRIPE_INSYNC
, &sh
->state
);
3431 case check_state_run
:
3432 case check_state_run_q
:
3433 case check_state_run_pq
:
3434 break; /* we will be called again upon completion */
3435 case check_state_check_result
:
3436 sh
->check_state
= check_state_idle
;
3438 /* handle a successful check operation, if parity is correct
3439 * we are done. Otherwise update the mismatch count and repair
3440 * parity if !MD_RECOVERY_CHECK
3442 if (sh
->ops
.zero_sum_result
== 0) {
3443 /* both parities are correct */
3445 set_bit(STRIPE_INSYNC
, &sh
->state
);
3447 /* in contrast to the raid5 case we can validate
3448 * parity, but still have a failure to write
3451 sh
->check_state
= check_state_compute_result
;
3452 /* Returning at this point means that we may go
3453 * off and bring p and/or q uptodate again so
3454 * we make sure to check zero_sum_result again
3455 * to verify if p or q need writeback
3459 atomic64_add(STRIPE_SECTORS
, &conf
->mddev
->resync_mismatches
);
3460 if (test_bit(MD_RECOVERY_CHECK
, &conf
->mddev
->recovery
))
3461 /* don't try to repair!! */
3462 set_bit(STRIPE_INSYNC
, &sh
->state
);
3464 int *target
= &sh
->ops
.target
;
3466 sh
->ops
.target
= -1;
3467 sh
->ops
.target2
= -1;
3468 sh
->check_state
= check_state_compute_run
;
3469 set_bit(STRIPE_COMPUTE_RUN
, &sh
->state
);
3470 set_bit(STRIPE_OP_COMPUTE_BLK
, &s
->ops_request
);
3471 if (sh
->ops
.zero_sum_result
& SUM_CHECK_P_RESULT
) {
3472 set_bit(R5_Wantcompute
,
3473 &sh
->dev
[pd_idx
].flags
);
3475 target
= &sh
->ops
.target2
;
3478 if (sh
->ops
.zero_sum_result
& SUM_CHECK_Q_RESULT
) {
3479 set_bit(R5_Wantcompute
,
3480 &sh
->dev
[qd_idx
].flags
);
3487 case check_state_compute_run
:
3490 printk(KERN_ERR
"%s: unknown check_state: %d sector: %llu\n",
3491 __func__
, sh
->check_state
,
3492 (unsigned long long) sh
->sector
);
3497 static void handle_stripe_expansion(struct r5conf
*conf
, struct stripe_head
*sh
)
3501 /* We have read all the blocks in this stripe and now we need to
3502 * copy some of them into a target stripe for expand.
3504 struct dma_async_tx_descriptor
*tx
= NULL
;
3505 clear_bit(STRIPE_EXPAND_SOURCE
, &sh
->state
);
3506 for (i
= 0; i
< sh
->disks
; i
++)
3507 if (i
!= sh
->pd_idx
&& i
!= sh
->qd_idx
) {
3509 struct stripe_head
*sh2
;
3510 struct async_submit_ctl submit
;
3512 sector_t bn
= compute_blocknr(sh
, i
, 1);
3513 sector_t s
= raid5_compute_sector(conf
, bn
, 0,
3515 sh2
= get_active_stripe(conf
, s
, 0, 1, 1);
3517 /* so far only the early blocks of this stripe
3518 * have been requested. When later blocks
3519 * get requested, we will try again
3522 if (!test_bit(STRIPE_EXPANDING
, &sh2
->state
) ||
3523 test_bit(R5_Expanded
, &sh2
->dev
[dd_idx
].flags
)) {
3524 /* must have already done this block */
3525 release_stripe(sh2
);
3529 /* place all the copies on one channel */
3530 init_async_submit(&submit
, 0, tx
, NULL
, NULL
, NULL
);
3531 tx
= async_memcpy(sh2
->dev
[dd_idx
].page
,
3532 sh
->dev
[i
].page
, 0, 0, STRIPE_SIZE
,
3535 set_bit(R5_Expanded
, &sh2
->dev
[dd_idx
].flags
);
3536 set_bit(R5_UPTODATE
, &sh2
->dev
[dd_idx
].flags
);
3537 for (j
= 0; j
< conf
->raid_disks
; j
++)
3538 if (j
!= sh2
->pd_idx
&&
3540 !test_bit(R5_Expanded
, &sh2
->dev
[j
].flags
))
3542 if (j
== conf
->raid_disks
) {
3543 set_bit(STRIPE_EXPAND_READY
, &sh2
->state
);
3544 set_bit(STRIPE_HANDLE
, &sh2
->state
);
3546 release_stripe(sh2
);
3549 /* done submitting copies, wait for them to complete */
3550 async_tx_quiesce(&tx
);
/*
 * handle_stripe - do things to a stripe.
 *
 * We lock the stripe by setting STRIPE_ACTIVE and then examine the
 * state of various bits to see what needs to be done.
 * Possible results:
 *    return some read requests which now have data
 *    return some write requests which are safely on storage
 *    schedule a read on some buffers
 *    schedule a write of some buffers
 *    return confirmation of parity correctness
 */
3567 static void analyse_stripe(struct stripe_head
*sh
, struct stripe_head_state
*s
)
3569 struct r5conf
*conf
= sh
->raid_conf
;
3570 int disks
= sh
->disks
;
3573 int do_recovery
= 0;
3575 memset(s
, 0, sizeof(*s
));
3577 s
->expanding
= test_bit(STRIPE_EXPAND_SOURCE
, &sh
->state
);
3578 s
->expanded
= test_bit(STRIPE_EXPAND_READY
, &sh
->state
);
3579 s
->failed_num
[0] = -1;
3580 s
->failed_num
[1] = -1;
3582 /* Now to look around and see what can be done */
3584 for (i
=disks
; i
--; ) {
3585 struct md_rdev
*rdev
;
3592 pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
3594 dev
->toread
, dev
->towrite
, dev
->written
);
3595 /* maybe we can reply to a read
3597 * new wantfill requests are only permitted while
3598 * ops_complete_biofill is guaranteed to be inactive
3600 if (test_bit(R5_UPTODATE
, &dev
->flags
) && dev
->toread
&&
3601 !test_bit(STRIPE_BIOFILL_RUN
, &sh
->state
))
3602 set_bit(R5_Wantfill
, &dev
->flags
);
3604 /* now count some things */
3605 if (test_bit(R5_LOCKED
, &dev
->flags
))
3607 if (test_bit(R5_UPTODATE
, &dev
->flags
))
3609 if (test_bit(R5_Wantcompute
, &dev
->flags
)) {
3611 BUG_ON(s
->compute
> 2);
3614 if (test_bit(R5_Wantfill
, &dev
->flags
))
3616 else if (dev
->toread
)
3620 if (!test_bit(R5_OVERWRITE
, &dev
->flags
))
3625 /* Prefer to use the replacement for reads, but only
3626 * if it is recovered enough and has no bad blocks.
3628 rdev
= rcu_dereference(conf
->disks
[i
].replacement
);
3629 if (rdev
&& !test_bit(Faulty
, &rdev
->flags
) &&
3630 rdev
->recovery_offset
>= sh
->sector
+ STRIPE_SECTORS
&&
3631 !is_badblock(rdev
, sh
->sector
, STRIPE_SECTORS
,
3632 &first_bad
, &bad_sectors
))
3633 set_bit(R5_ReadRepl
, &dev
->flags
);
3636 set_bit(R5_NeedReplace
, &dev
->flags
);
3637 rdev
= rcu_dereference(conf
->disks
[i
].rdev
);
3638 clear_bit(R5_ReadRepl
, &dev
->flags
);
3640 if (rdev
&& test_bit(Faulty
, &rdev
->flags
))
3643 is_bad
= is_badblock(rdev
, sh
->sector
, STRIPE_SECTORS
,
3644 &first_bad
, &bad_sectors
);
3645 if (s
->blocked_rdev
== NULL
3646 && (test_bit(Blocked
, &rdev
->flags
)
3649 set_bit(BlockedBadBlocks
,
3651 s
->blocked_rdev
= rdev
;
3652 atomic_inc(&rdev
->nr_pending
);
3655 clear_bit(R5_Insync
, &dev
->flags
);
3659 /* also not in-sync */
3660 if (!test_bit(WriteErrorSeen
, &rdev
->flags
) &&
3661 test_bit(R5_UPTODATE
, &dev
->flags
)) {
3662 /* treat as in-sync, but with a read error
3663 * which we can now try to correct
3665 set_bit(R5_Insync
, &dev
->flags
);
3666 set_bit(R5_ReadError
, &dev
->flags
);
3668 } else if (test_bit(In_sync
, &rdev
->flags
))
3669 set_bit(R5_Insync
, &dev
->flags
);
3670 else if (sh
->sector
+ STRIPE_SECTORS
<= rdev
->recovery_offset
)
3671 /* in sync if before recovery_offset */
3672 set_bit(R5_Insync
, &dev
->flags
);
3673 else if (test_bit(R5_UPTODATE
, &dev
->flags
) &&
3674 test_bit(R5_Expanded
, &dev
->flags
))
			/* If we've reshaped into here, we assume it is Insync.
			 * We will shortly update recovery_offset to make
			 * it official.
			 */
3679 set_bit(R5_Insync
, &dev
->flags
);
3681 if (test_bit(R5_WriteError
, &dev
->flags
)) {
3682 /* This flag does not apply to '.replacement'
3683 * only to .rdev, so make sure to check that*/
3684 struct md_rdev
*rdev2
= rcu_dereference(
3685 conf
->disks
[i
].rdev
);
3687 clear_bit(R5_Insync
, &dev
->flags
);
3688 if (rdev2
&& !test_bit(Faulty
, &rdev2
->flags
)) {
3689 s
->handle_bad_blocks
= 1;
3690 atomic_inc(&rdev2
->nr_pending
);
3692 clear_bit(R5_WriteError
, &dev
->flags
);
3694 if (test_bit(R5_MadeGood
, &dev
->flags
)) {
3695 /* This flag does not apply to '.replacement'
3696 * only to .rdev, so make sure to check that*/
3697 struct md_rdev
*rdev2
= rcu_dereference(
3698 conf
->disks
[i
].rdev
);
3699 if (rdev2
&& !test_bit(Faulty
, &rdev2
->flags
)) {
3700 s
->handle_bad_blocks
= 1;
3701 atomic_inc(&rdev2
->nr_pending
);
3703 clear_bit(R5_MadeGood
, &dev
->flags
);
3705 if (test_bit(R5_MadeGoodRepl
, &dev
->flags
)) {
3706 struct md_rdev
*rdev2
= rcu_dereference(
3707 conf
->disks
[i
].replacement
);
3708 if (rdev2
&& !test_bit(Faulty
, &rdev2
->flags
)) {
3709 s
->handle_bad_blocks
= 1;
3710 atomic_inc(&rdev2
->nr_pending
);
3712 clear_bit(R5_MadeGoodRepl
, &dev
->flags
);
3714 if (!test_bit(R5_Insync
, &dev
->flags
)) {
3715 /* The ReadError flag will just be confusing now */
3716 clear_bit(R5_ReadError
, &dev
->flags
);
3717 clear_bit(R5_ReWrite
, &dev
->flags
);
3719 if (test_bit(R5_ReadError
, &dev
->flags
))
3720 clear_bit(R5_Insync
, &dev
->flags
);
3721 if (!test_bit(R5_Insync
, &dev
->flags
)) {
3723 s
->failed_num
[s
->failed
] = i
;
3725 if (rdev
&& !test_bit(Faulty
, &rdev
->flags
))
3729 if (test_bit(STRIPE_SYNCING
, &sh
->state
)) {
3730 /* If there is a failed device being replaced,
3731 * we must be recovering.
3732 * else if we are after recovery_cp, we must be syncing
3733 * else if MD_RECOVERY_REQUESTED is set, we also are syncing.
3734 * else we can only be replacing
3735 * sync and recovery both need to read all devices, and so
3736 * use the same flag.
3739 sh
->sector
>= conf
->mddev
->recovery_cp
||
3740 test_bit(MD_RECOVERY_REQUESTED
, &(conf
->mddev
->recovery
)))
3748 static void handle_stripe(struct stripe_head
*sh
)
3750 struct stripe_head_state s
;
3751 struct r5conf
*conf
= sh
->raid_conf
;
3754 int disks
= sh
->disks
;
3755 struct r5dev
*pdev
, *qdev
;
3757 clear_bit(STRIPE_HANDLE
, &sh
->state
);
3758 if (test_and_set_bit_lock(STRIPE_ACTIVE
, &sh
->state
)) {
3759 /* already being handled, ensure it gets handled
3760 * again when current action finishes */
3761 set_bit(STRIPE_HANDLE
, &sh
->state
);
3765 if (test_bit(STRIPE_SYNC_REQUESTED
, &sh
->state
)) {
3766 spin_lock(&sh
->stripe_lock
);
3767 /* Cannot process 'sync' concurrently with 'discard' */
3768 if (!test_bit(STRIPE_DISCARD
, &sh
->state
) &&
3769 test_and_clear_bit(STRIPE_SYNC_REQUESTED
, &sh
->state
)) {
3770 set_bit(STRIPE_SYNCING
, &sh
->state
);
3771 clear_bit(STRIPE_INSYNC
, &sh
->state
);
3772 clear_bit(STRIPE_REPLACED
, &sh
->state
);
3774 spin_unlock(&sh
->stripe_lock
);
3776 clear_bit(STRIPE_DELAYED
, &sh
->state
);
3778 pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
3779 "pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n",
3780 (unsigned long long)sh
->sector
, sh
->state
,
3781 atomic_read(&sh
->count
), sh
->pd_idx
, sh
->qd_idx
,
3782 sh
->check_state
, sh
->reconstruct_state
);
3784 analyse_stripe(sh
, &s
);
3786 if (s
.handle_bad_blocks
) {
3787 set_bit(STRIPE_HANDLE
, &sh
->state
);
3791 if (unlikely(s
.blocked_rdev
)) {
3792 if (s
.syncing
|| s
.expanding
|| s
.expanded
||
3793 s
.replacing
|| s
.to_write
|| s
.written
) {
3794 set_bit(STRIPE_HANDLE
, &sh
->state
);
3797 /* There is nothing for the blocked_rdev to block */
3798 rdev_dec_pending(s
.blocked_rdev
, conf
->mddev
);
3799 s
.blocked_rdev
= NULL
;
3802 if (s
.to_fill
&& !test_bit(STRIPE_BIOFILL_RUN
, &sh
->state
)) {
3803 set_bit(STRIPE_OP_BIOFILL
, &s
.ops_request
);
3804 set_bit(STRIPE_BIOFILL_RUN
, &sh
->state
);
3807 pr_debug("locked=%d uptodate=%d to_read=%d"
3808 " to_write=%d failed=%d failed_num=%d,%d\n",
3809 s
.locked
, s
.uptodate
, s
.to_read
, s
.to_write
, s
.failed
,
3810 s
.failed_num
[0], s
.failed_num
[1]);
3811 /* check if the array has lost more than max_degraded devices and,
3812 * if so, some requests might need to be failed.
3814 if (s
.failed
> conf
->max_degraded
) {
3815 sh
->check_state
= 0;
3816 sh
->reconstruct_state
= 0;
3817 if (s
.to_read
+s
.to_write
+s
.written
)
3818 handle_failed_stripe(conf
, sh
, &s
, disks
, &s
.return_bi
);
3819 if (s
.syncing
+ s
.replacing
)
3820 handle_failed_sync(conf
, sh
, &s
);
	/* Now we check to see if any write operations have recently
	 * completed
	 */
3827 if (sh
->reconstruct_state
== reconstruct_state_prexor_drain_result
)
3829 if (sh
->reconstruct_state
== reconstruct_state_drain_result
||
3830 sh
->reconstruct_state
== reconstruct_state_prexor_drain_result
) {
3831 sh
->reconstruct_state
= reconstruct_state_idle
;
3833 /* All the 'written' buffers and the parity block are ready to
3834 * be written back to disk
3836 BUG_ON(!test_bit(R5_UPTODATE
, &sh
->dev
[sh
->pd_idx
].flags
) &&
3837 !test_bit(R5_Discard
, &sh
->dev
[sh
->pd_idx
].flags
));
3838 BUG_ON(sh
->qd_idx
>= 0 &&
3839 !test_bit(R5_UPTODATE
, &sh
->dev
[sh
->qd_idx
].flags
) &&
3840 !test_bit(R5_Discard
, &sh
->dev
[sh
->qd_idx
].flags
));
3841 for (i
= disks
; i
--; ) {
3842 struct r5dev
*dev
= &sh
->dev
[i
];
3843 if (test_bit(R5_LOCKED
, &dev
->flags
) &&
3844 (i
== sh
->pd_idx
|| i
== sh
->qd_idx
||
3846 pr_debug("Writing block %d\n", i
);
3847 set_bit(R5_Wantwrite
, &dev
->flags
);
3852 if (!test_bit(R5_Insync
, &dev
->flags
) ||
3853 ((i
== sh
->pd_idx
|| i
== sh
->qd_idx
) &&
3855 set_bit(STRIPE_INSYNC
, &sh
->state
);
3858 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE
, &sh
->state
))
3859 s
.dec_preread_active
= 1;
3863 * might be able to return some write requests if the parity blocks
3864 * are safe, or on a failed drive
3866 pdev
= &sh
->dev
[sh
->pd_idx
];
3867 s
.p_failed
= (s
.failed
>= 1 && s
.failed_num
[0] == sh
->pd_idx
)
3868 || (s
.failed
>= 2 && s
.failed_num
[1] == sh
->pd_idx
);
3869 qdev
= &sh
->dev
[sh
->qd_idx
];
3870 s
.q_failed
= (s
.failed
>= 1 && s
.failed_num
[0] == sh
->qd_idx
)
3871 || (s
.failed
>= 2 && s
.failed_num
[1] == sh
->qd_idx
)
3875 (s
.p_failed
|| ((test_bit(R5_Insync
, &pdev
->flags
)
3876 && !test_bit(R5_LOCKED
, &pdev
->flags
)
3877 && (test_bit(R5_UPTODATE
, &pdev
->flags
) ||
3878 test_bit(R5_Discard
, &pdev
->flags
))))) &&
3879 (s
.q_failed
|| ((test_bit(R5_Insync
, &qdev
->flags
)
3880 && !test_bit(R5_LOCKED
, &qdev
->flags
)
3881 && (test_bit(R5_UPTODATE
, &qdev
->flags
) ||
3882 test_bit(R5_Discard
, &qdev
->flags
))))))
3883 handle_stripe_clean_event(conf
, sh
, disks
, &s
.return_bi
);
3885 /* Now we might consider reading some blocks, either to check/generate
3886 * parity, or to satisfy requests
3887 * or to load a block that is being partially written.
3889 if (s
.to_read
|| s
.non_overwrite
3890 || (conf
->level
== 6 && s
.to_write
&& s
.failed
)
3891 || (s
.syncing
&& (s
.uptodate
+ s
.compute
< disks
))
3894 handle_stripe_fill(sh
, &s
, disks
);
3896 /* Now to consider new write requests and what else, if anything
3897 * should be read. We do not handle new writes when:
3898 * 1/ A 'write' operation (copy+xor) is already in flight.
3899 * 2/ A 'check' operation is in flight, as it may clobber the parity
3902 if (s
.to_write
&& !sh
->reconstruct_state
&& !sh
->check_state
)
3903 handle_stripe_dirtying(conf
, sh
, &s
, disks
);
3905 /* maybe we need to check and possibly fix the parity for this stripe
3906 * Any reads will already have been scheduled, so we just see if enough
3907 * data is available. The parity check is held off while parity
3908 * dependent operations are in flight.
3910 if (sh
->check_state
||
3911 (s
.syncing
&& s
.locked
== 0 &&
3912 !test_bit(STRIPE_COMPUTE_RUN
, &sh
->state
) &&
3913 !test_bit(STRIPE_INSYNC
, &sh
->state
))) {
3914 if (conf
->level
== 6)
3915 handle_parity_checks6(conf
, sh
, &s
, disks
);
3917 handle_parity_checks5(conf
, sh
, &s
, disks
);
3920 if ((s
.replacing
|| s
.syncing
) && s
.locked
== 0
3921 && !test_bit(STRIPE_COMPUTE_RUN
, &sh
->state
)
3922 && !test_bit(STRIPE_REPLACED
, &sh
->state
)) {
3923 /* Write out to replacement devices where possible */
3924 for (i
= 0; i
< conf
->raid_disks
; i
++)
3925 if (test_bit(R5_NeedReplace
, &sh
->dev
[i
].flags
)) {
3926 WARN_ON(!test_bit(R5_UPTODATE
, &sh
->dev
[i
].flags
));
3927 set_bit(R5_WantReplace
, &sh
->dev
[i
].flags
);
3928 set_bit(R5_LOCKED
, &sh
->dev
[i
].flags
);
3932 set_bit(STRIPE_INSYNC
, &sh
->state
);
3933 set_bit(STRIPE_REPLACED
, &sh
->state
);
3935 if ((s
.syncing
|| s
.replacing
) && s
.locked
== 0 &&
3936 !test_bit(STRIPE_COMPUTE_RUN
, &sh
->state
) &&
3937 test_bit(STRIPE_INSYNC
, &sh
->state
)) {
3938 md_done_sync(conf
->mddev
, STRIPE_SECTORS
, 1);
3939 clear_bit(STRIPE_SYNCING
, &sh
->state
);
3940 if (test_and_clear_bit(R5_Overlap
, &sh
->dev
[sh
->pd_idx
].flags
))
3941 wake_up(&conf
->wait_for_overlap
);
3944 /* If the failed drives are just a ReadError, then we might need
3945 * to progress the repair/check process
3947 if (s
.failed
<= conf
->max_degraded
&& !conf
->mddev
->ro
)
3948 for (i
= 0; i
< s
.failed
; i
++) {
3949 struct r5dev
*dev
= &sh
->dev
[s
.failed_num
[i
]];
3950 if (test_bit(R5_ReadError
, &dev
->flags
)
3951 && !test_bit(R5_LOCKED
, &dev
->flags
)
3952 && test_bit(R5_UPTODATE
, &dev
->flags
)
3954 if (!test_bit(R5_ReWrite
, &dev
->flags
)) {
3955 set_bit(R5_Wantwrite
, &dev
->flags
);
3956 set_bit(R5_ReWrite
, &dev
->flags
);
3957 set_bit(R5_LOCKED
, &dev
->flags
);
3960 /* let's read it back */
3961 set_bit(R5_Wantread
, &dev
->flags
);
3962 set_bit(R5_LOCKED
, &dev
->flags
);
3968 /* Finish reconstruct operations initiated by the expansion process */
3969 if (sh
->reconstruct_state
== reconstruct_state_result
) {
3970 struct stripe_head
*sh_src
3971 = get_active_stripe(conf
, sh
->sector
, 1, 1, 1);
3972 if (sh_src
&& test_bit(STRIPE_EXPAND_SOURCE
, &sh_src
->state
)) {
3973 /* sh cannot be written until sh_src has been read.
3974 * so arrange for sh to be delayed a little
3976 set_bit(STRIPE_DELAYED
, &sh
->state
);
3977 set_bit(STRIPE_HANDLE
, &sh
->state
);
3978 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE
,
3980 atomic_inc(&conf
->preread_active_stripes
);
3981 release_stripe(sh_src
);
3985 release_stripe(sh_src
);
3987 sh
->reconstruct_state
= reconstruct_state_idle
;
3988 clear_bit(STRIPE_EXPANDING
, &sh
->state
);
3989 for (i
= conf
->raid_disks
; i
--; ) {
3990 set_bit(R5_Wantwrite
, &sh
->dev
[i
].flags
);
3991 set_bit(R5_LOCKED
, &sh
->dev
[i
].flags
);
3996 if (s
.expanded
&& test_bit(STRIPE_EXPANDING
, &sh
->state
) &&
3997 !sh
->reconstruct_state
) {
3998 /* Need to write out all blocks after computing parity */
3999 sh
->disks
= conf
->raid_disks
;
4000 stripe_set_idx(sh
->sector
, conf
, 0, sh
);
4001 schedule_reconstruction(sh
, &s
, 1, 1);
4002 } else if (s
.expanded
&& !sh
->reconstruct_state
&& s
.locked
== 0) {
4003 clear_bit(STRIPE_EXPAND_READY
, &sh
->state
);
4004 atomic_dec(&conf
->reshape_stripes
);
4005 wake_up(&conf
->wait_for_overlap
);
4006 md_done_sync(conf
->mddev
, STRIPE_SECTORS
, 1);
4009 if (s
.expanding
&& s
.locked
== 0 &&
4010 !test_bit(STRIPE_COMPUTE_RUN
, &sh
->state
))
4011 handle_stripe_expansion(conf
, sh
);
4014 /* wait for this device to become unblocked */
4015 if (unlikely(s
.blocked_rdev
)) {
4016 if (conf
->mddev
->external
)
4017 md_wait_for_blocked_rdev(s
.blocked_rdev
,
4020 /* Internal metadata will immediately
4021 * be written by raid5d, so we don't
4022 * need to wait here.
4024 rdev_dec_pending(s
.blocked_rdev
,
4028 if (s
.handle_bad_blocks
)
4029 for (i
= disks
; i
--; ) {
4030 struct md_rdev
*rdev
;
4031 struct r5dev
*dev
= &sh
->dev
[i
];
4032 if (test_and_clear_bit(R5_WriteError
, &dev
->flags
)) {
4033 /* We own a safe reference to the rdev */
4034 rdev
= conf
->disks
[i
].rdev
;
4035 if (!rdev_set_badblocks(rdev
, sh
->sector
,
4037 md_error(conf
->mddev
, rdev
);
4038 rdev_dec_pending(rdev
, conf
->mddev
);
4040 if (test_and_clear_bit(R5_MadeGood
, &dev
->flags
)) {
4041 rdev
= conf
->disks
[i
].rdev
;
4042 rdev_clear_badblocks(rdev
, sh
->sector
,
4044 rdev_dec_pending(rdev
, conf
->mddev
);
4046 if (test_and_clear_bit(R5_MadeGoodRepl
, &dev
->flags
)) {
4047 rdev
= conf
->disks
[i
].replacement
;
				/* rdev has been moved down */
4050 rdev
= conf
->disks
[i
].rdev
;
4051 rdev_clear_badblocks(rdev
, sh
->sector
,
4053 rdev_dec_pending(rdev
, conf
->mddev
);
4058 raid_run_ops(sh
, s
.ops_request
);
4062 if (s
.dec_preread_active
) {
4063 /* We delay this until after ops_run_io so that if make_request
4064 * is waiting on a flush, it won't continue until the writes
4065 * have actually been submitted.
4067 atomic_dec(&conf
->preread_active_stripes
);
4068 if (atomic_read(&conf
->preread_active_stripes
) <
4070 md_wakeup_thread(conf
->mddev
->thread
);
4073 return_io(s
.return_bi
);
4075 clear_bit_unlock(STRIPE_ACTIVE
, &sh
->state
);
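
/*
 * handle_stripe() is serialized per stripe with STRIPE_ACTIVE: a caller that
 * finds the bit already set just records STRIPE_HANDLE so the stripe gets
 * revisited once the current pass ends.  The compiled-out sketch below
 * reproduces that "run once, remember re-requests" pattern with C11 atomics
 * on a toy structure; it illustrates the idea only and is not how raid5d
 * drives the real state machine.
 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_stripe {
	atomic_bool active;	/* STRIPE_ACTIVE: one handler at a time */
	atomic_bool handle;	/* STRIPE_HANDLE: needs another pass */
};

static void toy_handle(struct toy_stripe *sh)
{
	atomic_store(&sh->handle, false);
	if (atomic_exchange(&sh->active, true)) {
		/* already being handled: ask for another pass and bail out */
		atomic_store(&sh->handle, true);
		return;
	}

	/* ... one pass of work over the stripe would go here ... */

	atomic_store(&sh->active, false);
	if (atomic_load(&sh->handle))
		printf("stripe must be handled again\n");
}

int main(void)
{
	struct toy_stripe sh = { false, false };

	toy_handle(&sh);
	printf("done\n");
	return 0;
}
#endif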
static void raid5_activate_delayed(struct r5conf *conf)
{
	if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
		while (!list_empty(&conf->delayed_list)) {
			struct list_head *l = conf->delayed_list.next;
			struct stripe_head *sh;
			sh = list_entry(l, struct stripe_head, lru);
			list_del_init(l);
			clear_bit(STRIPE_DELAYED, &sh->state);
			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
				atomic_inc(&conf->preread_active_stripes);
			list_add_tail(&sh->lru, &conf->hold_list);
			raid5_wakeup_stripe_thread(sh);
		}
	}
}
static void activate_bit_delay(struct r5conf *conf,
			       struct list_head *temp_inactive_list)
{
	/* device_lock is held */
	struct list_head head;
	list_add(&head, &conf->bitmap_list);
	list_del_init(&conf->bitmap_list);
	while (!list_empty(&head)) {
		struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
		int hash;

		list_del_init(&sh->lru);
		atomic_inc(&sh->count);
		hash = sh->hash_lock_index;
		__release_stripe(conf, sh, &temp_inactive_list[hash]);
	}
}
int md_raid5_congested(struct mddev *mddev, int bits)
{
	struct r5conf *conf = mddev->private;

	/* No difference between reads and writes.  Just check
	 * how busy the stripe_cache is
	 */
	if (conf->inactive_blocked)
		return 1;
	if (atomic_read(&conf->empty_inactive_list_nr))
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(md_raid5_congested);

static int raid5_congested(void *data, int bits)
{
	struct mddev *mddev = data;

	return mddev_congested(mddev, bits) ||
		md_raid5_congested(mddev, bits);
}
/* We want read requests to align with chunks where possible,
 * but write requests don't need to.
 */
static int raid5_mergeable_bvec(struct request_queue *q,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	struct mddev *mddev = q->queuedata;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max;
	unsigned int chunk_sectors = mddev->chunk_sectors;
	unsigned int bio_sectors = bvm->bi_size >> 9;

	if ((bvm->bi_rw & 1) == WRITE)
		return biovec->bv_len; /* always allow writes to be mergeable */

	if (mddev->new_chunk_sectors < mddev->chunk_sectors)
		chunk_sectors = mddev->new_chunk_sectors;
	max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
	if (max < 0)
		max = 0;
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
	else
		return max;
}
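/*
 * Illustrative example (added, not part of the original source): with a
 * 64KiB chunk (chunk_sectors = 128) and a read whose device offset falls
 * 120 sectors into its chunk while no sectors are queued yet
 * (bio_sectors == 0), max = (128 - 120) << 9 = 4096, so at most 4KiB more
 * may be merged before the request would cross the chunk boundary.
 */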
static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
{
	sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev);
	unsigned int chunk_sectors = mddev->chunk_sectors;
	unsigned int bio_sectors = bio_sectors(bio);

	if (mddev->new_chunk_sectors < mddev->chunk_sectors)
		chunk_sectors = mddev->new_chunk_sectors;
	return  chunk_sectors >=
		((sector & (chunk_sectors - 1)) + bio_sectors);
}
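/*
 * Worked example (added, illustrative): with chunk_sectors = 128, an
 * 8-sector bio starting at device sector 1000 sits 1000 & 127 = 104
 * sectors into its chunk; 104 + 8 = 112 <= 128, so in_chunk_boundary()
 * returns true and the read can be serviced as a single chunk-aligned
 * read from one device.
 */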
/*
 *  add bio to the retry LIFO  ( in O(1) ... we are in interrupt )
 *  later sampled by raid5d.
 */
static void add_bio_to_retry(struct bio *bi, struct r5conf *conf)
{
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);

	bi->bi_next = conf->retry_read_aligned_list;
	conf->retry_read_aligned_list = bi;

	spin_unlock_irqrestore(&conf->device_lock, flags);

	md_wakeup_thread(conf->mddev->thread);
}
static struct bio *remove_bio_from_retry(struct r5conf *conf)
{
	struct bio *bi;

	bi = conf->retry_read_aligned;
	if (bi) {
		conf->retry_read_aligned = NULL;
		return bi;
	}
	bi = conf->retry_read_aligned_list;
	if (bi) {
		conf->retry_read_aligned_list = bi->bi_next;
		bi->bi_next = NULL;
		/*
		 * this sets the active stripe count to 1 and the processed
		 * stripe count to zero (upper bits)
		 */
		raid5_set_bi_stripes(bi, 1); /* biased count of active stripes */
	}

	return bi;
}
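/*
 * Illustrative note (added): the raid5_*_bi_stripes accessors pack two
 * counters into bio->bi_phys_segments. The low half is the number of
 * stripes the bio is still active on, seeded to 1 here so the bio cannot
 * complete while it is being resubmitted; the high half records how many
 * stripes have already been processed, which retry_aligned_read() checks
 * so that a partially handled aligned read resumes where it left off.
 */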
/*
 *  The "raid5_align_endio" should check if the read succeeded and if it
 *  did, call bio_endio on the original bio (having bio_put the new bio
 *  first).
 *  If the read failed..
 */
static void raid5_align_endio(struct bio *bi, int error)
{
	struct bio *raid_bi = bi->bi_private;
	struct mddev *mddev;
	struct r5conf *conf;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
	struct md_rdev *rdev;

	bio_put(bi);

	rdev = (void *)raid_bi->bi_next;
	raid_bi->bi_next = NULL;
	mddev = rdev->mddev;
	conf = mddev->private;

	rdev_dec_pending(rdev, conf->mddev);

	if (!error && uptodate) {
		trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev),
					 raid_bi, 0);
		bio_endio(raid_bi, 0);
		if (atomic_dec_and_test(&conf->active_aligned_reads))
			wake_up(&conf->wait_for_stripe);
		return;
	}

	pr_debug("raid5_align_endio : io error...handing IO for a retry\n");

	add_bio_to_retry(raid_bi, conf);
}
static int bio_fits_rdev(struct bio *bi)
{
	struct request_queue *q = bdev_get_queue(bi->bi_bdev);

	if (bio_sectors(bi) > queue_max_sectors(q))
		return 0;
	blk_recount_segments(q, bi);
	if (bi->bi_phys_segments > queue_max_segments(q))
		return 0;

	if (q->merge_bvec_fn)
		/* it's too hard to apply the merge_bvec_fn at this stage,
		 * just give up
		 */
		return 0;

	return 1;
}
4273 static int chunk_aligned_read(struct mddev
*mddev
, struct bio
* raid_bio
)
4275 struct r5conf
*conf
= mddev
->private;
4277 struct bio
* align_bi
;
4278 struct md_rdev
*rdev
;
4279 sector_t end_sector
;
4281 if (!in_chunk_boundary(mddev
, raid_bio
)) {
4282 pr_debug("chunk_aligned_read : non aligned\n");
4286 * use bio_clone_mddev to make a copy of the bio
4288 align_bi
= bio_clone_mddev(raid_bio
, GFP_NOIO
, mddev
);
4292 * set bi_end_io to a new function, and set bi_private to the
4295 align_bi
->bi_end_io
= raid5_align_endio
;
4296 align_bi
->bi_private
= raid_bio
;
4300 align_bi
->bi_iter
.bi_sector
=
4301 raid5_compute_sector(conf
, raid_bio
->bi_iter
.bi_sector
,
4304 end_sector
= bio_end_sector(align_bi
);
4306 rdev
= rcu_dereference(conf
->disks
[dd_idx
].replacement
);
4307 if (!rdev
|| test_bit(Faulty
, &rdev
->flags
) ||
4308 rdev
->recovery_offset
< end_sector
) {
4309 rdev
= rcu_dereference(conf
->disks
[dd_idx
].rdev
);
4311 (test_bit(Faulty
, &rdev
->flags
) ||
4312 !(test_bit(In_sync
, &rdev
->flags
) ||
4313 rdev
->recovery_offset
>= end_sector
)))
4320 atomic_inc(&rdev
->nr_pending
);
4322 raid_bio
->bi_next
= (void*)rdev
;
4323 align_bi
->bi_bdev
= rdev
->bdev
;
4324 __clear_bit(BIO_SEG_VALID
, &align_bi
->bi_flags
);
4326 if (!bio_fits_rdev(align_bi
) ||
4327 is_badblock(rdev
, align_bi
->bi_iter
.bi_sector
,
4328 bio_sectors(align_bi
),
4329 &first_bad
, &bad_sectors
)) {
4330 /* too big in some way, or has a known bad block */
4332 rdev_dec_pending(rdev
, mddev
);
4336 /* No reshape active, so we can trust rdev->data_offset */
4337 align_bi
->bi_iter
.bi_sector
+= rdev
->data_offset
;
4339 spin_lock_irq(&conf
->device_lock
);
4340 wait_event_lock_irq(conf
->wait_for_stripe
,
4343 atomic_inc(&conf
->active_aligned_reads
);
4344 spin_unlock_irq(&conf
->device_lock
);
4347 trace_block_bio_remap(bdev_get_queue(align_bi
->bi_bdev
),
4348 align_bi
, disk_devt(mddev
->gendisk
),
4349 raid_bio
->bi_iter
.bi_sector
);
4350 generic_make_request(align_bi
);
/* __get_priority_stripe - get the next stripe to process
 *
 * Full stripe writes are allowed to pass preread active stripes up until
 * the bypass_threshold is exceeded. In general the bypass_count
 * increments when the handle_list is handled before the hold_list; however, it
 * will not be incremented when STRIPE_IO_STARTED is sampled set signifying a
 * stripe with in flight i/o. The bypass_count will be reset when the
 * head of the hold_list has changed, i.e. the head was promoted to the
 * handle_list.
 */
static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group)
{
	struct stripe_head *sh = NULL, *tmp;
	struct list_head *handle_list = NULL;
	struct r5worker_group *wg = NULL;

	if (conf->worker_cnt_per_group == 0) {
		handle_list = &conf->handle_list;
	} else if (group != ANY_GROUP) {
		handle_list = &conf->worker_groups[group].handle_list;
		wg = &conf->worker_groups[group];
	} else {
		int i;
		for (i = 0; i < conf->group_cnt; i++) {
			handle_list = &conf->worker_groups[i].handle_list;
			wg = &conf->worker_groups[i];
			if (!list_empty(handle_list))
				break;
		}
	}

	pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
		  __func__,
		  list_empty(handle_list) ? "empty" : "busy",
		  list_empty(&conf->hold_list) ? "empty" : "busy",
		  atomic_read(&conf->pending_full_writes), conf->bypass_count);

	if (!list_empty(handle_list)) {
		sh = list_entry(handle_list->next, typeof(*sh), lru);

		if (list_empty(&conf->hold_list))
			conf->bypass_count = 0;
		else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
			if (conf->hold_list.next == conf->last_hold)
				conf->bypass_count++;
			else {
				conf->last_hold = conf->hold_list.next;
				conf->bypass_count -= conf->bypass_threshold;
				if (conf->bypass_count < 0)
					conf->bypass_count = 0;
			}
		}
	} else if (!list_empty(&conf->hold_list) &&
		   ((conf->bypass_threshold &&
		     conf->bypass_count > conf->bypass_threshold) ||
		    atomic_read(&conf->pending_full_writes) == 0)) {

		list_for_each_entry(tmp, &conf->hold_list, lru) {
			if (conf->worker_cnt_per_group == 0 ||
			    group == ANY_GROUP ||
			    !cpu_online(tmp->cpu) ||
			    cpu_to_group(tmp->cpu) == group) {
				sh = tmp;
				break;
			}
		}

		if (sh) {
			conf->bypass_count -= conf->bypass_threshold;
			if (conf->bypass_count < 0)
				conf->bypass_count = 0;
			wg = NULL;
		}
	}

	if (!sh)
		return NULL;

	if (wg) {
		wg->stripes_cnt--;
		sh->group = NULL;
	}
	list_del_init(&sh->lru);
	BUG_ON(atomic_inc_return(&sh->count) != 1);
	return sh;
}
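/*
 * Illustrative summary of the bookkeeping above (added): each time a
 * handle_list stripe without STRIPE_IO_STARTED is picked while the head
 * of hold_list stays the same, bypass_count is incremented. Once
 * handle_list drains and either bypass_count has exceeded
 * bypass_threshold or no full-stripe writes are pending, the head of
 * hold_list is taken instead and bypass_count is charged back by
 * bypass_threshold, clamped at zero, so preread stripes cannot be
 * starved indefinitely by a stream of full-stripe writes.
 */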
struct raid5_plug_cb {
	struct blk_plug_cb	cb;
	struct list_head	list;
	struct list_head	temp_inactive_list[NR_STRIPE_HASH_LOCKS];
};
static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
{
	struct raid5_plug_cb *cb = container_of(
		blk_cb, struct raid5_plug_cb, cb);
	struct stripe_head *sh;
	struct mddev *mddev = cb->cb.data;
	struct r5conf *conf = mddev->private;
	int cnt = 0;
	int hash;

	if (cb->list.next && !list_empty(&cb->list)) {
		spin_lock_irq(&conf->device_lock);
		while (!list_empty(&cb->list)) {
			sh = list_first_entry(&cb->list, struct stripe_head, lru);
			list_del_init(&sh->lru);
			/*
			 * avoid race release_stripe_plug() sees
			 * STRIPE_ON_UNPLUG_LIST clear but the stripe
			 * is still in our list
			 */
			smp_mb__before_atomic();
			clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);
			/*
			 * STRIPE_ON_RELEASE_LIST could be set here. In that
			 * case, the count is always > 1 here
			 */
			hash = sh->hash_lock_index;
			__release_stripe(conf, sh, &cb->temp_inactive_list[hash]);
			cnt++;
		}
		spin_unlock_irq(&conf->device_lock);
	}
	release_inactive_stripe_list(conf, cb->temp_inactive_list,
				     NR_STRIPE_HASH_LOCKS);
	trace_block_unplug(mddev->queue, cnt, !from_schedule);
	kfree(cb);
}
static void release_stripe_plug(struct mddev *mddev,
				struct stripe_head *sh)
{
	struct blk_plug_cb *blk_cb = blk_check_plugged(
		raid5_unplug, mddev,
		sizeof(struct raid5_plug_cb));
	struct raid5_plug_cb *cb;

	if (!blk_cb) {
		release_stripe(sh);
		return;
	}

	cb = container_of(blk_cb, struct raid5_plug_cb, cb);

	if (cb->list.next == NULL) {
		int i;
		INIT_LIST_HEAD(&cb->list);
		for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
			INIT_LIST_HEAD(cb->temp_inactive_list + i);
	}

	if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state))
		list_add_tail(&sh->lru, &cb->list);
	else
		release_stripe(sh);
}
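/*
 * Illustrative call flow (added): callers such as make_request() and
 * make_discard_request() hand finished stripe_heads to
 * release_stripe_plug(); the stripes collect on the per-task
 * raid5_plug_cb list until the block layer unplugs (blk_finish_plug() or
 * a schedule), at which point raid5_unplug() above releases the whole
 * batch in one pass under device_lock instead of taking the lock once
 * per stripe.
 */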
4519 static void make_discard_request(struct mddev
*mddev
, struct bio
*bi
)
4521 struct r5conf
*conf
= mddev
->private;
4522 sector_t logical_sector
, last_sector
;
4523 struct stripe_head
*sh
;
4527 if (mddev
->reshape_position
!= MaxSector
)
4528 /* Skip discard while reshape is happening */
4531 logical_sector
= bi
->bi_iter
.bi_sector
& ~((sector_t
)STRIPE_SECTORS
-1);
4532 last_sector
= bi
->bi_iter
.bi_sector
+ (bi
->bi_iter
.bi_size
>>9);
4535 bi
->bi_phys_segments
= 1; /* over-loaded to count active stripes */
4537 stripe_sectors
= conf
->chunk_sectors
*
4538 (conf
->raid_disks
- conf
->max_degraded
);
4539 logical_sector
= DIV_ROUND_UP_SECTOR_T(logical_sector
,
4541 sector_div(last_sector
, stripe_sectors
);
4543 logical_sector
*= conf
->chunk_sectors
;
4544 last_sector
*= conf
->chunk_sectors
;
4546 for (; logical_sector
< last_sector
;
4547 logical_sector
+= STRIPE_SECTORS
) {
4551 sh
= get_active_stripe(conf
, logical_sector
, 0, 0, 0);
4552 prepare_to_wait(&conf
->wait_for_overlap
, &w
,
4553 TASK_UNINTERRUPTIBLE
);
4554 set_bit(R5_Overlap
, &sh
->dev
[sh
->pd_idx
].flags
);
4555 if (test_bit(STRIPE_SYNCING
, &sh
->state
)) {
4560 clear_bit(R5_Overlap
, &sh
->dev
[sh
->pd_idx
].flags
);
4561 spin_lock_irq(&sh
->stripe_lock
);
4562 for (d
= 0; d
< conf
->raid_disks
; d
++) {
4563 if (d
== sh
->pd_idx
|| d
== sh
->qd_idx
)
4565 if (sh
->dev
[d
].towrite
|| sh
->dev
[d
].toread
) {
4566 set_bit(R5_Overlap
, &sh
->dev
[d
].flags
);
4567 spin_unlock_irq(&sh
->stripe_lock
);
4573 set_bit(STRIPE_DISCARD
, &sh
->state
);
4574 finish_wait(&conf
->wait_for_overlap
, &w
);
4575 for (d
= 0; d
< conf
->raid_disks
; d
++) {
4576 if (d
== sh
->pd_idx
|| d
== sh
->qd_idx
)
4578 sh
->dev
[d
].towrite
= bi
;
4579 set_bit(R5_OVERWRITE
, &sh
->dev
[d
].flags
);
4580 raid5_inc_bi_active_stripes(bi
);
4582 spin_unlock_irq(&sh
->stripe_lock
);
4583 if (conf
->mddev
->bitmap
) {
4585 d
< conf
->raid_disks
- conf
->max_degraded
;
4587 bitmap_startwrite(mddev
->bitmap
,
4591 sh
->bm_seq
= conf
->seq_flush
+ 1;
4592 set_bit(STRIPE_BIT_DELAY
, &sh
->state
);
4595 set_bit(STRIPE_HANDLE
, &sh
->state
);
4596 clear_bit(STRIPE_DELAYED
, &sh
->state
);
4597 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE
, &sh
->state
))
4598 atomic_inc(&conf
->preread_active_stripes
);
4599 release_stripe_plug(mddev
, sh
);
4602 remaining
= raid5_dec_bi_active_stripes(bi
);
4603 if (remaining
== 0) {
4604 md_write_end(mddev
);
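/*
 * Worked example for the discard path above (added, illustrative): on a
 * 6-device RAID6 (max_degraded = 2) with chunk_sectors = 1024,
 * stripe_sectors = 1024 * (6 - 2) = 4096, so make_discard_request()
 * rounds the start of the requested range up and the end down to
 * multiples of 4096 sectors; only whole stripes are discarded and any
 * unaligned head or tail of the request is left untouched.
 */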
4609 static void make_request(struct mddev
*mddev
, struct bio
* bi
)
4611 struct r5conf
*conf
= mddev
->private;
4613 sector_t new_sector
;
4614 sector_t logical_sector
, last_sector
;
4615 struct stripe_head
*sh
;
4616 const int rw
= bio_data_dir(bi
);
4621 if (unlikely(bi
->bi_rw
& REQ_FLUSH
)) {
4622 md_flush_request(mddev
, bi
);
4626 md_write_start(mddev
, bi
);
4629 mddev
->reshape_position
== MaxSector
&&
4630 chunk_aligned_read(mddev
,bi
))
4633 if (unlikely(bi
->bi_rw
& REQ_DISCARD
)) {
4634 make_discard_request(mddev
, bi
);
4638 logical_sector
= bi
->bi_iter
.bi_sector
& ~((sector_t
)STRIPE_SECTORS
-1);
4639 last_sector
= bio_end_sector(bi
);
4641 bi
->bi_phys_segments
= 1; /* over-loaded to count active stripes */
4643 prepare_to_wait(&conf
->wait_for_overlap
, &w
, TASK_UNINTERRUPTIBLE
);
4644 for (;logical_sector
< last_sector
; logical_sector
+= STRIPE_SECTORS
) {
4650 seq
= read_seqcount_begin(&conf
->gen_lock
);
4653 prepare_to_wait(&conf
->wait_for_overlap
, &w
,
4654 TASK_UNINTERRUPTIBLE
);
4655 if (unlikely(conf
->reshape_progress
!= MaxSector
)) {
4656 /* spinlock is needed as reshape_progress may be
4657 * 64bit on a 32bit platform, and so it might be
4658 * possible to see a half-updated value
4659 * Of course reshape_progress could change after
4660 * the lock is dropped, so once we get a reference
4661 * to the stripe that we think it is, we will have
4664 spin_lock_irq(&conf
->device_lock
);
4665 if (mddev
->reshape_backwards
4666 ? logical_sector
< conf
->reshape_progress
4667 : logical_sector
>= conf
->reshape_progress
) {
4670 if (mddev
->reshape_backwards
4671 ? logical_sector
< conf
->reshape_safe
4672 : logical_sector
>= conf
->reshape_safe
) {
4673 spin_unlock_irq(&conf
->device_lock
);
4679 spin_unlock_irq(&conf
->device_lock
);
4682 new_sector
= raid5_compute_sector(conf
, logical_sector
,
4685 pr_debug("raid456: make_request, sector %llu logical %llu\n",
4686 (unsigned long long)new_sector
,
4687 (unsigned long long)logical_sector
);
4689 sh
= get_active_stripe(conf
, new_sector
, previous
,
4690 (bi
->bi_rw
&RWA_MASK
), 0);
4692 if (unlikely(previous
)) {
4693 /* expansion might have moved on while waiting for a
4694 * stripe, so we must do the range check again.
4695 * Expansion could still move past after this
4696 * test, but as we are holding a reference to
4697 * 'sh', we know that if that happens,
4698 * STRIPE_EXPANDING will get set and the expansion
4699 * won't proceed until we finish with the stripe.
4702 spin_lock_irq(&conf
->device_lock
);
4703 if (mddev
->reshape_backwards
4704 ? logical_sector
>= conf
->reshape_progress
4705 : logical_sector
< conf
->reshape_progress
)
4706 /* mismatch, need to try again */
4708 spin_unlock_irq(&conf
->device_lock
);
4716 if (read_seqcount_retry(&conf
->gen_lock
, seq
)) {
4717 /* Might have got the wrong stripe_head
4725 logical_sector
>= mddev
->suspend_lo
&&
4726 logical_sector
< mddev
->suspend_hi
) {
4728 /* As the suspend_* range is controlled by
4729 * userspace, we want an interruptible
4732 flush_signals(current
);
4733 prepare_to_wait(&conf
->wait_for_overlap
,
4734 &w
, TASK_INTERRUPTIBLE
);
4735 if (logical_sector
>= mddev
->suspend_lo
&&
4736 logical_sector
< mddev
->suspend_hi
) {
4743 if (test_bit(STRIPE_EXPANDING
, &sh
->state
) ||
4744 !add_stripe_bio(sh
, bi
, dd_idx
, rw
)) {
4745 /* Stripe is busy expanding or
4746 * add failed due to overlap. Flush everything
4749 md_wakeup_thread(mddev
->thread
);
4755 set_bit(STRIPE_HANDLE
, &sh
->state
);
4756 clear_bit(STRIPE_DELAYED
, &sh
->state
);
4757 if ((bi
->bi_rw
& REQ_SYNC
) &&
4758 !test_and_set_bit(STRIPE_PREREAD_ACTIVE
, &sh
->state
))
4759 atomic_inc(&conf
->preread_active_stripes
);
4760 release_stripe_plug(mddev
, sh
);
4762 /* cannot get stripe for read-ahead, just give-up */
4763 clear_bit(BIO_UPTODATE
, &bi
->bi_flags
);
4767 finish_wait(&conf
->wait_for_overlap
, &w
);
4769 remaining
= raid5_dec_bi_active_stripes(bi
);
4770 if (remaining
== 0) {
4773 md_write_end(mddev
);
4775 trace_block_bio_complete(bdev_get_queue(bi
->bi_bdev
),
4781 static sector_t
raid5_size(struct mddev
*mddev
, sector_t sectors
, int raid_disks
);
4783 static sector_t
reshape_request(struct mddev
*mddev
, sector_t sector_nr
, int *skipped
)
4785 /* reshaping is quite different to recovery/resync so it is
4786 * handled quite separately ... here.
4788 * On each call to sync_request, we gather one chunk worth of
4789 * destination stripes and flag them as expanding.
4790 * Then we find all the source stripes and request reads.
4791 * As the reads complete, handle_stripe will copy the data
4792 * into the destination stripe and release that stripe.
4794 struct r5conf
*conf
= mddev
->private;
4795 struct stripe_head
*sh
;
4796 sector_t first_sector
, last_sector
;
4797 int raid_disks
= conf
->previous_raid_disks
;
4798 int data_disks
= raid_disks
- conf
->max_degraded
;
4799 int new_data_disks
= conf
->raid_disks
- conf
->max_degraded
;
4802 sector_t writepos
, readpos
, safepos
;
4803 sector_t stripe_addr
;
4804 int reshape_sectors
;
4805 struct list_head stripes
;
4807 if (sector_nr
== 0) {
4808 /* If restarting in the middle, skip the initial sectors */
4809 if (mddev
->reshape_backwards
&&
4810 conf
->reshape_progress
< raid5_size(mddev
, 0, 0)) {
4811 sector_nr
= raid5_size(mddev
, 0, 0)
4812 - conf
->reshape_progress
;
4813 } else if (!mddev
->reshape_backwards
&&
4814 conf
->reshape_progress
> 0)
4815 sector_nr
= conf
->reshape_progress
;
4816 sector_div(sector_nr
, new_data_disks
);
4818 mddev
->curr_resync_completed
= sector_nr
;
4819 sysfs_notify(&mddev
->kobj
, NULL
, "sync_completed");
4825 /* We need to process a full chunk at a time.
4826 * If old and new chunk sizes differ, we need to process the
4829 if (mddev
->new_chunk_sectors
> mddev
->chunk_sectors
)
4830 reshape_sectors
= mddev
->new_chunk_sectors
;
4832 reshape_sectors
= mddev
->chunk_sectors
;
4834 /* We update the metadata at least every 10 seconds, or when
4835 * the data about to be copied would over-write the source of
4836 * the data at the front of the range. i.e. one new_stripe
4837 * along from reshape_progress new_maps to after where
4838 * reshape_safe old_maps to
4840 writepos
= conf
->reshape_progress
;
4841 sector_div(writepos
, new_data_disks
);
4842 readpos
= conf
->reshape_progress
;
4843 sector_div(readpos
, data_disks
);
4844 safepos
= conf
->reshape_safe
;
4845 sector_div(safepos
, data_disks
);
4846 if (mddev
->reshape_backwards
) {
4847 writepos
-= min_t(sector_t
, reshape_sectors
, writepos
);
4848 readpos
+= reshape_sectors
;
4849 safepos
+= reshape_sectors
;
4851 writepos
+= reshape_sectors
;
4852 readpos
-= min_t(sector_t
, reshape_sectors
, readpos
);
4853 safepos
-= min_t(sector_t
, reshape_sectors
, safepos
);
4856 /* Having calculated the 'writepos' possibly use it
4857 * to set 'stripe_addr' which is where we will write to.
4859 if (mddev
->reshape_backwards
) {
4860 BUG_ON(conf
->reshape_progress
== 0);
4861 stripe_addr
= writepos
;
4862 BUG_ON((mddev
->dev_sectors
&
4863 ~((sector_t
)reshape_sectors
- 1))
4864 - reshape_sectors
- stripe_addr
4867 BUG_ON(writepos
!= sector_nr
+ reshape_sectors
);
4868 stripe_addr
= sector_nr
;
4871 /* 'writepos' is the most advanced device address we might write.
4872 * 'readpos' is the least advanced device address we might read.
4873 * 'safepos' is the least address recorded in the metadata as having
4875 * If there is a min_offset_diff, these are adjusted either by
4876 * increasing the safepos/readpos if diff is negative, or
4877 * increasing writepos if diff is positive.
4878 * If 'readpos' is then behind 'writepos', there is no way that we can
4879 * ensure safety in the face of a crash - that must be done by userspace
4880 * making a backup of the data. So in that case there is no particular
4881 * rush to update metadata.
4882 * Otherwise if 'safepos' is behind 'writepos', then we really need to
4883 * update the metadata to advance 'safepos' to match 'readpos' so that
4884 * we can be safe in the event of a crash.
4885 * So we insist on updating metadata if safepos is behind writepos and
4886 * readpos is beyond writepos.
4887 * In any case, update the metadata every 10 seconds.
4888 * Maybe that number should be configurable, but I'm not sure it is
4889 * worth it.... maybe it could be a multiple of safemode_delay???
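/*
 * Illustrative restatement of the rule above (added): for a forward
 * (growing) reshape, writepos runs one reshape chunk ahead of
 * reshape_progress while readpos and safepos trail behind; as soon as
 * safepos falls behind writepos while readpos is beyond writepos, the
 * superblock is written and reshape_safe advanced before any of the new
 * destination stripes are scheduled, so a crash cannot leave the
 * metadata pointing at data that has already been overwritten.
 */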
4891 if (conf
->min_offset_diff
< 0) {
4892 safepos
+= -conf
->min_offset_diff
;
4893 readpos
+= -conf
->min_offset_diff
;
4895 writepos
+= conf
->min_offset_diff
;
4897 if ((mddev
->reshape_backwards
4898 ? (safepos
> writepos
&& readpos
< writepos
)
4899 : (safepos
< writepos
&& readpos
> writepos
)) ||
4900 time_after(jiffies
, conf
->reshape_checkpoint
+ 10*HZ
)) {
4901 /* Cannot proceed until we've updated the superblock... */
4902 wait_event(conf
->wait_for_overlap
,
4903 atomic_read(&conf
->reshape_stripes
)==0
4904 || test_bit(MD_RECOVERY_INTR
, &mddev
->recovery
));
4905 if (atomic_read(&conf
->reshape_stripes
) != 0)
4907 mddev
->reshape_position
= conf
->reshape_progress
;
4908 mddev
->curr_resync_completed
= sector_nr
;
4909 conf
->reshape_checkpoint
= jiffies
;
4910 set_bit(MD_CHANGE_DEVS
, &mddev
->flags
);
4911 md_wakeup_thread(mddev
->thread
);
4912 wait_event(mddev
->sb_wait
, mddev
->flags
== 0 ||
4913 test_bit(MD_RECOVERY_INTR
, &mddev
->recovery
));
4914 if (test_bit(MD_RECOVERY_INTR
, &mddev
->recovery
))
4916 spin_lock_irq(&conf
->device_lock
);
4917 conf
->reshape_safe
= mddev
->reshape_position
;
4918 spin_unlock_irq(&conf
->device_lock
);
4919 wake_up(&conf
->wait_for_overlap
);
4920 sysfs_notify(&mddev
->kobj
, NULL
, "sync_completed");
4923 INIT_LIST_HEAD(&stripes
);
4924 for (i
= 0; i
< reshape_sectors
; i
+= STRIPE_SECTORS
) {
4926 int skipped_disk
= 0;
4927 sh
= get_active_stripe(conf
, stripe_addr
+i
, 0, 0, 1);
4928 set_bit(STRIPE_EXPANDING
, &sh
->state
);
4929 atomic_inc(&conf
->reshape_stripes
);
4930 /* If any of this stripe is beyond the end of the old
4931 * array, then we need to zero those blocks
4933 for (j
=sh
->disks
; j
--;) {
4935 if (j
== sh
->pd_idx
)
4937 if (conf
->level
== 6 &&
4940 s
= compute_blocknr(sh
, j
, 0);
4941 if (s
< raid5_size(mddev
, 0, 0)) {
4945 memset(page_address(sh
->dev
[j
].page
), 0, STRIPE_SIZE
);
4946 set_bit(R5_Expanded
, &sh
->dev
[j
].flags
);
4947 set_bit(R5_UPTODATE
, &sh
->dev
[j
].flags
);
4949 if (!skipped_disk
) {
4950 set_bit(STRIPE_EXPAND_READY
, &sh
->state
);
4951 set_bit(STRIPE_HANDLE
, &sh
->state
);
4953 list_add(&sh
->lru
, &stripes
);
4955 spin_lock_irq(&conf
->device_lock
);
4956 if (mddev
->reshape_backwards
)
4957 conf
->reshape_progress
-= reshape_sectors
* new_data_disks
;
4959 conf
->reshape_progress
+= reshape_sectors
* new_data_disks
;
4960 spin_unlock_irq(&conf
->device_lock
);
4961 /* Ok, those stripe are ready. We can start scheduling
4962 * reads on the source stripes.
4963 * The source stripes are determined by mapping the first and last
4964 * block on the destination stripes.
4967 raid5_compute_sector(conf
, stripe_addr
*(new_data_disks
),
4970 raid5_compute_sector(conf
, ((stripe_addr
+reshape_sectors
)
4971 * new_data_disks
- 1),
4973 if (last_sector
>= mddev
->dev_sectors
)
4974 last_sector
= mddev
->dev_sectors
- 1;
4975 while (first_sector
<= last_sector
) {
4976 sh
= get_active_stripe(conf
, first_sector
, 1, 0, 1);
4977 set_bit(STRIPE_EXPAND_SOURCE
, &sh
->state
);
4978 set_bit(STRIPE_HANDLE
, &sh
->state
);
4980 first_sector
+= STRIPE_SECTORS
;
4982 /* Now that the sources are clearly marked, we can release
4983 * the destination stripes
4985 while (!list_empty(&stripes
)) {
4986 sh
= list_entry(stripes
.next
, struct stripe_head
, lru
);
4987 list_del_init(&sh
->lru
);
4990 /* If this takes us to the resync_max point where we have to pause,
4991 * then we need to write out the superblock.
4993 sector_nr
+= reshape_sectors
;
4994 if ((sector_nr
- mddev
->curr_resync_completed
) * 2
4995 >= mddev
->resync_max
- mddev
->curr_resync_completed
) {
4996 /* Cannot proceed until we've updated the superblock... */
4997 wait_event(conf
->wait_for_overlap
,
4998 atomic_read(&conf
->reshape_stripes
) == 0
4999 || test_bit(MD_RECOVERY_INTR
, &mddev
->recovery
));
5000 if (atomic_read(&conf
->reshape_stripes
) != 0)
5002 mddev
->reshape_position
= conf
->reshape_progress
;
5003 mddev
->curr_resync_completed
= sector_nr
;
5004 conf
->reshape_checkpoint
= jiffies
;
5005 set_bit(MD_CHANGE_DEVS
, &mddev
->flags
);
5006 md_wakeup_thread(mddev
->thread
);
5007 wait_event(mddev
->sb_wait
,
5008 !test_bit(MD_CHANGE_DEVS
, &mddev
->flags
)
5009 || test_bit(MD_RECOVERY_INTR
, &mddev
->recovery
));
5010 if (test_bit(MD_RECOVERY_INTR
, &mddev
->recovery
))
5012 spin_lock_irq(&conf
->device_lock
);
5013 conf
->reshape_safe
= mddev
->reshape_position
;
5014 spin_unlock_irq(&conf
->device_lock
);
5015 wake_up(&conf
->wait_for_overlap
);
5016 sysfs_notify(&mddev
->kobj
, NULL
, "sync_completed");
5019 return reshape_sectors
;
5022 /* FIXME go_faster isn't used */
5023 static inline sector_t
sync_request(struct mddev
*mddev
, sector_t sector_nr
, int *skipped
, int go_faster
)
5025 struct r5conf
*conf
= mddev
->private;
5026 struct stripe_head
*sh
;
5027 sector_t max_sector
= mddev
->dev_sectors
;
5028 sector_t sync_blocks
;
5029 int still_degraded
= 0;
5032 if (sector_nr
>= max_sector
) {
5033 /* just being told to finish up .. nothing much to do */
5035 if (test_bit(MD_RECOVERY_RESHAPE
, &mddev
->recovery
)) {
5040 if (mddev
->curr_resync
< max_sector
) /* aborted */
5041 bitmap_end_sync(mddev
->bitmap
, mddev
->curr_resync
,
5043 else /* completed sync */
5045 bitmap_close_sync(mddev
->bitmap
);
5050 /* Allow raid5_quiesce to complete */
5051 wait_event(conf
->wait_for_overlap
, conf
->quiesce
!= 2);
5053 if (test_bit(MD_RECOVERY_RESHAPE
, &mddev
->recovery
))
5054 return reshape_request(mddev
, sector_nr
, skipped
);
5056 /* No need to check resync_max as we never do more than one
5057 * stripe, and as resync_max will always be on a chunk boundary,
5058 * if the check in md_do_sync didn't fire, there is no chance
5059 * of overstepping resync_max here
5062 /* if there is too many failed drives and we are trying
5063 * to resync, then assert that we are finished, because there is
5064 * nothing we can do.
5066 if (mddev
->degraded
>= conf
->max_degraded
&&
5067 test_bit(MD_RECOVERY_SYNC
, &mddev
->recovery
)) {
5068 sector_t rv
= mddev
->dev_sectors
- sector_nr
;
5072 if (!test_bit(MD_RECOVERY_REQUESTED
, &mddev
->recovery
) &&
5074 !bitmap_start_sync(mddev
->bitmap
, sector_nr
, &sync_blocks
, 1) &&
5075 sync_blocks
>= STRIPE_SECTORS
) {
5076 /* we can skip this block, and probably more */
5077 sync_blocks
/= STRIPE_SECTORS
;
5079 return sync_blocks
* STRIPE_SECTORS
; /* keep things rounded to whole stripes */
5082 bitmap_cond_end_sync(mddev
->bitmap
, sector_nr
);
5084 sh
= get_active_stripe(conf
, sector_nr
, 0, 1, 0);
5086 sh
= get_active_stripe(conf
, sector_nr
, 0, 0, 0);
5087 /* make sure we don't swamp the stripe cache if someone else
5088 * is trying to get access
5090 schedule_timeout_uninterruptible(1);
5092 /* Need to check if array will still be degraded after recovery/resync
5093 * We don't need to check the 'failed' flag as when that gets set,
5096 for (i
= 0; i
< conf
->raid_disks
; i
++)
5097 if (conf
->disks
[i
].rdev
== NULL
)
5100 bitmap_start_sync(mddev
->bitmap
, sector_nr
, &sync_blocks
, still_degraded
);
5102 set_bit(STRIPE_SYNC_REQUESTED
, &sh
->state
);
5103 set_bit(STRIPE_HANDLE
, &sh
->state
);
5107 return STRIPE_SECTORS
;
5110 static int retry_aligned_read(struct r5conf
*conf
, struct bio
*raid_bio
)
5112 /* We may not be able to submit a whole bio at once as there
5113 * may not be enough stripe_heads available.
5114 * We cannot pre-allocate enough stripe_heads as we may need
5115 * more than exist in the cache (if we allow ever large chunks).
5116 * So we do one stripe head at a time and record in
5117 * ->bi_hw_segments how many have been done.
5119 * We *know* that this entire raid_bio is in one chunk, so
5120 * it will be only one 'dd_idx' and only need one call to raid5_compute_sector.
5122 struct stripe_head
*sh
;
5124 sector_t sector
, logical_sector
, last_sector
;
5129 logical_sector
= raid_bio
->bi_iter
.bi_sector
&
5130 ~((sector_t
)STRIPE_SECTORS
-1);
5131 sector
= raid5_compute_sector(conf
, logical_sector
,
5133 last_sector
= bio_end_sector(raid_bio
);
5135 for (; logical_sector
< last_sector
;
5136 logical_sector
+= STRIPE_SECTORS
,
5137 sector
+= STRIPE_SECTORS
,
5140 if (scnt
< raid5_bi_processed_stripes(raid_bio
))
5141 /* already done this stripe */
5144 sh
= get_active_stripe(conf
, sector
, 0, 1, 1);
5147 /* failed to get a stripe - must wait */
5148 raid5_set_bi_processed_stripes(raid_bio
, scnt
);
5149 conf
->retry_read_aligned
= raid_bio
;
5153 if (!add_stripe_bio(sh
, raid_bio
, dd_idx
, 0)) {
5155 raid5_set_bi_processed_stripes(raid_bio
, scnt
);
5156 conf
->retry_read_aligned
= raid_bio
;
5160 set_bit(R5_ReadNoMerge
, &sh
->dev
[dd_idx
].flags
);
5165 remaining
= raid5_dec_bi_active_stripes(raid_bio
);
5166 if (remaining
== 0) {
5167 trace_block_bio_complete(bdev_get_queue(raid_bio
->bi_bdev
),
5169 bio_endio(raid_bio
, 0);
5171 if (atomic_dec_and_test(&conf
->active_aligned_reads
))
5172 wake_up(&conf
->wait_for_stripe
);
static int handle_active_stripes(struct r5conf *conf, int group,
				 struct r5worker *worker,
				 struct list_head *temp_inactive_list)
{
	struct stripe_head *batch[MAX_STRIPE_BATCH], *sh;
	int i, batch_size = 0, hash;
	bool release_inactive = false;

	while (batch_size < MAX_STRIPE_BATCH &&
			(sh = __get_priority_stripe(conf, group)) != NULL)
		batch[batch_size++] = sh;

	if (batch_size == 0) {
		for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
			if (!list_empty(temp_inactive_list + i))
				break;
		if (i == NR_STRIPE_HASH_LOCKS)
			return batch_size;
		release_inactive = true;
	}
	spin_unlock_irq(&conf->device_lock);

	release_inactive_stripe_list(conf, temp_inactive_list,
				     NR_STRIPE_HASH_LOCKS);

	if (release_inactive) {
		spin_lock_irq(&conf->device_lock);
		return 0;
	}

	for (i = 0; i < batch_size; i++)
		handle_stripe(batch[i]);

	spin_lock_irq(&conf->device_lock);
	for (i = 0; i < batch_size; i++) {
		hash = batch[i]->hash_lock_index;
		__release_stripe(conf, batch[i], &temp_inactive_list[hash]);
	}
	return batch_size;
}
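/*
 * Illustrative note (added): device_lock is held on entry and on return;
 * the function drops it only around release_inactive_stripe_list() and
 * the handle_stripe() calls, so a raid5d / raid5_do_work loop pays the
 * lock round-trip once per batch of up to MAX_STRIPE_BATCH stripes
 * rather than once per stripe.
 */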
static void raid5_do_work(struct work_struct *work)
{
	struct r5worker *worker = container_of(work, struct r5worker, work);
	struct r5worker_group *group = worker->group;
	struct r5conf *conf = group->conf;
	int group_id = group - conf->worker_groups;
	int handled = 0;
	struct blk_plug plug;

	pr_debug("+++ raid5worker active\n");

	blk_start_plug(&plug);
	spin_lock_irq(&conf->device_lock);
	while (1) {
		int batch_size, released;

		released = release_stripe_list(conf, worker->temp_inactive_list);

		batch_size = handle_active_stripes(conf, group_id, worker,
						   worker->temp_inactive_list);
		worker->working = false;
		if (!batch_size && !released)
			break;
		handled += batch_size;
	}
	pr_debug("%d stripes handled\n", handled);

	spin_unlock_irq(&conf->device_lock);
	blk_finish_plug(&plug);

	pr_debug("--- raid5worker inactive\n");
}
/*
 * This is our raid5 kernel thread.
 *
 * We scan the hash table for stripes which can be handled now.
 * During the scan, completed stripes are saved for us by the interrupt
 * handler, so that they will not have to wait for our next wakeup.
 */
static void raid5d(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	struct r5conf *conf = mddev->private;
	int handled = 0;
	struct blk_plug plug;

	pr_debug("+++ raid5d active\n");

	md_check_recovery(mddev);

	blk_start_plug(&plug);
	spin_lock_irq(&conf->device_lock);
	while (1) {
		struct bio *bio;
		int batch_size, released;

		released = release_stripe_list(conf, conf->temp_inactive_list);

		if (!list_empty(&conf->bitmap_list)) {
			/* Now is a good time to flush some bitmap updates */
			spin_unlock_irq(&conf->device_lock);
			bitmap_unplug(mddev->bitmap);
			spin_lock_irq(&conf->device_lock);
			conf->seq_write = conf->seq_flush;
			activate_bit_delay(conf, conf->temp_inactive_list);
		}
		raid5_activate_delayed(conf);

		while ((bio = remove_bio_from_retry(conf))) {
			int ok;
			spin_unlock_irq(&conf->device_lock);
			ok = retry_aligned_read(conf, bio);
			spin_lock_irq(&conf->device_lock);
			if (!ok)
				break;
			handled++;
		}

		batch_size = handle_active_stripes(conf, ANY_GROUP, NULL,
						   conf->temp_inactive_list);
		if (!batch_size && !released)
			break;
		handled += batch_size;

		if (mddev->flags & ~(1 << MD_CHANGE_PENDING)) {
			spin_unlock_irq(&conf->device_lock);
			md_check_recovery(mddev);
			spin_lock_irq(&conf->device_lock);
		}
	}
	pr_debug("%d stripes handled\n", handled);

	spin_unlock_irq(&conf->device_lock);

	async_tx_issue_pending_all();
	blk_finish_plug(&plug);

	pr_debug("--- raid5d inactive\n");
}
static ssize_t
raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
{
	struct r5conf *conf = mddev->private;
	return sprintf(page, "%d\n", conf->max_nr_stripes);
}

int
raid5_set_cache_size(struct mddev *mddev, int size)
{
	struct r5conf *conf = mddev->private;
	int err;
	int hash;

	if (size <= 16 || size > 32768)
		return -EINVAL;
	hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS;
	while (size < conf->max_nr_stripes) {
		if (drop_one_stripe(conf, hash))
			conf->max_nr_stripes--;
		else
			break;
		hash--;
		if (hash < 0)
			hash = NR_STRIPE_HASH_LOCKS - 1;
	}
	err = md_allow_write(mddev);
	if (err)
		return err;
	hash = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
	while (size > conf->max_nr_stripes) {
		if (grow_one_stripe(conf, hash))
			conf->max_nr_stripes++;
		else
			break;
		hash = (hash + 1) % NR_STRIPE_HASH_LOCKS;
	}
	return 0;
}
EXPORT_SYMBOL(raid5_set_cache_size);

static ssize_t
raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len)
{
	struct r5conf *conf = mddev->private;
	unsigned long new;
	int err;

	if (len >= PAGE_SIZE)
		return -EINVAL;
	if (!conf)
		return -ENODEV;
	if (kstrtoul(page, 10, &new))
		return -EINVAL;
	err = raid5_set_cache_size(mddev, new);
	if (err)
		return err;
	return len;
}

static struct md_sysfs_entry
raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
				raid5_show_stripe_cache_size,
				raid5_store_stripe_cache_size);
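/*
 * Typical usage from userspace (illustrative; the md device name is an
 * example):
 *
 *   echo 4096 > /sys/block/md0/md/stripe_cache_size
 *
 * Each cached stripe costs roughly a page of data per member device, so
 * raising the value trades memory for fewer read-modify-write stalls on
 * large sequential writes.
 */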
static ssize_t
raid5_show_preread_threshold(struct mddev *mddev, char *page)
{
	struct r5conf *conf = mddev->private;
	return sprintf(page, "%d\n", conf->bypass_threshold);
}

static ssize_t
raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len)
{
	struct r5conf *conf = mddev->private;
	unsigned long new;

	if (len >= PAGE_SIZE)
		return -EINVAL;
	if (!conf)
		return -ENODEV;
	if (kstrtoul(page, 10, &new))
		return -EINVAL;
	if (new > conf->max_nr_stripes)
		return -EINVAL;
	conf->bypass_threshold = new;
	return len;
}

static struct md_sysfs_entry
raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
					S_IRUGO | S_IWUSR,
					raid5_show_preread_threshold,
					raid5_store_preread_threshold);
static ssize_t
raid5_show_skip_copy(struct mddev *mddev, char *page)
{
	struct r5conf *conf = mddev->private;
	return sprintf(page, "%d\n", conf->skip_copy);
}

static ssize_t
raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
{
	struct r5conf *conf = mddev->private;
	unsigned long new;

	if (len >= PAGE_SIZE)
		return -EINVAL;
	if (!conf)
		return -ENODEV;
	if (kstrtoul(page, 10, &new))
		return -EINVAL;
	if (new == conf->skip_copy)
		return len;

	mddev_suspend(mddev);
	conf->skip_copy = new;
	if (new)
		mddev->queue->backing_dev_info.capabilities |=
						BDI_CAP_STABLE_WRITES;
	else
		mddev->queue->backing_dev_info.capabilities &=
						~BDI_CAP_STABLE_WRITES;
	mddev_resume(mddev);
	return len;
}

static struct md_sysfs_entry
raid5_skip_copy = __ATTR(skip_copy, S_IRUGO | S_IWUSR,
			 raid5_show_skip_copy,
			 raid5_store_skip_copy);
static ssize_t
stripe_cache_active_show(struct mddev *mddev, char *page)
{
	struct r5conf *conf = mddev->private;
	return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
}

static struct md_sysfs_entry
raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
static ssize_t
raid5_show_group_thread_cnt(struct mddev *mddev, char *page)
{
	struct r5conf *conf = mddev->private;
	return sprintf(page, "%d\n", conf->worker_cnt_per_group);
}

static int alloc_thread_groups(struct r5conf *conf, int cnt,
			       int *group_cnt,
			       int *worker_cnt_per_group,
			       struct r5worker_group **worker_groups);
static ssize_t
raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
{
	struct r5conf *conf = mddev->private;
	unsigned long new;
	int err;
	struct r5worker_group *new_groups, *old_groups;
	int group_cnt, worker_cnt_per_group;

	if (len >= PAGE_SIZE)
		return -EINVAL;
	if (kstrtoul(page, 10, &new))
		return -EINVAL;

	if (new == conf->worker_cnt_per_group)
		return len;

	mddev_suspend(mddev);

	old_groups = conf->worker_groups;
	if (old_groups)
		flush_workqueue(raid5_wq);

	err = alloc_thread_groups(conf, new,
				  &group_cnt, &worker_cnt_per_group,
				  &new_groups);
	if (!err) {
		spin_lock_irq(&conf->device_lock);
		conf->group_cnt = group_cnt;
		conf->worker_cnt_per_group = worker_cnt_per_group;
		conf->worker_groups = new_groups;
		spin_unlock_irq(&conf->device_lock);

		if (old_groups)
			kfree(old_groups[0].workers);
		kfree(old_groups);
	}

	mddev_resume(mddev);

	if (err)
		return err;
	return len;
}

static struct md_sysfs_entry
raid5_group_thread_cnt = __ATTR(group_thread_cnt, S_IRUGO | S_IWUSR,
				raid5_show_group_thread_cnt,
				raid5_store_group_thread_cnt);

static struct attribute *raid5_attrs[] = {
	&raid5_stripecache_size.attr,
	&raid5_stripecache_active.attr,
	&raid5_preread_bypass_threshold.attr,
	&raid5_group_thread_cnt.attr,
	&raid5_skip_copy.attr,
	NULL,
};
static struct attribute_group raid5_attrs_group = {
	.attrs = raid5_attrs,
};
static int alloc_thread_groups(struct r5conf *conf, int cnt,
			       int *group_cnt,
			       int *worker_cnt_per_group,
			       struct r5worker_group **worker_groups)
{
	int i, j, k;
	ssize_t size;
	struct r5worker *workers;

	*worker_cnt_per_group = cnt;
	if (cnt == 0) {
		*group_cnt = 0;
		*worker_groups = NULL;
		return 0;
	}
	*group_cnt = num_possible_nodes();
	size = sizeof(struct r5worker) * cnt;
	workers = kzalloc(size * *group_cnt, GFP_NOIO);
	*worker_groups = kzalloc(sizeof(struct r5worker_group) *
				*group_cnt, GFP_NOIO);
	if (!*worker_groups || !workers) {
		kfree(workers);
		kfree(*worker_groups);
		return -ENOMEM;
	}

	for (i = 0; i < *group_cnt; i++) {
		struct r5worker_group *group;

		group = &(*worker_groups)[i];
		INIT_LIST_HEAD(&group->handle_list);
		group->conf = conf;
		group->workers = workers + i * cnt;

		for (j = 0; j < cnt; j++) {
			struct r5worker *worker = group->workers + j;
			worker->group = group;
			INIT_WORK(&worker->work, raid5_do_work);

			for (k = 0; k < NR_STRIPE_HASH_LOCKS; k++)
				INIT_LIST_HEAD(worker->temp_inactive_list + k);
		}
	}

	return 0;
}

static void free_thread_groups(struct r5conf *conf)
{
	if (conf->worker_groups)
		kfree(conf->worker_groups[0].workers);
	kfree(conf->worker_groups);
	conf->worker_groups = NULL;
}
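/*
 * Typical usage from userspace (illustrative; the md device name is an
 * example): multi-threaded stripe handling is disabled by default
 * (worker_cnt_per_group == 0) and can be enabled per array with
 *
 *   echo 4 > /sys/block/md0/md/group_thread_cnt
 *
 * which, via the store handler above, allocates one r5worker_group per
 * possible NUMA node with four workers in each group.
 */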
static sector_t
raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	struct r5conf *conf = mddev->private;

	if (!sectors)
		sectors = mddev->dev_sectors;
	if (!raid_disks)
		/* size is defined by the smallest of previous and new size */
		raid_disks = min(conf->raid_disks, conf->previous_raid_disks);

	sectors &= ~((sector_t)mddev->chunk_sectors - 1);
	sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
	return sectors * (raid_disks - conf->max_degraded);
}
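/*
 * Worked example (added, illustrative): a 5-device RAID5
 * (max_degraded = 1) with dev_sectors = 2097152 (1GiB per device) and
 * chunk_sectors = 1024 exports 2097152 * (5 - 1) = 8388608 sectors, i.e.
 * 4GiB of usable capacity; the masking above only changes the result
 * when dev_sectors is not already a multiple of the chunk size.
 */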
static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
{
	safe_put_page(percpu->spare_page);
	kfree(percpu->scribble);
	percpu->spare_page = NULL;
	percpu->scribble = NULL;
}

static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
{
	if (conf->level == 6 && !percpu->spare_page)
		percpu->spare_page = alloc_page(GFP_KERNEL);
	if (!percpu->scribble)
		percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);

	if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) {
		free_scratch_buffer(conf, percpu);
		return -ENOMEM;
	}

	return 0;
}
5656 static void raid5_free_percpu(struct r5conf
*conf
)
5663 #ifdef CONFIG_HOTPLUG_CPU
5664 unregister_cpu_notifier(&conf
->cpu_notify
);
5668 for_each_possible_cpu(cpu
)
5669 free_scratch_buffer(conf
, per_cpu_ptr(conf
->percpu
, cpu
));
5672 free_percpu(conf
->percpu
);
5675 static void free_conf(struct r5conf
*conf
)
5677 free_thread_groups(conf
);
5678 shrink_stripes(conf
);
5679 raid5_free_percpu(conf
);
5681 kfree(conf
->stripe_hashtbl
);
5685 #ifdef CONFIG_HOTPLUG_CPU
5686 static int raid456_cpu_notify(struct notifier_block
*nfb
, unsigned long action
,
5689 struct r5conf
*conf
= container_of(nfb
, struct r5conf
, cpu_notify
);
5690 long cpu
= (long)hcpu
;
5691 struct raid5_percpu
*percpu
= per_cpu_ptr(conf
->percpu
, cpu
);
5694 case CPU_UP_PREPARE
:
5695 case CPU_UP_PREPARE_FROZEN
:
5696 if (alloc_scratch_buffer(conf
, percpu
)) {
5697 pr_err("%s: failed memory allocation for cpu%ld\n",
5699 return notifier_from_errno(-ENOMEM
);
5703 case CPU_DEAD_FROZEN
:
5704 free_scratch_buffer(conf
, per_cpu_ptr(conf
->percpu
, cpu
));
5713 static int raid5_alloc_percpu(struct r5conf
*conf
)
5718 conf
->percpu
= alloc_percpu(struct raid5_percpu
);
5722 #ifdef CONFIG_HOTPLUG_CPU
5723 conf
->cpu_notify
.notifier_call
= raid456_cpu_notify
;
5724 conf
->cpu_notify
.priority
= 0;
5725 err
= register_cpu_notifier(&conf
->cpu_notify
);
5731 for_each_present_cpu(cpu
) {
5732 err
= alloc_scratch_buffer(conf
, per_cpu_ptr(conf
->percpu
, cpu
));
5734 pr_err("%s: failed memory allocation for cpu%ld\n",
5744 static struct r5conf
*setup_conf(struct mddev
*mddev
)
5746 struct r5conf
*conf
;
5747 int raid_disk
, memory
, max_disks
;
5748 struct md_rdev
*rdev
;
5749 struct disk_info
*disk
;
5752 int group_cnt
, worker_cnt_per_group
;
5753 struct r5worker_group
*new_group
;
5755 if (mddev
->new_level
!= 5
5756 && mddev
->new_level
!= 4
5757 && mddev
->new_level
!= 6) {
5758 printk(KERN_ERR
"md/raid:%s: raid level not set to 4/5/6 (%d)\n",
5759 mdname(mddev
), mddev
->new_level
);
5760 return ERR_PTR(-EIO
);
5762 if ((mddev
->new_level
== 5
5763 && !algorithm_valid_raid5(mddev
->new_layout
)) ||
5764 (mddev
->new_level
== 6
5765 && !algorithm_valid_raid6(mddev
->new_layout
))) {
5766 printk(KERN_ERR
"md/raid:%s: layout %d not supported\n",
5767 mdname(mddev
), mddev
->new_layout
);
5768 return ERR_PTR(-EIO
);
5770 if (mddev
->new_level
== 6 && mddev
->raid_disks
< 4) {
5771 printk(KERN_ERR
"md/raid:%s: not enough configured devices (%d, minimum 4)\n",
5772 mdname(mddev
), mddev
->raid_disks
);
5773 return ERR_PTR(-EINVAL
);
5776 if (!mddev
->new_chunk_sectors
||
5777 (mddev
->new_chunk_sectors
<< 9) % PAGE_SIZE
||
5778 !is_power_of_2(mddev
->new_chunk_sectors
)) {
5779 printk(KERN_ERR
"md/raid:%s: invalid chunk size %d\n",
5780 mdname(mddev
), mddev
->new_chunk_sectors
<< 9);
5781 return ERR_PTR(-EINVAL
);
5784 conf
= kzalloc(sizeof(struct r5conf
), GFP_KERNEL
);
5787 /* Don't enable multi-threading by default*/
5788 if (!alloc_thread_groups(conf
, 0, &group_cnt
, &worker_cnt_per_group
,
5790 conf
->group_cnt
= group_cnt
;
5791 conf
->worker_cnt_per_group
= worker_cnt_per_group
;
5792 conf
->worker_groups
= new_group
;
5795 spin_lock_init(&conf
->device_lock
);
5796 seqcount_init(&conf
->gen_lock
);
5797 init_waitqueue_head(&conf
->wait_for_stripe
);
5798 init_waitqueue_head(&conf
->wait_for_overlap
);
5799 INIT_LIST_HEAD(&conf
->handle_list
);
5800 INIT_LIST_HEAD(&conf
->hold_list
);
5801 INIT_LIST_HEAD(&conf
->delayed_list
);
5802 INIT_LIST_HEAD(&conf
->bitmap_list
);
5803 init_llist_head(&conf
->released_stripes
);
5804 atomic_set(&conf
->active_stripes
, 0);
5805 atomic_set(&conf
->preread_active_stripes
, 0);
5806 atomic_set(&conf
->active_aligned_reads
, 0);
5807 conf
->bypass_threshold
= BYPASS_THRESHOLD
;
5808 conf
->recovery_disabled
= mddev
->recovery_disabled
- 1;
5810 conf
->raid_disks
= mddev
->raid_disks
;
5811 if (mddev
->reshape_position
== MaxSector
)
5812 conf
->previous_raid_disks
= mddev
->raid_disks
;
5814 conf
->previous_raid_disks
= mddev
->raid_disks
- mddev
->delta_disks
;
5815 max_disks
= max(conf
->raid_disks
, conf
->previous_raid_disks
);
5816 conf
->scribble_len
= scribble_len(max_disks
);
5818 conf
->disks
= kzalloc(max_disks
* sizeof(struct disk_info
),
5823 conf
->mddev
= mddev
;
5825 if ((conf
->stripe_hashtbl
= kzalloc(PAGE_SIZE
, GFP_KERNEL
)) == NULL
)
5828 /* We init hash_locks[0] separately to that it can be used
5829 * as the reference lock in the spin_lock_nest_lock() call
5830 * in lock_all_device_hash_locks_irq in order to convince
5831 * lockdep that we know what we are doing.
5833 spin_lock_init(conf
->hash_locks
);
5834 for (i
= 1; i
< NR_STRIPE_HASH_LOCKS
; i
++)
5835 spin_lock_init(conf
->hash_locks
+ i
);
5837 for (i
= 0; i
< NR_STRIPE_HASH_LOCKS
; i
++)
5838 INIT_LIST_HEAD(conf
->inactive_list
+ i
);
5840 for (i
= 0; i
< NR_STRIPE_HASH_LOCKS
; i
++)
5841 INIT_LIST_HEAD(conf
->temp_inactive_list
+ i
);
5843 conf
->level
= mddev
->new_level
;
5844 if (raid5_alloc_percpu(conf
) != 0)
5847 pr_debug("raid456: run(%s) called.\n", mdname(mddev
));
5849 rdev_for_each(rdev
, mddev
) {
5850 raid_disk
= rdev
->raid_disk
;
5851 if (raid_disk
>= max_disks
5854 disk
= conf
->disks
+ raid_disk
;
5856 if (test_bit(Replacement
, &rdev
->flags
)) {
5857 if (disk
->replacement
)
5859 disk
->replacement
= rdev
;
5866 if (test_bit(In_sync
, &rdev
->flags
)) {
5867 char b
[BDEVNAME_SIZE
];
5868 printk(KERN_INFO
"md/raid:%s: device %s operational as raid"
5870 mdname(mddev
), bdevname(rdev
->bdev
, b
), raid_disk
);
5871 } else if (rdev
->saved_raid_disk
!= raid_disk
)
5872 /* Cannot rely on bitmap to complete recovery */
5876 conf
->chunk_sectors
= mddev
->new_chunk_sectors
;
5877 conf
->level
= mddev
->new_level
;
5878 if (conf
->level
== 6)
5879 conf
->max_degraded
= 2;
5881 conf
->max_degraded
= 1;
5882 conf
->algorithm
= mddev
->new_layout
;
5883 conf
->reshape_progress
= mddev
->reshape_position
;
5884 if (conf
->reshape_progress
!= MaxSector
) {
5885 conf
->prev_chunk_sectors
= mddev
->chunk_sectors
;
5886 conf
->prev_algo
= mddev
->layout
;
5889 memory
= conf
->max_nr_stripes
* (sizeof(struct stripe_head
) +
5890 max_disks
* ((sizeof(struct bio
) + PAGE_SIZE
))) / 1024;
5891 atomic_set(&conf
->empty_inactive_list_nr
, NR_STRIPE_HASH_LOCKS
);
5892 if (grow_stripes(conf
, NR_STRIPES
)) {
5894 "md/raid:%s: couldn't allocate %dkB for buffers\n",
5895 mdname(mddev
), memory
);
5898 printk(KERN_INFO
"md/raid:%s: allocated %dkB\n",
5899 mdname(mddev
), memory
);
5901 sprintf(pers_name
, "raid%d", mddev
->new_level
);
5902 conf
->thread
= md_register_thread(raid5d
, mddev
, pers_name
);
5903 if (!conf
->thread
) {
5905 "md/raid:%s: couldn't allocate thread.\n",
5915 return ERR_PTR(-EIO
);
5917 return ERR_PTR(-ENOMEM
);
static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
{
	switch (algo) {
	case ALGORITHM_PARITY_0:
		if (raid_disk < max_degraded)
			return 1;
		break;
	case ALGORITHM_PARITY_N:
		if (raid_disk >= raid_disks - max_degraded)
			return 1;
		break;
	case ALGORITHM_PARITY_0_6:
		if (raid_disk == 0 ||
		    raid_disk == raid_disks - 1)
			return 1;
		break;
	case ALGORITHM_LEFT_ASYMMETRIC_6:
	case ALGORITHM_RIGHT_ASYMMETRIC_6:
	case ALGORITHM_LEFT_SYMMETRIC_6:
	case ALGORITHM_RIGHT_SYMMETRIC_6:
		if (raid_disk == raid_disks - 1)
			return 1;
	}
	return 0;
}
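/*
 * Illustrative example (added): for a RAID4-style layout
 * (ALGORITHM_PARITY_N) on raid_disks = 5 with max_degraded = 1, only
 * device 4 holds parity, so only_parity(4, ALGORITHM_PARITY_N, 5, 1)
 * returns 1; run() below uses this so that a not-yet-synced parity-only
 * device is counted as dirty parity rather than missing data.
 */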
5946 static int run(struct mddev
*mddev
)
5948 struct r5conf
*conf
;
5949 int working_disks
= 0;
5950 int dirty_parity_disks
= 0;
5951 struct md_rdev
*rdev
;
5952 sector_t reshape_offset
= 0;
5954 long long min_offset_diff
= 0;
5957 if (mddev
->recovery_cp
!= MaxSector
)
5958 printk(KERN_NOTICE
"md/raid:%s: not clean"
5959 " -- starting background reconstruction\n",
5962 rdev_for_each(rdev
, mddev
) {
5964 if (rdev
->raid_disk
< 0)
5966 diff
= (rdev
->new_data_offset
- rdev
->data_offset
);
5968 min_offset_diff
= diff
;
5970 } else if (mddev
->reshape_backwards
&&
5971 diff
< min_offset_diff
)
5972 min_offset_diff
= diff
;
5973 else if (!mddev
->reshape_backwards
&&
5974 diff
> min_offset_diff
)
5975 min_offset_diff
= diff
;
5978 if (mddev
->reshape_position
!= MaxSector
) {
5979 /* Check that we can continue the reshape.
5980 * Difficulties arise if the stripe we would write to
5981 * next is at or after the stripe we would read from next.
5982 * For a reshape that changes the number of devices, this
5983 * is only possible for a very short time, and mdadm makes
5984 * sure that time appears to have past before assembling
5985 * the array. So we fail if that time hasn't passed.
5986 * For a reshape that keeps the number of devices the same
5987 * mdadm must be monitoring the reshape can keeping the
5988 * critical areas read-only and backed up. It will start
5989 * the array in read-only mode, so we check for that.
5991 sector_t here_new
, here_old
;
5993 int max_degraded
= (mddev
->level
== 6 ? 2 : 1);
5995 if (mddev
->new_level
!= mddev
->level
) {
5996 printk(KERN_ERR
"md/raid:%s: unsupported reshape "
5997 "required - aborting.\n",
6001 old_disks
= mddev
->raid_disks
- mddev
->delta_disks
;
6002 /* reshape_position must be on a new-stripe boundary, and one
6003 * further up in new geometry must map after here in old
6006 here_new
= mddev
->reshape_position
;
6007 if (sector_div(here_new
, mddev
->new_chunk_sectors
*
6008 (mddev
->raid_disks
- max_degraded
))) {
6009 printk(KERN_ERR
"md/raid:%s: reshape_position not "
6010 "on a stripe boundary\n", mdname(mddev
));
6013 reshape_offset
= here_new
* mddev
->new_chunk_sectors
;
6014 /* here_new is the stripe we will write to */
6015 here_old
= mddev
->reshape_position
;
6016 sector_div(here_old
, mddev
->chunk_sectors
*
6017 (old_disks
-max_degraded
));
6018 /* here_old is the first stripe that we might need to read
6020 if (mddev
->delta_disks
== 0) {
6021 if ((here_new
* mddev
->new_chunk_sectors
!=
6022 here_old
* mddev
->chunk_sectors
)) {
6023 printk(KERN_ERR
"md/raid:%s: reshape position is"
6024 " confused - aborting\n", mdname(mddev
));
6027 /* We cannot be sure it is safe to start an in-place
6028 * reshape. It is only safe if user-space is monitoring
6029 * and taking constant backups.
6030 * mdadm always starts a situation like this in
6031 * readonly mode so it can take control before
6032 * allowing any writes. So just check for that.
6034 if (abs(min_offset_diff
) >= mddev
->chunk_sectors
&&
6035 abs(min_offset_diff
) >= mddev
->new_chunk_sectors
)
6036 /* not really in-place - so OK */;
6037 else if (mddev
->ro
== 0) {
6038 printk(KERN_ERR
"md/raid:%s: in-place reshape "
6039 "must be started in read-only mode "
6044 } else if (mddev
->reshape_backwards
6045 ? (here_new
* mddev
->new_chunk_sectors
+ min_offset_diff
<=
6046 here_old
* mddev
->chunk_sectors
)
6047 : (here_new
* mddev
->new_chunk_sectors
>=
6048 here_old
* mddev
->chunk_sectors
+ (-min_offset_diff
))) {
6049 /* Reading from the same stripe as writing to - bad */
6050 printk(KERN_ERR
"md/raid:%s: reshape_position too early for "
6051 "auto-recovery - aborting.\n",
6055 printk(KERN_INFO
"md/raid:%s: reshape will continue\n",
6057 /* OK, we should be able to continue; */
6059 BUG_ON(mddev
->level
!= mddev
->new_level
);
6060 BUG_ON(mddev
->layout
!= mddev
->new_layout
);
6061 BUG_ON(mddev
->chunk_sectors
!= mddev
->new_chunk_sectors
);
6062 BUG_ON(mddev
->delta_disks
!= 0);
6065 if (mddev
->private == NULL
)
6066 conf
= setup_conf(mddev
);
6068 conf
= mddev
->private;
6071 return PTR_ERR(conf
);
6073 conf
->min_offset_diff
= min_offset_diff
;
6074 mddev
->thread
= conf
->thread
;
6075 conf
->thread
= NULL
;
6076 mddev
->private = conf
;
6078 for (i
= 0; i
< conf
->raid_disks
&& conf
->previous_raid_disks
;
6080 rdev
= conf
->disks
[i
].rdev
;
6081 if (!rdev
&& conf
->disks
[i
].replacement
) {
6082 /* The replacement is all we have yet */
6083 rdev
= conf
->disks
[i
].replacement
;
6084 conf
->disks
[i
].replacement
= NULL
;
6085 clear_bit(Replacement
, &rdev
->flags
);
6086 conf
->disks
[i
].rdev
= rdev
;
6090 if (conf
->disks
[i
].replacement
&&
6091 conf
->reshape_progress
!= MaxSector
) {
6092 /* replacements and reshape simply do not mix. */
6093 printk(KERN_ERR
"md: cannot handle concurrent "
6094 "replacement and reshape.\n");
6097 if (test_bit(In_sync
, &rdev
->flags
)) {
6101 /* This disc is not fully in-sync. However if it
6102 * just stored parity (beyond the recovery_offset),
6103 * when we don't need to be concerned about the
6104 * array being dirty.
6105 * When reshape goes 'backwards', we never have
6106 * partially completed devices, so we only need
6107 * to worry about reshape going forwards.
6109 /* Hack because v0.91 doesn't store recovery_offset properly. */
6110 if (mddev
->major_version
== 0 &&
6111 mddev
->minor_version
> 90)
6112 rdev
->recovery_offset
= reshape_offset
;
6114 if (rdev
->recovery_offset
< reshape_offset
) {
6115 /* We need to check old and new layout */
6116 if (!only_parity(rdev
->raid_disk
,
6119 conf
->max_degraded
))
6122 if (!only_parity(rdev
->raid_disk
,
6124 conf
->previous_raid_disks
,
6125 conf
->max_degraded
))
6127 dirty_parity_disks
++;
6131 * 0 for a fully functional array, 1 or 2 for a degraded array.
6133 mddev
->degraded
= calc_degraded(conf
);
6135 if (has_failed(conf
)) {
6136 printk(KERN_ERR
"md/raid:%s: not enough operational devices"
6137 " (%d/%d failed)\n",
6138 mdname(mddev
), mddev
->degraded
, conf
->raid_disks
);
6142 /* device size must be a multiple of chunk size */
6143 mddev
->dev_sectors
&= ~(mddev
->chunk_sectors
- 1);
6144 mddev
->resync_max_sectors
= mddev
->dev_sectors
;
6146 if (mddev
->degraded
> dirty_parity_disks
&&
6147 mddev
->recovery_cp
!= MaxSector
) {
6148 if (mddev
->ok_start_degraded
)
6150 "md/raid:%s: starting dirty degraded array"
6151 " - data corruption possible.\n",
6155 "md/raid:%s: cannot start dirty degraded array.\n",
6161 if (mddev
->degraded
== 0)
6162 printk(KERN_INFO
"md/raid:%s: raid level %d active with %d out of %d"
6163 " devices, algorithm %d\n", mdname(mddev
), conf
->level
,
6164 mddev
->raid_disks
-mddev
->degraded
, mddev
->raid_disks
,
6167 printk(KERN_ALERT
"md/raid:%s: raid level %d active with %d"
6168 " out of %d devices, algorithm %d\n",
6169 mdname(mddev
), conf
->level
,
6170 mddev
->raid_disks
- mddev
->degraded
,
6171 mddev
->raid_disks
, mddev
->new_layout
);
6173 print_raid5_conf(conf
);
6175 if (conf
->reshape_progress
!= MaxSector
) {
6176 conf
->reshape_safe
= conf
->reshape_progress
;
6177 atomic_set(&conf
->reshape_stripes
, 0);
6178 clear_bit(MD_RECOVERY_SYNC
, &mddev
->recovery
);
6179 clear_bit(MD_RECOVERY_CHECK
, &mddev
->recovery
);
6180 set_bit(MD_RECOVERY_RESHAPE
, &mddev
->recovery
);
6181 set_bit(MD_RECOVERY_RUNNING
, &mddev
->recovery
);
6182 mddev
->sync_thread
= md_register_thread(md_do_sync
, mddev
,
6186 /* Ok, everything is just fine now */
6187 if (mddev
->to_remove
== &raid5_attrs_group
)
6188 mddev
->to_remove
= NULL
;
6189 else if (mddev
->kobj
.sd
&&
6190 sysfs_create_group(&mddev
->kobj
, &raid5_attrs_group
))
6192 "raid5: failed to create sysfs attributes for %s\n",
6194 md_set_array_sectors(mddev
, raid5_size(mddev
, 0, 0));
6198 bool discard_supported
= true;
6199 /* read-ahead size must cover two whole stripes, which
6200 * is 2 * (datadisks) * chunksize where 'n' is the
6201 * number of raid devices
6203 int data_disks
= conf
->previous_raid_disks
- conf
->max_degraded
;
6204 int stripe
= data_disks
*
6205 ((mddev
->chunk_sectors
<< 9) / PAGE_SIZE
);
6206 if (mddev
->queue
->backing_dev_info
.ra_pages
< 2 * stripe
)
6207 mddev
->queue
->backing_dev_info
.ra_pages
= 2 * stripe
;
		blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);

		mddev->queue->backing_dev_info.congested_data = mddev;
		mddev->queue->backing_dev_info.congested_fn = raid5_congested;

		chunk_size = mddev->chunk_sectors << 9;
		blk_queue_io_min(mddev->queue, chunk_size);
		blk_queue_io_opt(mddev->queue, chunk_size *
				 (conf->raid_disks - conf->max_degraded));
		mddev->queue->limits.raid_partial_stripes_expensive = 1;
		/*
		 * We can only discard a whole stripe. It doesn't make sense to
		 * discard data disk but write parity disk
		 */
		stripe = stripe * PAGE_SIZE;
		/* Round up to power of 2, as discard handling
		 * currently assumes that */
		while ((stripe-1) & stripe)
			stripe = (stripe | (stripe-1)) + 1;
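		/*
		 * The loop above is a standard bit trick: OR-ing stripe with
		 * (stripe - 1) sets every bit below the highest set bit, and
		 * adding 1 then carries into the next power of two.  For
		 * example (illustrative): 0x3000 -> 0x3fff + 1 = 0x4000.  It
		 * terminates once (stripe - 1) & stripe == 0, i.e. stripe has
		 * a single bit set.
		 */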
		mddev->queue->limits.discard_alignment = stripe;
		mddev->queue->limits.discard_granularity = stripe;
		/*
		 * unaligned part of discard request will be ignored, so can't
		 * guarantee discard_zeroes_data
		 */
		mddev->queue->limits.discard_zeroes_data = 0;

		blk_queue_max_write_same_sectors(mddev->queue, 0);
		rdev_for_each(rdev, mddev) {
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->new_data_offset << 9);
			/*
			 * discard_zeroes_data is required, otherwise data
			 * could be lost. Consider a scenario: discard a stripe
			 * (the stripe could be inconsistent if
			 * discard_zeroes_data is 0); write one disk of the
			 * stripe (the stripe could be inconsistent again
			 * depending on which disks are used to calculate
			 * parity); the disk is broken; the stripe data of this
			 * disk is lost.
			 */
			if (!blk_queue_discard(bdev_get_queue(rdev->bdev)) ||
			    !bdev_get_queue(rdev->bdev)->
						limits.discard_zeroes_data)
				discard_supported = false;
			/* Unfortunately, discard_zeroes_data is not currently
			 * a guarantee - just a hint.  So we only allow DISCARD
			 * if the sysadmin has confirmed that only safe devices
			 * are in use by setting a module parameter.
			 */
			if (!devices_handle_discard_safely) {
				if (discard_supported) {
					pr_info("md/raid456: discard support disabled due to uncertainty.\n");
					pr_info("Set raid456.devices_handle_discard_safely=Y to override.\n");
				}
				discard_supported = false;
			}
		}
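		/*
		 * Note: the override is normally given at module load time,
		 * e.g. "modprobe raid456 devices_handle_discard_safely=Y" or
		 * "raid456.devices_handle_discard_safely=Y" on the kernel
		 * command line; since the parameter is declared writable it
		 * should also be reachable under
		 * /sys/module/raid456/parameters/, though a change there only
		 * affects arrays started afterwards, because the decision
		 * below is made once when the array is run.
		 */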
		if (discard_supported &&
		    mddev->queue->limits.max_discard_sectors >= stripe &&
		    mddev->queue->limits.discard_granularity >= stripe)
			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
						mddev->queue);
		else
			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
						  mddev->queue);
	}

	return 0;
abort:
	md_unregister_thread(&mddev->thread);
	print_raid5_conf(conf);
	free_conf(conf);
	mddev->private = NULL;
	printk(KERN_ALERT
	       "md/raid:%s: failed to run raid set.\n", mdname(mddev));
	return -EIO;
}
static int stop(struct mddev *mddev)
{
	struct r5conf *conf = mddev->private;

	md_unregister_thread(&mddev->thread);
	if (mddev->queue)
		mddev->queue->backing_dev_info.congested_fn = NULL;
	free_conf(conf);
	mddev->private = NULL;
	mddev->to_remove = &raid5_attrs_group;
	return 0;
}
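/*
 * Example (illustrative) of the summary the function below appends to the
 * array's line in /proc/mdstat, for a 4-device RAID5 with one failed member:
 *
 *	level 5, 512k chunk, algorithm 2 [4/3] [UU_U]
 *
 * "U" marks an in-sync device, "_" a missing or out-of-sync slot.
 */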
static void status(struct seq_file *seq, struct mddev *mddev)
{
	struct r5conf *conf = mddev->private;
	int i;

	seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
		   mddev->chunk_sectors / 2, mddev->layout);
	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
		   conf->raid_disks - mddev->degraded);
	for (i = 0; i < conf->raid_disks; i++)
		seq_printf(seq, "%s",
			   conf->disks[i].rdev &&
			   test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
	seq_printf(seq, "]");
}
static void print_raid5_conf(struct r5conf *conf)
{
	int i;
	struct disk_info *tmp;

	printk(KERN_DEBUG "RAID conf printout:\n");
	if (!conf) {
		printk("(conf==NULL)\n");
		return;
	}
	printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level,
	       conf->raid_disks,
	       conf->raid_disks - conf->mddev->degraded);

	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		tmp = conf->disks + i;
		if (tmp->rdev)
			printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n",
			       i, !test_bit(Faulty, &tmp->rdev->flags),
			       bdevname(tmp->rdev->bdev, b));
	}
}
static int raid5_spare_active(struct mddev *mddev)
{
	int i;
	struct r5conf *conf = mddev->private;
	struct disk_info *tmp;
	int count = 0;
	unsigned long flags;

	for (i = 0; i < conf->raid_disks; i++) {
		tmp = conf->disks + i;
		if (tmp->replacement
		    && tmp->replacement->recovery_offset == MaxSector
		    && !test_bit(Faulty, &tmp->replacement->flags)
		    && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
			/* Replacement has just become active. */
			if (!tmp->rdev
			    || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
				count++;
			if (tmp->rdev) {
				/* Replaced device not technically faulty,
				 * but we need to be sure it gets removed
				 * and never re-added.
				 */
				set_bit(Faulty, &tmp->rdev->flags);
				sysfs_notify_dirent_safe(
					tmp->rdev->sysfs_state);
			}
			sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
		} else if (tmp->rdev
			   && tmp->rdev->recovery_offset == MaxSector
			   && !test_bit(Faulty, &tmp->rdev->flags)
			   && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
			count++;
			sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
		}
	}
	spin_lock_irqsave(&conf->device_lock, flags);
	mddev->degraded = calc_degraded(conf);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	print_raid5_conf(conf);
	return count;
}
static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct r5conf *conf = mddev->private;
	int err = 0;
	int number = rdev->raid_disk;
	struct md_rdev **rdevp;
	struct disk_info *p = conf->disks + number;

	print_raid5_conf(conf);
	if (rdev == p->rdev)
		rdevp = &p->rdev;
	else if (rdev == p->replacement)
		rdevp = &p->replacement;
	else
		return 0;

	if (number >= conf->raid_disks &&
	    conf->reshape_progress == MaxSector)
		clear_bit(In_sync, &rdev->flags);

	if (test_bit(In_sync, &rdev->flags) ||
	    atomic_read(&rdev->nr_pending)) {
		err = -EBUSY;
		goto abort;
	}
	/* Only remove non-faulty devices if recovery
	 * is not possible.
	 */
	if (!test_bit(Faulty, &rdev->flags) &&
	    mddev->recovery_disabled != conf->recovery_disabled &&
	    !has_failed(conf) &&
	    (!p->replacement || p->replacement == rdev) &&
	    number < conf->raid_disks) {
		err = -EBUSY;
		goto abort;
	}
	*rdevp = NULL;
	synchronize_rcu();
	if (atomic_read(&rdev->nr_pending)) {
		/* lost the race, try later */
		err = -EBUSY;
		*rdevp = rdev;
	} else if (p->replacement) {
		/* We must have just cleared 'rdev' */
		p->rdev = p->replacement;
		clear_bit(Replacement, &p->replacement->flags);
		smp_mb(); /* Make sure other CPUs may see both as identical
			   * but will never see neither - if they are careful
			   */
		p->replacement = NULL;
		clear_bit(WantReplacement, &rdev->flags);
	} else
		/* We might have just removed the Replacement as faulty -
		 * clear the bit just in case
		 */
		clear_bit(WantReplacement, &rdev->flags);
abort:

	print_raid5_conf(conf);
	return err;
}
static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct r5conf *conf = mddev->private;
	int err = -EEXIST;
	int disk;
	struct disk_info *p;
	int first = 0;
	int last = conf->raid_disks - 1;

	if (mddev->recovery_disabled == conf->recovery_disabled)
		return -EBUSY;

	if (rdev->saved_raid_disk < 0 && has_failed(conf))
		/* no point adding a device */
		return -EINVAL;

	if (rdev->raid_disk >= 0)
		first = last = rdev->raid_disk;

	/*
	 * find the disk ... but prefer rdev->saved_raid_disk
	 * if possible.
	 */
	if (rdev->saved_raid_disk >= 0 &&
	    rdev->saved_raid_disk >= first &&
	    conf->disks[rdev->saved_raid_disk].rdev == NULL)
		first = rdev->saved_raid_disk;

	for (disk = first; disk <= last; disk++) {
		p = conf->disks + disk;
		if (p->rdev == NULL) {
			clear_bit(In_sync, &rdev->flags);
			rdev->raid_disk = disk;
			err = 0;
			if (rdev->saved_raid_disk != disk)
				conf->fullsync = 1;
			rcu_assign_pointer(p->rdev, rdev);
			goto out;
		}
	}
	for (disk = first; disk <= last; disk++) {
		p = conf->disks + disk;
		if (test_bit(WantReplacement, &p->rdev->flags) &&
		    p->replacement == NULL) {
			clear_bit(In_sync, &rdev->flags);
			set_bit(Replacement, &rdev->flags);
			rdev->raid_disk = disk;
			err = 0;
			conf->fullsync = 1;
			rcu_assign_pointer(p->replacement, rdev);
			break;
		}
	}
out:
	print_raid5_conf(conf);
	return err;
}
static int raid5_resize(struct mddev *mddev, sector_t sectors)
{
	/* no resync is happening, and there is enough space
	 * on all devices, so we can resize.
	 * We need to make sure resync covers any new space.
	 * If the array is shrinking we should possibly wait until
	 * any io in the removed space completes, but it hardly seems
	 * worth it.
	 */
	sector_t newsize;
	sectors &= ~((sector_t)mddev->chunk_sectors - 1);
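	/*
	 * The mask above rounds the requested size down to a whole number of
	 * chunks (chunk_sectors is a power of two for raid4/5/6).
	 * Illustrative example: with a 512K chunk (chunk_sectors = 1024), a
	 * request of 5000000 sectors is trimmed to 4999168.
	 */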
	newsize = raid5_size(mddev, sectors, mddev->raid_disks);
	if (mddev->external_size &&
	    mddev->array_sectors > newsize)
		return -EINVAL;
	if (mddev->bitmap) {
		int ret = bitmap_resize(mddev->bitmap, sectors, 0, 0);
		if (ret)
			return ret;
	}
	md_set_array_sectors(mddev, newsize);
	set_capacity(mddev->gendisk, mddev->array_sectors);
	revalidate_disk(mddev->gendisk);
	if (sectors > mddev->dev_sectors &&
	    mddev->recovery_cp > mddev->dev_sectors) {
		mddev->recovery_cp = mddev->dev_sectors;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	}
	mddev->dev_sectors = sectors;
	mddev->resync_max_sectors = sectors;
	return 0;
}
static int check_stripe_cache(struct mddev *mddev)
{
	/* Can only proceed if there are plenty of stripe_heads.
	 * We need a minimum of one full stripe, and for sensible progress
	 * it is best to have about 4 times that.
	 * If we require 4 times, then the default 256 4K stripe_heads will
	 * allow for chunk sizes up to 256K, which is probably OK.
	 * If the chunk size is greater, user-space should request more
	 * stripe_heads first.
	 */
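	/*
	 * Arithmetic behind the "256K" figure above (illustrative): a 256K
	 * chunk spans 256K / 4K (STRIPE_SIZE) = 64 stripe_heads per full
	 * stripe, and four full stripes therefore need 4 * 64 = 256
	 * stripe_heads, which matches the default mentioned above.
	 */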
	struct r5conf *conf = mddev->private;
	if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
	    > conf->max_nr_stripes ||
	    ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
	    > conf->max_nr_stripes) {
		printk(KERN_WARNING
		       "md/raid:%s: reshape: not enough stripes.  Needed %lu\n",
		       mdname(mddev),
		       ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
			/ STRIPE_SIZE) * 4);
		return 0;
	}
	return 1;
}
static int check_reshape(struct mddev *mddev)
{
	struct r5conf *conf = mddev->private;

	if (mddev->delta_disks == 0 &&
	    mddev->new_layout == mddev->layout &&
	    mddev->new_chunk_sectors == mddev->chunk_sectors)
		return 0; /* nothing to do */
	if (has_failed(conf))
		return -EINVAL;
	if (mddev->delta_disks < 0 && mddev->reshape_position == MaxSector) {
		/* We might be able to shrink, but the devices must
		 * be made bigger first.
		 * For raid6, 4 is the minimum size.
		 * Otherwise 2 is the minimum.
		 */
		int min = 2;
		if (mddev->level == 6)
			min = 4;
		if (mddev->raid_disks + mddev->delta_disks < min)
			return -EINVAL;
	}

	if (!check_stripe_cache(mddev))
		return -ENOSPC;

	return resize_stripes(conf, (conf->previous_raid_disks
				     + mddev->delta_disks));
}
static int raid5_start_reshape(struct mddev *mddev)
{
	struct r5conf *conf = mddev->private;
	struct md_rdev *rdev;
	int spares = 0;
	unsigned long flags;

	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return -EBUSY;

	if (!check_stripe_cache(mddev))
		return -ENOSPC;

	if (has_failed(conf))
		return -EINVAL;

	rdev_for_each(rdev, mddev) {
		if (!test_bit(In_sync, &rdev->flags)
		    && !test_bit(Faulty, &rdev->flags))
			spares++;
	}

	if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
		/* Not enough devices even to make a degraded array
		 * of that size.
		 */
		return -EINVAL;
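	/*
	 * Illustrative reading of the check above: growing a RAID5
	 * (max_degraded = 1) by two disks (delta_disks = 2) with no current
	 * failures needs at least 2 - 1 = 1 spare, since parity can absorb
	 * only one missing member of the enlarged array.
	 */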
	/* Refuse to reduce size of the array.  Any reductions in
	 * array size must be through explicit setting of array_size
	 * attribute.
	 */
	if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
	    < mddev->array_sectors) {
		printk(KERN_ERR "md/raid:%s: array size must be reduced "
		       "before number of disks\n", mdname(mddev));
		return -EINVAL;
	}

	atomic_set(&conf->reshape_stripes, 0);
	spin_lock_irq(&conf->device_lock);
	write_seqcount_begin(&conf->gen_lock);
	conf->previous_raid_disks = conf->raid_disks;
	conf->raid_disks += mddev->delta_disks;
	conf->prev_chunk_sectors = conf->chunk_sectors;
	conf->chunk_sectors = mddev->new_chunk_sectors;
	conf->prev_algo = conf->algorithm;
	conf->algorithm = mddev->new_layout;
	conf->generation++;
	/* Code that selects data_offset needs to see the generation update
	 * if reshape_progress has been set - so a memory barrier needed.
	 */
	smp_mb();
	if (mddev->reshape_backwards)
		conf->reshape_progress = raid5_size(mddev, 0, 0);
	else
		conf->reshape_progress = 0;
	conf->reshape_safe = conf->reshape_progress;
	write_seqcount_end(&conf->gen_lock);
	spin_unlock_irq(&conf->device_lock);

	/* Now make sure any requests that proceeded on the assumption
	 * the reshape wasn't running - like Discard or Read - have
	 * completed.
	 */
	mddev_suspend(mddev);
	mddev_resume(mddev);

	/* Add some new drives, as many as will fit.
	 * We know there are enough to make the newly sized array work.
	 * Don't add devices if we are reducing the number of
	 * devices in the array.  This is because it is not possible
	 * to correctly record the "partially reconstructed" state of
	 * such devices during the reshape and confusion could result.
	 */
	if (mddev->delta_disks >= 0) {
		rdev_for_each(rdev, mddev)
			if (rdev->raid_disk < 0 &&
			    !test_bit(Faulty, &rdev->flags)) {
				if (raid5_add_disk(mddev, rdev) == 0) {
					if (rdev->raid_disk
					    >= conf->previous_raid_disks)
						set_bit(In_sync, &rdev->flags);
					else
						rdev->recovery_offset = 0;

					if (sysfs_link_rdev(mddev, rdev))
						/* Failure here is OK */;
				}
			} else if (rdev->raid_disk >= conf->previous_raid_disks
				   && !test_bit(Faulty, &rdev->flags)) {
				/* This is a spare that was manually added */
				set_bit(In_sync, &rdev->flags);
			}

		/* When a reshape changes the number of devices,
		 * ->degraded is measured against the larger of the
		 * pre and post number of devices.
		 */
		spin_lock_irqsave(&conf->device_lock, flags);
		mddev->degraded = calc_degraded(conf);
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}
	mddev->raid_disks = conf->raid_disks;
	mddev->reshape_position = conf->reshape_progress;
	set_bit(MD_CHANGE_DEVS, &mddev->flags);

	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
	set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
	set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
	mddev->sync_thread = md_register_thread(md_do_sync, mddev,
						"reshape");
	if (!mddev->sync_thread) {
		mddev->recovery = 0;
		spin_lock_irq(&conf->device_lock);
		write_seqcount_begin(&conf->gen_lock);
		mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
		mddev->new_chunk_sectors =
			conf->chunk_sectors = conf->prev_chunk_sectors;
		mddev->new_layout = conf->algorithm = conf->prev_algo;
		rdev_for_each(rdev, mddev)
			rdev->new_data_offset = rdev->data_offset;
		smp_wmb();
		conf->generation--;
		conf->reshape_progress = MaxSector;
		mddev->reshape_position = MaxSector;
		write_seqcount_end(&conf->gen_lock);
		spin_unlock_irq(&conf->device_lock);
		return -EAGAIN;
	}
	conf->reshape_checkpoint = jiffies;
	md_wakeup_thread(mddev->sync_thread);
	md_new_event(mddev);
	return 0;
}
/* This is called from the reshape thread and should make any
 * changes needed in 'conf'
 */
static void end_reshape(struct r5conf *conf)
{
	if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
		struct md_rdev *rdev;

		spin_lock_irq(&conf->device_lock);
		conf->previous_raid_disks = conf->raid_disks;
		rdev_for_each(rdev, conf->mddev)
			rdev->data_offset = rdev->new_data_offset;
		smp_wmb();
		conf->reshape_progress = MaxSector;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_for_overlap);

		/* read-ahead size must cover two whole stripes, which is
		 * 2 * (number of data disks) * chunksize
		 */
		if (conf->mddev->queue) {
			int data_disks = conf->raid_disks - conf->max_degraded;
			int stripe = data_disks * ((conf->chunk_sectors << 9)
						   / PAGE_SIZE);
			if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
				conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
		}
	}
}
/* This is called from the raid5d thread with mddev_lock held.
 * It makes config changes to the device.
 */
static void raid5_finish_reshape(struct mddev *mddev)
{
	struct r5conf *conf = mddev->private;

	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {

		if (mddev->delta_disks > 0) {
			md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
			set_capacity(mddev->gendisk, mddev->array_sectors);
			revalidate_disk(mddev->gendisk);
		} else {
			int d;
			spin_lock_irq(&conf->device_lock);
			mddev->degraded = calc_degraded(conf);
			spin_unlock_irq(&conf->device_lock);
			for (d = conf->raid_disks;
			     d < conf->raid_disks - mddev->delta_disks;
			     d++) {
				struct md_rdev *rdev = conf->disks[d].rdev;
				if (rdev)
					clear_bit(In_sync, &rdev->flags);
				rdev = conf->disks[d].replacement;
				if (rdev)
					clear_bit(In_sync, &rdev->flags);
			}
		}
		mddev->layout = conf->algorithm;
		mddev->chunk_sectors = conf->chunk_sectors;
		mddev->reshape_position = MaxSector;
		mddev->delta_disks = 0;
		mddev->reshape_backwards = 0;
	}
}
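/*
 * Note on conf->quiesce as used in the handler below: it is raised to 2
 * while draining so resync/reshape code knows to pause, drops to 1 once all
 * active stripes and aligned reads have completed, and returns to 0 when
 * writes are re-enabled.
 */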
static void raid5_quiesce(struct mddev *mddev, int state)
{
	struct r5conf *conf = mddev->private;

	switch (state) {
	case 2: /* resume for a suspend */
		wake_up(&conf->wait_for_overlap);
		break;

	case 1: /* stop all writes */
		lock_all_device_hash_locks_irq(conf);
		/* '2' tells resync/reshape to pause so that all
		 * active stripes can drain
		 */
		conf->quiesce = 2;
		wait_event_cmd(conf->wait_for_stripe,
			       atomic_read(&conf->active_stripes) == 0 &&
			       atomic_read(&conf->active_aligned_reads) == 0,
			       unlock_all_device_hash_locks_irq(conf),
			       lock_all_device_hash_locks_irq(conf));
		conf->quiesce = 1;
		unlock_all_device_hash_locks_irq(conf);
		/* allow reshape to continue */
		wake_up(&conf->wait_for_overlap);
		break;

	case 0: /* re-enable writes */
		lock_all_device_hash_locks_irq(conf);
		conf->quiesce = 0;
		wake_up(&conf->wait_for_stripe);
		wake_up(&conf->wait_for_overlap);
		unlock_all_device_hash_locks_irq(conf);
		break;
	}
}
static void *raid45_takeover_raid0(struct mddev *mddev, int level)
{
	struct r0conf *raid0_conf = mddev->private;
	sector_t sectors;

	/* for raid0 takeover only one zone is supported */
	if (raid0_conf->nr_strip_zones > 1) {
		printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	sectors = raid0_conf->strip_zone[0].zone_end;
	sector_div(sectors, raid0_conf->strip_zone[0].nb_dev);
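	/*
	 * zone_end counts sectors across all members of the (single) zone,
	 * so dividing by the number of member devices gives the usable size
	 * of each device, which becomes dev_sectors for the new raid4/5
	 * layout below.
	 */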
	mddev->dev_sectors = sectors;
	mddev->new_level = level;
	mddev->new_layout = ALGORITHM_PARITY_N;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks += 1;
	mddev->delta_disks = 1;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;

	return setup_conf(mddev);
}
static void *raid5_takeover_raid1(struct mddev *mddev)
{
	int chunksect;

	if (mddev->raid_disks != 2 ||
	    mddev->degraded > 1)
		return ERR_PTR(-EINVAL);

	/* Should check if there are write-behind devices? */

	chunksect = 64*2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect-1)))
		chunksect >>= 1;
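	/*
	 * The loop starts from 128 sectors (64K) and keeps halving until the
	 * chunk size evenly divides the array size; if no chunk of at least
	 * one STRIPE_SIZE page fits, the takeover is refused below.
	 */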
	if ((chunksect<<9) < STRIPE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	mddev->new_level = 5;
	mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
	mddev->new_chunk_sectors = chunksect;

	return setup_conf(mddev);
}
static void *raid5_takeover_raid6(struct mddev *mddev)
{
	int new_layout;

	switch (mddev->layout) {
	case ALGORITHM_LEFT_ASYMMETRIC_6:
		new_layout = ALGORITHM_LEFT_ASYMMETRIC;
		break;
	case ALGORITHM_RIGHT_ASYMMETRIC_6:
		new_layout = ALGORITHM_RIGHT_ASYMMETRIC;
		break;
	case ALGORITHM_LEFT_SYMMETRIC_6:
		new_layout = ALGORITHM_LEFT_SYMMETRIC;
		break;
	case ALGORITHM_RIGHT_SYMMETRIC_6:
		new_layout = ALGORITHM_RIGHT_SYMMETRIC;
		break;
	case ALGORITHM_PARITY_0_6:
		new_layout = ALGORITHM_PARITY_0;
		break;
	case ALGORITHM_PARITY_N:
		new_layout = ALGORITHM_PARITY_N;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
	mddev->new_level = 5;
	mddev->new_layout = new_layout;
	mddev->delta_disks = -1;
	mddev->raid_disks -= 1;
	return setup_conf(mddev);
}
static int raid5_check_reshape(struct mddev *mddev)
{
	/* For a 2-drive array, the layout and chunk size can be changed
	 * immediately as no restriping is needed.
	 * For larger arrays we record the new value - after validation
	 * to be used by a reshape pass.
	 */
	struct r5conf *conf = mddev->private;
	int new_chunk = mddev->new_chunk_sectors;

	if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
		return -EINVAL;
	if (new_chunk > 0) {
		if (!is_power_of_2(new_chunk))
			return -EINVAL;
		if (new_chunk < (PAGE_SIZE>>9))
			return -EINVAL;
		if (mddev->array_sectors & (new_chunk-1))
			/* not a factor of array size */
			return -EINVAL;
	}

	/* They look valid */

	if (mddev->raid_disks == 2) {
		/* can make the change immediately */
		if (mddev->new_layout >= 0) {
			conf->algorithm = mddev->new_layout;
			mddev->layout = mddev->new_layout;
		}
		if (new_chunk > 0) {
			conf->chunk_sectors = new_chunk;
			mddev->chunk_sectors = new_chunk;
		}
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		md_wakeup_thread(mddev->thread);
	}
	return check_reshape(mddev);
}
static int raid6_check_reshape(struct mddev *mddev)
{
	int new_chunk = mddev->new_chunk_sectors;

	if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
		return -EINVAL;
	if (new_chunk > 0) {
		if (!is_power_of_2(new_chunk))
			return -EINVAL;
		if (new_chunk < (PAGE_SIZE >> 9))
			return -EINVAL;
		if (mddev->array_sectors & (new_chunk-1))
			/* not a factor of array size */
			return -EINVAL;
	}

	/* They look valid */
	return check_reshape(mddev);
}
static void *raid5_takeover(struct mddev *mddev)
{
	/* raid5 can take over:
	 *  raid0 - if there is only one strip zone - make it a raid4 layout
	 *  raid1 - if there are two drives.  We need to know the chunk size
	 *  raid4 - trivial - just use a raid4 layout.
	 *  raid6 - providing it is a *_6 layout
	 */
	if (mddev->level == 0)
		return raid45_takeover_raid0(mddev, 5);
	if (mddev->level == 1)
		return raid5_takeover_raid1(mddev);
	if (mddev->level == 4) {
		mddev->new_layout = ALGORITHM_PARITY_N;
		mddev->new_level = 5;
		return setup_conf(mddev);
	}
	if (mddev->level == 6)
		return raid5_takeover_raid6(mddev);

	return ERR_PTR(-EINVAL);
}
static void *raid4_takeover(struct mddev *mddev)
{
	/* raid4 can take over:
	 *  raid0 - if there is only one strip zone
	 *  raid5 - if layout is right
	 */
	if (mddev->level == 0)
		return raid45_takeover_raid0(mddev, 4);
	if (mddev->level == 5 &&
	    mddev->layout == ALGORITHM_PARITY_N) {
		mddev->new_layout = 0;
		mddev->new_level = 4;
		return setup_conf(mddev);
	}
	return ERR_PTR(-EINVAL);
}

static struct md_personality raid5_personality;
static void *raid6_takeover(struct mddev *mddev)
{
	/* Currently can only take over a raid5.  We map the
	 * personality to an equivalent raid6 personality
	 * with the Q block at the end.
	 */
	int new_layout;

	if (mddev->pers != &raid5_personality)
		return ERR_PTR(-EINVAL);
	if (mddev->degraded > 1)
		return ERR_PTR(-EINVAL);
	if (mddev->raid_disks > 253)
		return ERR_PTR(-EINVAL);
	if (mddev->raid_disks < 3)
		return ERR_PTR(-EINVAL);

	switch (mddev->layout) {
	case ALGORITHM_LEFT_ASYMMETRIC:
		new_layout = ALGORITHM_LEFT_ASYMMETRIC_6;
		break;
	case ALGORITHM_RIGHT_ASYMMETRIC:
		new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
		break;
	case ALGORITHM_LEFT_SYMMETRIC:
		new_layout = ALGORITHM_LEFT_SYMMETRIC_6;
		break;
	case ALGORITHM_RIGHT_SYMMETRIC:
		new_layout = ALGORITHM_RIGHT_SYMMETRIC_6;
		break;
	case ALGORITHM_PARITY_0:
		new_layout = ALGORITHM_PARITY_0_6;
		break;
	case ALGORITHM_PARITY_N:
		new_layout = ALGORITHM_PARITY_N;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
	mddev->new_level = 6;
	mddev->new_layout = new_layout;
	mddev->delta_disks = 1;
	mddev->raid_disks += 1;
	return setup_conf(mddev);
}
static struct md_personality raid6_personality =
{
	.name		= "raid6",
	.level		= 6,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk = raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid6_check_reshape,
	.start_reshape	= raid5_start_reshape,
	.finish_reshape	= raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid6_takeover,
};

static struct md_personality raid5_personality =
{
	.name		= "raid5",
	.level		= 5,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk = raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid5_check_reshape,
	.start_reshape	= raid5_start_reshape,
	.finish_reshape	= raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid5_takeover,
};

static struct md_personality raid4_personality =
{
	.name		= "raid4",
	.level		= 4,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk = raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid5_check_reshape,
	.start_reshape	= raid5_start_reshape,
	.finish_reshape	= raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid4_takeover,
};
static int __init raid5_init(void)
{
	raid5_wq = alloc_workqueue("raid5wq",
		WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE|WQ_SYSFS, 0);
	if (!raid5_wq)
		return -ENOMEM;
	register_md_personality(&raid6_personality);
	register_md_personality(&raid5_personality);
	register_md_personality(&raid4_personality);
	return 0;
}

static void raid5_exit(void)
{
	unregister_md_personality(&raid6_personality);
	unregister_md_personality(&raid5_personality);
	unregister_md_personality(&raid4_personality);
	destroy_workqueue(raid5_wq);
}

module_init(raid5_init);
module_exit(raid5_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD");
MODULE_ALIAS("md-personality-4"); /* RAID5 */
MODULE_ALIAS("md-raid5");
MODULE_ALIAS("md-raid4");
MODULE_ALIAS("md-level-5");
MODULE_ALIAS("md-level-4");
MODULE_ALIAS("md-personality-8"); /* RAID6 */
MODULE_ALIAS("md-raid6");
MODULE_ALIAS("md-level-6");

/* This used to be two separate modules, they were: */
MODULE_ALIAS("raid5");
MODULE_ALIAS("raid6");