md/raid5: fix handling of degraded stripes in batches.
drivers/md/raid5.c
1 /*
2 * raid5.c : Multiple Devices driver for Linux
3 * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
4 * Copyright (C) 1999, 2000 Ingo Molnar
5 * Copyright (C) 2002, 2003 H. Peter Anvin
6 *
7 * RAID-4/5/6 management functions.
8 * Thanks to Penguin Computing for making the RAID-6 development possible
9 * by donating a test server!
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * You should have received a copy of the GNU General Public License
17 * (for example /usr/src/linux/COPYING); if not, write to the Free
18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21 /*
22 * BITMAP UNPLUGGING:
23 *
24 * The sequencing for updating the bitmap reliably is a little
25 * subtle (and I got it wrong the first time) so it deserves some
26 * explanation.
27 *
28 * We group bitmap updates into batches. Each batch has a number.
29 * We may write out several batches at once, but that isn't very important.
30 * conf->seq_write is the number of the last batch successfully written.
31 * conf->seq_flush is the number of the last batch that was closed to
32 * new additions.
33 * When we discover that we will need to write to any block in a stripe
34 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
35 * the number of the batch it will be in. This is seq_flush+1.
36 * When we are ready to do a write, if that batch hasn't been written yet,
37 * we plug the array and queue the stripe for later.
38 * When an unplug happens, we increment seq_flush, thus closing the current
39 * batch.
40 * When we notice that seq_flush > seq_write, we write out all pending updates
41 * to the bitmap, and advance seq_write to where seq_flush was.
42 * This may occasionally write a bit out twice, but is sure never to
43 * miss any bits.
44 */
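/*
 * Worked example of the batching above (illustrative numbers only, not
 * taken from a real run): with conf->seq_flush == 5 and conf->seq_write == 4,
 * a stripe that gains a write now records sh->bm_seq = 6 (seq_flush + 1).
 * In do_release_stripe() it is parked on conf->bitmap_list because
 * bm_seq - seq_write > 0.  The next unplug bumps seq_flush to 6; once the
 * pending bitmap updates are written and seq_write is advanced to 6, the
 * stripe is released and its data write may proceed.
 */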
45
46 #include <linux/blkdev.h>
47 #include <linux/kthread.h>
48 #include <linux/raid/pq.h>
49 #include <linux/async_tx.h>
50 #include <linux/module.h>
51 #include <linux/async.h>
52 #include <linux/seq_file.h>
53 #include <linux/cpu.h>
54 #include <linux/slab.h>
55 #include <linux/ratelimit.h>
56 #include <linux/nodemask.h>
57 #include <linux/flex_array.h>
58 #include <trace/events/block.h>
59
60 #include "md.h"
61 #include "raid5.h"
62 #include "raid0.h"
63 #include "bitmap.h"
64
65 #define cpu_to_group(cpu) cpu_to_node(cpu)
66 #define ANY_GROUP NUMA_NO_NODE
67
68 static bool devices_handle_discard_safely = false;
69 module_param(devices_handle_discard_safely, bool, 0644);
70 MODULE_PARM_DESC(devices_handle_discard_safely,
71 "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
72 static struct workqueue_struct *raid5_wq;
73 /*
74 * Stripe cache
75 */
76
77 #define NR_STRIPES 256
78 #define STRIPE_SIZE PAGE_SIZE
79 #define STRIPE_SHIFT (PAGE_SHIFT - 9)
80 #define STRIPE_SECTORS (STRIPE_SIZE>>9)
81 #define IO_THRESHOLD 1
82 #define BYPASS_THRESHOLD 1
83 #define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
84 #define HASH_MASK (NR_HASH - 1)
85 #define MAX_STRIPE_BATCH 8
86
87 static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
88 {
89 int hash = (sect >> STRIPE_SHIFT) & HASH_MASK;
90 return &conf->stripe_hashtbl[hash];
91 }
92
93 static inline int stripe_hash_locks_hash(sector_t sect)
94 {
95 return (sect >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK;
96 }
97
98 static inline void lock_device_hash_lock(struct r5conf *conf, int hash)
99 {
100 spin_lock_irq(conf->hash_locks + hash);
101 spin_lock(&conf->device_lock);
102 }
103
104 static inline void unlock_device_hash_lock(struct r5conf *conf, int hash)
105 {
106 spin_unlock(&conf->device_lock);
107 spin_unlock_irq(conf->hash_locks + hash);
108 }
109
110 static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
111 {
112 int i;
113 local_irq_disable();
114 spin_lock(conf->hash_locks);
115 for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
116 spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks);
117 spin_lock(&conf->device_lock);
118 }
119
120 static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
121 {
122 int i;
123 spin_unlock(&conf->device_lock);
124 for (i = NR_STRIPE_HASH_LOCKS; i; i--)
125 spin_unlock(conf->hash_locks + i - 1);
126 local_irq_enable();
127 }
128
129 /* bios attached to a stripe+device for I/O are linked together in bi_sector
130 * order without overlap. There may be several bios per stripe+device, and
131 * a bio could span several devices.
132 * When walking this list for a particular stripe+device, we must never proceed
133 * beyond a bio that extends past this device, as the next bio might no longer
134 * be valid.
135 * This function is used to determine the 'next' bio in the list, given the sector
136 * of the current stripe+device
137 */
138 static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
139 {
140 int sectors = bio_sectors(bio);
141 if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
142 return bio->bi_next;
143 else
144 return NULL;
145 }
146
147 /*
148 * We maintain a biased count of active stripes in the bottom 16 bits of
149 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
150 */
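/*
 * Purely illustrative example of the packing: a bi_phys_segments value of
 * 0x00030002 read through the helpers below means 3 stripes have been
 * processed (upper 16 bits) and the biased active-stripe count is 2
 * (lower 16 bits).
 */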
151 static inline int raid5_bi_processed_stripes(struct bio *bio)
152 {
153 atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
154 return (atomic_read(segments) >> 16) & 0xffff;
155 }
156
157 static inline int raid5_dec_bi_active_stripes(struct bio *bio)
158 {
159 atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
160 return atomic_sub_return(1, segments) & 0xffff;
161 }
162
163 static inline void raid5_inc_bi_active_stripes(struct bio *bio)
164 {
165 atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
166 atomic_inc(segments);
167 }
168
169 static inline void raid5_set_bi_processed_stripes(struct bio *bio,
170 unsigned int cnt)
171 {
172 atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
173 int old, new;
174
175 do {
176 old = atomic_read(segments);
177 new = (old & 0xffff) | (cnt << 16);
178 } while (atomic_cmpxchg(segments, old, new) != old);
179 }
180
181 static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
182 {
183 atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
184 atomic_set(segments, cnt);
185 }
186
187 /* Find first data disk in a raid6 stripe */
188 static inline int raid6_d0(struct stripe_head *sh)
189 {
190 if (sh->ddf_layout)
191 /* ddf always starts from the first device */
192 return 0;
193 /* md starts just after Q block */
194 if (sh->qd_idx == sh->disks - 1)
195 return 0;
196 else
197 return sh->qd_idx + 1;
198 }
199 static inline int raid6_next_disk(int disk, int raid_disks)
200 {
201 disk++;
202 return (disk < raid_disks) ? disk : 0;
203 }
204
205 /* When walking through the disks in a raid6 stripe, starting at raid6_d0,
206 * we need to map each disk to a 'slot', where the data disks are slots
207 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
208 * is raid_disks-1. This helper does that mapping.
209 */
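/*
 * Purely illustrative example for a 6-device, non-DDF raid6 stripe
 * (syndrome_disks == 4): if pd_idx == 0 and qd_idx == 1, raid6_d0()
 * returns 2, and walking the disks from there maps devices 2,3,4,5 to
 * slots 0..3, the P device (0) to slot 4 and the Q device (1) to slot 5.
 */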
210 static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
211 int *count, int syndrome_disks)
212 {
213 int slot = *count;
214
215 if (sh->ddf_layout)
216 (*count)++;
217 if (idx == sh->pd_idx)
218 return syndrome_disks;
219 if (idx == sh->qd_idx)
220 return syndrome_disks + 1;
221 if (!sh->ddf_layout)
222 (*count)++;
223 return slot;
224 }
225
226 static void return_io(struct bio *return_bi)
227 {
228 struct bio *bi = return_bi;
229 while (bi) {
230
231 return_bi = bi->bi_next;
232 bi->bi_next = NULL;
233 bi->bi_iter.bi_size = 0;
234 trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
235 bi, 0);
236 bio_endio(bi, 0);
237 bi = return_bi;
238 }
239 }
240
241 static void print_raid5_conf (struct r5conf *conf);
242
243 static int stripe_operations_active(struct stripe_head *sh)
244 {
245 return sh->check_state || sh->reconstruct_state ||
246 test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
247 test_bit(STRIPE_COMPUTE_RUN, &sh->state);
248 }
249
250 static void raid5_wakeup_stripe_thread(struct stripe_head *sh)
251 {
252 struct r5conf *conf = sh->raid_conf;
253 struct r5worker_group *group;
254 int thread_cnt;
255 int i, cpu = sh->cpu;
256
257 if (!cpu_online(cpu)) {
258 cpu = cpumask_any(cpu_online_mask);
259 sh->cpu = cpu;
260 }
261
262 if (list_empty(&sh->lru)) {
263 struct r5worker_group *group;
264 group = conf->worker_groups + cpu_to_group(cpu);
265 list_add_tail(&sh->lru, &group->handle_list);
266 group->stripes_cnt++;
267 sh->group = group;
268 }
269
270 if (conf->worker_cnt_per_group == 0) {
271 md_wakeup_thread(conf->mddev->thread);
272 return;
273 }
274
275 group = conf->worker_groups + cpu_to_group(sh->cpu);
276
277 group->workers[0].working = true;
278 /* at least one worker should run to avoid race */
279 queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work);
280
281 thread_cnt = group->stripes_cnt / MAX_STRIPE_BATCH - 1;
282 /* wakeup more workers */
283 for (i = 1; i < conf->worker_cnt_per_group && thread_cnt > 0; i++) {
284 if (group->workers[i].working == false) {
285 group->workers[i].working = true;
286 queue_work_on(sh->cpu, raid5_wq,
287 &group->workers[i].work);
288 thread_cnt--;
289 }
290 }
291 }
292
293 static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
294 struct list_head *temp_inactive_list)
295 {
296 BUG_ON(!list_empty(&sh->lru));
297 BUG_ON(atomic_read(&conf->active_stripes)==0);
298 if (test_bit(STRIPE_HANDLE, &sh->state)) {
299 if (test_bit(STRIPE_DELAYED, &sh->state) &&
300 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
301 list_add_tail(&sh->lru, &conf->delayed_list);
302 else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
303 sh->bm_seq - conf->seq_write > 0)
304 list_add_tail(&sh->lru, &conf->bitmap_list);
305 else {
306 clear_bit(STRIPE_DELAYED, &sh->state);
307 clear_bit(STRIPE_BIT_DELAY, &sh->state);
308 if (conf->worker_cnt_per_group == 0) {
309 list_add_tail(&sh->lru, &conf->handle_list);
310 } else {
311 raid5_wakeup_stripe_thread(sh);
312 return;
313 }
314 }
315 md_wakeup_thread(conf->mddev->thread);
316 } else {
317 BUG_ON(stripe_operations_active(sh));
318 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
319 if (atomic_dec_return(&conf->preread_active_stripes)
320 < IO_THRESHOLD)
321 md_wakeup_thread(conf->mddev->thread);
322 atomic_dec(&conf->active_stripes);
323 if (!test_bit(STRIPE_EXPANDING, &sh->state))
324 list_add_tail(&sh->lru, temp_inactive_list);
325 }
326 }
327
328 static void __release_stripe(struct r5conf *conf, struct stripe_head *sh,
329 struct list_head *temp_inactive_list)
330 {
331 if (atomic_dec_and_test(&sh->count))
332 do_release_stripe(conf, sh, temp_inactive_list);
333 }
334
335 /*
336 * @hash can be NR_STRIPE_HASH_LOCKS, in which case temp_inactive_list is
337 * an array holding one inactive list per hash lock.
338 * Be careful: only one task can add/delete stripes from temp_inactive_list
339 * at a given time. Adding stripes only takes the device lock, while
340 * deleting stripes only takes the hash lock.
341 */
342 static void release_inactive_stripe_list(struct r5conf *conf,
343 struct list_head *temp_inactive_list,
344 int hash)
345 {
346 int size;
347 bool do_wakeup = false;
348 unsigned long flags;
349
350 if (hash == NR_STRIPE_HASH_LOCKS) {
351 size = NR_STRIPE_HASH_LOCKS;
352 hash = NR_STRIPE_HASH_LOCKS - 1;
353 } else
354 size = 1;
355 while (size) {
356 struct list_head *list = &temp_inactive_list[size - 1];
357
358 /*
359 * We don't hold any lock here yet, so get_active_stripe() might
360 * remove stripes from the list
361 */
362 if (!list_empty_careful(list)) {
363 spin_lock_irqsave(conf->hash_locks + hash, flags);
364 if (list_empty(conf->inactive_list + hash) &&
365 !list_empty(list))
366 atomic_dec(&conf->empty_inactive_list_nr);
367 list_splice_tail_init(list, conf->inactive_list + hash);
368 do_wakeup = true;
369 spin_unlock_irqrestore(conf->hash_locks + hash, flags);
370 }
371 size--;
372 hash--;
373 }
374
375 if (do_wakeup) {
376 wake_up(&conf->wait_for_stripe);
377 if (conf->retry_read_aligned)
378 md_wakeup_thread(conf->mddev->thread);
379 }
380 }
381
382 /* should hold conf->device_lock already */
383 static int release_stripe_list(struct r5conf *conf,
384 struct list_head *temp_inactive_list)
385 {
386 struct stripe_head *sh;
387 int count = 0;
388 struct llist_node *head;
389
390 head = llist_del_all(&conf->released_stripes);
391 head = llist_reverse_order(head);
392 while (head) {
393 int hash;
394
395 sh = llist_entry(head, struct stripe_head, release_list);
396 head = llist_next(head);
397 /* sh could be re-added after STRIPE_ON_RELEASE_LIST is cleared */
398 smp_mb();
399 clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state);
400 /*
401 * Don't worry if the bit gets set again here, because in that case
402 * the count is always > 1. The same is true for the
403 * STRIPE_ON_UNPLUG_LIST bit.
404 */
405 hash = sh->hash_lock_index;
406 __release_stripe(conf, sh, &temp_inactive_list[hash]);
407 count++;
408 }
409
410 return count;
411 }
412
413 static void release_stripe(struct stripe_head *sh)
414 {
415 struct r5conf *conf = sh->raid_conf;
416 unsigned long flags;
417 struct list_head list;
418 int hash;
419 bool wakeup;
420
421 /* Avoid release_list until the last reference.
422 */
423 if (atomic_add_unless(&sh->count, -1, 1))
424 return;
425
426 if (unlikely(!conf->mddev->thread) ||
427 test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state))
428 goto slow_path;
429 wakeup = llist_add(&sh->release_list, &conf->released_stripes);
430 if (wakeup)
431 md_wakeup_thread(conf->mddev->thread);
432 return;
433 slow_path:
434 local_irq_save(flags);
435 /* we are OK here whether STRIPE_ON_RELEASE_LIST is set or not */
436 if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
437 INIT_LIST_HEAD(&list);
438 hash = sh->hash_lock_index;
439 do_release_stripe(conf, sh, &list);
440 spin_unlock(&conf->device_lock);
441 release_inactive_stripe_list(conf, &list, hash);
442 }
443 local_irq_restore(flags);
444 }
445
446 static inline void remove_hash(struct stripe_head *sh)
447 {
448 pr_debug("remove_hash(), stripe %llu\n",
449 (unsigned long long)sh->sector);
450
451 hlist_del_init(&sh->hash);
452 }
453
454 static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh)
455 {
456 struct hlist_head *hp = stripe_hash(conf, sh->sector);
457
458 pr_debug("insert_hash(), stripe %llu\n",
459 (unsigned long long)sh->sector);
460
461 hlist_add_head(&sh->hash, hp);
462 }
463
464 /* find an idle stripe, make sure it is unhashed, and return it. */
465 static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash)
466 {
467 struct stripe_head *sh = NULL;
468 struct list_head *first;
469
470 if (list_empty(conf->inactive_list + hash))
471 goto out;
472 first = (conf->inactive_list + hash)->next;
473 sh = list_entry(first, struct stripe_head, lru);
474 list_del_init(first);
475 remove_hash(sh);
476 atomic_inc(&conf->active_stripes);
477 BUG_ON(hash != sh->hash_lock_index);
478 if (list_empty(conf->inactive_list + hash))
479 atomic_inc(&conf->empty_inactive_list_nr);
480 out:
481 return sh;
482 }
483
484 static void shrink_buffers(struct stripe_head *sh)
485 {
486 struct page *p;
487 int i;
488 int num = sh->raid_conf->pool_size;
489
490 for (i = 0; i < num ; i++) {
491 WARN_ON(sh->dev[i].page != sh->dev[i].orig_page);
492 p = sh->dev[i].page;
493 if (!p)
494 continue;
495 sh->dev[i].page = NULL;
496 put_page(p);
497 }
498 }
499
500 static int grow_buffers(struct stripe_head *sh, gfp_t gfp)
501 {
502 int i;
503 int num = sh->raid_conf->pool_size;
504
505 for (i = 0; i < num; i++) {
506 struct page *page;
507
508 if (!(page = alloc_page(gfp))) {
509 return 1;
510 }
511 sh->dev[i].page = page;
512 sh->dev[i].orig_page = page;
513 }
514 return 0;
515 }
516
517 static void raid5_build_block(struct stripe_head *sh, int i, int previous);
518 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
519 struct stripe_head *sh);
520
521 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
522 {
523 struct r5conf *conf = sh->raid_conf;
524 int i, seq;
525
526 BUG_ON(atomic_read(&sh->count) != 0);
527 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
528 BUG_ON(stripe_operations_active(sh));
529 BUG_ON(sh->batch_head);
530
531 pr_debug("init_stripe called, stripe %llu\n",
532 (unsigned long long)sector);
533 retry:
534 seq = read_seqcount_begin(&conf->gen_lock);
535 sh->generation = conf->generation - previous;
536 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
537 sh->sector = sector;
538 stripe_set_idx(sector, conf, previous, sh);
539 sh->state = 0;
540
541 for (i = sh->disks; i--; ) {
542 struct r5dev *dev = &sh->dev[i];
543
544 if (dev->toread || dev->read || dev->towrite || dev->written ||
545 test_bit(R5_LOCKED, &dev->flags)) {
546 printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
547 (unsigned long long)sh->sector, i, dev->toread,
548 dev->read, dev->towrite, dev->written,
549 test_bit(R5_LOCKED, &dev->flags));
550 WARN_ON(1);
551 }
552 dev->flags = 0;
553 raid5_build_block(sh, i, previous);
554 }
555 if (read_seqcount_retry(&conf->gen_lock, seq))
556 goto retry;
557 sh->overwrite_disks = 0;
558 insert_hash(conf, sh);
559 sh->cpu = smp_processor_id();
560 set_bit(STRIPE_BATCH_READY, &sh->state);
561 }
562
563 static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
564 short generation)
565 {
566 struct stripe_head *sh;
567
568 pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
569 hlist_for_each_entry(sh, stripe_hash(conf, sector), hash)
570 if (sh->sector == sector && sh->generation == generation)
571 return sh;
572 pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
573 return NULL;
574 }
575
576 /*
577 * Need to check if array has failed when deciding whether to:
578 * - start an array
579 * - remove non-faulty devices
580 * - add a spare
581 * - allow a reshape
582 * This determination is simple when no reshape is happening.
583 * However if there is a reshape, we need to carefully check
584 * both the before and after sections.
585 * This is because some failed devices may only affect one
586 * of the two sections, and some non-in_sync devices may
587 * be insync in the section most affected by failed devices.
588 */
589 static int calc_degraded(struct r5conf *conf)
590 {
591 int degraded, degraded2;
592 int i;
593
594 rcu_read_lock();
595 degraded = 0;
596 for (i = 0; i < conf->previous_raid_disks; i++) {
597 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
598 if (rdev && test_bit(Faulty, &rdev->flags))
599 rdev = rcu_dereference(conf->disks[i].replacement);
600 if (!rdev || test_bit(Faulty, &rdev->flags))
601 degraded++;
602 else if (test_bit(In_sync, &rdev->flags))
603 ;
604 else
605 /* not in-sync or faulty.
606 * If the reshape increases the number of devices,
607 * this is being recovered by the reshape, so
608 * this 'previous' section is not in_sync.
609 * If the number of devices is being reduced however,
610 * the device can only be part of the array if
611 * we are reverting a reshape, so this section will
612 * be in-sync.
613 */
614 if (conf->raid_disks >= conf->previous_raid_disks)
615 degraded++;
616 }
617 rcu_read_unlock();
618 if (conf->raid_disks == conf->previous_raid_disks)
619 return degraded;
620 rcu_read_lock();
621 degraded2 = 0;
622 for (i = 0; i < conf->raid_disks; i++) {
623 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
624 if (rdev && test_bit(Faulty, &rdev->flags))
625 rdev = rcu_dereference(conf->disks[i].replacement);
626 if (!rdev || test_bit(Faulty, &rdev->flags))
627 degraded2++;
628 else if (test_bit(In_sync, &rdev->flags))
629 ;
630 else
631 /* not in-sync or faulty.
632 * If reshape increases the number of devices, this
633 * section has already been recovered, else it
634 * almost certainly hasn't.
635 */
636 if (conf->raid_disks <= conf->previous_raid_disks)
637 degraded2++;
638 }
639 rcu_read_unlock();
640 if (degraded2 > degraded)
641 return degraded2;
642 return degraded;
643 }
644
645 static int has_failed(struct r5conf *conf)
646 {
647 int degraded;
648
649 if (conf->mddev->reshape_position == MaxSector)
650 return conf->mddev->degraded > conf->max_degraded;
651
652 degraded = calc_degraded(conf);
653 if (degraded > conf->max_degraded)
654 return 1;
655 return 0;
656 }
657
658 static struct stripe_head *
659 get_active_stripe(struct r5conf *conf, sector_t sector,
660 int previous, int noblock, int noquiesce)
661 {
662 struct stripe_head *sh;
663 int hash = stripe_hash_locks_hash(sector);
664
665 pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
666
667 spin_lock_irq(conf->hash_locks + hash);
668
669 do {
670 wait_event_lock_irq(conf->wait_for_stripe,
671 conf->quiesce == 0 || noquiesce,
672 *(conf->hash_locks + hash));
673 sh = __find_stripe(conf, sector, conf->generation - previous);
674 if (!sh) {
675 if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) {
676 sh = get_free_stripe(conf, hash);
677 if (!sh && llist_empty(&conf->released_stripes) &&
678 !test_bit(R5_DID_ALLOC, &conf->cache_state))
679 set_bit(R5_ALLOC_MORE,
680 &conf->cache_state);
681 }
682 if (noblock && sh == NULL)
683 break;
684 if (!sh) {
685 set_bit(R5_INACTIVE_BLOCKED,
686 &conf->cache_state);
687 wait_event_lock_irq(
688 conf->wait_for_stripe,
689 !list_empty(conf->inactive_list + hash) &&
690 (atomic_read(&conf->active_stripes)
691 < (conf->max_nr_stripes * 3 / 4)
692 || !test_bit(R5_INACTIVE_BLOCKED,
693 &conf->cache_state)),
694 *(conf->hash_locks + hash));
695 clear_bit(R5_INACTIVE_BLOCKED,
696 &conf->cache_state);
697 } else {
698 init_stripe(sh, sector, previous);
699 atomic_inc(&sh->count);
700 }
701 } else if (!atomic_inc_not_zero(&sh->count)) {
702 spin_lock(&conf->device_lock);
703 if (!atomic_read(&sh->count)) {
704 if (!test_bit(STRIPE_HANDLE, &sh->state))
705 atomic_inc(&conf->active_stripes);
706 BUG_ON(list_empty(&sh->lru) &&
707 !test_bit(STRIPE_EXPANDING, &sh->state));
708 list_del_init(&sh->lru);
709 if (sh->group) {
710 sh->group->stripes_cnt--;
711 sh->group = NULL;
712 }
713 }
714 atomic_inc(&sh->count);
715 spin_unlock(&conf->device_lock);
716 }
717 } while (sh == NULL);
718
719 spin_unlock_irq(conf->hash_locks + hash);
720 return sh;
721 }
722
723 static bool is_full_stripe_write(struct stripe_head *sh)
724 {
725 BUG_ON(sh->overwrite_disks > (sh->disks - sh->raid_conf->max_degraded));
726 return sh->overwrite_disks == (sh->disks - sh->raid_conf->max_degraded);
727 }
728
729 static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
730 {
731 local_irq_disable();
732 if (sh1 > sh2) {
733 spin_lock(&sh2->stripe_lock);
734 spin_lock_nested(&sh1->stripe_lock, 1);
735 } else {
736 spin_lock(&sh1->stripe_lock);
737 spin_lock_nested(&sh2->stripe_lock, 1);
738 }
739 }
740
741 static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
742 {
743 spin_unlock(&sh1->stripe_lock);
744 spin_unlock(&sh2->stripe_lock);
745 local_irq_enable();
746 }
747
748 /* Only a freshly initialised, full-stripe, normal-write stripe can be added to a batch list */
749 static bool stripe_can_batch(struct stripe_head *sh)
750 {
751 return test_bit(STRIPE_BATCH_READY, &sh->state) &&
752 is_full_stripe_write(sh);
753 }
754
755 /* we only search backwards (towards the previous stripe) */
756 static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh)
757 {
758 struct stripe_head *head;
759 sector_t head_sector, tmp_sec;
760 int hash;
761 int dd_idx;
762
763 if (!stripe_can_batch(sh))
764 return;
765 /* Don't cross chunk boundaries, so the stripes' pd_idx/qd_idx are the same */
766 tmp_sec = sh->sector;
767 if (!sector_div(tmp_sec, conf->chunk_sectors))
768 return;
769 head_sector = sh->sector - STRIPE_SECTORS;
770
771 hash = stripe_hash_locks_hash(head_sector);
772 spin_lock_irq(conf->hash_locks + hash);
773 head = __find_stripe(conf, head_sector, conf->generation);
774 if (head && !atomic_inc_not_zero(&head->count)) {
775 spin_lock(&conf->device_lock);
776 if (!atomic_read(&head->count)) {
777 if (!test_bit(STRIPE_HANDLE, &head->state))
778 atomic_inc(&conf->active_stripes);
779 BUG_ON(list_empty(&head->lru) &&
780 !test_bit(STRIPE_EXPANDING, &head->state));
781 list_del_init(&head->lru);
782 if (head->group) {
783 head->group->stripes_cnt--;
784 head->group = NULL;
785 }
786 }
787 atomic_inc(&head->count);
788 spin_unlock(&conf->device_lock);
789 }
790 spin_unlock_irq(conf->hash_locks + hash);
791
792 if (!head)
793 return;
794 if (!stripe_can_batch(head))
795 goto out;
796
797 lock_two_stripes(head, sh);
798 /* clear_batch_ready() clears the flag */
799 if (!stripe_can_batch(head) || !stripe_can_batch(sh))
800 goto unlock_out;
801
802 if (sh->batch_head)
803 goto unlock_out;
804
805 dd_idx = 0;
806 while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
807 dd_idx++;
808 if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw)
809 goto unlock_out;
810
811 if (head->batch_head) {
812 spin_lock(&head->batch_head->batch_lock);
813 /* This batch list is already running */
814 if (!stripe_can_batch(head)) {
815 spin_unlock(&head->batch_head->batch_lock);
816 goto unlock_out;
817 }
818
819 /*
820 * at this point, head's BATCH_READY could be cleared, but we
821 * can still add the stripe to the batch list
822 */
823 list_add(&sh->batch_list, &head->batch_list);
824 spin_unlock(&head->batch_head->batch_lock);
825
826 sh->batch_head = head->batch_head;
827 } else {
828 head->batch_head = head;
829 sh->batch_head = head->batch_head;
830 spin_lock(&head->batch_lock);
831 list_add_tail(&sh->batch_list, &head->batch_list);
832 spin_unlock(&head->batch_lock);
833 }
834
835 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
836 if (atomic_dec_return(&conf->preread_active_stripes)
837 < IO_THRESHOLD)
838 md_wakeup_thread(conf->mddev->thread);
839
840 atomic_inc(&sh->count);
841 unlock_out:
842 unlock_two_stripes(head, sh);
843 out:
844 release_stripe(head);
845 }
846
847 /* Determine if 'data_offset' or 'new_data_offset' should be used
848 * in this stripe_head.
849 */
850 static int use_new_offset(struct r5conf *conf, struct stripe_head *sh)
851 {
852 sector_t progress = conf->reshape_progress;
853 /* Need a memory barrier to make sure we see the value
854 * of conf->generation, or ->data_offset that was set before
855 * reshape_progress was updated.
856 */
857 smp_rmb();
858 if (progress == MaxSector)
859 return 0;
860 if (sh->generation == conf->generation - 1)
861 return 0;
862 /* We are in a reshape, and this is a new-generation stripe,
863 * so use new_data_offset.
864 */
865 return 1;
866 }
867
868 static void
869 raid5_end_read_request(struct bio *bi, int error);
870 static void
871 raid5_end_write_request(struct bio *bi, int error);
872
873 static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
874 {
875 struct r5conf *conf = sh->raid_conf;
876 int i, disks = sh->disks;
877 struct stripe_head *head_sh = sh;
878
879 might_sleep();
880
881 for (i = disks; i--; ) {
882 int rw;
883 int replace_only = 0;
884 struct bio *bi, *rbi;
885 struct md_rdev *rdev, *rrdev = NULL;
886
887 sh = head_sh;
888 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
889 if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
890 rw = WRITE_FUA;
891 else
892 rw = WRITE;
893 if (test_bit(R5_Discard, &sh->dev[i].flags))
894 rw |= REQ_DISCARD;
895 } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
896 rw = READ;
897 else if (test_and_clear_bit(R5_WantReplace,
898 &sh->dev[i].flags)) {
899 rw = WRITE;
900 replace_only = 1;
901 } else
902 continue;
903 if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags))
904 rw |= REQ_SYNC;
905
906 again:
907 bi = &sh->dev[i].req;
908 rbi = &sh->dev[i].rreq; /* For writing to replacement */
909
910 rcu_read_lock();
911 rrdev = rcu_dereference(conf->disks[i].replacement);
912 smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */
913 rdev = rcu_dereference(conf->disks[i].rdev);
914 if (!rdev) {
915 rdev = rrdev;
916 rrdev = NULL;
917 }
918 if (rw & WRITE) {
919 if (replace_only)
920 rdev = NULL;
921 if (rdev == rrdev)
922 /* We raced and saw duplicates */
923 rrdev = NULL;
924 } else {
925 if (test_bit(R5_ReadRepl, &head_sh->dev[i].flags) && rrdev)
926 rdev = rrdev;
927 rrdev = NULL;
928 }
929
930 if (rdev && test_bit(Faulty, &rdev->flags))
931 rdev = NULL;
932 if (rdev)
933 atomic_inc(&rdev->nr_pending);
934 if (rrdev && test_bit(Faulty, &rrdev->flags))
935 rrdev = NULL;
936 if (rrdev)
937 atomic_inc(&rrdev->nr_pending);
938 rcu_read_unlock();
939
940 /* We have already checked bad blocks for reads. Now we
941 * need to check for writes. We never accept write errors
942 * on the replacement, so we don't need to check rrdev.
943 */
944 while ((rw & WRITE) && rdev &&
945 test_bit(WriteErrorSeen, &rdev->flags)) {
946 sector_t first_bad;
947 int bad_sectors;
948 int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
949 &first_bad, &bad_sectors);
950 if (!bad)
951 break;
952
953 if (bad < 0) {
954 set_bit(BlockedBadBlocks, &rdev->flags);
955 if (!conf->mddev->external &&
956 conf->mddev->flags) {
957 /* It is very unlikely, but we might
958 * still need to write out the
959 * bad block log - better give it
960 * a chance. */
961 md_check_recovery(conf->mddev);
962 }
963 /*
964 * Because md_wait_for_blocked_rdev
965 * will dec nr_pending, we must
966 * increment it first.
967 */
968 atomic_inc(&rdev->nr_pending);
969 md_wait_for_blocked_rdev(rdev, conf->mddev);
970 } else {
971 /* Acknowledged bad block - skip the write */
972 rdev_dec_pending(rdev, conf->mddev);
973 rdev = NULL;
974 }
975 }
976
977 if (rdev) {
978 if (s->syncing || s->expanding || s->expanded
979 || s->replacing)
980 md_sync_acct(rdev->bdev, STRIPE_SECTORS);
981
982 set_bit(STRIPE_IO_STARTED, &sh->state);
983
984 bio_reset(bi);
985 bi->bi_bdev = rdev->bdev;
986 bi->bi_rw = rw;
987 bi->bi_end_io = (rw & WRITE)
988 ? raid5_end_write_request
989 : raid5_end_read_request;
990 bi->bi_private = sh;
991
992 pr_debug("%s: for %llu schedule op %ld on disc %d\n",
993 __func__, (unsigned long long)sh->sector,
994 bi->bi_rw, i);
995 atomic_inc(&sh->count);
996 if (sh != head_sh)
997 atomic_inc(&head_sh->count);
998 if (use_new_offset(conf, sh))
999 bi->bi_iter.bi_sector = (sh->sector
1000 + rdev->new_data_offset);
1001 else
1002 bi->bi_iter.bi_sector = (sh->sector
1003 + rdev->data_offset);
1004 if (test_bit(R5_ReadNoMerge, &head_sh->dev[i].flags))
1005 bi->bi_rw |= REQ_NOMERGE;
1006
1007 if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
1008 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
1009 sh->dev[i].vec.bv_page = sh->dev[i].page;
1010 bi->bi_vcnt = 1;
1011 bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
1012 bi->bi_io_vec[0].bv_offset = 0;
1013 bi->bi_iter.bi_size = STRIPE_SIZE;
1014 /*
1015 * If this is a discard request, set bi_vcnt to 0. We don't
1016 * want to confuse SCSI because SCSI will replace the payload
1017 */
1018 if (rw & REQ_DISCARD)
1019 bi->bi_vcnt = 0;
1020 if (rrdev)
1021 set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
1022
1023 if (conf->mddev->gendisk)
1024 trace_block_bio_remap(bdev_get_queue(bi->bi_bdev),
1025 bi, disk_devt(conf->mddev->gendisk),
1026 sh->dev[i].sector);
1027 generic_make_request(bi);
1028 }
1029 if (rrdev) {
1030 if (s->syncing || s->expanding || s->expanded
1031 || s->replacing)
1032 md_sync_acct(rrdev->bdev, STRIPE_SECTORS);
1033
1034 set_bit(STRIPE_IO_STARTED, &sh->state);
1035
1036 bio_reset(rbi);
1037 rbi->bi_bdev = rrdev->bdev;
1038 rbi->bi_rw = rw;
1039 BUG_ON(!(rw & WRITE));
1040 rbi->bi_end_io = raid5_end_write_request;
1041 rbi->bi_private = sh;
1042
1043 pr_debug("%s: for %llu schedule op %ld on "
1044 "replacement disc %d\n",
1045 __func__, (unsigned long long)sh->sector,
1046 rbi->bi_rw, i);
1047 atomic_inc(&sh->count);
1048 if (sh != head_sh)
1049 atomic_inc(&head_sh->count);
1050 if (use_new_offset(conf, sh))
1051 rbi->bi_iter.bi_sector = (sh->sector
1052 + rrdev->new_data_offset);
1053 else
1054 rbi->bi_iter.bi_sector = (sh->sector
1055 + rrdev->data_offset);
1056 if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
1057 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
1058 sh->dev[i].rvec.bv_page = sh->dev[i].page;
1059 rbi->bi_vcnt = 1;
1060 rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
1061 rbi->bi_io_vec[0].bv_offset = 0;
1062 rbi->bi_iter.bi_size = STRIPE_SIZE;
1063 /*
1064 * If this is a discard request, set bi_vcnt to 0. We don't
1065 * want to confuse SCSI because SCSI will replace the payload
1066 */
1067 if (rw & REQ_DISCARD)
1068 rbi->bi_vcnt = 0;
1069 if (conf->mddev->gendisk)
1070 trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
1071 rbi, disk_devt(conf->mddev->gendisk),
1072 sh->dev[i].sector);
1073 generic_make_request(rbi);
1074 }
1075 if (!rdev && !rrdev) {
1076 if (rw & WRITE)
1077 set_bit(STRIPE_DEGRADED, &sh->state);
1078 pr_debug("skip op %ld on disc %d for sector %llu\n",
1079 bi->bi_rw, i, (unsigned long long)sh->sector);
1080 clear_bit(R5_LOCKED, &sh->dev[i].flags);
1081 set_bit(STRIPE_HANDLE, &sh->state);
1082 }
1083
1084 if (!head_sh->batch_head)
1085 continue;
1086 sh = list_first_entry(&sh->batch_list, struct stripe_head,
1087 batch_list);
1088 if (sh != head_sh)
1089 goto again;
1090 }
1091 }
1092
1093 static struct dma_async_tx_descriptor *
1094 async_copy_data(int frombio, struct bio *bio, struct page **page,
1095 sector_t sector, struct dma_async_tx_descriptor *tx,
1096 struct stripe_head *sh)
1097 {
1098 struct bio_vec bvl;
1099 struct bvec_iter iter;
1100 struct page *bio_page;
1101 int page_offset;
1102 struct async_submit_ctl submit;
1103 enum async_tx_flags flags = 0;
1104
1105 if (bio->bi_iter.bi_sector >= sector)
1106 page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
1107 else
1108 page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
1109
1110 if (frombio)
1111 flags |= ASYNC_TX_FENCE;
1112 init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
1113
1114 bio_for_each_segment(bvl, bio, iter) {
1115 int len = bvl.bv_len;
1116 int clen;
1117 int b_offset = 0;
1118
1119 if (page_offset < 0) {
1120 b_offset = -page_offset;
1121 page_offset += b_offset;
1122 len -= b_offset;
1123 }
1124
1125 if (len > 0 && page_offset + len > STRIPE_SIZE)
1126 clen = STRIPE_SIZE - page_offset;
1127 else
1128 clen = len;
1129
1130 if (clen > 0) {
1131 b_offset += bvl.bv_offset;
1132 bio_page = bvl.bv_page;
1133 if (frombio) {
1134 if (sh->raid_conf->skip_copy &&
1135 b_offset == 0 && page_offset == 0 &&
1136 clen == STRIPE_SIZE)
1137 *page = bio_page;
1138 else
1139 tx = async_memcpy(*page, bio_page, page_offset,
1140 b_offset, clen, &submit);
1141 } else
1142 tx = async_memcpy(bio_page, *page, b_offset,
1143 page_offset, clen, &submit);
1144 }
1145 /* chain the operations */
1146 submit.depend_tx = tx;
1147
1148 if (clen < len) /* hit end of page */
1149 break;
1150 page_offset += len;
1151 }
1152
1153 return tx;
1154 }
1155
1156 static void ops_complete_biofill(void *stripe_head_ref)
1157 {
1158 struct stripe_head *sh = stripe_head_ref;
1159 struct bio *return_bi = NULL;
1160 int i;
1161
1162 pr_debug("%s: stripe %llu\n", __func__,
1163 (unsigned long long)sh->sector);
1164
1165 /* clear completed biofills */
1166 for (i = sh->disks; i--; ) {
1167 struct r5dev *dev = &sh->dev[i];
1168
1169 /* acknowledge completion of a biofill operation */
1170 /* and check if we need to reply to a read request,
1171 * new R5_Wantfill requests are held off until
1172 * !STRIPE_BIOFILL_RUN
1173 */
1174 if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
1175 struct bio *rbi, *rbi2;
1176
1177 BUG_ON(!dev->read);
1178 rbi = dev->read;
1179 dev->read = NULL;
1180 while (rbi && rbi->bi_iter.bi_sector <
1181 dev->sector + STRIPE_SECTORS) {
1182 rbi2 = r5_next_bio(rbi, dev->sector);
1183 if (!raid5_dec_bi_active_stripes(rbi)) {
1184 rbi->bi_next = return_bi;
1185 return_bi = rbi;
1186 }
1187 rbi = rbi2;
1188 }
1189 }
1190 }
1191 clear_bit(STRIPE_BIOFILL_RUN, &sh->state);
1192
1193 return_io(return_bi);
1194
1195 set_bit(STRIPE_HANDLE, &sh->state);
1196 release_stripe(sh);
1197 }
1198
1199 static void ops_run_biofill(struct stripe_head *sh)
1200 {
1201 struct dma_async_tx_descriptor *tx = NULL;
1202 struct async_submit_ctl submit;
1203 int i;
1204
1205 BUG_ON(sh->batch_head);
1206 pr_debug("%s: stripe %llu\n", __func__,
1207 (unsigned long long)sh->sector);
1208
1209 for (i = sh->disks; i--; ) {
1210 struct r5dev *dev = &sh->dev[i];
1211 if (test_bit(R5_Wantfill, &dev->flags)) {
1212 struct bio *rbi;
1213 spin_lock_irq(&sh->stripe_lock);
1214 dev->read = rbi = dev->toread;
1215 dev->toread = NULL;
1216 spin_unlock_irq(&sh->stripe_lock);
1217 while (rbi && rbi->bi_iter.bi_sector <
1218 dev->sector + STRIPE_SECTORS) {
1219 tx = async_copy_data(0, rbi, &dev->page,
1220 dev->sector, tx, sh);
1221 rbi = r5_next_bio(rbi, dev->sector);
1222 }
1223 }
1224 }
1225
1226 atomic_inc(&sh->count);
1227 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
1228 async_trigger_callback(&submit);
1229 }
1230
1231 static void mark_target_uptodate(struct stripe_head *sh, int target)
1232 {
1233 struct r5dev *tgt;
1234
1235 if (target < 0)
1236 return;
1237
1238 tgt = &sh->dev[target];
1239 set_bit(R5_UPTODATE, &tgt->flags);
1240 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
1241 clear_bit(R5_Wantcompute, &tgt->flags);
1242 }
1243
1244 static void ops_complete_compute(void *stripe_head_ref)
1245 {
1246 struct stripe_head *sh = stripe_head_ref;
1247
1248 pr_debug("%s: stripe %llu\n", __func__,
1249 (unsigned long long)sh->sector);
1250
1251 /* mark the computed target(s) as uptodate */
1252 mark_target_uptodate(sh, sh->ops.target);
1253 mark_target_uptodate(sh, sh->ops.target2);
1254
1255 clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
1256 if (sh->check_state == check_state_compute_run)
1257 sh->check_state = check_state_compute_result;
1258 set_bit(STRIPE_HANDLE, &sh->state);
1259 release_stripe(sh);
1260 }
1261
1262 /* return a pointer to the address conversion region of the scribble buffer */
1263 static addr_conv_t *to_addr_conv(struct stripe_head *sh,
1264 struct raid5_percpu *percpu, int i)
1265 {
1266 void *addr;
1267
1268 addr = flex_array_get(percpu->scribble, i);
1269 return addr + sizeof(struct page *) * (sh->disks + 2);
1270 }
1271
1272 /* return a pointer to the page list region of the scribble buffer */
1273 static struct page **to_addr_page(struct raid5_percpu *percpu, int i)
1274 {
1275 void *addr;
1276
1277 addr = flex_array_get(percpu->scribble, i);
1278 return addr;
1279 }
1280
1281 static struct dma_async_tx_descriptor *
1282 ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
1283 {
1284 int disks = sh->disks;
1285 struct page **xor_srcs = to_addr_page(percpu, 0);
1286 int target = sh->ops.target;
1287 struct r5dev *tgt = &sh->dev[target];
1288 struct page *xor_dest = tgt->page;
1289 int count = 0;
1290 struct dma_async_tx_descriptor *tx;
1291 struct async_submit_ctl submit;
1292 int i;
1293
1294 BUG_ON(sh->batch_head);
1295
1296 pr_debug("%s: stripe %llu block: %d\n",
1297 __func__, (unsigned long long)sh->sector, target);
1298 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
1299
1300 for (i = disks; i--; )
1301 if (i != target)
1302 xor_srcs[count++] = sh->dev[i].page;
1303
1304 atomic_inc(&sh->count);
1305
1306 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
1307 ops_complete_compute, sh, to_addr_conv(sh, percpu, 0));
1308 if (unlikely(count == 1))
1309 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
1310 else
1311 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
1312
1313 return tx;
1314 }
1315
1316 /* set_syndrome_sources - populate source buffers for gen_syndrome
1317 * @srcs - (struct page *) array of size sh->disks
1318 * @sh - stripe_head to parse
1319 *
1320 * Populates srcs in proper layout order for the stripe and returns the
1321 * 'count' of sources to be used in a call to async_gen_syndrome. The P
1322 * destination buffer is recorded in srcs[count] and the Q destination
1323 * is recorded in srcs[count+1].
1324 */
1325 static int set_syndrome_sources(struct page **srcs,
1326 struct stripe_head *sh,
1327 int srctype)
1328 {
1329 int disks = sh->disks;
1330 int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
1331 int d0_idx = raid6_d0(sh);
1332 int count;
1333 int i;
1334
1335 for (i = 0; i < disks; i++)
1336 srcs[i] = NULL;
1337
1338 count = 0;
1339 i = d0_idx;
1340 do {
1341 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
1342 struct r5dev *dev = &sh->dev[i];
1343
1344 if (i == sh->qd_idx || i == sh->pd_idx ||
1345 (srctype == SYNDROME_SRC_ALL) ||
1346 (srctype == SYNDROME_SRC_WANT_DRAIN &&
1347 test_bit(R5_Wantdrain, &dev->flags)) ||
1348 (srctype == SYNDROME_SRC_WRITTEN &&
1349 dev->written))
1350 srcs[slot] = sh->dev[i].page;
1351 i = raid6_next_disk(i, disks);
1352 } while (i != d0_idx);
1353
1354 return syndrome_disks;
1355 }
1356
1357 static struct dma_async_tx_descriptor *
1358 ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
1359 {
1360 int disks = sh->disks;
1361 struct page **blocks = to_addr_page(percpu, 0);
1362 int target;
1363 int qd_idx = sh->qd_idx;
1364 struct dma_async_tx_descriptor *tx;
1365 struct async_submit_ctl submit;
1366 struct r5dev *tgt;
1367 struct page *dest;
1368 int i;
1369 int count;
1370
1371 BUG_ON(sh->batch_head);
1372 if (sh->ops.target < 0)
1373 target = sh->ops.target2;
1374 else if (sh->ops.target2 < 0)
1375 target = sh->ops.target;
1376 else
1377 /* we should only have one valid target */
1378 BUG();
1379 BUG_ON(target < 0);
1380 pr_debug("%s: stripe %llu block: %d\n",
1381 __func__, (unsigned long long)sh->sector, target);
1382
1383 tgt = &sh->dev[target];
1384 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
1385 dest = tgt->page;
1386
1387 atomic_inc(&sh->count);
1388
1389 if (target == qd_idx) {
1390 count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL);
1391 blocks[count] = NULL; /* regenerating p is not necessary */
1392 BUG_ON(blocks[count+1] != dest); /* q should already be set */
1393 init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
1394 ops_complete_compute, sh,
1395 to_addr_conv(sh, percpu, 0));
1396 tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
1397 } else {
1398 /* Compute any data- or p-drive using XOR */
1399 count = 0;
1400 for (i = disks; i-- ; ) {
1401 if (i == target || i == qd_idx)
1402 continue;
1403 blocks[count++] = sh->dev[i].page;
1404 }
1405
1406 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
1407 NULL, ops_complete_compute, sh,
1408 to_addr_conv(sh, percpu, 0));
1409 tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit);
1410 }
1411
1412 return tx;
1413 }
1414
1415 static struct dma_async_tx_descriptor *
1416 ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
1417 {
1418 int i, count, disks = sh->disks;
1419 int syndrome_disks = sh->ddf_layout ? disks : disks-2;
1420 int d0_idx = raid6_d0(sh);
1421 int faila = -1, failb = -1;
1422 int target = sh->ops.target;
1423 int target2 = sh->ops.target2;
1424 struct r5dev *tgt = &sh->dev[target];
1425 struct r5dev *tgt2 = &sh->dev[target2];
1426 struct dma_async_tx_descriptor *tx;
1427 struct page **blocks = to_addr_page(percpu, 0);
1428 struct async_submit_ctl submit;
1429
1430 BUG_ON(sh->batch_head);
1431 pr_debug("%s: stripe %llu block1: %d block2: %d\n",
1432 __func__, (unsigned long long)sh->sector, target, target2);
1433 BUG_ON(target < 0 || target2 < 0);
1434 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
1435 BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags));
1436
1437 /* we need to open-code set_syndrome_sources to handle the
1438 * slot number conversion for 'faila' and 'failb'
1439 */
1440 for (i = 0; i < disks ; i++)
1441 blocks[i] = NULL;
1442 count = 0;
1443 i = d0_idx;
1444 do {
1445 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
1446
1447 blocks[slot] = sh->dev[i].page;
1448
1449 if (i == target)
1450 faila = slot;
1451 if (i == target2)
1452 failb = slot;
1453 i = raid6_next_disk(i, disks);
1454 } while (i != d0_idx);
1455
1456 BUG_ON(faila == failb);
1457 if (failb < faila)
1458 swap(faila, failb);
1459 pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
1460 __func__, (unsigned long long)sh->sector, faila, failb);
1461
1462 atomic_inc(&sh->count);
1463
1464 if (failb == syndrome_disks+1) {
1465 /* Q disk is one of the missing disks */
1466 if (faila == syndrome_disks) {
1467 /* Missing P+Q, just recompute */
1468 init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
1469 ops_complete_compute, sh,
1470 to_addr_conv(sh, percpu, 0));
1471 return async_gen_syndrome(blocks, 0, syndrome_disks+2,
1472 STRIPE_SIZE, &submit);
1473 } else {
1474 struct page *dest;
1475 int data_target;
1476 int qd_idx = sh->qd_idx;
1477
1478 /* Missing D+Q: recompute D from P, then recompute Q */
1479 if (target == qd_idx)
1480 data_target = target2;
1481 else
1482 data_target = target;
1483
1484 count = 0;
1485 for (i = disks; i-- ; ) {
1486 if (i == data_target || i == qd_idx)
1487 continue;
1488 blocks[count++] = sh->dev[i].page;
1489 }
1490 dest = sh->dev[data_target].page;
1491 init_async_submit(&submit,
1492 ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
1493 NULL, NULL, NULL,
1494 to_addr_conv(sh, percpu, 0));
1495 tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE,
1496 &submit);
1497
1498 count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL);
1499 init_async_submit(&submit, ASYNC_TX_FENCE, tx,
1500 ops_complete_compute, sh,
1501 to_addr_conv(sh, percpu, 0));
1502 return async_gen_syndrome(blocks, 0, count+2,
1503 STRIPE_SIZE, &submit);
1504 }
1505 } else {
1506 init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
1507 ops_complete_compute, sh,
1508 to_addr_conv(sh, percpu, 0));
1509 if (failb == syndrome_disks) {
1510 /* We're missing D+P. */
1511 return async_raid6_datap_recov(syndrome_disks+2,
1512 STRIPE_SIZE, faila,
1513 blocks, &submit);
1514 } else {
1515 /* We're missing D+D. */
1516 return async_raid6_2data_recov(syndrome_disks+2,
1517 STRIPE_SIZE, faila, failb,
1518 blocks, &submit);
1519 }
1520 }
1521 }
1522
1523 static void ops_complete_prexor(void *stripe_head_ref)
1524 {
1525 struct stripe_head *sh = stripe_head_ref;
1526
1527 pr_debug("%s: stripe %llu\n", __func__,
1528 (unsigned long long)sh->sector);
1529 }
1530
1531 static struct dma_async_tx_descriptor *
1532 ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu,
1533 struct dma_async_tx_descriptor *tx)
1534 {
1535 int disks = sh->disks;
1536 struct page **xor_srcs = to_addr_page(percpu, 0);
1537 int count = 0, pd_idx = sh->pd_idx, i;
1538 struct async_submit_ctl submit;
1539
1540 /* existing parity data subtracted */
1541 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
1542
1543 BUG_ON(sh->batch_head);
1544 pr_debug("%s: stripe %llu\n", __func__,
1545 (unsigned long long)sh->sector);
1546
1547 for (i = disks; i--; ) {
1548 struct r5dev *dev = &sh->dev[i];
1549 /* Only process blocks that are known to be uptodate */
1550 if (test_bit(R5_Wantdrain, &dev->flags))
1551 xor_srcs[count++] = dev->page;
1552 }
1553
1554 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
1555 ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0));
1556 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
1557
1558 return tx;
1559 }
1560
1561 static struct dma_async_tx_descriptor *
1562 ops_run_prexor6(struct stripe_head *sh, struct raid5_percpu *percpu,
1563 struct dma_async_tx_descriptor *tx)
1564 {
1565 struct page **blocks = to_addr_page(percpu, 0);
1566 int count;
1567 struct async_submit_ctl submit;
1568
1569 pr_debug("%s: stripe %llu\n", __func__,
1570 (unsigned long long)sh->sector);
1571
1572 count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_WANT_DRAIN);
1573
1574 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_PQ_XOR_DST, tx,
1575 ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0));
1576 tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
1577
1578 return tx;
1579 }
1580
1581 static struct dma_async_tx_descriptor *
1582 ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
1583 {
1584 int disks = sh->disks;
1585 int i;
1586 struct stripe_head *head_sh = sh;
1587
1588 pr_debug("%s: stripe %llu\n", __func__,
1589 (unsigned long long)sh->sector);
1590
1591 for (i = disks; i--; ) {
1592 struct r5dev *dev;
1593 struct bio *chosen;
1594
1595 sh = head_sh;
1596 if (test_and_clear_bit(R5_Wantdrain, &head_sh->dev[i].flags)) {
1597 struct bio *wbi;
1598
1599 again:
1600 dev = &sh->dev[i];
1601 spin_lock_irq(&sh->stripe_lock);
1602 chosen = dev->towrite;
1603 dev->towrite = NULL;
1604 sh->overwrite_disks = 0;
1605 BUG_ON(dev->written);
1606 wbi = dev->written = chosen;
1607 spin_unlock_irq(&sh->stripe_lock);
1608 WARN_ON(dev->page != dev->orig_page);
1609
1610 while (wbi && wbi->bi_iter.bi_sector <
1611 dev->sector + STRIPE_SECTORS) {
1612 if (wbi->bi_rw & REQ_FUA)
1613 set_bit(R5_WantFUA, &dev->flags);
1614 if (wbi->bi_rw & REQ_SYNC)
1615 set_bit(R5_SyncIO, &dev->flags);
1616 if (wbi->bi_rw & REQ_DISCARD)
1617 set_bit(R5_Discard, &dev->flags);
1618 else {
1619 tx = async_copy_data(1, wbi, &dev->page,
1620 dev->sector, tx, sh);
1621 if (dev->page != dev->orig_page) {
1622 set_bit(R5_SkipCopy, &dev->flags);
1623 clear_bit(R5_UPTODATE, &dev->flags);
1624 clear_bit(R5_OVERWRITE, &dev->flags);
1625 }
1626 }
1627 wbi = r5_next_bio(wbi, dev->sector);
1628 }
1629
1630 if (head_sh->batch_head) {
1631 sh = list_first_entry(&sh->batch_list,
1632 struct stripe_head,
1633 batch_list);
1634 if (sh == head_sh)
1635 continue;
1636 goto again;
1637 }
1638 }
1639 }
1640
1641 return tx;
1642 }
1643
1644 static void ops_complete_reconstruct(void *stripe_head_ref)
1645 {
1646 struct stripe_head *sh = stripe_head_ref;
1647 int disks = sh->disks;
1648 int pd_idx = sh->pd_idx;
1649 int qd_idx = sh->qd_idx;
1650 int i;
1651 bool fua = false, sync = false, discard = false;
1652
1653 pr_debug("%s: stripe %llu\n", __func__,
1654 (unsigned long long)sh->sector);
1655
1656 for (i = disks; i--; ) {
1657 fua |= test_bit(R5_WantFUA, &sh->dev[i].flags);
1658 sync |= test_bit(R5_SyncIO, &sh->dev[i].flags);
1659 discard |= test_bit(R5_Discard, &sh->dev[i].flags);
1660 }
1661
1662 for (i = disks; i--; ) {
1663 struct r5dev *dev = &sh->dev[i];
1664
1665 if (dev->written || i == pd_idx || i == qd_idx) {
1666 if (!discard && !test_bit(R5_SkipCopy, &dev->flags))
1667 set_bit(R5_UPTODATE, &dev->flags);
1668 if (fua)
1669 set_bit(R5_WantFUA, &dev->flags);
1670 if (sync)
1671 set_bit(R5_SyncIO, &dev->flags);
1672 }
1673 }
1674
1675 if (sh->reconstruct_state == reconstruct_state_drain_run)
1676 sh->reconstruct_state = reconstruct_state_drain_result;
1677 else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
1678 sh->reconstruct_state = reconstruct_state_prexor_drain_result;
1679 else {
1680 BUG_ON(sh->reconstruct_state != reconstruct_state_run);
1681 sh->reconstruct_state = reconstruct_state_result;
1682 }
1683
1684 set_bit(STRIPE_HANDLE, &sh->state);
1685 release_stripe(sh);
1686 }
1687
1688 static void
1689 ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
1690 struct dma_async_tx_descriptor *tx)
1691 {
1692 int disks = sh->disks;
1693 struct page **xor_srcs;
1694 struct async_submit_ctl submit;
1695 int count, pd_idx = sh->pd_idx, i;
1696 struct page *xor_dest;
1697 int prexor = 0;
1698 unsigned long flags;
1699 int j = 0;
1700 struct stripe_head *head_sh = sh;
1701 int last_stripe;
1702
1703 pr_debug("%s: stripe %llu\n", __func__,
1704 (unsigned long long)sh->sector);
1705
1706 for (i = 0; i < sh->disks; i++) {
1707 if (pd_idx == i)
1708 continue;
1709 if (!test_bit(R5_Discard, &sh->dev[i].flags))
1710 break;
1711 }
1712 if (i >= sh->disks) {
1713 atomic_inc(&sh->count);
1714 set_bit(R5_Discard, &sh->dev[pd_idx].flags);
1715 ops_complete_reconstruct(sh);
1716 return;
1717 }
1718 again:
1719 count = 0;
1720 xor_srcs = to_addr_page(percpu, j);
1721 /* check if prexor is active, which means we only process blocks
1722 * that are part of a read-modify-write (i.e. marked written)
1723 */
1724 if (head_sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
1725 prexor = 1;
1726 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
1727 for (i = disks; i--; ) {
1728 struct r5dev *dev = &sh->dev[i];
1729 if (head_sh->dev[i].written)
1730 xor_srcs[count++] = dev->page;
1731 }
1732 } else {
1733 xor_dest = sh->dev[pd_idx].page;
1734 for (i = disks; i--; ) {
1735 struct r5dev *dev = &sh->dev[i];
1736 if (i != pd_idx)
1737 xor_srcs[count++] = dev->page;
1738 }
1739 }
1740
1741 /* 1/ if we prexor'd then the dest is reused as a source
1742 * 2/ if we did not prexor then we are redoing the parity
1743 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
1744 * for the synchronous xor case
1745 */
1746 last_stripe = !head_sh->batch_head ||
1747 list_first_entry(&sh->batch_list,
1748 struct stripe_head, batch_list) == head_sh;
1749 if (last_stripe) {
1750 flags = ASYNC_TX_ACK |
1751 (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);
1752
1753 atomic_inc(&head_sh->count);
1754 init_async_submit(&submit, flags, tx, ops_complete_reconstruct, head_sh,
1755 to_addr_conv(sh, percpu, j));
1756 } else {
1757 flags = prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST;
1758 init_async_submit(&submit, flags, tx, NULL, NULL,
1759 to_addr_conv(sh, percpu, j));
1760 }
1761
1762 if (unlikely(count == 1))
1763 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
1764 else
1765 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
1766 if (!last_stripe) {
1767 j++;
1768 sh = list_first_entry(&sh->batch_list, struct stripe_head,
1769 batch_list);
1770 goto again;
1771 }
1772 }
1773
1774 static void
1775 ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
1776 struct dma_async_tx_descriptor *tx)
1777 {
1778 struct async_submit_ctl submit;
1779 struct page **blocks;
1780 int count, i, j = 0;
1781 struct stripe_head *head_sh = sh;
1782 int last_stripe;
1783 int synflags;
1784 unsigned long txflags;
1785
1786 pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);
1787
1788 for (i = 0; i < sh->disks; i++) {
1789 if (sh->pd_idx == i || sh->qd_idx == i)
1790 continue;
1791 if (!test_bit(R5_Discard, &sh->dev[i].flags))
1792 break;
1793 }
1794 if (i >= sh->disks) {
1795 atomic_inc(&sh->count);
1796 set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
1797 set_bit(R5_Discard, &sh->dev[sh->qd_idx].flags);
1798 ops_complete_reconstruct(sh);
1799 return;
1800 }
1801
1802 again:
1803 blocks = to_addr_page(percpu, j);
1804
1805 if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
1806 synflags = SYNDROME_SRC_WRITTEN;
1807 txflags = ASYNC_TX_ACK | ASYNC_TX_PQ_XOR_DST;
1808 } else {
1809 synflags = SYNDROME_SRC_ALL;
1810 txflags = ASYNC_TX_ACK;
1811 }
1812
1813 count = set_syndrome_sources(blocks, sh, synflags);
1814 last_stripe = !head_sh->batch_head ||
1815 list_first_entry(&sh->batch_list,
1816 struct stripe_head, batch_list) == head_sh;
1817
1818 if (last_stripe) {
1819 atomic_inc(&head_sh->count);
1820 init_async_submit(&submit, txflags, tx, ops_complete_reconstruct,
1821 head_sh, to_addr_conv(sh, percpu, j));
1822 } else
1823 init_async_submit(&submit, 0, tx, NULL, NULL,
1824 to_addr_conv(sh, percpu, j));
1825 async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
1826 if (!last_stripe) {
1827 j++;
1828 sh = list_first_entry(&sh->batch_list, struct stripe_head,
1829 batch_list);
1830 goto again;
1831 }
1832 }
1833
1834 static void ops_complete_check(void *stripe_head_ref)
1835 {
1836 struct stripe_head *sh = stripe_head_ref;
1837
1838 pr_debug("%s: stripe %llu\n", __func__,
1839 (unsigned long long)sh->sector);
1840
1841 sh->check_state = check_state_check_result;
1842 set_bit(STRIPE_HANDLE, &sh->state);
1843 release_stripe(sh);
1844 }
1845
1846 static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
1847 {
1848 int disks = sh->disks;
1849 int pd_idx = sh->pd_idx;
1850 int qd_idx = sh->qd_idx;
1851 struct page *xor_dest;
1852 struct page **xor_srcs = to_addr_page(percpu, 0);
1853 struct dma_async_tx_descriptor *tx;
1854 struct async_submit_ctl submit;
1855 int count;
1856 int i;
1857
1858 pr_debug("%s: stripe %llu\n", __func__,
1859 (unsigned long long)sh->sector);
1860
1861 BUG_ON(sh->batch_head);
1862 count = 0;
1863 xor_dest = sh->dev[pd_idx].page;
1864 xor_srcs[count++] = xor_dest;
1865 for (i = disks; i--; ) {
1866 if (i == pd_idx || i == qd_idx)
1867 continue;
1868 xor_srcs[count++] = sh->dev[i].page;
1869 }
1870
1871 init_async_submit(&submit, 0, NULL, NULL, NULL,
1872 to_addr_conv(sh, percpu, 0));
1873 tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
1874 &sh->ops.zero_sum_result, &submit);
1875
1876 atomic_inc(&sh->count);
1877 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL);
1878 tx = async_trigger_callback(&submit);
1879 }
1880
1881 static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
1882 {
1883 struct page **srcs = to_addr_page(percpu, 0);
1884 struct async_submit_ctl submit;
1885 int count;
1886
1887 pr_debug("%s: stripe %llu checkp: %d\n", __func__,
1888 (unsigned long long)sh->sector, checkp);
1889
1890 BUG_ON(sh->batch_head);
1891 count = set_syndrome_sources(srcs, sh, SYNDROME_SRC_ALL);
1892 if (!checkp)
1893 srcs[count] = NULL;
1894
1895 atomic_inc(&sh->count);
1896 init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
1897 sh, to_addr_conv(sh, percpu, 0));
1898 async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE,
1899 &sh->ops.zero_sum_result, percpu->spare_page, &submit);
1900 }
1901
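/*
 * raid_run_ops - issue the asynchronous stripe operations requested in
 * ops_request using this CPU's percpu scratch buffers, chaining the
 * compute/prexor/drain/reconstruct steps on one descriptor so they
 * execute in order.
 */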
1902 static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
1903 {
1904 int overlap_clear = 0, i, disks = sh->disks;
1905 struct dma_async_tx_descriptor *tx = NULL;
1906 struct r5conf *conf = sh->raid_conf;
1907 int level = conf->level;
1908 struct raid5_percpu *percpu;
1909 unsigned long cpu;
1910
1911 cpu = get_cpu();
1912 percpu = per_cpu_ptr(conf->percpu, cpu);
1913 if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
1914 ops_run_biofill(sh);
1915 overlap_clear++;
1916 }
1917
1918 if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
1919 if (level < 6)
1920 tx = ops_run_compute5(sh, percpu);
1921 else {
1922 if (sh->ops.target2 < 0 || sh->ops.target < 0)
1923 tx = ops_run_compute6_1(sh, percpu);
1924 else
1925 tx = ops_run_compute6_2(sh, percpu);
1926 }
1927 /* terminate the chain if reconstruct is not set to be run */
1928 if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request))
1929 async_tx_ack(tx);
1930 }
1931
1932 if (test_bit(STRIPE_OP_PREXOR, &ops_request)) {
1933 if (level < 6)
1934 tx = ops_run_prexor5(sh, percpu, tx);
1935 else
1936 tx = ops_run_prexor6(sh, percpu, tx);
1937 }
1938
1939 if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
1940 tx = ops_run_biodrain(sh, tx);
1941 overlap_clear++;
1942 }
1943
1944 if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) {
1945 if (level < 6)
1946 ops_run_reconstruct5(sh, percpu, tx);
1947 else
1948 ops_run_reconstruct6(sh, percpu, tx);
1949 }
1950
1951 if (test_bit(STRIPE_OP_CHECK, &ops_request)) {
1952 if (sh->check_state == check_state_run)
1953 ops_run_check_p(sh, percpu);
1954 else if (sh->check_state == check_state_run_q)
1955 ops_run_check_pq(sh, percpu, 0);
1956 else if (sh->check_state == check_state_run_pq)
1957 ops_run_check_pq(sh, percpu, 1);
1958 else
1959 BUG();
1960 }
1961
1962 if (overlap_clear && !sh->batch_head)
1963 for (i = disks; i--; ) {
1964 struct r5dev *dev = &sh->dev[i];
1965 if (test_and_clear_bit(R5_Overlap, &dev->flags))
1966 wake_up(&sh->raid_conf->wait_for_overlap);
1967 }
1968 put_cpu();
1969 }
1970
1971 static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp)
1972 {
1973 struct stripe_head *sh;
1974
1975 sh = kmem_cache_zalloc(sc, gfp);
1976 if (sh) {
1977 spin_lock_init(&sh->stripe_lock);
1978 spin_lock_init(&sh->batch_lock);
1979 INIT_LIST_HEAD(&sh->batch_list);
1980 INIT_LIST_HEAD(&sh->lru);
1981 atomic_set(&sh->count, 1);
1982 }
1983 return sh;
1984 }
1985 static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
1986 {
1987 struct stripe_head *sh;
1988
1989 sh = alloc_stripe(conf->slab_cache, gfp);
1990 if (!sh)
1991 return 0;
1992
1993 sh->raid_conf = conf;
1994
1995 if (grow_buffers(sh, gfp)) {
1996 shrink_buffers(sh);
1997 kmem_cache_free(conf->slab_cache, sh);
1998 return 0;
1999 }
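	/*
	 * Spread stripes across the hash locks in round-robin order as the
	 * cache grows; drop_one_stripe() walks the same sequence in reverse.
	 */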
2000 sh->hash_lock_index =
2001 conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
2002 /* we just created an active stripe so... */
2003 atomic_inc(&conf->active_stripes);
2004
2005 release_stripe(sh);
2006 conf->max_nr_stripes++;
2007 return 1;
2008 }
2009
2010 static int grow_stripes(struct r5conf *conf, int num)
2011 {
2012 struct kmem_cache *sc;
2013 int devs = max(conf->raid_disks, conf->previous_raid_disks);
2014
2015 if (conf->mddev->gendisk)
2016 sprintf(conf->cache_name[0],
2017 "raid%d-%s", conf->level, mdname(conf->mddev));
2018 else
2019 sprintf(conf->cache_name[0],
2020 "raid%d-%p", conf->level, conf->mddev);
2021 sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
2022
2023 conf->active_name = 0;
2024 sc = kmem_cache_create(conf->cache_name[conf->active_name],
2025 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
2026 0, 0, NULL);
2027 if (!sc)
2028 return 1;
2029 conf->slab_cache = sc;
2030 conf->pool_size = devs;
2031 while (num--)
2032 if (!grow_one_stripe(conf, GFP_KERNEL))
2033 return 1;
2034
2035 return 0;
2036 }
2037
2038 /**
2039 * scribble_alloc - allocate the scribble region
2040 * @num - total number of disks in the array
2041 *
2042 * The size must be enough to contain:
2043 * 1/ a struct page pointer for each device in the array +2
2044 * 2/ room to convert each entry in (1) to its corresponding dma
2045 * (dma_map_page()) or page (page_address()) address.
2046 *
2047 * Note: the +2 is for the destination buffers of the ddf/raid6 case where we
2048 * calculate over all devices (not just the data blocks), using zeros in place
2049 * of the P and Q blocks.
2050 */
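/*
 * Illustrative sizing (assuming 8-byte struct page pointers and 8-byte
 * addr_conv_t entries): for num = 10 disks, each flex_array element is
 * (10+2)*8 + (10+2)*8 = 192 bytes.
 */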
2051 static struct flex_array *scribble_alloc(int num, int cnt, gfp_t flags)
2052 {
2053 struct flex_array *ret;
2054 size_t len;
2055
2056 len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2);
2057 ret = flex_array_alloc(len, cnt, flags);
2058 if (!ret)
2059 return NULL;
2060 /* always prealloc all elements, so no locking is required */
2061 if (flex_array_prealloc(ret, 0, cnt, flags)) {
2062 flex_array_free(ret);
2063 return NULL;
2064 }
2065 return ret;
2066 }
2067
2068 static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
2069 {
2070 unsigned long cpu;
2071 int err = 0;
2072
2073 mddev_suspend(conf->mddev);
2074 get_online_cpus();
2075 for_each_present_cpu(cpu) {
2076 struct raid5_percpu *percpu;
2077 struct flex_array *scribble;
2078
2079 percpu = per_cpu_ptr(conf->percpu, cpu);
2080 scribble = scribble_alloc(new_disks,
2081 new_sectors / STRIPE_SECTORS,
2082 GFP_NOIO);
2083
2084 if (scribble) {
2085 flex_array_free(percpu->scribble);
2086 percpu->scribble = scribble;
2087 } else {
2088 err = -ENOMEM;
2089 break;
2090 }
2091 }
2092 put_online_cpus();
2093 mddev_resume(conf->mddev);
2094 return err;
2095 }
2096
2097 static int resize_stripes(struct r5conf *conf, int newsize)
2098 {
2099 /* Make all the stripes able to hold 'newsize' devices.
2100 * New slots in each stripe get 'page' set to a new page.
2101 *
2102 * This happens in stages:
2103 * 1/ create a new kmem_cache and allocate the required number of
2104 * stripe_heads.
2105 * 2/ gather all the old stripe_heads and transfer the pages across
2106 * to the new stripe_heads. This will have the side effect of
2107 * freezing the array as once all stripe_heads have been collected,
2108 * no IO will be possible. Old stripe heads are freed once their
2109 * pages have been transferred over, and the old kmem_cache is
2110 * freed when all stripes are done.
2111 * 3/ reallocate conf->disks to be suitably bigger. If this fails,
2112 * we simply return a failure status - no need to clean anything up.
2113 * 4/ allocate new pages for the new slots in the new stripe_heads.
2114 * If this fails, we don't bother trying to shrink the
2115 * stripe_heads down again, we just leave them as they are.
2116 * As each stripe_head is processed the new one is released into
2117 * active service.
2118 *
2119 * Once step2 is started, we cannot afford to wait for a write,
2120 * so we use GFP_NOIO allocations.
2121 */
2122 struct stripe_head *osh, *nsh;
2123 LIST_HEAD(newstripes);
2124 struct disk_info *ndisks;
2125 int err;
2126 struct kmem_cache *sc;
2127 int i;
2128 int hash, cnt;
2129
2130 if (newsize <= conf->pool_size)
2131 return 0; /* never bother to shrink */
2132
2133 err = md_allow_write(conf->mddev);
2134 if (err)
2135 return err;
2136
2137 /* Step 1 */
2138 sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
2139 sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
2140 0, 0, NULL);
2141 if (!sc)
2142 return -ENOMEM;
2143
2144 for (i = conf->max_nr_stripes; i; i--) {
2145 nsh = alloc_stripe(sc, GFP_KERNEL);
2146 if (!nsh)
2147 break;
2148
2149 nsh->raid_conf = conf;
2150 list_add(&nsh->lru, &newstripes);
2151 }
2152 if (i) {
2153 /* didn't get enough, give up */
2154 while (!list_empty(&newstripes)) {
2155 nsh = list_entry(newstripes.next, struct stripe_head, lru);
2156 list_del(&nsh->lru);
2157 kmem_cache_free(sc, nsh);
2158 }
2159 kmem_cache_destroy(sc);
2160 return -ENOMEM;
2161 }
2162 /* Step 2 - Must use GFP_NOIO now.
2163 * OK, we have enough stripes, start collecting inactive
2164 * stripes and copying them over
2165 */
2166 hash = 0;
2167 cnt = 0;
2168 list_for_each_entry(nsh, &newstripes, lru) {
2169 lock_device_hash_lock(conf, hash);
2170 wait_event_cmd(conf->wait_for_stripe,
2171 !list_empty(conf->inactive_list + hash),
2172 unlock_device_hash_lock(conf, hash),
2173 lock_device_hash_lock(conf, hash));
2174 osh = get_free_stripe(conf, hash);
2175 unlock_device_hash_lock(conf, hash);
2176
2177 for(i=0; i<conf->pool_size; i++) {
2178 nsh->dev[i].page = osh->dev[i].page;
2179 nsh->dev[i].orig_page = osh->dev[i].page;
2180 }
2181 nsh->hash_lock_index = hash;
2182 kmem_cache_free(conf->slab_cache, osh);
2183 cnt++;
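		/*
		 * Move to the next hash lock once it has received its share:
		 * the first (max_nr_stripes % NR_STRIPE_HASH_LOCKS) locks each
		 * take one extra stripe so the distribution comes out even.
		 */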
2184 if (cnt >= conf->max_nr_stripes / NR_STRIPE_HASH_LOCKS +
2185 !!((conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS) > hash)) {
2186 hash++;
2187 cnt = 0;
2188 }
2189 }
2190 kmem_cache_destroy(conf->slab_cache);
2191
2192 /* Step 3.
2193 * At this point, we are holding all the stripes so the array
2194 * is completely stalled, so now is a good time to resize
2195 * conf->disks and the scribble region
2196 */
2197 ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
2198 if (ndisks) {
2199 for (i=0; i<conf->raid_disks; i++)
2200 ndisks[i] = conf->disks[i];
2201 kfree(conf->disks);
2202 conf->disks = ndisks;
2203 } else
2204 err = -ENOMEM;
2205
2206 /* Step 4, return new stripes to service */
2207 while(!list_empty(&newstripes)) {
2208 nsh = list_entry(newstripes.next, struct stripe_head, lru);
2209 list_del_init(&nsh->lru);
2210
2211 for (i=conf->raid_disks; i < newsize; i++)
2212 if (nsh->dev[i].page == NULL) {
2213 struct page *p = alloc_page(GFP_NOIO);
2214 nsh->dev[i].page = p;
2215 nsh->dev[i].orig_page = p;
2216 if (!p)
2217 err = -ENOMEM;
2218 }
2219 release_stripe(nsh);
2220 }
2221 /* critical section passed, GFP_NOIO no longer needed */
2222
2223 conf->slab_cache = sc;
2224 conf->active_name = 1-conf->active_name;
2225 if (!err)
2226 conf->pool_size = newsize;
2227 return err;
2228 }
2229
2230 static int drop_one_stripe(struct r5conf *conf)
2231 {
2232 struct stripe_head *sh;
2233 int hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS;
2234
2235 spin_lock_irq(conf->hash_locks + hash);
2236 sh = get_free_stripe(conf, hash);
2237 spin_unlock_irq(conf->hash_locks + hash);
2238 if (!sh)
2239 return 0;
2240 BUG_ON(atomic_read(&sh->count));
2241 shrink_buffers(sh);
2242 kmem_cache_free(conf->slab_cache, sh);
2243 atomic_dec(&conf->active_stripes);
2244 conf->max_nr_stripes--;
2245 return 1;
2246 }
2247
2248 static void shrink_stripes(struct r5conf *conf)
2249 {
2250 while (conf->max_nr_stripes &&
2251 drop_one_stripe(conf))
2252 ;
2253
2254 if (conf->slab_cache)
2255 kmem_cache_destroy(conf->slab_cache);
2256 conf->slab_cache = NULL;
2257 }
2258
2259 static void raid5_end_read_request(struct bio * bi, int error)
2260 {
2261 struct stripe_head *sh = bi->bi_private;
2262 struct r5conf *conf = sh->raid_conf;
2263 int disks = sh->disks, i;
2264 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
2265 char b[BDEVNAME_SIZE];
2266 struct md_rdev *rdev = NULL;
2267 sector_t s;
2268
2269 for (i=0 ; i<disks; i++)
2270 if (bi == &sh->dev[i].req)
2271 break;
2272
2273 pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
2274 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
2275 uptodate);
2276 if (i == disks) {
2277 BUG();
2278 return;
2279 }
2280 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
2281 /* If replacement finished while this request was outstanding,
2282 * 'replacement' might be NULL already.
2283 * In that case it moved down to 'rdev'.
2284 * rdev is not removed until all requests are finished.
2285 */
2286 rdev = conf->disks[i].replacement;
2287 if (!rdev)
2288 rdev = conf->disks[i].rdev;
2289
2290 if (use_new_offset(conf, sh))
2291 s = sh->sector + rdev->new_data_offset;
2292 else
2293 s = sh->sector + rdev->data_offset;
2294 if (uptodate) {
2295 set_bit(R5_UPTODATE, &sh->dev[i].flags);
2296 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
2297 /* Note that this cannot happen on a
2298 * replacement device. We just fail those on
2299 * any error
2300 */
2301 printk_ratelimited(
2302 KERN_INFO
2303 "md/raid:%s: read error corrected"
2304 " (%lu sectors at %llu on %s)\n",
2305 mdname(conf->mddev), STRIPE_SECTORS,
2306 (unsigned long long)s,
2307 bdevname(rdev->bdev, b));
2308 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
2309 clear_bit(R5_ReadError, &sh->dev[i].flags);
2310 clear_bit(R5_ReWrite, &sh->dev[i].flags);
2311 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
2312 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
2313
2314 if (atomic_read(&rdev->read_errors))
2315 atomic_set(&rdev->read_errors, 0);
2316 } else {
2317 const char *bdn = bdevname(rdev->bdev, b);
2318 int retry = 0;
2319 int set_bad = 0;
2320
2321 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
2322 atomic_inc(&rdev->read_errors);
2323 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
2324 printk_ratelimited(
2325 KERN_WARNING
2326 "md/raid:%s: read error on replacement device "
2327 "(sector %llu on %s).\n",
2328 mdname(conf->mddev),
2329 (unsigned long long)s,
2330 bdn);
2331 else if (conf->mddev->degraded >= conf->max_degraded) {
2332 set_bad = 1;
2333 printk_ratelimited(
2334 KERN_WARNING
2335 "md/raid:%s: read error not correctable "
2336 "(sector %llu on %s).\n",
2337 mdname(conf->mddev),
2338 (unsigned long long)s,
2339 bdn);
2340 } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
2341 /* Oh, no!!! */
2342 set_bad = 1;
2343 printk_ratelimited(
2344 KERN_WARNING
2345 "md/raid:%s: read error NOT corrected!! "
2346 "(sector %llu on %s).\n",
2347 mdname(conf->mddev),
2348 (unsigned long long)s,
2349 bdn);
2350 } else if (atomic_read(&rdev->read_errors)
2351 > conf->max_nr_stripes)
2352 printk(KERN_WARNING
2353 "md/raid:%s: Too many read errors, failing device %s.\n",
2354 mdname(conf->mddev), bdn);
2355 else
2356 retry = 1;
2357 if (set_bad && test_bit(In_sync, &rdev->flags)
2358 && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
2359 retry = 1;
2360 if (retry)
2361 if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
2362 set_bit(R5_ReadError, &sh->dev[i].flags);
2363 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
2364 } else
2365 set_bit(R5_ReadNoMerge, &sh->dev[i].flags);
2366 else {
2367 clear_bit(R5_ReadError, &sh->dev[i].flags);
2368 clear_bit(R5_ReWrite, &sh->dev[i].flags);
2369 if (!(set_bad
2370 && test_bit(In_sync, &rdev->flags)
2371 && rdev_set_badblocks(
2372 rdev, sh->sector, STRIPE_SECTORS, 0)))
2373 md_error(conf->mddev, rdev);
2374 }
2375 }
2376 rdev_dec_pending(rdev, conf->mddev);
2377 clear_bit(R5_LOCKED, &sh->dev[i].flags);
2378 set_bit(STRIPE_HANDLE, &sh->state);
2379 release_stripe(sh);
2380 }
2381
2382 static void raid5_end_write_request(struct bio *bi, int error)
2383 {
2384 struct stripe_head *sh = bi->bi_private;
2385 struct r5conf *conf = sh->raid_conf;
2386 int disks = sh->disks, i;
2387 struct md_rdev *uninitialized_var(rdev);
2388 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
2389 sector_t first_bad;
2390 int bad_sectors;
2391 int replacement = 0;
2392
2393 for (i = 0 ; i < disks; i++) {
2394 if (bi == &sh->dev[i].req) {
2395 rdev = conf->disks[i].rdev;
2396 break;
2397 }
2398 if (bi == &sh->dev[i].rreq) {
2399 rdev = conf->disks[i].replacement;
2400 if (rdev)
2401 replacement = 1;
2402 else
2403 /* rdev was removed and 'replacement'
2404 * replaced it. rdev is not removed
2405 * until all requests are finished.
2406 */
2407 rdev = conf->disks[i].rdev;
2408 break;
2409 }
2410 }
2411 pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
2412 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
2413 uptodate);
2414 if (i == disks) {
2415 BUG();
2416 return;
2417 }
2418
2419 if (replacement) {
2420 if (!uptodate)
2421 md_error(conf->mddev, rdev);
2422 else if (is_badblock(rdev, sh->sector,
2423 STRIPE_SECTORS,
2424 &first_bad, &bad_sectors))
2425 set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
2426 } else {
2427 if (!uptodate) {
2428 set_bit(STRIPE_DEGRADED, &sh->state);
2429 set_bit(WriteErrorSeen, &rdev->flags);
2430 set_bit(R5_WriteError, &sh->dev[i].flags);
2431 if (!test_and_set_bit(WantReplacement, &rdev->flags))
2432 set_bit(MD_RECOVERY_NEEDED,
2433 &rdev->mddev->recovery);
2434 } else if (is_badblock(rdev, sh->sector,
2435 STRIPE_SECTORS,
2436 &first_bad, &bad_sectors)) {
2437 set_bit(R5_MadeGood, &sh->dev[i].flags);
2438 if (test_bit(R5_ReadError, &sh->dev[i].flags))
2439 /* That was a successful write so make
2440 * sure it looks like we already did
2441 * a re-write.
2442 */
2443 set_bit(R5_ReWrite, &sh->dev[i].flags);
2444 }
2445 }
2446 rdev_dec_pending(rdev, conf->mddev);
2447
2448 if (sh->batch_head && !uptodate && !replacement)
2449 set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state);
2450
2451 if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
2452 clear_bit(R5_LOCKED, &sh->dev[i].flags);
2453 set_bit(STRIPE_HANDLE, &sh->state);
2454 release_stripe(sh);
2455
2456 if (sh->batch_head && sh != sh->batch_head)
2457 release_stripe(sh->batch_head);
2458 }
2459
2460 static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);
2461
2462 static void raid5_build_block(struct stripe_head *sh, int i, int previous)
2463 {
2464 struct r5dev *dev = &sh->dev[i];
2465
2466 bio_init(&dev->req);
2467 dev->req.bi_io_vec = &dev->vec;
2468 dev->req.bi_max_vecs = 1;
2469 dev->req.bi_private = sh;
2470
2471 bio_init(&dev->rreq);
2472 dev->rreq.bi_io_vec = &dev->rvec;
2473 dev->rreq.bi_max_vecs = 1;
2474 dev->rreq.bi_private = sh;
2475
2476 dev->flags = 0;
2477 dev->sector = compute_blocknr(sh, i, previous);
2478 }
2479
2480 static void error(struct mddev *mddev, struct md_rdev *rdev)
2481 {
2482 char b[BDEVNAME_SIZE];
2483 struct r5conf *conf = mddev->private;
2484 unsigned long flags;
2485 pr_debug("raid456: error called\n");
2486
2487 spin_lock_irqsave(&conf->device_lock, flags);
2488 clear_bit(In_sync, &rdev->flags);
2489 mddev->degraded = calc_degraded(conf);
2490 spin_unlock_irqrestore(&conf->device_lock, flags);
2491 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2492
2493 set_bit(Blocked, &rdev->flags);
2494 set_bit(Faulty, &rdev->flags);
2495 set_bit(MD_CHANGE_DEVS, &mddev->flags);
2496 printk(KERN_ALERT
2497 "md/raid:%s: Disk failure on %s, disabling device.\n"
2498 "md/raid:%s: Operation continuing on %d devices.\n",
2499 mdname(mddev),
2500 bdevname(rdev->bdev, b),
2501 mdname(mddev),
2502 conf->raid_disks - mddev->degraded);
2503 }
2504
2505 /*
2506 * Input: a 'big' sector number,
2507 * Output: index of the data and parity disk, and the sector # in them.
2508 */
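/*
 * Worked example (illustrative numbers only): with 4 data disks and
 * 128-sector chunks, r_sector 1000 is chunk 7, offset 104; chunk 7 maps
 * to stripe 1 on data disk 3, so the returned sector is 1*128 + 104 = 232.
 * The parity disk(s) are then chosen from the stripe number according to
 * the layout algorithm below.
 */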
2509 static sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
2510 int previous, int *dd_idx,
2511 struct stripe_head *sh)
2512 {
2513 sector_t stripe, stripe2;
2514 sector_t chunk_number;
2515 unsigned int chunk_offset;
2516 int pd_idx, qd_idx;
2517 int ddf_layout = 0;
2518 sector_t new_sector;
2519 int algorithm = previous ? conf->prev_algo
2520 : conf->algorithm;
2521 int sectors_per_chunk = previous ? conf->prev_chunk_sectors
2522 : conf->chunk_sectors;
2523 int raid_disks = previous ? conf->previous_raid_disks
2524 : conf->raid_disks;
2525 int data_disks = raid_disks - conf->max_degraded;
2526
2527 /* First compute the information on this sector */
2528
2529 /*
2530 * Compute the chunk number and the sector offset inside the chunk
2531 */
2532 chunk_offset = sector_div(r_sector, sectors_per_chunk);
2533 chunk_number = r_sector;
2534
2535 /*
2536 * Compute the stripe number
2537 */
2538 stripe = chunk_number;
2539 *dd_idx = sector_div(stripe, data_disks);
2540 stripe2 = stripe;
2541 /*
2542 * Select the parity disk based on the user selected algorithm.
2543 */
2544 pd_idx = qd_idx = -1;
2545 switch(conf->level) {
2546 case 4:
2547 pd_idx = data_disks;
2548 break;
2549 case 5:
2550 switch (algorithm) {
2551 case ALGORITHM_LEFT_ASYMMETRIC:
2552 pd_idx = data_disks - sector_div(stripe2, raid_disks);
2553 if (*dd_idx >= pd_idx)
2554 (*dd_idx)++;
2555 break;
2556 case ALGORITHM_RIGHT_ASYMMETRIC:
2557 pd_idx = sector_div(stripe2, raid_disks);
2558 if (*dd_idx >= pd_idx)
2559 (*dd_idx)++;
2560 break;
2561 case ALGORITHM_LEFT_SYMMETRIC:
2562 pd_idx = data_disks - sector_div(stripe2, raid_disks);
2563 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
2564 break;
2565 case ALGORITHM_RIGHT_SYMMETRIC:
2566 pd_idx = sector_div(stripe2, raid_disks);
2567 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
2568 break;
2569 case ALGORITHM_PARITY_0:
2570 pd_idx = 0;
2571 (*dd_idx)++;
2572 break;
2573 case ALGORITHM_PARITY_N:
2574 pd_idx = data_disks;
2575 break;
2576 default:
2577 BUG();
2578 }
2579 break;
2580 case 6:
2581
2582 switch (algorithm) {
2583 case ALGORITHM_LEFT_ASYMMETRIC:
2584 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
2585 qd_idx = pd_idx + 1;
2586 if (pd_idx == raid_disks-1) {
2587 (*dd_idx)++; /* Q D D D P */
2588 qd_idx = 0;
2589 } else if (*dd_idx >= pd_idx)
2590 (*dd_idx) += 2; /* D D P Q D */
2591 break;
2592 case ALGORITHM_RIGHT_ASYMMETRIC:
2593 pd_idx = sector_div(stripe2, raid_disks);
2594 qd_idx = pd_idx + 1;
2595 if (pd_idx == raid_disks-1) {
2596 (*dd_idx)++; /* Q D D D P */
2597 qd_idx = 0;
2598 } else if (*dd_idx >= pd_idx)
2599 (*dd_idx) += 2; /* D D P Q D */
2600 break;
2601 case ALGORITHM_LEFT_SYMMETRIC:
2602 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
2603 qd_idx = (pd_idx + 1) % raid_disks;
2604 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
2605 break;
2606 case ALGORITHM_RIGHT_SYMMETRIC:
2607 pd_idx = sector_div(stripe2, raid_disks);
2608 qd_idx = (pd_idx + 1) % raid_disks;
2609 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
2610 break;
2611
2612 case ALGORITHM_PARITY_0:
2613 pd_idx = 0;
2614 qd_idx = 1;
2615 (*dd_idx) += 2;
2616 break;
2617 case ALGORITHM_PARITY_N:
2618 pd_idx = data_disks;
2619 qd_idx = data_disks + 1;
2620 break;
2621
2622 case ALGORITHM_ROTATING_ZERO_RESTART:
2623 /* Exactly the same as RIGHT_ASYMMETRIC, but the order
2624 * of blocks for computing Q is different.
2625 */
2626 pd_idx = sector_div(stripe2, raid_disks);
2627 qd_idx = pd_idx + 1;
2628 if (pd_idx == raid_disks-1) {
2629 (*dd_idx)++; /* Q D D D P */
2630 qd_idx = 0;
2631 } else if (*dd_idx >= pd_idx)
2632 (*dd_idx) += 2; /* D D P Q D */
2633 ddf_layout = 1;
2634 break;
2635
2636 case ALGORITHM_ROTATING_N_RESTART:
2637 /* Same as left_asymmetric, but the first stripe is
2638 * D D D P Q rather than
2639 * Q D D D P
2640 */
2641 stripe2 += 1;
2642 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
2643 qd_idx = pd_idx + 1;
2644 if (pd_idx == raid_disks-1) {
2645 (*dd_idx)++; /* Q D D D P */
2646 qd_idx = 0;
2647 } else if (*dd_idx >= pd_idx)
2648 (*dd_idx) += 2; /* D D P Q D */
2649 ddf_layout = 1;
2650 break;
2651
2652 case ALGORITHM_ROTATING_N_CONTINUE:
2653 /* Same as left_symmetric but Q is before P */
2654 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
2655 qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
2656 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
2657 ddf_layout = 1;
2658 break;
2659
2660 case ALGORITHM_LEFT_ASYMMETRIC_6:
2661 /* RAID5 left_asymmetric, with Q on last device */
2662 pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
2663 if (*dd_idx >= pd_idx)
2664 (*dd_idx)++;
2665 qd_idx = raid_disks - 1;
2666 break;
2667
2668 case ALGORITHM_RIGHT_ASYMMETRIC_6:
2669 pd_idx = sector_div(stripe2, raid_disks-1);
2670 if (*dd_idx >= pd_idx)
2671 (*dd_idx)++;
2672 qd_idx = raid_disks - 1;
2673 break;
2674
2675 case ALGORITHM_LEFT_SYMMETRIC_6:
2676 pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
2677 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
2678 qd_idx = raid_disks - 1;
2679 break;
2680
2681 case ALGORITHM_RIGHT_SYMMETRIC_6:
2682 pd_idx = sector_div(stripe2, raid_disks-1);
2683 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
2684 qd_idx = raid_disks - 1;
2685 break;
2686
2687 case ALGORITHM_PARITY_0_6:
2688 pd_idx = 0;
2689 (*dd_idx)++;
2690 qd_idx = raid_disks - 1;
2691 break;
2692
2693 default:
2694 BUG();
2695 }
2696 break;
2697 }
2698
2699 if (sh) {
2700 sh->pd_idx = pd_idx;
2701 sh->qd_idx = qd_idx;
2702 sh->ddf_layout = ddf_layout;
2703 }
2704 /*
2705 * Finally, compute the new sector number
2706 */
2707 new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
2708 return new_sector;
2709 }
2710
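/*
 * compute_blocknr - the inverse of raid5_compute_sector: given a stripe
 * and a device index, return the array sector stored on that device, or
 * 0 if the device holds parity (P or Q) or the reverse mapping fails the
 * sanity check below.
 */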
2711 static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
2712 {
2713 struct r5conf *conf = sh->raid_conf;
2714 int raid_disks = sh->disks;
2715 int data_disks = raid_disks - conf->max_degraded;
2716 sector_t new_sector = sh->sector, check;
2717 int sectors_per_chunk = previous ? conf->prev_chunk_sectors
2718 : conf->chunk_sectors;
2719 int algorithm = previous ? conf->prev_algo
2720 : conf->algorithm;
2721 sector_t stripe;
2722 int chunk_offset;
2723 sector_t chunk_number;
2724 int dummy1, dd_idx = i;
2725 sector_t r_sector;
2726 struct stripe_head sh2;
2727
2728 chunk_offset = sector_div(new_sector, sectors_per_chunk);
2729 stripe = new_sector;
2730
2731 if (i == sh->pd_idx)
2732 return 0;
2733 switch(conf->level) {
2734 case 4: break;
2735 case 5:
2736 switch (algorithm) {
2737 case ALGORITHM_LEFT_ASYMMETRIC:
2738 case ALGORITHM_RIGHT_ASYMMETRIC:
2739 if (i > sh->pd_idx)
2740 i--;
2741 break;
2742 case ALGORITHM_LEFT_SYMMETRIC:
2743 case ALGORITHM_RIGHT_SYMMETRIC:
2744 if (i < sh->pd_idx)
2745 i += raid_disks;
2746 i -= (sh->pd_idx + 1);
2747 break;
2748 case ALGORITHM_PARITY_0:
2749 i -= 1;
2750 break;
2751 case ALGORITHM_PARITY_N:
2752 break;
2753 default:
2754 BUG();
2755 }
2756 break;
2757 case 6:
2758 if (i == sh->qd_idx)
2759 return 0; /* It is the Q disk */
2760 switch (algorithm) {
2761 case ALGORITHM_LEFT_ASYMMETRIC:
2762 case ALGORITHM_RIGHT_ASYMMETRIC:
2763 case ALGORITHM_ROTATING_ZERO_RESTART:
2764 case ALGORITHM_ROTATING_N_RESTART:
2765 if (sh->pd_idx == raid_disks-1)
2766 i--; /* Q D D D P */
2767 else if (i > sh->pd_idx)
2768 i -= 2; /* D D P Q D */
2769 break;
2770 case ALGORITHM_LEFT_SYMMETRIC:
2771 case ALGORITHM_RIGHT_SYMMETRIC:
2772 if (sh->pd_idx == raid_disks-1)
2773 i--; /* Q D D D P */
2774 else {
2775 /* D D P Q D */
2776 if (i < sh->pd_idx)
2777 i += raid_disks;
2778 i -= (sh->pd_idx + 2);
2779 }
2780 break;
2781 case ALGORITHM_PARITY_0:
2782 i -= 2;
2783 break;
2784 case ALGORITHM_PARITY_N:
2785 break;
2786 case ALGORITHM_ROTATING_N_CONTINUE:
2787 /* Like left_symmetric, but P is before Q */
2788 if (sh->pd_idx == 0)
2789 i--; /* P D D D Q */
2790 else {
2791 /* D D Q P D */
2792 if (i < sh->pd_idx)
2793 i += raid_disks;
2794 i -= (sh->pd_idx + 1);
2795 }
2796 break;
2797 case ALGORITHM_LEFT_ASYMMETRIC_6:
2798 case ALGORITHM_RIGHT_ASYMMETRIC_6:
2799 if (i > sh->pd_idx)
2800 i--;
2801 break;
2802 case ALGORITHM_LEFT_SYMMETRIC_6:
2803 case ALGORITHM_RIGHT_SYMMETRIC_6:
2804 if (i < sh->pd_idx)
2805 i += data_disks + 1;
2806 i -= (sh->pd_idx + 1);
2807 break;
2808 case ALGORITHM_PARITY_0_6:
2809 i -= 1;
2810 break;
2811 default:
2812 BUG();
2813 }
2814 break;
2815 }
2816
2817 chunk_number = stripe * data_disks + i;
2818 r_sector = chunk_number * sectors_per_chunk + chunk_offset;
2819
2820 check = raid5_compute_sector(conf, r_sector,
2821 previous, &dummy1, &sh2);
2822 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
2823 || sh2.qd_idx != sh->qd_idx) {
2824 printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n",
2825 mdname(conf->mddev));
2826 return 0;
2827 }
2828 return r_sector;
2829 }
2830
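/*
 * schedule_reconstruction - prepare a stripe for a parity update: lock
 * and flag the blocks that the reconstruct-write (rcw) or
 * read-modify-write (prexor) path will drain and rewrite, and record
 * which async operations handle_stripe must request.
 */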
2831 static void
2832 schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
2833 int rcw, int expand)
2834 {
2835 int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx, disks = sh->disks;
2836 struct r5conf *conf = sh->raid_conf;
2837 int level = conf->level;
2838
2839 if (rcw) {
2840
2841 for (i = disks; i--; ) {
2842 struct r5dev *dev = &sh->dev[i];
2843
2844 if (dev->towrite) {
2845 set_bit(R5_LOCKED, &dev->flags);
2846 set_bit(R5_Wantdrain, &dev->flags);
2847 if (!expand)
2848 clear_bit(R5_UPTODATE, &dev->flags);
2849 s->locked++;
2850 }
2851 }
2852 /* if we are not expanding this is a proper write request, and
2853 * there will be bios with new data to be drained into the
2854 * stripe cache
2855 */
2856 if (!expand) {
2857 if (!s->locked)
2858 /* False alarm, nothing to do */
2859 return;
2860 sh->reconstruct_state = reconstruct_state_drain_run;
2861 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
2862 } else
2863 sh->reconstruct_state = reconstruct_state_run;
2864
2865 set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
2866
2867 if (s->locked + conf->max_degraded == disks)
2868 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
2869 atomic_inc(&conf->pending_full_writes);
2870 } else {
2871 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
2872 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
2873 BUG_ON(level == 6 &&
2874 (!(test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags) ||
2875 test_bit(R5_Wantcompute, &sh->dev[qd_idx].flags))));
2876
2877 for (i = disks; i--; ) {
2878 struct r5dev *dev = &sh->dev[i];
2879 if (i == pd_idx || i == qd_idx)
2880 continue;
2881
2882 if (dev->towrite &&
2883 (test_bit(R5_UPTODATE, &dev->flags) ||
2884 test_bit(R5_Wantcompute, &dev->flags))) {
2885 set_bit(R5_Wantdrain, &dev->flags);
2886 set_bit(R5_LOCKED, &dev->flags);
2887 clear_bit(R5_UPTODATE, &dev->flags);
2888 s->locked++;
2889 }
2890 }
2891 if (!s->locked)
2892 /* False alarm - nothing to do */
2893 return;
2894 sh->reconstruct_state = reconstruct_state_prexor_drain_run;
2895 set_bit(STRIPE_OP_PREXOR, &s->ops_request);
2896 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
2897 set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
2898 }
2899
2900 /* keep the parity disk(s) locked while asynchronous operations
2901 * are in flight
2902 */
2903 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
2904 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
2905 s->locked++;
2906
2907 if (level == 6) {
2908 int qd_idx = sh->qd_idx;
2909 struct r5dev *dev = &sh->dev[qd_idx];
2910
2911 set_bit(R5_LOCKED, &dev->flags);
2912 clear_bit(R5_UPTODATE, &dev->flags);
2913 s->locked++;
2914 }
2915
2916 pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
2917 __func__, (unsigned long long)sh->sector,
2918 s->locked, s->ops_request);
2919 }
2920
2921 /*
2922 * Each stripe/dev can have one or more bios attached.
2923 * toread/towrite point to the first in a chain.
2924 * The bi_next chain must be in order.
2925 */
2926 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
2927 int forwrite, int previous)
2928 {
2929 struct bio **bip;
2930 struct r5conf *conf = sh->raid_conf;
2931 int firstwrite=0;
2932
2933 pr_debug("adding bi b#%llu to stripe s#%llu\n",
2934 (unsigned long long)bi->bi_iter.bi_sector,
2935 (unsigned long long)sh->sector);
2936
2937 /*
2938 * If several bios share a stripe, the bio bi_phys_segments field acts as a
2939 * reference count to avoid races. The reference count should already be
2940 * increased before this function is called (for example, in
2941 * make_request()), so other bios sharing this stripe will not free the
2942 * stripe. If a stripe is owned by a single bio, the stripe lock will
2943 * protect it.
2944 */
2945 spin_lock_irq(&sh->stripe_lock);
2946 /* Don't allow new IO added to stripes in batch list */
2947 if (sh->batch_head)
2948 goto overlap;
2949 if (forwrite) {
2950 bip = &sh->dev[dd_idx].towrite;
2951 if (*bip == NULL)
2952 firstwrite = 1;
2953 } else
2954 bip = &sh->dev[dd_idx].toread;
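	/*
	 * Walk the bi_next chain (kept sorted by start sector) to find the
	 * insertion point, rejecting any bio that overlaps one already queued.
	 */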
2955 while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) {
2956 if (bio_end_sector(*bip) > bi->bi_iter.bi_sector)
2957 goto overlap;
2958 bip = & (*bip)->bi_next;
2959 }
2960 if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi))
2961 goto overlap;
2962
2963 if (!forwrite || previous)
2964 clear_bit(STRIPE_BATCH_READY, &sh->state);
2965
2966 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
2967 if (*bip)
2968 bi->bi_next = *bip;
2969 *bip = bi;
2970 raid5_inc_bi_active_stripes(bi);
2971
2972 if (forwrite) {
2973 /* check if page is covered */
2974 sector_t sector = sh->dev[dd_idx].sector;
2975 for (bi=sh->dev[dd_idx].towrite;
2976 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
2977 bi && bi->bi_iter.bi_sector <= sector;
2978 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
2979 if (bio_end_sector(bi) >= sector)
2980 sector = bio_end_sector(bi);
2981 }
2982 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
2983 if (!test_and_set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags))
2984 sh->overwrite_disks++;
2985 }
2986
2987 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
2988 (unsigned long long)(*bip)->bi_iter.bi_sector,
2989 (unsigned long long)sh->sector, dd_idx);
2990 spin_unlock_irq(&sh->stripe_lock);
2991
2992 if (conf->mddev->bitmap && firstwrite) {
2993 bitmap_startwrite(conf->mddev->bitmap, sh->sector,
2994 STRIPE_SECTORS, 0);
2995 sh->bm_seq = conf->seq_flush+1;
2996 set_bit(STRIPE_BIT_DELAY, &sh->state);
2997 }
2998
2999 if (stripe_can_batch(sh))
3000 stripe_add_to_batch_list(conf, sh);
3001 return 1;
3002
3003 overlap:
3004 set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
3005 spin_unlock_irq(&sh->stripe_lock);
3006 return 0;
3007 }
3008
3009 static void end_reshape(struct r5conf *conf);
3010
3011 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
3012 struct stripe_head *sh)
3013 {
3014 int sectors_per_chunk =
3015 previous ? conf->prev_chunk_sectors : conf->chunk_sectors;
3016 int dd_idx;
3017 int chunk_offset = sector_div(stripe, sectors_per_chunk);
3018 int disks = previous ? conf->previous_raid_disks : conf->raid_disks;
3019
3020 raid5_compute_sector(conf,
3021 stripe * (disks - conf->max_degraded)
3022 *sectors_per_chunk + chunk_offset,
3023 previous,
3024 &dd_idx, sh);
3025 }
3026
3027 static void
3028 handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
3029 struct stripe_head_state *s, int disks,
3030 struct bio **return_bi)
3031 {
3032 int i;
3033 BUG_ON(sh->batch_head);
3034 for (i = disks; i--; ) {
3035 struct bio *bi;
3036 int bitmap_end = 0;
3037
3038 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
3039 struct md_rdev *rdev;
3040 rcu_read_lock();
3041 rdev = rcu_dereference(conf->disks[i].rdev);
3042 if (rdev && test_bit(In_sync, &rdev->flags))
3043 atomic_inc(&rdev->nr_pending);
3044 else
3045 rdev = NULL;
3046 rcu_read_unlock();
3047 if (rdev) {
3048 if (!rdev_set_badblocks(
3049 rdev,
3050 sh->sector,
3051 STRIPE_SECTORS, 0))
3052 md_error(conf->mddev, rdev);
3053 rdev_dec_pending(rdev, conf->mddev);
3054 }
3055 }
3056 spin_lock_irq(&sh->stripe_lock);
3057 /* fail all writes first */
3058 bi = sh->dev[i].towrite;
3059 sh->dev[i].towrite = NULL;
3060 sh->overwrite_disks = 0;
3061 spin_unlock_irq(&sh->stripe_lock);
3062 if (bi)
3063 bitmap_end = 1;
3064
3065 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
3066 wake_up(&conf->wait_for_overlap);
3067
3068 while (bi && bi->bi_iter.bi_sector <
3069 sh->dev[i].sector + STRIPE_SECTORS) {
3070 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
3071 clear_bit(BIO_UPTODATE, &bi->bi_flags);
3072 if (!raid5_dec_bi_active_stripes(bi)) {
3073 md_write_end(conf->mddev);
3074 bi->bi_next = *return_bi;
3075 *return_bi = bi;
3076 }
3077 bi = nextbi;
3078 }
3079 if (bitmap_end)
3080 bitmap_endwrite(conf->mddev->bitmap, sh->sector,
3081 STRIPE_SECTORS, 0, 0);
3082 bitmap_end = 0;
3083 /* and fail all 'written' */
3084 bi = sh->dev[i].written;
3085 sh->dev[i].written = NULL;
3086 if (test_and_clear_bit(R5_SkipCopy, &sh->dev[i].flags)) {
3087 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
3088 sh->dev[i].page = sh->dev[i].orig_page;
3089 }
3090
3091 if (bi) bitmap_end = 1;
3092 while (bi && bi->bi_iter.bi_sector <
3093 sh->dev[i].sector + STRIPE_SECTORS) {
3094 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
3095 clear_bit(BIO_UPTODATE, &bi->bi_flags);
3096 if (!raid5_dec_bi_active_stripes(bi)) {
3097 md_write_end(conf->mddev);
3098 bi->bi_next = *return_bi;
3099 *return_bi = bi;
3100 }
3101 bi = bi2;
3102 }
3103
3104 /* fail any reads if this device is non-operational and
3105 * the data has not reached the cache yet.
3106 */
3107 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
3108 (!test_bit(R5_Insync, &sh->dev[i].flags) ||
3109 test_bit(R5_ReadError, &sh->dev[i].flags))) {
3110 spin_lock_irq(&sh->stripe_lock);
3111 bi = sh->dev[i].toread;
3112 sh->dev[i].toread = NULL;
3113 spin_unlock_irq(&sh->stripe_lock);
3114 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
3115 wake_up(&conf->wait_for_overlap);
3116 while (bi && bi->bi_iter.bi_sector <
3117 sh->dev[i].sector + STRIPE_SECTORS) {
3118 struct bio *nextbi =
3119 r5_next_bio(bi, sh->dev[i].sector);
3120 clear_bit(BIO_UPTODATE, &bi->bi_flags);
3121 if (!raid5_dec_bi_active_stripes(bi)) {
3122 bi->bi_next = *return_bi;
3123 *return_bi = bi;
3124 }
3125 bi = nextbi;
3126 }
3127 }
3128 if (bitmap_end)
3129 bitmap_endwrite(conf->mddev->bitmap, sh->sector,
3130 STRIPE_SECTORS, 0, 0);
3131 /* If we were in the middle of a write the parity block might
3132 * still be locked - so just clear all R5_LOCKED flags
3133 */
3134 clear_bit(R5_LOCKED, &sh->dev[i].flags);
3135 }
3136
3137 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
3138 if (atomic_dec_and_test(&conf->pending_full_writes))
3139 md_wakeup_thread(conf->mddev->thread);
3140 }
3141
3142 static void
3143 handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
3144 struct stripe_head_state *s)
3145 {
3146 int abort = 0;
3147 int i;
3148
3149 BUG_ON(sh->batch_head);
3150 clear_bit(STRIPE_SYNCING, &sh->state);
3151 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags))
3152 wake_up(&conf->wait_for_overlap);
3153 s->syncing = 0;
3154 s->replacing = 0;
3155 /* There is nothing more to do for sync/check/repair.
3156 * Don't even need to abort as that is handled elsewhere
3157 * if needed, and not always wanted e.g. if there is a known
3158 * bad block here.
3159 * For recover/replace we need to record a bad block on all
3160 * non-sync devices, or abort the recovery
3161 */
3162 if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) {
3163 /* During recovery devices cannot be removed, so
3164 * locking and refcounting of rdevs is not needed
3165 */
3166 for (i = 0; i < conf->raid_disks; i++) {
3167 struct md_rdev *rdev = conf->disks[i].rdev;
3168 if (rdev
3169 && !test_bit(Faulty, &rdev->flags)
3170 && !test_bit(In_sync, &rdev->flags)
3171 && !rdev_set_badblocks(rdev, sh->sector,
3172 STRIPE_SECTORS, 0))
3173 abort = 1;
3174 rdev = conf->disks[i].replacement;
3175 if (rdev
3176 && !test_bit(Faulty, &rdev->flags)
3177 && !test_bit(In_sync, &rdev->flags)
3178 && !rdev_set_badblocks(rdev, sh->sector,
3179 STRIPE_SECTORS, 0))
3180 abort = 1;
3181 }
3182 if (abort)
3183 conf->recovery_disabled =
3184 conf->mddev->recovery_disabled;
3185 }
3186 md_done_sync(conf->mddev, STRIPE_SECTORS, !abort);
3187 }
3188
3189 static int want_replace(struct stripe_head *sh, int disk_idx)
3190 {
3191 struct md_rdev *rdev;
3192 int rv = 0;
3193 /* Doing recovery so rcu locking not required */
3194 rdev = sh->raid_conf->disks[disk_idx].replacement;
3195 if (rdev
3196 && !test_bit(Faulty, &rdev->flags)
3197 && !test_bit(In_sync, &rdev->flags)
3198 && (rdev->recovery_offset <= sh->sector
3199 || rdev->mddev->recovery_cp <= sh->sector))
3200 rv = 1;
3201
3202 return rv;
3203 }
3204
3205 /* fetch_block - checks the given member device to see if its data needs
3206 * to be read or computed to satisfy a request.
3207 *
3208 * Returns 1 when no more member devices need to be checked, otherwise returns
3209 * 0 to tell the loop in handle_stripe_fill to continue
3210 */
3211
3212 static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
3213 int disk_idx, int disks)
3214 {
3215 struct r5dev *dev = &sh->dev[disk_idx];
3216 struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]],
3217 &sh->dev[s->failed_num[1]] };
3218 int i;
3219
3220
3221 if (test_bit(R5_LOCKED, &dev->flags) ||
3222 test_bit(R5_UPTODATE, &dev->flags))
3223 /* No point reading this as we already have it or have
3224 * decided to get it.
3225 */
3226 return 0;
3227
3228 if (dev->toread ||
3229 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)))
3230 /* We need this block to directly satisfy a request */
3231 return 1;
3232
3233 if (s->syncing || s->expanding ||
3234 (s->replacing && want_replace(sh, disk_idx)))
3235 /* When syncing, or expanding we read everything.
3236 * When replacing, we need the replaced block.
3237 */
3238 return 1;
3239
3240 if ((s->failed >= 1 && fdev[0]->toread) ||
3241 (s->failed >= 2 && fdev[1]->toread))
3242 /* If we want to read from a failed device, then
3243 * we need to actually read every other device.
3244 */
3245 return 1;
3246
3247 /* Sometimes neither read-modify-write nor reconstruct-write
3248 * cycles can work. In those cases we read every block we
3249 * can. Then the parity-update is certain to have enough to
3250 * work with.
3251 * This can only be a problem when we need to write something,
3252 * and some device has failed. If either of those tests
3253 * fail we need look no further.
3254 */
3255 if (!s->failed || !s->to_write)
3256 return 0;
3257
3258 if (test_bit(R5_Insync, &dev->flags) &&
3259 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3260 /* Pre-reads are not permitted until after a short delay
3261 * to gather multiple requests. However if this
3262 * device is not Insync, the block could only be computed
3263 * and there is no need to delay that.
3264 */
3265 return 0;
3266
3267 for (i = 0; i < s->failed; i++) {
3268 if (fdev[i]->towrite &&
3269 !test_bit(R5_UPTODATE, &fdev[i]->flags) &&
3270 !test_bit(R5_OVERWRITE, &fdev[i]->flags))
3271 /* If we have a partial write to a failed
3272 * device, then we will need to reconstruct
3273 * the content of that device, so all other
3274 * devices must be read.
3275 */
3276 return 1;
3277 }
3278
3279 /* If we are forced to do a reconstruct-write, either because
3280 * the current RAID6 implementation only supports that, or
3281 * because parity cannot be trusted and we are currently
3282 * recovering it, there is extra need to be careful.
3283 * If one of the devices that we would need to read, because
3284 * it is not being overwritten (and maybe not written at all)
3285 * is missing/faulty, then we need to read everything we can.
3286 */
3287 if (sh->raid_conf->level != 6 &&
3288 sh->sector < sh->raid_conf->mddev->recovery_cp)
3289 /* reconstruct-write isn't being forced */
3290 return 0;
3291 for (i = 0; i < s->failed; i++) {
3292 if (s->failed_num[i] != sh->pd_idx &&
3293 s->failed_num[i] != sh->qd_idx &&
3294 !test_bit(R5_UPTODATE, &fdev[i]->flags) &&
3295 !test_bit(R5_OVERWRITE, &fdev[i]->flags))
3296 return 1;
3297 }
3298
3299 return 0;
3300 }
3301
3302 static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
3303 int disk_idx, int disks)
3304 {
3305 struct r5dev *dev = &sh->dev[disk_idx];
3306
3307 /* is the data in this block needed, and can we get it? */
3308 if (need_this_block(sh, s, disk_idx, disks)) {
3309 /* we would like to get this block, possibly by computing it,
3310 * otherwise read it if the backing disk is insync
3311 */
3312 BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
3313 BUG_ON(test_bit(R5_Wantread, &dev->flags));
3314 BUG_ON(sh->batch_head);
3315 if ((s->uptodate == disks - 1) &&
3316 (s->failed && (disk_idx == s->failed_num[0] ||
3317 disk_idx == s->failed_num[1]))) {
3318 /* the disk has failed and we're requested to fetch it;
3319 * so compute it
3320 */
3321 pr_debug("Computing stripe %llu block %d\n",
3322 (unsigned long long)sh->sector, disk_idx);
3323 set_bit(STRIPE_COMPUTE_RUN, &sh->state);
3324 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
3325 set_bit(R5_Wantcompute, &dev->flags);
3326 sh->ops.target = disk_idx;
3327 sh->ops.target2 = -1; /* no 2nd target */
3328 s->req_compute = 1;
3329 /* Careful: from this point on 'uptodate' is in the eye
3330 * of raid_run_ops which services 'compute' operations
3331 * before writes. R5_Wantcompute flags a block that will
3332 * be R5_UPTODATE by the time it is needed for a
3333 * subsequent operation.
3334 */
3335 s->uptodate++;
3336 return 1;
3337 } else if (s->uptodate == disks-2 && s->failed >= 2) {
3338 /* Computing 2-failure is *very* expensive; only
3339 * do it if failed >= 2
3340 */
3341 int other;
3342 for (other = disks; other--; ) {
3343 if (other == disk_idx)
3344 continue;
3345 if (!test_bit(R5_UPTODATE,
3346 &sh->dev[other].flags))
3347 break;
3348 }
3349 BUG_ON(other < 0);
3350 pr_debug("Computing stripe %llu blocks %d,%d\n",
3351 (unsigned long long)sh->sector,
3352 disk_idx, other);
3353 set_bit(STRIPE_COMPUTE_RUN, &sh->state);
3354 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
3355 set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags);
3356 set_bit(R5_Wantcompute, &sh->dev[other].flags);
3357 sh->ops.target = disk_idx;
3358 sh->ops.target2 = other;
3359 s->uptodate += 2;
3360 s->req_compute = 1;
3361 return 1;
3362 } else if (test_bit(R5_Insync, &dev->flags)) {
3363 set_bit(R5_LOCKED, &dev->flags);
3364 set_bit(R5_Wantread, &dev->flags);
3365 s->locked++;
3366 pr_debug("Reading block %d (sync=%d)\n",
3367 disk_idx, s->syncing);
3368 }
3369 }
3370
3371 return 0;
3372 }
3373
3374 /**
3375 * handle_stripe_fill - read or compute data to satisfy pending requests.
3376 */
3377 static void handle_stripe_fill(struct stripe_head *sh,
3378 struct stripe_head_state *s,
3379 int disks)
3380 {
3381 int i;
3382
3383 /* look for blocks to read/compute, skip this if a compute
3384 * is already in flight, or if the stripe contents are in the
3385 * midst of changing due to a write
3386 */
3387 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
3388 !sh->reconstruct_state)
3389 for (i = disks; i--; )
3390 if (fetch_block(sh, s, i, disks))
3391 break;
3392 set_bit(STRIPE_HANDLE, &sh->state);
3393 }
3394
3395 /* handle_stripe_clean_event
3396 * any written block on an uptodate or failed drive can be returned.
3397 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
3398 * never LOCKED, so we don't need to test 'failed' directly.
3399 */
3400 static void handle_stripe_clean_event(struct r5conf *conf,
3401 struct stripe_head *sh, int disks, struct bio **return_bi)
3402 {
3403 int i;
3404 struct r5dev *dev;
3405 int discard_pending = 0;
3406 struct stripe_head *head_sh = sh;
3407 bool do_endio = false;
3408 int wakeup_nr = 0;
3409
3410 for (i = disks; i--; )
3411 if (sh->dev[i].written) {
3412 dev = &sh->dev[i];
3413 if (!test_bit(R5_LOCKED, &dev->flags) &&
3414 (test_bit(R5_UPTODATE, &dev->flags) ||
3415 test_bit(R5_Discard, &dev->flags) ||
3416 test_bit(R5_SkipCopy, &dev->flags))) {
3417 /* We can return any write requests */
3418 struct bio *wbi, *wbi2;
3419 pr_debug("Return write for disc %d\n", i);
3420 if (test_and_clear_bit(R5_Discard, &dev->flags))
3421 clear_bit(R5_UPTODATE, &dev->flags);
3422 if (test_and_clear_bit(R5_SkipCopy, &dev->flags)) {
3423 WARN_ON(test_bit(R5_UPTODATE, &dev->flags));
3424 }
3425 do_endio = true;
3426
3427 returnbi:
3428 dev->page = dev->orig_page;
3429 wbi = dev->written;
3430 dev->written = NULL;
3431 while (wbi && wbi->bi_iter.bi_sector <
3432 dev->sector + STRIPE_SECTORS) {
3433 wbi2 = r5_next_bio(wbi, dev->sector);
3434 if (!raid5_dec_bi_active_stripes(wbi)) {
3435 md_write_end(conf->mddev);
3436 wbi->bi_next = *return_bi;
3437 *return_bi = wbi;
3438 }
3439 wbi = wbi2;
3440 }
3441 bitmap_endwrite(conf->mddev->bitmap, sh->sector,
3442 STRIPE_SECTORS,
3443 !test_bit(STRIPE_DEGRADED, &sh->state),
3444 0);
3445 if (head_sh->batch_head) {
3446 sh = list_first_entry(&sh->batch_list,
3447 struct stripe_head,
3448 batch_list);
3449 if (sh != head_sh) {
3450 dev = &sh->dev[i];
3451 goto returnbi;
3452 }
3453 }
3454 sh = head_sh;
3455 dev = &sh->dev[i];
3456 } else if (test_bit(R5_Discard, &dev->flags))
3457 discard_pending = 1;
3458 WARN_ON(test_bit(R5_SkipCopy, &dev->flags));
3459 WARN_ON(dev->page != dev->orig_page);
3460 }
3461 if (!discard_pending &&
3462 test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) {
3463 clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
3464 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
3465 if (sh->qd_idx >= 0) {
3466 clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags);
3467 clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags);
3468 }
3469 /* now that discard is done we can proceed with any sync */
3470 clear_bit(STRIPE_DISCARD, &sh->state);
3471 /*
3472 * SCSI discard will change some bio fields and the stripe has
3473 * no updated data, so remove it from hash list and the stripe
3474 * will be reinitialized
3475 */
3476 spin_lock_irq(&conf->device_lock);
3477 unhash:
3478 remove_hash(sh);
3479 if (head_sh->batch_head) {
3480 sh = list_first_entry(&sh->batch_list,
3481 struct stripe_head, batch_list);
3482 if (sh != head_sh)
3483 goto unhash;
3484 }
3485 spin_unlock_irq(&conf->device_lock);
3486 sh = head_sh;
3487
3488 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
3489 set_bit(STRIPE_HANDLE, &sh->state);
3490
3491 }
3492
3493 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
3494 if (atomic_dec_and_test(&conf->pending_full_writes))
3495 md_wakeup_thread(conf->mddev->thread);
3496
3497 if (!head_sh->batch_head || !do_endio)
3498 return;
3499 for (i = 0; i < head_sh->disks; i++) {
3500 if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags))
3501 wakeup_nr++;
3502 }
3503 while (!list_empty(&head_sh->batch_list)) {
3504 int i;
3505 sh = list_first_entry(&head_sh->batch_list,
3506 struct stripe_head, batch_list);
3507 list_del_init(&sh->batch_list);
3508
3509 set_mask_bits(&sh->state, ~STRIPE_EXPAND_SYNC_FLAG,
3510 head_sh->state & ~((1 << STRIPE_ACTIVE) |
3511 (1 << STRIPE_PREREAD_ACTIVE) |
3512 STRIPE_EXPAND_SYNC_FLAG));
3513 sh->check_state = head_sh->check_state;
3514 sh->reconstruct_state = head_sh->reconstruct_state;
3515 for (i = 0; i < sh->disks; i++) {
3516 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
3517 wakeup_nr++;
3518 sh->dev[i].flags = head_sh->dev[i].flags;
3519 }
3520
3521 spin_lock_irq(&sh->stripe_lock);
3522 sh->batch_head = NULL;
3523 spin_unlock_irq(&sh->stripe_lock);
3524 if (sh->state & STRIPE_EXPAND_SYNC_FLAG)
3525 set_bit(STRIPE_HANDLE, &sh->state);
3526 release_stripe(sh);
3527 }
3528
3529 spin_lock_irq(&head_sh->stripe_lock);
3530 head_sh->batch_head = NULL;
3531 spin_unlock_irq(&head_sh->stripe_lock);
3532 wake_up_nr(&conf->wait_for_overlap, wakeup_nr);
3533 if (head_sh->state & STRIPE_EXPAND_SYNC_FLAG)
3534 set_bit(STRIPE_HANDLE, &head_sh->state);
3535 }
3536
3537 static void handle_stripe_dirtying(struct r5conf *conf,
3538 struct stripe_head *sh,
3539 struct stripe_head_state *s,
3540 int disks)
3541 {
3542 int rmw = 0, rcw = 0, i;
3543 sector_t recovery_cp = conf->mddev->recovery_cp;
3544
3545 /* Check whether resync is now happening or should start.
3546 * If yes, then the array is dirty (after unclean shutdown or
3547 * initial creation), so parity in some stripes might be inconsistent.
3548 * In this case, we need to always do reconstruct-write, to ensure
3549 * that in case of drive failure or read-error correction, we
3550 * generate correct data from the parity.
3551 */
3552 if (conf->rmw_level == PARITY_DISABLE_RMW ||
3553 (recovery_cp < MaxSector && sh->sector >= recovery_cp &&
3554 s->failed == 0)) {
3555 /* Calculate the real rcw later - for now make it
3556 * look like rcw is cheaper
3557 */
3558 rcw = 1; rmw = 2;
3559 pr_debug("force RCW rmw_level=%u, recovery_cp=%llu sh->sector=%llu\n",
3560 conf->rmw_level, (unsigned long long)recovery_cp,
3561 (unsigned long long)sh->sector);
3562 } else for (i = disks; i--; ) {
3563 /* would I have to read this buffer for read_modify_write */
3564 struct r5dev *dev = &sh->dev[i];
3565 if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) &&
3566 !test_bit(R5_LOCKED, &dev->flags) &&
3567 !(test_bit(R5_UPTODATE, &dev->flags) ||
3568 test_bit(R5_Wantcompute, &dev->flags))) {
3569 if (test_bit(R5_Insync, &dev->flags))
3570 rmw++;
3571 else
3572 rmw += 2*disks; /* cannot read it */
3573 }
3574 /* Would I have to read this buffer for reconstruct_write */
3575 if (!test_bit(R5_OVERWRITE, &dev->flags) &&
3576 i != sh->pd_idx && i != sh->qd_idx &&
3577 !test_bit(R5_LOCKED, &dev->flags) &&
3578 !(test_bit(R5_UPTODATE, &dev->flags) ||
3579 test_bit(R5_Wantcompute, &dev->flags))) {
3580 if (test_bit(R5_Insync, &dev->flags))
3581 rcw++;
3582 else
3583 rcw += 2*disks;
3584 }
3585 }
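	/*
	 * rmw/rcw now count the reads each strategy would need; a block that
	 * cannot be read (not Insync) was charged 2*disks above so that
	 * strategy is effectively ruled out.
	 */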
3586 pr_debug("for sector %llu, rmw=%d rcw=%d\n",
3587 (unsigned long long)sh->sector, rmw, rcw);
3588 set_bit(STRIPE_HANDLE, &sh->state);
3589 if ((rmw < rcw || (rmw == rcw && conf->rmw_level == PARITY_ENABLE_RMW)) && rmw > 0) {
3590 /* prefer read-modify-write, but need to get some data */
3591 if (conf->mddev->queue)
3592 blk_add_trace_msg(conf->mddev->queue,
3593 "raid5 rmw %llu %d",
3594 (unsigned long long)sh->sector, rmw);
3595 for (i = disks; i--; ) {
3596 struct r5dev *dev = &sh->dev[i];
3597 if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) &&
3598 !test_bit(R5_LOCKED, &dev->flags) &&
3599 !(test_bit(R5_UPTODATE, &dev->flags) ||
3600 test_bit(R5_Wantcompute, &dev->flags)) &&
3601 test_bit(R5_Insync, &dev->flags)) {
3602 if (test_bit(STRIPE_PREREAD_ACTIVE,
3603 &sh->state)) {
3604 pr_debug("Read_old block %d for r-m-w\n",
3605 i);
3606 set_bit(R5_LOCKED, &dev->flags);
3607 set_bit(R5_Wantread, &dev->flags);
3608 s->locked++;
3609 } else {
3610 set_bit(STRIPE_DELAYED, &sh->state);
3611 set_bit(STRIPE_HANDLE, &sh->state);
3612 }
3613 }
3614 }
3615 }
3616 if ((rcw < rmw || (rcw == rmw && conf->rmw_level != PARITY_ENABLE_RMW)) && rcw > 0) {
3617 /* want reconstruct write, but need to get some data */
3618 int qread =0;
3619 rcw = 0;
3620 for (i = disks; i--; ) {
3621 struct r5dev *dev = &sh->dev[i];
3622 if (!test_bit(R5_OVERWRITE, &dev->flags) &&
3623 i != sh->pd_idx && i != sh->qd_idx &&
3624 !test_bit(R5_LOCKED, &dev->flags) &&
3625 !(test_bit(R5_UPTODATE, &dev->flags) ||
3626 test_bit(R5_Wantcompute, &dev->flags))) {
3627 rcw++;
3628 if (test_bit(R5_Insync, &dev->flags) &&
3629 test_bit(STRIPE_PREREAD_ACTIVE,
3630 &sh->state)) {
3631 pr_debug("Read_old block "
3632 "%d for Reconstruct\n", i);
3633 set_bit(R5_LOCKED, &dev->flags);
3634 set_bit(R5_Wantread, &dev->flags);
3635 s->locked++;
3636 qread++;
3637 } else {
3638 set_bit(STRIPE_DELAYED, &sh->state);
3639 set_bit(STRIPE_HANDLE, &sh->state);
3640 }
3641 }
3642 }
3643 if (rcw && conf->mddev->queue)
3644 blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d",
3645 (unsigned long long)sh->sector,
3646 rcw, qread, test_bit(STRIPE_DELAYED, &sh->state));
3647 }
3648
3649 if (rcw > disks && rmw > disks &&
3650 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3651 set_bit(STRIPE_DELAYED, &sh->state);
3652
3653 /* now if nothing is locked, and if we have enough data,
3654 * we can start a write request
3655 */
3656 /* since handle_stripe can be called at any time we need to handle the
3657 * case where a compute block operation has been submitted and then a
3658 * subsequent call wants to start a write request. raid_run_ops only
3659 * handles the case where compute block and reconstruct are requested
3660 * simultaneously. If this is not the case then new writes need to be
3661 * held off until the compute completes.
3662 */
3663 if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
3664 (s->locked == 0 && (rcw == 0 || rmw == 0) &&
3665 !test_bit(STRIPE_BIT_DELAY, &sh->state)))
3666 schedule_reconstruction(sh, s, rcw == 0, 0);
3667 }
3668
3669 static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
3670 struct stripe_head_state *s, int disks)
3671 {
3672 struct r5dev *dev = NULL;
3673
3674 BUG_ON(sh->batch_head);
3675 set_bit(STRIPE_HANDLE, &sh->state);
3676
3677 switch (sh->check_state) {
3678 case check_state_idle:
3679 /* start a new check operation if there are no failures */
3680 if (s->failed == 0) {
3681 BUG_ON(s->uptodate != disks);
3682 sh->check_state = check_state_run;
3683 set_bit(STRIPE_OP_CHECK, &s->ops_request);
3684 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
3685 s->uptodate--;
3686 break;
3687 }
3688 dev = &sh->dev[s->failed_num[0]];
3689 /* fall through */
3690 case check_state_compute_result:
3691 sh->check_state = check_state_idle;
3692 if (!dev)
3693 dev = &sh->dev[sh->pd_idx];
3694
3695 /* check that a write has not made the stripe insync */
3696 if (test_bit(STRIPE_INSYNC, &sh->state))
3697 break;
3698
3699 /* either failed parity check, or recovery is happening */
3700 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
3701 BUG_ON(s->uptodate != disks);
3702
3703 set_bit(R5_LOCKED, &dev->flags);
3704 s->locked++;
3705 set_bit(R5_Wantwrite, &dev->flags);
3706
3707 clear_bit(STRIPE_DEGRADED, &sh->state);
3708 set_bit(STRIPE_INSYNC, &sh->state);
3709 break;
3710 case check_state_run:
3711 break; /* we will be called again upon completion */
3712 case check_state_check_result:
3713 sh->check_state = check_state_idle;
3714
3715 /* if a failure occurred during the check operation, leave
3716 * STRIPE_INSYNC not set and let the stripe be handled again
3717 */
3718 if (s->failed)
3719 break;
3720
3721 /* handle a successful check operation, if parity is correct
3722 * we are done. Otherwise update the mismatch count and repair
3723 * parity if !MD_RECOVERY_CHECK
3724 */
3725 if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0)
3726 /* parity is correct (on disc,
3727 * not in buffer any more)
3728 */
3729 set_bit(STRIPE_INSYNC, &sh->state);
3730 else {
3731 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
3732 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
3733 /* don't try to repair!! */
3734 set_bit(STRIPE_INSYNC, &sh->state);
3735 else {
3736 sh->check_state = check_state_compute_run;
3737 set_bit(STRIPE_COMPUTE_RUN, &sh->state);
3738 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
3739 set_bit(R5_Wantcompute,
3740 &sh->dev[sh->pd_idx].flags);
3741 sh->ops.target = sh->pd_idx;
3742 sh->ops.target2 = -1;
3743 s->uptodate++;
3744 }
3745 }
3746 break;
3747 case check_state_compute_run:
3748 break;
3749 default:
3750 printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
3751 __func__, sh->check_state,
3752 (unsigned long long) sh->sector);
3753 BUG();
3754 }
3755 }
3756
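/*
* handle_parity_checks6 - RAID6 counterpart of the above: depending on how
* many devices have failed, check P, Q or both, then schedule writes for
* failed blocks and for whichever of P/Q the zero-sum check found wrong,
* unless MD_RECOVERY_CHECK forbids repair.
*/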
3757 static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
3758 struct stripe_head_state *s,
3759 int disks)
3760 {
3761 int pd_idx = sh->pd_idx;
3762 int qd_idx = sh->qd_idx;
3763 struct r5dev *dev;
3764
3765 BUG_ON(sh->batch_head);
3766 set_bit(STRIPE_HANDLE, &sh->state);
3767
3768 BUG_ON(s->failed > 2);
3769
3770 /* Want to check and possibly repair P and Q.
3771 * However there could be one 'failed' device, in which
3772 * case we can only check one of them, possibly using the
3773 * other to generate missing data
3774 */
3775
3776 switch (sh->check_state) {
3777 case check_state_idle:
3778 /* start a new check operation if there are < 2 failures */
3779 if (s->failed == s->q_failed) {
3780 /* The only possible failed device holds Q, so it
3781 * makes sense to check P (If anything else were failed,
3782 * we would have used P to recreate it).
3783 */
3784 sh->check_state = check_state_run;
3785 }
3786 if (!s->q_failed && s->failed < 2) {
3787 /* Q is not failed, and we didn't use it to generate
3788 * anything, so it makes sense to check it
3789 */
3790 if (sh->check_state == check_state_run)
3791 sh->check_state = check_state_run_pq;
3792 else
3793 sh->check_state = check_state_run_q;
3794 }
3795
3796 /* discard potentially stale zero_sum_result */
3797 sh->ops.zero_sum_result = 0;
3798
3799 if (sh->check_state == check_state_run) {
3800 /* async_xor_zero_sum destroys the contents of P */
3801 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
3802 s->uptodate--;
3803 }
3804 if (sh->check_state >= check_state_run &&
3805 sh->check_state <= check_state_run_pq) {
3806 /* async_syndrome_zero_sum preserves P and Q, so
3807 * no need to mark them !uptodate here
3808 */
3809 set_bit(STRIPE_OP_CHECK, &s->ops_request);
3810 break;
3811 }
3812
3813 /* we have 2-disk failure */
3814 BUG_ON(s->failed != 2);
3815 /* fall through */
3816 case check_state_compute_result:
3817 sh->check_state = check_state_idle;
3818
3819 /* check that a write has not made the stripe insync */
3820 if (test_bit(STRIPE_INSYNC, &sh->state))
3821 break;
3822
3823 /* now write out any block on a failed drive,
3824 * or P or Q if they were recomputed
3825 */
3826 BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
3827 if (s->failed == 2) {
3828 dev = &sh->dev[s->failed_num[1]];
3829 s->locked++;
3830 set_bit(R5_LOCKED, &dev->flags);
3831 set_bit(R5_Wantwrite, &dev->flags);
3832 }
3833 if (s->failed >= 1) {
3834 dev = &sh->dev[s->failed_num[0]];
3835 s->locked++;
3836 set_bit(R5_LOCKED, &dev->flags);
3837 set_bit(R5_Wantwrite, &dev->flags);
3838 }
3839 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
3840 dev = &sh->dev[pd_idx];
3841 s->locked++;
3842 set_bit(R5_LOCKED, &dev->flags);
3843 set_bit(R5_Wantwrite, &dev->flags);
3844 }
3845 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
3846 dev = &sh->dev[qd_idx];
3847 s->locked++;
3848 set_bit(R5_LOCKED, &dev->flags);
3849 set_bit(R5_Wantwrite, &dev->flags);
3850 }
3851 clear_bit(STRIPE_DEGRADED, &sh->state);
3852
3853 set_bit(STRIPE_INSYNC, &sh->state);
3854 break;
3855 case check_state_run:
3856 case check_state_run_q:
3857 case check_state_run_pq:
3858 break; /* we will be called again upon completion */
3859 case check_state_check_result:
3860 sh->check_state = check_state_idle;
3861
3862 /* handle a successful check operation, if parity is correct
3863 * we are done. Otherwise update the mismatch count and repair
3864 * parity if !MD_RECOVERY_CHECK
3865 */
3866 if (sh->ops.zero_sum_result == 0) {
3867 /* both parities are correct */
3868 if (!s->failed)
3869 set_bit(STRIPE_INSYNC, &sh->state);
3870 else {
3871 /* in contrast to the raid5 case we can validate
3872 * parity, but still have a failure to write
3873 * back
3874 */
3875 sh->check_state = check_state_compute_result;
3876 /* Returning at this point means that we may go
3877 * off and bring p and/or q uptodate again so
3878 * we make sure to check zero_sum_result again
3879 * to verify if p or q need writeback
3880 */
3881 }
3882 } else {
3883 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
3884 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
3885 /* don't try to repair!! */
3886 set_bit(STRIPE_INSYNC, &sh->state);
3887 else {
3888 int *target = &sh->ops.target;
3889
3890 sh->ops.target = -1;
3891 sh->ops.target2 = -1;
3892 sh->check_state = check_state_compute_run;
3893 set_bit(STRIPE_COMPUTE_RUN, &sh->state);
3894 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
3895 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
3896 set_bit(R5_Wantcompute,
3897 &sh->dev[pd_idx].flags);
3898 *target = pd_idx;
3899 target = &sh->ops.target2;
3900 s->uptodate++;
3901 }
3902 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
3903 set_bit(R5_Wantcompute,
3904 &sh->dev[qd_idx].flags);
3905 *target = qd_idx;
3906 s->uptodate++;
3907 }
3908 }
3909 }
3910 break;
3911 case check_state_compute_run:
3912 break;
3913 default:
3914 printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
3915 __func__, sh->check_state,
3916 (unsigned long long) sh->sector);
3917 BUG();
3918 }
3919 }
3920
3921 static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
3922 {
3923 int i;
3924
3925 /* We have read all the blocks in this stripe and now we need to
3926 * copy some of them into a target stripe for expand.
3927 */
3928 struct dma_async_tx_descriptor *tx = NULL;
3929 BUG_ON(sh->batch_head);
3930 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
3931 for (i = 0; i < sh->disks; i++)
3932 if (i != sh->pd_idx && i != sh->qd_idx) {
3933 int dd_idx, j;
3934 struct stripe_head *sh2;
3935 struct async_submit_ctl submit;
3936
3937 sector_t bn = compute_blocknr(sh, i, 1);
3938 sector_t s = raid5_compute_sector(conf, bn, 0,
3939 &dd_idx, NULL);
3940 sh2 = get_active_stripe(conf, s, 0, 1, 1);
3941 if (sh2 == NULL)
3942 /* so far only the early blocks of this stripe
3943 * have been requested. When later blocks
3944 * get requested, we will try again
3945 */
3946 continue;
3947 if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
3948 test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
3949 /* must have already done this block */
3950 release_stripe(sh2);
3951 continue;
3952 }
3953
3954 /* place all the copies on one channel */
3955 init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
3956 tx = async_memcpy(sh2->dev[dd_idx].page,
3957 sh->dev[i].page, 0, 0, STRIPE_SIZE,
3958 &submit);
3959
3960 set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
3961 set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
3962 for (j = 0; j < conf->raid_disks; j++)
3963 if (j != sh2->pd_idx &&
3964 j != sh2->qd_idx &&
3965 !test_bit(R5_Expanded, &sh2->dev[j].flags))
3966 break;
3967 if (j == conf->raid_disks) {
3968 set_bit(STRIPE_EXPAND_READY, &sh2->state);
3969 set_bit(STRIPE_HANDLE, &sh2->state);
3970 }
3971 release_stripe(sh2);
3972
3973 }
3974 /* done submitting copies, wait for them to complete */
3975 async_tx_quiesce(&tx);
3976 }
3977
3978 /*
3979 * handle_stripe - do things to a stripe.
3980 *
3981 * We lock the stripe by setting STRIPE_ACTIVE and then examine the
3982 * state of various bits to see what needs to be done.
3983 * Possible results:
3984 * return some read requests which now have data
3985 * return some write requests which are safely on storage
3986 * schedule a read on some buffers
3987 * schedule a write of some buffers
3988 * return confirmation of parity correctness
3989 *
3990 */
3991
3992 static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
3993 {
3994 struct r5conf *conf = sh->raid_conf;
3995 int disks = sh->disks;
3996 struct r5dev *dev;
3997 int i;
3998 int do_recovery = 0;
3999
4000 memset(s, 0, sizeof(*s));
4001
4002 s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state) && !sh->batch_head;
4003 s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state) && !sh->batch_head;
4004 s->failed_num[0] = -1;
4005 s->failed_num[1] = -1;
4006
4007 /* Now to look around and see what can be done */
4008 rcu_read_lock();
4009 for (i = disks; i--; ) {
4010 struct md_rdev *rdev;
4011 sector_t first_bad;
4012 int bad_sectors;
4013 int is_bad = 0;
4014
4015 dev = &sh->dev[i];
4016
4017 pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
4018 i, dev->flags,
4019 dev->toread, dev->towrite, dev->written);
4020 /* maybe we can reply to a read
4021 *
4022 * new wantfill requests are only permitted while
4023 * ops_complete_biofill is guaranteed to be inactive
4024 */
4025 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
4026 !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
4027 set_bit(R5_Wantfill, &dev->flags);
4028
4029 /* now count some things */
4030 if (test_bit(R5_LOCKED, &dev->flags))
4031 s->locked++;
4032 if (test_bit(R5_UPTODATE, &dev->flags))
4033 s->uptodate++;
4034 if (test_bit(R5_Wantcompute, &dev->flags)) {
4035 s->compute++;
4036 BUG_ON(s->compute > 2);
4037 }
4038
4039 if (test_bit(R5_Wantfill, &dev->flags))
4040 s->to_fill++;
4041 else if (dev->toread)
4042 s->to_read++;
4043 if (dev->towrite) {
4044 s->to_write++;
4045 if (!test_bit(R5_OVERWRITE, &dev->flags))
4046 s->non_overwrite++;
4047 }
4048 if (dev->written)
4049 s->written++;
4050 /* Prefer to use the replacement for reads, but only
4051 * if it is recovered enough and has no bad blocks.
4052 */
4053 rdev = rcu_dereference(conf->disks[i].replacement);
4054 if (rdev && !test_bit(Faulty, &rdev->flags) &&
4055 rdev->recovery_offset >= sh->sector + STRIPE_SECTORS &&
4056 !is_badblock(rdev, sh->sector, STRIPE_SECTORS,
4057 &first_bad, &bad_sectors))
4058 set_bit(R5_ReadRepl, &dev->flags);
4059 else {
4060 if (rdev)
4061 set_bit(R5_NeedReplace, &dev->flags);
4062 rdev = rcu_dereference(conf->disks[i].rdev);
4063 clear_bit(R5_ReadRepl, &dev->flags);
4064 }
4065 if (rdev && test_bit(Faulty, &rdev->flags))
4066 rdev = NULL;
4067 if (rdev) {
4068 is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
4069 &first_bad, &bad_sectors);
4070 if (s->blocked_rdev == NULL
4071 && (test_bit(Blocked, &rdev->flags)
4072 || is_bad < 0)) {
4073 if (is_bad < 0)
4074 set_bit(BlockedBadBlocks,
4075 &rdev->flags);
4076 s->blocked_rdev = rdev;
4077 atomic_inc(&rdev->nr_pending);
4078 }
4079 }
4080 clear_bit(R5_Insync, &dev->flags);
4081 if (!rdev)
4082 /* Not in-sync */;
4083 else if (is_bad) {
4084 /* also not in-sync */
4085 if (!test_bit(WriteErrorSeen, &rdev->flags) &&
4086 test_bit(R5_UPTODATE, &dev->flags)) {
4087 /* treat as in-sync, but with a read error
4088 * which we can now try to correct
4089 */
4090 set_bit(R5_Insync, &dev->flags);
4091 set_bit(R5_ReadError, &dev->flags);
4092 }
4093 } else if (test_bit(In_sync, &rdev->flags))
4094 set_bit(R5_Insync, &dev->flags);
4095 else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
4096 /* in sync if before recovery_offset */
4097 set_bit(R5_Insync, &dev->flags);
4098 else if (test_bit(R5_UPTODATE, &dev->flags) &&
4099 test_bit(R5_Expanded, &dev->flags))
4100 /* If we've reshaped into here, we assume it is Insync.
4101 * We will shortly update recovery_offset to make
4102 * it official.
4103 */
4104 set_bit(R5_Insync, &dev->flags);
4105
4106 if (test_bit(R5_WriteError, &dev->flags)) {
4107 /* This flag does not apply to '.replacement'
4108 * only to .rdev, so make sure to check that */
4109 struct md_rdev *rdev2 = rcu_dereference(
4110 conf->disks[i].rdev);
4111 if (rdev2 == rdev)
4112 clear_bit(R5_Insync, &dev->flags);
4113 if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
4114 s->handle_bad_blocks = 1;
4115 atomic_inc(&rdev2->nr_pending);
4116 } else
4117 clear_bit(R5_WriteError, &dev->flags);
4118 }
4119 if (test_bit(R5_MadeGood, &dev->flags)) {
4120 /* This flag does not apply to '.replacement'
4121 * only to .rdev, so make sure to check that */
4122 struct md_rdev *rdev2 = rcu_dereference(
4123 conf->disks[i].rdev);
4124 if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
4125 s->handle_bad_blocks = 1;
4126 atomic_inc(&rdev2->nr_pending);
4127 } else
4128 clear_bit(R5_MadeGood, &dev->flags);
4129 }
4130 if (test_bit(R5_MadeGoodRepl, &dev->flags)) {
4131 struct md_rdev *rdev2 = rcu_dereference(
4132 conf->disks[i].replacement);
4133 if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
4134 s->handle_bad_blocks = 1;
4135 atomic_inc(&rdev2->nr_pending);
4136 } else
4137 clear_bit(R5_MadeGoodRepl, &dev->flags);
4138 }
4139 if (!test_bit(R5_Insync, &dev->flags)) {
4140 /* The ReadError flag will just be confusing now */
4141 clear_bit(R5_ReadError, &dev->flags);
4142 clear_bit(R5_ReWrite, &dev->flags);
4143 }
4144 if (test_bit(R5_ReadError, &dev->flags))
4145 clear_bit(R5_Insync, &dev->flags);
4146 if (!test_bit(R5_Insync, &dev->flags)) {
4147 if (s->failed < 2)
4148 s->failed_num[s->failed] = i;
4149 s->failed++;
4150 if (rdev && !test_bit(Faulty, &rdev->flags))
4151 do_recovery = 1;
4152 }
4153 }
4154 if (test_bit(STRIPE_SYNCING, &sh->state)) {
4155 /* If there is a failed device being replaced,
4156 * we must be recovering.
4157 * else if we are after recovery_cp, we must be syncing,
4158 * else if MD_RECOVERY_REQUESTED is set, we are also syncing,
4159 * else we can only be replacing.
4160 * Sync and recovery both need to read all devices, and so
4161 * use the same flag.
4162 */
4163 if (do_recovery ||
4164 sh->sector >= conf->mddev->recovery_cp ||
4165 test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery)))
4166 s->syncing = 1;
4167 else
4168 s->replacing = 1;
4169 }
4170 rcu_read_unlock();
4171 }
4172
4173 static int clear_batch_ready(struct stripe_head *sh)
4174 {
4175 struct stripe_head *tmp;
4176 if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state))
4177 return 0;
4178 spin_lock(&sh->stripe_lock);
4179 if (!sh->batch_head) {
4180 spin_unlock(&sh->stripe_lock);
4181 return 0;
4182 }
4183
4184 /*
4185 * this stripe could have been added to a batch list before we
4186 * checked BATCH_READY, so skip it
4187 */
4188 if (sh->batch_head != sh) {
4189 spin_unlock(&sh->stripe_lock);
4190 return 1;
4191 }
4192 spin_lock(&sh->batch_lock);
4193 list_for_each_entry(tmp, &sh->batch_list, batch_list)
4194 clear_bit(STRIPE_BATCH_READY, &tmp->state);
4195 spin_unlock(&sh->batch_lock);
4196 spin_unlock(&sh->stripe_lock);
4197
4198 /*
4199 * BATCH_READY is cleared, no new stripes can be added.
4200 * batch_list can be accessed without lock
4201 */
4202 return 0;
4203 }
4204
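/*
* check_break_stripe_batch_list - if STRIPE_BATCH_ERR was set on the batch
* head, dissolve the batch: each member inherits the head's stripe state,
* check/reconstruct state and most device flags, drops its batch_head
* pointer and is queued for individual handling.
*/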
4205 static void check_break_stripe_batch_list(struct stripe_head *sh)
4206 {
4207 struct stripe_head *head_sh, *next;
4208 int i;
4209
4210 if (!test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state))
4211 return;
4212
4213 head_sh = sh;
4214
4215 list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) {
4216
4217 list_del_init(&sh->batch_list);
4218
4219 set_mask_bits(&sh->state, ~STRIPE_EXPAND_SYNC_FLAG,
4220 head_sh->state & ~((1 << STRIPE_ACTIVE) |
4221 (1 << STRIPE_PREREAD_ACTIVE) |
4222 (1 << STRIPE_DEGRADED) |
4223 STRIPE_EXPAND_SYNC_FLAG));
4224 sh->check_state = head_sh->check_state;
4225 sh->reconstruct_state = head_sh->reconstruct_state;
4226 for (i = 0; i < sh->disks; i++)
4227 sh->dev[i].flags = head_sh->dev[i].flags &
4228 (~((1 << R5_WriteError) | (1 << R5_Overlap)));
4229
4230 spin_lock_irq(&sh->stripe_lock);
4231 sh->batch_head = NULL;
4232 spin_unlock_irq(&sh->stripe_lock);
4233
4234 set_bit(STRIPE_HANDLE, &sh->state);
4235 release_stripe(sh);
4236 }
4237 }
4238
4239 static void handle_stripe(struct stripe_head *sh)
4240 {
4241 struct stripe_head_state s;
4242 struct r5conf *conf = sh->raid_conf;
4243 int i;
4244 int prexor;
4245 int disks = sh->disks;
4246 struct r5dev *pdev, *qdev;
4247
4248 clear_bit(STRIPE_HANDLE, &sh->state);
4249 if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) {
4250 /* already being handled, ensure it gets handled
4251 * again when current action finishes */
4252 set_bit(STRIPE_HANDLE, &sh->state);
4253 return;
4254 }
4255
4256 if (clear_batch_ready(sh)) {
4257 clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
4258 return;
4259 }
4260
4261 check_break_stripe_batch_list(sh);
4262
4263 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) {
4264 spin_lock(&sh->stripe_lock);
4265 /* Cannot process 'sync' concurrently with 'discard' */
4266 if (!test_bit(STRIPE_DISCARD, &sh->state) &&
4267 test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
4268 set_bit(STRIPE_SYNCING, &sh->state);
4269 clear_bit(STRIPE_INSYNC, &sh->state);
4270 clear_bit(STRIPE_REPLACED, &sh->state);
4271 }
4272 spin_unlock(&sh->stripe_lock);
4273 }
4274 clear_bit(STRIPE_DELAYED, &sh->state);
4275
4276 pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
4277 "pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n",
4278 (unsigned long long)sh->sector, sh->state,
4279 atomic_read(&sh->count), sh->pd_idx, sh->qd_idx,
4280 sh->check_state, sh->reconstruct_state);
4281
4282 analyse_stripe(sh, &s);
4283
4284 if (s.handle_bad_blocks) {
4285 set_bit(STRIPE_HANDLE, &sh->state);
4286 goto finish;
4287 }
4288
4289 if (unlikely(s.blocked_rdev)) {
4290 if (s.syncing || s.expanding || s.expanded ||
4291 s.replacing || s.to_write || s.written) {
4292 set_bit(STRIPE_HANDLE, &sh->state);
4293 goto finish;
4294 }
4295 /* There is nothing for the blocked_rdev to block */
4296 rdev_dec_pending(s.blocked_rdev, conf->mddev);
4297 s.blocked_rdev = NULL;
4298 }
4299
4300 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
4301 set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
4302 set_bit(STRIPE_BIOFILL_RUN, &sh->state);
4303 }
4304
4305 pr_debug("locked=%d uptodate=%d to_read=%d"
4306 " to_write=%d failed=%d failed_num=%d,%d\n",
4307 s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
4308 s.failed_num[0], s.failed_num[1]);
4309 /* check if the array has lost more than max_degraded devices and,
4310 * if so, some requests might need to be failed.
4311 */
4312 if (s.failed > conf->max_degraded) {
4313 sh->check_state = 0;
4314 sh->reconstruct_state = 0;
4315 if (s.to_read+s.to_write+s.written)
4316 handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
4317 if (s.syncing + s.replacing)
4318 handle_failed_sync(conf, sh, &s);
4319 }
4320
4321 /* Now we check to see if any write operations have recently
4322 * completed
4323 */
4324 prexor = 0;
4325 if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
4326 prexor = 1;
4327 if (sh->reconstruct_state == reconstruct_state_drain_result ||
4328 sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
4329 sh->reconstruct_state = reconstruct_state_idle;
4330
4331 /* All the 'written' buffers and the parity block are ready to
4332 * be written back to disk
4333 */
4334 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) &&
4335 !test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags));
4336 BUG_ON(sh->qd_idx >= 0 &&
4337 !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags) &&
4338 !test_bit(R5_Discard, &sh->dev[sh->qd_idx].flags));
4339 for (i = disks; i--; ) {
4340 struct r5dev *dev = &sh->dev[i];
4341 if (test_bit(R5_LOCKED, &dev->flags) &&
4342 (i == sh->pd_idx || i == sh->qd_idx ||
4343 dev->written)) {
4344 pr_debug("Writing block %d\n", i);
4345 set_bit(R5_Wantwrite, &dev->flags);
4346 if (prexor)
4347 continue;
4348 if (s.failed > 1)
4349 continue;
4350 if (!test_bit(R5_Insync, &dev->flags) ||
4351 ((i == sh->pd_idx || i == sh->qd_idx) &&
4352 s.failed == 0))
4353 set_bit(STRIPE_INSYNC, &sh->state);
4354 }
4355 }
4356 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
4357 s.dec_preread_active = 1;
4358 }
4359
4360 /*
4361 * might be able to return some write requests if the parity blocks
4362 * are safe, or on a failed drive
4363 */
4364 pdev = &sh->dev[sh->pd_idx];
4365 s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx)
4366 || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx);
4367 qdev = &sh->dev[sh->qd_idx];
4368 s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx)
4369 || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx)
4370 || conf->level < 6;
4371
4372 if (s.written &&
4373 (s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
4374 && !test_bit(R5_LOCKED, &pdev->flags)
4375 && (test_bit(R5_UPTODATE, &pdev->flags) ||
4376 test_bit(R5_Discard, &pdev->flags))))) &&
4377 (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
4378 && !test_bit(R5_LOCKED, &qdev->flags)
4379 && (test_bit(R5_UPTODATE, &qdev->flags) ||
4380 test_bit(R5_Discard, &qdev->flags))))))
4381 handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
4382
4383 /* Now we might consider reading some blocks, either to check/generate
4384 * parity, or to satisfy requests
4385 * or to load a block that is being partially written.
4386 */
4387 if (s.to_read || s.non_overwrite
4388 || (conf->level == 6 && s.to_write && s.failed)
4389 || (s.syncing && (s.uptodate + s.compute < disks))
4390 || s.replacing
4391 || s.expanding)
4392 handle_stripe_fill(sh, &s, disks);
4393
4394 /* Now to consider new write requests and what else, if anything
4395 * should be read. We do not handle new writes when:
4396 * 1/ A 'write' operation (copy+xor) is already in flight.
4397 * 2/ A 'check' operation is in flight, as it may clobber the parity
4398 * block.
4399 */
4400 if (s.to_write && !sh->reconstruct_state && !sh->check_state)
4401 handle_stripe_dirtying(conf, sh, &s, disks);
4402
4403 /* maybe we need to check and possibly fix the parity for this stripe
4404 * Any reads will already have been scheduled, so we just see if enough
4405 * data is available. The parity check is held off while parity
4406 * dependent operations are in flight.
4407 */
4408 if (sh->check_state ||
4409 (s.syncing && s.locked == 0 &&
4410 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
4411 !test_bit(STRIPE_INSYNC, &sh->state))) {
4412 if (conf->level == 6)
4413 handle_parity_checks6(conf, sh, &s, disks);
4414 else
4415 handle_parity_checks5(conf, sh, &s, disks);
4416 }
4417
4418 if ((s.replacing || s.syncing) && s.locked == 0
4419 && !test_bit(STRIPE_COMPUTE_RUN, &sh->state)
4420 && !test_bit(STRIPE_REPLACED, &sh->state)) {
4421 /* Write out to replacement devices where possible */
4422 for (i = 0; i < conf->raid_disks; i++)
4423 if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) {
4424 WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags));
4425 set_bit(R5_WantReplace, &sh->dev[i].flags);
4426 set_bit(R5_LOCKED, &sh->dev[i].flags);
4427 s.locked++;
4428 }
4429 if (s.replacing)
4430 set_bit(STRIPE_INSYNC, &sh->state);
4431 set_bit(STRIPE_REPLACED, &sh->state);
4432 }
4433 if ((s.syncing || s.replacing) && s.locked == 0 &&
4434 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
4435 test_bit(STRIPE_INSYNC, &sh->state)) {
4436 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
4437 clear_bit(STRIPE_SYNCING, &sh->state);
4438 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags))
4439 wake_up(&conf->wait_for_overlap);
4440 }
4441
4442 /* If a failed drive just has a ReadError, then we might need
4443 * to progress the repair/check process
4444 */
4445 if (s.failed <= conf->max_degraded && !conf->mddev->ro)
4446 for (i = 0; i < s.failed; i++) {
4447 struct r5dev *dev = &sh->dev[s.failed_num[i]];
4448 if (test_bit(R5_ReadError, &dev->flags)
4449 && !test_bit(R5_LOCKED, &dev->flags)
4450 && test_bit(R5_UPTODATE, &dev->flags)
4451 ) {
4452 if (!test_bit(R5_ReWrite, &dev->flags)) {
4453 set_bit(R5_Wantwrite, &dev->flags);
4454 set_bit(R5_ReWrite, &dev->flags);
4455 set_bit(R5_LOCKED, &dev->flags);
4456 s.locked++;
4457 } else {
4458 /* let's read it back */
4459 set_bit(R5_Wantread, &dev->flags);
4460 set_bit(R5_LOCKED, &dev->flags);
4461 s.locked++;
4462 }
4463 }
4464 }
4465
4466 /* Finish reconstruct operations initiated by the expansion process */
4467 if (sh->reconstruct_state == reconstruct_state_result) {
4468 struct stripe_head *sh_src
4469 = get_active_stripe(conf, sh->sector, 1, 1, 1);
4470 if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) {
4471 /* sh cannot be written until sh_src has been read.
4472 * so arrange for sh to be delayed a little
4473 */
4474 set_bit(STRIPE_DELAYED, &sh->state);
4475 set_bit(STRIPE_HANDLE, &sh->state);
4476 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
4477 &sh_src->state))
4478 atomic_inc(&conf->preread_active_stripes);
4479 release_stripe(sh_src);
4480 goto finish;
4481 }
4482 if (sh_src)
4483 release_stripe(sh_src);
4484
4485 sh->reconstruct_state = reconstruct_state_idle;
4486 clear_bit(STRIPE_EXPANDING, &sh->state);
4487 for (i = conf->raid_disks; i--; ) {
4488 set_bit(R5_Wantwrite, &sh->dev[i].flags);
4489 set_bit(R5_LOCKED, &sh->dev[i].flags);
4490 s.locked++;
4491 }
4492 }
4493
4494 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
4495 !sh->reconstruct_state) {
4496 /* Need to write out all blocks after computing parity */
4497 sh->disks = conf->raid_disks;
4498 stripe_set_idx(sh->sector, conf, 0, sh);
4499 schedule_reconstruction(sh, &s, 1, 1);
4500 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
4501 clear_bit(STRIPE_EXPAND_READY, &sh->state);
4502 atomic_dec(&conf->reshape_stripes);
4503 wake_up(&conf->wait_for_overlap);
4504 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
4505 }
4506
4507 if (s.expanding && s.locked == 0 &&
4508 !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
4509 handle_stripe_expansion(conf, sh);
4510
4511 finish:
4512 /* wait for this device to become unblocked */
4513 if (unlikely(s.blocked_rdev)) {
4514 if (conf->mddev->external)
4515 md_wait_for_blocked_rdev(s.blocked_rdev,
4516 conf->mddev);
4517 else
4518 /* Internal metadata will immediately
4519 * be written by raid5d, so we don't
4520 * need to wait here.
4521 */
4522 rdev_dec_pending(s.blocked_rdev,
4523 conf->mddev);
4524 }
4525
4526 if (s.handle_bad_blocks)
4527 for (i = disks; i--; ) {
4528 struct md_rdev *rdev;
4529 struct r5dev *dev = &sh->dev[i];
4530 if (test_and_clear_bit(R5_WriteError, &dev->flags)) {
4531 /* We own a safe reference to the rdev */
4532 rdev = conf->disks[i].rdev;
4533 if (!rdev_set_badblocks(rdev, sh->sector,
4534 STRIPE_SECTORS, 0))
4535 md_error(conf->mddev, rdev);
4536 rdev_dec_pending(rdev, conf->mddev);
4537 }
4538 if (test_and_clear_bit(R5_MadeGood, &dev->flags)) {
4539 rdev = conf->disks[i].rdev;
4540 rdev_clear_badblocks(rdev, sh->sector,
4541 STRIPE_SECTORS, 0);
4542 rdev_dec_pending(rdev, conf->mddev);
4543 }
4544 if (test_and_clear_bit(R5_MadeGoodRepl, &dev->flags)) {
4545 rdev = conf->disks[i].replacement;
4546 if (!rdev)
4547 /* rdev has been moved down */
4548 rdev = conf->disks[i].rdev;
4549 rdev_clear_badblocks(rdev, sh->sector,
4550 STRIPE_SECTORS, 0);
4551 rdev_dec_pending(rdev, conf->mddev);
4552 }
4553 }
4554
4555 if (s.ops_request)
4556 raid_run_ops(sh, s.ops_request);
4557
4558 ops_run_io(sh, &s);
4559
4560 if (s.dec_preread_active) {
4561 /* We delay this until after ops_run_io so that if make_request
4562 * is waiting on a flush, it won't continue until the writes
4563 * have actually been submitted.
4564 */
4565 atomic_dec(&conf->preread_active_stripes);
4566 if (atomic_read(&conf->preread_active_stripes) <
4567 IO_THRESHOLD)
4568 md_wakeup_thread(conf->mddev->thread);
4569 }
4570
4571 return_io(s.return_bi);
4572
4573 clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
4574 }
4575
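/*
* Once the number of preread-active stripes has dropped below IO_THRESHOLD,
* move the delayed stripes onto the hold_list, marking each one
* preread-active and waking a stripe worker for it.
*/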
4576 static void raid5_activate_delayed(struct r5conf *conf)
4577 {
4578 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
4579 while (!list_empty(&conf->delayed_list)) {
4580 struct list_head *l = conf->delayed_list.next;
4581 struct stripe_head *sh;
4582 sh = list_entry(l, struct stripe_head, lru);
4583 list_del_init(l);
4584 clear_bit(STRIPE_DELAYED, &sh->state);
4585 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
4586 atomic_inc(&conf->preread_active_stripes);
4587 list_add_tail(&sh->lru, &conf->hold_list);
4588 raid5_wakeup_stripe_thread(sh);
4589 }
4590 }
4591 }
4592
4593 static void activate_bit_delay(struct r5conf *conf,
4594 struct list_head *temp_inactive_list)
4595 {
4596 /* device_lock is held */
4597 struct list_head head;
4598 list_add(&head, &conf->bitmap_list);
4599 list_del_init(&conf->bitmap_list);
4600 while (!list_empty(&head)) {
4601 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
4602 int hash;
4603 list_del_init(&sh->lru);
4604 atomic_inc(&sh->count);
4605 hash = sh->hash_lock_index;
4606 __release_stripe(conf, sh, &temp_inactive_list[hash]);
4607 }
4608 }
4609
4610 static int raid5_congested(struct mddev *mddev, int bits)
4611 {
4612 struct r5conf *conf = mddev->private;
4613
4614 /* No difference between reads and writes. Just check
4615 * how busy the stripe_cache is
4616 */
4617
4618 if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
4619 return 1;
4620 if (conf->quiesce)
4621 return 1;
4622 if (atomic_read(&conf->empty_inactive_list_nr))
4623 return 1;
4624
4625 return 0;
4626 }
4627
4628 /* We want read requests to align with chunks where possible,
4629 * but write requests don't need to.
4630 */
4631 static int raid5_mergeable_bvec(struct mddev *mddev,
4632 struct bvec_merge_data *bvm,
4633 struct bio_vec *biovec)
4634 {
4635 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
4636 int max;
4637 unsigned int chunk_sectors = mddev->chunk_sectors;
4638 unsigned int bio_sectors = bvm->bi_size >> 9;
4639
4640 /*
4641 * always allow writes to be mergeable, read as well if array
4642 * is degraded as we'll go through stripe cache anyway.
4643 */
4644 if ((bvm->bi_rw & 1) == WRITE || mddev->degraded)
4645 return biovec->bv_len;
4646
4647 if (mddev->new_chunk_sectors < mddev->chunk_sectors)
4648 chunk_sectors = mddev->new_chunk_sectors;
4649 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
4650 if (max < 0) max = 0;
4651 if (max <= biovec->bv_len && bio_sectors == 0)
4652 return biovec->bv_len;
4653 else
4654 return max;
4655 }
4656
4657 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
4658 {
4659 sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev);
4660 unsigned int chunk_sectors = mddev->chunk_sectors;
4661 unsigned int bio_sectors = bio_sectors(bio);
4662
4663 if (mddev->new_chunk_sectors < mddev->chunk_sectors)
4664 chunk_sectors = mddev->new_chunk_sectors;
4665 return chunk_sectors >=
4666 ((sector & (chunk_sectors - 1)) + bio_sectors);
4667 }
4668
4669 /*
4670 * add bio to the retry LIFO (in O(1) ... we are in interrupt context),
4671 * to be sampled later by raid5d.
4672 */
4673 static void add_bio_to_retry(struct bio *bi, struct r5conf *conf)
4674 {
4675 unsigned long flags;
4676
4677 spin_lock_irqsave(&conf->device_lock, flags);
4678
4679 bi->bi_next = conf->retry_read_aligned_list;
4680 conf->retry_read_aligned_list = bi;
4681
4682 spin_unlock_irqrestore(&conf->device_lock, flags);
4683 md_wakeup_thread(conf->mddev->thread);
4684 }
4685
4686 static struct bio *remove_bio_from_retry(struct r5conf *conf)
4687 {
4688 struct bio *bi;
4689
4690 bi = conf->retry_read_aligned;
4691 if (bi) {
4692 conf->retry_read_aligned = NULL;
4693 return bi;
4694 }
4695 bi = conf->retry_read_aligned_list;
4696 if (bi) {
4697 conf->retry_read_aligned_list = bi->bi_next;
4698 bi->bi_next = NULL;
4699 /*
4700 * this sets the active stripe count to 1 and the processed
4701 * stripe count to zero (upper 8 bits)
4702 */
4703 raid5_set_bi_stripes(bi, 1); /* biased count of active stripes */
4704 }
4705
4706 return bi;
4707 }
4708
4709 /*
4710 * The "raid5_align_endio" should check if the read succeeded and if it
4711 * did, call bio_endio on the original bio (having bio_put the new bio
4712 * first).
4713 * If the read failed..
4714 */
4715 static void raid5_align_endio(struct bio *bi, int error)
4716 {
4717 struct bio* raid_bi = bi->bi_private;
4718 struct mddev *mddev;
4719 struct r5conf *conf;
4720 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
4721 struct md_rdev *rdev;
4722
4723 bio_put(bi);
4724
4725 rdev = (void*)raid_bi->bi_next;
4726 raid_bi->bi_next = NULL;
4727 mddev = rdev->mddev;
4728 conf = mddev->private;
4729
4730 rdev_dec_pending(rdev, conf->mddev);
4731
4732 if (!error && uptodate) {
4733 trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev),
4734 raid_bi, 0);
4735 bio_endio(raid_bi, 0);
4736 if (atomic_dec_and_test(&conf->active_aligned_reads))
4737 wake_up(&conf->wait_for_stripe);
4738 return;
4739 }
4740
4741 pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
4742
4743 add_bio_to_retry(raid_bi, conf);
4744 }
4745
4746 static int bio_fits_rdev(struct bio *bi)
4747 {
4748 struct request_queue *q = bdev_get_queue(bi->bi_bdev);
4749
4750 if (bio_sectors(bi) > queue_max_sectors(q))
4751 return 0;
4752 blk_recount_segments(q, bi);
4753 if (bi->bi_phys_segments > queue_max_segments(q))
4754 return 0;
4755
4756 if (q->merge_bvec_fn)
4757 /* it's too hard to apply the merge_bvec_fn at this stage,
4758 * just give up
4759 */
4760 return 0;
4761
4762 return 1;
4763 }
4764
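/*
* chunk_aligned_read - for a read that fits entirely inside one chunk,
* bypass the stripe cache and send a cloned bio straight to the chosen
* device (preferring a fully recovered replacement).  Returns 1 if the
* read was dispatched, 0 if the caller must fall back to the stripe path.
*/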
4765 static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
4766 {
4767 struct r5conf *conf = mddev->private;
4768 int dd_idx;
4769 struct bio* align_bi;
4770 struct md_rdev *rdev;
4771 sector_t end_sector;
4772
4773 if (!in_chunk_boundary(mddev, raid_bio)) {
4774 pr_debug("chunk_aligned_read : non aligned\n");
4775 return 0;
4776 }
4777 /*
4778 * use bio_clone_mddev to make a copy of the bio
4779 */
4780 align_bi = bio_clone_mddev(raid_bio, GFP_NOIO, mddev);
4781 if (!align_bi)
4782 return 0;
4783 /*
4784 * set bi_end_io to a new function, and set bi_private to the
4785 * original bio.
4786 */
4787 align_bi->bi_end_io = raid5_align_endio;
4788 align_bi->bi_private = raid_bio;
4789 /*
4790 * compute position
4791 */
4792 align_bi->bi_iter.bi_sector =
4793 raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector,
4794 0, &dd_idx, NULL);
4795
4796 end_sector = bio_end_sector(align_bi);
4797 rcu_read_lock();
4798 rdev = rcu_dereference(conf->disks[dd_idx].replacement);
4799 if (!rdev || test_bit(Faulty, &rdev->flags) ||
4800 rdev->recovery_offset < end_sector) {
4801 rdev = rcu_dereference(conf->disks[dd_idx].rdev);
4802 if (rdev &&
4803 (test_bit(Faulty, &rdev->flags) ||
4804 !(test_bit(In_sync, &rdev->flags) ||
4805 rdev->recovery_offset >= end_sector)))
4806 rdev = NULL;
4807 }
4808 if (rdev) {
4809 sector_t first_bad;
4810 int bad_sectors;
4811
4812 atomic_inc(&rdev->nr_pending);
4813 rcu_read_unlock();
4814 raid_bio->bi_next = (void*)rdev;
4815 align_bi->bi_bdev = rdev->bdev;
4816 __clear_bit(BIO_SEG_VALID, &align_bi->bi_flags);
4817
4818 if (!bio_fits_rdev(align_bi) ||
4819 is_badblock(rdev, align_bi->bi_iter.bi_sector,
4820 bio_sectors(align_bi),
4821 &first_bad, &bad_sectors)) {
4822 /* too big in some way, or has a known bad block */
4823 bio_put(align_bi);
4824 rdev_dec_pending(rdev, mddev);
4825 return 0;
4826 }
4827
4828 /* No reshape active, so we can trust rdev->data_offset */
4829 align_bi->bi_iter.bi_sector += rdev->data_offset;
4830
4831 spin_lock_irq(&conf->device_lock);
4832 wait_event_lock_irq(conf->wait_for_stripe,
4833 conf->quiesce == 0,
4834 conf->device_lock);
4835 atomic_inc(&conf->active_aligned_reads);
4836 spin_unlock_irq(&conf->device_lock);
4837
4838 if (mddev->gendisk)
4839 trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
4840 align_bi, disk_devt(mddev->gendisk),
4841 raid_bio->bi_iter.bi_sector);
4842 generic_make_request(align_bi);
4843 return 1;
4844 } else {
4845 rcu_read_unlock();
4846 bio_put(align_bi);
4847 return 0;
4848 }
4849 }
4850
4851 /* __get_priority_stripe - get the next stripe to process
4852 *
4853 * Full stripe writes are allowed to pass preread active stripes up until
4854 * the bypass_threshold is exceeded. In general the bypass_count
4855 * increments when the handle_list is handled before the hold_list; however, it
4856 * will not be incremented when STRIPE_IO_STARTED is sampled as set, signifying a
4857 * stripe with in-flight i/o. The bypass_count will be reset when the
4858 * head of the hold_list has changed, i.e. the head was promoted to the
4859 * handle_list.
4860 */
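/*
* Illustration with hypothetical numbers: with bypass_threshold = 1, each
* handle_list stripe processed while the same stripe sits at the head of
* the hold_list (and has no i/o in flight) bumps bypass_count by one; once
* the handle_list drains, a hold_list stripe is picked if bypass_count has
* exceeded the threshold or no full-stripe writes are pending, and
* bypass_count is then charged back by the threshold.
*/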
4861 static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group)
4862 {
4863 struct stripe_head *sh = NULL, *tmp;
4864 struct list_head *handle_list = NULL;
4865 struct r5worker_group *wg = NULL;
4866
4867 if (conf->worker_cnt_per_group == 0) {
4868 handle_list = &conf->handle_list;
4869 } else if (group != ANY_GROUP) {
4870 handle_list = &conf->worker_groups[group].handle_list;
4871 wg = &conf->worker_groups[group];
4872 } else {
4873 int i;
4874 for (i = 0; i < conf->group_cnt; i++) {
4875 handle_list = &conf->worker_groups[i].handle_list;
4876 wg = &conf->worker_groups[i];
4877 if (!list_empty(handle_list))
4878 break;
4879 }
4880 }
4881
4882 pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
4883 __func__,
4884 list_empty(handle_list) ? "empty" : "busy",
4885 list_empty(&conf->hold_list) ? "empty" : "busy",
4886 atomic_read(&conf->pending_full_writes), conf->bypass_count);
4887
4888 if (!list_empty(handle_list)) {
4889 sh = list_entry(handle_list->next, typeof(*sh), lru);
4890
4891 if (list_empty(&conf->hold_list))
4892 conf->bypass_count = 0;
4893 else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
4894 if (conf->hold_list.next == conf->last_hold)
4895 conf->bypass_count++;
4896 else {
4897 conf->last_hold = conf->hold_list.next;
4898 conf->bypass_count -= conf->bypass_threshold;
4899 if (conf->bypass_count < 0)
4900 conf->bypass_count = 0;
4901 }
4902 }
4903 } else if (!list_empty(&conf->hold_list) &&
4904 ((conf->bypass_threshold &&
4905 conf->bypass_count > conf->bypass_threshold) ||
4906 atomic_read(&conf->pending_full_writes) == 0)) {
4907
4908 list_for_each_entry(tmp, &conf->hold_list, lru) {
4909 if (conf->worker_cnt_per_group == 0 ||
4910 group == ANY_GROUP ||
4911 !cpu_online(tmp->cpu) ||
4912 cpu_to_group(tmp->cpu) == group) {
4913 sh = tmp;
4914 break;
4915 }
4916 }
4917
4918 if (sh) {
4919 conf->bypass_count -= conf->bypass_threshold;
4920 if (conf->bypass_count < 0)
4921 conf->bypass_count = 0;
4922 }
4923 wg = NULL;
4924 }
4925
4926 if (!sh)
4927 return NULL;
4928
4929 if (wg) {
4930 wg->stripes_cnt--;
4931 sh->group = NULL;
4932 }
4933 list_del_init(&sh->lru);
4934 BUG_ON(atomic_inc_return(&sh->count) != 1);
4935 return sh;
4936 }
4937
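/*
* Per-plug context: stripes released while a blk_plug is active are parked
* on 'list' (and later the per-hash temp_inactive_list) so that
* raid5_unplug() can hand them back to the stripe cache in one batch when
* the plug is flushed.
*/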
4938 struct raid5_plug_cb {
4939 struct blk_plug_cb cb;
4940 struct list_head list;
4941 struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS];
4942 };
4943
4944 static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
4945 {
4946 struct raid5_plug_cb *cb = container_of(
4947 blk_cb, struct raid5_plug_cb, cb);
4948 struct stripe_head *sh;
4949 struct mddev *mddev = cb->cb.data;
4950 struct r5conf *conf = mddev->private;
4951 int cnt = 0;
4952 int hash;
4953
4954 if (cb->list.next && !list_empty(&cb->list)) {
4955 spin_lock_irq(&conf->device_lock);
4956 while (!list_empty(&cb->list)) {
4957 sh = list_first_entry(&cb->list, struct stripe_head, lru);
4958 list_del_init(&sh->lru);
4959 /*
4960 * avoid a race where release_stripe_plug() sees
4961 * STRIPE_ON_UNPLUG_LIST clear but the stripe
4962 * is still in our list
4963 */
4964 smp_mb__before_atomic();
4965 clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);
4966 /*
4967 * STRIPE_ON_RELEASE_LIST could be set here. In that
4968 * case, the count is always > 1 here
4969 */
4970 hash = sh->hash_lock_index;
4971 __release_stripe(conf, sh, &cb->temp_inactive_list[hash]);
4972 cnt++;
4973 }
4974 spin_unlock_irq(&conf->device_lock);
4975 }
4976 release_inactive_stripe_list(conf, cb->temp_inactive_list,
4977 NR_STRIPE_HASH_LOCKS);
4978 if (mddev->queue)
4979 trace_block_unplug(mddev->queue, cnt, !from_schedule);
4980 kfree(cb);
4981 }
4982
4983 static void release_stripe_plug(struct mddev *mddev,
4984 struct stripe_head *sh)
4985 {
4986 struct blk_plug_cb *blk_cb = blk_check_plugged(
4987 raid5_unplug, mddev,
4988 sizeof(struct raid5_plug_cb));
4989 struct raid5_plug_cb *cb;
4990
4991 if (!blk_cb) {
4992 release_stripe(sh);
4993 return;
4994 }
4995
4996 cb = container_of(blk_cb, struct raid5_plug_cb, cb);
4997
4998 if (cb->list.next == NULL) {
4999 int i;
5000 INIT_LIST_HEAD(&cb->list);
5001 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
5002 INIT_LIST_HEAD(cb->temp_inactive_list + i);
5003 }
5004
5005 if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state))
5006 list_add_tail(&sh->lru, &cb->list);
5007 else
5008 release_stripe(sh);
5009 }
5010
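/*
* make_discard_request - handle a REQ_DISCARD bio: trim the range to whole
* stripes, mark every data block in each stripe as fully overwritten by
* the discard bio, update the write-intent bitmap as for a normal write,
* and queue the stripe for handling.
*/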
5011 static void make_discard_request(struct mddev *mddev, struct bio *bi)
5012 {
5013 struct r5conf *conf = mddev->private;
5014 sector_t logical_sector, last_sector;
5015 struct stripe_head *sh;
5016 int remaining;
5017 int stripe_sectors;
5018
5019 if (mddev->reshape_position != MaxSector)
5020 /* Skip discard while reshape is happening */
5021 return;
5022
5023 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
5024 last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9);
5025
5026 bi->bi_next = NULL;
5027 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
5028
5029 stripe_sectors = conf->chunk_sectors *
5030 (conf->raid_disks - conf->max_degraded);
5031 logical_sector = DIV_ROUND_UP_SECTOR_T(logical_sector,
5032 stripe_sectors);
5033 sector_div(last_sector, stripe_sectors);
5034
5035 logical_sector *= conf->chunk_sectors;
5036 last_sector *= conf->chunk_sectors;
5037
5038 for (; logical_sector < last_sector;
5039 logical_sector += STRIPE_SECTORS) {
5040 DEFINE_WAIT(w);
5041 int d;
5042 again:
5043 sh = get_active_stripe(conf, logical_sector, 0, 0, 0);
5044 prepare_to_wait(&conf->wait_for_overlap, &w,
5045 TASK_UNINTERRUPTIBLE);
5046 set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags);
5047 if (test_bit(STRIPE_SYNCING, &sh->state)) {
5048 release_stripe(sh);
5049 schedule();
5050 goto again;
5051 }
5052 clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags);
5053 spin_lock_irq(&sh->stripe_lock);
5054 for (d = 0; d < conf->raid_disks; d++) {
5055 if (d == sh->pd_idx || d == sh->qd_idx)
5056 continue;
5057 if (sh->dev[d].towrite || sh->dev[d].toread) {
5058 set_bit(R5_Overlap, &sh->dev[d].flags);
5059 spin_unlock_irq(&sh->stripe_lock);
5060 release_stripe(sh);
5061 schedule();
5062 goto again;
5063 }
5064 }
5065 set_bit(STRIPE_DISCARD, &sh->state);
5066 finish_wait(&conf->wait_for_overlap, &w);
5067 sh->overwrite_disks = 0;
5068 for (d = 0; d < conf->raid_disks; d++) {
5069 if (d == sh->pd_idx || d == sh->qd_idx)
5070 continue;
5071 sh->dev[d].towrite = bi;
5072 set_bit(R5_OVERWRITE, &sh->dev[d].flags);
5073 raid5_inc_bi_active_stripes(bi);
5074 sh->overwrite_disks++;
5075 }
5076 spin_unlock_irq(&sh->stripe_lock);
5077 if (conf->mddev->bitmap) {
5078 for (d = 0;
5079 d < conf->raid_disks - conf->max_degraded;
5080 d++)
5081 bitmap_startwrite(mddev->bitmap,
5082 sh->sector,
5083 STRIPE_SECTORS,
5084 0);
5085 sh->bm_seq = conf->seq_flush + 1;
5086 set_bit(STRIPE_BIT_DELAY, &sh->state);
5087 }
5088
5089 set_bit(STRIPE_HANDLE, &sh->state);
5090 clear_bit(STRIPE_DELAYED, &sh->state);
5091 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
5092 atomic_inc(&conf->preread_active_stripes);
5093 release_stripe_plug(mddev, sh);
5094 }
5095
5096 remaining = raid5_dec_bi_active_stripes(bi);
5097 if (remaining == 0) {
5098 md_write_end(mddev);
5099 bio_endio(bi, 0);
5100 }
5101 }
5102
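/*
* make_request - entry point for normal i/o: REQ_FLUSH and REQ_DISCARD
* bios are handed off early, chunk-aligned reads may bypass the stripe
* cache, and everything else is split into STRIPE_SECTORS-sized pieces
* attached to the relevant stripe_heads, re-checking the target stripe
* whenever a reshape is moving underneath us.
*/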
5103 static void make_request(struct mddev *mddev, struct bio * bi)
5104 {
5105 struct r5conf *conf = mddev->private;
5106 int dd_idx;
5107 sector_t new_sector;
5108 sector_t logical_sector, last_sector;
5109 struct stripe_head *sh;
5110 const int rw = bio_data_dir(bi);
5111 int remaining;
5112 DEFINE_WAIT(w);
5113 bool do_prepare;
5114
5115 if (unlikely(bi->bi_rw & REQ_FLUSH)) {
5116 md_flush_request(mddev, bi);
5117 return;
5118 }
5119
5120 md_write_start(mddev, bi);
5121
5122 /*
5123 * If array is degraded, better not do chunk aligned read because
5124 * later we might have to read it again in order to reconstruct
5125 * data on failed drives.
5126 */
5127 if (rw == READ && mddev->degraded == 0 &&
5128 mddev->reshape_position == MaxSector &&
5129 chunk_aligned_read(mddev, bi))
5130 return;
5131
5132 if (unlikely(bi->bi_rw & REQ_DISCARD)) {
5133 make_discard_request(mddev, bi);
5134 return;
5135 }
5136
5137 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
5138 last_sector = bio_end_sector(bi);
5139 bi->bi_next = NULL;
5140 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
5141
5142 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
5143 for (; logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
5144 int previous;
5145 int seq;
5146
5147 do_prepare = false;
5148 retry:
5149 seq = read_seqcount_begin(&conf->gen_lock);
5150 previous = 0;
5151 if (do_prepare)
5152 prepare_to_wait(&conf->wait_for_overlap, &w,
5153 TASK_UNINTERRUPTIBLE);
5154 if (unlikely(conf->reshape_progress != MaxSector)) {
5155 /* spinlock is needed as reshape_progress may be
5156 * 64bit on a 32bit platform, and so it might be
5157 * possible to see a half-updated value
5158 * Of course reshape_progress could change after
5159 * the lock is dropped, so once we get a reference
5160 * to the stripe that we think it is, we will have
5161 * to check again.
5162 */
5163 spin_lock_irq(&conf->device_lock);
5164 if (mddev->reshape_backwards
5165 ? logical_sector < conf->reshape_progress
5166 : logical_sector >= conf->reshape_progress) {
5167 previous = 1;
5168 } else {
5169 if (mddev->reshape_backwards
5170 ? logical_sector < conf->reshape_safe
5171 : logical_sector >= conf->reshape_safe) {
5172 spin_unlock_irq(&conf->device_lock);
5173 schedule();
5174 do_prepare = true;
5175 goto retry;
5176 }
5177 }
5178 spin_unlock_irq(&conf->device_lock);
5179 }
5180
5181 new_sector = raid5_compute_sector(conf, logical_sector,
5182 previous,
5183 &dd_idx, NULL);
5184 pr_debug("raid456: make_request, sector %llu logical %llu\n",
5185 (unsigned long long)new_sector,
5186 (unsigned long long)logical_sector);
5187
5188 sh = get_active_stripe(conf, new_sector, previous,
5189 (bi->bi_rw&RWA_MASK), 0);
5190 if (sh) {
5191 if (unlikely(previous)) {
5192 /* expansion might have moved on while waiting for a
5193 * stripe, so we must do the range check again.
5194 * Expansion could still move past after this
5195 * test, but as we are holding a reference to
5196 * 'sh', we know that if that happens,
5197 * STRIPE_EXPANDING will get set and the expansion
5198 * won't proceed until we finish with the stripe.
5199 */
5200 int must_retry = 0;
5201 spin_lock_irq(&conf->device_lock);
5202 if (mddev->reshape_backwards
5203 ? logical_sector >= conf->reshape_progress
5204 : logical_sector < conf->reshape_progress)
5205 /* mismatch, need to try again */
5206 must_retry = 1;
5207 spin_unlock_irq(&conf->device_lock);
5208 if (must_retry) {
5209 release_stripe(sh);
5210 schedule();
5211 do_prepare = true;
5212 goto retry;
5213 }
5214 }
5215 if (read_seqcount_retry(&conf->gen_lock, seq)) {
5216 /* Might have got the wrong stripe_head
5217 * by accident
5218 */
5219 release_stripe(sh);
5220 goto retry;
5221 }
5222
5223 if (rw == WRITE &&
5224 logical_sector >= mddev->suspend_lo &&
5225 logical_sector < mddev->suspend_hi) {
5226 release_stripe(sh);
5227 /* As the suspend_* range is controlled by
5228 * userspace, we want an interruptible
5229 * wait.
5230 */
5231 flush_signals(current);
5232 prepare_to_wait(&conf->wait_for_overlap,
5233 &w, TASK_INTERRUPTIBLE);
5234 if (logical_sector >= mddev->suspend_lo &&
5235 logical_sector < mddev->suspend_hi) {
5236 schedule();
5237 do_prepare = true;
5238 }
5239 goto retry;
5240 }
5241
5242 if (test_bit(STRIPE_EXPANDING, &sh->state) ||
5243 !add_stripe_bio(sh, bi, dd_idx, rw, previous)) {
5244 /* Stripe is busy expanding or
5245 * add failed due to overlap. Flush everything
5246 * and wait a while
5247 */
5248 md_wakeup_thread(mddev->thread);
5249 release_stripe(sh);
5250 schedule();
5251 do_prepare = true;
5252 goto retry;
5253 }
5254 set_bit(STRIPE_HANDLE, &sh->state);
5255 clear_bit(STRIPE_DELAYED, &sh->state);
5256 if ((!sh->batch_head || sh == sh->batch_head) &&
5257 (bi->bi_rw & REQ_SYNC) &&
5258 !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
5259 atomic_inc(&conf->preread_active_stripes);
5260 release_stripe_plug(mddev, sh);
5261 } else {
5262 /* cannot get stripe for read-ahead, just give up */
5263 clear_bit(BIO_UPTODATE, &bi->bi_flags);
5264 break;
5265 }
5266 }
5267 finish_wait(&conf->wait_for_overlap, &w);
5268
5269 remaining = raid5_dec_bi_active_stripes(bi);
5270 if (remaining == 0) {
5271
5272 if (rw == WRITE)
5273 md_write_end(mddev);
5274
5275 trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
5276 bi, 0);
5277 bio_endio(bi, 0);
5278 }
5279 }
5280
5281 static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks);
5282
5283 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
5284 {
5285 /* reshaping is quite different to recovery/resync so it is
5286 * handled quite separately ... here.
5287 *
5288 * On each call to sync_request, we gather one chunk worth of
5289 * destination stripes and flag them as expanding.
5290 * Then we find all the source stripes and request reads.
5291 * As the reads complete, handle_stripe will copy the data
5292 * into the destination stripe and release that stripe.
5293 */
5294 struct r5conf *conf = mddev->private;
5295 struct stripe_head *sh;
5296 sector_t first_sector, last_sector;
5297 int raid_disks = conf->previous_raid_disks;
5298 int data_disks = raid_disks - conf->max_degraded;
5299 int new_data_disks = conf->raid_disks - conf->max_degraded;
5300 int i;
5301 int dd_idx;
5302 sector_t writepos, readpos, safepos;
5303 sector_t stripe_addr;
5304 int reshape_sectors;
5305 struct list_head stripes;
5306
5307 if (sector_nr == 0) {
5308 /* If restarting in the middle, skip the initial sectors */
5309 if (mddev->reshape_backwards &&
5310 conf->reshape_progress < raid5_size(mddev, 0, 0)) {
5311 sector_nr = raid5_size(mddev, 0, 0)
5312 - conf->reshape_progress;
5313 } else if (!mddev->reshape_backwards &&
5314 conf->reshape_progress > 0)
5315 sector_nr = conf->reshape_progress;
5316 sector_div(sector_nr, new_data_disks);
5317 if (sector_nr) {
5318 mddev->curr_resync_completed = sector_nr;
5319 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
5320 *skipped = 1;
5321 return sector_nr;
5322 }
5323 }
5324
5325 /* We need to process a full chunk at a time.
5326 * If old and new chunk sizes differ, we need to process the
5327 * largest of these
5328 */
5329 if (mddev->new_chunk_sectors > mddev->chunk_sectors)
5330 reshape_sectors = mddev->new_chunk_sectors;
5331 else
5332 reshape_sectors = mddev->chunk_sectors;
5333
5334 /* We update the metadata at least every 10 seconds, or when
5335 * the data about to be copied would over-write the source of
5336 * the data at the front of the range, i.e. when the point one
5337 * new stripe along from reshape_progress maps (in the new layout)
5338 * to after where reshape_safe maps in the old layout.
5339 */
5340 writepos = conf->reshape_progress;
5341 sector_div(writepos, new_data_disks);
5342 readpos = conf->reshape_progress;
5343 sector_div(readpos, data_disks);
5344 safepos = conf->reshape_safe;
5345 sector_div(safepos, data_disks);
5346 if (mddev->reshape_backwards) {
5347 writepos -= min_t(sector_t, reshape_sectors, writepos);
5348 readpos += reshape_sectors;
5349 safepos += reshape_sectors;
5350 } else {
5351 writepos += reshape_sectors;
5352 readpos -= min_t(sector_t, reshape_sectors, readpos);
5353 safepos -= min_t(sector_t, reshape_sectors, safepos);
5354 }
5355
5356 /* Having calculated 'writepos', possibly use it
5357 * to set 'stripe_addr', which is where we will write to.
5358 */
5359 if (mddev->reshape_backwards) {
5360 BUG_ON(conf->reshape_progress == 0);
5361 stripe_addr = writepos;
5362 BUG_ON((mddev->dev_sectors &
5363 ~((sector_t)reshape_sectors - 1))
5364 - reshape_sectors - stripe_addr
5365 != sector_nr);
5366 } else {
5367 BUG_ON(writepos != sector_nr + reshape_sectors);
5368 stripe_addr = sector_nr;
5369 }
5370
5371 /* 'writepos' is the most advanced device address we might write.
5372 * 'readpos' is the least advanced device address we might read.
5373 * 'safepos' is the least address recorded in the metadata as having
5374 * been reshaped.
5375 * If there is a min_offset_diff, these are adjusted either by
5376 * increasing the safepos/readpos if diff is negative, or
5377 * increasing writepos if diff is positive.
5378 * If 'readpos' is then behind 'writepos', there is no way that we can
5379 * ensure safety in the face of a crash - that must be done by userspace
5380 * making a backup of the data. So in that case there is no particular
5381 * rush to update metadata.
5382 * Otherwise if 'safepos' is behind 'writepos', then we really need to
5383 * update the metadata to advance 'safepos' to match 'readpos' so that
5384 * we can be safe in the event of a crash.
5385 * So we insist on updating metadata if safepos is behind writepos and
5386 * readpos is beyond writepos.
5387 * In any case, update the metadata every 10 seconds.
5388 * Maybe that number should be configurable, but I'm not sure it is
5389 * worth it.... maybe it could be a multiple of safemode_delay???
5390 */
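/*
* Worked example with made-up numbers for a forward (growing) reshape,
* min_offset_diff = 0, reshape_sectors = 128, 3 old data disks and 4 new:
* with reshape_progress = 6144 and reshape_safe = 3072 we get
* writepos = 6144/4 + 128 = 1664, readpos = 6144/3 - 128 = 1920 and
* safepos = 3072/3 - 128 = 896, so safepos < writepos and
* readpos > writepos, and the superblock must be written out before any
* further copying, independently of the 10-second timer.
*/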
5391 if (conf->min_offset_diff < 0) {
5392 safepos += -conf->min_offset_diff;
5393 readpos += -conf->min_offset_diff;
5394 } else
5395 writepos += conf->min_offset_diff;
5396
5397 if ((mddev->reshape_backwards
5398 ? (safepos > writepos && readpos < writepos)
5399 : (safepos < writepos && readpos > writepos)) ||
5400 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
5401 /* Cannot proceed until we've updated the superblock... */
5402 wait_event(conf->wait_for_overlap,
5403 atomic_read(&conf->reshape_stripes)==0
5404 || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
5405 if (atomic_read(&conf->reshape_stripes) != 0)
5406 return 0;
5407 mddev->reshape_position = conf->reshape_progress;
5408 mddev->curr_resync_completed = sector_nr;
5409 conf->reshape_checkpoint = jiffies;
5410 set_bit(MD_CHANGE_DEVS, &mddev->flags);
5411 md_wakeup_thread(mddev->thread);
5412 wait_event(mddev->sb_wait, mddev->flags == 0 ||
5413 test_bit(MD_RECOVERY_INTR, &mddev->recovery));
5414 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
5415 return 0;
5416 spin_lock_irq(&conf->device_lock);
5417 conf->reshape_safe = mddev->reshape_position;
5418 spin_unlock_irq(&conf->device_lock);
5419 wake_up(&conf->wait_for_overlap);
5420 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
5421 }
5422
5423 INIT_LIST_HEAD(&stripes);
5424 for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
5425 int j;
5426 int skipped_disk = 0;
5427 sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
5428 set_bit(STRIPE_EXPANDING, &sh->state);
5429 atomic_inc(&conf->reshape_stripes);
5430 /* If any of this stripe is beyond the end of the old
5431 * array, then we need to zero those blocks
5432 */
5433 for (j=sh->disks; j--;) {
5434 sector_t s;
5435 if (j == sh->pd_idx)
5436 continue;
5437 if (conf->level == 6 &&
5438 j == sh->qd_idx)
5439 continue;
5440 s = compute_blocknr(sh, j, 0);
5441 if (s < raid5_size(mddev, 0, 0)) {
5442 skipped_disk = 1;
5443 continue;
5444 }
5445 memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
5446 set_bit(R5_Expanded, &sh->dev[j].flags);
5447 set_bit(R5_UPTODATE, &sh->dev[j].flags);
5448 }
5449 if (!skipped_disk) {
5450 set_bit(STRIPE_EXPAND_READY, &sh->state);
5451 set_bit(STRIPE_HANDLE, &sh->state);
5452 }
5453 list_add(&sh->lru, &stripes);
5454 }
5455 spin_lock_irq(&conf->device_lock);
5456 if (mddev->reshape_backwards)
5457 conf->reshape_progress -= reshape_sectors * new_data_disks;
5458 else
5459 conf->reshape_progress += reshape_sectors * new_data_disks;
5460 spin_unlock_irq(&conf->device_lock);
5461 	/* Ok, those stripes are ready. We can start scheduling
5462 * reads on the source stripes.
5463 * The source stripes are determined by mapping the first and last
5464 * block on the destination stripes.
5465 */
5466 first_sector =
5467 raid5_compute_sector(conf, stripe_addr*(new_data_disks),
5468 1, &dd_idx, NULL);
5469 last_sector =
5470 raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
5471 * new_data_disks - 1),
5472 1, &dd_idx, NULL);
5473 if (last_sector >= mddev->dev_sectors)
5474 last_sector = mddev->dev_sectors - 1;
5475 while (first_sector <= last_sector) {
5476 sh = get_active_stripe(conf, first_sector, 1, 0, 1);
5477 set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
5478 set_bit(STRIPE_HANDLE, &sh->state);
5479 release_stripe(sh);
5480 first_sector += STRIPE_SECTORS;
5481 }
5482 /* Now that the sources are clearly marked, we can release
5483 * the destination stripes
5484 */
5485 while (!list_empty(&stripes)) {
5486 sh = list_entry(stripes.next, struct stripe_head, lru);
5487 list_del_init(&sh->lru);
5488 release_stripe(sh);
5489 }
5490 /* If this takes us to the resync_max point where we have to pause,
5491 * then we need to write out the superblock.
5492 */
5493 sector_nr += reshape_sectors;
5494 if ((sector_nr - mddev->curr_resync_completed) * 2
5495 >= mddev->resync_max - mddev->curr_resync_completed) {
5496 /* Cannot proceed until we've updated the superblock... */
5497 wait_event(conf->wait_for_overlap,
5498 atomic_read(&conf->reshape_stripes) == 0
5499 || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
5500 if (atomic_read(&conf->reshape_stripes) != 0)
5501 goto ret;
5502 mddev->reshape_position = conf->reshape_progress;
5503 mddev->curr_resync_completed = sector_nr;
5504 conf->reshape_checkpoint = jiffies;
5505 set_bit(MD_CHANGE_DEVS, &mddev->flags);
5506 md_wakeup_thread(mddev->thread);
5507 wait_event(mddev->sb_wait,
5508 !test_bit(MD_CHANGE_DEVS, &mddev->flags)
5509 || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
5510 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
5511 goto ret;
5512 spin_lock_irq(&conf->device_lock);
5513 conf->reshape_safe = mddev->reshape_position;
5514 spin_unlock_irq(&conf->device_lock);
5515 wake_up(&conf->wait_for_overlap);
5516 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
5517 }
5518 ret:
5519 return reshape_sectors;
5520 }
5521
5522 static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
5523 {
5524 struct r5conf *conf = mddev->private;
5525 struct stripe_head *sh;
5526 sector_t max_sector = mddev->dev_sectors;
5527 sector_t sync_blocks;
5528 int still_degraded = 0;
5529 int i;
5530
5531 if (sector_nr >= max_sector) {
5532 /* just being told to finish up .. nothing much to do */
5533
5534 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
5535 end_reshape(conf);
5536 return 0;
5537 }
5538
5539 if (mddev->curr_resync < max_sector) /* aborted */
5540 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
5541 &sync_blocks, 1);
5542 else /* completed sync */
5543 conf->fullsync = 0;
5544 bitmap_close_sync(mddev->bitmap);
5545
5546 return 0;
5547 }
5548
5549 /* Allow raid5_quiesce to complete */
5550 wait_event(conf->wait_for_overlap, conf->quiesce != 2);
5551
5552 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
5553 return reshape_request(mddev, sector_nr, skipped);
5554
5555 /* No need to check resync_max as we never do more than one
5556 * stripe, and as resync_max will always be on a chunk boundary,
5557 * if the check in md_do_sync didn't fire, there is no chance
5558 * of overstepping resync_max here
5559 */
5560
5561 	/* if there are too many failed drives and we are trying
5562 * to resync, then assert that we are finished, because there is
5563 * nothing we can do.
5564 */
5565 if (mddev->degraded >= conf->max_degraded &&
5566 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
5567 sector_t rv = mddev->dev_sectors - sector_nr;
5568 *skipped = 1;
5569 return rv;
5570 }
5571 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
5572 !conf->fullsync &&
5573 !bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
5574 sync_blocks >= STRIPE_SECTORS) {
5575 /* we can skip this block, and probably more */
5576 sync_blocks /= STRIPE_SECTORS;
5577 *skipped = 1;
5578 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
5579 }
5580
5581 bitmap_cond_end_sync(mddev->bitmap, sector_nr);
5582
5583 sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
5584 if (sh == NULL) {
5585 sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
5586 /* make sure we don't swamp the stripe cache if someone else
5587 * is trying to get access
5588 */
5589 schedule_timeout_uninterruptible(1);
5590 }
5591 /* Need to check if array will still be degraded after recovery/resync
5592 	 * Note that in the case of >1 drive failures it's possible we're rebuilding
5593 	 * one drive while leaving another faulty drive in the array.
5594 */
5595 rcu_read_lock();
5596 for (i = 0; i < conf->raid_disks; i++) {
5597 struct md_rdev *rdev = ACCESS_ONCE(conf->disks[i].rdev);
5598
5599 if (rdev == NULL || test_bit(Faulty, &rdev->flags))
5600 still_degraded = 1;
5601 }
5602 rcu_read_unlock();
5603
5604 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
5605
5606 set_bit(STRIPE_SYNC_REQUESTED, &sh->state);
5607 set_bit(STRIPE_HANDLE, &sh->state);
5608
5609 release_stripe(sh);
5610
5611 return STRIPE_SECTORS;
5612 }
5613
5614 static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
5615 {
5616 /* We may not be able to submit a whole bio at once as there
5617 * may not be enough stripe_heads available.
5618 	 * We cannot pre-allocate enough stripe_heads as we may need
5619 	 * more than exist in the cache (if we allow ever larger chunks).
5620 	 * So we do one stripe head at a time and record
5621 	 * (via raid5_set_bi_processed_stripes()) how many have been done.
5622 *
5623 * We *know* that this entire raid_bio is in one chunk, so
5624 	 * there will be only one 'dd_idx' and we only need one call to raid5_compute_sector.
5625 */
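/*
 * Editor's note, an illustrative resume: if stripes 0 and 1 of this bio were
 * handled on an earlier pass, raid5_bi_processed_stripes() returns 2, so the
 * loop below skips scnt values 0 and 1 and carries on from the third
 * STRIPE_SECTORS-sized block of the request.
 */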
5626 struct stripe_head *sh;
5627 int dd_idx;
5628 sector_t sector, logical_sector, last_sector;
5629 int scnt = 0;
5630 int remaining;
5631 int handled = 0;
5632
5633 logical_sector = raid_bio->bi_iter.bi_sector &
5634 ~((sector_t)STRIPE_SECTORS-1);
5635 sector = raid5_compute_sector(conf, logical_sector,
5636 0, &dd_idx, NULL);
5637 last_sector = bio_end_sector(raid_bio);
5638
5639 for (; logical_sector < last_sector;
5640 logical_sector += STRIPE_SECTORS,
5641 sector += STRIPE_SECTORS,
5642 scnt++) {
5643
5644 if (scnt < raid5_bi_processed_stripes(raid_bio))
5645 /* already done this stripe */
5646 continue;
5647
5648 sh = get_active_stripe(conf, sector, 0, 1, 1);
5649
5650 if (!sh) {
5651 /* failed to get a stripe - must wait */
5652 raid5_set_bi_processed_stripes(raid_bio, scnt);
5653 conf->retry_read_aligned = raid_bio;
5654 return handled;
5655 }
5656
5657 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) {
5658 release_stripe(sh);
5659 raid5_set_bi_processed_stripes(raid_bio, scnt);
5660 conf->retry_read_aligned = raid_bio;
5661 return handled;
5662 }
5663
5664 set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags);
5665 handle_stripe(sh);
5666 release_stripe(sh);
5667 handled++;
5668 }
5669 remaining = raid5_dec_bi_active_stripes(raid_bio);
5670 if (remaining == 0) {
5671 trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev),
5672 raid_bio, 0);
5673 bio_endio(raid_bio, 0);
5674 }
5675 if (atomic_dec_and_test(&conf->active_aligned_reads))
5676 wake_up(&conf->wait_for_stripe);
5677 return handled;
5678 }
5679
5680 static int handle_active_stripes(struct r5conf *conf, int group,
5681 struct r5worker *worker,
5682 struct list_head *temp_inactive_list)
5683 {
5684 struct stripe_head *batch[MAX_STRIPE_BATCH], *sh;
5685 int i, batch_size = 0, hash;
5686 bool release_inactive = false;
5687
5688 while (batch_size < MAX_STRIPE_BATCH &&
5689 (sh = __get_priority_stripe(conf, group)) != NULL)
5690 batch[batch_size++] = sh;
5691
5692 if (batch_size == 0) {
5693 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
5694 if (!list_empty(temp_inactive_list + i))
5695 break;
5696 if (i == NR_STRIPE_HASH_LOCKS)
5697 return batch_size;
5698 release_inactive = true;
5699 }
5700 spin_unlock_irq(&conf->device_lock);
5701
5702 release_inactive_stripe_list(conf, temp_inactive_list,
5703 NR_STRIPE_HASH_LOCKS);
5704
5705 if (release_inactive) {
5706 spin_lock_irq(&conf->device_lock);
5707 return 0;
5708 }
5709
5710 for (i = 0; i < batch_size; i++)
5711 handle_stripe(batch[i]);
5712
5713 cond_resched();
5714
5715 spin_lock_irq(&conf->device_lock);
5716 for (i = 0; i < batch_size; i++) {
5717 hash = batch[i]->hash_lock_index;
5718 __release_stripe(conf, batch[i], &temp_inactive_list[hash]);
5719 }
5720 return batch_size;
5721 }
5722
5723 static void raid5_do_work(struct work_struct *work)
5724 {
5725 struct r5worker *worker = container_of(work, struct r5worker, work);
5726 struct r5worker_group *group = worker->group;
5727 struct r5conf *conf = group->conf;
5728 int group_id = group - conf->worker_groups;
5729 int handled;
5730 struct blk_plug plug;
5731
5732 pr_debug("+++ raid5worker active\n");
5733
5734 blk_start_plug(&plug);
5735 handled = 0;
5736 spin_lock_irq(&conf->device_lock);
5737 while (1) {
5738 int batch_size, released;
5739
5740 released = release_stripe_list(conf, worker->temp_inactive_list);
5741
5742 batch_size = handle_active_stripes(conf, group_id, worker,
5743 worker->temp_inactive_list);
5744 worker->working = false;
5745 if (!batch_size && !released)
5746 break;
5747 handled += batch_size;
5748 }
5749 pr_debug("%d stripes handled\n", handled);
5750
5751 spin_unlock_irq(&conf->device_lock);
5752 blk_finish_plug(&plug);
5753
5754 pr_debug("--- raid5worker inactive\n");
5755 }
5756
5757 /*
5758 * This is our raid5 kernel thread.
5759 *
5760 * We scan the hash table for stripes which can be handled now.
5761 * During the scan, completed stripes are saved for us by the interrupt
5762 * handler, so that they will not have to wait for our next wakeup.
5763 */
5764 static void raid5d(struct md_thread *thread)
5765 {
5766 struct mddev *mddev = thread->mddev;
5767 struct r5conf *conf = mddev->private;
5768 int handled;
5769 struct blk_plug plug;
5770
5771 pr_debug("+++ raid5d active\n");
5772
5773 md_check_recovery(mddev);
5774
5775 blk_start_plug(&plug);
5776 handled = 0;
5777 spin_lock_irq(&conf->device_lock);
5778 while (1) {
5779 struct bio *bio;
5780 int batch_size, released;
5781
5782 released = release_stripe_list(conf, conf->temp_inactive_list);
5783 if (released)
5784 clear_bit(R5_DID_ALLOC, &conf->cache_state);
5785
5786 if (
5787 !list_empty(&conf->bitmap_list)) {
5788 /* Now is a good time to flush some bitmap updates */
5789 conf->seq_flush++;
5790 spin_unlock_irq(&conf->device_lock);
5791 bitmap_unplug(mddev->bitmap);
5792 spin_lock_irq(&conf->device_lock);
5793 conf->seq_write = conf->seq_flush;
5794 activate_bit_delay(conf, conf->temp_inactive_list);
5795 }
5796 raid5_activate_delayed(conf);
5797
5798 while ((bio = remove_bio_from_retry(conf))) {
5799 int ok;
5800 spin_unlock_irq(&conf->device_lock);
5801 ok = retry_aligned_read(conf, bio);
5802 spin_lock_irq(&conf->device_lock);
5803 if (!ok)
5804 break;
5805 handled++;
5806 }
5807
5808 batch_size = handle_active_stripes(conf, ANY_GROUP, NULL,
5809 conf->temp_inactive_list);
5810 if (!batch_size && !released)
5811 break;
5812 handled += batch_size;
5813
5814 if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) {
5815 spin_unlock_irq(&conf->device_lock);
5816 md_check_recovery(mddev);
5817 spin_lock_irq(&conf->device_lock);
5818 }
5819 }
5820 pr_debug("%d stripes handled\n", handled);
5821
5822 spin_unlock_irq(&conf->device_lock);
5823 if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state)) {
5824 grow_one_stripe(conf, __GFP_NOWARN);
5825 /* Set flag even if allocation failed. This helps
5826 * slow down allocation requests when mem is short
5827 */
5828 set_bit(R5_DID_ALLOC, &conf->cache_state);
5829 }
5830
5831 async_tx_issue_pending_all();
5832 blk_finish_plug(&plug);
5833
5834 pr_debug("--- raid5d inactive\n");
5835 }
5836
5837 static ssize_t
5838 raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
5839 {
5840 struct r5conf *conf;
5841 int ret = 0;
5842 spin_lock(&mddev->lock);
5843 conf = mddev->private;
5844 if (conf)
5845 ret = sprintf(page, "%d\n", conf->min_nr_stripes);
5846 spin_unlock(&mddev->lock);
5847 return ret;
5848 }
5849
5850 int
5851 raid5_set_cache_size(struct mddev *mddev, int size)
5852 {
5853 struct r5conf *conf = mddev->private;
5854 int err;
5855
5856 if (size <= 16 || size > 32768)
5857 return -EINVAL;
5858
5859 conf->min_nr_stripes = size;
5860 while (size < conf->max_nr_stripes &&
5861 drop_one_stripe(conf))
5862 ;
5863
5864
5865 err = md_allow_write(mddev);
5866 if (err)
5867 return err;
5868
5869 while (size > conf->max_nr_stripes)
5870 if (!grow_one_stripe(conf, GFP_KERNEL))
5871 break;
5872
5873 return 0;
5874 }
5875 EXPORT_SYMBOL(raid5_set_cache_size);
5876
5877 static ssize_t
5878 raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len)
5879 {
5880 struct r5conf *conf;
5881 unsigned long new;
5882 int err;
5883
5884 if (len >= PAGE_SIZE)
5885 return -EINVAL;
5886 if (kstrtoul(page, 10, &new))
5887 return -EINVAL;
5888 err = mddev_lock(mddev);
5889 if (err)
5890 return err;
5891 conf = mddev->private;
5892 if (!conf)
5893 err = -ENODEV;
5894 else
5895 err = raid5_set_cache_size(mddev, new);
5896 mddev_unlock(mddev);
5897
5898 return err ?: len;
5899 }
5900
5901 static struct md_sysfs_entry
5902 raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
5903 raid5_show_stripe_cache_size,
5904 raid5_store_stripe_cache_size);
5905
5906 static ssize_t
5907 raid5_show_rmw_level(struct mddev *mddev, char *page)
5908 {
5909 struct r5conf *conf = mddev->private;
5910 if (conf)
5911 return sprintf(page, "%d\n", conf->rmw_level);
5912 else
5913 return 0;
5914 }
5915
5916 static ssize_t
5917 raid5_store_rmw_level(struct mddev *mddev, const char *page, size_t len)
5918 {
5919 struct r5conf *conf = mddev->private;
5920 unsigned long new;
5921
5922 if (!conf)
5923 return -ENODEV;
5924
5925 if (len >= PAGE_SIZE)
5926 return -EINVAL;
5927
5928 if (kstrtoul(page, 10, &new))
5929 return -EINVAL;
5930
5931 if (new != PARITY_DISABLE_RMW && !raid6_call.xor_syndrome)
5932 return -EINVAL;
5933
5934 if (new != PARITY_DISABLE_RMW &&
5935 new != PARITY_ENABLE_RMW &&
5936 new != PARITY_PREFER_RMW)
5937 return -EINVAL;
5938
5939 conf->rmw_level = new;
5940 return len;
5941 }
5942
5943 static struct md_sysfs_entry
5944 raid5_rmw_level = __ATTR(rmw_level, S_IRUGO | S_IWUSR,
5945 raid5_show_rmw_level,
5946 raid5_store_rmw_level);
5947
5948
5949 static ssize_t
5950 raid5_show_preread_threshold(struct mddev *mddev, char *page)
5951 {
5952 struct r5conf *conf;
5953 int ret = 0;
5954 spin_lock(&mddev->lock);
5955 conf = mddev->private;
5956 if (conf)
5957 ret = sprintf(page, "%d\n", conf->bypass_threshold);
5958 spin_unlock(&mddev->lock);
5959 return ret;
5960 }
5961
5962 static ssize_t
5963 raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len)
5964 {
5965 struct r5conf *conf;
5966 unsigned long new;
5967 int err;
5968
5969 if (len >= PAGE_SIZE)
5970 return -EINVAL;
5971 if (kstrtoul(page, 10, &new))
5972 return -EINVAL;
5973
5974 err = mddev_lock(mddev);
5975 if (err)
5976 return err;
5977 conf = mddev->private;
5978 if (!conf)
5979 err = -ENODEV;
5980 else if (new > conf->min_nr_stripes)
5981 err = -EINVAL;
5982 else
5983 conf->bypass_threshold = new;
5984 mddev_unlock(mddev);
5985 return err ?: len;
5986 }
5987
5988 static struct md_sysfs_entry
5989 raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
5990 S_IRUGO | S_IWUSR,
5991 raid5_show_preread_threshold,
5992 raid5_store_preread_threshold);
5993
5994 static ssize_t
5995 raid5_show_skip_copy(struct mddev *mddev, char *page)
5996 {
5997 struct r5conf *conf;
5998 int ret = 0;
5999 spin_lock(&mddev->lock);
6000 conf = mddev->private;
6001 if (conf)
6002 ret = sprintf(page, "%d\n", conf->skip_copy);
6003 spin_unlock(&mddev->lock);
6004 return ret;
6005 }
6006
6007 static ssize_t
6008 raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
6009 {
6010 struct r5conf *conf;
6011 unsigned long new;
6012 int err;
6013
6014 if (len >= PAGE_SIZE)
6015 return -EINVAL;
6016 if (kstrtoul(page, 10, &new))
6017 return -EINVAL;
6018 new = !!new;
6019
6020 err = mddev_lock(mddev);
6021 if (err)
6022 return err;
6023 conf = mddev->private;
6024 if (!conf)
6025 err = -ENODEV;
6026 else if (new != conf->skip_copy) {
6027 mddev_suspend(mddev);
6028 conf->skip_copy = new;
6029 if (new)
6030 mddev->queue->backing_dev_info.capabilities |=
6031 BDI_CAP_STABLE_WRITES;
6032 else
6033 mddev->queue->backing_dev_info.capabilities &=
6034 ~BDI_CAP_STABLE_WRITES;
6035 mddev_resume(mddev);
6036 }
6037 mddev_unlock(mddev);
6038 return err ?: len;
6039 }
6040
6041 static struct md_sysfs_entry
6042 raid5_skip_copy = __ATTR(skip_copy, S_IRUGO | S_IWUSR,
6043 raid5_show_skip_copy,
6044 raid5_store_skip_copy);
6045
6046 static ssize_t
6047 stripe_cache_active_show(struct mddev *mddev, char *page)
6048 {
6049 struct r5conf *conf = mddev->private;
6050 if (conf)
6051 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
6052 else
6053 return 0;
6054 }
6055
6056 static struct md_sysfs_entry
6057 raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
6058
6059 static ssize_t
6060 raid5_show_group_thread_cnt(struct mddev *mddev, char *page)
6061 {
6062 struct r5conf *conf;
6063 int ret = 0;
6064 spin_lock(&mddev->lock);
6065 conf = mddev->private;
6066 if (conf)
6067 ret = sprintf(page, "%d\n", conf->worker_cnt_per_group);
6068 spin_unlock(&mddev->lock);
6069 return ret;
6070 }
6071
6072 static int alloc_thread_groups(struct r5conf *conf, int cnt,
6073 int *group_cnt,
6074 int *worker_cnt_per_group,
6075 struct r5worker_group **worker_groups);
6076 static ssize_t
6077 raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
6078 {
6079 struct r5conf *conf;
6080 unsigned long new;
6081 int err;
6082 struct r5worker_group *new_groups, *old_groups;
6083 int group_cnt, worker_cnt_per_group;
6084
6085 if (len >= PAGE_SIZE)
6086 return -EINVAL;
6087 if (kstrtoul(page, 10, &new))
6088 return -EINVAL;
6089
6090 err = mddev_lock(mddev);
6091 if (err)
6092 return err;
6093 conf = mddev->private;
6094 if (!conf)
6095 err = -ENODEV;
6096 else if (new != conf->worker_cnt_per_group) {
6097 mddev_suspend(mddev);
6098
6099 old_groups = conf->worker_groups;
6100 if (old_groups)
6101 flush_workqueue(raid5_wq);
6102
6103 err = alloc_thread_groups(conf, new,
6104 &group_cnt, &worker_cnt_per_group,
6105 &new_groups);
6106 if (!err) {
6107 spin_lock_irq(&conf->device_lock);
6108 conf->group_cnt = group_cnt;
6109 conf->worker_cnt_per_group = worker_cnt_per_group;
6110 conf->worker_groups = new_groups;
6111 spin_unlock_irq(&conf->device_lock);
6112
6113 if (old_groups)
6114 kfree(old_groups[0].workers);
6115 kfree(old_groups);
6116 }
6117 mddev_resume(mddev);
6118 }
6119 mddev_unlock(mddev);
6120
6121 return err ?: len;
6122 }
6123
6124 static struct md_sysfs_entry
6125 raid5_group_thread_cnt = __ATTR(group_thread_cnt, S_IRUGO | S_IWUSR,
6126 raid5_show_group_thread_cnt,
6127 raid5_store_group_thread_cnt);
6128
6129 static struct attribute *raid5_attrs[] = {
6130 &raid5_stripecache_size.attr,
6131 &raid5_stripecache_active.attr,
6132 &raid5_preread_bypass_threshold.attr,
6133 &raid5_group_thread_cnt.attr,
6134 &raid5_skip_copy.attr,
6135 &raid5_rmw_level.attr,
6136 NULL,
6137 };
6138 static struct attribute_group raid5_attrs_group = {
6139 .name = NULL,
6140 .attrs = raid5_attrs,
6141 };
6142
6143 static int alloc_thread_groups(struct r5conf *conf, int cnt,
6144 int *group_cnt,
6145 int *worker_cnt_per_group,
6146 struct r5worker_group **worker_groups)
6147 {
6148 int i, j, k;
6149 ssize_t size;
6150 struct r5worker *workers;
6151
6152 *worker_cnt_per_group = cnt;
6153 if (cnt == 0) {
6154 *group_cnt = 0;
6155 *worker_groups = NULL;
6156 return 0;
6157 }
6158 *group_cnt = num_possible_nodes();
6159 size = sizeof(struct r5worker) * cnt;
6160 workers = kzalloc(size * *group_cnt, GFP_NOIO);
6161 *worker_groups = kzalloc(sizeof(struct r5worker_group) *
6162 *group_cnt, GFP_NOIO);
6163 if (!*worker_groups || !workers) {
6164 kfree(workers);
6165 kfree(*worker_groups);
6166 return -ENOMEM;
6167 }
6168
6169 for (i = 0; i < *group_cnt; i++) {
6170 struct r5worker_group *group;
6171
6172 group = &(*worker_groups)[i];
6173 INIT_LIST_HEAD(&group->handle_list);
6174 group->conf = conf;
6175 group->workers = workers + i * cnt;
6176
6177 for (j = 0; j < cnt; j++) {
6178 struct r5worker *worker = group->workers + j;
6179 worker->group = group;
6180 INIT_WORK(&worker->work, raid5_do_work);
6181
6182 for (k = 0; k < NR_STRIPE_HASH_LOCKS; k++)
6183 INIT_LIST_HEAD(worker->temp_inactive_list + k);
6184 }
6185 }
6186
6187 return 0;
6188 }
6189
6190 static void free_thread_groups(struct r5conf *conf)
6191 {
6192 if (conf->worker_groups)
6193 kfree(conf->worker_groups[0].workers);
6194 kfree(conf->worker_groups);
6195 conf->worker_groups = NULL;
6196 }
6197
6198 static sector_t
6199 raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
6200 {
6201 struct r5conf *conf = mddev->private;
6202
6203 if (!sectors)
6204 sectors = mddev->dev_sectors;
6205 if (!raid_disks)
6206 		/* size is defined by the smaller of the previous and new sizes */
6207 raid_disks = min(conf->raid_disks, conf->previous_raid_disks);
6208
6209 sectors &= ~((sector_t)mddev->chunk_sectors - 1);
6210 sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
6211 return sectors * (raid_disks - conf->max_degraded);
6212 }
6213
6214 static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
6215 {
6216 safe_put_page(percpu->spare_page);
6217 if (percpu->scribble)
6218 flex_array_free(percpu->scribble);
6219 percpu->spare_page = NULL;
6220 percpu->scribble = NULL;
6221 }
6222
6223 static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
6224 {
6225 if (conf->level == 6 && !percpu->spare_page)
6226 percpu->spare_page = alloc_page(GFP_KERNEL);
6227 if (!percpu->scribble)
6228 percpu->scribble = scribble_alloc(max(conf->raid_disks,
6229 conf->previous_raid_disks),
6230 max(conf->chunk_sectors,
6231 conf->prev_chunk_sectors)
6232 / STRIPE_SECTORS,
6233 GFP_KERNEL);
6234
6235 if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) {
6236 free_scratch_buffer(conf, percpu);
6237 return -ENOMEM;
6238 }
6239
6240 return 0;
6241 }
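/*
 * Editor's note: the scratch buffers above are sized for the widest geometry
 * the array can have while a reshape is in flight (the larger of the old and
 * new disk counts, and the larger of the old and new chunk sizes), so stripes
 * from either layout can be handled with the same per-cpu scribble space.
 */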
6242
6243 static void raid5_free_percpu(struct r5conf *conf)
6244 {
6245 unsigned long cpu;
6246
6247 if (!conf->percpu)
6248 return;
6249
6250 #ifdef CONFIG_HOTPLUG_CPU
6251 unregister_cpu_notifier(&conf->cpu_notify);
6252 #endif
6253
6254 get_online_cpus();
6255 for_each_possible_cpu(cpu)
6256 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
6257 put_online_cpus();
6258
6259 free_percpu(conf->percpu);
6260 }
6261
6262 static void free_conf(struct r5conf *conf)
6263 {
6264 if (conf->shrinker.seeks)
6265 unregister_shrinker(&conf->shrinker);
6266 free_thread_groups(conf);
6267 shrink_stripes(conf);
6268 raid5_free_percpu(conf);
6269 kfree(conf->disks);
6270 kfree(conf->stripe_hashtbl);
6271 kfree(conf);
6272 }
6273
6274 #ifdef CONFIG_HOTPLUG_CPU
6275 static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
6276 void *hcpu)
6277 {
6278 struct r5conf *conf = container_of(nfb, struct r5conf, cpu_notify);
6279 long cpu = (long)hcpu;
6280 struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
6281
6282 switch (action) {
6283 case CPU_UP_PREPARE:
6284 case CPU_UP_PREPARE_FROZEN:
6285 if (alloc_scratch_buffer(conf, percpu)) {
6286 pr_err("%s: failed memory allocation for cpu%ld\n",
6287 __func__, cpu);
6288 return notifier_from_errno(-ENOMEM);
6289 }
6290 break;
6291 case CPU_DEAD:
6292 case CPU_DEAD_FROZEN:
6293 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
6294 break;
6295 default:
6296 break;
6297 }
6298 return NOTIFY_OK;
6299 }
6300 #endif
6301
6302 static int raid5_alloc_percpu(struct r5conf *conf)
6303 {
6304 unsigned long cpu;
6305 int err = 0;
6306
6307 conf->percpu = alloc_percpu(struct raid5_percpu);
6308 if (!conf->percpu)
6309 return -ENOMEM;
6310
6311 #ifdef CONFIG_HOTPLUG_CPU
6312 conf->cpu_notify.notifier_call = raid456_cpu_notify;
6313 conf->cpu_notify.priority = 0;
6314 err = register_cpu_notifier(&conf->cpu_notify);
6315 if (err)
6316 return err;
6317 #endif
6318
6319 get_online_cpus();
6320 for_each_present_cpu(cpu) {
6321 err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
6322 if (err) {
6323 pr_err("%s: failed memory allocation for cpu%ld\n",
6324 __func__, cpu);
6325 break;
6326 }
6327 }
6328 put_online_cpus();
6329
6330 return err;
6331 }
6332
6333 static unsigned long raid5_cache_scan(struct shrinker *shrink,
6334 struct shrink_control *sc)
6335 {
6336 struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
6337 int ret = 0;
6338 while (ret < sc->nr_to_scan) {
6339 if (drop_one_stripe(conf) == 0)
6340 return SHRINK_STOP;
6341 ret++;
6342 }
6343 return ret;
6344 }
6345
6346 static unsigned long raid5_cache_count(struct shrinker *shrink,
6347 struct shrink_control *sc)
6348 {
6349 struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
6350
6351 if (conf->max_nr_stripes < conf->min_nr_stripes)
6352 /* unlikely, but not impossible */
6353 return 0;
6354 return conf->max_nr_stripes - conf->min_nr_stripes;
6355 }
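/*
 * Editor's note: raid5_cache_count() and raid5_cache_scan() form the shrinker
 * pair registered in setup_conf() below: count reports how many stripe_heads
 * above min_nr_stripes are reclaimable, and scan drops them one at a time via
 * drop_one_stripe(), returning SHRINK_STOP once nothing more can be freed.
 */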
6356
6357 static struct r5conf *setup_conf(struct mddev *mddev)
6358 {
6359 struct r5conf *conf;
6360 int raid_disk, memory, max_disks;
6361 struct md_rdev *rdev;
6362 struct disk_info *disk;
6363 char pers_name[6];
6364 int i;
6365 int group_cnt, worker_cnt_per_group;
6366 struct r5worker_group *new_group;
6367
6368 if (mddev->new_level != 5
6369 && mddev->new_level != 4
6370 && mddev->new_level != 6) {
6371 printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n",
6372 mdname(mddev), mddev->new_level);
6373 return ERR_PTR(-EIO);
6374 }
6375 if ((mddev->new_level == 5
6376 && !algorithm_valid_raid5(mddev->new_layout)) ||
6377 (mddev->new_level == 6
6378 && !algorithm_valid_raid6(mddev->new_layout))) {
6379 printk(KERN_ERR "md/raid:%s: layout %d not supported\n",
6380 mdname(mddev), mddev->new_layout);
6381 return ERR_PTR(-EIO);
6382 }
6383 if (mddev->new_level == 6 && mddev->raid_disks < 4) {
6384 printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n",
6385 mdname(mddev), mddev->raid_disks);
6386 return ERR_PTR(-EINVAL);
6387 }
6388
6389 if (!mddev->new_chunk_sectors ||
6390 (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
6391 !is_power_of_2(mddev->new_chunk_sectors)) {
6392 printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n",
6393 mdname(mddev), mddev->new_chunk_sectors << 9);
6394 return ERR_PTR(-EINVAL);
6395 }
6396
6397 conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL);
6398 if (conf == NULL)
6399 goto abort;
6400 	/* Don't enable multi-threading by default */
6401 if (!alloc_thread_groups(conf, 0, &group_cnt, &worker_cnt_per_group,
6402 &new_group)) {
6403 conf->group_cnt = group_cnt;
6404 conf->worker_cnt_per_group = worker_cnt_per_group;
6405 conf->worker_groups = new_group;
6406 } else
6407 goto abort;
6408 spin_lock_init(&conf->device_lock);
6409 seqcount_init(&conf->gen_lock);
6410 init_waitqueue_head(&conf->wait_for_stripe);
6411 init_waitqueue_head(&conf->wait_for_overlap);
6412 INIT_LIST_HEAD(&conf->handle_list);
6413 INIT_LIST_HEAD(&conf->hold_list);
6414 INIT_LIST_HEAD(&conf->delayed_list);
6415 INIT_LIST_HEAD(&conf->bitmap_list);
6416 init_llist_head(&conf->released_stripes);
6417 atomic_set(&conf->active_stripes, 0);
6418 atomic_set(&conf->preread_active_stripes, 0);
6419 atomic_set(&conf->active_aligned_reads, 0);
6420 conf->bypass_threshold = BYPASS_THRESHOLD;
6421 conf->recovery_disabled = mddev->recovery_disabled - 1;
6422
6423 conf->raid_disks = mddev->raid_disks;
6424 if (mddev->reshape_position == MaxSector)
6425 conf->previous_raid_disks = mddev->raid_disks;
6426 else
6427 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
6428 max_disks = max(conf->raid_disks, conf->previous_raid_disks);
6429
6430 conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
6431 GFP_KERNEL);
6432 if (!conf->disks)
6433 goto abort;
6434
6435 conf->mddev = mddev;
6436
6437 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
6438 goto abort;
6439
6440 	/* We init hash_locks[0] separately so that it can be used
6441 * as the reference lock in the spin_lock_nest_lock() call
6442 * in lock_all_device_hash_locks_irq in order to convince
6443 * lockdep that we know what we are doing.
6444 */
6445 spin_lock_init(conf->hash_locks);
6446 for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
6447 spin_lock_init(conf->hash_locks + i);
6448
6449 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
6450 INIT_LIST_HEAD(conf->inactive_list + i);
6451
6452 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
6453 INIT_LIST_HEAD(conf->temp_inactive_list + i);
6454
6455 conf->level = mddev->new_level;
6456 conf->chunk_sectors = mddev->new_chunk_sectors;
6457 if (raid5_alloc_percpu(conf) != 0)
6458 goto abort;
6459
6460 pr_debug("raid456: run(%s) called.\n", mdname(mddev));
6461
6462 rdev_for_each(rdev, mddev) {
6463 raid_disk = rdev->raid_disk;
6464 if (raid_disk >= max_disks
6465 || raid_disk < 0)
6466 continue;
6467 disk = conf->disks + raid_disk;
6468
6469 if (test_bit(Replacement, &rdev->flags)) {
6470 if (disk->replacement)
6471 goto abort;
6472 disk->replacement = rdev;
6473 } else {
6474 if (disk->rdev)
6475 goto abort;
6476 disk->rdev = rdev;
6477 }
6478
6479 if (test_bit(In_sync, &rdev->flags)) {
6480 char b[BDEVNAME_SIZE];
6481 printk(KERN_INFO "md/raid:%s: device %s operational as raid"
6482 " disk %d\n",
6483 mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
6484 } else if (rdev->saved_raid_disk != raid_disk)
6485 /* Cannot rely on bitmap to complete recovery */
6486 conf->fullsync = 1;
6487 }
6488
6489 conf->level = mddev->new_level;
6490 if (conf->level == 6) {
6491 conf->max_degraded = 2;
6492 if (raid6_call.xor_syndrome)
6493 conf->rmw_level = PARITY_ENABLE_RMW;
6494 else
6495 conf->rmw_level = PARITY_DISABLE_RMW;
6496 } else {
6497 conf->max_degraded = 1;
6498 conf->rmw_level = PARITY_ENABLE_RMW;
6499 }
6500 conf->algorithm = mddev->new_layout;
6501 conf->reshape_progress = mddev->reshape_position;
6502 if (conf->reshape_progress != MaxSector) {
6503 conf->prev_chunk_sectors = mddev->chunk_sectors;
6504 conf->prev_algo = mddev->layout;
6505 }
6506
6507 conf->min_nr_stripes = NR_STRIPES;
6508 memory = conf->min_nr_stripes * (sizeof(struct stripe_head) +
6509 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
6510 atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS);
6511 if (grow_stripes(conf, conf->min_nr_stripes)) {
6512 printk(KERN_ERR
6513 "md/raid:%s: couldn't allocate %dkB for buffers\n",
6514 mdname(mddev), memory);
6515 goto abort;
6516 } else
6517 printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
6518 mdname(mddev), memory);
6519 /*
6520 	 * Losing a stripe head costs more than the time to refill it;
6521 	 * it reduces the queue depth and so can hurt throughput.
6522 * So set it rather large, scaled by number of devices.
6523 */
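/*
 * Editor's note: DEFAULT_SEEKS is the generic shrinker baseline cost (2 in
 * current kernels), so a hypothetical 8-device array would end up with
 * shrinker.seeks == 64, i.e. its stripe_heads are treated as relatively
 * expensive for the VM to reclaim.
 */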
6524 conf->shrinker.seeks = DEFAULT_SEEKS * conf->raid_disks * 4;
6525 conf->shrinker.scan_objects = raid5_cache_scan;
6526 conf->shrinker.count_objects = raid5_cache_count;
6527 conf->shrinker.batch = 128;
6528 conf->shrinker.flags = 0;
6529 register_shrinker(&conf->shrinker);
6530
6531 sprintf(pers_name, "raid%d", mddev->new_level);
6532 conf->thread = md_register_thread(raid5d, mddev, pers_name);
6533 if (!conf->thread) {
6534 printk(KERN_ERR
6535 "md/raid:%s: couldn't allocate thread.\n",
6536 mdname(mddev));
6537 goto abort;
6538 }
6539
6540 return conf;
6541
6542 abort:
6543 if (conf) {
6544 free_conf(conf);
6545 return ERR_PTR(-EIO);
6546 } else
6547 return ERR_PTR(-ENOMEM);
6548 }
6549
6550 static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
6551 {
6552 switch (algo) {
6553 case ALGORITHM_PARITY_0:
6554 if (raid_disk < max_degraded)
6555 return 1;
6556 break;
6557 case ALGORITHM_PARITY_N:
6558 if (raid_disk >= raid_disks - max_degraded)
6559 return 1;
6560 break;
6561 case ALGORITHM_PARITY_0_6:
6562 if (raid_disk == 0 ||
6563 raid_disk == raid_disks - 1)
6564 return 1;
6565 break;
6566 case ALGORITHM_LEFT_ASYMMETRIC_6:
6567 case ALGORITHM_RIGHT_ASYMMETRIC_6:
6568 case ALGORITHM_LEFT_SYMMETRIC_6:
6569 case ALGORITHM_RIGHT_SYMMETRIC_6:
6570 if (raid_disk == raid_disks - 1)
6571 return 1;
6572 }
6573 return 0;
6574 }
6575
6576 static int run(struct mddev *mddev)
6577 {
6578 struct r5conf *conf;
6579 int working_disks = 0;
6580 int dirty_parity_disks = 0;
6581 struct md_rdev *rdev;
6582 sector_t reshape_offset = 0;
6583 int i;
6584 long long min_offset_diff = 0;
6585 int first = 1;
6586
6587 if (mddev->recovery_cp != MaxSector)
6588 printk(KERN_NOTICE "md/raid:%s: not clean"
6589 " -- starting background reconstruction\n",
6590 mdname(mddev));
6591
6592 rdev_for_each(rdev, mddev) {
6593 long long diff;
6594 if (rdev->raid_disk < 0)
6595 continue;
6596 diff = (rdev->new_data_offset - rdev->data_offset);
6597 if (first) {
6598 min_offset_diff = diff;
6599 first = 0;
6600 } else if (mddev->reshape_backwards &&
6601 diff < min_offset_diff)
6602 min_offset_diff = diff;
6603 else if (!mddev->reshape_backwards &&
6604 diff > min_offset_diff)
6605 min_offset_diff = diff;
6606 }
6607
6608 if (mddev->reshape_position != MaxSector) {
6609 /* Check that we can continue the reshape.
6610 * Difficulties arise if the stripe we would write to
6611 * next is at or after the stripe we would read from next.
6612 * For a reshape that changes the number of devices, this
6613 * is only possible for a very short time, and mdadm makes
6614 		 * sure that time appears to have passed before assembling
6615 * the array. So we fail if that time hasn't passed.
6616 * For a reshape that keeps the number of devices the same
6617 		 * mdadm must be monitoring the reshape and keeping the
6618 * critical areas read-only and backed up. It will start
6619 * the array in read-only mode, so we check for that.
6620 */
6621 sector_t here_new, here_old;
6622 int old_disks;
6623 int max_degraded = (mddev->level == 6 ? 2 : 1);
6624
6625 if (mddev->new_level != mddev->level) {
6626 printk(KERN_ERR "md/raid:%s: unsupported reshape "
6627 "required - aborting.\n",
6628 mdname(mddev));
6629 return -EINVAL;
6630 }
6631 old_disks = mddev->raid_disks - mddev->delta_disks;
6632 /* reshape_position must be on a new-stripe boundary, and one
6633 * further up in new geometry must map after here in old
6634 * geometry.
6635 */
6636 here_new = mddev->reshape_position;
6637 if (sector_div(here_new, mddev->new_chunk_sectors *
6638 (mddev->raid_disks - max_degraded))) {
6639 printk(KERN_ERR "md/raid:%s: reshape_position not "
6640 "on a stripe boundary\n", mdname(mddev));
6641 return -EINVAL;
6642 }
6643 reshape_offset = here_new * mddev->new_chunk_sectors;
6644 /* here_new is the stripe we will write to */
6645 here_old = mddev->reshape_position;
6646 sector_div(here_old, mddev->chunk_sectors *
6647 (old_disks-max_degraded));
6648 /* here_old is the first stripe that we might need to read
6649 * from */
6650 if (mddev->delta_disks == 0) {
6651 if ((here_new * mddev->new_chunk_sectors !=
6652 here_old * mddev->chunk_sectors)) {
6653 printk(KERN_ERR "md/raid:%s: reshape position is"
6654 " confused - aborting\n", mdname(mddev));
6655 return -EINVAL;
6656 }
6657 /* We cannot be sure it is safe to start an in-place
6658 * reshape. It is only safe if user-space is monitoring
6659 * and taking constant backups.
6660 * mdadm always starts a situation like this in
6661 * readonly mode so it can take control before
6662 * allowing any writes. So just check for that.
6663 */
6664 if (abs(min_offset_diff) >= mddev->chunk_sectors &&
6665 abs(min_offset_diff) >= mddev->new_chunk_sectors)
6666 /* not really in-place - so OK */;
6667 else if (mddev->ro == 0) {
6668 printk(KERN_ERR "md/raid:%s: in-place reshape "
6669 "must be started in read-only mode "
6670 "- aborting\n",
6671 mdname(mddev));
6672 return -EINVAL;
6673 }
6674 } else if (mddev->reshape_backwards
6675 ? (here_new * mddev->new_chunk_sectors + min_offset_diff <=
6676 here_old * mddev->chunk_sectors)
6677 : (here_new * mddev->new_chunk_sectors >=
6678 here_old * mddev->chunk_sectors + (-min_offset_diff))) {
6679 /* Reading from the same stripe as writing to - bad */
6680 printk(KERN_ERR "md/raid:%s: reshape_position too early for "
6681 "auto-recovery - aborting.\n",
6682 mdname(mddev));
6683 return -EINVAL;
6684 }
6685 printk(KERN_INFO "md/raid:%s: reshape will continue\n",
6686 mdname(mddev));
6687 /* OK, we should be able to continue; */
6688 } else {
6689 BUG_ON(mddev->level != mddev->new_level);
6690 BUG_ON(mddev->layout != mddev->new_layout);
6691 BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
6692 BUG_ON(mddev->delta_disks != 0);
6693 }
6694
6695 if (mddev->private == NULL)
6696 conf = setup_conf(mddev);
6697 else
6698 conf = mddev->private;
6699
6700 if (IS_ERR(conf))
6701 return PTR_ERR(conf);
6702
6703 conf->min_offset_diff = min_offset_diff;
6704 mddev->thread = conf->thread;
6705 conf->thread = NULL;
6706 mddev->private = conf;
6707
6708 for (i = 0; i < conf->raid_disks && conf->previous_raid_disks;
6709 i++) {
6710 rdev = conf->disks[i].rdev;
6711 if (!rdev && conf->disks[i].replacement) {
6712 /* The replacement is all we have yet */
6713 rdev = conf->disks[i].replacement;
6714 conf->disks[i].replacement = NULL;
6715 clear_bit(Replacement, &rdev->flags);
6716 conf->disks[i].rdev = rdev;
6717 }
6718 if (!rdev)
6719 continue;
6720 if (conf->disks[i].replacement &&
6721 conf->reshape_progress != MaxSector) {
6722 /* replacements and reshape simply do not mix. */
6723 printk(KERN_ERR "md: cannot handle concurrent "
6724 "replacement and reshape.\n");
6725 goto abort;
6726 }
6727 if (test_bit(In_sync, &rdev->flags)) {
6728 working_disks++;
6729 continue;
6730 }
6731 		/* This disk is not fully in-sync. However, if it
6732 		 * just stored parity (beyond the recovery_offset),
6733 		 * then we don't need to be concerned about the
6734 * array being dirty.
6735 * When reshape goes 'backwards', we never have
6736 * partially completed devices, so we only need
6737 * to worry about reshape going forwards.
6738 */
6739 /* Hack because v0.91 doesn't store recovery_offset properly. */
6740 if (mddev->major_version == 0 &&
6741 mddev->minor_version > 90)
6742 rdev->recovery_offset = reshape_offset;
6743
6744 if (rdev->recovery_offset < reshape_offset) {
6745 /* We need to check old and new layout */
6746 if (!only_parity(rdev->raid_disk,
6747 conf->algorithm,
6748 conf->raid_disks,
6749 conf->max_degraded))
6750 continue;
6751 }
6752 if (!only_parity(rdev->raid_disk,
6753 conf->prev_algo,
6754 conf->previous_raid_disks,
6755 conf->max_degraded))
6756 continue;
6757 dirty_parity_disks++;
6758 }
6759
6760 /*
6761 * 0 for a fully functional array, 1 or 2 for a degraded array.
6762 */
6763 mddev->degraded = calc_degraded(conf);
6764
6765 if (has_failed(conf)) {
6766 printk(KERN_ERR "md/raid:%s: not enough operational devices"
6767 " (%d/%d failed)\n",
6768 mdname(mddev), mddev->degraded, conf->raid_disks);
6769 goto abort;
6770 }
6771
6772 /* device size must be a multiple of chunk size */
6773 mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
6774 mddev->resync_max_sectors = mddev->dev_sectors;
6775
6776 if (mddev->degraded > dirty_parity_disks &&
6777 mddev->recovery_cp != MaxSector) {
6778 if (mddev->ok_start_degraded)
6779 printk(KERN_WARNING
6780 "md/raid:%s: starting dirty degraded array"
6781 " - data corruption possible.\n",
6782 mdname(mddev));
6783 else {
6784 printk(KERN_ERR
6785 "md/raid:%s: cannot start dirty degraded array.\n",
6786 mdname(mddev));
6787 goto abort;
6788 }
6789 }
6790
6791 if (mddev->degraded == 0)
6792 printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d"
6793 " devices, algorithm %d\n", mdname(mddev), conf->level,
6794 mddev->raid_disks-mddev->degraded, mddev->raid_disks,
6795 mddev->new_layout);
6796 else
6797 printk(KERN_ALERT "md/raid:%s: raid level %d active with %d"
6798 " out of %d devices, algorithm %d\n",
6799 mdname(mddev), conf->level,
6800 mddev->raid_disks - mddev->degraded,
6801 mddev->raid_disks, mddev->new_layout);
6802
6803 print_raid5_conf(conf);
6804
6805 if (conf->reshape_progress != MaxSector) {
6806 conf->reshape_safe = conf->reshape_progress;
6807 atomic_set(&conf->reshape_stripes, 0);
6808 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
6809 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
6810 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
6811 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
6812 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
6813 "reshape");
6814 }
6815
6816 /* Ok, everything is just fine now */
6817 if (mddev->to_remove == &raid5_attrs_group)
6818 mddev->to_remove = NULL;
6819 else if (mddev->kobj.sd &&
6820 sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
6821 printk(KERN_WARNING
6822 "raid5: failed to create sysfs attributes for %s\n",
6823 mdname(mddev));
6824 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
6825
6826 if (mddev->queue) {
6827 int chunk_size;
6828 bool discard_supported = true;
6829 		/* read-ahead size must cover two whole stripes, which
6830 		 * is 2 * (datadisks) * chunksize, where datadisks is the
6831 		 * number of raid devices minus the number of parity devices
6832 */
6833 int data_disks = conf->previous_raid_disks - conf->max_degraded;
6834 int stripe = data_disks *
6835 ((mddev->chunk_sectors << 9) / PAGE_SIZE);
6836 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
6837 mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
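/*
 * Editor's note, a hypothetical sizing: with 4 data disks and a 512K chunk,
 * stripe = 4 * (512K / PAGE_SIZE) = 512 pages, so ra_pages is raised to at
 * least 1024 pages, i.e. 4MB of read-ahead with 4K pages.
 */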
6838
6839 chunk_size = mddev->chunk_sectors << 9;
6840 blk_queue_io_min(mddev->queue, chunk_size);
6841 blk_queue_io_opt(mddev->queue, chunk_size *
6842 (conf->raid_disks - conf->max_degraded));
6843 mddev->queue->limits.raid_partial_stripes_expensive = 1;
6844 /*
6845 * We can only discard a whole stripe. It doesn't make sense to
6846 		 * discard the data disks but still write the parity disk
6847 */
6848 stripe = stripe * PAGE_SIZE;
6849 /* Round up to power of 2, as discard handling
6850 * currently assumes that */
6851 while ((stripe-1) & stripe)
6852 stripe = (stripe | (stripe-1)) + 1;
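/*
 * Editor's note, worked example of the rounding above: for a hypothetical
 * 3-data-disk layout with 4K pages, stripe starts at 3 * 4096 = 12288
 * (0x3000); (stripe | (stripe - 1)) + 1 gives 0x4000 = 16384, the next
 * power of two, after which the loop condition is false.
 */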
6853 mddev->queue->limits.discard_alignment = stripe;
6854 mddev->queue->limits.discard_granularity = stripe;
6855 /*
6856 		 * the unaligned part of a discard request will be ignored, so we can't
6857 * guarantee discard_zeroes_data
6858 */
6859 mddev->queue->limits.discard_zeroes_data = 0;
6860
6861 blk_queue_max_write_same_sectors(mddev->queue, 0);
6862
6863 rdev_for_each(rdev, mddev) {
6864 disk_stack_limits(mddev->gendisk, rdev->bdev,
6865 rdev->data_offset << 9);
6866 disk_stack_limits(mddev->gendisk, rdev->bdev,
6867 rdev->new_data_offset << 9);
6868 /*
6869 * discard_zeroes_data is required, otherwise data
6870 * could be lost. Consider a scenario: discard a stripe
6871 * (the stripe could be inconsistent if
6872 * discard_zeroes_data is 0); write one disk of the
6873 * stripe (the stripe could be inconsistent again
6874 * depending on which disks are used to calculate
6875 			 * parity); the disk is broken; the stripe data of this
6876 * disk is lost.
6877 */
6878 if (!blk_queue_discard(bdev_get_queue(rdev->bdev)) ||
6879 !bdev_get_queue(rdev->bdev)->
6880 limits.discard_zeroes_data)
6881 discard_supported = false;
6882 /* Unfortunately, discard_zeroes_data is not currently
6883 * a guarantee - just a hint. So we only allow DISCARD
6884 * if the sysadmin has confirmed that only safe devices
6885 * are in use by setting a module parameter.
6886 */
6887 if (!devices_handle_discard_safely) {
6888 if (discard_supported) {
6889 pr_info("md/raid456: discard support disabled due to uncertainty.\n");
6890 pr_info("Set raid456.devices_handle_discard_safely=Y to override.\n");
6891 }
6892 discard_supported = false;
6893 }
6894 }
6895
6896 if (discard_supported &&
6897 mddev->queue->limits.max_discard_sectors >= stripe &&
6898 mddev->queue->limits.discard_granularity >= stripe)
6899 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
6900 mddev->queue);
6901 else
6902 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
6903 mddev->queue);
6904 }
6905
6906 return 0;
6907 abort:
6908 md_unregister_thread(&mddev->thread);
6909 print_raid5_conf(conf);
6910 free_conf(conf);
6911 mddev->private = NULL;
6912 printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev));
6913 return -EIO;
6914 }
6915
6916 static void raid5_free(struct mddev *mddev, void *priv)
6917 {
6918 struct r5conf *conf = priv;
6919
6920 free_conf(conf);
6921 mddev->to_remove = &raid5_attrs_group;
6922 }
6923
6924 static void status(struct seq_file *seq, struct mddev *mddev)
6925 {
6926 struct r5conf *conf = mddev->private;
6927 int i;
6928
6929 seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
6930 mddev->chunk_sectors / 2, mddev->layout);
6931 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
6932 for (i = 0; i < conf->raid_disks; i++)
6933 seq_printf (seq, "%s",
6934 conf->disks[i].rdev &&
6935 test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
6936 seq_printf (seq, "]");
6937 }
6938
6939 static void print_raid5_conf (struct r5conf *conf)
6940 {
6941 int i;
6942 struct disk_info *tmp;
6943
6944 printk(KERN_DEBUG "RAID conf printout:\n");
6945 if (!conf) {
6946 printk("(conf==NULL)\n");
6947 return;
6948 }
6949 printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level,
6950 conf->raid_disks,
6951 conf->raid_disks - conf->mddev->degraded);
6952
6953 for (i = 0; i < conf->raid_disks; i++) {
6954 char b[BDEVNAME_SIZE];
6955 tmp = conf->disks + i;
6956 if (tmp->rdev)
6957 printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n",
6958 i, !test_bit(Faulty, &tmp->rdev->flags),
6959 bdevname(tmp->rdev->bdev, b));
6960 }
6961 }
6962
6963 static int raid5_spare_active(struct mddev *mddev)
6964 {
6965 int i;
6966 struct r5conf *conf = mddev->private;
6967 struct disk_info *tmp;
6968 int count = 0;
6969 unsigned long flags;
6970
6971 for (i = 0; i < conf->raid_disks; i++) {
6972 tmp = conf->disks + i;
6973 if (tmp->replacement
6974 && tmp->replacement->recovery_offset == MaxSector
6975 && !test_bit(Faulty, &tmp->replacement->flags)
6976 && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
6977 /* Replacement has just become active. */
6978 if (!tmp->rdev
6979 || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
6980 count++;
6981 if (tmp->rdev) {
6982 /* Replaced device not technically faulty,
6983 * but we need to be sure it gets removed
6984 * and never re-added.
6985 */
6986 set_bit(Faulty, &tmp->rdev->flags);
6987 sysfs_notify_dirent_safe(
6988 tmp->rdev->sysfs_state);
6989 }
6990 sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
6991 } else if (tmp->rdev
6992 && tmp->rdev->recovery_offset == MaxSector
6993 && !test_bit(Faulty, &tmp->rdev->flags)
6994 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
6995 count++;
6996 sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
6997 }
6998 }
6999 spin_lock_irqsave(&conf->device_lock, flags);
7000 mddev->degraded = calc_degraded(conf);
7001 spin_unlock_irqrestore(&conf->device_lock, flags);
7002 print_raid5_conf(conf);
7003 return count;
7004 }
7005
7006 static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
7007 {
7008 struct r5conf *conf = mddev->private;
7009 int err = 0;
7010 int number = rdev->raid_disk;
7011 struct md_rdev **rdevp;
7012 struct disk_info *p = conf->disks + number;
7013
7014 print_raid5_conf(conf);
7015 if (rdev == p->rdev)
7016 rdevp = &p->rdev;
7017 else if (rdev == p->replacement)
7018 rdevp = &p->replacement;
7019 else
7020 return 0;
7021
7022 if (number >= conf->raid_disks &&
7023 conf->reshape_progress == MaxSector)
7024 clear_bit(In_sync, &rdev->flags);
7025
7026 if (test_bit(In_sync, &rdev->flags) ||
7027 atomic_read(&rdev->nr_pending)) {
7028 err = -EBUSY;
7029 goto abort;
7030 }
7031 /* Only remove non-faulty devices if recovery
7032 * isn't possible.
7033 */
7034 if (!test_bit(Faulty, &rdev->flags) &&
7035 mddev->recovery_disabled != conf->recovery_disabled &&
7036 !has_failed(conf) &&
7037 (!p->replacement || p->replacement == rdev) &&
7038 number < conf->raid_disks) {
7039 err = -EBUSY;
7040 goto abort;
7041 }
7042 *rdevp = NULL;
7043 synchronize_rcu();
7044 if (atomic_read(&rdev->nr_pending)) {
7045 /* lost the race, try later */
7046 err = -EBUSY;
7047 *rdevp = rdev;
7048 } else if (p->replacement) {
7049 /* We must have just cleared 'rdev' */
7050 p->rdev = p->replacement;
7051 clear_bit(Replacement, &p->replacement->flags);
7052 smp_mb(); /* Make sure other CPUs may see both as identical
7053 * but will never see neither - if they are careful
7054 */
7055 p->replacement = NULL;
7056 clear_bit(WantReplacement, &rdev->flags);
7057 } else
7058 		/* We might have just removed the Replacement as faulty;
7059 * clear the bit just in case
7060 */
7061 clear_bit(WantReplacement, &rdev->flags);
7062 abort:
7063
7064 print_raid5_conf(conf);
7065 return err;
7066 }
7067
7068 static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
7069 {
7070 struct r5conf *conf = mddev->private;
7071 int err = -EEXIST;
7072 int disk;
7073 struct disk_info *p;
7074 int first = 0;
7075 int last = conf->raid_disks - 1;
7076
7077 if (mddev->recovery_disabled == conf->recovery_disabled)
7078 return -EBUSY;
7079
7080 if (rdev->saved_raid_disk < 0 && has_failed(conf))
7081 /* no point adding a device */
7082 return -EINVAL;
7083
7084 if (rdev->raid_disk >= 0)
7085 first = last = rdev->raid_disk;
7086
7087 /*
7088 * find the disk ... but prefer rdev->saved_raid_disk
7089 * if possible.
7090 */
7091 if (rdev->saved_raid_disk >= 0 &&
7092 rdev->saved_raid_disk >= first &&
7093 conf->disks[rdev->saved_raid_disk].rdev == NULL)
7094 first = rdev->saved_raid_disk;
7095
7096 for (disk = first; disk <= last; disk++) {
7097 p = conf->disks + disk;
7098 if (p->rdev == NULL) {
7099 clear_bit(In_sync, &rdev->flags);
7100 rdev->raid_disk = disk;
7101 err = 0;
7102 if (rdev->saved_raid_disk != disk)
7103 conf->fullsync = 1;
7104 rcu_assign_pointer(p->rdev, rdev);
7105 goto out;
7106 }
7107 }
7108 for (disk = first; disk <= last; disk++) {
7109 p = conf->disks + disk;
7110 if (test_bit(WantReplacement, &p->rdev->flags) &&
7111 p->replacement == NULL) {
7112 clear_bit(In_sync, &rdev->flags);
7113 set_bit(Replacement, &rdev->flags);
7114 rdev->raid_disk = disk;
7115 err = 0;
7116 conf->fullsync = 1;
7117 rcu_assign_pointer(p->replacement, rdev);
7118 break;
7119 }
7120 }
7121 out:
7122 print_raid5_conf(conf);
7123 return err;
7124 }
7125
7126 static int raid5_resize(struct mddev *mddev, sector_t sectors)
7127 {
7128 /* no resync is happening, and there is enough space
7129 * on all devices, so we can resize.
7130 * We need to make sure resync covers any new space.
7131 * If the array is shrinking we should possibly wait until
7132 * any io in the removed space completes, but it hardly seems
7133 * worth it.
7134 */
7135 sector_t newsize;
7136 sectors &= ~((sector_t)mddev->chunk_sectors - 1);
7137 newsize = raid5_size(mddev, sectors, mddev->raid_disks);
7138 if (mddev->external_size &&
7139 mddev->array_sectors > newsize)
7140 return -EINVAL;
7141 if (mddev->bitmap) {
7142 int ret = bitmap_resize(mddev->bitmap, sectors, 0, 0);
7143 if (ret)
7144 return ret;
7145 }
7146 md_set_array_sectors(mddev, newsize);
7147 set_capacity(mddev->gendisk, mddev->array_sectors);
7148 revalidate_disk(mddev->gendisk);
7149 if (sectors > mddev->dev_sectors &&
7150 mddev->recovery_cp > mddev->dev_sectors) {
7151 mddev->recovery_cp = mddev->dev_sectors;
7152 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7153 }
7154 mddev->dev_sectors = sectors;
7155 mddev->resync_max_sectors = sectors;
7156 return 0;
7157 }
7158
7159 static int check_stripe_cache(struct mddev *mddev)
7160 {
7161 /* Can only proceed if there are plenty of stripe_heads.
7162 	 * We need a minimum of one full stripe, and for sensible progress
7163 * it is best to have about 4 times that.
7164 * If we require 4 times, then the default 256 4K stripe_heads will
7165 * allow for chunk sizes up to 256K, which is probably OK.
7166 * If the chunk size is greater, user-space should request more
7167 * stripe_heads first.
7168 */
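/*
 * Editor's note, a worked example: with a hypothetical 512K chunk,
 * (chunk_sectors << 9) / STRIPE_SIZE == 128, so the test below requires
 * min_nr_stripes >= 512; the admin must raise stripe_cache_size beyond the
 * default 256 before such a reshape can proceed.
 */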
7169 struct r5conf *conf = mddev->private;
7170 if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
7171 > conf->min_nr_stripes ||
7172 ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
7173 > conf->min_nr_stripes) {
7174 printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes. Needed %lu\n",
7175 mdname(mddev),
7176 ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
7177 / STRIPE_SIZE)*4);
7178 return 0;
7179 }
7180 return 1;
7181 }
7182
7183 static int check_reshape(struct mddev *mddev)
7184 {
7185 struct r5conf *conf = mddev->private;
7186
7187 if (mddev->delta_disks == 0 &&
7188 mddev->new_layout == mddev->layout &&
7189 mddev->new_chunk_sectors == mddev->chunk_sectors)
7190 return 0; /* nothing to do */
7191 if (has_failed(conf))
7192 return -EINVAL;
7193 if (mddev->delta_disks < 0 && mddev->reshape_position == MaxSector) {
7194 /* We might be able to shrink, but the devices must
7195 * be made bigger first.
7196 * For raid6, 4 is the minimum size.
7197 		 * Otherwise 2 is the minimum.
7198 */
7199 int min = 2;
7200 if (mddev->level == 6)
7201 min = 4;
7202 if (mddev->raid_disks + mddev->delta_disks < min)
7203 return -EINVAL;
7204 }
7205
7206 if (!check_stripe_cache(mddev))
7207 return -ENOSPC;
7208
7209 if (mddev->new_chunk_sectors > mddev->chunk_sectors ||
7210 mddev->delta_disks > 0)
7211 if (resize_chunks(conf,
7212 conf->previous_raid_disks
7213 + max(0, mddev->delta_disks),
7214 max(mddev->new_chunk_sectors,
7215 mddev->chunk_sectors)
7216 ) < 0)
7217 return -ENOMEM;
7218 return resize_stripes(conf, (conf->previous_raid_disks
7219 + mddev->delta_disks));
7220 }
7221
7222 static int raid5_start_reshape(struct mddev *mddev)
7223 {
7224 struct r5conf *conf = mddev->private;
7225 struct md_rdev *rdev;
7226 int spares = 0;
7227 unsigned long flags;
7228
7229 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
7230 return -EBUSY;
7231
7232 if (!check_stripe_cache(mddev))
7233 return -ENOSPC;
7234
7235 if (has_failed(conf))
7236 return -EINVAL;
7237
7238 rdev_for_each(rdev, mddev) {
7239 if (!test_bit(In_sync, &rdev->flags)
7240 && !test_bit(Faulty, &rdev->flags))
7241 spares++;
7242 }
7243
7244 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
7245 /* Not enough devices even to make a degraded array
7246 * of that size
7247 */
7248 return -EINVAL;
7249
7250 /* Refuse to reduce size of the array. Any reductions in
7251 * array size must be through explicit setting of array_size
7252 * attribute.
7253 */
7254 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
7255 < mddev->array_sectors) {
7256 printk(KERN_ERR "md/raid:%s: array size must be reduced "
7257 "before number of disks\n", mdname(mddev));
7258 return -EINVAL;
7259 }
7260
7261 atomic_set(&conf->reshape_stripes, 0);
7262 spin_lock_irq(&conf->device_lock);
7263 write_seqcount_begin(&conf->gen_lock);
7264 conf->previous_raid_disks = conf->raid_disks;
7265 conf->raid_disks += mddev->delta_disks;
7266 conf->prev_chunk_sectors = conf->chunk_sectors;
7267 conf->chunk_sectors = mddev->new_chunk_sectors;
7268 conf->prev_algo = conf->algorithm;
7269 conf->algorithm = mddev->new_layout;
7270 conf->generation++;
7271 /* Code that selects data_offset needs to see the generation update
7272 	 * if reshape_progress has been set - so a memory barrier is needed.
7273 */
7274 smp_mb();
7275 if (mddev->reshape_backwards)
7276 conf->reshape_progress = raid5_size(mddev, 0, 0);
7277 else
7278 conf->reshape_progress = 0;
7279 conf->reshape_safe = conf->reshape_progress;
7280 write_seqcount_end(&conf->gen_lock);
7281 spin_unlock_irq(&conf->device_lock);
7282
7283 /* Now make sure any requests that proceeded on the assumption
7284 * the reshape wasn't running - like Discard or Read - have
7285 * completed.
7286 */
7287 mddev_suspend(mddev);
7288 mddev_resume(mddev);
7289
7290 /* Add some new drives, as many as will fit.
7291 * We know there are enough to make the newly sized array work.
7292 * Don't add devices if we are reducing the number of
7293 * devices in the array. This is because it is not possible
7294 * to correctly record the "partially reconstructed" state of
7295 * such devices during the reshape and confusion could result.
7296 */
7297 if (mddev->delta_disks >= 0) {
7298 rdev_for_each(rdev, mddev)
7299 if (rdev->raid_disk < 0 &&
7300 !test_bit(Faulty, &rdev->flags)) {
7301 if (raid5_add_disk(mddev, rdev) == 0) {
7302 if (rdev->raid_disk
7303 >= conf->previous_raid_disks)
7304 set_bit(In_sync, &rdev->flags);
7305 else
7306 rdev->recovery_offset = 0;
7307
7308 if (sysfs_link_rdev(mddev, rdev))
7309 /* Failure here is OK */;
7310 }
7311 } else if (rdev->raid_disk >= conf->previous_raid_disks
7312 && !test_bit(Faulty, &rdev->flags)) {
7313 /* This is a spare that was manually added */
7314 set_bit(In_sync, &rdev->flags);
7315 }
7316
7317 /* When a reshape changes the number of devices,
7318 * ->degraded is measured against the larger of the
7319 * pre and post number of devices.
7320 */
7321 spin_lock_irqsave(&conf->device_lock, flags);
7322 mddev->degraded = calc_degraded(conf);
7323 spin_unlock_irqrestore(&conf->device_lock, flags);
7324 }
7325 mddev->raid_disks = conf->raid_disks;
7326 mddev->reshape_position = conf->reshape_progress;
7327 set_bit(MD_CHANGE_DEVS, &mddev->flags);
7328
7329 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
7330 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
7331 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
7332 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
7333 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
7334 "reshape");
7335 if (!mddev->sync_thread) {
7336 mddev->recovery = 0;
7337 spin_lock_irq(&conf->device_lock);
7338 write_seqcount_begin(&conf->gen_lock);
7339 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
7340 mddev->new_chunk_sectors =
7341 conf->chunk_sectors = conf->prev_chunk_sectors;
7342 mddev->new_layout = conf->algorithm = conf->prev_algo;
7343 rdev_for_each(rdev, mddev)
7344 rdev->new_data_offset = rdev->data_offset;
7345 smp_wmb();
7346 conf->generation--;
7347 conf->reshape_progress = MaxSector;
7348 mddev->reshape_position = MaxSector;
7349 write_seqcount_end(&conf->gen_lock);
7350 spin_unlock_irq(&conf->device_lock);
7351 return -EAGAIN;
7352 }
7353 conf->reshape_checkpoint = jiffies;
7354 md_wakeup_thread(mddev->sync_thread);
7355 md_new_event(mddev);
7356 return 0;
7357 }
7358
7359 /* This is called from the reshape thread and should make any
7360 * changes needed in 'conf'
7361 */
7362 static void end_reshape(struct r5conf *conf)
7363 {
7364
7365 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
7366 struct md_rdev *rdev;
7367
7368 spin_lock_irq(&conf->device_lock);
7369 conf->previous_raid_disks = conf->raid_disks;
7370 rdev_for_each(rdev, conf->mddev)
7371 rdev->data_offset = rdev->new_data_offset;
7372 smp_wmb();
7373 conf->reshape_progress = MaxSector;
7374 spin_unlock_irq(&conf->device_lock);
7375 wake_up(&conf->wait_for_overlap);
7376
7377 /* read-ahead size must cover two whole stripes, which is
7378 * 2 * (datadisks) * chunksize, where datadisks = raid_disks - max_degraded
7379 */
7380 if (conf->mddev->queue) {
7381 int data_disks = conf->raid_disks - conf->max_degraded;
7382 int stripe = data_disks * ((conf->chunk_sectors << 9)
7383 / PAGE_SIZE);
7384 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
7385 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
7386 }
7387 }
7388 }
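/*
 * Worked example (illustrative, 4K pages): a 6-device raid6 with 512K
 * chunks has data_disks = 4 and stripe = 4 * 128 = 512 pages, so the
 * read-ahead window is bumped to at least 1024 pages (4MiB) to cover two
 * full stripes.
 */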
7389
7390 /* This is called from the raid5d thread with mddev_lock held.
7391 * It makes config changes to the device.
7392 */
7393 static void raid5_finish_reshape(struct mddev *mddev)
7394 {
7395 struct r5conf *conf = mddev->private;
7396
7397 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
7398
7399 if (mddev->delta_disks > 0) {
7400 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
7401 set_capacity(mddev->gendisk, mddev->array_sectors);
7402 revalidate_disk(mddev->gendisk);
7403 } else {
7404 int d;
7405 spin_lock_irq(&conf->device_lock);
7406 mddev->degraded = calc_degraded(conf);
7407 spin_unlock_irq(&conf->device_lock);
7408 for (d = conf->raid_disks ;
7409 d < conf->raid_disks - mddev->delta_disks;
7410 d++) {
7411 struct md_rdev *rdev = conf->disks[d].rdev;
7412 if (rdev)
7413 clear_bit(In_sync, &rdev->flags);
7414 rdev = conf->disks[d].replacement;
7415 if (rdev)
7416 clear_bit(In_sync, &rdev->flags);
7417 }
7418 }
7419 mddev->layout = conf->algorithm;
7420 mddev->chunk_sectors = conf->chunk_sectors;
7421 mddev->reshape_position = MaxSector;
7422 mddev->delta_disks = 0;
7423 mddev->reshape_backwards = 0;
7424 }
7425 }
7426
7427 static void raid5_quiesce(struct mddev *mddev, int state)
7428 {
7429 struct r5conf *conf = mddev->private;
7430
7431 switch(state) {
7432 case 2: /* resume for a suspend */
7433 wake_up(&conf->wait_for_overlap);
7434 break;
7435
7436 case 1: /* stop all writes */
7437 lock_all_device_hash_locks_irq(conf);
7438 /* '2' tells resync/reshape to pause so that all
7439 * active stripes can drain
7440 */
7441 conf->quiesce = 2;
7442 wait_event_cmd(conf->wait_for_stripe,
7443 atomic_read(&conf->active_stripes) == 0 &&
7444 atomic_read(&conf->active_aligned_reads) == 0,
7445 unlock_all_device_hash_locks_irq(conf),
7446 lock_all_device_hash_locks_irq(conf));
7447 conf->quiesce = 1;
7448 unlock_all_device_hash_locks_irq(conf);
7449 /* allow reshape to continue */
7450 wake_up(&conf->wait_for_overlap);
7451 break;
7452
7453 case 0: /* re-enable writes */
7454 lock_all_device_hash_locks_irq(conf);
7455 conf->quiesce = 0;
7456 wake_up(&conf->wait_for_stripe);
7457 wake_up(&conf->wait_for_overlap);
7458 unlock_all_device_hash_locks_irq(conf);
7459 break;
7460 }
7461 }
7462
7463 static void *raid45_takeover_raid0(struct mddev *mddev, int level)
7464 {
7465 struct r0conf *raid0_conf = mddev->private;
7466 sector_t sectors;
7467
7468 /* for raid0 takeover only one zone is supported */
7469 if (raid0_conf->nr_strip_zones > 1) {
7470 printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n",
7471 mdname(mddev));
7472 return ERR_PTR(-EINVAL);
7473 }
7474
7475 sectors = raid0_conf->strip_zone[0].zone_end;
7476 sector_div(sectors, raid0_conf->strip_zone[0].nb_dev);
7477 mddev->dev_sectors = sectors;
7478 mddev->new_level = level;
7479 mddev->new_layout = ALGORITHM_PARITY_N;
7480 mddev->new_chunk_sectors = mddev->chunk_sectors;
7481 mddev->raid_disks += 1;
7482 mddev->delta_disks = 1;
7483 /* make sure it will be not marked as dirty */
7484 mddev->recovery_cp = MaxSector;
7485
7486 return setup_conf(mddev);
7487 }
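/*
 * Illustrative example (numbers invented): a 2-disk raid0 whose single
 * zone ends at 2000000 sectors yields dev_sectors = 1000000 per device;
 * the result is set up as a 3-device raid4/5 in ALGORITHM_PARITY_N with
 * delta_disks == 1, i.e. the new parity slot starts out empty and the
 * array comes up degraded until a spare is added and rebuilt.
 */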
7488
7489 static void *raid5_takeover_raid1(struct mddev *mddev)
7490 {
7491 int chunksect;
7492
7493 if (mddev->raid_disks != 2 ||
7494 mddev->degraded > 1)
7495 return ERR_PTR(-EINVAL);
7496
7497 /* Should check if there are write-behind devices? */
7498
7499 chunksect = 64*2; /* 64K by default */
7500
7501 /* The array must be an exact multiple of chunksize */
7502 while (chunksect && (mddev->array_sectors & (chunksect-1)))
7503 chunksect >>= 1;
7504
7505 if ((chunksect<<9) < STRIPE_SIZE)
7506 /* array size does not allow a suitable chunk size */
7507 return ERR_PTR(-EINVAL);
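/*
 * Worked example (illustrative): for an array of 1000000 sectors,
 * 1000000 & 127 != 0 but 1000000 & 63 == 0, so chunksect drops from
 * 128 (64K) to 64 (32K); 32K is still >= STRIPE_SIZE on 4K-page systems,
 * so the takeover proceeds with a 32K chunk.
 */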
7508
7509 mddev->new_level = 5;
7510 mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
7511 mddev->new_chunk_sectors = chunksect;
7512
7513 return setup_conf(mddev);
7514 }
7515
7516 static void *raid5_takeover_raid6(struct mddev *mddev)
7517 {
7518 int new_layout;
7519
7520 switch (mddev->layout) {
7521 case ALGORITHM_LEFT_ASYMMETRIC_6:
7522 new_layout = ALGORITHM_LEFT_ASYMMETRIC;
7523 break;
7524 case ALGORITHM_RIGHT_ASYMMETRIC_6:
7525 new_layout = ALGORITHM_RIGHT_ASYMMETRIC;
7526 break;
7527 case ALGORITHM_LEFT_SYMMETRIC_6:
7528 new_layout = ALGORITHM_LEFT_SYMMETRIC;
7529 break;
7530 case ALGORITHM_RIGHT_SYMMETRIC_6:
7531 new_layout = ALGORITHM_RIGHT_SYMMETRIC;
7532 break;
7533 case ALGORITHM_PARITY_0_6:
7534 new_layout = ALGORITHM_PARITY_0;
7535 break;
7536 case ALGORITHM_PARITY_N:
7537 new_layout = ALGORITHM_PARITY_N;
7538 break;
7539 default:
7540 return ERR_PTR(-EINVAL);
7541 }
7542 mddev->new_level = 5;
7543 mddev->new_layout = new_layout;
7544 mddev->delta_disks = -1;
7545 mddev->raid_disks -= 1;
7546 return setup_conf(mddev);
7547 }
7548
7549 static int raid5_check_reshape(struct mddev *mddev)
7550 {
7551 /* For a 2-drive array, the layout and chunk size can be changed
7552 * immediately as no restriping is needed.
7553 * For larger arrays we record the new value - after validation
7554 * to be used by a reshape pass.
7555 */
7556 struct r5conf *conf = mddev->private;
7557 int new_chunk = mddev->new_chunk_sectors;
7558
7559 if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
7560 return -EINVAL;
7561 if (new_chunk > 0) {
7562 if (!is_power_of_2(new_chunk))
7563 return -EINVAL;
7564 if (new_chunk < (PAGE_SIZE>>9))
7565 return -EINVAL;
7566 if (mddev->array_sectors & (new_chunk-1))
7567 /* not factor of array size */
7568 return -EINVAL;
7569 }
7570
7571 /* They look valid */
7572
7573 if (mddev->raid_disks == 2) {
7574 /* can make the change immediately */
7575 if (mddev->new_layout >= 0) {
7576 conf->algorithm = mddev->new_layout;
7577 mddev->layout = mddev->new_layout;
7578 }
7579 if (new_chunk > 0) {
7580 conf->chunk_sectors = new_chunk;
7581 mddev->chunk_sectors = new_chunk;
7582 }
7583 set_bit(MD_CHANGE_DEVS, &mddev->flags);
7584 md_wakeup_thread(mddev->thread);
7585 }
7586 return check_reshape(mddev);
7587 }
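/*
 * Note (editorial): with only two devices every raid5 layout is
 * equivalent - each stripe holds one data and one parity block - so the
 * new layout and chunk size above can take effect immediately; larger
 * arrays fall through to check_reshape() and a real restriping pass.
 */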
7588
7589 static int raid6_check_reshape(struct mddev *mddev)
7590 {
7591 int new_chunk = mddev->new_chunk_sectors;
7592
7593 if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
7594 return -EINVAL;
7595 if (new_chunk > 0) {
7596 if (!is_power_of_2(new_chunk))
7597 return -EINVAL;
7598 if (new_chunk < (PAGE_SIZE >> 9))
7599 return -EINVAL;
7600 if (mddev->array_sectors & (new_chunk-1))
7601 /* not factor of array size */
7602 return -EINVAL;
7603 }
7604
7605 /* They look valid */
7606 return check_reshape(mddev);
7607 }
7608
7609 static void *raid5_takeover(struct mddev *mddev)
7610 {
7611 /* raid5 can take over:
7612 * raid0 - if there is only one strip zone - make it a raid4 layout
7613 * raid1 - if there are two drives. We need to know the chunk size
7614 * raid4 - trivial - just use a raid4 layout.
7615 * raid6 - Providing it is a *_6 layout
7616 */
7617 if (mddev->level == 0)
7618 return raid45_takeover_raid0(mddev, 5);
7619 if (mddev->level == 1)
7620 return raid5_takeover_raid1(mddev);
7621 if (mddev->level == 4) {
7622 mddev->new_layout = ALGORITHM_PARITY_N;
7623 mddev->new_level = 5;
7624 return setup_conf(mddev);
7625 }
7626 if (mddev->level == 6)
7627 return raid5_takeover_raid6(mddev);
7628
7629 return ERR_PTR(-EINVAL);
7630 }
7631
7632 static void *raid4_takeover(struct mddev *mddev)
7633 {
7634 /* raid4 can take over:
7635 * raid0 - if there is only one strip zone
7636 * raid5 - if layout is right
7637 */
7638 if (mddev->level == 0)
7639 return raid45_takeover_raid0(mddev, 4);
7640 if (mddev->level == 5 &&
7641 mddev->layout == ALGORITHM_PARITY_N) {
7642 mddev->new_layout = 0;
7643 mddev->new_level = 4;
7644 return setup_conf(mddev);
7645 }
7646 return ERR_PTR(-EINVAL);
7647 }
7648
7649 static struct md_personality raid5_personality;
7650
7651 static void *raid6_takeover(struct mddev *mddev)
7652 {
7653 /* Currently can only take over a raid5. We map the
7654 * personality to an equivalent raid6 personality
7655 * with the Q block at the end.
7656 */
7657 int new_layout;
7658
7659 if (mddev->pers != &raid5_personality)
7660 return ERR_PTR(-EINVAL);
7661 if (mddev->degraded > 1)
7662 return ERR_PTR(-EINVAL);
7663 if (mddev->raid_disks > 253)
7664 return ERR_PTR(-EINVAL);
7665 if (mddev->raid_disks < 3)
7666 return ERR_PTR(-EINVAL);
7667
7668 switch (mddev->layout) {
7669 case ALGORITHM_LEFT_ASYMMETRIC:
7670 new_layout = ALGORITHM_LEFT_ASYMMETRIC_6;
7671 break;
7672 case ALGORITHM_RIGHT_ASYMMETRIC:
7673 new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
7674 break;
7675 case ALGORITHM_LEFT_SYMMETRIC:
7676 new_layout = ALGORITHM_LEFT_SYMMETRIC_6;
7677 break;
7678 case ALGORITHM_RIGHT_SYMMETRIC:
7679 new_layout = ALGORITHM_RIGHT_SYMMETRIC_6;
7680 break;
7681 case ALGORITHM_PARITY_0:
7682 new_layout = ALGORITHM_PARITY_0_6;
7683 break;
7684 case ALGORITHM_PARITY_N:
7685 new_layout = ALGORITHM_PARITY_N;
7686 break;
7687 default:
7688 return ERR_PTR(-EINVAL);
7689 }
7690 mddev->new_level = 6;
7691 mddev->new_layout = new_layout;
7692 mddev->delta_disks = 1;
7693 mddev->raid_disks += 1;
7694 return setup_conf(mddev);
7695 }
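/*
 * Illustrative example: a 3-device raid5 in ALGORITHM_LEFT_SYMMETRIC maps
 * to a 4-device raid6 in ALGORITHM_LEFT_SYMMETRIC_6, i.e. the same P
 * rotation with the Q block fixed on the last device; that device starts
 * out missing, so the array runs degraded until Q has been rebuilt.
 */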
7696
7697 static struct md_personality raid6_personality =
7698 {
7699 .name = "raid6",
7700 .level = 6,
7701 .owner = THIS_MODULE,
7702 .make_request = make_request,
7703 .run = run,
7704 .free = raid5_free,
7705 .status = status,
7706 .error_handler = error,
7707 .hot_add_disk = raid5_add_disk,
7708 .hot_remove_disk= raid5_remove_disk,
7709 .spare_active = raid5_spare_active,
7710 .sync_request = sync_request,
7711 .resize = raid5_resize,
7712 .size = raid5_size,
7713 .check_reshape = raid6_check_reshape,
7714 .start_reshape = raid5_start_reshape,
7715 .finish_reshape = raid5_finish_reshape,
7716 .quiesce = raid5_quiesce,
7717 .takeover = raid6_takeover,
7718 .congested = raid5_congested,
7719 .mergeable_bvec = raid5_mergeable_bvec,
7720 };
7721 static struct md_personality raid5_personality =
7722 {
7723 .name = "raid5",
7724 .level = 5,
7725 .owner = THIS_MODULE,
7726 .make_request = make_request,
7727 .run = run,
7728 .free = raid5_free,
7729 .status = status,
7730 .error_handler = error,
7731 .hot_add_disk = raid5_add_disk,
7732 .hot_remove_disk= raid5_remove_disk,
7733 .spare_active = raid5_spare_active,
7734 .sync_request = sync_request,
7735 .resize = raid5_resize,
7736 .size = raid5_size,
7737 .check_reshape = raid5_check_reshape,
7738 .start_reshape = raid5_start_reshape,
7739 .finish_reshape = raid5_finish_reshape,
7740 .quiesce = raid5_quiesce,
7741 .takeover = raid5_takeover,
7742 .congested = raid5_congested,
7743 .mergeable_bvec = raid5_mergeable_bvec,
7744 };
7745
7746 static struct md_personality raid4_personality =
7747 {
7748 .name = "raid4",
7749 .level = 4,
7750 .owner = THIS_MODULE,
7751 .make_request = make_request,
7752 .run = run,
7753 .free = raid5_free,
7754 .status = status,
7755 .error_handler = error,
7756 .hot_add_disk = raid5_add_disk,
7757 .hot_remove_disk= raid5_remove_disk,
7758 .spare_active = raid5_spare_active,
7759 .sync_request = sync_request,
7760 .resize = raid5_resize,
7761 .size = raid5_size,
7762 .check_reshape = raid5_check_reshape,
7763 .start_reshape = raid5_start_reshape,
7764 .finish_reshape = raid5_finish_reshape,
7765 .quiesce = raid5_quiesce,
7766 .takeover = raid4_takeover,
7767 .congested = raid5_congested,
7768 .mergeable_bvec = raid5_mergeable_bvec,
7769 };
7770
7771 static int __init raid5_init(void)
7772 {
7773 raid5_wq = alloc_workqueue("raid5wq",
7774 WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE|WQ_SYSFS, 0);
7775 if (!raid5_wq)
7776 return -ENOMEM;
7777 register_md_personality(&raid6_personality);
7778 register_md_personality(&raid5_personality);
7779 register_md_personality(&raid4_personality);
7780 return 0;
7781 }
7782
7783 static void raid5_exit(void)
7784 {
7785 unregister_md_personality(&raid6_personality);
7786 unregister_md_personality(&raid5_personality);
7787 unregister_md_personality(&raid4_personality);
7788 destroy_workqueue(raid5_wq);
7789 }
7790
7791 module_init(raid5_init);
7792 module_exit(raid5_exit);
7793 MODULE_LICENSE("GPL");
7794 MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD");
7795 MODULE_ALIAS("md-personality-4"); /* RAID5 */
7796 MODULE_ALIAS("md-raid5");
7797 MODULE_ALIAS("md-raid4");
7798 MODULE_ALIAS("md-level-5");
7799 MODULE_ALIAS("md-level-4");
7800 MODULE_ALIAS("md-personality-8"); /* RAID6 */
7801 MODULE_ALIAS("md-raid6");
7802 MODULE_ALIAS("md-level-6");
7803
7804 /* This used to be two separate modules, they were: */
7805 MODULE_ALIAS("raid5");
7806 MODULE_ALIAS("raid6");