/*
 * Copyright (C) 2012 Fusion-io  All rights reserved.
 * Copyright (C) 2012 Intel Corp. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/hash.h>
#include <linux/list_sort.h>
#include <linux/raid/xor.h>
#include <linux/vmalloc.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
/* set when additional merges to this rbio are not allowed */
#define RBIO_RMW_LOCKED_BIT	1

/*
 * set when this rbio is sitting in the hash, but it is just a cache
 * of past RMW
 */
#define RBIO_CACHE_BIT		2

/*
 * set when it is safe to trust the stripe_pages for caching
 */
#define RBIO_CACHE_READY_BIT	3

#define RBIO_CACHE_SIZE 1024

enum btrfs_rbio_ops {
	BTRFS_RBIO_WRITE,
	BTRFS_RBIO_READ_REBUILD,
	BTRFS_RBIO_PARITY_SCRUB,
	BTRFS_RBIO_REBUILD_MISSING,
};
struct btrfs_raid_bio {
	struct btrfs_fs_info *fs_info;
	struct btrfs_bio *bbio;

	/* while we're doing rmw on a stripe
	 * we put it into a hash table so we can
	 * lock the stripe and merge more rbios
	 * into it.
	 */
	struct list_head hash_list;

	/*
	 * LRU list for the stripe cache
	 */
	struct list_head stripe_cache;

	/*
	 * for scheduling work in the helper threads
	 */
	struct btrfs_work work;

	/*
	 * bio list and bio_list_lock are used
	 * to add more bios into the stripe
	 * in hopes of avoiding the full rmw
	 */
	struct bio_list bio_list;
	spinlock_t bio_list_lock;

	/* also protected by the bio_list_lock, the
	 * plug list is used by the plugging code
	 * to collect partial bios while plugged.  The
	 * stripe locking code also uses it to hand off
	 * the stripe lock to the next pending IO
	 */
	struct list_head plug_list;

	/*
	 * flags that tell us if it is safe to
	 * merge with this bio
	 */
	unsigned long flags;

	/* size of each individual stripe on disk */
	int stripe_len;

	/* number of data stripes (no p/q) */
	int nr_data;

	int real_stripes;

	int stripe_npages;

	/*
	 * set if we're doing a parity rebuild
	 * for a read from higher up, which is handled
	 * differently from a parity rebuild as part of
	 * rmw
	 */
	enum btrfs_rbio_ops operation;

	/* first bad stripe */
	int faila;

	/* second bad stripe (for raid6 use) */
	int failb;

	int scrubp;

	/*
	 * number of pages needed to represent the full
	 * stripe
	 */
	int nr_pages;

	/*
	 * size of all the bios in the bio_list.  This
	 * helps us decide if the rbio maps to a full
	 * stripe or not
	 */
	int bio_list_bytes;

	int generic_bio_cnt;

	atomic_t refs;

	atomic_t stripes_pending;

	atomic_t error;

	/*
	 * these are two arrays of pointers.  We allocate the
	 * rbio big enough to hold them both and setup their
	 * locations when the rbio is allocated
	 */

	/* pointers to pages that we allocated for
	 * reading/writing stripes directly from the disk (including P/Q)
	 */
	struct page **stripe_pages;

	/*
	 * pointers to the pages in the bio_list.  Stored
	 * here for faster lookup
	 */
	struct page **bio_pages;

	/*
	 * bitmap to record which horizontal stripe has data
	 */
	unsigned long *dbitmap;
};
static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
static void rmw_work(struct btrfs_work *work);
static void read_rebuild_work(struct btrfs_work *work);
static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
static void async_read_rebuild(struct btrfs_raid_bio *rbio);
static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
static void __free_raid_bio(struct btrfs_raid_bio *rbio);
static void index_rbio_pages(struct btrfs_raid_bio *rbio);
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);

static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check);
static void async_scrub_parity(struct btrfs_raid_bio *rbio);
/*
 * the stripe hash table is used for locking, and to collect
 * bios in hopes of making a full stripe
 */
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash_table *x;
	struct btrfs_stripe_hash *cur;
	struct btrfs_stripe_hash *h;
	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
	int table_size;
	int i;

	if (info->stripe_hash_table)
		return 0;

	/*
	 * The table is large, starting with order 4 and can go as high as
	 * order 7 in case lock debugging is turned on.
	 *
	 * Try harder to allocate and fallback to vmalloc to lower the chance
	 * of a failing mount.
	 */
	table_size = sizeof(*table) + sizeof(*h) * num_entries;
	table = kzalloc(table_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!table) {
		table = vzalloc(table_size);
		if (!table)
			return -ENOMEM;
	}

	spin_lock_init(&table->cache_lock);
	INIT_LIST_HEAD(&table->stripe_cache);

	h = table->table;

	for (i = 0; i < num_entries; i++) {
		cur = h + i;
		INIT_LIST_HEAD(&cur->hash_list);
		spin_lock_init(&cur->lock);
		init_waitqueue_head(&cur->wait);
	}

	x = cmpxchg(&info->stripe_hash_table, NULL, table);
	if (x)
		kvfree(x);
	return 0;
}
/*
 * caching an rbio means to copy anything from the
 * bio_pages array into the stripe_pages array.  We
 * use the page uptodate bit in the stripe cache array
 * to indicate if it has valid data
 *
 * once the caching is done, we set the cache ready
 * bit.
 */
static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	char *s;
	char *d;
	int ret;

	ret = alloc_rbio_pages(rbio);
	if (ret)
		return;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (!rbio->bio_pages[i])
			continue;

		s = kmap(rbio->bio_pages[i]);
		d = kmap(rbio->stripe_pages[i]);

		memcpy(d, s, PAGE_CACHE_SIZE);

		kunmap(rbio->bio_pages[i]);
		kunmap(rbio->stripe_pages[i]);
		SetPageUptodate(rbio->stripe_pages[i]);
	}
	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
}
/*
 * we hash on the first logical address of the stripe
 */
static int rbio_bucket(struct btrfs_raid_bio *rbio)
{
	u64 num = rbio->bbio->raid_map[0];

	/*
	 * we shift down quite a bit.  We're using byte
	 * addressing, and most of the lower bits are zeros.
	 * This tends to upset hash_64, and it consistently
	 * returns just one or two different values.
	 *
	 * shifting off the lower bits fixes things.
	 */
	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
}
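
/*
 * For illustration (example addresses, not taken from the code): full
 * stripes begin on large, aligned byte offsets, so logical addresses
 * like 0x10000000, 0x10010000, 0x10020000 all have zero low bits.
 * Feeding the raw values to hash_64() would give it nearly identical
 * inputs; hashing (num >> 16) instead hashes 0x1000, 0x1001, 0x1002,
 * which spreads rbios across the 1 << BTRFS_STRIPE_HASH_TABLE_BITS
 * buckets.
 */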
/*
 * stealing an rbio means taking all the uptodate pages from the stripe
 * array in the source rbio and putting them into the destination rbio
 */
static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
{
	int i;
	struct page *s;
	struct page *d;

	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
		return;

	for (i = 0; i < dest->nr_pages; i++) {
		s = src->stripe_pages[i];
		if (!s || !PageUptodate(s)) {
			continue;
		}

		d = dest->stripe_pages[i];
		if (d)
			__free_page(d);

		dest->stripe_pages[i] = s;
		src->stripe_pages[i] = NULL;
	}
}
/*
 * merging means we take the bio_list from the victim and
 * splice it into the destination.  The victim should
 * be discarded afterwards.
 *
 * must be called with dest->rbio_list_lock held
 */
static void merge_rbio(struct btrfs_raid_bio *dest,
		       struct btrfs_raid_bio *victim)
{
	bio_list_merge(&dest->bio_list, &victim->bio_list);
	dest->bio_list_bytes += victim->bio_list_bytes;
	dest->generic_bio_cnt += victim->generic_bio_cnt;
	bio_list_init(&victim->bio_list);
}
/*
 * used to prune items that are in the cache.  The caller
 * must hold the hash table lock.
 */
static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash *h;
	int freeit = 0;

	/*
	 * check the bit again under the hash table lock.
	 */
	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;
	h = table->table + bucket;

	/* hold the lock for the bucket because we may be
	 * removing it from the hash table
	 */
	spin_lock(&h->lock);

	/*
	 * hold the lock for the bio list because we need
	 * to make sure the bio list is empty
	 */
	spin_lock(&rbio->bio_list_lock);

	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
		list_del_init(&rbio->stripe_cache);
		table->cache_size -= 1;
		freeit = 1;

		/* if the bio list isn't empty, this rbio is
		 * still involved in an IO.  We take it out
		 * of the cache list, and drop the ref that
		 * was held for the list.
		 *
		 * If the bio_list was empty, we also remove
		 * the rbio from the hash_table, and drop
		 * the corresponding ref
		 */
		if (bio_list_empty(&rbio->bio_list)) {
			if (!list_empty(&rbio->hash_list)) {
				list_del_init(&rbio->hash_list);
				atomic_dec(&rbio->refs);
				BUG_ON(!list_empty(&rbio->plug_list));
			}
		}
	}

	spin_unlock(&rbio->bio_list_lock);
	spin_unlock(&h->lock);

	if (freeit)
		__free_raid_bio(rbio);
}
/*
 * prune a given rbio from the cache
 */
static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	__remove_rbio_from_cache(rbio);
	spin_unlock_irqrestore(&table->cache_lock, flags);
}
/*
 * remove everything in the cache
 */
static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;
	struct btrfs_raid_bio *rbio;

	table = info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	while (!list_empty(&table->stripe_cache)) {
		rbio = list_entry(table->stripe_cache.next,
				  struct btrfs_raid_bio,
				  stripe_cache);
		__remove_rbio_from_cache(rbio);
	}
	spin_unlock_irqrestore(&table->cache_lock, flags);
}
/*
 * remove all cached entries and free the hash table
 */
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
{
	if (!info->stripe_hash_table)
		return;
	btrfs_clear_rbio_cache(info);
	kvfree(info->stripe_hash_table);
	info->stripe_hash_table = NULL;
}
/*
 * insert an rbio into the stripe cache.  It
 * must have already been prepared by calling
 * cache_rbio_pages
 *
 * If this rbio was already cached, it gets
 * moved to the front of the lru.
 *
 * If the size of the rbio cache is too big, we
 * prune an item.
 */
static void cache_rbio(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	spin_lock(&rbio->bio_list_lock);

	/* bump our ref if we were not in the list before */
	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
		atomic_inc(&rbio->refs);

	if (!list_empty(&rbio->stripe_cache)) {
		list_move(&rbio->stripe_cache, &table->stripe_cache);
	} else {
		list_add(&rbio->stripe_cache, &table->stripe_cache);
		table->cache_size += 1;
	}

	spin_unlock(&rbio->bio_list_lock);

	if (table->cache_size > RBIO_CACHE_SIZE) {
		struct btrfs_raid_bio *found;

		found = list_entry(table->stripe_cache.prev,
				   struct btrfs_raid_bio,
				   stripe_cache);

		if (found != rbio)
			__remove_rbio_from_cache(found);
	}

	spin_unlock_irqrestore(&table->cache_lock, flags);
}
/*
 * helper function to run the xor_blocks api.  It is only
 * able to do MAX_XOR_BLOCKS at a time, so we need to
 * loop through.
 */
static void run_xor(void **pages, int src_cnt, ssize_t len)
{
	int src_off = 0;
	int xor_src_cnt = 0;
	void *dest = pages[src_cnt];

	while (src_cnt > 0) {
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest, pages + src_off);

		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}
}
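
/*
 * For illustration (hypothetical numbers): if MAX_XOR_BLOCKS were 4 and
 * src_cnt were 10, the loop above would call xor_blocks() three times,
 * xoring sources 0-3, then 4-7, then 8-9 into pages[10], so the
 * destination page accumulates the parity of the whole set.
 */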
/*
 * returns true if the bio list inside this rbio
 * covers an entire stripe (no rmw required).
 * Must be called with the bio list lock held, or
 * at a time when you know it is impossible to add
 * new bios into the list
 */
static int __rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long size = rbio->bio_list_bytes;
	int ret = 1;

	if (size != rbio->nr_data * rbio->stripe_len)
		ret = 0;

	BUG_ON(size > rbio->nr_data * rbio->stripe_len);
	return ret;
}

static int rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);
	ret = __rbio_is_full(rbio);
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
	return ret;
}
/*
 * returns 1 if it is safe to merge two rbios together.
 * The merging is safe if the two rbios correspond to
 * the same stripe and if they are both going in the same
 * direction (read vs write), and if neither one is
 * locked for final IO
 *
 * The caller is responsible for locking such that
 * rmw_locked is safe to test
 */
static int rbio_can_merge(struct btrfs_raid_bio *last,
			  struct btrfs_raid_bio *cur)
{
	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
		return 0;

	/*
	 * we can't merge with cached rbios, since the
	 * idea is that when we merge the destination
	 * rbio is going to run our IO for us.  We can
	 * steal from cached rbio's though, other functions
	 * handle that.
	 */
	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
	    test_bit(RBIO_CACHE_BIT, &cur->flags))
		return 0;

	if (last->bbio->raid_map[0] !=
	    cur->bbio->raid_map[0])
		return 0;

	/* we can't merge with different operations */
	if (last->operation != cur->operation)
		return 0;
	/*
	 * For a parity scrub we read the full stripe from the drive,
	 * then check and repair the parity and write the new results.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	if (last->operation == BTRFS_RBIO_PARITY_SCRUB ||
	    cur->operation == BTRFS_RBIO_PARITY_SCRUB)
		return 0;

	if (last->operation == BTRFS_RBIO_REBUILD_MISSING ||
	    cur->operation == BTRFS_RBIO_REBUILD_MISSING)
		return 0;

	return 1;
}
/*
 * helper to index into the pstripe
 */
static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	index += (rbio->nr_data * rbio->stripe_len) >> PAGE_CACHE_SHIFT;
	return rbio->stripe_pages[index];
}

/*
 * helper to index into the qstripe, returns null
 * if there is no qstripe
 */
static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	if (rbio->nr_data + 1 == rbio->real_stripes)
		return NULL;

	index += ((rbio->nr_data + 1) * rbio->stripe_len) >>
		PAGE_CACHE_SHIFT;
	return rbio->stripe_pages[index];
}
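
/*
 * Worked example for the two helpers above (illustrative values only):
 * with a 64K stripe_len, 4K pages and a 4 device RAID6 (nr_data == 2),
 * each stripe uses 16 pages of stripe_pages.  Page N of the P stripe is
 * at index (2 * 64K >> PAGE_CACHE_SHIFT) + N == 32 + N, and page N of
 * the Q stripe is at (3 * 64K >> PAGE_CACHE_SHIFT) + N == 48 + N.
 */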
/*
 * The first stripe in the table for a logical address
 * has the lock.  rbios are added in one of three ways:
 *
 * 1) Nobody has the stripe locked yet.  The rbio is given
 * the lock and 0 is returned.  The caller must start the IO
 * themselves.
 *
 * 2) Someone has the stripe locked, but we're able to merge
 * with the lock owner.  The rbio is freed and the IO will
 * start automatically along with the existing rbio.  1 is returned.
 *
 * 3) Someone has the stripe locked, but we're not able to merge.
 * The rbio is added to the lock owner's plug list, or merged into
 * an rbio already on the plug list.  When the lock owner unlocks,
 * the next rbio on the list is run and the IO is started automatically.
 * 1 is returned.
 *
 * If we return 0, the caller still owns the rbio and must continue with
 * IO submission.  If we return 1, the caller must assume the rbio has
 * already been freed.
 */
static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *pending;
	unsigned long flags;
	struct btrfs_raid_bio *freeit = NULL;
	struct btrfs_raid_bio *cache_drop = NULL;
	int ret = 0;

	spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry(cur, &h->hash_list, hash_list) {
		if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
			spin_lock(&cur->bio_list_lock);

			/* can we steal this cached rbio's pages? */
			if (bio_list_empty(&cur->bio_list) &&
			    list_empty(&cur->plug_list) &&
			    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
			    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
				list_del_init(&cur->hash_list);
				atomic_dec(&cur->refs);

				steal_rbio(cur, rbio);
				cache_drop = cur;
				spin_unlock(&cur->bio_list_lock);

				goto lockit;
			}

			/* can we merge into the lock owner? */
			if (rbio_can_merge(cur, rbio)) {
				merge_rbio(cur, rbio);
				spin_unlock(&cur->bio_list_lock);
				freeit = rbio;
				ret = 1;
				goto out;
			}


			/*
			 * we couldn't merge with the running
			 * rbio, see if we can merge with the
			 * pending ones.  We don't have to
			 * check for rmw_locked because there
			 * is no way they are inside finish_rmw
			 * right now
			 */
			list_for_each_entry(pending, &cur->plug_list,
					    plug_list) {
				if (rbio_can_merge(pending, rbio)) {
					merge_rbio(pending, rbio);
					spin_unlock(&cur->bio_list_lock);
					freeit = rbio;
					ret = 1;
					goto out;
				}
			}

			/* no merging, put us on the tail of the plug list,
			 * our rbio will be started when the currently
			 * running rbio unlocks
			 */
			list_add_tail(&rbio->plug_list, &cur->plug_list);
			spin_unlock(&cur->bio_list_lock);
			ret = 1;
			goto out;
		}
	}
lockit:
	atomic_inc(&rbio->refs);
	list_add(&rbio->hash_list, &h->hash_list);
out:
	spin_unlock_irqrestore(&h->lock, flags);
	if (cache_drop)
		remove_rbio_from_cache(cache_drop);
	if (freeit)
		__free_raid_bio(freeit);
	return ret;
}
/*
 * called as rmw or parity rebuild is completed.  If the plug list has more
 * rbios waiting for this stripe, the next one on the list will be started
 */
static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
{
	int bucket;
	struct btrfs_stripe_hash *h;
	unsigned long flags;
	int keep_cache = 0;

	bucket = rbio_bucket(rbio);
	h = rbio->fs_info->stripe_hash_table->table + bucket;

	if (list_empty(&rbio->plug_list))
		cache_rbio(rbio);

	spin_lock_irqsave(&h->lock, flags);
	spin_lock(&rbio->bio_list_lock);

	if (!list_empty(&rbio->hash_list)) {
		/*
		 * if we're still cached and there is no other IO
		 * to perform, just leave this rbio here for others
		 * to steal from later
		 */
		if (list_empty(&rbio->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
			keep_cache = 1;
			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
			BUG_ON(!bio_list_empty(&rbio->bio_list));
			goto done;
		}

		list_del_init(&rbio->hash_list);
		atomic_dec(&rbio->refs);

		/*
		 * we use the plug list to hold all the rbios
		 * waiting for the chance to lock this stripe.
		 * hand the lock over to one of them.
		 */
		if (!list_empty(&rbio->plug_list)) {
			struct btrfs_raid_bio *next;
			struct list_head *head = rbio->plug_list.next;

			next = list_entry(head, struct btrfs_raid_bio,
					  plug_list);

			list_del_init(&rbio->plug_list);

			list_add(&next->hash_list, &h->hash_list);
			atomic_inc(&next->refs);
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock_irqrestore(&h->lock, flags);

			if (next->operation == BTRFS_RBIO_READ_REBUILD)
				async_read_rebuild(next);
			else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
				steal_rbio(rbio, next);
				async_read_rebuild(next);
			} else if (next->operation == BTRFS_RBIO_WRITE) {
				steal_rbio(rbio, next);
				async_rmw_stripe(next);
			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
				steal_rbio(rbio, next);
				async_scrub_parity(next);
			}

			goto done_nolock;
		} else if (waitqueue_active(&h->wait)) {
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock_irqrestore(&h->lock, flags);
			wake_up(&h->wait);
			goto done_nolock;
		}
	}
done:
	spin_unlock(&rbio->bio_list_lock);
	spin_unlock_irqrestore(&h->lock, flags);

done_nolock:
	if (!keep_cache)
		remove_rbio_from_cache(rbio);
}
static void __free_raid_bio(struct btrfs_raid_bio *rbio)
{
	int i;

	WARN_ON(atomic_read(&rbio->refs) < 0);
	if (!atomic_dec_and_test(&rbio->refs))
		return;

	WARN_ON(!list_empty(&rbio->stripe_cache));
	WARN_ON(!list_empty(&rbio->hash_list));
	WARN_ON(!bio_list_empty(&rbio->bio_list));

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i]) {
			__free_page(rbio->stripe_pages[i]);
			rbio->stripe_pages[i] = NULL;
		}
	}

	btrfs_put_bbio(rbio->bbio);
	kfree(rbio);
}

static void free_raid_bio(struct btrfs_raid_bio *rbio)
{
	unlock_stripe(rbio);
	__free_raid_bio(rbio);
}
/*
 * this frees the rbio and runs through all the bios in the
 * bio_list and calls end_io on them
 */
static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err)
{
	struct bio *cur = bio_list_get(&rbio->bio_list);
	struct bio *next;

	if (rbio->generic_bio_cnt)
		btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);

	free_raid_bio(rbio);

	while (cur) {
		next = cur->bi_next;
		cur->bi_next = NULL;
		cur->bi_error = err;
		bio_endio(cur);
		cur = next;
	}
}
/*
 * end io function used by finish_rmw.  When we finally
 * get here, we've written a full stripe
 */
static void raid_write_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;
	int err = bio->bi_error;

	if (err)
		fail_bio_stripe(rbio, bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	err = 0;

	/* OK, we have written all the stripes we need to. */
	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		err = -EIO;

	rbio_orig_end_io(rbio, err);
}
/*
 * the read/modify/write code wants to use the original bio for
 * any pages it included, and then use the rbio for everything
 * else.  This function decides if a given index (stripe number)
 * and page number in that stripe fall inside the original bio
 * or the rbio.
 *
 * if you set bio_list_only, you'll get a NULL back for any ranges
 * that are outside the bio_list
 *
 * This doesn't take any refs on anything, you get a bare page pointer
 * and the caller must bump refs as required.
 *
 * You must call index_rbio_pages once before you can trust
 * the answers from this function.
 */
static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
				 int index, int pagenr, int bio_list_only)
{
	int chunk_page;
	struct page *p = NULL;

	chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;

	spin_lock_irq(&rbio->bio_list_lock);
	p = rbio->bio_pages[chunk_page];
	spin_unlock_irq(&rbio->bio_list_lock);

	if (p || bio_list_only)
		return p;

	return rbio->stripe_pages[chunk_page];
}
/*
 * number of pages we need for the entire stripe across all the
 * drives
 */
static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
{
	unsigned long nr = stripe_len * nr_stripes;

	return DIV_ROUND_UP(nr, PAGE_CACHE_SIZE);
}
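
/*
 * Example (illustrative values only): for a 64K stripe_len across a
 * 3 device RAID5 (2 data + P), nr is 192K; with 4K pages,
 * DIV_ROUND_UP(192K, 4K) gives 48 pages for the whole rbio.
 */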
/*
 * allocation and initial setup for the btrfs_raid_bio.  Note that
 * this does not allocate any pages for rbio->pages.
 */
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
					 struct btrfs_bio *bbio, u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	int nr_data = 0;
	int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
	int num_pages = rbio_nr_pages(stripe_len, real_stripes);
	int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
	void *p;

	rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2 +
		       DIV_ROUND_UP(stripe_npages, BITS_PER_LONG / 8),
		       GFP_NOFS);
	if (!rbio)
		return ERR_PTR(-ENOMEM);

	bio_list_init(&rbio->bio_list);
	INIT_LIST_HEAD(&rbio->plug_list);
	spin_lock_init(&rbio->bio_list_lock);
	INIT_LIST_HEAD(&rbio->stripe_cache);
	INIT_LIST_HEAD(&rbio->hash_list);
	rbio->bbio = bbio;
	rbio->fs_info = root->fs_info;
	rbio->stripe_len = stripe_len;
	rbio->nr_pages = num_pages;
	rbio->real_stripes = real_stripes;
	rbio->stripe_npages = stripe_npages;
	rbio->faila = -1;
	rbio->failb = -1;
	atomic_set(&rbio->refs, 1);
	atomic_set(&rbio->error, 0);
	atomic_set(&rbio->stripes_pending, 0);

	/*
	 * the stripe_pages and bio_pages array point to the extra
	 * memory we allocated past the end of the rbio
	 */
	p = rbio + 1;
	rbio->stripe_pages = p;
	rbio->bio_pages = p + sizeof(struct page *) * num_pages;
	rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2;

	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
		nr_data = real_stripes - 1;
	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
		nr_data = real_stripes - 2;
	else
		BUG();

	rbio->nr_data = nr_data;
	return rbio;
}
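
/*
 * The single allocation in alloc_rbio() is laid out as follows
 * (illustrative, actual sizes depend on num_pages and stripe_npages):
 *
 *   [ struct btrfs_raid_bio ][ stripe_pages[num_pages] ]
 *   [ bio_pages[num_pages]  ][ dbitmap (stripe_npages bits) ]
 *
 * so stripe_pages, bio_pages and dbitmap are simply offsets into the
 * same kzalloc'd buffer and are freed together with the rbio.
 */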
/* allocate pages for all the stripes in the bio, including parity */
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
		ClearPageUptodate(page);
	}
	return 0;
}
/* allocate pages for just the p/q stripes */
static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	i = (rbio->nr_data * rbio->stripe_len) >> PAGE_CACHE_SHIFT;

	for (; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
	}
	return 0;
}
/*
 * add a single page from a specific stripe into our list of bios for IO
 * this will try to merge into existing bios if possible, and returns
 * zero if all went well.
 */
static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
			    struct bio_list *bio_list,
			    struct page *page,
			    int stripe_nr,
			    unsigned long page_index,
			    unsigned long bio_max_len)
{
	struct bio *last = bio_list->tail;
	u64 last_end = 0;
	int ret;
	struct bio *bio;
	struct btrfs_bio_stripe *stripe;
	u64 disk_start;

	stripe = &rbio->bbio->stripes[stripe_nr];
	disk_start = stripe->physical + (page_index << PAGE_CACHE_SHIFT);

	/* if the device is missing, just fail this stripe */
	if (!stripe->dev->bdev)
		return fail_rbio_index(rbio, stripe_nr);

	/* see if we can add this page onto our existing bio */
	if (last) {
		last_end = (u64)last->bi_iter.bi_sector << 9;
		last_end += last->bi_iter.bi_size;

		/*
		 * we can't merge these if they are from different
		 * devices or if they are not contiguous
		 */
		if (last_end == disk_start && stripe->dev->bdev &&
		    last->bi_bdev == stripe->dev->bdev) {
			ret = bio_add_page(last, page, PAGE_CACHE_SIZE, 0);
			if (ret == PAGE_CACHE_SIZE)
				return 0;
		}
	}

	/* put a new bio on the list */
	bio = btrfs_io_bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT ?: 1);
	if (!bio)
		return -ENOMEM;

	bio->bi_iter.bi_size = 0;
	bio->bi_bdev = stripe->dev->bdev;
	bio->bi_iter.bi_sector = disk_start >> 9;

	bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
	bio_list_add(bio_list, bio);
	return 0;
}
/*
 * while we're doing the read/modify/write cycle, we could
 * have errors in reading pages off the disk.  This checks
 * for errors and if we're not able to read the page it'll
 * trigger parity reconstruction.  The rmw will be finished
 * after we've reconstructed the failed stripes
 */
static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
{
	if (rbio->faila >= 0 || rbio->failb >= 0) {
		BUG_ON(rbio->faila == rbio->real_stripes - 1);
		__raid56_parity_recover(rbio);
	} else {
		finish_rmw(rbio);
	}
}
/*
 * these are just the pages from the rbio array, not from anything
 * the FS sent down to us
 */
static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe, int page)
{
	int index;

	index = stripe * (rbio->stripe_len >> PAGE_CACHE_SHIFT);
	index += page;
	return rbio->stripe_pages[index];
}
/*
 * helper function to walk our bio list and populate the bio_pages array with
 * the result.  This seems expensive, but it is faster than constantly
 * searching through the bio list as we setup the IO in finish_rmw or stripe
 * reconstruction.
 *
 * This must be called before you trust the answers from page_in_rbio
 */
static void index_rbio_pages(struct btrfs_raid_bio *rbio)
{
	struct bio *bio;
	u64 start;
	unsigned long stripe_offset;
	unsigned long page_index;
	struct page *p;
	int i;

	spin_lock_irq(&rbio->bio_list_lock);
	bio_list_for_each(bio, &rbio->bio_list) {
		start = (u64)bio->bi_iter.bi_sector << 9;
		stripe_offset = start - rbio->bbio->raid_map[0];
		page_index = stripe_offset >> PAGE_CACHE_SHIFT;

		for (i = 0; i < bio->bi_vcnt; i++) {
			p = bio->bi_io_vec[i].bv_page;
			rbio->bio_pages[page_index + i] = p;
		}
	}
	spin_unlock_irq(&rbio->bio_list_lock);
}
/*
 * this is called from one of two situations.  We either
 * have a full stripe from the higher layers, or we've read all
 * the missing bits off disk.
 *
 * This will calculate the parity and then send down any
 * changed blocks.
 */
static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void *pointers[rbio->real_stripes];
	int stripe_len = rbio->stripe_len;
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	int p_stripe = -1;
	int q_stripe = -1;
	struct bio_list bio_list;
	struct bio *bio;
	int pages_per_stripe = stripe_len >> PAGE_CACHE_SHIFT;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1) {
		p_stripe = rbio->real_stripes - 1;
	} else if (rbio->real_stripes - rbio->nr_data == 2) {
		p_stripe = rbio->real_stripes - 2;
		q_stripe = rbio->real_stripes - 1;
	} else {
		BUG();
	}

	/* at this point we either have a full stripe,
	 * or we've read the full stripe from the drive.
	 * recalculate the parity and write the new results.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	spin_lock_irq(&rbio->bio_list_lock);
	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
	spin_unlock_irq(&rbio->bio_list_lock);

	atomic_set(&rbio->error, 0);

	/*
	 * now that we've set rmw_locked, run through the
	 * bio list one last time and map the page pointers
	 *
	 * We don't cache full rbios because we're assuming
	 * the higher layers are unlikely to use this area of
	 * the disk again soon.  If they do use it again,
	 * hopefully they will send another full bio.
	 */
	index_rbio_pages(rbio);
	if (!rbio_is_full(rbio))
		cache_rbio_pages(rbio);
	else
		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) {
		struct page *p;
		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		/* then add the parity stripe */
		p = rbio_pstripe_page(rbio, pagenr);
		SetPageUptodate(p);
		pointers[stripe++] = kmap(p);

		if (q_stripe != -1) {

			/*
			 * raid6, add the qstripe and call the
			 * library function to fill in our p/q
			 */
			p = rbio_qstripe_page(rbio, pagenr);
			SetPageUptodate(p);
			pointers[stripe++] = kmap(p);

			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
			run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE);
		}


		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
	}

	/*
	 * time to start writing.  Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
	 * everything else.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) {
			struct page *page;

			if (stripe < rbio->nr_data) {
				page = page_in_rbio(rbio, stripe, pagenr, 1);
				if (!page)
					continue;
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}

			ret = rbio_add_io_page(rbio, &bio_list,
				       page, stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	if (likely(!bbio->num_tgtdevs))
		goto write_data;

	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (!bbio->tgtdev_map[stripe])
			continue;

		for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) {
			struct page *page;

			if (stripe < rbio->nr_data) {
				page = page_in_rbio(rbio, stripe, pagenr, 1);
				if (!page)
					continue;
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}

			ret = rbio_add_io_page(rbio, &bio_list, page,
					       rbio->bbio->tgtdev_map[stripe],
					       pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

write_data:
	atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
	BUG_ON(atomic_read(&rbio->stripes_pending) == 0);

	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		submit_bio(WRITE, bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
}
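
/*
 * Parity math used in finish_rmw(), for reference: for RAID5 the P block
 * is the byte-wise XOR of the data blocks (P = D0 ^ D1 ^ ... ^ Dn-1),
 * computed above by copying D0 into the P page and xoring the remaining
 * data in with run_xor().  For RAID6, raid6_call.gen_syndrome() fills in
 * both P and Q, where Q is the Reed-Solomon syndrome over GF(2^8), so a
 * single failure can be repaired from P and a second failure from Q.
 */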
/*
 * helper to find the stripe number for a given bio.  Used to figure out which
 * stripe has failed.  This expects the bio to correspond to a physical disk,
 * so it looks up based on physical sector numbers.
 */
static int find_bio_stripe(struct btrfs_raid_bio *rbio,
			   struct bio *bio)
{
	u64 physical = bio->bi_iter.bi_sector;
	u64 stripe_start;
	int i;
	struct btrfs_bio_stripe *stripe;

	physical <<= 9;

	for (i = 0; i < rbio->bbio->num_stripes; i++) {
		stripe = &rbio->bbio->stripes[i];
		stripe_start = stripe->physical;
		if (physical >= stripe_start &&
		    physical < stripe_start + rbio->stripe_len &&
		    bio->bi_bdev == stripe->dev->bdev) {
			return i;
		}
	}
	return -1;
}
/*
 * helper to find the stripe number for a given
 * bio (before mapping).  Used to figure out which stripe has
 * failed.  This looks up based on logical block numbers.
 */
static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
				   struct bio *bio)
{
	u64 logical = bio->bi_iter.bi_sector;
	u64 stripe_start;
	int i;

	logical <<= 9;

	for (i = 0; i < rbio->nr_data; i++) {
		stripe_start = rbio->bbio->raid_map[i];
		if (logical >= stripe_start &&
		    logical < stripe_start + rbio->stripe_len) {
			return i;
		}
	}
	return -1;
}
/*
 * returns -EIO if we had too many failures
 */
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);

	/* we already know this stripe is bad, move on */
	if (rbio->faila == failed || rbio->failb == failed)
		goto out;

	if (rbio->faila == -1) {
		/* first failure on this rbio */
		rbio->faila = failed;
		atomic_inc(&rbio->error);
	} else if (rbio->failb == -1) {
		/* second failure on this rbio */
		rbio->failb = failed;
		atomic_inc(&rbio->error);
	} else {
		ret = -EIO;
	}
out:
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

	return ret;
}

/*
 * helper to fail a stripe based on a physical disk
 * bio.
 */
static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
			   struct bio *bio)
{
	int failed = find_bio_stripe(rbio, bio);

	if (failed < 0)
		return -EIO;

	return fail_rbio_index(rbio, failed);
}
/*
 * this sets each page in the bio uptodate.  It should only be used on private
 * rbio pages, nothing that comes in from the higher layers
 */
static void set_bio_pages_uptodate(struct bio *bio)
{
	int i;
	struct page *p;

	for (i = 0; i < bio->bi_vcnt; i++) {
		p = bio->bi_io_vec[i].bv_page;
		SetPageUptodate(p);
	}
}
/*
 * end io for the read phase of the rmw cycle.  All the bios here are physical
 * stripe bios we've read from the disk so we can recalculate the parity of the
 * stripe.
 *
 * This will usually kick off finish_rmw once all the bios are read in, but it
 * may trigger parity reconstruction if we had any errors along the way
 */
static void raid_rmw_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_error)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	/*
	 * this will normally call finish_rmw to start our write
	 * but if there are any failed stripes we'll reconstruct
	 * from parity first
	 */
	validate_rbio_for_rmw(rbio);
	return;

cleanup:

	rbio_orig_end_io(rbio, -EIO);
}
static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			rmw_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers,
			 &rbio->work);
}

static void async_read_rebuild(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			read_rebuild_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers,
			 &rbio->work);
}
/*
 * the stripe must be locked by the caller.  It will
 * unlock after all the writes are done
 */
static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE);
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_pages(rbio);
	if (ret)
		goto cleanup;

	index_rbio_pages(rbio);

	atomic_set(&rbio->error, 0);
	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
		for (pagenr = 0; pagenr < nr_pages; pagenr++) {
			struct page *page;
			/*
			 * we want to find all the pages missing from
			 * the rbio and read them from the disk.  If
			 * page_in_rbio finds a page in the bio list
			 * we don't need to read it off the stripe.
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page.  If so, be happy and use it
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
				       stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with
		 * us, it means there is nothing left to read.
		 * But if there are missing devices it may not be
		 * safe to do the full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_rmw_end_io;

		btrfs_bio_wq_end_io(rbio->fs_info, bio,
				    BTRFS_WQ_ENDIO_RAID56);

		submit_bio(READ, bio);
	}
	/* the actual write will happen once the reads are done */
	return 0;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
	return -EIO;

finish:
	validate_rbio_for_rmw(rbio);
	return 0;
}
/*
 * if the upper layers pass in a full stripe, we thank them by only allocating
 * enough pages to hold the parity, and sending it all down quickly.
 */
static int full_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = alloc_rbio_parity_pages(rbio);
	if (ret) {
		__free_raid_bio(rbio);
		return ret;
	}

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		finish_rmw(rbio);
	return 0;
}

/*
 * partial stripe writes get handed over to async helpers.
 * We're really hoping to merge a few more writes into this
 * rbio before calculating new parity
 */
static int partial_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		async_rmw_stripe(rbio);
	return 0;
}

/*
 * sometimes while we were reading from the drive to
 * recalculate parity, enough new bios come in to create
 * a full stripe.  So we do a check here to see if we can
 * go directly to finish_rmw
 */
static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
{
	/* head off into rmw land if we don't have a full stripe */
	if (!rbio_is_full(rbio))
		return partial_stripe_write(rbio);
	return full_stripe_write(rbio);
}
/*
 * We use plugging call backs to collect full stripes.
 * Any time we get a partial stripe write while plugged
 * we collect it into a list.  When the unplug comes down,
 * we sort the list by logical block number and merge
 * everything we can into the same rbios
 */
struct btrfs_plug_cb {
	struct blk_plug_cb cb;
	struct btrfs_fs_info *info;
	struct list_head rbio_list;
	struct btrfs_work work;
};
/*
 * rbios on the plug list are sorted for easier merging.
 */
static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
						 plug_list);
	struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
						 plug_list);
	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;

	if (a_sector < b_sector)
		return -1;
	if (a_sector > b_sector)
		return 1;
	return 0;
}
static void run_plug(struct btrfs_plug_cb *plug)
{
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *last = NULL;

	/*
	 * sort our plug list then try to merge
	 * everything we can in hopes of creating full
	 * stripes.
	 */
	list_sort(NULL, &plug->rbio_list, plug_cmp);
	while (!list_empty(&plug->rbio_list)) {
		cur = list_entry(plug->rbio_list.next,
				 struct btrfs_raid_bio, plug_list);
		list_del_init(&cur->plug_list);

		if (rbio_is_full(cur)) {
			/* we have a full stripe, send it down */
			full_stripe_write(cur);
			continue;
		}
		if (last) {
			if (rbio_can_merge(last, cur)) {
				merge_rbio(last, cur);
				__free_raid_bio(cur);
				continue;
			}

			__raid56_parity_write(last);
		}
		last = cur;
	}
	if (last) {
		__raid56_parity_write(last);
	}
	kfree(plug);
}
/*
 * if the unplug comes from schedule, we have to push the
 * work off to a helper thread
 */
static void unplug_work(struct btrfs_work *work)
{
	struct btrfs_plug_cb *plug;

	plug = container_of(work, struct btrfs_plug_cb, work);
	run_plug(plug);
}

static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct btrfs_plug_cb *plug;

	plug = container_of(cb, struct btrfs_plug_cb, cb);

	if (from_schedule) {
		btrfs_init_work(&plug->work, btrfs_rmw_helper,
				unplug_work, NULL, NULL);
		btrfs_queue_work(plug->info->rmw_workers,
				 &plug->work);
		return;
	}
	run_plug(plug);
}
/*
 * our main entry point for writes from the rest of the FS.
 */
int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
			struct btrfs_bio *bbio, u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	struct btrfs_plug_cb *plug = NULL;
	struct blk_plug_cb *cb;
	int ret;

	rbio = alloc_rbio(root, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;
	rbio->operation = BTRFS_RBIO_WRITE;

	btrfs_bio_counter_inc_noblocked(root->fs_info);
	rbio->generic_bio_cnt = 1;

	/*
	 * don't plug on full rbios, just get them out the door
	 * as quickly as we can
	 */
	if (rbio_is_full(rbio)) {
		ret = full_stripe_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(root->fs_info);
		return ret;
	}

	cb = blk_check_plugged(btrfs_raid_unplug, root->fs_info,
			       sizeof(*plug));
	if (cb) {
		plug = container_of(cb, struct btrfs_plug_cb, cb);
		if (!plug->info) {
			plug->info = root->fs_info;
			INIT_LIST_HEAD(&plug->rbio_list);
		}
		list_add_tail(&rbio->plug_list, &plug->rbio_list);
		ret = 0;
	} else {
		ret = __raid56_parity_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(root->fs_info);
	}
	return ret;
}
/*
 * all parity reconstruction happens here.  We've read in everything
 * we can find from the drives and this does the heavy lifting of
 * sorting the good from the bad.
 */
static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
{
	int pagenr, stripe;
	void **pointers;
	int faila = -1, failb = -1;
	int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE);
	struct page *page;
	int err;
	int i;

	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	if (!pointers) {
		err = -ENOMEM;
		goto cleanup_io;
	}

	faila = rbio->faila;
	failb = rbio->failb;

	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		spin_lock_irq(&rbio->bio_list_lock);
		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
		spin_unlock_irq(&rbio->bio_list_lock);
	}

	index_rbio_pages(rbio);

	for (pagenr = 0; pagenr < nr_pages; pagenr++) {
		/*
		 * Now we just use bitmap to mark the horizontal stripes in
		 * which we have data when doing parity scrub.
		 */
		if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
		    !test_bit(pagenr, rbio->dbitmap))
			continue;

		/* setup our array of pointers with pages
		 * from each stripe
		 */
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			pointers[stripe] = kmap(page);
		}

		/* all raid6 handling here */
		if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
			/*
			 * single failure, rebuild from parity raid5
			 * style
			 */
			if (failb < 0) {
				if (faila == rbio->nr_data) {
					/*
					 * Just the P stripe has failed, without
					 * a bad data or Q stripe.
					 * TODO, we should redo the xor here.
					 */
					err = -EIO;
					goto cleanup;
				}
				/*
				 * a single failure in raid6 is rebuilt
				 * in the pstripe code below
				 */
				goto pstripe;
			}

			/* make sure our ps and qs are in order */
			if (faila > failb) {
				int tmp = failb;
				failb = faila;
				faila = tmp;
			}

			/* if the q stripe is failed, do a pstripe reconstruction
			 * from the xors.
			 * If both the q stripe and the P stripe are failed, we're
			 * here due to a crc mismatch and we can't give them the
			 * data they want
			 */
			if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
				if (rbio->bbio->raid_map[faila] ==
				    RAID5_P_STRIPE) {
					err = -EIO;
					goto cleanup;
				}
				/*
				 * otherwise we have one bad data stripe and
				 * a good P stripe.  raid5!
				 */
				goto pstripe;
			}

			if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
				raid6_datap_recov(rbio->real_stripes,
						  PAGE_SIZE, faila, pointers);
			} else {
				raid6_2data_recov(rbio->real_stripes,
						  PAGE_SIZE, faila, failb,
						  pointers);
			}
		} else {
			void *p;

			/* rebuild from P stripe here (raid5 or raid6) */
			BUG_ON(failb != -1);
pstripe:
			/* Copy parity block into failed block to start with */
			memcpy(pointers[faila],
			       pointers[rbio->nr_data],
			       PAGE_CACHE_SIZE);

			/* rearrange the pointer array */
			p = pointers[faila];
			for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
				pointers[stripe] = pointers[stripe + 1];
			pointers[rbio->nr_data - 1] = p;

			/* xor in the rest */
			run_xor(pointers, rbio->nr_data - 1, PAGE_CACHE_SIZE);
		}
		/* if we're doing this rebuild as part of an rmw, go through
		 * and set all of our private rbio pages in the
		 * failed stripes as uptodate.  This way finish_rmw will
		 * know they can be trusted.  If this was a read reconstruction,
		 * other endio functions will fiddle the uptodate bits
		 */
		if (rbio->operation == BTRFS_RBIO_WRITE) {
			for (i = 0; i < nr_pages; i++) {
				if (faila != -1) {
					page = rbio_stripe_page(rbio, faila, i);
					SetPageUptodate(page);
				}
				if (failb != -1) {
					page = rbio_stripe_page(rbio, failb, i);
					SetPageUptodate(page);
				}
			}
		}
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			kunmap(page);
		}
	}

	err = 0;
cleanup:
	kfree(pointers);

cleanup_io:
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
		if (err == 0)
			cache_rbio_pages(rbio);
		else
			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

		rbio_orig_end_io(rbio, err);
	} else if (rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		rbio_orig_end_io(rbio, err);
	} else if (err == 0) {
		rbio->faila = -1;
		rbio->failb = -1;

		if (rbio->operation == BTRFS_RBIO_WRITE)
			finish_rmw(rbio);
		else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
			finish_parity_scrub(rbio, 0);
		else
			BUG();
	} else {
		rbio_orig_end_io(rbio, err);
	}
}
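
/*
 * Recovery cases handled above, for reference: a single bad data stripe
 * is rebuilt RAID5-style by copying P over the missing block and xoring
 * the surviving data back in (the pstripe path).  On RAID6, a bad data
 * stripe plus a bad P stripe is repaired with raid6_datap_recov(), and
 * two bad data stripes with raid6_2data_recov().  A bad P stripe alone
 * (the TODO above) or bad P and Q together with a data error cannot be
 * repaired here and end the rbio with -EIO.
 */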
/*
 * This is called only for stripes we've read from disk to
 * reconstruct the parity.
 */
static void raid_recover_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	/*
	 * we only read stripe pages off the disk, set them
	 * up to date if there were no errors
	 */
	if (bio->bi_error)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);
	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		rbio_orig_end_io(rbio, -EIO);
	else
		__raid_recover_end_io(rbio);
}
/*
 * reads everything we need off the disk to reconstruct
 * the parity. endio handlers trigger final reconstruction
 * when the IO is done.
 *
 * This is used both for reads from the higher layers and for
 * parity construction required to finish a rmw cycle.
 */
static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE);
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_pages(rbio);
	if (ret)
		goto cleanup;

	atomic_set(&rbio->error, 0);

	/*
	 * read everything that hasn't failed.  Thanks to the
	 * stripe cache, it is possible that some or all of these
	 * pages are going to be uptodate.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (rbio->faila == stripe || rbio->failb == stripe) {
			atomic_inc(&rbio->error);
			continue;
		}

		for (pagenr = 0; pagenr < nr_pages; pagenr++) {
			struct page *p;

			/*
			 * the rmw code may have already read this
			 * page in
			 */
			p = rbio_stripe_page(rbio, stripe, pagenr);
			if (PageUptodate(p))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list,
				       rbio_stripe_page(rbio, stripe, pagenr),
				       stripe, pagenr, rbio->stripe_len);
			if (ret < 0)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * we might have no bios to read just because the pages
		 * were up to date, or we might have no bios to read because
		 * the devices were gone.
		 */
		if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
			__raid_recover_end_io(rbio);
			goto out;
		} else {
			goto cleanup;
		}
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_recover_end_io;

		btrfs_bio_wq_end_io(rbio->fs_info, bio,
				    BTRFS_WQ_ENDIO_RAID56);

		submit_bio(READ, bio);
	}
out:
	return 0;

cleanup:
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
		rbio_orig_end_io(rbio, -EIO);
	return -EIO;
}
/*
 * the main entry point for reads from the higher layers.  This
 * is really only called when the normal read path had a failure,
 * so we assume the bio they send down corresponds to a failed part
 * of the drive.
 */
int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
			  struct btrfs_bio *bbio, u64 stripe_len,
			  int mirror_num, int generic_io)
{
	struct btrfs_raid_bio *rbio;
	int ret;

	rbio = alloc_rbio(root, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		if (generic_io)
			btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}

	rbio->operation = BTRFS_RBIO_READ_REBUILD;
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		if (generic_io)
			btrfs_put_bbio(bbio);
		kfree(rbio);
		return -EIO;
	}

	if (generic_io) {
		btrfs_bio_counter_inc_noblocked(root->fs_info);
		rbio->generic_bio_cnt = 1;
	} else {
		btrfs_get_bbio(bbio);
	}

	/*
	 * reconstruct from the q stripe if they are
	 * asking for mirror 3
	 */
	if (mirror_num == 3)
		rbio->failb = rbio->real_stripes - 2;

	ret = lock_stripe_add(rbio);

	/*
	 * __raid56_parity_recover will end the bio with
	 * any errors it hits.  We don't want to return
	 * its error value up the stack because our caller
	 * will end up calling bio_endio with any nonzero
	 * return
	 */
	if (ret == 0)
		__raid56_parity_recover(rbio);
	/*
	 * our rbio has been added to the list of
	 * rbios that will be handled after the
	 * current lock owner is done
	 */
	return 0;
}
static void rmw_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_rmw_stripe(rbio);
}

static void read_rebuild_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	__raid56_parity_recover(rbio);
}
/*
 * The following code is used to scrub/replace the parity stripe
 *
 * Note: we need to make sure that all the pages added to the
 * scrub/replace raid bio are correct and do not change during the
 * scrub/replace, i.e. those pages just hold metadata or file data
 * with checksums.
 */

struct btrfs_raid_bio *
raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
			       struct btrfs_bio *bbio, u64 stripe_len,
			       struct btrfs_device *scrub_dev,
			       unsigned long *dbitmap, int stripe_nsectors)
{
	struct btrfs_raid_bio *rbio;
	int i;

	rbio = alloc_rbio(root, bbio, stripe_len);
	if (IS_ERR(rbio))
		return NULL;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and to make the scrub rbio similar to the other types
	 */
	ASSERT(!bio->bi_iter.bi_size);
	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;

	for (i = 0; i < rbio->real_stripes; i++) {
		if (bbio->stripes[i].dev == scrub_dev) {
			rbio->scrubp = i;
			break;
		}
	}

	/* Now we just support the sectorsize equals to page size */
	ASSERT(root->sectorsize == PAGE_SIZE);
	ASSERT(rbio->stripe_npages == stripe_nsectors);
	bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);

	return rbio;
}
/* Used for both parity scrub and missing. */
void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
			    u64 logical)
{
	int stripe_offset;
	int index;

	ASSERT(logical >= rbio->bbio->raid_map[0]);
	ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
				rbio->stripe_len * rbio->nr_data);
	stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
	index = stripe_offset >> PAGE_CACHE_SHIFT;
	rbio->bio_pages[index] = page;
}
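
/*
 * Index math above, with example numbers (purely illustrative): if
 * raid_map[0] is logical 1G and the caller adds a page for logical
 * 1G + 40K, stripe_offset is 40K and, with 4K pages, the page lands at
 * bio_pages[10], i.e. the 11th page of the data portion of the stripe.
 */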
/*
 * We only scrub the parity for horizontal stripes where we have correct
 * data, so we don't need to allocate pages for all the stripes.
 */
static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
{
	struct page *page;
	int index;
	int bit;
	int i;

	for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
		for (i = 0; i < rbio->real_stripes; i++) {
			index = i * rbio->stripe_npages + bit;
			if (rbio->stripe_pages[index])
				continue;

			page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
			if (!page)
				return -ENOMEM;
			rbio->stripe_pages[index] = page;
			ClearPageUptodate(page);
		}
	}
	return 0;
}
/*
 * end io function used by finish_parity_scrub.  When we finally
 * get here, we've written a full stripe
 */
static void raid_write_parity_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;
	int err = bio->bi_error;

	if (err)
		fail_bio_stripe(rbio, bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	err = 0;

	if (atomic_read(&rbio->error))
		err = -EIO;

	rbio_orig_end_io(rbio, err);
}
static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void *pointers[rbio->real_stripes];
	DECLARE_BITMAP(pbitmap, rbio->stripe_npages);
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	int p_stripe = -1;
	int q_stripe = -1;
	struct page *p_page = NULL;
	struct page *q_page = NULL;
	struct bio_list bio_list;
	struct bio *bio;
	int is_replace = 0;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1) {
		p_stripe = rbio->real_stripes - 1;
	} else if (rbio->real_stripes - rbio->nr_data == 2) {
		p_stripe = rbio->real_stripes - 2;
		q_stripe = rbio->real_stripes - 1;
	} else {
		BUG();
	}

	if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
		is_replace = 1;
		bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
	}

	/*
	 * Because the higher layers (the scrubber) are unlikely to use
	 * this area of the disk again soon, don't cache it.
	 */
	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	if (!need_check)
		goto writeback;

	p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!p_page)
		goto cleanup;
	SetPageUptodate(p_page);

	if (q_stripe != -1) {
		q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!q_page) {
			__free_page(p_page);
			goto cleanup;
		}
		SetPageUptodate(q_page);
	}

	atomic_set(&rbio->error, 0);

	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *p;
		void *parity;

		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		/* then add the parity stripe */
		pointers[stripe++] = kmap(p_page);

		if (q_stripe != -1) {

			/*
			 * raid6, add the qstripe and call the
			 * library function to fill in our p/q
			 */
			pointers[stripe++] = kmap(q_page);

			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
			run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE);
		}

		/* Check the scrubbed parity and repair it */
		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		parity = kmap(p);
		if (memcmp(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE))
			memcpy(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE);
		else
			/* Parity is right, needn't writeback */
			bitmap_clear(rbio->dbitmap, pagenr, 1);
		kunmap(p);

		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
	}

	__free_page(p_page);
	if (q_page)
		__free_page(q_page);

writeback:
	/*
	 * time to start writing.  Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
	 * everything else.
	 */
	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list,
			       page, rbio->scrubp, pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

	if (!is_replace)
		goto submit_write;

	for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list, page,
				       bbio->tgtdev_map[rbio->scrubp],
				       pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

submit_write:
	nr_data = bio_list_size(&bio_list);
	if (!nr_data) {
		/* Every parity is right */
		rbio_orig_end_io(rbio, 0);
		return;
	}

	atomic_set(&rbio->stripes_pending, nr_data);

	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_parity_end_io;
		submit_bio(WRITE, bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
}
static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
{
	if (stripe >= 0 && stripe < rbio->nr_data)
		return 1;
	return 0;
}
/*
 * While we're doing the parity check and repair, we could have errors
 * in reading pages off the disk.  This checks for errors and if we're
 * not able to read the page it'll trigger parity reconstruction.  The
 * parity scrub will be finished after we've reconstructed the failed
 * stripes
 */
static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
{
	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	if (rbio->faila >= 0 || rbio->failb >= 0) {
		int dfail = 0, failp = -1;

		if (is_data_stripe(rbio, rbio->faila))
			dfail++;
		else if (is_parity_stripe(rbio->faila))
			failp = rbio->faila;

		if (is_data_stripe(rbio, rbio->failb))
			dfail++;
		else if (is_parity_stripe(rbio->failb))
			failp = rbio->failb;

		/*
		 * Because we cannot use the parity that is being scrubbed
		 * to repair data, the repair capability is reduced by one.
		 * (In the case of RAID5 we cannot repair anything.)
		 */
		if (dfail > rbio->bbio->max_errors - 1)
			goto cleanup;

		/*
		 * If all the data is good and only the parity is bad,
		 * just repair the parity.
		 */
		if (dfail == 0) {
			finish_parity_scrub(rbio, 0);
			return;
		}

		/*
		 * Here we got one corrupted data stripe and one corrupted
		 * parity on RAID6.  If the corrupted parity is the one being
		 * scrubbed, luckily we can use the other one to repair the
		 * data; otherwise we cannot repair the data stripe.
		 */
		if (failp != rbio->scrubp)
			goto cleanup;

		__raid_recover_end_io(rbio);
	} else {
		finish_parity_scrub(rbio, 1);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
}
/*
 * end io for the read phase of the scrub/replace cycle.  All the bios here
 * are physical stripe bios we've read from the disk so we can recalculate
 * the parity of the stripe.
 *
 * This will usually kick off finish_parity_scrub once all the bios are read
 * in, but it may trigger parity reconstruction if we had any errors along
 * the way
 */
static void raid56_parity_scrub_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_error)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	/*
	 * this will normally call finish_parity_scrub to start our write
	 * but if there are any failed stripes we'll reconstruct
	 * from parity first
	 */
	validate_rbio_for_parity_scrub(rbio);
}
static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	ret = alloc_rbio_essential_pages(rbio);
	if (ret)
		goto cleanup;

	bio_list_init(&bio_list);

	atomic_set(&rbio->error, 0);
	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
			struct page *page;
			/*
			 * we want to find all the pages missing from
			 * the rbio and read them from the disk.  If
			 * page_in_rbio finds a page in the bio list
			 * we don't need to read it off the stripe.
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page.  If so, be happy and use it
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
				       stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with
		 * us, it means there is nothing left to read.
		 * But if there are missing devices it may not be
		 * safe to do the full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid56_parity_scrub_end_io;

		btrfs_bio_wq_end_io(rbio->fs_info, bio,
				    BTRFS_WQ_ENDIO_RAID56);

		submit_bio(READ, bio);
	}
	/* the actual write will happen once the reads are done */
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
	return;

finish:
	validate_rbio_for_parity_scrub(rbio);
}
static void scrub_parity_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_parity_scrub_stripe(rbio);
}

static void async_scrub_parity(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			scrub_parity_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers,
			 &rbio->work);
}

void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		async_scrub_parity(rbio);
}
/* The following code is used for dev replace of a missing RAID 5/6 device. */

struct btrfs_raid_bio *
raid56_alloc_missing_rbio(struct btrfs_root *root, struct bio *bio,
			  struct btrfs_bio *bbio, u64 length)
{
	struct btrfs_raid_bio *rbio;

	rbio = alloc_rbio(root, bbio, length);
	if (IS_ERR(rbio))
		return NULL;

	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and to make the rbio similar to the other types
	 */
	ASSERT(!bio->bi_iter.bi_size);

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		kfree(rbio);
		return NULL;
	}

	return rbio;
}
static void missing_raid56_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	__raid56_parity_recover(rbio);
}

static void async_missing_raid56(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			missing_raid56_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}

void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		async_missing_raid56(rbio);
}