/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "raid56.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and the super block and verifies their checksums. In case a bad
 * checksum is found or the extent cannot be read, good data will be written
 * back if any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

/*
 * The following three values only influence the performance.
 * The last one configures the number of parallel and outstanding I/O
 * operations. The first two values configure an upper limit for the number
 * of (dynamically allocated) pages that are added to a bio.
 */
#define SCRUB_PAGES_PER_RD_BIO	32	/* 128k per bio */
#define SCRUB_PAGES_PER_WR_BIO	32	/* 128k per bio */
#define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */

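/*
 * With the default 4K PAGE_SIZE, these constants work out as follows:
 * one read or write bio carries up to 32 * 4K = 128K of payload, and a
 * scrub context keeps at most SCRUB_BIOS_PER_SCTX = 64 such bios in
 * flight, i.e. up to 64 * 128K = 8MB of outstanding I/O per scrubbed
 * device, which is where the size comments above come from.
 */
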
/*
 * The following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 * Values larger than BTRFS_STRIPE_LEN are not supported.
 */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */

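/*
 * Assuming 4K pages again, 16 pages give the 64K capacity noted above,
 * which covers the largest supported node/leaf/sector size, so a single
 * scrub_block never needs more than SCRUB_MAX_PAGES_PER_BLOCK pages.
 */
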
struct scrub_recover {
	atomic_t		refs;
	struct btrfs_bio	*bbio;
	u64			map_length;
};

struct scrub_page {
	struct scrub_block	*sblock;
	struct page		*page;
	struct btrfs_device	*dev;
	struct list_head	list;
	u64			flags;	/* extent flags */
	u64			generation;
	u64			logical;
	u64			physical;
	u64			physical_for_dev_replace;
	atomic_t		refs;
	unsigned int		mirror_num:8;
	unsigned int		have_csum:1;
	unsigned int		io_error:1;
	u8			csum[BTRFS_CSUM_SIZE];

	struct scrub_recover	*recover;
};

struct scrub_bio {
	int			index;
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	struct bio		*bio;
	int			err;
	u64			logical;
	u64			physical;
#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
	struct scrub_page	*pagev[SCRUB_PAGES_PER_WR_BIO];
#else
	struct scrub_page	*pagev[SCRUB_PAGES_PER_RD_BIO];
#endif
	int			page_count;
	int			next_free;
	struct btrfs_work	work;
};

struct scrub_block {
	struct scrub_page	*pagev[SCRUB_MAX_PAGES_PER_BLOCK];
	int			page_count;
	atomic_t		outstanding_pages;
	atomic_t		refs;	/* free mem on transition to zero */
	struct scrub_ctx	*sctx;
	struct scrub_parity	*sparity;
	unsigned int		header_error:1;
	unsigned int		checksum_error:1;
	unsigned int		no_io_error_seen:1;
	unsigned int		generation_error:1;	/* also sets header_error */

	/* The following is for the data used to check parity */
	/* It is for the data with checksum */
	unsigned int		data_corrected:1;
};

/* Used for the chunks with parity stripe such as RAID5/6 */
struct scrub_parity {
	struct scrub_ctx	*sctx;

	struct btrfs_device	*scrub_dev;

	u64			logic_start;

	int			nsectors;

	int			stripe_len;

	atomic_t		refs;

	struct list_head	spages;

	/* Work of parity check and repair */
	struct btrfs_work	work;

	/* Mark the parity blocks which have data */
	unsigned long		*dbitmap;

	/*
	 * Mark the parity blocks which have data, but errors happen when
	 * reading data or checking data
	 */
	unsigned long		*ebitmap;

	unsigned long		bitmap[0];
};

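/*
 * Note on the zero-length bitmap[] member above: dbitmap and ebitmap
 * are expected to point into this trailing flexible array, so one
 * allocation of sizeof(struct scrub_parity) plus space for both bitmaps
 * can serve the whole structure. This is inferred from the layout; the
 * allocation site itself is not part of this excerpt.
 */
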
struct scrub_wr_ctx {
	struct scrub_bio	*wr_curr_bio;
	struct btrfs_device	*tgtdev;
	int			pages_per_wr_bio;	/* <= SCRUB_PAGES_PER_WR_BIO */
	atomic_t		flush_all_writes;
	struct mutex		wr_lock;
};

struct scrub_ctx {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
	struct btrfs_root	*dev_root;
	int			first_free;
	int			curr;
	atomic_t		bios_in_flight;
	atomic_t		workers_pending;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	u16			csum_size;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	int			pages_per_rd_bio;
	u32			sectorsize;
	u32			nodesize;

	int			is_dev_replace;
	struct scrub_wr_ctx	wr_ctx;

	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;

	/*
	 * Use a ref counter to avoid use-after-free issues. Scrub workers
	 * decrement bios_in_flight and workers_pending and then do a wakeup
	 * on the list_wait wait queue. We must ensure the main scrub task
	 * doesn't free the scrub context before or while the workers are
	 * doing the wakeup() call.
	 */
	atomic_t		refs;
};

struct scrub_fixup_nodatasum {
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	u64			logical;
	struct btrfs_root	*root;
	struct btrfs_work	work;
	int			mirror_num;
};

struct scrub_nocow_inode {
	u64			inum;
	u64			offset;
	u64			root;
	struct list_head	list;
};

struct scrub_copy_nocow_ctx {
	struct scrub_ctx	*sctx;
	u64			logical;
	u64			len;
	int			mirror_num;
	u64			physical_for_dev_replace;
	struct list_head	inodes;
	struct btrfs_work	work;
};

struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	const char		*errstr;
	sector_t		sector;
	u64			logical;
	struct btrfs_device	*dev;
};

static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock, int is_metadata,
				int have_csum, u8 *csum, u64 generation,
				u16 csum_size, int retry_failed_mirror);
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
					 struct scrub_block *sblock,
					 int is_metadata, int have_csum,
					 const u8 *csum, u64 generation,
					 u16 csum_size);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_page_get(struct scrub_page *spage);
static void scrub_page_put(struct scrub_page *spage);
static void scrub_parity_get(struct scrub_parity *sparity);
static void scrub_parity_put(struct scrub_parity *sparity);
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio, int err);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num);
static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
			      struct scrub_wr_ctx *wr_ctx,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_device *dev,
			      int is_dev_replace);
static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio, int err);
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page);
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
				      struct scrub_copy_nocow_ctx *ctx);
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
			    int mirror_num, u64 physical_for_dev_replace);
static void copy_nocow_pages_worker(struct btrfs_work *work);
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_put_ctx(struct scrub_ctx *sctx);

static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
	atomic_inc(&sctx->refs);
	atomic_inc(&sctx->bios_in_flight);
}

static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
	atomic_dec(&sctx->bios_in_flight);
	wake_up(&sctx->list_wait);
	scrub_put_ctx(sctx);
}

static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
}

static void scrub_pause_on(struct btrfs_fs_info *fs_info)
{
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_pause_off(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	__scrub_blocked_if_needed(fs_info);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	scrub_pause_on(fs_info);
	scrub_pause_off(fs_info);
}

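/*
 * The pause protocol implemented by the helpers above: whoever wants
 * scrubs to pause raises fs_info->scrub_pause_req and waits on
 * scrub_pause_wait; each scrub task periodically calls
 * scrub_blocked_if_needed(), which marks itself paused, blocks in
 * __scrub_blocked_if_needed() (dropping scrub_lock while sleeping)
 * until the request is withdrawn, and then unmarks itself again.
 */
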
/*
 * used for workers that require transaction commits (i.e., for the
 * NOCOW case)
 */
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	atomic_inc(&sctx->refs);
	/*
	 * increment scrubs_running to prevent cancel requests from
	 * completing as long as a worker is running. we must also
	 * increment scrubs_paused to prevent deadlocking on pause
	 * requests used for transaction commits (as the worker uses a
	 * transaction context). it is safe to regard the worker
	 * as paused for all practical matters. effectively, we only
	 * avoid cancellation requests from completing.
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrubs_running);
	atomic_inc(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	/*
	 * the @scrubs_running=@scrubs_paused condition checked inside
	 * wait_event() is not evaluated atomically, which means we may
	 * inc/dec @scrubs_running/paused at any time. Wake up
	 * @scrub_pause_wait as much as we can to let a blocked
	 * transaction commit proceed sooner.
	 */
	wake_up(&fs_info->scrub_pause_wait);

	atomic_inc(&sctx->workers_pending);
}

/* used for workers that require transaction commits */
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	/*
	 * see scrub_pending_trans_workers_inc() for why we're pretending
	 * to be paused in the scrub counters
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_dec(&fs_info->scrubs_running);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_dec(&sctx->workers_pending);
	wake_up(&fs_info->scrub_pause_wait);
	wake_up(&sctx->list_wait);
	scrub_put_ctx(sctx);
}

static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;

		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}

static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	scrub_free_wr_ctx(&sctx->wr_ctx);

	/* this can happen when scrub is cancelled */
	if (sctx->curr != -1) {
		struct scrub_bio *sbio = sctx->bios[sctx->curr];

		for (i = 0; i < sbio->page_count; i++) {
			WARN_ON(!sbio->pagev[i]->page);
			scrub_block_put(sbio->pagev[i]->sblock);
		}
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio = sctx->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	scrub_free_csums(sctx);
	kfree(sctx);
}

static void scrub_put_ctx(struct scrub_ctx *sctx)
{
	if (atomic_dec_and_test(&sctx->refs))
		scrub_free_ctx(sctx);
}

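/*
 * Lifetime of a scrub_ctx: scrub_setup_ctx() below starts with refs == 1
 * held by the owner. Every in-flight bio (scrub_pending_bio_inc) and
 * every transaction worker (scrub_pending_trans_workers_inc) takes an
 * extra reference that the matching *_dec path drops via scrub_put_ctx(),
 * so the context can only be freed once the owner and all asynchronous
 * work have let go. See also the comment at the refs member of scrub_ctx.
 */
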
static noinline_for_stack
struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int		i;
	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
	int pages_per_rd_bio;
	int ret;

	/*
	 * the setting of pages_per_rd_bio is correct for scrub but might
	 * be wrong for the dev_replace code where we might read from
	 * different devices in the initial huge bios. However, that
	 * code is able to correctly handle the case when adding a page
	 * to a bio fails.
	 */
	if (dev->bdev)
		pages_per_rd_bio = min_t(int, SCRUB_PAGES_PER_RD_BIO,
					 bio_get_nr_vecs(dev->bdev));
	else
		pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
	sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
	if (!sctx)
		goto nomem;
	atomic_set(&sctx->refs, 1);
	sctx->is_dev_replace = is_dev_replace;
	sctx->pages_per_rd_bio = pages_per_rd_bio;
	sctx->curr = -1;
	sctx->dev_root = dev->dev_root;
	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
		if (!sbio)
			goto nomem;
		sctx->bios[i] = sbio;

		sbio->index = i;
		sbio->sctx = sctx;
		sbio->page_count = 0;
		btrfs_init_work(&sbio->work, btrfs_scrub_helper,
				scrub_bio_end_io_worker, NULL, NULL);

		if (i != SCRUB_BIOS_PER_SCTX - 1)
			sctx->bios[i]->next_free = i + 1;
		else
			sctx->bios[i]->next_free = -1;
	}
	sctx->first_free = 0;
	sctx->nodesize = dev->dev_root->nodesize;
	sctx->sectorsize = dev->dev_root->sectorsize;
	atomic_set(&sctx->bios_in_flight, 0);
	atomic_set(&sctx->workers_pending, 0);
	atomic_set(&sctx->cancel_req, 0);
	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
	INIT_LIST_HEAD(&sctx->csum_list);

	spin_lock_init(&sctx->list_lock);
	spin_lock_init(&sctx->stat_lock);
	init_waitqueue_head(&sctx->list_wait);

	ret = scrub_setup_wr_ctx(sctx, &sctx->wr_ctx, fs_info,
				 fs_info->dev_replace.tgtdev, is_dev_replace);
	if (ret) {
		scrub_free_ctx(sctx);
		return ERR_PTR(ret);
	}
	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}

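/*
 * A minimal usage sketch for the two functions above (hypothetical
 * caller, shown only for illustration; in the full file this role is
 * played by the scrub entry point, which is not part of this excerpt):
 *
 *	struct scrub_ctx *sctx = scrub_setup_ctx(dev, 0);
 *
 *	if (IS_ERR(sctx))
 *		return PTR_ERR(sctx);
 *	... queue read bios via scrub_pages()/scrub_submit(), then wait
 *	... on sctx->list_wait until bios_in_flight drops to zero
 *	scrub_put_ctx(sctx);	<- drops the initial reference
 */
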
static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
				     void *warn_ctx)
{
	u64 isize;
	u32 nlink;
	int ret;
	int i;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = warn_ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key root_key;
	struct btrfs_key key;

	root_key.objectid = root;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/*
	 * this makes the path point to (inum INODE_ITEM ioff)
	 */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
	if (ret) {
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
					struct btrfs_inode_item);
	isize = btrfs_inode_size(eb, inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	ipath = init_ipath(4096, local_root, swarn->path);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);
	if (ret < 0)
		goto err;

	/*
	 * we deliberately ignore the fact that ipath might have been too
	 * small to hold all of the paths here
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		printk_in_rcu(KERN_WARNING "BTRFS: %s at logical %llu on dev "
			"%s, sector %llu, root %llu, inode %llu, offset %llu, "
			"length %llu, links %u (path: %s)\n", swarn->errstr,
			swarn->logical, rcu_str_deref(swarn->dev->name),
			(unsigned long long)swarn->sector, root, inum, offset,
			min(isize - offset, (u64)PAGE_SIZE), nlink,
			(char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	return 0;

err:
	printk_in_rcu(KERN_WARNING "BTRFS: %s at logical %llu on dev "
		"%s, sector %llu, root %llu, inode %llu, offset %llu: path "
		"resolving failed with ret=%d\n", swarn->errstr,
		swarn->logical, rcu_str_deref(swarn->dev->name),
		(unsigned long long)swarn->sector, root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}

static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	unsigned long ptr = 0;
	u64 extent_item_pos;
	u64 flags = 0;
	u64 ref_root;
	u32 item_size;
	u8 ref_level;
	int ret;

	WARN_ON(sblock->page_count < 1);
	dev = sblock->pagev[0]->dev;
	fs_info = sblock->sctx->dev_root->fs_info;

	path = btrfs_alloc_path();
	if (!path)
		return;

	swarn.sector = (sblock->pagev[0]->physical) >> 9;
	swarn.logical = sblock->pagev[0]->logical;
	swarn.errstr = errstr;
	swarn.dev = NULL;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
				  &flags);
	if (ret < 0)
		goto out;

	extent_item_pos = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			printk_in_rcu(KERN_WARNING
				"BTRFS: %s at logical %llu on dev %s, "
				"sector %llu: metadata %s (level %d) in tree "
				"%llu\n", errstr, swarn.logical,
				rcu_str_deref(dev->name),
				(unsigned long long)swarn.sector,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
		btrfs_release_path(path);
	} else {
		btrfs_release_path(path);
		swarn.path = path;
		swarn.dev = dev;
		iterate_extent_inodes(fs_info, found_key.objectid,
				      extent_item_pos, 1,
				      scrub_print_warning_inode, &swarn);
	}

out:
	btrfs_free_path(path);
}

static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
{
	struct page *page = NULL;
	unsigned long index;
	struct scrub_fixup_nodatasum *fixup = fixup_ctx;
	int ret;
	int corrected = 0;
	struct btrfs_key key;
	struct inode *inode = NULL;
	struct btrfs_fs_info *fs_info;
	u64 end = offset + PAGE_SIZE - 1;
	struct btrfs_root *local_root;
	int srcu_index;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	fs_info = fixup->root->fs_info;
	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(local_root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
		return PTR_ERR(local_root);
	}

	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	index = offset >> PAGE_CACHE_SHIFT;

	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page)) {
		if (PageDirty(page)) {
			/*
			 * we need to write the data to the defect sector. the
			 * data that was in that sector is not in memory,
			 * because the page was modified. we must not write the
			 * modified page to that sector.
			 *
			 * TODO: what could be done here: wait for the delalloc
			 *       runner to write out that page (might involve
			 *       COW) and see whether the sector is still
			 *       referenced afterwards.
			 *
			 * For the meantime, we'll treat this error
			 * incorrectable, although there is a chance that a
			 * later scrub will find the bad sector again and that
			 * there's no dirty page in memory, then.
			 */
			ret = -EIO;
			goto out;
		}
		ret = repair_io_failure(inode, offset, PAGE_SIZE,
					fixup->logical, page,
					offset - page_offset(page),
					fixup->mirror_num);
		unlock_page(page);
		corrected = !ret;
	} else {
		/*
		 * we need to get good data first. the general readpage path
		 * will call repair_io_failure for us, we just have to make
		 * sure we read the bad mirror.
		 */
		ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
					EXTENT_DAMAGED, GFP_NOFS);
		if (ret) {
			/* set_extent_bits should give proper error */
			WARN_ON(ret > 0);
			if (ret > 0)
				ret = -EFAULT;
			goto out;
		}

		ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
						btrfs_get_extent,
						fixup->mirror_num);
		wait_on_page_locked(page);

		corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
						end, EXTENT_DAMAGED, 0, NULL);

		clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
					EXTENT_DAMAGED, GFP_NOFS);
	}

out:
	if (page)
		put_page(page);

	iput(inode);

	if (ret < 0)
		return ret;

	if (ret == 0 && corrected) {
		/*
		 * we only need to call readpage for one of the inodes belonging
		 * to this extent. so make iterate_extent_inodes stop
		 */
		return 1;
	}

	return -EIO;
}

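/*
 * Callback convention used above: scrub_fixup_readpage() runs as an
 * iterate_inodes_from_logical() callback, so returning 1 after a
 * successful correction stops the iteration early, 0 would mean
 * "continue with the next inode", and a negative value aborts with an
 * error.
 */
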
static void scrub_fixup_nodatasum(struct btrfs_work *work)
{
	int ret;
	struct scrub_fixup_nodatasum *fixup;
	struct scrub_ctx *sctx;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_path *path;
	int uncorrectable = 0;

	fixup = container_of(work, struct scrub_fixup_nodatasum, work);
	sctx = fixup->sctx;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.malloc_errors;
		spin_unlock(&sctx->stat_lock);
		uncorrectable = 1;
		goto out;
	}

	trans = btrfs_join_transaction(fixup->root);
	if (IS_ERR(trans)) {
		uncorrectable = 1;
		goto out;
	}

	/*
	 * the idea is to trigger a regular read through the standard path. we
	 * read a page from the (failed) logical address by specifying the
	 * corresponding copynum of the failed sector. thus, that readpage is
	 * expected to fail.
	 * that is the point where on-the-fly error correction will kick in
	 * (once it's finished) and rewrite the failed sector if a good copy
	 * can be found.
	 */
	ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
						path, scrub_fixup_readpage,
						fixup);
	if (ret < 0) {
		uncorrectable = 1;
		goto out;
	}
	WARN_ON(ret != 1);

	spin_lock(&sctx->stat_lock);
	++sctx->stat.corrected_errors;
	spin_unlock(&sctx->stat_lock);

out:
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, fixup->root);
	if (uncorrectable) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.uncorrectable_errors;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_replace_stats_inc(
			&sctx->dev_root->fs_info->dev_replace.
			num_uncorrectable_read_errors);
		printk_ratelimited_in_rcu(KERN_ERR "BTRFS: "
		    "unable to fixup (nodatasum) error at logical %llu on dev %s\n",
			fixup->logical, rcu_str_deref(fixup->dev->name));
	}

	btrfs_free_path(path);
	kfree(fixup);

	scrub_pending_trans_workers_dec(sctx);
}

static inline void scrub_get_recover(struct scrub_recover *recover)
{
	atomic_inc(&recover->refs);
}

static inline void scrub_put_recover(struct scrub_recover *recover)
{
	if (atomic_dec_and_test(&recover->refs)) {
		btrfs_put_bbio(recover->bbio);
		kfree(recover);
	}
}

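/*
 * A scrub_recover is shared by all scrub_pages that were mapped from
 * the same btrfs_bio: scrub_setup_recheck_block() below takes one
 * reference per page via scrub_get_recover(), so the bbio is released
 * only when the last page drops its reference here.
 */
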
/*
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
	struct scrub_ctx *sctx = sblock_to_check->sctx;
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	u64 length;
	u64 logical;
	u64 generation;
	unsigned int failed_mirror_index;
	unsigned int is_metadata;
	unsigned int have_csum;
	u8 *csum;
	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
	struct scrub_block *sblock_bad;
	int ret;
	int mirror_index;
	int page_num;
	int success;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	BUG_ON(sblock_to_check->page_count < 1);
	fs_info = sctx->dev_root->fs_info;
	if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
		/*
		 * if we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		return 0;
	}
	length = sblock_to_check->page_count * PAGE_SIZE;
	logical = sblock_to_check->pagev[0]->logical;
	generation = sblock_to_check->pagev[0]->generation;
	BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
	failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
	is_metadata = !(sblock_to_check->pagev[0]->flags &
			BTRFS_EXTENT_FLAG_DATA);
	have_csum = sblock_to_check->pagev[0]->have_csum;
	csum = sblock_to_check->pagev[0]->csum;
	dev = sblock_to_check->pagev[0]->dev;

	if (sctx->is_dev_replace && !is_metadata && !have_csum) {
		sblocks_for_recheck = NULL;
		goto nodatasum_case;
	}

	/*
	 * read all mirrors one after the other. This includes to
	 * re-read the extent or metadata block that failed (that was
	 * the cause that this fixup code is called) another time,
	 * page by page this time in order to know which pages
	 * caused I/O errors and which ones are good (for all mirrors).
	 * It is the goal to handle the situation when more than one
	 * mirror contains I/O errors, but the errors do not
	 * overlap, i.e. the data can be repaired by selecting the
	 * pages from those mirrors without I/O error on the
	 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
	 * would be that mirror #1 has an I/O error on the first page,
	 * the second page is good, and mirror #2 has an I/O error on
	 * the second page, but the first page is good.
	 * Then the first page of the first mirror can be repaired by
	 * taking the first page of the second mirror, and the
	 * second page of the second mirror can be repaired by
	 * copying the contents of the 2nd page of the 1st mirror.
	 * One more note: if the pages of one mirror contain I/O
	 * errors, the checksum cannot be verified. In order to get
	 * the best data for repairing, the first attempt is to find
	 * a mirror without I/O errors and with a validated checksum.
	 * Only if this is not possible, the pages are picked from
	 * mirrors with I/O errors without considering the checksum.
	 * If the latter is the case, at the end, the checksum of the
	 * repaired area is verified in order to correctly maintain
	 * the statistics.
	 */

	sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
				      sizeof(*sblocks_for_recheck), GFP_NOFS);
	if (!sblocks_for_recheck) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}

	/* setup the context, map the logical blocks and alloc the pages */
	ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
	if (ret) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}
	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
	sblock_bad = sblocks_for_recheck + failed_mirror_index;

	/* build and submit the bios for the failed mirror, check checksums */
	scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
			    csum, generation, sctx->csum_size, 1);

	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
	    sblock_bad->no_io_error_seen) {
		/*
		 * the error disappeared after reading page by page, or
		 * the area was part of a huge bio and other parts of the
		 * bio caused I/O errors, or the block layer merged several
		 * read requests into one and the error is caused by a
		 * different bio (usually one of the two latter cases is
		 * the cause)
		 */
		spin_lock(&sctx->stat_lock);
		sctx->stat.unverified_errors++;
		sblock_to_check->data_corrected = 1;
		spin_unlock(&sctx->stat_lock);

		if (sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock_bad);
		goto out;
	}

	if (!sblock_bad->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("i/o error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
	} else if (sblock_bad->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.csum_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev,
					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
	} else if (sblock_bad->header_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.verify_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum/header error",
					    sblock_to_check);
		if (sblock_bad->generation_error)
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
	}

	if (sctx->readonly) {
		ASSERT(!sctx->is_dev_replace);
		goto out;
	}

	if (!is_metadata && !have_csum) {
		struct scrub_fixup_nodatasum *fixup_nodatasum;

		WARN_ON(sctx->is_dev_replace);

nodatasum_case:

		/*
		 * !is_metadata and !have_csum, this means that the data
		 * might not be COW'ed, that it might be modified
		 * concurrently. The general strategy to work on the
		 * commit root does not help in the case when COW is not
		 * used.
		 */
		fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
		if (!fixup_nodatasum)
			goto did_not_correct_error;
		fixup_nodatasum->sctx = sctx;
		fixup_nodatasum->dev = dev;
		fixup_nodatasum->logical = logical;
		fixup_nodatasum->root = fs_info->extent_root;
		fixup_nodatasum->mirror_num = failed_mirror_index + 1;
		scrub_pending_trans_workers_inc(sctx);
		btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper,
				scrub_fixup_nodatasum, NULL, NULL);
		btrfs_queue_work(fs_info->scrub_workers,
				 &fixup_nodatasum->work);
		goto out;
	}

	/*
	 * now build and submit the bios for the other mirrors, check
	 * checksums.
	 * First try to pick the mirror which is completely without I/O
	 * errors and also does not have a checksum error.
	 * If one is found, and if a checksum is present, the full block
	 * that is known to contain an error is rewritten. Afterwards
	 * the block is known to be corrected.
	 * If a mirror is found which is completely correct, and no
	 * checksum is present, only those pages are rewritten that had
	 * an I/O error in the block to be repaired, since it cannot be
	 * determined, which copy of the other pages is better (and it
	 * could happen otherwise that a correct page would be
	 * overwritten by a bad one).
	 */
	for (mirror_index = 0;
	     mirror_index < BTRFS_MAX_MIRRORS &&
	     sblocks_for_recheck[mirror_index].page_count > 0;
	     mirror_index++) {
		struct scrub_block *sblock_other;

		if (mirror_index == failed_mirror_index)
			continue;
		sblock_other = sblocks_for_recheck + mirror_index;

		/* build and submit the bios, check checksums */
		scrub_recheck_block(fs_info, sblock_other, is_metadata,
				    have_csum, csum, generation,
				    sctx->csum_size, 0);

		if (!sblock_other->header_error &&
		    !sblock_other->checksum_error &&
		    sblock_other->no_io_error_seen) {
			if (sctx->is_dev_replace) {
				scrub_write_block_to_dev_replace(sblock_other);
				goto corrected_error;
			} else {
				ret = scrub_repair_block_from_good_copy(
						sblock_bad, sblock_other);
				if (!ret)
					goto corrected_error;
			}
		}
	}

	if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
		goto did_not_correct_error;

	/*
	 * In case of I/O errors in the area that is supposed to be
	 * repaired, continue by picking good copies of those pages.
	 * Select the good pages from mirrors to rewrite bad pages from
	 * the area to fix. Afterwards verify the checksum of the block
	 * that is supposed to be repaired. This verification step is
	 * only done for the purpose of statistic counting and for the
	 * final scrub report, whether errors remain.
	 * A perfect algorithm could make use of the checksum and try
	 * all possible combinations of pages from the different mirrors
	 * until the checksum verification succeeds. For example, when
	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
	 * of mirror #2 is readable but the final checksum test fails,
	 * then the 2nd page of mirror #3 could be tried, whether now
	 * the final checksum succeeds. But this would be a rare
	 * exception and is therefore not implemented. At least it is
	 * avoided that the good copy is overwritten.
	 * A more useful improvement would be to pick the sectors
	 * without I/O error based on sector sizes (512 bytes on legacy
	 * disks) instead of on PAGE_SIZE. Then maybe 512 byte of one
	 * mirror could be repaired by taking 512 byte of a different
	 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
	 * area are unreadable.
	 */
	success = 1;
	for (page_num = 0; page_num < sblock_bad->page_count;
	     page_num++) {
		struct scrub_page *page_bad = sblock_bad->pagev[page_num];
		struct scrub_block *sblock_other = NULL;

		/* skip no-io-error page in scrub */
		if (!page_bad->io_error && !sctx->is_dev_replace)
			continue;

		/* try to find no-io-error page in mirrors */
		if (page_bad->io_error) {
			for (mirror_index = 0;
			     mirror_index < BTRFS_MAX_MIRRORS &&
			     sblocks_for_recheck[mirror_index].page_count > 0;
			     mirror_index++) {
				if (!sblocks_for_recheck[mirror_index].
				    pagev[page_num]->io_error) {
					sblock_other = sblocks_for_recheck +
						       mirror_index;
					break;
				}
			}
			if (!sblock_other)
				success = 0;
		}

		if (sctx->is_dev_replace) {
			/*
			 * did not find a mirror to fetch the page
			 * from. scrub_write_page_to_dev_replace()
			 * handles this case (page->io_error), by
			 * filling the block with zeros before
			 * submitting the write request
			 */
			if (!sblock_other)
				sblock_other = sblock_bad;

			if (scrub_write_page_to_dev_replace(sblock_other,
							    page_num) != 0) {
				btrfs_dev_replace_stats_inc(
					&sctx->dev_root->
					fs_info->dev_replace.
					num_write_errors);
				success = 0;
			}
		} else if (sblock_other) {
			ret = scrub_repair_page_from_good_copy(sblock_bad,
							       sblock_other,
							       page_num, 0);
			if (0 == ret)
				page_bad->io_error = 0;
			else
				success = 0;
		}
	}

	if (success && !sctx->is_dev_replace) {
		if (is_metadata || have_csum) {
			/*
			 * need to verify the checksum now that all
			 * sectors on disk are repaired (the write
			 * request for data to be repaired is on its way).
			 * Just be lazy and use scrub_recheck_block()
			 * which re-reads the data before the checksum
			 * is verified, but most likely the data comes out
			 * of the page cache.
			 */
			scrub_recheck_block(fs_info, sblock_bad,
					    is_metadata, have_csum, csum,
					    generation, sctx->csum_size, 1);
			if (!sblock_bad->header_error &&
			    !sblock_bad->checksum_error &&
			    sblock_bad->no_io_error_seen)
				goto corrected_error;
			else
				goto did_not_correct_error;
		} else {
corrected_error:
			spin_lock(&sctx->stat_lock);
			sctx->stat.corrected_errors++;
			sblock_to_check->data_corrected = 1;
			spin_unlock(&sctx->stat_lock);
			printk_ratelimited_in_rcu(KERN_ERR
				"BTRFS: fixed up error at logical %llu on dev %s\n",
				logical, rcu_str_deref(dev->name));
		}
	} else {
did_not_correct_error:
		spin_lock(&sctx->stat_lock);
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		printk_ratelimited_in_rcu(KERN_ERR
			"BTRFS: unable to fixup (regular) error at logical %llu on dev %s\n",
			logical, rcu_str_deref(dev->name));
	}

out:
	if (sblocks_for_recheck) {
		for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
		     mirror_index++) {
			struct scrub_block *sblock = sblocks_for_recheck +
						     mirror_index;
			struct scrub_recover *recover;
			int page_index;

			for (page_index = 0; page_index < sblock->page_count;
			     page_index++) {
				sblock->pagev[page_index]->sblock = NULL;
				recover = sblock->pagev[page_index]->recover;
				if (recover) {
					scrub_put_recover(recover);
					sblock->pagev[page_index]->recover =
									NULL;
				}
				scrub_page_put(sblock->pagev[page_index]);
			}
		}
		kfree(sblocks_for_recheck);
	}

	return 0;
}

*bbio
)
1288 if (bbio
->map_type
& BTRFS_BLOCK_GROUP_RAID5
)
1290 else if (bbio
->map_type
& BTRFS_BLOCK_GROUP_RAID6
)
1293 return (int)bbio
->num_stripes
;
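/*
 * "Mirrors" is used loosely for the parity profiles here: a RAID5
 * stripe can be read directly or reconstructed from parity (2 ways to
 * get the data), and RAID6 with its two parity stripes adds a third.
 * For the duplicated profiles the number of stripes really is the
 * number of copies.
 */
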
static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
						 u64 *raid_map,
						 u64 mapped_length,
						 int nstripes, int mirror,
						 int *stripe_index,
						 u64 *stripe_offset)
{
	int i;

	if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		/* RAID5/6 */
		for (i = 0; i < nstripes; i++) {
			if (raid_map[i] == RAID6_Q_STRIPE ||
			    raid_map[i] == RAID5_P_STRIPE)
				continue;

			if (logical >= raid_map[i] &&
			    logical < raid_map[i] + mapped_length)
				break;
		}

		*stripe_index = i;
		*stripe_offset = logical - raid_map[i];
	} else {
		/* The other RAID type */
		*stripe_index = mirror;
		*stripe_offset = 0;
	}
}

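/*
 * Worked example for the RAID56 branch (hypothetical numbers): with
 * nstripes = 3, raid_map = { 1M, 2M, RAID5_P_STRIPE } and
 * mapped_length = 64K, a logical address of 2M + 4K skips the parity
 * slot, matches the i = 1 entry and yields *stripe_index = 1,
 * *stripe_offset = 4K. For the non-parity profiles the mirror number
 * picks the stripe directly and the offset is 0.
 */
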
static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck)
{
	struct scrub_ctx *sctx = original_sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
	u64 length = original_sblock->page_count * PAGE_SIZE;
	u64 logical = original_sblock->pagev[0]->logical;
	struct scrub_recover *recover;
	struct btrfs_bio *bbio;
	u64 sublen;
	u64 mapped_length;
	u64 stripe_offset;
	int stripe_index;
	int page_index = 0;
	int mirror_index;
	int nmirrors;
	int ret;

	/*
	 * note: the two members refs and outstanding_pages
	 * are not used (and not set) in the blocks that are used for
	 * the recheck procedure
	 */

	while (length > 0) {
		sublen = min_t(u64, length, PAGE_SIZE);
		mapped_length = sublen;
		bbio = NULL;

		/*
		 * with a length of PAGE_SIZE, each returned stripe
		 * represents one mirror
		 */
		ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical,
				       &mapped_length, &bbio, 0, 1);
		if (ret || !bbio || mapped_length < sublen) {
			btrfs_put_bbio(bbio);
			return -EIO;
		}

		recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
		if (!recover) {
			btrfs_put_bbio(bbio);
			return -ENOMEM;
		}

		atomic_set(&recover->refs, 1);
		recover->bbio = bbio;
		recover->map_length = mapped_length;

		BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO);

		nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS);

		for (mirror_index = 0; mirror_index < nmirrors;
		     mirror_index++) {
			struct scrub_block *sblock;
			struct scrub_page *page;

			sblock = sblocks_for_recheck + mirror_index;
			sblock->sctx = sctx;
			page = kzalloc(sizeof(*page), GFP_NOFS);
			if (!page) {
leave_nomem:
				spin_lock(&sctx->stat_lock);
				sctx->stat.malloc_errors++;
				spin_unlock(&sctx->stat_lock);
				scrub_put_recover(recover);
				return -ENOMEM;
			}
			scrub_page_get(page);
			sblock->pagev[page_index] = page;
			page->logical = logical;

			scrub_stripe_index_and_offset(logical,
						      bbio->map_type,
						      bbio->raid_map,
						      mapped_length,
						      bbio->num_stripes -
						      bbio->num_tgtdevs,
						      mirror_index,
						      &stripe_index,
						      &stripe_offset);
			page->physical = bbio->stripes[stripe_index].physical +
					 stripe_offset;
			page->dev = bbio->stripes[stripe_index].dev;

			BUG_ON(page_index >= original_sblock->page_count);
			page->physical_for_dev_replace =
				original_sblock->pagev[page_index]->
				physical_for_dev_replace;
			/* for missing devices, dev->bdev is NULL */
			page->mirror_num = mirror_index + 1;
			sblock->page_count++;
			page->page = alloc_page(GFP_NOFS);
			if (!page->page)
				goto leave_nomem;

			scrub_get_recover(recover);
			page->recover = recover;
		}
		scrub_put_recover(recover);
		length -= sublen;
		logical += sublen;
		page_index++;
	}

	return 0;
}

struct scrub_bio_ret {
	struct completion event;
	int error;
};

static void scrub_bio_wait_endio(struct bio *bio, int error)
{
	struct scrub_bio_ret *ret = bio->bi_private;

	ret->error = error;
	complete(&ret->event);
}

static inline int scrub_is_page_on_raid56(struct scrub_page *page)
{
	return page->recover &&
	       (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
}

static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
					struct bio *bio,
					struct scrub_page *page)
{
	struct scrub_bio_ret done;
	int ret;

	init_completion(&done.event);
	done.error = 0;
	bio->bi_iter.bi_sector = page->logical >> 9;
	bio->bi_private = &done;
	bio->bi_end_io = scrub_bio_wait_endio;

	ret = raid56_parity_recover(fs_info->fs_root, bio, page->recover->bbio,
				    page->recover->map_length,
				    page->mirror_num, 0);
	if (ret)
		return ret;

	wait_for_completion(&done.event);
	if (done.error)
		return -EIO;

	return 0;
}

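/*
 * This is the usual submit-and-wait pattern: the caller sleeps on a
 * stack-allocated completion that the end_io callback signals, turning
 * the asynchronous RAID56 recovery read into a synchronous call for the
 * recheck path. Note the >> 9 conversions used throughout this file:
 * bio sector numbers are in 512-byte units, hence byte_offset >> 9.
 */
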
/*
 * this function will check the on disk data for checksum errors, header
 * errors and read I/O errors. If any I/O errors happen, the exact pages
 * which are errored are marked as being bad. The goal is to enable scrub
 * to take those pages that are not errored from all the mirrors so that
 * the pages that are errored in the just handled mirror can be repaired.
 */
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock, int is_metadata,
				int have_csum, u8 *csum, u64 generation,
				u16 csum_size, int retry_failed_mirror)
{
	int page_num;

	sblock->no_io_error_seen = 1;
	sblock->header_error = 0;
	sblock->checksum_error = 0;

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		struct bio *bio;
		struct scrub_page *page = sblock->pagev[page_num];

		if (page->dev->bdev == NULL) {
			page->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}

		WARN_ON(!page->page);
		bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
		if (!bio) {
			page->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}
		bio->bi_bdev = page->dev->bdev;

		bio_add_page(bio, page->page, PAGE_SIZE, 0);
		if (!retry_failed_mirror && scrub_is_page_on_raid56(page)) {
			if (scrub_submit_raid56_bio_wait(fs_info, bio, page))
				sblock->no_io_error_seen = 0;
		} else {
			bio->bi_iter.bi_sector = page->physical >> 9;

			if (btrfsic_submit_bio_wait(READ, bio))
				sblock->no_io_error_seen = 0;
		}

		bio_put(bio);
	}

	if (sblock->no_io_error_seen)
		scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
					     have_csum, csum, generation,
					     csum_size);
}

static inline int scrub_check_fsid(u8 fsid[],
				   struct scrub_page *spage)
{
	struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
	int ret;

	ret = memcmp(fsid, fs_devices->fsid, BTRFS_UUID_SIZE);
	return !ret;
}

static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
					 struct scrub_block *sblock,
					 int is_metadata, int have_csum,
					 const u8 *csum, u64 generation,
					 u16 csum_size)
{
	int page_num;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u32 crc = ~(u32)0;
	void *mapped_buffer;

	WARN_ON(!sblock->pagev[0]->page);
	if (is_metadata) {
		struct btrfs_header *h;

		mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
		h = (struct btrfs_header *)mapped_buffer;

		if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h) ||
		    !scrub_check_fsid(h->fsid, sblock->pagev[0]) ||
		    memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
			   BTRFS_UUID_SIZE)) {
			sblock->header_error = 1;
		} else if (generation != btrfs_stack_header_generation(h)) {
			sblock->header_error = 1;
			sblock->generation_error = 1;
		}
		csum = h->csum;
	} else {
		if (!have_csum)
			return;

		mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
	}

	for (page_num = 0;;) {
		if (page_num == 0 && is_metadata)
			crc = btrfs_csum_data(
				((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE,
				crc, PAGE_SIZE - BTRFS_CSUM_SIZE);
		else
			crc = btrfs_csum_data(mapped_buffer, crc, PAGE_SIZE);

		kunmap_atomic(mapped_buffer);
		page_num++;
		if (page_num >= sblock->page_count)
			break;
		WARN_ON(!sblock->pagev[page_num]->page);

		mapped_buffer = kmap_atomic(sblock->pagev[page_num]->page);
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, csum, csum_size))
		sblock->checksum_error = 1;
}

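/*
 * The loop above checksums the block one page at a time because the
 * pages are separate allocations rather than one contiguous buffer:
 * each page is kmap_atomic()'d, fed into the running crc and unmapped
 * before the next page is mapped. For metadata, the first
 * BTRFS_CSUM_SIZE bytes hold the stored checksum itself and are
 * excluded from the calculation.
 */
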
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good)
{
	int page_num;
	int ret = 0;

	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
		int ret_sub;

		ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
							   sblock_good,
							   page_num, 1);
		if (ret_sub)
			ret = ret_sub;
	}

	return ret;
}

static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write)
{
	struct scrub_page *page_bad = sblock_bad->pagev[page_num];
	struct scrub_page *page_good = sblock_good->pagev[page_num];

	BUG_ON(page_bad->page == NULL);
	BUG_ON(page_good->page == NULL);
	if (force_write || sblock_bad->header_error ||
	    sblock_bad->checksum_error || page_bad->io_error) {
		struct bio *bio;
		int ret;

		if (!page_bad->dev->bdev) {
			printk_ratelimited(KERN_WARNING "BTRFS: "
				"scrub_repair_page_from_good_copy(bdev == NULL) "
				"is unexpected!\n");
			return -EIO;
		}

		bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
		if (!bio)
			return -EIO;
		bio->bi_bdev = page_bad->dev->bdev;
		bio->bi_iter.bi_sector = page_bad->physical >> 9;

		ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
		if (PAGE_SIZE != ret) {
			bio_put(bio);
			return -EIO;
		}

		if (btrfsic_submit_bio_wait(WRITE, bio)) {
			btrfs_dev_stat_inc_and_print(page_bad->dev,
				BTRFS_DEV_STAT_WRITE_ERRS);
			btrfs_dev_replace_stats_inc(
				&sblock_bad->sctx->dev_root->fs_info->
				dev_replace.num_write_errors);
			bio_put(bio);
			return -EIO;
		}
		bio_put(bio);
	}

	return 0;
}

static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
{
	int page_num;

	/*
	 * This block is used for the check of the parity on the source device,
	 * so the data needn't be written into the destination device.
	 */
	if (sblock->sparity)
		return;

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		int ret;

		ret = scrub_write_page_to_dev_replace(sblock, page_num);
		if (ret)
			btrfs_dev_replace_stats_inc(
				&sblock->sctx->dev_root->fs_info->dev_replace.
				num_write_errors);
	}
}

static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num)
{
	struct scrub_page *spage = sblock->pagev[page_num];

	BUG_ON(spage->page == NULL);
	if (spage->io_error) {
		void *mapped_buffer = kmap_atomic(spage->page);

		memset(mapped_buffer, 0, PAGE_CACHE_SIZE);
		flush_dcache_page(spage->page);
		kunmap_atomic(mapped_buffer);
	}
	return scrub_add_page_to_wr_bio(sblock->sctx, spage);
}

static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage)
{
	struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
	struct scrub_bio *sbio;
	int ret;

	mutex_lock(&wr_ctx->wr_lock);
again:
	if (!wr_ctx->wr_curr_bio) {
		wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
					      GFP_NOFS);
		if (!wr_ctx->wr_curr_bio) {
			mutex_unlock(&wr_ctx->wr_lock);
			return -ENOMEM;
		}
		wr_ctx->wr_curr_bio->sctx = sctx;
		wr_ctx->wr_curr_bio->page_count = 0;
	}
	sbio = wr_ctx->wr_curr_bio;
	if (sbio->page_count == 0) {
		struct bio *bio;

		sbio->physical = spage->physical_for_dev_replace;
		sbio->logical = spage->logical;
		sbio->dev = wr_ctx->tgtdev;
		bio = sbio->bio;
		if (!bio) {
			bio = btrfs_io_bio_alloc(GFP_NOFS,
						 wr_ctx->pages_per_wr_bio);
			if (!bio) {
				mutex_unlock(&wr_ctx->wr_lock);
				return -ENOMEM;
			}
			sbio->bio = bio;
		}

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_wr_bio_end_io;
		bio->bi_bdev = sbio->dev->bdev;
		bio->bi_iter.bi_sector = sbio->physical >> 9;
		sbio->err = 0;
	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
		   spage->physical_for_dev_replace ||
		   sbio->logical + sbio->page_count * PAGE_SIZE !=
		   spage->logical) {
		scrub_wr_submit(sctx);
		goto again;
	}

	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
		if (sbio->page_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			mutex_unlock(&wr_ctx->wr_lock);
			return -EIO;
		}
		scrub_wr_submit(sctx);
		goto again;
	}

	sbio->pagev[sbio->page_count] = spage;
	scrub_page_get(spage);
	sbio->page_count++;
	if (sbio->page_count == wr_ctx->pages_per_wr_bio)
		scrub_wr_submit(sctx);
	mutex_unlock(&wr_ctx->wr_lock);

	return 0;
}

static void scrub_wr_submit(struct scrub_ctx *sctx)
{
	struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
	struct scrub_bio *sbio;

	if (!wr_ctx->wr_curr_bio)
		return;

	sbio = wr_ctx->wr_curr_bio;
	wr_ctx->wr_curr_bio = NULL;
	WARN_ON(!sbio->bio->bi_bdev);
	scrub_pending_bio_inc(sctx);
	/* process all writes in a single worker thread. Then the block layer
	 * orders the requests before sending them to the driver which
	 * doubled the write performance on spinning disks when measured
	 * with Linux 3.5 */
	btrfsic_submit_bio(WRITE, sbio->bio);
}

static void scrub_wr_bio_end_io(struct bio *bio, int err)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;

	sbio->err = err;
	sbio->bio = bio;

	btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
			scrub_wr_bio_end_io_worker, NULL, NULL);
	btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
}

static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
	if (sbio->err) {
		struct btrfs_dev_replace *dev_replace =
			&sbio->sctx->dev_root->fs_info->dev_replace;

		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			btrfs_dev_replace_stats_inc(&dev_replace->
						    num_write_errors);
		}
	}

	for (i = 0; i < sbio->page_count; i++)
		scrub_page_put(sbio->pagev[i]);

	bio_put(sbio->bio);
	kfree(sbio);
	scrub_pending_bio_dec(sctx);
}

static int scrub_checksum(struct scrub_block *sblock)
{
	u64 flags;
	int ret;

	WARN_ON(sblock->page_count < 1);
	flags = sblock->pagev[0]->flags;
	ret = 0;
	if (flags & BTRFS_EXTENT_FLAG_DATA)
		ret = scrub_checksum_data(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		ret = scrub_checksum_tree_block(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_SUPER)
		(void)scrub_checksum_super(sblock);
	else
		WARN_ON(1);
	if (ret)
		scrub_handle_errored_block(sblock);

	return ret;
}

static int scrub_checksum_data(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	u8 csum[BTRFS_CSUM_SIZE];
	u8 *on_disk_csum;
	struct page *page;
	void *buffer;
	u32 crc = ~(u32)0;
	int fail = 0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	if (!sblock->pagev[0]->have_csum)
		return 0;

	on_disk_csum = sblock->pagev[0]->csum;
	page = sblock->pagev[0]->page;
	buffer = kmap_atomic(page);

	len = sctx->sectorsize;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, PAGE_SIZE);

		crc = btrfs_csum_data(buffer, crc, l);
		kunmap_atomic(buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		buffer = kmap_atomic(page);
	}

	btrfs_csum_final(crc, csum);
	if (memcmp(csum, on_disk_csum, sctx->csum_size))
		fail = 1;

	return fail;
}

static int scrub_checksum_tree_block(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_header *h;
	struct btrfs_root *root = sctx->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	struct page *page;
	void *mapped_buffer;
	u64 mapped_size;
	void *p;
	u32 crc = ~(u32)0;
	int fail = 0;
	int crc_fail = 0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	page = sblock->pagev[0]->page;
	mapped_buffer = kmap_atomic(page);
	h = (struct btrfs_header *)mapped_buffer;
	memcpy(on_disk_csum, h->csum, sctx->csum_size);

	/*
	 * we don't use the getter functions here, as we
	 * a) don't have an extent buffer and
	 * b) the page is already kmapped
	 */

	if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
		++fail;

	if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h))
		++fail;

	if (!scrub_check_fsid(h->fsid, sblock->pagev[0]))
		++fail;

	if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
		   BTRFS_UUID_SIZE))
		++fail;

	len = sctx->nodesize - BTRFS_CSUM_SIZE;
	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, mapped_size);

		crc = btrfs_csum_data(p, crc, l);
		kunmap_atomic(mapped_buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		mapped_buffer = kmap_atomic(page);
		mapped_size = PAGE_SIZE;
		p = mapped_buffer;
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
		++crc_fail;

	return fail || crc_fail;
}

static int scrub_checksum_super(struct scrub_block *sblock)
{
	struct btrfs_super_block *s;
	struct scrub_ctx *sctx = sblock->sctx;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	struct page *page;
	void *mapped_buffer;
	u64 mapped_size;
	void *p;
	u32 crc = ~(u32)0;
	int fail_gen = 0;
	int fail_cor = 0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	page = sblock->pagev[0]->page;
	mapped_buffer = kmap_atomic(page);
	s = (struct btrfs_super_block *)mapped_buffer;
	memcpy(on_disk_csum, s->csum, sctx->csum_size);

	if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
		++fail_cor;

	if (sblock->pagev[0]->generation != btrfs_super_generation(s))
		++fail_gen;

	if (!scrub_check_fsid(s->fsid, sblock->pagev[0]))
		++fail_cor;

	len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, mapped_size);

		crc = btrfs_csum_data(p, crc, l);
		kunmap_atomic(mapped_buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		mapped_buffer = kmap_atomic(page);
		mapped_size = PAGE_SIZE;
		p = mapped_buffer;
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
		++fail_cor;

	if (fail_cor + fail_gen) {
		/*
		 * if we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		if (fail_cor)
			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
	}

	return fail_cor + fail_gen;
}

static void scrub_block_get(struct scrub_block *sblock)
{
	atomic_inc(&sblock->refs);
}

static void scrub_block_put(struct scrub_block *sblock)
{
	if (atomic_dec_and_test(&sblock->refs)) {
		int i;

		if (sblock->sparity)
			scrub_parity_put(sblock->sparity);

		for (i = 0; i < sblock->page_count; i++)
			scrub_page_put(sblock->pagev[i]);
		kfree(sblock);
	}
}

static void scrub_page_get(struct scrub_page *spage)
{
	atomic_inc(&spage->refs);
}

static void scrub_page_put(struct scrub_page *spage)
{
	if (atomic_dec_and_test(&spage->refs)) {
		if (spage->page)
			__free_page(spage->page);
		kfree(spage);
	}
}

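/*
 * Ownership chain of the get/put helpers above: a scrub_block holds a
 * reference on each of its pages, a page added to a bio holds an extra
 * block reference, and blocks that belong to a parity run pin their
 * scrub_parity. Teardown is therefore leaf-last: dropping the final
 * block reference drops the page references, and only the final page
 * reference frees the page itself.
 */
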
static void scrub_submit(struct scrub_ctx *sctx)
{
	struct scrub_bio *sbio;

	if (sctx->curr == -1)
		return;

	sbio = sctx->bios[sctx->curr];
	sctx->curr = -1;
	scrub_pending_bio_inc(sctx);

	if (!sbio->bio->bi_bdev) {
		/*
		 * this case should not happen. If btrfs_map_block() is
		 * wrong, it could happen for dev-replace operations on
		 * missing devices when no mirrors are available, but in
		 * this case it should already fail the mount.
		 * This case is handled correctly (but _very_ slowly).
		 */
		printk_ratelimited(KERN_WARNING
			"BTRFS: scrub_submit(bio bdev == NULL) is unexpected!\n");
		bio_endio(sbio->bio, -EIO);
	} else {
		btrfsic_submit_bio(READ, sbio->bio);
	}
}

static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage)
{
	struct scrub_block *sblock = spage->sblock;
	struct scrub_bio *sbio;
	int ret;

again:
	/*
	 * grab a fresh bio or wait for one to become available
	 */
	while (sctx->curr == -1) {
		spin_lock(&sctx->list_lock);
		sctx->curr = sctx->first_free;
		if (sctx->curr != -1) {
			sctx->first_free = sctx->bios[sctx->curr]->next_free;
			sctx->bios[sctx->curr]->next_free = -1;
			sctx->bios[sctx->curr]->page_count = 0;
			spin_unlock(&sctx->list_lock);
		} else {
			spin_unlock(&sctx->list_lock);
			wait_event(sctx->list_wait, sctx->first_free != -1);
		}
	}
	sbio = sctx->bios[sctx->curr];
	if (sbio->page_count == 0) {
		struct bio *bio;

		sbio->physical = spage->physical;
		sbio->logical = spage->logical;
		sbio->dev = spage->dev;
		bio = sbio->bio;
		if (!bio) {
			bio = btrfs_io_bio_alloc(GFP_NOFS,
						 sctx->pages_per_rd_bio);
			if (!bio)
				return -ENOMEM;
			sbio->bio = bio;
		}

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_bio_end_io;
		bio->bi_bdev = sbio->dev->bdev;
		bio->bi_iter.bi_sector = sbio->physical >> 9;
		sbio->err = 0;
	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
		   spage->physical ||
		   sbio->logical + sbio->page_count * PAGE_SIZE !=
		   spage->logical ||
		   sbio->dev != spage->dev) {
		scrub_submit(sctx);
		goto again;
	}

	sbio->pagev[sbio->page_count] = spage;
	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
		if (sbio->page_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			return -EIO;
		}
		scrub_submit(sctx);
		goto again;
	}

	scrub_block_get(sblock); /* one for the page added to the bio */
	atomic_inc(&sblock->outstanding_pages);
	sbio->page_count++;
	if (sbio->page_count == sctx->pages_per_rd_bio)
		scrub_submit(sctx);

	return 0;
}

static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace)
{
	struct scrub_block *sblock;
	int index;

	sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
	if (!sblock) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	/* one ref inside this function, plus one for each page added to
	 * a bio later on */
	atomic_set(&sblock->refs, 1);
	sblock->sctx = sctx;
	sblock->no_io_error_seen = 1;

	for (index = 0; len > 0; index++) {
		struct scrub_page *spage;
		u64 l = min_t(u64, len, PAGE_SIZE);

		spage = kzalloc(sizeof(*spage), GFP_NOFS);
		if (!spage) {
leave_nomem:
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			spin_unlock(&sctx->stat_lock);
			scrub_block_put(sblock);
			return -ENOMEM;
		}
		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
		scrub_page_get(spage);
		sblock->pagev[index] = spage;
		spage->sblock = sblock;
		spage->dev = dev;
		spage->flags = flags;
		spage->generation = gen;
		spage->logical = logical;
		spage->physical = physical;
		spage->physical_for_dev_replace = physical_for_dev_replace;
		spage->mirror_num = mirror_num;
		if (csum) {
			spage->have_csum = 1;
			memcpy(spage->csum, csum, sctx->csum_size);
		} else {
			spage->have_csum = 0;
		}
		sblock->page_count++;
		spage->page = alloc_page(GFP_NOFS);
		if (!spage->page)
			goto leave_nomem;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}

	WARN_ON(sblock->page_count == 0);
	for (index = 0; index < sblock->page_count; index++) {
		struct scrub_page *spage = sblock->pagev[index];
		int ret;

		ret = scrub_add_page_to_rd_bio(sctx, spage);
		if (ret) {
			scrub_block_put(sblock);
			return ret;
		}
	}

	if (force)
		scrub_submit(sctx);

	/* last one frees, either here or in bio completion for last page */
	scrub_block_put(sblock);
	return 0;
}
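/*
 * Reference lifecycle of the block built above (descriptive): this function
 * holds one ref, and each page queued via scrub_add_page_to_rd_bio() takes
 * another on behalf of the bio. Whichever scrub_block_put() runs last -
 * the one here or the one in the bio completion worker - frees the block.
 */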
static void scrub_bio_end_io(struct bio *bio, int err)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;

	sbio->err = err;
	sbio->bio = bio;

	btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
}
static void scrub_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
	if (sbio->err) {
		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			spage->sblock->no_io_error_seen = 0;
		}
	}

	/* now complete the scrub_block items that have all pages completed */
	for (i = 0; i < sbio->page_count; i++) {
		struct scrub_page *spage = sbio->pagev[i];
		struct scrub_block *sblock = spage->sblock;

		if (atomic_dec_and_test(&sblock->outstanding_pages))
			scrub_block_complete(sblock);
		scrub_block_put(sblock);
	}

	bio_put(sbio->bio);
	sbio->bio = NULL;
	spin_lock(&sctx->list_lock);
	sbio->next_free = sctx->first_free;
	sctx->first_free = sbio->index;
	spin_unlock(&sctx->list_lock);

	if (sctx->is_dev_replace &&
	    atomic_read(&sctx->wr_ctx.flush_all_writes)) {
		mutex_lock(&sctx->wr_ctx.wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_ctx.wr_lock);
	}

	scrub_pending_bio_dec(sctx);
}
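/*
 * Completion flow (descriptive): scrub_bio_end_io() above only records the
 * error and queues this worker; all heavier work - checksum verification via
 * scrub_block_complete(), possible repair, and recycling the scrub_bio back
 * onto the free list - happens here in process context, not in the bio
 * end_io path.
 */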
static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
				       unsigned long *bitmap,
				       u64 start, u64 len)
{
	u32 offset;
	int nsectors;
	int sectorsize = sparity->sctx->dev_root->sectorsize;

	if (len >= sparity->stripe_len) {
		bitmap_set(bitmap, 0, sparity->nsectors);
		return;
	}

	start -= sparity->logic_start;
	start = div_u64_rem(start, sparity->stripe_len, &offset);
	offset /= sectorsize;
	nsectors = (int)len / sectorsize;

	if (offset + nsectors <= sparity->nsectors) {
		bitmap_set(bitmap, offset, nsectors);
		return;
	}

	bitmap_set(bitmap, offset, sparity->nsectors - offset);
	bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
}
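/*
 * Worked example for the wrap-around case above (illustrative numbers:
 * stripe_len 64K, sectorsize 4K, so nsectors = 16): marking a 16K range
 * that begins at sector offset 14 sets sectors 14-15 with the first
 * bitmap_set() and wraps to set sectors 0-1 with the second.
 */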
static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
						   u64 start, u64 len)
{
	__scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
}

static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
						  u64 start, u64 len)
{
	__scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
}
static void scrub_block_complete(struct scrub_block *sblock)
{
	int corrupted = 0;

	if (!sblock->no_io_error_seen) {
		corrupted = 1;
		scrub_handle_errored_block(sblock);
	} else {
		/*
		 * If the block has a checksum error, it is written out via
		 * the repair mechanism in the dev replace case; otherwise
		 * it is written to the replace target right here.
		 */
		corrupted = scrub_checksum(sblock);
		if (!corrupted && sblock->sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock);
	}

	if (sblock->sparity && corrupted && !sblock->data_corrected) {
		u64 start = sblock->pagev[0]->logical;
		u64 end = sblock->pagev[sblock->page_count - 1]->logical +
			  PAGE_SIZE;

		scrub_parity_mark_sectors_error(sblock->sparity,
						start, end - start);
	}
}
static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u64 len,
			   u8 *csum)
{
	struct btrfs_ordered_sum *sum = NULL;
	unsigned long index;
	unsigned long num_sectors;

	while (!list_empty(&sctx->csum_list)) {
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		if (sum->bytenr > logical)
			return 0;
		if (sum->bytenr + sum->len > logical)
			break;

		++sctx->stat.csum_discards;
		list_del(&sum->list);
		kfree(sum);
		sum = NULL;
	}
	if (!sum)
		return 0;

	index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize;
	num_sectors = sum->len / sctx->sectorsize;
	memcpy(csum, sum->sums + index, sctx->csum_size);
	if (index == num_sectors - 1) {
		list_del(&sum->list);
		kfree(sum);
	}
	return 1;
}
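/*
 * Note (descriptive): csum_list is filled in ascending logical order by
 * btrfs_lookup_csums_range() before an extent is scrubbed, so a sum that
 * ends before the requested logical address can never match a later lookup
 * and is dropped above, accounted as a csum_discard.
 */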
/* scrub extent tries to collect up to 64 kB for each bio */
static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
			u64 physical, struct btrfs_device *dev, u64 flags,
			u64 gen, int mirror_num, u64 physical_for_dev_replace)
{
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		blocksize = sctx->sectorsize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.data_extents_scrubbed++;
		sctx->stat.data_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		blocksize = sctx->nodesize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.tree_extents_scrubbed++;
		sctx->stat.tree_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else {
		blocksize = sctx->sectorsize;
		WARN_ON(1);
	}

	while (len) {
		u64 l = min_t(u64, len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sctx, logical, l, csum);
			if (have_csum == 0)
				++sctx->stat.no_csum;
			if (sctx->is_dev_replace && !have_csum) {
				ret = copy_nocow_pages(sctx, logical, l,
						       mirror_num,
						       physical_for_dev_replace);
				goto behind_scrub_pages;
			}
		}
		ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
				  mirror_num, have_csum ? csum : NULL, 0,
				  physical_for_dev_replace);
behind_scrub_pages:
		if (ret)
			return ret;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}
	return 0;
}
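/*
 * How the walk above chunks an extent (descriptive): data extents are
 * scrubbed in sectorsize units, one checksum per sector, while tree blocks
 * are scrubbed in nodesize units. In the dev-replace case, data without a
 * checksum cannot be verified by reading, so it is copied through the page
 * cache via copy_nocow_pages() instead of going through scrub_pages().
 */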
static int scrub_pages_for_parity(struct scrub_parity *sparity,
				  u64 logical, u64 len,
				  u64 physical, struct btrfs_device *dev,
				  u64 flags, u64 gen, int mirror_num, u8 *csum)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct scrub_block *sblock;
	int index;

	sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
	if (!sblock) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	/* one ref inside this function, plus one for each page added to
	 * a bio later on */
	atomic_set(&sblock->refs, 1);
	sblock->sctx = sctx;
	sblock->no_io_error_seen = 1;
	sblock->sparity = sparity;
	scrub_parity_get(sparity);

	for (index = 0; len > 0; index++) {
		struct scrub_page *spage;
		u64 l = min_t(u64, len, PAGE_SIZE);

		spage = kzalloc(sizeof(*spage), GFP_NOFS);
		if (!spage) {
leave_nomem:
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			spin_unlock(&sctx->stat_lock);
			scrub_block_put(sblock);
			return -ENOMEM;
		}
		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
		/* For scrub block */
		scrub_page_get(spage);
		sblock->pagev[index] = spage;
		/* For scrub parity */
		scrub_page_get(spage);
		list_add_tail(&spage->list, &sparity->spages);
		spage->sblock = sblock;
		spage->dev = dev;
		spage->flags = flags;
		spage->generation = gen;
		spage->logical = logical;
		spage->physical = physical;
		spage->mirror_num = mirror_num;
		if (csum) {
			spage->have_csum = 1;
			memcpy(spage->csum, csum, sctx->csum_size);
		} else {
			spage->have_csum = 0;
		}
		sblock->page_count++;
		spage->page = alloc_page(GFP_NOFS);
		if (!spage->page)
			goto leave_nomem;
		len -= l;
		logical += l;
		physical += l;
	}

	WARN_ON(sblock->page_count == 0);
	for (index = 0; index < sblock->page_count; index++) {
		struct scrub_page *spage = sblock->pagev[index];
		int ret;

		ret = scrub_add_page_to_rd_bio(sctx, spage);
		if (ret) {
			scrub_block_put(sblock);
			return ret;
		}
	}

	/* last one frees, either here or in bio completion for last page */
	scrub_block_put(sblock);
	return 0;
}
static int scrub_extent_for_parity(struct scrub_parity *sparity,
				   u64 logical, u64 len,
				   u64 physical, struct btrfs_device *dev,
				   u64 flags, u64 gen, int mirror_num)
{
	struct scrub_ctx *sctx = sparity->sctx;
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		blocksize = sctx->sectorsize;
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		blocksize = sctx->nodesize;
	} else {
		blocksize = sctx->sectorsize;
		WARN_ON(1);
	}

	while (len) {
		u64 l = min_t(u64, len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sctx, logical, l, csum);
		}
		ret = scrub_pages_for_parity(sparity, logical, l, physical,
					     dev, flags, gen, mirror_num,
					     have_csum ? csum : NULL);
		if (ret)
			return ret;
		len -= l;
		logical += l;
		physical += l;
	}
	return 0;
}
/*
 * Given a physical address, this will calculate its
 * logical offset. If this is a parity stripe, it will return
 * the leftmost data stripe's logical offset.
 *
 * Returns 0 if it is a data stripe, 1 if it is a parity stripe.
 */
static int get_raid56_logic_offset(u64 physical, int num,
				   struct map_lookup *map, u64 *offset,
				   u64 *stripe_start)
{
	int i;
	int j = 0;
	u64 stripe_nr;
	u64 last_offset;
	u32 stripe_index;
	u32 rot;

	last_offset = (physical - map->stripes[num].physical) *
		      nr_data_stripes(map);
	if (stripe_start)
		*stripe_start = last_offset;

	*offset = last_offset;
	for (i = 0; i < nr_data_stripes(map); i++) {
		*offset = last_offset + i * map->stripe_len;

		stripe_nr = div_u64(*offset, map->stripe_len);
		stripe_nr = div_u64(stripe_nr, nr_data_stripes(map));

		/* Work out the disk rotation on this stripe-set */
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
		/* calculate which stripe this data locates */
		rot += i;
		stripe_index = rot % map->num_stripes;
		if (stripe_index == num)
			return 0;
		if (stripe_index < num)
			j++;
	}
	*offset = last_offset + j * map->stripe_len;
	return 1;
}
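/*
 * Worked example (illustrative: RAID5 over 3 devices, 64K stripe_len, i.e.
 * 2 data stripes per full stripe): a physical offset of 64K into device 0's
 * extent yields last_offset = 64K * 2 = 128K, the start of full stripe #1.
 * The loop then checks which rotated column of that full stripe lands on
 * device 0; if none does, device 0 holds parity there, so 1 is returned and
 * *offset points at the leftmost data stripe of the full stripe.
 */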
static void scrub_free_parity(struct scrub_parity *sparity)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct scrub_page *curr, *next;
	int nbits;

	nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
	if (nbits) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors += nbits;
		sctx->stat.uncorrectable_errors += nbits;
		spin_unlock(&sctx->stat_lock);
	}

	list_for_each_entry_safe(curr, next, &sparity->spages, list) {
		list_del_init(&curr->list);
		scrub_page_put(curr);
	}

	kfree(sparity);
}
static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
{
	struct scrub_parity *sparity = container_of(work, struct scrub_parity,
						    work);
	struct scrub_ctx *sctx = sparity->sctx;

	scrub_free_parity(sparity);
	scrub_pending_bio_dec(sctx);
}
static void scrub_parity_bio_endio(struct bio *bio, int error)
{
	struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;

	if (error)
		bitmap_or(sparity->ebitmap, sparity->ebitmap,
			  sparity->dbitmap, sparity->nsectors);

	bio_put(bio);

	btrfs_init_work(&sparity->work, btrfs_scrubparity_helper,
			scrub_parity_bio_endio_worker, NULL, NULL);
	btrfs_queue_work(sparity->sctx->dev_root->fs_info->scrub_parity_workers,
			 &sparity->work);
}
static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct bio *bio;
	struct btrfs_raid_bio *rbio;
	struct scrub_page *spage;
	struct btrfs_bio *bbio = NULL;
	u64 length;
	int ret;

	if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap,
			   sparity->ebitmap, sparity->nsectors))
		goto out;

	length = sparity->logic_end - sparity->logic_start;
	ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE,
			       sparity->logic_start,
			       &length, &bbio, 0, 1);
	if (ret || !bbio || !bbio->raid_map)
		goto bbio_out;

	bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
	if (!bio)
		goto bbio_out;

	bio->bi_iter.bi_sector = sparity->logic_start >> 9;
	bio->bi_private = sparity;
	bio->bi_end_io = scrub_parity_bio_endio;

	rbio = raid56_parity_alloc_scrub_rbio(sctx->dev_root, bio, bbio,
					      length, sparity->scrub_dev,
					      sparity->dbitmap,
					      sparity->nsectors);
	if (!rbio)
		goto rbio_out;

	list_for_each_entry(spage, &sparity->spages, list)
		raid56_parity_add_scrub_pages(rbio, spage->page,
					      spage->logical);

	scrub_pending_bio_inc(sctx);
	raid56_parity_submit_scrub_rbio(rbio);
	return;

rbio_out:
	bio_put(bio);
bbio_out:
	btrfs_put_bbio(bbio);
	bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
		  sparity->nsectors);
	spin_lock(&sctx->stat_lock);
	sctx->stat.malloc_errors++;
	spin_unlock(&sctx->stat_lock);
out:
	scrub_free_parity(sparity);
}
static inline int scrub_calc_parity_bitmap_len(int nsectors)
{
	return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * (BITS_PER_LONG / 8);
}
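/*
 * Example of the arithmetic above (illustrative: 64K stripe_len and 4K
 * sectors give nsectors = 16): on a 64-bit host this evaluates to
 * DIV_ROUND_UP(16, 64) * (64 / 8) = 1 * 8 = 8 bytes per bitmap, i.e. the
 * length is always rounded up to a whole number of longs.
 */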
static void scrub_parity_get(struct scrub_parity *sparity)
{
	atomic_inc(&sparity->refs);
}

static void scrub_parity_put(struct scrub_parity *sparity)
{
	if (!atomic_dec_and_test(&sparity->refs))
		return;

	scrub_parity_check_and_repair(sparity);
}
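/*
 * Note (descriptive): dropping the last parity reference is what kicks off
 * the actual parity verification. Every scrub_block of the stripe set holds
 * a reference, so scrub_parity_check_and_repair() only runs once all of the
 * data reads have completed.
 */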
static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
						  struct map_lookup *map,
						  struct btrfs_device *sdev,
						  struct btrfs_path *path,
						  u64 logic_start,
						  u64 logic_end)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_root *csum_root = fs_info->csum_root;
	struct btrfs_extent_item *extent;
	u64 flags;
	int ret;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	u64 generation;
	u64 extent_logical;
	u64 extent_physical;
	u64 extent_len;
	struct btrfs_device *extent_dev;
	struct scrub_parity *sparity;
	int nsectors;
	int bitmap_len;
	int extent_mirror_num;
	int stop_loop = 0;

	nsectors = map->stripe_len / root->sectorsize;
	bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
	sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
			  GFP_NOFS);
	if (!sparity) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	sparity->stripe_len = map->stripe_len;
	sparity->nsectors = nsectors;
	sparity->sctx = sctx;
	sparity->scrub_dev = sdev;
	sparity->logic_start = logic_start;
	sparity->logic_end = logic_end;
	atomic_set(&sparity->refs, 1);
	INIT_LIST_HEAD(&sparity->spages);
	sparity->dbitmap = sparity->bitmap;
	sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;

	ret = 0;
	while (logic_start < logic_end) {
		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
			key.type = BTRFS_METADATA_ITEM_KEY;
		else
			key.type = BTRFS_EXTENT_ITEM_KEY;
		key.objectid = logic_start;
		key.offset = (u64)-1;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		if (ret > 0) {
			ret = btrfs_previous_extent_item(root, path, 0);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}

		stop_loop = 0;
		while (1) {
			u64 bytes;

			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;

				stop_loop = 1;
				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
			    key.type != BTRFS_METADATA_ITEM_KEY)
				goto next;

			if (key.type == BTRFS_METADATA_ITEM_KEY)
				bytes = root->nodesize;
			else
				bytes = key.offset;

			if (key.objectid + bytes <= logic_start)
				goto next;

			if (key.objectid >= logic_end) {
				stop_loop = 1;
				break;
			}

			while (key.objectid >= logic_start + map->stripe_len)
				logic_start += map->stripe_len;

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
			    (key.objectid < logic_start ||
			     key.objectid + bytes >
			     logic_start + map->stripe_len)) {
				btrfs_err(fs_info, "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
					  key.objectid, logic_start);
				goto next;
			}
again:
			extent_logical = key.objectid;
			extent_len = bytes;

			if (extent_logical < logic_start) {
				extent_len -= logic_start - extent_logical;
				extent_logical = logic_start;
			}

			if (extent_logical + extent_len >
			    logic_start + map->stripe_len)
				extent_len = logic_start + map->stripe_len -
					     extent_logical;

			scrub_parity_mark_sectors_data(sparity, extent_logical,
						       extent_len);

			scrub_remap_extent(fs_info, extent_logical,
					   extent_len, &extent_physical,
					   &extent_dev,
					   &extent_mirror_num);

			ret = btrfs_lookup_csums_range(csum_root,
						extent_logical,
						extent_logical + extent_len - 1,
						&sctx->csum_list, 1);
			if (ret)
				goto out;

			ret = scrub_extent_for_parity(sparity, extent_logical,
						      extent_len,
						      extent_physical,
						      extent_dev, flags,
						      generation,
						      extent_mirror_num);

			scrub_free_csums(sctx);
			if (ret)
				goto out;

			if (extent_logical + extent_len <
			    key.objectid + bytes) {
				logic_start += map->stripe_len;

				if (logic_start >= logic_end) {
					stop_loop = 1;
					break;
				}

				if (logic_start < key.objectid + bytes) {
					cond_resched();
					goto again;
				}
			}
next:
			path->slots[0]++;
		}

		btrfs_release_path(path);

		if (stop_loop)
			break;

		logic_start += map->stripe_len;
	}
out:
	if (ret < 0)
		scrub_parity_mark_sectors_error(sparity, logic_start,
						logic_end - logic_start);
	scrub_parity_put(sparity);
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_ctx.wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_ctx.wr_lock);

	btrfs_release_path(path);
	return ret < 0 ? ret : 0;
}
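/*
 * Bitmap roles in the function above (descriptive): dbitmap marks sectors
 * that carry data and therefore need their parity verified; ebitmap marks
 * sectors whose data could not be read or verified. Sectors present in
 * ebitmap are masked out of the parity check in
 * scrub_parity_check_and_repair() and accounted as unrecoverable when the
 * scrub_parity is freed.
 */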
static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
					   struct map_lookup *map,
					   struct btrfs_device *scrub_dev,
					   int num, u64 base, u64 length,
					   int is_dev_replace)
{
	struct btrfs_path *path, *ppath;
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_root *csum_root = fs_info->csum_root;
	struct btrfs_extent_item *extent;
	struct blk_plug plug;
	u64 flags;
	int ret;
	int slot;
	u64 nstripes;
	struct extent_buffer *l;
	struct btrfs_key key;
	u64 physical;
	u64 logical;
	u64 logic_end;
	u64 physical_end;
	u64 generation;
	int mirror_num;
	struct reada_control *reada1;
	struct reada_control *reada2;
	struct btrfs_key key_start;
	struct btrfs_key key_end;
	u64 increment = map->stripe_len;
	u64 offset;
	u64 extent_logical;
	u64 extent_physical;
	u64 extent_len;
	u64 stripe_logical;
	u64 stripe_end;
	struct btrfs_device *extent_dev;
	int extent_mirror_num;
	int stop_loop = 0;

	physical = map->stripes[num].physical;
	offset = 0;
	nstripes = div_u64(length, map->stripe_len);
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		offset = map->stripe_len * num;
		increment = map->stripe_len * map->num_stripes;
		mirror_num = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;
		offset = map->stripe_len * (num / map->sub_stripes);
		increment = map->stripe_len * factor;
		mirror_num = num % map->sub_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		get_raid56_logic_offset(physical, num, map, &offset, NULL);
		increment = map->stripe_len * nr_data_stripes(map);
		mirror_num = 1;
	} else {
		increment = map->stripe_len;
		mirror_num = 1;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ppath = btrfs_alloc_path();
	if (!ppath) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	/*
	 * work on commit root. The related disk blocks are static as
	 * long as COW is applied. This means, it is safe to rewrite
	 * them to repair disk errors without any race conditions
	 */
	path->search_commit_root = 1;
	path->skip_locking = 1;

	ppath->search_commit_root = 1;
	ppath->skip_locking = 1;
	/*
	 * trigger the readahead for extent tree csum tree and wait for
	 * completion. During readahead, the scrub is officially paused
	 * to not hold off transaction commits
	 */
	logical = base + offset;
	physical_end = physical + nstripes * map->stripe_len;
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		get_raid56_logic_offset(physical_end, num,
					map, &logic_end, NULL);
		logic_end += base;
	} else {
		logic_end = logical + increment * nstripes;
	}
	wait_event(sctx->list_wait,
		   atomic_read(&sctx->bios_in_flight) == 0);
	scrub_blocked_if_needed(fs_info);

	/* FIXME it might be better to start readahead at commit root */
	key_start.objectid = logical;
	key_start.type = BTRFS_EXTENT_ITEM_KEY;
	key_start.offset = (u64)0;
	key_end.objectid = logic_end;
	key_end.type = BTRFS_METADATA_ITEM_KEY;
	key_end.offset = (u64)-1;
	reada1 = btrfs_reada_add(root, &key_start, &key_end);

	key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_start.type = BTRFS_EXTENT_CSUM_KEY;
	key_start.offset = logical;
	key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_end.type = BTRFS_EXTENT_CSUM_KEY;
	key_end.offset = logic_end;
	reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);

	if (!IS_ERR(reada1))
		btrfs_reada_wait(reada1);
	if (!IS_ERR(reada2))
		btrfs_reada_wait(reada2);

	/*
	 * collect all data csums for the stripe to avoid seeking during
	 * the scrub. This might currently (crc32) end up to be about 1MB
	 */
	blk_start_plug(&plug);

	/*
	 * now find all extents for each stripe and scrub them
	 */
	ret = 0;
	while (physical < physical_end) {
		/*
		 * canceled?
		 */
		if (atomic_read(&fs_info->scrub_cancel_req) ||
		    atomic_read(&sctx->cancel_req)) {
			ret = -ECANCELED;
			goto out;
		}
		/*
		 * check to see if we have to pause
		 */
		if (atomic_read(&fs_info->scrub_pause_req)) {
			/* push queued extents */
			atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
			scrub_submit(sctx);
			mutex_lock(&sctx->wr_ctx.wr_lock);
			scrub_wr_submit(sctx);
			mutex_unlock(&sctx->wr_ctx.wr_lock);
			wait_event(sctx->list_wait,
				   atomic_read(&sctx->bios_in_flight) == 0);
			atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
			scrub_blocked_if_needed(fs_info);
		}

		/* for raid56, we skip parity stripe */
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
			ret = get_raid56_logic_offset(physical, num, map,
						      &logical,
						      &stripe_logical);
			logical += base;
			if (ret) {
				stripe_logical += base;
				stripe_end = stripe_logical + increment;
				ret = scrub_raid56_parity(sctx, map, scrub_dev,
							  ppath, stripe_logical,
							  stripe_end);
				if (ret)
					goto out;
				goto skip;
			}
		}

		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
			key.type = BTRFS_METADATA_ITEM_KEY;
		else
			key.type = BTRFS_EXTENT_ITEM_KEY;
		key.objectid = logical;
		key.offset = (u64)-1;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		if (ret > 0) {
			ret = btrfs_previous_extent_item(root, path, 0);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				/* there's no smaller item, so stick with the
				 * larger one */
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}

		stop_loop = 0;
		while (1) {
			u64 bytes;

			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;

				stop_loop = 1;
				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
			    key.type != BTRFS_METADATA_ITEM_KEY)
				goto next;

			if (key.type == BTRFS_METADATA_ITEM_KEY)
				bytes = root->nodesize;
			else
				bytes = key.offset;

			if (key.objectid + bytes <= logical)
				goto next;

			if (key.objectid >= logical + map->stripe_len) {
				/* out of this device extent */
				if (key.objectid >= logic_end)
					stop_loop = 1;
				break;
			}

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
			    (key.objectid < logical ||
			     key.objectid + bytes >
			     logical + map->stripe_len)) {
				btrfs_err(fs_info,
					  "scrub: tree block %llu spanning "
					  "stripes, ignored. logical=%llu",
					  key.objectid, logical);
				goto next;
			}

again:
			extent_logical = key.objectid;
			extent_len = bytes;

			/*
			 * trim extent to this stripe
			 */
			if (extent_logical < logical) {
				extent_len -= logical - extent_logical;
				extent_logical = logical;
			}
			if (extent_logical + extent_len >
			    logical + map->stripe_len) {
				extent_len = logical + map->stripe_len -
					     extent_logical;
			}

			extent_physical = extent_logical - logical + physical;
			extent_dev = scrub_dev;
			extent_mirror_num = mirror_num;
			if (is_dev_replace)
				scrub_remap_extent(fs_info, extent_logical,
						   extent_len, &extent_physical,
						   &extent_dev,
						   &extent_mirror_num);

			ret = btrfs_lookup_csums_range(csum_root,
						       extent_logical,
						       extent_logical +
						       extent_len - 1,
						       &sctx->csum_list, 1);
			if (ret)
				goto out;

			ret = scrub_extent(sctx, extent_logical, extent_len,
					   extent_physical, extent_dev, flags,
					   generation, extent_mirror_num,
					   extent_logical - logical + physical);

			scrub_free_csums(sctx);
			if (ret)
				goto out;

			if (extent_logical + extent_len <
			    key.objectid + bytes) {
				if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
					/*
					 * loop until we find next data stripe
					 * or we have finished all stripes.
					 */
loop:
					physical += map->stripe_len;
					ret = get_raid56_logic_offset(physical,
							num, map, &logical,
							&stripe_logical);
					logical += base;

					if (ret && physical < physical_end) {
						stripe_logical += base;
						stripe_end = stripe_logical +
								increment;
						ret = scrub_raid56_parity(sctx,
							map, scrub_dev, ppath,
							stripe_logical,
							stripe_end);
						if (ret)
							goto out;
						goto loop;
					}
				} else {
					physical += map->stripe_len;
					logical += increment;
				}
				if (logical < key.objectid + bytes) {
					cond_resched();
					goto again;
				}

				if (physical >= physical_end) {
					stop_loop = 1;
					break;
				}
			}
next:
			path->slots[0]++;
		}
		btrfs_release_path(path);
skip:
		logical += increment;
		physical += map->stripe_len;
		spin_lock(&sctx->stat_lock);
		if (stop_loop)
			sctx->stat.last_physical = map->stripes[num].physical +
						   length;
		else
			sctx->stat.last_physical = physical;
		spin_unlock(&sctx->stat_lock);
		if (stop_loop)
			break;
	}
out:
	/* push queued extents */
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_ctx.wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_ctx.wr_lock);

	blk_finish_plug(&plug);
	btrfs_free_path(path);
	btrfs_free_path(ppath);
	return ret < 0 ? ret : 0;
}
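/*
 * Stripe walk example (illustrative: RAID0 over 4 devices with 64K
 * stripe_len): the device being scrubbed owns every 4th 64K slice of the
 * chunk, so each loop iteration above advances physical by 64K while
 * logical advances by increment = 64K * 4 = 256K.
 */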
static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
					  struct btrfs_device *scrub_dev,
					  u64 chunk_tree, u64 chunk_objectid,
					  u64 chunk_offset, u64 length,
					  u64 dev_offset, int is_dev_replace)
{
	struct btrfs_mapping_tree *map_tree =
		&sctx->dev_root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	int i;
	int ret = 0;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);

	if (!em)
		return -EINVAL;

	map = (struct map_lookup *)em->bdev;
	if (em->start != chunk_offset)
		goto out;

	if (em->len < length)
		goto out;

	for (i = 0; i < map->num_stripes; ++i) {
		if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
		    map->stripes[i].physical == dev_offset) {
			ret = scrub_stripe(sctx, map, scrub_dev, i,
					   chunk_offset, length,
					   is_dev_replace);
			if (ret)
				goto out;
		}
	}
out:
	free_extent_map(em);

	return ret;
}
static noinline_for_stack
int scrub_enumerate_chunks(struct scrub_ctx *sctx,
			   struct btrfs_device *scrub_dev, u64 start, u64 end,
			   int is_dev_replace)
{
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	struct btrfs_root *root = sctx->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret = 0;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_block_group_cache *cache;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 2;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = scrub_dev->devid;
	key.offset = 0ull;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] >=
			    btrfs_header_nritems(path->nodes[0])) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					break;
				if (ret > 0) {
					ret = 0;
					break;
				}
			} else {
				ret = 0;
			}
		}

		l = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.objectid != scrub_dev->devid)
			break;

		if (found_key.type != BTRFS_DEV_EXTENT_KEY)
			break;

		if (found_key.offset >= end)
			break;

		if (found_key.offset < key.offset)
			break;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (found_key.offset + length <= start)
			goto skip;

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);

		/*
		 * get a reference on the corresponding block group to prevent
		 * the chunk from going away while we scrub it
		 */
		cache = btrfs_lookup_block_group(fs_info, chunk_offset);

		/* some chunks are removed but not committed to disk yet,
		 * continue scrubbing */
		if (!cache)
			goto skip;

		/*
		 * we need call btrfs_inc_block_group_ro() with scrubs_paused,
		 * to avoid deadlock caused by:
		 * btrfs_inc_block_group_ro()
		 * -> btrfs_wait_for_commit()
		 * -> btrfs_commit_transaction()
		 * -> btrfs_scrub_pause()
		 */
		scrub_pause_on(fs_info);
		ret = btrfs_inc_block_group_ro(root, cache);
		scrub_pause_off(fs_info);
		if (ret) {
			btrfs_put_block_group(cache);
			break;
		}

		dev_replace->cursor_right = found_key.offset + length;
		dev_replace->cursor_left = found_key.offset;
		dev_replace->item_needs_writeback = 1;
		ret = scrub_chunk(sctx, scrub_dev, chunk_tree, chunk_objectid,
				  chunk_offset, length, found_key.offset,
				  is_dev_replace);

		/*
		 * flush, submit all pending read and write bios, afterwards
		 * wait for them.
		 * Note that in the dev replace case, a read request causes
		 * write requests that are submitted in the read completion
		 * worker. Therefore in the current situation, it is required
		 * that all write requests are flushed, so that all read and
		 * write requests are really completed when bios_in_flight
		 * changes to 0.
		 */
		atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
		scrub_submit(sctx);
		mutex_lock(&sctx->wr_ctx.wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_ctx.wr_lock);

		wait_event(sctx->list_wait,
			   atomic_read(&sctx->bios_in_flight) == 0);

		scrub_pause_on(fs_info);

		/*
		 * must be called before we decrease @scrub_paused.
		 * make sure we don't block transaction commit while
		 * we are waiting pending workers finished.
		 */
		wait_event(sctx->list_wait,
			   atomic_read(&sctx->workers_pending) == 0);
		atomic_set(&sctx->wr_ctx.flush_all_writes, 0);

		scrub_pause_off(fs_info);

		btrfs_dec_block_group_ro(root, cache);

		btrfs_put_block_group(cache);
		if (ret)
			break;
		if (is_dev_replace &&
		    atomic64_read(&dev_replace->num_write_errors) > 0) {
			ret = -EIO;
			break;
		}
		if (sctx->stat.malloc_errors > 0) {
			ret = -ENOMEM;
			break;
		}

		dev_replace->cursor_left = dev_replace->cursor_right;
		dev_replace->item_needs_writeback = 1;
skip:
		key.offset = found_key.offset + length;
		btrfs_release_path(path);
	}

	btrfs_free_path(path);

	return ret;
}
static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
					   struct btrfs_device *scrub_dev)
{
	int	i;
	u64	bytenr;
	u64	gen;
	int	ret;
	struct btrfs_root *root = sctx->dev_root;

	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
		return -EIO;

	/* Seed devices of a new filesystem have their own generation. */
	if (scrub_dev->fs_devices != root->fs_info->fs_devices)
		gen = scrub_dev->generation;
	else
		gen = root->fs_info->last_trans_committed;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE >
		    scrub_dev->commit_total_bytes)
			break;

		ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
				  scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
				  NULL, 1, bytenr);
		if (ret)
			return ret;
	}
	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);

	return 0;
}
/*
 * get a reference count on fs_info->scrub_workers. start worker if necessary
 */
static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
						int is_dev_replace)
{
	unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
	int max_active = fs_info->thread_pool_size;

	if (fs_info->scrub_workers_refcnt == 0) {
		if (is_dev_replace)
			fs_info->scrub_workers =
				btrfs_alloc_workqueue("btrfs-scrub", flags,
						      1, 4);
		else
			fs_info->scrub_workers =
				btrfs_alloc_workqueue("btrfs-scrub", flags,
						      max_active, 4);
		if (!fs_info->scrub_workers)
			goto fail_scrub_workers;

		fs_info->scrub_wr_completion_workers =
			btrfs_alloc_workqueue("btrfs-scrubwrc", flags,
					      max_active, 2);
		if (!fs_info->scrub_wr_completion_workers)
			goto fail_scrub_wr_completion_workers;

		fs_info->scrub_nocow_workers =
			btrfs_alloc_workqueue("btrfs-scrubnc", flags, 1, 0);
		if (!fs_info->scrub_nocow_workers)
			goto fail_scrub_nocow_workers;
		fs_info->scrub_parity_workers =
			btrfs_alloc_workqueue("btrfs-scrubparity", flags,
					      max_active, 2);
		if (!fs_info->scrub_parity_workers)
			goto fail_scrub_parity_workers;
	}
	++fs_info->scrub_workers_refcnt;
	return 0;

fail_scrub_parity_workers:
	btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
fail_scrub_nocow_workers:
	btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
fail_scrub_wr_completion_workers:
	btrfs_destroy_workqueue(fs_info->scrub_workers);
fail_scrub_workers:
	return -ENOMEM;
}
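/*
 * Note (descriptive): the workqueues above are created lazily on the first
 * scrub and torn down in scrub_workers_put() when scrub_workers_refcnt drops
 * back to zero. Both functions are called with fs_info->scrub_lock held,
 * which is what makes the plain integer refcount safe.
 */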
static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
{
	if (--fs_info->scrub_workers_refcnt == 0) {
		btrfs_destroy_workqueue(fs_info->scrub_workers);
		btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
		btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
		btrfs_destroy_workqueue(fs_info->scrub_parity_workers);
	}
	WARN_ON(fs_info->scrub_workers_refcnt < 0);
}
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
		    u64 end, struct btrfs_scrub_progress *progress,
		    int readonly, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int ret;
	struct btrfs_device *dev;
	struct rcu_string *name;

	if (btrfs_fs_closing(fs_info))
		return -EINVAL;

	if (fs_info->chunk_root->nodesize > BTRFS_STRIPE_LEN) {
		/*
		 * in this case scrub is unable to calculate the checksum
		 * the way scrub is implemented. Do not handle this
		 * situation at all because it won't ever happen.
		 */
		btrfs_err(fs_info,
			  "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
			  fs_info->chunk_root->nodesize, BTRFS_STRIPE_LEN);
		return -EINVAL;
	}

	if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
		/* not supported for data w/o checksums */
		btrfs_err(fs_info,
			  "scrub: size assumption sectorsize != PAGE_SIZE "
			  "(%d != %lu) fails",
			  fs_info->chunk_root->sectorsize, PAGE_SIZE);
		return -EINVAL;
	}

	if (fs_info->chunk_root->nodesize >
	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
	    fs_info->chunk_root->sectorsize >
	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
		/*
		 * would exhaust the array bounds of pagev member in
		 * struct scrub_block
		 */
		btrfs_err(fs_info, "scrub: size assumption nodesize and sectorsize "
			  "<= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
			  fs_info->chunk_root->nodesize,
			  SCRUB_MAX_PAGES_PER_BLOCK,
			  fs_info->chunk_root->sectorsize,
			  SCRUB_MAX_PAGES_PER_BLOCK);
		return -EINVAL;
	}

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info, devid, NULL, NULL);
	if (!dev || (dev->missing && !is_dev_replace)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -ENODEV;
	}

	if (!is_dev_replace && !readonly && !dev->writeable) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		rcu_read_lock();
		name = rcu_dereference(dev->name);
		btrfs_err(fs_info, "scrub: device %s is not writable",
			  name->str);
		rcu_read_unlock();
		return -EROFS;
	}

	mutex_lock(&fs_info->scrub_lock);
	if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -EIO;
	}

	btrfs_dev_replace_lock(&fs_info->dev_replace);
	if (dev->scrub_device ||
	    (!is_dev_replace &&
	     btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
		btrfs_dev_replace_unlock(&fs_info->dev_replace);
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -EINPROGRESS;
	}
	btrfs_dev_replace_unlock(&fs_info->dev_replace);

	ret = scrub_workers_get(fs_info, is_dev_replace);
	if (ret) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return ret;
	}

	sctx = scrub_setup_ctx(dev, is_dev_replace);
	if (IS_ERR(sctx)) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(fs_info);
		return PTR_ERR(sctx);
	}
	sctx->readonly = readonly;
	dev->scrub_device = sctx;
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	/*
	 * checking @scrub_pause_req here, we can avoid
	 * race between committing transaction and scrubbing.
	 */
	__scrub_blocked_if_needed(fs_info);
	atomic_inc(&fs_info->scrubs_running);
	mutex_unlock(&fs_info->scrub_lock);

	if (!is_dev_replace) {
		/*
		 * by holding device list mutex, we can
		 * kick off writing super in log tree sync.
		 */
		mutex_lock(&fs_info->fs_devices->device_list_mutex);
		ret = scrub_supers(sctx, dev);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
	}

	if (!ret)
		ret = scrub_enumerate_chunks(sctx, dev, start, end,
					     is_dev_replace);

	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
	atomic_dec(&fs_info->scrubs_running);
	wake_up(&fs_info->scrub_pause_wait);

	wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);

	if (progress)
		memcpy(progress, &sctx->stat, sizeof(*progress));

	mutex_lock(&fs_info->scrub_lock);
	dev->scrub_device = NULL;
	scrub_workers_put(fs_info);
	mutex_unlock(&fs_info->scrub_lock);

	scrub_put_ctx(sctx);

	return ret;
}
void btrfs_scrub_pause(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrub_pause_req);
	while (atomic_read(&fs_info->scrubs_paused) !=
	       atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_paused) ==
			   atomic_read(&fs_info->scrubs_running));
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);
}
void btrfs_scrub_continue(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	atomic_dec(&fs_info->scrub_pause_req);
	wake_up(&fs_info->scrub_pause_wait);
}
int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	if (!atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->scrub_cancel_req);
	while (atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_running) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrub_cancel_req);
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}
int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
			   struct btrfs_device *dev)
{
	struct scrub_ctx *sctx;

	mutex_lock(&fs_info->scrub_lock);
	sctx = dev->scrub_device;
	if (!sctx) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}
	atomic_inc(&sctx->cancel_req);
	while (dev->scrub_device) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   dev->scrub_device == NULL);
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}
int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
			 struct btrfs_scrub_progress *progress)
{
	struct btrfs_device *dev;
	struct scrub_ctx *sctx = NULL;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(root->fs_info, devid, NULL, NULL);
	if (dev)
		sctx = dev->scrub_device;
	if (sctx)
		memcpy(progress, &sctx->stat, sizeof(*progress));
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
}
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num)
{
	u64 mapped_length;
	struct btrfs_bio *bbio = NULL;
	int ret;

	mapped_length = extent_len;
	ret = btrfs_map_block(fs_info, READ, extent_logical,
			      &mapped_length, &bbio, 0);
	if (ret || !bbio || mapped_length < extent_len ||
	    !bbio->stripes[0].dev->bdev) {
		btrfs_put_bbio(bbio);
		return;
	}

	*extent_physical = bbio->stripes[0].physical;
	*extent_mirror_num = bbio->mirror_num;
	*extent_dev = bbio->stripes[0].dev;
	btrfs_put_bbio(bbio);
}
static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
			      struct scrub_wr_ctx *wr_ctx,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_device *dev,
			      int is_dev_replace)
{
	WARN_ON(wr_ctx->wr_curr_bio != NULL);

	mutex_init(&wr_ctx->wr_lock);
	wr_ctx->wr_curr_bio = NULL;
	if (!is_dev_replace)
		return 0;

	WARN_ON(!dev->bdev);
	wr_ctx->pages_per_wr_bio = min_t(int, SCRUB_PAGES_PER_WR_BIO,
					 bio_get_nr_vecs(dev->bdev));
	wr_ctx->tgtdev = dev;
	atomic_set(&wr_ctx->flush_all_writes, 0);
	return 0;
}
static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
{
	mutex_lock(&wr_ctx->wr_lock);
	kfree(wr_ctx->wr_curr_bio);
	wr_ctx->wr_curr_bio = NULL;
	mutex_unlock(&wr_ctx->wr_lock);
}
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
			    int mirror_num, u64 physical_for_dev_replace)
{
	struct scrub_copy_nocow_ctx *nocow_ctx;
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
	if (!nocow_ctx) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	scrub_pending_trans_workers_inc(sctx);

	nocow_ctx->sctx = sctx;
	nocow_ctx->logical = logical;
	nocow_ctx->len = len;
	nocow_ctx->mirror_num = mirror_num;
	nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
	btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper,
			copy_nocow_pages_worker, NULL, NULL);
	INIT_LIST_HEAD(&nocow_ctx->inodes);
	btrfs_queue_work(fs_info->scrub_nocow_workers,
			 &nocow_ctx->work);

	return 0;
}
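/*
 * Note (descriptive): the copy itself is deferred to
 * copy_nocow_pages_worker() on the dedicated scrub_nocow_workers queue. The
 * worker joins a transaction and resolves backrefs via
 * iterate_inodes_from_logical(), which is why it is tracked through
 * scrub_pending_trans_workers_inc()/dec() rather than the bio counters.
 */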
static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
{
	struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
	struct scrub_nocow_inode *nocow_inode;

	nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
	if (!nocow_inode)
		return -ENOMEM;
	nocow_inode->inum = inum;
	nocow_inode->offset = offset;
	nocow_inode->root = root;
	list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
	return 0;
}
#define COPY_COMPLETE 1

static void copy_nocow_pages_worker(struct btrfs_work *work)
{
	struct scrub_copy_nocow_ctx *nocow_ctx =
		container_of(work, struct scrub_copy_nocow_ctx, work);
	struct scrub_ctx *sctx = nocow_ctx->sctx;
	u64 logical = nocow_ctx->logical;
	u64 len = nocow_ctx->len;
	int mirror_num = nocow_ctx->mirror_num;
	u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
	int ret;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_root *root;
	int not_written = 0;

	fs_info = sctx->dev_root->fs_info;
	root = fs_info->extent_root;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		not_written = 1;
		goto out;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		not_written = 1;
		goto out;
	}

	ret = iterate_inodes_from_logical(logical, fs_info, path,
					  record_inode_for_nocow, nocow_ctx);
	if (ret != 0 && ret != -ENOENT) {
		btrfs_warn(fs_info, "iterate_inodes_from_logical() failed: log %llu, "
			   "phys %llu, len %llu, mir %u, ret %d",
			   logical, physical_for_dev_replace, len, mirror_num,
			   ret);
		not_written = 1;
		goto out;
	}

	btrfs_end_transaction(trans, root);
	trans = NULL;
	while (!list_empty(&nocow_ctx->inodes)) {
		struct scrub_nocow_inode *entry;
		entry = list_first_entry(&nocow_ctx->inodes,
					 struct scrub_nocow_inode,
					 list);
		list_del_init(&entry->list);
		ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
						 entry->root, nocow_ctx);
		kfree(entry);
		if (ret == COPY_COMPLETE) {
			ret = 0;
			break;
		} else if (ret) {
			break;
		}
	}
out:
	while (!list_empty(&nocow_ctx->inodes)) {
		struct scrub_nocow_inode *entry;
		entry = list_first_entry(&nocow_ctx->inodes,
					 struct scrub_nocow_inode,
					 list);
		list_del_init(&entry->list);
		kfree(entry);
	}
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, root);
	if (not_written)
		btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
					    num_uncorrectable_read_errors);

	btrfs_free_path(path);
	kfree(nocow_ctx);

	scrub_pending_trans_workers_dec(sctx);
}
static int check_extent_to_block(struct inode *inode, u64 start, u64 len,
				 u64 logical)
{
	struct extent_state *cached_state = NULL;
	struct btrfs_ordered_extent *ordered;
	struct extent_io_tree *io_tree;
	struct extent_map *em;
	u64 lockstart = start, lockend = start + len - 1;
	int ret = 0;

	io_tree = &BTRFS_I(inode)->io_tree;

	lock_extent_bits(io_tree, lockstart, lockend, 0, &cached_state);
	ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
	if (ordered) {
		btrfs_put_ordered_extent(ordered);
		ret = 1;
		goto out_unlock;
	}

	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_unlock;
	}

	/*
	 * This extent does not actually cover the logical extent anymore,
	 * move on to the next inode.
	 */
	if (em->block_start > logical ||
	    em->block_start + em->block_len < logical + len) {
		free_extent_map(em);
		ret = 1;
		goto out_unlock;
	}
	free_extent_map(em);

out_unlock:
	unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
			     GFP_NOFS);
	return ret;
}
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
				      struct scrub_copy_nocow_ctx *nocow_ctx)
{
	struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
	struct btrfs_key key;
	struct inode *inode;
	struct page *page;
	struct btrfs_root *local_root;
	struct extent_io_tree *io_tree;
	u64 physical_for_dev_replace;
	u64 nocow_ctx_logical;
	u64 len = nocow_ctx->len;
	unsigned long index;
	int srcu_index;
	int ret = 0;
	int err = 0;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(local_root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
		return PTR_ERR(local_root);
	}

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	/* Avoid truncate/dio/punch hole.. */
	mutex_lock(&inode->i_mutex);
	inode_dio_wait(inode);

	physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
	io_tree = &BTRFS_I(inode)->io_tree;
	nocow_ctx_logical = nocow_ctx->logical;

	ret = check_extent_to_block(inode, offset, len, nocow_ctx_logical);
	if (ret) {
		ret = ret > 0 ? 0 : ret;
		goto out;
	}

	while (len >= PAGE_CACHE_SIZE) {
		index = offset >> PAGE_CACHE_SHIFT;
again:
		page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
		if (!page) {
			btrfs_err(fs_info, "find_or_create_page() failed");
			ret = -ENOMEM;
			goto out;
		}

		if (PageUptodate(page)) {
			if (PageDirty(page))
				goto next_page;
		} else {
			ClearPageError(page);
			err = extent_read_full_page(io_tree, page,
						    btrfs_get_extent,
						    nocow_ctx->mirror_num);
			if (err) {
				ret = err;
				goto next_page;
			}

			lock_page(page);
			/*
			 * If the page has been removed from the page cache,
			 * the data on it is meaningless: it may be an old
			 * copy, while the new data may have been written
			 * into a new page in the page cache.
			 */
			if (page->mapping != inode->i_mapping) {
				unlock_page(page);
				page_cache_release(page);
				goto again;
			}
			if (!PageUptodate(page)) {
				ret = -EIO;
				goto next_page;
			}
		}

		ret = check_extent_to_block(inode, offset, len,
					    nocow_ctx_logical);
		if (ret) {
			ret = ret > 0 ? 0 : ret;
			goto next_page;
		}

		err = write_page_nocow(nocow_ctx->sctx,
				       physical_for_dev_replace, page);
		if (err)
			ret = err;
next_page:
		unlock_page(page);
		page_cache_release(page);

		if (ret)
			break;

		offset += PAGE_CACHE_SIZE;
		physical_for_dev_replace += PAGE_CACHE_SIZE;
		nocow_ctx_logical += PAGE_CACHE_SIZE;
		len -= PAGE_CACHE_SIZE;
	}
	ret = COPY_COMPLETE;
out:
	mutex_unlock(&inode->i_mutex);
	iput(inode);
	return ret;
}
static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page)
{
	struct bio *bio;
	struct btrfs_device *dev;
	int ret;

	dev = sctx->wr_ctx.tgtdev;
	if (!dev)
		return -EIO;
	if (!dev->bdev) {
		printk_ratelimited(KERN_WARNING
			"BTRFS: scrub write_page_nocow(bdev == NULL) is unexpected!\n");
		return -EIO;
	}
	bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
	if (!bio) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}
	bio->bi_iter.bi_size = 0;
	bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
	bio->bi_bdev = dev->bdev;
	ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
	if (ret != PAGE_CACHE_SIZE) {
leave_with_eio:
		bio_put(bio);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
		return -EIO;
	}

	if (btrfsic_submit_bio_wait(WRITE_SYNC, bio))
		goto leave_with_eio;