/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include "ordered-data.h"
#include "transaction.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */
/*
 * The following three values only influence the performance.
 * The last one configures the number of parallel and outstanding I/O
 * operations. The first two values configure an upper limit for the number
 * of (dynamically allocated) pages that are added to a bio.
 */
#define SCRUB_PAGES_PER_RD_BIO	32	/* 128k per bio */
#define SCRUB_PAGES_PER_WR_BIO	32	/* 128k per bio */
#define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */
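
/*
 * Sizing, assuming 4K pages: 32 pages * 4K = 128K of payload per read or
 * write bio, and 64 bios * 128K = 8MB that one scrub context may have in
 * flight against a single device, which is what the comments on the
 * constants above refer to.
 */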
/*
 * The following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 * Values larger than BTRFS_STRIPE_LEN are not supported.
 */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */
struct scrub_recover {
	atomic_t		refs;
	struct btrfs_bio	*bbio;
	u64			map_length;
};
struct scrub_page {
	struct scrub_block	*sblock;
	struct page		*page;
	struct btrfs_device	*dev;
	struct list_head	list;
	u64			flags;  /* extent flags */
	u64			generation;
	u64			logical;
	u64			physical;
	u64			physical_for_dev_replace;
	atomic_t		refs;
	unsigned int		mirror_num:8;
	unsigned int		have_csum:1;
	unsigned int		io_error:1;
	u8			csum[BTRFS_CSUM_SIZE];

	struct scrub_recover	*recover;
};
struct scrub_bio {
	int			index;
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	struct bio		*bio;
	u64			logical;
	u64			physical;
#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
	struct scrub_page	*pagev[SCRUB_PAGES_PER_WR_BIO];
#else
	struct scrub_page	*pagev[SCRUB_PAGES_PER_RD_BIO];
#endif
	int			page_count;
	int			next_free;
	struct btrfs_work	work;
};
struct scrub_block {
	struct scrub_page	*pagev[SCRUB_MAX_PAGES_PER_BLOCK];
	int			page_count;
	atomic_t		outstanding_pages;
	atomic_t		refs; /* free mem on transition to zero */
	struct scrub_ctx	*sctx;
	struct scrub_parity	*sparity;
	unsigned int		header_error:1;
	unsigned int		checksum_error:1;
	unsigned int		no_io_error_seen:1;
	unsigned int		generation_error:1; /* also sets header_error */

	/* The following is for the data used to check parity */
	/* It is for the data with checksum */
	unsigned int		data_corrected:1;
};
/* Used for the chunks with parity stripe such as RAID5/6 */
struct scrub_parity {
	struct scrub_ctx	*sctx;

	struct btrfs_device	*scrub_dev;

	u64			logic_start;

	int			nsectors;

	int			stripe_len;

	atomic_t		refs;

	struct list_head	spages;

	/* Work of parity check and repair */
	struct btrfs_work	work;

	/* Mark the parity blocks which have data */
	unsigned long		*dbitmap;

	/*
	 * Mark the parity blocks which have data, but errors happened when
	 * reading data or checking data
	 */
	unsigned long		*ebitmap;

	unsigned long		bitmap[0];
};
struct scrub_wr_ctx {
	struct scrub_bio	*wr_curr_bio;
	struct btrfs_device	*tgtdev;
	int			pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
	atomic_t		flush_all_writes;
	struct mutex		wr_lock;
};
struct scrub_ctx {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
	struct btrfs_root	*dev_root;
	int			first_free;
	int			curr;
	atomic_t		bios_in_flight;
	atomic_t		workers_pending;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	u16			csum_size;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	int			pages_per_rd_bio;
	u32			sectorsize;
	u32			nodesize;

	int			is_dev_replace;
	struct scrub_wr_ctx	wr_ctx;

	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;

	/*
	 * Use a ref counter to avoid use-after-free issues. Scrub workers
	 * decrement bios_in_flight and workers_pending and then do a wakeup
	 * on the list_wait wait queue. We must ensure the main scrub task
	 * doesn't free the scrub context before or while the workers are
	 * doing the wakeup() call.
	 */
	atomic_t		refs;
};
struct scrub_fixup_nodatasum {
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	u64			logical;
	struct btrfs_root	*root;
	struct btrfs_work	work;
	int			mirror_num;
};
struct scrub_nocow_inode {
	struct list_head	list;
};
struct scrub_copy_nocow_ctx {
	struct scrub_ctx	*sctx;
	u64			physical_for_dev_replace;
	struct list_head	inodes;
	struct btrfs_work	work;
};
struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	const char		*errstr;
	sector_t		sector;
	u64			logical;
	struct btrfs_device	*dev;
};
static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock, int is_metadata,
				int have_csum, u8 *csum, u64 generation,
				u16 csum_size, int retry_failed_mirror);
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
					 struct scrub_block *sblock,
					 int is_metadata, int have_csum,
					 const u8 *csum, u64 generation,
					 u16 csum_size);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_page_get(struct scrub_page *spage);
static void scrub_page_put(struct scrub_page *spage);
static void scrub_parity_get(struct scrub_parity *sparity);
static void scrub_parity_put(struct scrub_parity *sparity);
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio, int err);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num);
static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
			      struct scrub_wr_ctx *wr_ctx,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_device *dev,
			      int is_dev_replace);
static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio, int err);
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page);
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
				      struct scrub_copy_nocow_ctx *ctx);
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
			    int mirror_num, u64 physical_for_dev_replace);
static void copy_nocow_pages_worker(struct btrfs_work *work);
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_put_ctx(struct scrub_ctx *sctx);
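
/*
 * The helpers below account for bios submitted on behalf of a scrub
 * context: each submission takes a reference on the context and bumps
 * bios_in_flight, each completion drops bios_in_flight and wakes up
 * waiters on list_wait (see the comment above the refs member of
 * struct scrub_ctx).
 */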
static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
	atomic_inc(&sctx->refs);
	atomic_inc(&sctx->bios_in_flight);
}

static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
	atomic_dec(&sctx->bios_in_flight);
	wake_up(&sctx->list_wait);
	scrub_put_ctx(sctx);
}
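
/*
 * Pause handling: __scrub_blocked_if_needed() expects scrub_lock to be
 * held and temporarily drops it while a pause of the scrub is requested;
 * scrub_blocked_if_needed() additionally marks this scrub as paused so
 * that the pause requester is not blocked on us.
 */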
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
}

static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);

	mutex_lock(&fs_info->scrub_lock);
	__scrub_blocked_if_needed(fs_info);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	wake_up(&fs_info->scrub_pause_wait);
}
/*
 * used for workers that require transaction commits (i.e., for the
 * NOCOW case)
 */
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	atomic_inc(&sctx->refs);
	/*
	 * increment scrubs_running to prevent cancel requests from
	 * completing as long as a worker is running. we must also
	 * increment scrubs_paused to prevent deadlocking on pause
	 * requests used for transaction commits (as the worker uses a
	 * transaction context). it is safe to regard the worker
	 * as paused for all practical matters. effectively, we only
	 * avoid cancellation requests from completing.
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrubs_running);
	atomic_inc(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	/*
	 * checking the @scrubs_running == @scrubs_paused condition inside
	 * wait_event() is not an atomic operation, which means we may
	 * inc/dec @scrubs_running/paused at any time. Let's wake up
	 * @scrub_pause_wait as much as we can so that a blocked
	 * transaction commit waits less.
	 */
	wake_up(&fs_info->scrub_pause_wait);

	atomic_inc(&sctx->workers_pending);
}
/* used for workers that require transaction commits */
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	/*
	 * see scrub_pending_trans_workers_inc() why we're pretending
	 * to be paused in the scrub counters
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_dec(&fs_info->scrubs_running);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_dec(&sctx->workers_pending);
	wake_up(&fs_info->scrub_pause_wait);
	wake_up(&sctx->list_wait);
	scrub_put_ctx(sctx);
}
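
/* Release every checksum entry still queued on the per-context csum_list. */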
static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}
static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	scrub_free_wr_ctx(&sctx->wr_ctx);

	/* this can happen when scrub is cancelled */
	if (sctx->curr != -1) {
		struct scrub_bio *sbio = sctx->bios[sctx->curr];

		for (i = 0; i < sbio->page_count; i++) {
			WARN_ON(!sbio->pagev[i]->page);
			scrub_block_put(sbio->pagev[i]->sblock);
		}
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio = sctx->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	scrub_free_csums(sctx);
	kfree(sctx);
}
static void scrub_put_ctx(struct scrub_ctx *sctx)
{
	if (atomic_dec_and_test(&sctx->refs))
		scrub_free_ctx(sctx);
}
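
/*
 * Allocate and initialize a scrub context for @dev: pre-allocate
 * SCRUB_BIOS_PER_SCTX scrub_bio structures chained through next_free,
 * initialize the locks, wait queue and csum list and, for dev-replace,
 * the write context for the target device.
 */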
static noinline_for_stack
struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int i;
	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
	int pages_per_rd_bio;
	int ret;

	/*
	 * the setting of pages_per_rd_bio is correct for scrub but might
	 * be wrong for the dev_replace code where we might read from
	 * different devices in the initial huge bios. However, that
	 * code is able to correctly handle the case when adding a page
	 * to a bio fails.
	 */
	if (dev->bdev)
		pages_per_rd_bio = min_t(int, SCRUB_PAGES_PER_RD_BIO,
					 bio_get_nr_vecs(dev->bdev));
	else
		pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
	sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
	if (!sctx)
		goto nomem;
	atomic_set(&sctx->refs, 1);
	sctx->is_dev_replace = is_dev_replace;
	sctx->pages_per_rd_bio = pages_per_rd_bio;
	sctx->curr = -1;
	sctx->dev_root = dev->dev_root;
	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
		if (!sbio)
			goto nomem;
		sctx->bios[i] = sbio;

		sbio->index = i;
		sbio->sctx = sctx;
		sbio->page_count = 0;
		btrfs_init_work(&sbio->work, btrfs_scrub_helper,
				scrub_bio_end_io_worker, NULL, NULL);

		if (i != SCRUB_BIOS_PER_SCTX - 1)
			sctx->bios[i]->next_free = i + 1;
		else
			sctx->bios[i]->next_free = -1;
	}
	sctx->first_free = 0;
	sctx->nodesize = dev->dev_root->nodesize;
	sctx->sectorsize = dev->dev_root->sectorsize;
	atomic_set(&sctx->bios_in_flight, 0);
	atomic_set(&sctx->workers_pending, 0);
	atomic_set(&sctx->cancel_req, 0);
	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
	INIT_LIST_HEAD(&sctx->csum_list);

	spin_lock_init(&sctx->list_lock);
	spin_lock_init(&sctx->stat_lock);
	init_waitqueue_head(&sctx->list_wait);

	ret = scrub_setup_wr_ctx(sctx, &sctx->wr_ctx, fs_info,
				 fs_info->dev_replace.tgtdev, is_dev_replace);
	if (ret) {
		scrub_free_ctx(sctx);
		return ERR_PTR(ret);
	}
	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}
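
/*
 * Backref-walk callback: resolve (root, inode, offset) to the file
 * path(s) that reference the corrupted extent and print one warning
 * line per resolved path.
 */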
static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
				     void *warn_ctx)
{
	u64 isize;
	u32 nlink;
	int ret;
	int i;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = warn_ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key root_key;
	struct btrfs_key key;

	root_key.objectid = root;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/*
	 * this makes the path point to (inum INODE_ITEM ioff)
	 */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
	if (ret) {
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
					struct btrfs_inode_item);
	isize = btrfs_inode_size(eb, inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	ipath = init_ipath(4096, local_root, swarn->path);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);
	if (ret < 0)
		goto err;

	/*
	 * we deliberately ignore the bit ipath might have been too small to
	 * hold all of the paths here
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		printk_in_rcu(KERN_WARNING "BTRFS: %s at logical %llu on dev "
			"%s, sector %llu, root %llu, inode %llu, offset %llu, "
			"length %llu, links %u (path: %s)\n", swarn->errstr,
			swarn->logical, rcu_str_deref(swarn->dev->name),
			(unsigned long long)swarn->sector, root, inum, offset,
			min(isize - offset, (u64)PAGE_SIZE), nlink,
			(char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	return 0;

err:
	printk_in_rcu(KERN_WARNING "BTRFS: %s at logical %llu on dev "
		"%s, sector %llu, root %llu, inode %llu, offset %llu: path "
		"resolving failed with ret=%d\n", swarn->errstr,
		swarn->logical, rcu_str_deref(swarn->dev->name),
		(unsigned long long)swarn->sector, root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}
static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	unsigned long ptr = 0;
	u64 extent_item_pos;
	u64 flags = 0;
	u64 ref_root;
	u32 item_size;
	u8 ref_level;
	int ret;

	WARN_ON(sblock->page_count < 1);
	dev = sblock->pagev[0]->dev;
	fs_info = sblock->sctx->dev_root->fs_info;

	path = btrfs_alloc_path();
	if (!path)
		return;

	swarn.sector = (sblock->pagev[0]->physical) >> 9;
	swarn.logical = sblock->pagev[0]->logical;
	swarn.errstr = errstr;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
				  &flags);
	if (ret < 0)
		goto out;

	extent_item_pos = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			printk_in_rcu(KERN_WARNING
				"BTRFS: %s at logical %llu on dev %s, "
				"sector %llu: metadata %s (level %d) in tree "
				"%llu\n", errstr, swarn.logical,
				rcu_str_deref(dev->name),
				(unsigned long long)swarn.sector,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
		btrfs_release_path(path);
	} else {
		btrfs_release_path(path);
		swarn.path = path;
		swarn.dev = dev;
		iterate_extent_inodes(fs_info, found_key.objectid,
				      extent_item_pos, 1,
				      scrub_print_warning_inode, &swarn);
	}

out:
	btrfs_free_path(path);
}
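
/*
 * Callback for iterate_inodes_from_logical(): force a re-read of the
 * failed sector through the regular readpage path (or repair_io_failure()
 * for an uptodate, non-dirty page) and use the EXTENT_DAMAGED bit to
 * detect whether the read repaired it.
 */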
static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
{
	struct page *page = NULL;
	unsigned long index;
	struct scrub_fixup_nodatasum *fixup = fixup_ctx;
	int ret;
	int corrected = 0;
	struct btrfs_key key;
	struct inode *inode = NULL;
	struct btrfs_fs_info *fs_info;
	u64 end = offset + PAGE_SIZE - 1;
	struct btrfs_root *local_root;
	int srcu_index;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	fs_info = fixup->root->fs_info;
	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(local_root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
		return PTR_ERR(local_root);
	}

	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	index = offset >> PAGE_CACHE_SHIFT;

	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page)) {
		if (PageDirty(page)) {
			/*
			 * we need to write the data to the defect sector. the
			 * data that was in that sector is not in memory,
			 * because the page was modified. we must not write the
			 * modified page to that sector.
			 *
			 * TODO: what could be done here: wait for the delalloc
			 *       runner to write out that page (might involve
			 *       COW) and see whether the sector is still
			 *       referenced afterwards.
			 *
			 * For the meantime, we'll treat this error
			 * incorrectable, although there is a chance that a
			 * later scrub will find the bad sector again and that
			 * there's no dirty page in memory, then.
			 */
			ret = -EIO;
			goto out;
		}
		ret = repair_io_failure(inode, offset, PAGE_SIZE,
					fixup->logical, page,
					offset - page_offset(page),
					fixup->mirror_num);
	} else {
		/*
		 * we need to get good data first. the general readpage path
		 * will call repair_io_failure for us, we just have to make
		 * sure we read the bad mirror.
		 */
		ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
					EXTENT_DAMAGED, GFP_NOFS);
		if (ret) {
			/* set_extent_bits should give proper error */
			WARN_ON(ret > 0);
			if (ret > 0)
				ret = -EFAULT;
			goto out;
		}

		ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
						btrfs_get_extent,
						fixup->mirror_num);
		wait_on_page_locked(page);

		corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
						end, EXTENT_DAMAGED, 0, NULL);
		clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
					EXTENT_DAMAGED, GFP_NOFS);
	}

out:
	if (page)
		put_page(page);

	iput(inode);

	if (ret < 0)
		return ret;

	if (ret == 0 && corrected) {
		/*
		 * we only need to call readpage for one of the inodes belonging
		 * to this extent. so make iterate_extent_inodes stop
		 */
		return 1;
	}

	return -EIO;
}
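
/*
 * Worker that tries to repair a nodatasum (and therefore possibly
 * non-COW) sector by triggering a read of the failed mirror, which lets
 * the on-the-fly repair code rewrite the sector; the scrub statistics
 * are updated according to the outcome.
 */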
static void scrub_fixup_nodatasum(struct btrfs_work *work)
{
	int ret;
	struct scrub_fixup_nodatasum *fixup;
	struct scrub_ctx *sctx;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_path *path;
	int uncorrectable = 0;

	fixup = container_of(work, struct scrub_fixup_nodatasum, work);
	sctx = fixup->sctx;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.malloc_errors;
		spin_unlock(&sctx->stat_lock);
		uncorrectable = 1;
		goto out;
	}

	trans = btrfs_join_transaction(fixup->root);
	if (IS_ERR(trans)) {
		uncorrectable = 1;
		goto out;
	}

	/*
	 * the idea is to trigger a regular read through the standard path. we
	 * read a page from the (failed) logical address by specifying the
	 * corresponding copynum of the failed sector. thus, that readpage is
	 * expected to fail.
	 * that is the point where on-the-fly error correction will kick in
	 * (once it's finished) and rewrite the failed sector if a good copy
	 * can be found.
	 */
	ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
						path, scrub_fixup_readpage,
						fixup);
	if (ret < 0) {
		uncorrectable = 1;
		goto out;
	}
	WARN_ON(ret != 1);

	spin_lock(&sctx->stat_lock);
	++sctx->stat.corrected_errors;
	spin_unlock(&sctx->stat_lock);

out:
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, fixup->root);
	if (uncorrectable) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.uncorrectable_errors;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_replace_stats_inc(
			&sctx->dev_root->fs_info->dev_replace.
			num_uncorrectable_read_errors);
		printk_ratelimited_in_rcu(KERN_ERR "BTRFS: "
		    "unable to fixup (nodatasum) error at logical %llu on dev %s\n",
			fixup->logical, rcu_str_deref(fixup->dev->name));
	}

	btrfs_free_path(path);
	kfree(fixup);

	scrub_pending_trans_workers_dec(sctx);
}
static inline void scrub_get_recover(struct scrub_recover *recover)
{
	atomic_inc(&recover->refs);
}

static inline void scrub_put_recover(struct scrub_recover *recover)
{
	if (atomic_dec_and_test(&recover->refs)) {
		btrfs_put_bbio(recover->bbio);
		kfree(recover);
	}
}
/*
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
	struct scrub_ctx *sctx = sblock_to_check->sctx;
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	u64 length;
	u64 logical;
	u64 generation;
	unsigned int failed_mirror_index;
	unsigned int is_metadata;
	unsigned int have_csum;
	u8 *csum;
	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
	struct scrub_block *sblock_bad;
	int ret;
	int mirror_index;
	int page_num;
	int success;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	BUG_ON(sblock_to_check->page_count < 1);
	fs_info = sctx->dev_root->fs_info;
	if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
		/*
		 * if we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		return 0;
	}
	length = sblock_to_check->page_count * PAGE_SIZE;
	logical = sblock_to_check->pagev[0]->logical;
	generation = sblock_to_check->pagev[0]->generation;
	BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
	failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
	is_metadata = !(sblock_to_check->pagev[0]->flags &
			BTRFS_EXTENT_FLAG_DATA);
	have_csum = sblock_to_check->pagev[0]->have_csum;
	csum = sblock_to_check->pagev[0]->csum;
	dev = sblock_to_check->pagev[0]->dev;

	if (sctx->is_dev_replace && !is_metadata && !have_csum) {
		sblocks_for_recheck = NULL;
		goto nodatasum_case;
	}

	/*
	 * read all mirrors one after the other. This includes to
	 * re-read the extent or metadata block that failed (that was
	 * the cause that this fixup code is called) another time,
	 * page by page this time in order to know which pages
	 * caused I/O errors and which ones are good (for all mirrors).
	 * It is the goal to handle the situation when more than one
	 * mirror contains I/O errors, but the errors do not
	 * overlap, i.e. the data can be repaired by selecting the
	 * pages from those mirrors without I/O error on the
	 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
	 * would be that mirror #1 has an I/O error on the first page,
	 * the second page is good, and mirror #2 has an I/O error on
	 * the second page, but the first page is good.
	 * Then the first page of the first mirror can be repaired by
	 * taking the first page of the second mirror, and the
	 * second page of the second mirror can be repaired by
	 * copying the contents of the 2nd page of the 1st mirror.
	 * One more note: if the pages of one mirror contain I/O
	 * errors, the checksum cannot be verified. In order to get
	 * the best data for repairing, the first attempt is to find
	 * a mirror without I/O errors and with a validated checksum.
	 * Only if this is not possible, the pages are picked from
	 * mirrors with I/O errors without considering the checksum.
	 * If the latter is the case, at the end, the checksum of the
	 * repaired area is verified in order to correctly maintain
	 * the statistics.
	 */

	sblocks_for_recheck = kzalloc(BTRFS_MAX_MIRRORS *
				      sizeof(*sblocks_for_recheck),
				      GFP_NOFS);
	if (!sblocks_for_recheck) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}

	/* setup the context, map the logical blocks and alloc the pages */
	ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
	if (ret) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}
	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
	sblock_bad = sblocks_for_recheck + failed_mirror_index;

	/* build and submit the bios for the failed mirror, check checksums */
	scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
			    csum, generation, sctx->csum_size, 1);

	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
	    sblock_bad->no_io_error_seen) {
		/*
		 * the error disappeared after reading page by page, or
		 * the area was part of a huge bio and other parts of the
		 * bio caused I/O errors, or the block layer merged several
		 * read requests into one and the error is caused by a
		 * different bio (usually one of the two latter cases is
		 * the cause)
		 */
		spin_lock(&sctx->stat_lock);
		sctx->stat.unverified_errors++;
		sblock_to_check->data_corrected = 1;
		spin_unlock(&sctx->stat_lock);

		if (sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock_bad);
		goto out;
	}

	if (!sblock_bad->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("i/o error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
	} else if (sblock_bad->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.csum_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev,
					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
	} else if (sblock_bad->header_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.verify_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum/header error",
					    sblock_to_check);
		if (sblock_bad->generation_error)
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
	}

	if (sctx->readonly) {
		ASSERT(!sctx->is_dev_replace);
		goto out;
	}

	if (!is_metadata && !have_csum) {
		struct scrub_fixup_nodatasum *fixup_nodatasum;

		WARN_ON(sctx->is_dev_replace);

nodatasum_case:

		/*
		 * !is_metadata and !have_csum, this means that the data
		 * might not be COW'ed, that it might be modified
		 * concurrently. The general strategy to work on the
		 * commit root does not help in the case when COW is not
		 * used.
		 */
		fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
		if (!fixup_nodatasum)
			goto did_not_correct_error;
		fixup_nodatasum->sctx = sctx;
		fixup_nodatasum->dev = dev;
		fixup_nodatasum->logical = logical;
		fixup_nodatasum->root = fs_info->extent_root;
		fixup_nodatasum->mirror_num = failed_mirror_index + 1;
		scrub_pending_trans_workers_inc(sctx);
		btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper,
				scrub_fixup_nodatasum, NULL, NULL);
		btrfs_queue_work(fs_info->scrub_workers,
				 &fixup_nodatasum->work);
		goto out;
	}

	/*
	 * now build and submit the bios for the other mirrors, check
	 * checksums.
	 * First try to pick the mirror which is completely without I/O
	 * errors and also does not have a checksum error.
	 * If one is found, and if a checksum is present, the full block
	 * that is known to contain an error is rewritten. Afterwards
	 * the block is known to be corrected.
	 * If a mirror is found which is completely correct, and no
	 * checksum is present, only those pages are rewritten that had
	 * an I/O error in the block to be repaired, since it cannot be
	 * determined, which copy of the other pages is better (and it
	 * could happen otherwise that a correct page would be
	 * overwritten by a bad one).
	 */
	for (mirror_index = 0;
	     mirror_index < BTRFS_MAX_MIRRORS &&
	     sblocks_for_recheck[mirror_index].page_count > 0;
	     mirror_index++) {
		struct scrub_block *sblock_other;

		if (mirror_index == failed_mirror_index)
			continue;
		sblock_other = sblocks_for_recheck + mirror_index;

		/* build and submit the bios, check checksums */
		scrub_recheck_block(fs_info, sblock_other, is_metadata,
				    have_csum, csum, generation,
				    sctx->csum_size, 0);

		if (!sblock_other->header_error &&
		    !sblock_other->checksum_error &&
		    sblock_other->no_io_error_seen) {
			if (sctx->is_dev_replace) {
				scrub_write_block_to_dev_replace(sblock_other);
				goto corrected_error;
			} else {
				ret = scrub_repair_block_from_good_copy(
						sblock_bad, sblock_other);
				if (!ret)
					goto corrected_error;
			}
		}
	}

	if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
		goto did_not_correct_error;

	/*
	 * In case of I/O errors in the area that is supposed to be
	 * repaired, continue by picking good copies of those pages.
	 * Select the good pages from mirrors to rewrite bad pages from
	 * the area to fix. Afterwards verify the checksum of the block
	 * that is supposed to be repaired. This verification step is
	 * only done for the purpose of statistic counting and for the
	 * final scrub report, whether errors remain.
	 * A perfect algorithm could make use of the checksum and try
	 * all possible combinations of pages from the different mirrors
	 * until the checksum verification succeeds. For example, when
	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
	 * of mirror #2 is readable but the final checksum test fails,
	 * then the 2nd page of mirror #3 could be tried, whether now
	 * the final checksum succeeds. But this would be a rare
	 * exception and is therefore not implemented. At least it is
	 * avoided that the good copy is overwritten.
	 * A more useful improvement would be to pick the sectors
	 * without I/O error based on sector sizes (512 bytes on legacy
	 * disks) instead of on PAGE_SIZE. Then maybe 512 byte of one
	 * mirror could be repaired by taking 512 byte of a different
	 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
	 * area are unreadable.
	 */
	success = 1;
	for (page_num = 0; page_num < sblock_bad->page_count;
	     page_num++) {
		struct scrub_page *page_bad = sblock_bad->pagev[page_num];
		struct scrub_block *sblock_other = NULL;

		/* skip no-io-error page in scrub */
		if (!page_bad->io_error && !sctx->is_dev_replace)
			continue;

		/* try to find no-io-error page in mirrors */
		if (page_bad->io_error) {
			for (mirror_index = 0;
			     mirror_index < BTRFS_MAX_MIRRORS &&
			     sblocks_for_recheck[mirror_index].page_count > 0;
			     mirror_index++) {
				if (!sblocks_for_recheck[mirror_index].
				    pagev[page_num]->io_error) {
					sblock_other = sblocks_for_recheck +
						       mirror_index;
					break;
				}
			}
			if (!sblock_other)
				success = 0;
		}

		if (sctx->is_dev_replace) {
			/*
			 * did not find a mirror to fetch the page
			 * from. scrub_write_page_to_dev_replace()
			 * handles this case (page->io_error), by
			 * filling the block with zeros before
			 * submitting the write request
			 */
			if (!sblock_other)
				sblock_other = sblock_bad;

			if (scrub_write_page_to_dev_replace(sblock_other,
							    page_num) != 0) {
				btrfs_dev_replace_stats_inc(
					&sctx->dev_root->
					fs_info->dev_replace.
					num_write_errors);
				success = 0;
			}
		} else if (sblock_other) {
			ret = scrub_repair_page_from_good_copy(sblock_bad,
							       sblock_other,
							       page_num, 0);
			if (0 == ret)
				page_bad->io_error = 0;
			else
				success = 0;
		}
	}

	if (success && !sctx->is_dev_replace) {
		if (is_metadata || have_csum) {
			/*
			 * need to verify the checksum now that all
			 * sectors on disk are repaired (the write
			 * request for data to be repaired is on its way).
			 * Just be lazy and use scrub_recheck_block()
			 * which re-reads the data before the checksum
			 * is verified, but most likely the data comes out
			 * of the page cache.
			 */
			scrub_recheck_block(fs_info, sblock_bad,
					    is_metadata, have_csum, csum,
					    generation, sctx->csum_size, 1);
			if (!sblock_bad->header_error &&
			    !sblock_bad->checksum_error &&
			    sblock_bad->no_io_error_seen)
				goto corrected_error;
			else
				goto did_not_correct_error;
		} else {
corrected_error:
			spin_lock(&sctx->stat_lock);
			sctx->stat.corrected_errors++;
			sblock_to_check->data_corrected = 1;
			spin_unlock(&sctx->stat_lock);
			printk_ratelimited_in_rcu(KERN_ERR
				"BTRFS: fixed up error at logical %llu on dev %s\n",
				logical, rcu_str_deref(dev->name));
		}
	} else {
did_not_correct_error:
		spin_lock(&sctx->stat_lock);
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		printk_ratelimited_in_rcu(KERN_ERR
			"BTRFS: unable to fixup (regular) error at logical %llu on dev %s\n",
			logical, rcu_str_deref(dev->name));
	}

out:
	if (sblocks_for_recheck) {
		for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
		     mirror_index++) {
			struct scrub_block *sblock = sblocks_for_recheck +
						     mirror_index;
			struct scrub_recover *recover;
			int page_index;

			for (page_index = 0; page_index < sblock->page_count;
			     page_index++) {
				sblock->pagev[page_index]->sblock = NULL;
				recover = sblock->pagev[page_index]->recover;
				if (recover) {
					scrub_put_recover(recover);
					sblock->pagev[page_index]->recover =
									NULL;
				}
				scrub_page_put(sblock->pagev[page_index]);
			}
		}
		kfree(sblocks_for_recheck);
	}

	return 0;
}
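
/*
 * Number of distinct copies that a block of this chunk can be read from:
 * a RAID5 stripe can be served from the data or rebuilt from parity, a
 * RAID6 stripe additionally from the second parity; for all other
 * profiles every stripe of the mapping is a full mirror.
 */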
static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
{
	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
		return 2;
	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
		return 3;
	else
		return (int)bbio->num_stripes;
}

static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
						 u64 *raid_map,
						 u64 mapped_length,
						 int nstripes, int mirror,
						 int *stripe_index,
						 u64 *stripe_offset)
{
	int i;

	if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		/* RAID5/6 */
		for (i = 0; i < nstripes; i++) {
			if (raid_map[i] == RAID6_Q_STRIPE ||
			    raid_map[i] == RAID5_P_STRIPE)
				continue;

			if (logical >= raid_map[i] &&
			    logical < raid_map[i] + mapped_length)
				break;
		}

		*stripe_index = i;
		*stripe_offset = logical - raid_map[i];
	} else {
		/* The other RAID type */
		*stripe_index = mirror;
		*stripe_offset = 0;
	}
}
1318 static int scrub_setup_recheck_block(struct scrub_block
*original_sblock
,
1319 struct scrub_block
*sblocks_for_recheck
)
1321 struct scrub_ctx
*sctx
= original_sblock
->sctx
;
1322 struct btrfs_fs_info
*fs_info
= sctx
->dev_root
->fs_info
;
1323 u64 length
= original_sblock
->page_count
* PAGE_SIZE
;
1324 u64 logical
= original_sblock
->pagev
[0]->logical
;
1325 struct scrub_recover
*recover
;
1326 struct btrfs_bio
*bbio
;
1337 * note: the two members refs and outstanding_pages
1338 * are not used (and not set) in the blocks that are used for
1339 * the recheck procedure
1342 while (length
> 0) {
1343 sublen
= min_t(u64
, length
, PAGE_SIZE
);
1344 mapped_length
= sublen
;
1348 * with a length of PAGE_SIZE, each returned stripe
1349 * represents one mirror
1351 ret
= btrfs_map_sblock(fs_info
, REQ_GET_READ_MIRRORS
, logical
,
1352 &mapped_length
, &bbio
, 0, 1);
1353 if (ret
|| !bbio
|| mapped_length
< sublen
) {
1354 btrfs_put_bbio(bbio
);
1358 recover
= kzalloc(sizeof(struct scrub_recover
), GFP_NOFS
);
1360 btrfs_put_bbio(bbio
);
1364 atomic_set(&recover
->refs
, 1);
1365 recover
->bbio
= bbio
;
1366 recover
->map_length
= mapped_length
;
1368 BUG_ON(page_index
>= SCRUB_PAGES_PER_RD_BIO
);
1370 nmirrors
= min(scrub_nr_raid_mirrors(bbio
), BTRFS_MAX_MIRRORS
);
1372 for (mirror_index
= 0; mirror_index
< nmirrors
;
1374 struct scrub_block
*sblock
;
1375 struct scrub_page
*page
;
1377 sblock
= sblocks_for_recheck
+ mirror_index
;
1378 sblock
->sctx
= sctx
;
1379 page
= kzalloc(sizeof(*page
), GFP_NOFS
);
1382 spin_lock(&sctx
->stat_lock
);
1383 sctx
->stat
.malloc_errors
++;
1384 spin_unlock(&sctx
->stat_lock
);
1385 scrub_put_recover(recover
);
1388 scrub_page_get(page
);
1389 sblock
->pagev
[page_index
] = page
;
1390 page
->logical
= logical
;
1392 scrub_stripe_index_and_offset(logical
,
1401 page
->physical
= bbio
->stripes
[stripe_index
].physical
+
1403 page
->dev
= bbio
->stripes
[stripe_index
].dev
;
1405 BUG_ON(page_index
>= original_sblock
->page_count
);
1406 page
->physical_for_dev_replace
=
1407 original_sblock
->pagev
[page_index
]->
1408 physical_for_dev_replace
;
1409 /* for missing devices, dev->bdev is NULL */
1410 page
->mirror_num
= mirror_index
+ 1;
1411 sblock
->page_count
++;
1412 page
->page
= alloc_page(GFP_NOFS
);
1416 scrub_get_recover(recover
);
1417 page
->recover
= recover
;
1419 scrub_put_recover(recover
);
1428 struct scrub_bio_ret
{
1429 struct completion event
;
1433 static void scrub_bio_wait_endio(struct bio
*bio
, int error
)
1435 struct scrub_bio_ret
*ret
= bio
->bi_private
;
1438 complete(&ret
->event
);
1441 static inline int scrub_is_page_on_raid56(struct scrub_page
*page
)
1443 return page
->recover
&&
1444 (page
->recover
->bbio
->map_type
& BTRFS_BLOCK_GROUP_RAID56_MASK
);
1447 static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info
*fs_info
,
1449 struct scrub_page
*page
)
1451 struct scrub_bio_ret done
;
1454 init_completion(&done
.event
);
1456 bio
->bi_iter
.bi_sector
= page
->logical
>> 9;
1457 bio
->bi_private
= &done
;
1458 bio
->bi_end_io
= scrub_bio_wait_endio
;
1460 ret
= raid56_parity_recover(fs_info
->fs_root
, bio
, page
->recover
->bbio
,
1461 page
->recover
->map_length
,
1462 page
->mirror_num
, 0);
1466 wait_for_completion(&done
.event
);
1474 * this function will check the on disk data for checksum errors, header
1475 * errors and read I/O errors. If any I/O errors happen, the exact pages
1476 * which are errored are marked as being bad. The goal is to enable scrub
1477 * to take those pages that are not errored from all the mirrors so that
1478 * the pages that are errored in the just handled mirror can be repaired.
1480 static void scrub_recheck_block(struct btrfs_fs_info
*fs_info
,
1481 struct scrub_block
*sblock
, int is_metadata
,
1482 int have_csum
, u8
*csum
, u64 generation
,
1483 u16 csum_size
, int retry_failed_mirror
)
1487 sblock
->no_io_error_seen
= 1;
1488 sblock
->header_error
= 0;
1489 sblock
->checksum_error
= 0;
1491 for (page_num
= 0; page_num
< sblock
->page_count
; page_num
++) {
1493 struct scrub_page
*page
= sblock
->pagev
[page_num
];
1495 if (page
->dev
->bdev
== NULL
) {
1497 sblock
->no_io_error_seen
= 0;
1501 WARN_ON(!page
->page
);
1502 bio
= btrfs_io_bio_alloc(GFP_NOFS
, 1);
1505 sblock
->no_io_error_seen
= 0;
1508 bio
->bi_bdev
= page
->dev
->bdev
;
1510 bio_add_page(bio
, page
->page
, PAGE_SIZE
, 0);
1511 if (!retry_failed_mirror
&& scrub_is_page_on_raid56(page
)) {
1512 if (scrub_submit_raid56_bio_wait(fs_info
, bio
, page
))
1513 sblock
->no_io_error_seen
= 0;
1515 bio
->bi_iter
.bi_sector
= page
->physical
>> 9;
1517 if (btrfsic_submit_bio_wait(READ
, bio
))
1518 sblock
->no_io_error_seen
= 0;
1524 if (sblock
->no_io_error_seen
)
1525 scrub_recheck_block_checksum(fs_info
, sblock
, is_metadata
,
1526 have_csum
, csum
, generation
,
1532 static inline int scrub_check_fsid(u8 fsid
[],
1533 struct scrub_page
*spage
)
1535 struct btrfs_fs_devices
*fs_devices
= spage
->dev
->fs_devices
;
1538 ret
= memcmp(fsid
, fs_devices
->fsid
, BTRFS_UUID_SIZE
);
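
/*
 * Recompute the checksum over all pages of the block and compare it with
 * the expected value; for metadata blocks the header fields (bytenr,
 * fsid, chunk tree uuid, generation) are validated as well.
 */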
1542 static void scrub_recheck_block_checksum(struct btrfs_fs_info
*fs_info
,
1543 struct scrub_block
*sblock
,
1544 int is_metadata
, int have_csum
,
1545 const u8
*csum
, u64 generation
,
1549 u8 calculated_csum
[BTRFS_CSUM_SIZE
];
1551 void *mapped_buffer
;
1553 WARN_ON(!sblock
->pagev
[0]->page
);
1555 struct btrfs_header
*h
;
1557 mapped_buffer
= kmap_atomic(sblock
->pagev
[0]->page
);
1558 h
= (struct btrfs_header
*)mapped_buffer
;
1560 if (sblock
->pagev
[0]->logical
!= btrfs_stack_header_bytenr(h
) ||
1561 !scrub_check_fsid(h
->fsid
, sblock
->pagev
[0]) ||
1562 memcmp(h
->chunk_tree_uuid
, fs_info
->chunk_tree_uuid
,
1564 sblock
->header_error
= 1;
1565 } else if (generation
!= btrfs_stack_header_generation(h
)) {
1566 sblock
->header_error
= 1;
1567 sblock
->generation_error
= 1;
1574 mapped_buffer
= kmap_atomic(sblock
->pagev
[0]->page
);
1577 for (page_num
= 0;;) {
1578 if (page_num
== 0 && is_metadata
)
1579 crc
= btrfs_csum_data(
1580 ((u8
*)mapped_buffer
) + BTRFS_CSUM_SIZE
,
1581 crc
, PAGE_SIZE
- BTRFS_CSUM_SIZE
);
1583 crc
= btrfs_csum_data(mapped_buffer
, crc
, PAGE_SIZE
);
1585 kunmap_atomic(mapped_buffer
);
1587 if (page_num
>= sblock
->page_count
)
1589 WARN_ON(!sblock
->pagev
[page_num
]->page
);
1591 mapped_buffer
= kmap_atomic(sblock
->pagev
[page_num
]->page
);
1594 btrfs_csum_final(crc
, calculated_csum
);
1595 if (memcmp(calculated_csum
, csum
, csum_size
))
1596 sblock
->checksum_error
= 1;
1599 static int scrub_repair_block_from_good_copy(struct scrub_block
*sblock_bad
,
1600 struct scrub_block
*sblock_good
)
1605 for (page_num
= 0; page_num
< sblock_bad
->page_count
; page_num
++) {
1608 ret_sub
= scrub_repair_page_from_good_copy(sblock_bad
,
1618 static int scrub_repair_page_from_good_copy(struct scrub_block
*sblock_bad
,
1619 struct scrub_block
*sblock_good
,
1620 int page_num
, int force_write
)
1622 struct scrub_page
*page_bad
= sblock_bad
->pagev
[page_num
];
1623 struct scrub_page
*page_good
= sblock_good
->pagev
[page_num
];
1625 BUG_ON(page_bad
->page
== NULL
);
1626 BUG_ON(page_good
->page
== NULL
);
1627 if (force_write
|| sblock_bad
->header_error
||
1628 sblock_bad
->checksum_error
|| page_bad
->io_error
) {
1632 if (!page_bad
->dev
->bdev
) {
1633 printk_ratelimited(KERN_WARNING
"BTRFS: "
1634 "scrub_repair_page_from_good_copy(bdev == NULL) "
1635 "is unexpected!\n");
1639 bio
= btrfs_io_bio_alloc(GFP_NOFS
, 1);
1642 bio
->bi_bdev
= page_bad
->dev
->bdev
;
1643 bio
->bi_iter
.bi_sector
= page_bad
->physical
>> 9;
1645 ret
= bio_add_page(bio
, page_good
->page
, PAGE_SIZE
, 0);
1646 if (PAGE_SIZE
!= ret
) {
1651 if (btrfsic_submit_bio_wait(WRITE
, bio
)) {
1652 btrfs_dev_stat_inc_and_print(page_bad
->dev
,
1653 BTRFS_DEV_STAT_WRITE_ERRS
);
1654 btrfs_dev_replace_stats_inc(
1655 &sblock_bad
->sctx
->dev_root
->fs_info
->
1656 dev_replace
.num_write_errors
);
1666 static void scrub_write_block_to_dev_replace(struct scrub_block
*sblock
)
1671 * This block is used for the check of the parity on the source device,
1672 * so the data needn't be written into the destination device.
1674 if (sblock
->sparity
)
1677 for (page_num
= 0; page_num
< sblock
->page_count
; page_num
++) {
1680 ret
= scrub_write_page_to_dev_replace(sblock
, page_num
);
1682 btrfs_dev_replace_stats_inc(
1683 &sblock
->sctx
->dev_root
->fs_info
->dev_replace
.
1688 static int scrub_write_page_to_dev_replace(struct scrub_block
*sblock
,
1691 struct scrub_page
*spage
= sblock
->pagev
[page_num
];
1693 BUG_ON(spage
->page
== NULL
);
1694 if (spage
->io_error
) {
1695 void *mapped_buffer
= kmap_atomic(spage
->page
);
1697 memset(mapped_buffer
, 0, PAGE_CACHE_SIZE
);
1698 flush_dcache_page(spage
->page
);
1699 kunmap_atomic(mapped_buffer
);
1701 return scrub_add_page_to_wr_bio(sblock
->sctx
, spage
);
1704 static int scrub_add_page_to_wr_bio(struct scrub_ctx
*sctx
,
1705 struct scrub_page
*spage
)
1707 struct scrub_wr_ctx
*wr_ctx
= &sctx
->wr_ctx
;
1708 struct scrub_bio
*sbio
;
1711 mutex_lock(&wr_ctx
->wr_lock
);
1713 if (!wr_ctx
->wr_curr_bio
) {
1714 wr_ctx
->wr_curr_bio
= kzalloc(sizeof(*wr_ctx
->wr_curr_bio
),
1716 if (!wr_ctx
->wr_curr_bio
) {
1717 mutex_unlock(&wr_ctx
->wr_lock
);
1720 wr_ctx
->wr_curr_bio
->sctx
= sctx
;
1721 wr_ctx
->wr_curr_bio
->page_count
= 0;
1723 sbio
= wr_ctx
->wr_curr_bio
;
1724 if (sbio
->page_count
== 0) {
1727 sbio
->physical
= spage
->physical_for_dev_replace
;
1728 sbio
->logical
= spage
->logical
;
1729 sbio
->dev
= wr_ctx
->tgtdev
;
1732 bio
= btrfs_io_bio_alloc(GFP_NOFS
, wr_ctx
->pages_per_wr_bio
);
1734 mutex_unlock(&wr_ctx
->wr_lock
);
1740 bio
->bi_private
= sbio
;
1741 bio
->bi_end_io
= scrub_wr_bio_end_io
;
1742 bio
->bi_bdev
= sbio
->dev
->bdev
;
1743 bio
->bi_iter
.bi_sector
= sbio
->physical
>> 9;
1745 } else if (sbio
->physical
+ sbio
->page_count
* PAGE_SIZE
!=
1746 spage
->physical_for_dev_replace
||
1747 sbio
->logical
+ sbio
->page_count
* PAGE_SIZE
!=
1749 scrub_wr_submit(sctx
);
1753 ret
= bio_add_page(sbio
->bio
, spage
->page
, PAGE_SIZE
, 0);
1754 if (ret
!= PAGE_SIZE
) {
1755 if (sbio
->page_count
< 1) {
1758 mutex_unlock(&wr_ctx
->wr_lock
);
1761 scrub_wr_submit(sctx
);
1765 sbio
->pagev
[sbio
->page_count
] = spage
;
1766 scrub_page_get(spage
);
1768 if (sbio
->page_count
== wr_ctx
->pages_per_wr_bio
)
1769 scrub_wr_submit(sctx
);
1770 mutex_unlock(&wr_ctx
->wr_lock
);
1775 static void scrub_wr_submit(struct scrub_ctx
*sctx
)
1777 struct scrub_wr_ctx
*wr_ctx
= &sctx
->wr_ctx
;
1778 struct scrub_bio
*sbio
;
1780 if (!wr_ctx
->wr_curr_bio
)
1783 sbio
= wr_ctx
->wr_curr_bio
;
1784 wr_ctx
->wr_curr_bio
= NULL
;
1785 WARN_ON(!sbio
->bio
->bi_bdev
);
1786 scrub_pending_bio_inc(sctx
);
1787 /* process all writes in a single worker thread. Then the block layer
1788 * orders the requests before sending them to the driver which
1789 * doubled the write performance on spinning disks when measured
1791 btrfsic_submit_bio(WRITE
, sbio
->bio
);
1794 static void scrub_wr_bio_end_io(struct bio
*bio
, int err
)
1796 struct scrub_bio
*sbio
= bio
->bi_private
;
1797 struct btrfs_fs_info
*fs_info
= sbio
->dev
->dev_root
->fs_info
;
1802 btrfs_init_work(&sbio
->work
, btrfs_scrubwrc_helper
,
1803 scrub_wr_bio_end_io_worker
, NULL
, NULL
);
1804 btrfs_queue_work(fs_info
->scrub_wr_completion_workers
, &sbio
->work
);
1807 static void scrub_wr_bio_end_io_worker(struct btrfs_work
*work
)
1809 struct scrub_bio
*sbio
= container_of(work
, struct scrub_bio
, work
);
1810 struct scrub_ctx
*sctx
= sbio
->sctx
;
1813 WARN_ON(sbio
->page_count
> SCRUB_PAGES_PER_WR_BIO
);
1815 struct btrfs_dev_replace
*dev_replace
=
1816 &sbio
->sctx
->dev_root
->fs_info
->dev_replace
;
1818 for (i
= 0; i
< sbio
->page_count
; i
++) {
1819 struct scrub_page
*spage
= sbio
->pagev
[i
];
1821 spage
->io_error
= 1;
1822 btrfs_dev_replace_stats_inc(&dev_replace
->
1827 for (i
= 0; i
< sbio
->page_count
; i
++)
1828 scrub_page_put(sbio
->pagev
[i
]);
1832 scrub_pending_bio_dec(sctx
);
1835 static int scrub_checksum(struct scrub_block
*sblock
)
1840 WARN_ON(sblock
->page_count
< 1);
1841 flags
= sblock
->pagev
[0]->flags
;
1843 if (flags
& BTRFS_EXTENT_FLAG_DATA
)
1844 ret
= scrub_checksum_data(sblock
);
1845 else if (flags
& BTRFS_EXTENT_FLAG_TREE_BLOCK
)
1846 ret
= scrub_checksum_tree_block(sblock
);
1847 else if (flags
& BTRFS_EXTENT_FLAG_SUPER
)
1848 (void)scrub_checksum_super(sblock
);
1852 scrub_handle_errored_block(sblock
);
1857 static int scrub_checksum_data(struct scrub_block
*sblock
)
1859 struct scrub_ctx
*sctx
= sblock
->sctx
;
1860 u8 csum
[BTRFS_CSUM_SIZE
];
1869 BUG_ON(sblock
->page_count
< 1);
1870 if (!sblock
->pagev
[0]->have_csum
)
1873 on_disk_csum
= sblock
->pagev
[0]->csum
;
1874 page
= sblock
->pagev
[0]->page
;
1875 buffer
= kmap_atomic(page
);
1877 len
= sctx
->sectorsize
;
1880 u64 l
= min_t(u64
, len
, PAGE_SIZE
);
1882 crc
= btrfs_csum_data(buffer
, crc
, l
);
1883 kunmap_atomic(buffer
);
1888 BUG_ON(index
>= sblock
->page_count
);
1889 BUG_ON(!sblock
->pagev
[index
]->page
);
1890 page
= sblock
->pagev
[index
]->page
;
1891 buffer
= kmap_atomic(page
);
1894 btrfs_csum_final(crc
, csum
);
1895 if (memcmp(csum
, on_disk_csum
, sctx
->csum_size
))
1901 static int scrub_checksum_tree_block(struct scrub_block
*sblock
)
1903 struct scrub_ctx
*sctx
= sblock
->sctx
;
1904 struct btrfs_header
*h
;
1905 struct btrfs_root
*root
= sctx
->dev_root
;
1906 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
1907 u8 calculated_csum
[BTRFS_CSUM_SIZE
];
1908 u8 on_disk_csum
[BTRFS_CSUM_SIZE
];
1910 void *mapped_buffer
;
1919 BUG_ON(sblock
->page_count
< 1);
1920 page
= sblock
->pagev
[0]->page
;
1921 mapped_buffer
= kmap_atomic(page
);
1922 h
= (struct btrfs_header
*)mapped_buffer
;
1923 memcpy(on_disk_csum
, h
->csum
, sctx
->csum_size
);
1926 * we don't use the getter functions here, as we
1927 * a) don't have an extent buffer and
1928 * b) the page is already kmapped
1931 if (sblock
->pagev
[0]->logical
!= btrfs_stack_header_bytenr(h
))
1934 if (sblock
->pagev
[0]->generation
!= btrfs_stack_header_generation(h
))
1937 if (!scrub_check_fsid(h
->fsid
, sblock
->pagev
[0]))
1940 if (memcmp(h
->chunk_tree_uuid
, fs_info
->chunk_tree_uuid
,
1944 len
= sctx
->nodesize
- BTRFS_CSUM_SIZE
;
1945 mapped_size
= PAGE_SIZE
- BTRFS_CSUM_SIZE
;
1946 p
= ((u8
*)mapped_buffer
) + BTRFS_CSUM_SIZE
;
1949 u64 l
= min_t(u64
, len
, mapped_size
);
1951 crc
= btrfs_csum_data(p
, crc
, l
);
1952 kunmap_atomic(mapped_buffer
);
1957 BUG_ON(index
>= sblock
->page_count
);
1958 BUG_ON(!sblock
->pagev
[index
]->page
);
1959 page
= sblock
->pagev
[index
]->page
;
1960 mapped_buffer
= kmap_atomic(page
);
1961 mapped_size
= PAGE_SIZE
;
1965 btrfs_csum_final(crc
, calculated_csum
);
1966 if (memcmp(calculated_csum
, on_disk_csum
, sctx
->csum_size
))
1969 return fail
|| crc_fail
;
1972 static int scrub_checksum_super(struct scrub_block
*sblock
)
1974 struct btrfs_super_block
*s
;
1975 struct scrub_ctx
*sctx
= sblock
->sctx
;
1976 u8 calculated_csum
[BTRFS_CSUM_SIZE
];
1977 u8 on_disk_csum
[BTRFS_CSUM_SIZE
];
1979 void *mapped_buffer
;
1988 BUG_ON(sblock
->page_count
< 1);
1989 page
= sblock
->pagev
[0]->page
;
1990 mapped_buffer
= kmap_atomic(page
);
1991 s
= (struct btrfs_super_block
*)mapped_buffer
;
1992 memcpy(on_disk_csum
, s
->csum
, sctx
->csum_size
);
1994 if (sblock
->pagev
[0]->logical
!= btrfs_super_bytenr(s
))
1997 if (sblock
->pagev
[0]->generation
!= btrfs_super_generation(s
))
2000 if (!scrub_check_fsid(s
->fsid
, sblock
->pagev
[0]))
2003 len
= BTRFS_SUPER_INFO_SIZE
- BTRFS_CSUM_SIZE
;
2004 mapped_size
= PAGE_SIZE
- BTRFS_CSUM_SIZE
;
2005 p
= ((u8
*)mapped_buffer
) + BTRFS_CSUM_SIZE
;
2008 u64 l
= min_t(u64
, len
, mapped_size
);
2010 crc
= btrfs_csum_data(p
, crc
, l
);
2011 kunmap_atomic(mapped_buffer
);
2016 BUG_ON(index
>= sblock
->page_count
);
2017 BUG_ON(!sblock
->pagev
[index
]->page
);
2018 page
= sblock
->pagev
[index
]->page
;
2019 mapped_buffer
= kmap_atomic(page
);
2020 mapped_size
= PAGE_SIZE
;
2024 btrfs_csum_final(crc
, calculated_csum
);
2025 if (memcmp(calculated_csum
, on_disk_csum
, sctx
->csum_size
))
2028 if (fail_cor
+ fail_gen
) {
2030 * if we find an error in a super block, we just report it.
2031 * They will get written with the next transaction commit
2034 spin_lock(&sctx
->stat_lock
);
2035 ++sctx
->stat
.super_errors
;
2036 spin_unlock(&sctx
->stat_lock
);
2038 btrfs_dev_stat_inc_and_print(sblock
->pagev
[0]->dev
,
2039 BTRFS_DEV_STAT_CORRUPTION_ERRS
);
2041 btrfs_dev_stat_inc_and_print(sblock
->pagev
[0]->dev
,
2042 BTRFS_DEV_STAT_GENERATION_ERRS
);
2045 return fail_cor
+ fail_gen
;
2048 static void scrub_block_get(struct scrub_block
*sblock
)
2050 atomic_inc(&sblock
->refs
);
2053 static void scrub_block_put(struct scrub_block
*sblock
)
2055 if (atomic_dec_and_test(&sblock
->refs
)) {
2058 if (sblock
->sparity
)
2059 scrub_parity_put(sblock
->sparity
);
2061 for (i
= 0; i
< sblock
->page_count
; i
++)
2062 scrub_page_put(sblock
->pagev
[i
]);
2067 static void scrub_page_get(struct scrub_page
*spage
)
2069 atomic_inc(&spage
->refs
);
2072 static void scrub_page_put(struct scrub_page
*spage
)
2074 if (atomic_dec_and_test(&spage
->refs
)) {
2076 __free_page(spage
->page
);
2081 static void scrub_submit(struct scrub_ctx
*sctx
)
2083 struct scrub_bio
*sbio
;
2085 if (sctx
->curr
== -1)
2088 sbio
= sctx
->bios
[sctx
->curr
];
2090 scrub_pending_bio_inc(sctx
);
2092 if (!sbio
->bio
->bi_bdev
) {
2094 * this case should not happen. If btrfs_map_block() is
2095 * wrong, it could happen for dev-replace operations on
2096 * missing devices when no mirrors are available, but in
2097 * this case it should already fail the mount.
2098 * This case is handled correctly (but _very_ slowly).
2100 printk_ratelimited(KERN_WARNING
2101 "BTRFS: scrub_submit(bio bdev == NULL) is unexpected!\n");
2102 bio_endio(sbio
->bio
, -EIO
);
2104 btrfsic_submit_bio(READ
, sbio
->bio
);
2108 static int scrub_add_page_to_rd_bio(struct scrub_ctx
*sctx
,
2109 struct scrub_page
*spage
)
2111 struct scrub_block
*sblock
= spage
->sblock
;
2112 struct scrub_bio
*sbio
;
2117 * grab a fresh bio or wait for one to become available
2119 while (sctx
->curr
== -1) {
2120 spin_lock(&sctx
->list_lock
);
2121 sctx
->curr
= sctx
->first_free
;
2122 if (sctx
->curr
!= -1) {
2123 sctx
->first_free
= sctx
->bios
[sctx
->curr
]->next_free
;
2124 sctx
->bios
[sctx
->curr
]->next_free
= -1;
2125 sctx
->bios
[sctx
->curr
]->page_count
= 0;
2126 spin_unlock(&sctx
->list_lock
);
2128 spin_unlock(&sctx
->list_lock
);
2129 wait_event(sctx
->list_wait
, sctx
->first_free
!= -1);
2132 sbio
= sctx
->bios
[sctx
->curr
];
2133 if (sbio
->page_count
== 0) {
2136 sbio
->physical
= spage
->physical
;
2137 sbio
->logical
= spage
->logical
;
2138 sbio
->dev
= spage
->dev
;
2141 bio
= btrfs_io_bio_alloc(GFP_NOFS
, sctx
->pages_per_rd_bio
);
2147 bio
->bi_private
= sbio
;
2148 bio
->bi_end_io
= scrub_bio_end_io
;
2149 bio
->bi_bdev
= sbio
->dev
->bdev
;
2150 bio
->bi_iter
.bi_sector
= sbio
->physical
>> 9;
2152 } else if (sbio
->physical
+ sbio
->page_count
* PAGE_SIZE
!=
2154 sbio
->logical
+ sbio
->page_count
* PAGE_SIZE
!=
2156 sbio
->dev
!= spage
->dev
) {
2161 sbio
->pagev
[sbio
->page_count
] = spage
;
2162 ret
= bio_add_page(sbio
->bio
, spage
->page
, PAGE_SIZE
, 0);
2163 if (ret
!= PAGE_SIZE
) {
2164 if (sbio
->page_count
< 1) {
2173 scrub_block_get(sblock
); /* one for the page added to the bio */
2174 atomic_inc(&sblock
->outstanding_pages
);
2176 if (sbio
->page_count
== sctx
->pages_per_rd_bio
)
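
/*
 * Split the range [logical, logical + len) into PAGE_SIZE sized
 * scrub_pages, attach them to a newly allocated scrub_block and queue
 * each page on a read bio via scrub_add_page_to_rd_bio().
 */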
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace)
{
	struct scrub_block *sblock;
	int index;

	sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
	if (!sblock) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	/* one ref inside this function, plus one for each page added to
	 * a bio later on */
	atomic_set(&sblock->refs, 1);
	sblock->sctx = sctx;
	sblock->no_io_error_seen = 1;

	for (index = 0; len > 0; index++) {
		struct scrub_page *spage;
		u64 l = min_t(u64, len, PAGE_SIZE);

		spage = kzalloc(sizeof(*spage), GFP_NOFS);
		if (!spage) {
leave_nomem:
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			spin_unlock(&sctx->stat_lock);
			scrub_block_put(sblock);
			return -ENOMEM;
		}
		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
		scrub_page_get(spage);
		sblock->pagev[index] = spage;
		spage->sblock = sblock;
		spage->dev = dev;
		spage->flags = flags;
		spage->generation = gen;
		spage->logical = logical;
		spage->physical = physical;
		spage->physical_for_dev_replace = physical_for_dev_replace;
		spage->mirror_num = mirror_num;
		if (csum) {
			spage->have_csum = 1;
			memcpy(spage->csum, csum, sctx->csum_size);
		} else {
			spage->have_csum = 0;
		}
		sblock->page_count++;
		spage->page = alloc_page(GFP_NOFS);
		if (!spage->page)
			goto leave_nomem;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}

	WARN_ON(sblock->page_count == 0);
	for (index = 0; index < sblock->page_count; index++) {
		struct scrub_page *spage = sblock->pagev[index];
		int ret;

		ret = scrub_add_page_to_rd_bio(sctx, spage);
		if (ret) {
			scrub_block_put(sblock);
			return ret;
		}
	}

	if (force)
		scrub_submit(sctx);

	/* last one frees, either here or in bio completion for last page */
	scrub_block_put(sblock);
	return 0;
}
static void scrub_bio_end_io(struct bio *bio, int err)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;

	sbio->err = err;
	sbio->bio = bio;

	btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
}
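/*
 * Note (added): completion of a read bio only records the status; the real
 * work (checksum verification, repair, recycling the scrub_bio) happens in
 * the worker below so that it runs in process context.
 */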
static void scrub_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
	if (sbio->err) {
		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			spage->sblock->no_io_error_seen = 0;
		}
	}

	/* now complete the scrub_block items that have all pages completed */
	for (i = 0; i < sbio->page_count; i++) {
		struct scrub_page *spage = sbio->pagev[i];
		struct scrub_block *sblock = spage->sblock;

		if (atomic_dec_and_test(&sblock->outstanding_pages))
			scrub_block_complete(sblock);
		scrub_block_put(sblock);
	}

	spin_lock(&sctx->list_lock);
	sbio->next_free = sctx->first_free;
	sctx->first_free = sbio->index;
	spin_unlock(&sctx->list_lock);

	if (sctx->is_dev_replace &&
	    atomic_read(&sctx->wr_ctx.flush_all_writes)) {
		mutex_lock(&sctx->wr_ctx.wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_ctx.wr_lock);
	}

	scrub_pending_bio_dec(sctx);
}
static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
				       unsigned long *bitmap,
				       u64 start, u64 len)
{
	int offset;
	int nsectors;
	int sectorsize = sparity->sctx->dev_root->sectorsize;

	if (len >= sparity->stripe_len) {
		bitmap_set(bitmap, 0, sparity->nsectors);
		return;
	}

	start -= sparity->logic_start;
	offset = (int)do_div(start, sparity->stripe_len);
	offset /= sectorsize;
	nsectors = (int)len / sectorsize;

	if (offset + nsectors <= sparity->nsectors) {
		bitmap_set(bitmap, offset, nsectors);
		return;
	}

	/* the marked range wraps around the end of the stripe */
	bitmap_set(bitmap, offset, sparity->nsectors - offset);
	bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
}

static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
						   u64 start, u64 len)
{
	__scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
}

static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
						  u64 start, u64 len)
{
	__scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
}
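/*
 * Added worked example (illustrative, assuming a 64K stripe_len and 4K
 * sectors, i.e. nsectors = 16): marking start = logic_start + 60K with
 * len = 8K gives offset = 15 and nsectors = 2, which wraps, so sector 15 and
 * sector 0 end up set in the bitmap.
 */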
static void scrub_block_complete(struct scrub_block *sblock)
{
	int corrupted = 0;

	if (!sblock->no_io_error_seen) {
		corrupted = 1;
		scrub_handle_errored_block(sblock);
	} else {
		/*
		 * if has checksum error, write via repair mechanism in
		 * dev replace case, otherwise write here in dev replace
		 * case
		 */
		corrupted = scrub_checksum(sblock);
		if (!corrupted && sblock->sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock);
	}

	if (sblock->sparity && corrupted && !sblock->data_corrected) {
		u64 start = sblock->pagev[0]->logical;
		u64 end = sblock->pagev[sblock->page_count - 1]->logical +
			  PAGE_SIZE;

		scrub_parity_mark_sectors_error(sblock->sparity,
						start, end - start);
	}
}
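/*
 * Note (added): sctx->csum_list is pre-filled (via btrfs_lookup_csums_range)
 * for the stripe currently being scrubbed; scrub_find_csum() consumes entries
 * from the head of that list as the scrub position advances, so lookups stay
 * cheap and in order.
 */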
static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u64 len,
			   u8 *csum)
{
	struct btrfs_ordered_sum *sum = NULL;
	unsigned long index;
	unsigned long num_sectors;

	while (!list_empty(&sctx->csum_list)) {
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		if (sum->bytenr > logical)
			return 0;
		if (sum->bytenr + sum->len > logical)
			break;

		++sctx->stat.csum_discards;
		list_del(&sum->list);
		kfree(sum);
		sum = NULL;
	}
	if (!sum)
		return 0;

	index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize;
	num_sectors = sum->len / sctx->sectorsize;
	memcpy(csum, sum->sums + index, sctx->csum_size);
	if (index == num_sectors - 1) {
		list_del(&sum->list);
		kfree(sum);
	}
	return 1;
}
/* scrub extent tries to collect up to 64 kB for each bio */
static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
			u64 physical, struct btrfs_device *dev, u64 flags,
			u64 gen, int mirror_num, u64 physical_for_dev_replace)
{
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		blocksize = sctx->sectorsize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.data_extents_scrubbed++;
		sctx->stat.data_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		blocksize = sctx->nodesize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.tree_extents_scrubbed++;
		sctx->stat.tree_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else {
		blocksize = sctx->sectorsize;
		WARN_ON(1);
	}

	while (len) {
		u64 l = min_t(u64, len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sctx, logical, l, csum);
			if (!have_csum)
				++sctx->stat.no_csum;
			if (sctx->is_dev_replace && !have_csum) {
				ret = copy_nocow_pages(sctx, logical, l,
						       mirror_num,
						       physical_for_dev_replace);
				goto behind_scrub_pages;
			}
		}
		ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
				  mirror_num, have_csum ? csum : NULL, 0,
				  physical_for_dev_replace);
behind_scrub_pages:
		if (ret)
			return ret;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}
	return 0;
}
static int scrub_pages_for_parity(struct scrub_parity *sparity,
				  u64 logical, u64 len,
				  u64 physical, struct btrfs_device *dev,
				  u64 flags, u64 gen, int mirror_num, u8 *csum)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct scrub_block *sblock;
	int index;

	sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
	if (!sblock) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	/* one ref inside this function, plus one for each page added to
	 * a bio later on */
	atomic_set(&sblock->refs, 1);
	sblock->sctx = sctx;
	sblock->no_io_error_seen = 1;
	sblock->sparity = sparity;
	scrub_parity_get(sparity);

	for (index = 0; len > 0; index++) {
		struct scrub_page *spage;
		u64 l = min_t(u64, len, PAGE_SIZE);

		spage = kzalloc(sizeof(*spage), GFP_NOFS);
		if (!spage) {
leave_nomem:
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			spin_unlock(&sctx->stat_lock);
			scrub_block_put(sblock);
			return -ENOMEM;
		}
		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
		/* For scrub block */
		scrub_page_get(spage);
		sblock->pagev[index] = spage;
		/* For scrub parity */
		scrub_page_get(spage);
		list_add_tail(&spage->list, &sparity->spages);
		spage->sblock = sblock;
		spage->dev = dev;
		spage->flags = flags;
		spage->generation = gen;
		spage->logical = logical;
		spage->physical = physical;
		spage->mirror_num = mirror_num;
		if (csum) {
			spage->have_csum = 1;
			memcpy(spage->csum, csum, sctx->csum_size);
		} else {
			spage->have_csum = 0;
		}
		sblock->page_count++;
		spage->page = alloc_page(GFP_NOFS);
		if (!spage->page)
			goto leave_nomem;
		len -= l;
		logical += l;
		physical += l;
	}

	WARN_ON(sblock->page_count == 0);
	for (index = 0; index < sblock->page_count; index++) {
		struct scrub_page *spage = sblock->pagev[index];
		int ret;

		ret = scrub_add_page_to_rd_bio(sctx, spage);
		if (ret) {
			scrub_block_put(sblock);
			return ret;
		}
	}

	/* last one frees, either here or in bio completion for last page */
	scrub_block_put(sblock);
	return 0;
}
static int scrub_extent_for_parity(struct scrub_parity *sparity,
				   u64 logical, u64 len,
				   u64 physical, struct btrfs_device *dev,
				   u64 flags, u64 gen, int mirror_num)
{
	struct scrub_ctx *sctx = sparity->sctx;
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		blocksize = sctx->sectorsize;
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		blocksize = sctx->nodesize;
	} else {
		blocksize = sctx->sectorsize;
		WARN_ON(1);
	}

	while (len) {
		u64 l = min_t(u64, len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sctx, logical, l, csum);
		}
		ret = scrub_pages_for_parity(sparity, logical, l, physical,
					     dev, flags, gen, mirror_num,
					     have_csum ? csum : NULL);
		if (ret)
			return ret;
		len -= l;
		logical += l;
		physical += l;
	}
	return 0;
}
/*
 * Given a physical address, this will calculate its
 * logical offset. If this is a parity stripe, it will return
 * the left-most data stripe's logical offset.
 *
 * return 0 if it is a data stripe, 1 means parity stripe.
 */
static int get_raid56_logic_offset(u64 physical, int num,
				   struct map_lookup *map, u64 *offset,
				   u64 *stripe_start)
{
	int i;
	int j = 0;
	u64 stripe_nr;
	u64 last_offset;
	int stripe_index;
	int rot;

	last_offset = (physical - map->stripes[num].physical) *
		      nr_data_stripes(map);
	if (stripe_start)
		*stripe_start = last_offset;

	*offset = last_offset;
	for (i = 0; i < nr_data_stripes(map); i++) {
		*offset = last_offset + i * map->stripe_len;

		stripe_nr = *offset;
		do_div(stripe_nr, map->stripe_len);
		do_div(stripe_nr, nr_data_stripes(map));

		/* Work out the disk rotation on this stripe-set */
		rot = do_div(stripe_nr, map->num_stripes);
		/* calculate which stripe this data locates */
		rot += i;
		stripe_index = rot % map->num_stripes;
		if (stripe_index == num)
			return 0;
		if (stripe_index < num)
			j++;
	}
	*offset = last_offset + j * map->stripe_len;
	return 1;
}
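/*
 * Added note (illustrative reading of the function above): the physical
 * distance into the device stripe is first scaled to a full-stripe logical
 * distance; the loop then walks the up-to-nr_data_stripes candidate logical
 * offsets of that stripe-set and, after applying the per-set rotation, checks
 * whether the candidate lands on device `num` (data, return 0). If none does,
 * device `num` holds parity for this set and the left-most data offset is
 * returned instead (return 1).
 */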
static void scrub_free_parity(struct scrub_parity *sparity)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct scrub_page *curr, *next;
	int nbits;

	nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
	if (nbits) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors += nbits;
		sctx->stat.uncorrectable_errors += nbits;
		spin_unlock(&sctx->stat_lock);
	}

	list_for_each_entry_safe(curr, next, &sparity->spages, list) {
		list_del_init(&curr->list);
		scrub_page_put(curr);
	}

	kfree(sparity);
}

static void scrub_parity_bio_endio(struct bio *bio, int error)
{
	struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
	struct scrub_ctx *sctx = sparity->sctx;

	if (error)
		bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
			  sparity->nsectors);

	scrub_free_parity(sparity);
	scrub_pending_bio_dec(sctx);
	bio_put(bio);
}
static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct bio *bio;
	struct btrfs_raid_bio *rbio;
	struct scrub_page *spage;
	struct btrfs_bio *bbio = NULL;
	u64 length;
	int ret;

	if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
			   sparity->nsectors))
		goto out;

	length = sparity->logic_end - sparity->logic_start + 1;
	ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE,
			       sparity->logic_start,
			       &length, &bbio, 0, 1);
	if (ret || !bbio || !bbio->raid_map)
		goto bbio_out;

	bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
	if (!bio)
		goto bbio_out;

	bio->bi_iter.bi_sector = sparity->logic_start >> 9;
	bio->bi_private = sparity;
	bio->bi_end_io = scrub_parity_bio_endio;

	rbio = raid56_parity_alloc_scrub_rbio(sctx->dev_root, bio, bbio,
					      length, sparity->scrub_dev,
					      sparity->dbitmap,
					      sparity->nsectors);
	if (!rbio)
		goto rbio_out;

	list_for_each_entry(spage, &sparity->spages, list)
		raid56_parity_add_scrub_pages(rbio, spage->page,
					      spage->logical);

	scrub_pending_bio_inc(sctx);
	raid56_parity_submit_scrub_rbio(rbio);
	return;

rbio_out:
	bio_put(bio);
bbio_out:
	btrfs_put_bbio(bbio);
	bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
		  sparity->nsectors);
	spin_lock(&sctx->stat_lock);
	sctx->stat.malloc_errors++;
	spin_unlock(&sctx->stat_lock);
out:
	scrub_free_parity(sparity);
}

static inline int scrub_calc_parity_bitmap_len(int nsectors)
{
	return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * (BITS_PER_LONG / 8);
}

static void scrub_parity_get(struct scrub_parity *sparity)
{
	atomic_inc(&sparity->refs);
}

static void scrub_parity_put(struct scrub_parity *sparity)
{
	if (!atomic_dec_and_test(&sparity->refs))
		return;

	scrub_parity_check_and_repair(sparity);
}
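/*
 * Note (added): for RAID5/6 the data stripes are read and verified through
 * the normal scrub path; dbitmap records which sectors of the stripe carry
 * data and ebitmap which of them failed. When the last reference on the
 * scrub_parity is dropped, scrub_parity_check_and_repair() above rebuilds and
 * checks parity only for the sectors that are in dbitmap but not in ebitmap.
 */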
static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
						  struct map_lookup *map,
						  struct btrfs_device *sdev,
						  struct btrfs_path *path,
						  u64 logic_start,
						  u64 logic_end)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_root *csum_root = fs_info->csum_root;
	struct btrfs_extent_item *extent;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_device *extent_dev;
	struct scrub_parity *sparity;
	u64 flags;
	u64 generation;
	u64 extent_logical;
	u64 extent_physical;
	u64 extent_len;
	u64 bytes;
	int extent_mirror_num;
	int nsectors;
	int bitmap_len;
	int stop_loop = 0;
	int slot;
	int ret = 0;

	nsectors = map->stripe_len / root->sectorsize;
	bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
	sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
			  GFP_NOFS);
	if (!sparity) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	sparity->stripe_len = map->stripe_len;
	sparity->nsectors = nsectors;
	sparity->sctx = sctx;
	sparity->scrub_dev = sdev;
	sparity->logic_start = logic_start;
	sparity->logic_end = logic_end;
	atomic_set(&sparity->refs, 1);
	INIT_LIST_HEAD(&sparity->spages);
	sparity->dbitmap = sparity->bitmap;
	sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;

	while (logic_start < logic_end) {
		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
			key.type = BTRFS_METADATA_ITEM_KEY;
		else
			key.type = BTRFS_EXTENT_ITEM_KEY;
		key.objectid = logic_start;
		key.offset = (u64)-1;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		if (ret > 0) {
			ret = btrfs_previous_extent_item(root, path, 0);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}

		while (1) {
			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;
				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.type == BTRFS_METADATA_ITEM_KEY)
				bytes = root->nodesize;
			else
				bytes = key.offset;

			if (key.objectid + bytes <= logic_start)
				goto next;

			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
			    key.type != BTRFS_METADATA_ITEM_KEY)
				goto next;

			if (key.objectid > logic_end) {
				stop_loop = 1;
				break;
			}

			while (key.objectid >= logic_start + map->stripe_len)
				logic_start += map->stripe_len;

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if (key.objectid < logic_start &&
			    (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
				btrfs_err(fs_info,
					  "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
					  key.objectid, logic_start);
				goto next;
			}

			extent_logical = key.objectid;
			extent_len = bytes;

			if (extent_logical < logic_start) {
				extent_len -= logic_start - extent_logical;
				extent_logical = logic_start;
			}

			if (extent_logical + extent_len >
			    logic_start + map->stripe_len)
				extent_len = logic_start + map->stripe_len -
					     extent_logical;

			scrub_parity_mark_sectors_data(sparity, extent_logical,
						       extent_len);

			scrub_remap_extent(fs_info, extent_logical,
					   extent_len, &extent_physical,
					   &extent_dev,
					   &extent_mirror_num);

			ret = btrfs_lookup_csums_range(csum_root,
						extent_logical,
						extent_logical + extent_len - 1,
						&sctx->csum_list, 1);
			if (ret)
				goto out;

			ret = scrub_extent_for_parity(sparity, extent_logical,
						      extent_len,
						      extent_physical,
						      extent_dev, flags,
						      generation,
						      extent_mirror_num);
			scrub_free_csums(sctx);
			if (ret)
				goto out;

			if (extent_logical + extent_len <
			    key.objectid + bytes) {
				logic_start += map->stripe_len;

				if (logic_start >= logic_end) {
					stop_loop = 1;
					break;
				}

				if (logic_start < key.objectid + bytes)
					/* same extent continues in the next
					 * stripe, rescan from there */
					break;
			}
next:
			path->slots[0]++;
		}

		btrfs_release_path(path);

		if (stop_loop)
			break;

		logic_start += map->stripe_len;
	}
out:
	if (ret < 0)
		scrub_parity_mark_sectors_error(sparity, logic_start,
						logic_end - logic_start + 1);
	scrub_parity_put(sparity);
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_ctx.wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_ctx.wr_lock);

	btrfs_release_path(path);
	return ret < 0 ? ret : 0;
}
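/*
 * Note (added): scrub_stripe() walks one device stripe of a chunk. The
 * per-profile setup below only differs in how far `physical` advances per
 * iteration (increment) and which mirror is read: RAID0/10 skip the stripes
 * owned by other devices, RAID1/DUP read one particular copy, and RAID5/6
 * additionally hand whole parity stripes over to scrub_raid56_parity().
 */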
static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
					   struct map_lookup *map,
					   struct btrfs_device *scrub_dev,
					   int num, u64 base, u64 length,
					   int is_dev_replace)
{
	struct btrfs_path *path, *ppath;
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_root *csum_root = fs_info->csum_root;
	struct btrfs_extent_item *extent;
	struct blk_plug plug;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct reada_control *reada1;
	struct reada_control *reada2;
	struct btrfs_key key_start;
	struct btrfs_key key_end;
	u64 increment = map->stripe_len;
	u64 offset;
	u64 extent_logical;
	u64 extent_physical;
	u64 extent_len;
	u64 stripe_logical;
	u64 stripe_end;
	u64 logical;
	u64 logic_end;
	u64 physical;
	u64 physical_end;
	u64 nstripes;
	u64 generation;
	u64 flags;
	u64 bytes;
	struct btrfs_device *extent_dev;
	int extent_mirror_num;
	int mirror_num;
	int stop_loop = 0;
	int slot;
	int ret;

	physical = map->stripes[num].physical;
	offset = 0;
	nstripes = length;
	do_div(nstripes, map->stripe_len);
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		offset = map->stripe_len * num;
		increment = map->stripe_len * map->num_stripes;
		mirror_num = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;
		offset = map->stripe_len * (num / map->sub_stripes);
		increment = map->stripe_len * factor;
		mirror_num = num % map->sub_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		get_raid56_logic_offset(physical, num, map, &offset, NULL);
		increment = map->stripe_len * nr_data_stripes(map);
		mirror_num = 1;
	} else {
		increment = map->stripe_len;
		mirror_num = 1;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ppath = btrfs_alloc_path();
	if (!ppath) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	/*
	 * work on commit root. The related disk blocks are static as
	 * long as COW is applied. This means, it is save to rewrite
	 * them to repair disk errors without any race conditions
	 */
	path->search_commit_root = 1;
	path->skip_locking = 1;

	ppath->search_commit_root = 1;
	ppath->skip_locking = 1;
	/*
	 * trigger the readahead for extent tree csum tree and wait for
	 * completion. During readahead, the scrub is officially paused
	 * to not hold off transaction commits
	 */
	logical = base + offset;
	physical_end = physical + nstripes * map->stripe_len;
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		get_raid56_logic_offset(physical_end, num,
					map, &logic_end, NULL);
		logic_end += base;
	} else {
		logic_end = logical + increment * nstripes;
	}
	wait_event(sctx->list_wait,
		   atomic_read(&sctx->bios_in_flight) == 0);
	scrub_blocked_if_needed(fs_info);

	/* FIXME it might be better to start readahead at commit root */
	key_start.objectid = logical;
	key_start.type = BTRFS_EXTENT_ITEM_KEY;
	key_start.offset = (u64)0;
	key_end.objectid = logic_end;
	key_end.type = BTRFS_METADATA_ITEM_KEY;
	key_end.offset = (u64)-1;
	reada1 = btrfs_reada_add(root, &key_start, &key_end);

	key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_start.type = BTRFS_EXTENT_CSUM_KEY;
	key_start.offset = logical;
	key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_end.type = BTRFS_EXTENT_CSUM_KEY;
	key_end.offset = logic_end;
	reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);

	if (!IS_ERR(reada1))
		btrfs_reada_wait(reada1);
	if (!IS_ERR(reada2))
		btrfs_reada_wait(reada2);

	/*
	 * collect all data csums for the stripe to avoid seeking during
	 * the scrub. This might currently (crc32) end up to be about 1MB
	 */
	blk_start_plug(&plug);

	/*
	 * now find all extents for each stripe and scrub them
	 */
	ret = 0;
	while (physical < physical_end) {
		/* for raid56, we skip parity stripe */
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
			ret = get_raid56_logic_offset(physical, num,
						      map, &logical,
						      &stripe_logical);
			logical += base;
			if (ret) {
				stripe_logical += base;
				stripe_end = stripe_logical + increment - 1;
				ret = scrub_raid56_parity(sctx, map, scrub_dev,
							  ppath, stripe_logical,
							  stripe_end);
				if (ret)
					goto out;
				goto skip;
			}
		}
		/*
		 * canceled?
		 */
		if (atomic_read(&fs_info->scrub_cancel_req) ||
		    atomic_read(&sctx->cancel_req)) {
			ret = -ECANCELED;
			goto out;
		}
		/*
		 * check to see if we have to pause
		 */
		if (atomic_read(&fs_info->scrub_pause_req)) {
			/* push queued extents */
			atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
			scrub_submit(sctx);
			mutex_lock(&sctx->wr_ctx.wr_lock);
			scrub_wr_submit(sctx);
			mutex_unlock(&sctx->wr_ctx.wr_lock);
			wait_event(sctx->list_wait,
				   atomic_read(&sctx->bios_in_flight) == 0);
			atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
			scrub_blocked_if_needed(fs_info);
		}

		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
			key.type = BTRFS_METADATA_ITEM_KEY;
		else
			key.type = BTRFS_EXTENT_ITEM_KEY;
again:
		key.objectid = logical;
		key.offset = (u64)-1;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		if (ret > 0) {
			ret = btrfs_previous_extent_item(root, path, 0);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				/* there's no smaller item, so stick with the
				 * larger one */
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}

		stop_loop = 0;
		while (1) {
			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;

				stop_loop = 1;
				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.type == BTRFS_METADATA_ITEM_KEY)
				bytes = root->nodesize;
			else
				bytes = key.offset;

			if (key.objectid + bytes <= logical)
				goto next;

			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
			    key.type != BTRFS_METADATA_ITEM_KEY)
				goto next;

			if (key.objectid >= logical + map->stripe_len) {
				/* out of this device extent */
				if (key.objectid >= logic_end)
					stop_loop = 1;
				break;
			}

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if (key.objectid < logical &&
			    (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
				btrfs_err(fs_info,
					   "scrub: tree block %llu spanning "
					   "stripes, ignored. logical=%llu",
					   key.objectid, logical);
				goto next;
			}

			extent_logical = key.objectid;
			extent_len = bytes;

			/*
			 * trim extent to this stripe
			 */
			if (extent_logical < logical) {
				extent_len -= logical - extent_logical;
				extent_logical = logical;
			}
			if (extent_logical + extent_len >
			    logical + map->stripe_len) {
				extent_len = logical + map->stripe_len -
					     extent_logical;
			}

			extent_physical = extent_logical - logical + physical;
			extent_dev = scrub_dev;
			extent_mirror_num = mirror_num;
			if (is_dev_replace)
				scrub_remap_extent(fs_info, extent_logical,
						   extent_len,
						   &extent_physical,
						   &extent_dev,
						   &extent_mirror_num);

			ret = btrfs_lookup_csums_range(csum_root, logical,
						logical + map->stripe_len - 1,
						&sctx->csum_list, 1);
			if (ret)
				goto out;

			ret = scrub_extent(sctx, extent_logical, extent_len,
					   extent_physical, extent_dev, flags,
					   generation, extent_mirror_num,
					   extent_logical - logical + physical);
			if (ret)
				goto out;

			scrub_free_csums(sctx);
			if (extent_logical + extent_len <
			    key.objectid + bytes) {
				if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
					/*
					 * loop until we find next data stripe
					 * or we have finished all stripes.
					 */
loop:
					physical += map->stripe_len;
					ret = get_raid56_logic_offset(physical,
							num, map, &logical,
							&stripe_logical);
					logical += base;

					if (ret && physical < physical_end) {
						stripe_logical += base;
						stripe_end = stripe_logical +
								increment - 1;
						ret = scrub_raid56_parity(sctx,
							map, scrub_dev, ppath,
							stripe_logical,
							stripe_end);
						if (ret)
							goto out;
						goto loop;
					}
				} else {
					physical += map->stripe_len;
					logical += increment;
				}
				if (logical < key.objectid + bytes) {
					cond_resched();
					goto again;
				}

				if (physical >= physical_end) {
					stop_loop = 1;
					break;
				}
			}
next:
			path->slots[0]++;
		}
		btrfs_release_path(path);
skip:
		logical += increment;
		physical += map->stripe_len;
		spin_lock(&sctx->stat_lock);
		if (stop_loop)
			sctx->stat.last_physical = map->stripes[num].physical +
						   length;
		else
			sctx->stat.last_physical = physical;
		spin_unlock(&sctx->stat_lock);
		if (stop_loop)
			break;
	}
out:
	/* push queued extents */
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_ctx.wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_ctx.wr_lock);

	blk_finish_plug(&plug);
	btrfs_free_path(path);
	btrfs_free_path(ppath);
	return ret < 0 ? ret : 0;
}
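/*
 * Note (added): scrub_chunk() translates a device extent back to its chunk
 * mapping and then scrubs, one by one, every stripe of that chunk that lives
 * at the given physical offset on the device being scrubbed.
 */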
static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
					  struct btrfs_device *scrub_dev,
					  u64 chunk_tree, u64 chunk_objectid,
					  u64 chunk_offset, u64 length,
					  u64 dev_offset, int is_dev_replace)
{
	struct btrfs_mapping_tree *map_tree =
		&sctx->dev_root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	int i;
	int ret = 0;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);

	if (!em)
		return -EINVAL;

	map = (struct map_lookup *)em->bdev;
	if (em->start != chunk_offset)
		goto out;

	if (em->len < length)
		goto out;

	for (i = 0; i < map->num_stripes; ++i) {
		if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
		    map->stripes[i].physical == dev_offset) {
			ret = scrub_stripe(sctx, map, scrub_dev, i,
					   chunk_offset, length,
					   is_dev_replace);
			if (ret)
				goto out;
		}
	}
out:
	free_extent_map(em);

	return ret;
}
static noinline_for_stack
int scrub_enumerate_chunks(struct scrub_ctx *sctx,
			   struct btrfs_device *scrub_dev, u64 start, u64 end,
			   int is_dev_replace)
{
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	struct btrfs_root *root = sctx->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_block_group_cache *cache;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = scrub_dev->devid;
	key.offset = 0ull;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] >=
			    btrfs_header_nritems(path->nodes[0])) {
				ret = btrfs_next_leaf(root, path);
				if (ret)
					break;
			}
		}

		l = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.objectid != scrub_dev->devid)
			break;

		if (found_key.type != BTRFS_DEV_EXTENT_KEY)
			break;

		if (found_key.offset >= end)
			break;

		if (found_key.offset < key.offset)
			break;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (found_key.offset + length <= start)
			goto skip;

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);

		/*
		 * get a reference on the corresponding block group to prevent
		 * the chunk from going away while we scrub it
		 */
		cache = btrfs_lookup_block_group(fs_info, chunk_offset);

		/* some chunks are removed but not committed to disk yet,
		 * continue scrubbing */
		if (!cache)
			goto skip;

		dev_replace->cursor_right = found_key.offset + length;
		dev_replace->cursor_left = found_key.offset;
		dev_replace->item_needs_writeback = 1;
		ret = scrub_chunk(sctx, scrub_dev, chunk_tree, chunk_objectid,
				  chunk_offset, length, found_key.offset,
				  is_dev_replace);

		/*
		 * flush, submit all pending read and write bios, afterwards
		 * wait for them.
		 * Note that in the dev replace case, a read request causes
		 * write requests that are submitted in the read completion
		 * worker. Therefore in the current situation, it is required
		 * that all write requests are flushed, so that all read and
		 * write requests are really completed when bios_in_flight
		 * changes to 0.
		 */
		atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
		scrub_submit(sctx);
		mutex_lock(&sctx->wr_ctx.wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_ctx.wr_lock);

		wait_event(sctx->list_wait,
			   atomic_read(&sctx->bios_in_flight) == 0);
		atomic_inc(&fs_info->scrubs_paused);
		wake_up(&fs_info->scrub_pause_wait);

		/*
		 * must be called before we decrease @scrub_paused.
		 * make sure we don't block transaction commit while
		 * we are waiting pending workers finished.
		 */
		wait_event(sctx->list_wait,
			   atomic_read(&sctx->workers_pending) == 0);
		atomic_set(&sctx->wr_ctx.flush_all_writes, 0);

		mutex_lock(&fs_info->scrub_lock);
		__scrub_blocked_if_needed(fs_info);
		atomic_dec(&fs_info->scrubs_paused);
		mutex_unlock(&fs_info->scrub_lock);
		wake_up(&fs_info->scrub_pause_wait);

		btrfs_put_block_group(cache);
		if (ret)
			break;
		if (is_dev_replace &&
		    atomic64_read(&dev_replace->num_write_errors) > 0) {
			ret = -EIO;
			break;
		}
		if (sctx->stat.malloc_errors > 0) {
			ret = -ENOMEM;
			break;
		}

		dev_replace->cursor_left = dev_replace->cursor_right;
		dev_replace->item_needs_writeback = 1;
skip:
		key.offset = found_key.offset + length;
		btrfs_release_path(path);
	}

	btrfs_free_path(path);

	/*
	 * ret can still be 1 from search_slot or next_leaf,
	 * that's not an error
	 */
	return ret < 0 ? ret : 0;
}
static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
					   struct btrfs_device *scrub_dev)
{
	int i;
	u64 bytenr;
	u64 gen;
	int ret;
	struct btrfs_root *root = sctx->dev_root;

	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
		return -EIO;

	/* Seed devices of a new filesystem have their own generation. */
	if (scrub_dev->fs_devices != root->fs_info->fs_devices)
		gen = scrub_dev->generation;
	else
		gen = root->fs_info->last_trans_committed;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE >
		    scrub_dev->commit_total_bytes)
			break;

		ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
				  scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
				  NULL, 1, bytenr);
		if (ret)
			return ret;
	}
	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);

	return 0;
}
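/*
 * Note (added): the scrub workqueues below are shared by all scrubs running
 * on the filesystem; scrub_workers_refcnt counts the users and the queues are
 * only torn down again when the last scrub finishes.
 */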
/*
 * get a reference count on fs_info->scrub_workers. start worker if necessary
 */
static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
						int is_dev_replace)
{
	int ret = 0;
	int flags = WQ_FREEZABLE | WQ_UNBOUND;
	int max_active = fs_info->thread_pool_size;

	if (fs_info->scrub_workers_refcnt == 0) {
		if (is_dev_replace)
			fs_info->scrub_workers =
				btrfs_alloc_workqueue("btrfs-scrub", flags,
						      1, 4);
		else
			fs_info->scrub_workers =
				btrfs_alloc_workqueue("btrfs-scrub", flags,
						      max_active, 4);
		if (!fs_info->scrub_workers) {
			ret = -ENOMEM;
			goto out;
		}
		fs_info->scrub_wr_completion_workers =
			btrfs_alloc_workqueue("btrfs-scrubwrc", flags,
					      max_active, 2);
		if (!fs_info->scrub_wr_completion_workers) {
			ret = -ENOMEM;
			goto out;
		}
		fs_info->scrub_nocow_workers =
			btrfs_alloc_workqueue("btrfs-scrubnc", flags, 1, 0);
		if (!fs_info->scrub_nocow_workers) {
			ret = -ENOMEM;
			goto out;
		}
	}
	++fs_info->scrub_workers_refcnt;
out:
	return ret;
}

static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
{
	if (--fs_info->scrub_workers_refcnt == 0) {
		btrfs_destroy_workqueue(fs_info->scrub_workers);
		btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
		btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
	}
	WARN_ON(fs_info->scrub_workers_refcnt < 0);
}
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
		    u64 end, struct btrfs_scrub_progress *progress,
		    int readonly, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int ret;
	struct btrfs_device *dev;
	struct rcu_string *name;

	if (btrfs_fs_closing(fs_info))
		return -EINVAL;

	if (fs_info->chunk_root->nodesize > BTRFS_STRIPE_LEN) {
		/*
		 * in this case scrub is unable to calculate the checksum
		 * the way scrub is implemented. Do not handle this
		 * situation at all because it won't ever happen.
		 */
		btrfs_err(fs_info,
			   "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
		       fs_info->chunk_root->nodesize, BTRFS_STRIPE_LEN);
		return -EINVAL;
	}

	if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
		/* not supported for data w/o checksums */
		btrfs_err(fs_info,
			   "scrub: size assumption sectorsize != PAGE_SIZE "
			   "(%d != %lu) fails",
		       fs_info->chunk_root->sectorsize, PAGE_SIZE);
		return -EINVAL;
	}

	if (fs_info->chunk_root->nodesize >
	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
	    fs_info->chunk_root->sectorsize >
	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
		/*
		 * would exhaust the array bounds of pagev member in
		 * struct scrub_block
		 */
		btrfs_err(fs_info, "scrub: size assumption nodesize and sectorsize "
			   "<= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
		       fs_info->chunk_root->nodesize,
		       SCRUB_MAX_PAGES_PER_BLOCK,
		       fs_info->chunk_root->sectorsize,
		       SCRUB_MAX_PAGES_PER_BLOCK);
		return -EINVAL;
	}

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info, devid, NULL, NULL);
	if (!dev || (dev->missing && !is_dev_replace)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -ENODEV;
	}

	if (!is_dev_replace && !readonly && !dev->writeable) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		rcu_read_lock();
		name = rcu_dereference(dev->name);
		btrfs_err(fs_info, "scrub: device %s is not writable",
			  name->str);
		rcu_read_unlock();
		return -EROFS;
	}

	mutex_lock(&fs_info->scrub_lock);
	if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -EIO;
	}

	btrfs_dev_replace_lock(&fs_info->dev_replace);
	if (dev->scrub_device ||
	    (!is_dev_replace &&
	     btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
		btrfs_dev_replace_unlock(&fs_info->dev_replace);
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -EINPROGRESS;
	}
	btrfs_dev_replace_unlock(&fs_info->dev_replace);

	ret = scrub_workers_get(fs_info, is_dev_replace);
	if (ret) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return ret;
	}

	sctx = scrub_setup_ctx(dev, is_dev_replace);
	if (IS_ERR(sctx)) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(fs_info);
		return PTR_ERR(sctx);
	}
	sctx->readonly = readonly;
	dev->scrub_device = sctx;
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	/*
	 * checking @scrub_pause_req here, we can avoid
	 * race between committing transaction and scrubbing.
	 */
	__scrub_blocked_if_needed(fs_info);
	atomic_inc(&fs_info->scrubs_running);
	mutex_unlock(&fs_info->scrub_lock);

	if (!is_dev_replace) {
		/*
		 * by holding device list mutex, we can
		 * kick off writing super in log tree sync.
		 */
		mutex_lock(&fs_info->fs_devices->device_list_mutex);
		ret = scrub_supers(sctx, dev);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
	}

	if (!ret)
		ret = scrub_enumerate_chunks(sctx, dev, start, end,
					     is_dev_replace);

	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
	atomic_dec(&fs_info->scrubs_running);
	wake_up(&fs_info->scrub_pause_wait);

	wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);

	if (progress)
		memcpy(progress, &sctx->stat, sizeof(*progress));

	mutex_lock(&fs_info->scrub_lock);
	dev->scrub_device = NULL;
	scrub_workers_put(fs_info);
	mutex_unlock(&fs_info->scrub_lock);

	scrub_put_ctx(sctx);

	return ret;
}
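/*
 * Note (added): pause/continue are driven by transaction commit.
 * btrfs_scrub_pause() raises scrub_pause_req and waits until every running
 * scrub has parked itself (scrubs_paused == scrubs_running);
 * btrfs_scrub_continue() simply drops the request again and wakes the waiters.
 */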
void btrfs_scrub_pause(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrub_pause_req);
	while (atomic_read(&fs_info->scrubs_paused) !=
	       atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_paused) ==
			   atomic_read(&fs_info->scrubs_running));
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);
}

void btrfs_scrub_continue(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	atomic_dec(&fs_info->scrub_pause_req);
	wake_up(&fs_info->scrub_pause_wait);
}

int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	if (!atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->scrub_cancel_req);
	while (atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_running) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrub_cancel_req);
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}
int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
			   struct btrfs_device *dev)
{
	struct scrub_ctx *sctx;

	mutex_lock(&fs_info->scrub_lock);
	sctx = dev->scrub_device;
	if (!sctx) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}
	atomic_inc(&sctx->cancel_req);
	while (dev->scrub_device) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   dev->scrub_device == NULL);
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
			 struct btrfs_scrub_progress *progress)
{
	struct btrfs_device *dev;
	struct scrub_ctx *sctx = NULL;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(root->fs_info, devid, NULL, NULL);
	if (dev)
		sctx = dev->scrub_device;
	if (sctx)
		memcpy(progress, &sctx->stat, sizeof(*progress));
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
}
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num)
{
	u64 mapped_length;
	struct btrfs_bio *bbio = NULL;
	int ret;

	mapped_length = extent_len;
	ret = btrfs_map_block(fs_info, READ, extent_logical,
			      &mapped_length, &bbio, 0);
	if (ret || !bbio || mapped_length < extent_len ||
	    !bbio->stripes[0].dev->bdev) {
		btrfs_put_bbio(bbio);
		return;
	}

	*extent_physical = bbio->stripes[0].physical;
	*extent_mirror_num = bbio->mirror_num;
	*extent_dev = bbio->stripes[0].dev;
	btrfs_put_bbio(bbio);
}
static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
			      struct scrub_wr_ctx *wr_ctx,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_device *dev,
			      int is_dev_replace)
{
	WARN_ON(wr_ctx->wr_curr_bio != NULL);

	mutex_init(&wr_ctx->wr_lock);
	wr_ctx->wr_curr_bio = NULL;
	if (!is_dev_replace)
		return 0;

	WARN_ON(!dev->bdev);
	wr_ctx->pages_per_wr_bio = min_t(int, SCRUB_PAGES_PER_WR_BIO,
					 bio_get_nr_vecs(dev->bdev));
	wr_ctx->tgtdev = dev;
	atomic_set(&wr_ctx->flush_all_writes, 0);
	return 0;
}

static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
{
	mutex_lock(&wr_ctx->wr_lock);
	kfree(wr_ctx->wr_curr_bio);
	wr_ctx->wr_curr_bio = NULL;
	mutex_unlock(&wr_ctx->wr_lock);
}
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
			    int mirror_num, u64 physical_for_dev_replace)
{
	struct scrub_copy_nocow_ctx *nocow_ctx;
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
	if (!nocow_ctx) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	scrub_pending_trans_workers_inc(sctx);

	nocow_ctx->sctx = sctx;
	nocow_ctx->logical = logical;
	nocow_ctx->len = len;
	nocow_ctx->mirror_num = mirror_num;
	nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
	btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper,
			copy_nocow_pages_worker, NULL, NULL);
	INIT_LIST_HEAD(&nocow_ctx->inodes);
	btrfs_queue_work(fs_info->scrub_nocow_workers,
			 &nocow_ctx->work);

	return 0;
}

static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
{
	struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
	struct scrub_nocow_inode *nocow_inode;

	nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
	if (!nocow_inode)
		return -ENOMEM;
	nocow_inode->inum = inum;
	nocow_inode->offset = offset;
	nocow_inode->root = root;
	list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
	return 0;
}
3943 static void copy_nocow_pages_worker(struct btrfs_work
*work
)
3945 struct scrub_copy_nocow_ctx
*nocow_ctx
=
3946 container_of(work
, struct scrub_copy_nocow_ctx
, work
);
3947 struct scrub_ctx
*sctx
= nocow_ctx
->sctx
;
3948 u64 logical
= nocow_ctx
->logical
;
3949 u64 len
= nocow_ctx
->len
;
3950 int mirror_num
= nocow_ctx
->mirror_num
;
3951 u64 physical_for_dev_replace
= nocow_ctx
->physical_for_dev_replace
;
3953 struct btrfs_trans_handle
*trans
= NULL
;
3954 struct btrfs_fs_info
*fs_info
;
3955 struct btrfs_path
*path
;
3956 struct btrfs_root
*root
;
3957 int not_written
= 0;
3959 fs_info
= sctx
->dev_root
->fs_info
;
3960 root
= fs_info
->extent_root
;
3962 path
= btrfs_alloc_path();
3964 spin_lock(&sctx
->stat_lock
);
3965 sctx
->stat
.malloc_errors
++;
3966 spin_unlock(&sctx
->stat_lock
);
3971 trans
= btrfs_join_transaction(root
);
3972 if (IS_ERR(trans
)) {
3977 ret
= iterate_inodes_from_logical(logical
, fs_info
, path
,
3978 record_inode_for_nocow
, nocow_ctx
);
3979 if (ret
!= 0 && ret
!= -ENOENT
) {
3980 btrfs_warn(fs_info
, "iterate_inodes_from_logical() failed: log %llu, "
3981 "phys %llu, len %llu, mir %u, ret %d",
3982 logical
, physical_for_dev_replace
, len
, mirror_num
,
3988 btrfs_end_transaction(trans
, root
);
3990 while (!list_empty(&nocow_ctx
->inodes
)) {
3991 struct scrub_nocow_inode
*entry
;
3992 entry
= list_first_entry(&nocow_ctx
->inodes
,
3993 struct scrub_nocow_inode
,
3995 list_del_init(&entry
->list
);
3996 ret
= copy_nocow_pages_for_inode(entry
->inum
, entry
->offset
,
3997 entry
->root
, nocow_ctx
);
3999 if (ret
== COPY_COMPLETE
) {
4007 while (!list_empty(&nocow_ctx
->inodes
)) {
4008 struct scrub_nocow_inode
*entry
;
4009 entry
= list_first_entry(&nocow_ctx
->inodes
,
4010 struct scrub_nocow_inode
,
4012 list_del_init(&entry
->list
);
4015 if (trans
&& !IS_ERR(trans
))
4016 btrfs_end_transaction(trans
, root
);
4018 btrfs_dev_replace_stats_inc(&fs_info
->dev_replace
.
4019 num_uncorrectable_read_errors
);
4021 btrfs_free_path(path
);
4024 scrub_pending_trans_workers_dec(sctx
);
static int check_extent_to_block(struct inode *inode, u64 start, u64 len,
				 u64 logical)
{
	struct extent_state *cached_state = NULL;
	struct btrfs_ordered_extent *ordered;
	struct extent_io_tree *io_tree;
	struct extent_map *em;
	u64 lockstart = start, lockend = start + len - 1;
	int ret = 0;

	io_tree = &BTRFS_I(inode)->io_tree;

	lock_extent_bits(io_tree, lockstart, lockend, 0, &cached_state);
	ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
	if (ordered) {
		btrfs_put_ordered_extent(ordered);
		ret = 1;
		goto out_unlock;
	}

	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_unlock;
	}

	/*
	 * This extent does not actually cover the logical extent anymore,
	 * move on to the next inode.
	 */
	if (em->block_start > logical ||
	    em->block_start + em->block_len < logical + len) {
		free_extent_map(em);
		ret = 1;
		goto out_unlock;
	}
	free_extent_map(em);

out_unlock:
	unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
			     GFP_NOFS);
	return ret;
}
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
				      struct scrub_copy_nocow_ctx *nocow_ctx)
{
	struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
	struct btrfs_key key;
	struct inode *inode;
	struct page *page;
	struct btrfs_root *local_root;
	struct extent_io_tree *io_tree;
	u64 physical_for_dev_replace;
	u64 nocow_ctx_logical;
	u64 len = nocow_ctx->len;
	unsigned long index;
	int srcu_index;
	int ret = 0;
	int err = 0;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(local_root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
		return PTR_ERR(local_root);
	}

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	/* Avoid truncate/dio/punch hole.. */
	mutex_lock(&inode->i_mutex);
	inode_dio_wait(inode);

	physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
	io_tree = &BTRFS_I(inode)->io_tree;
	nocow_ctx_logical = nocow_ctx->logical;

	ret = check_extent_to_block(inode, offset, len, nocow_ctx_logical);
	if (ret) {
		ret = ret > 0 ? 0 : ret;
		goto out;
	}

	while (len >= PAGE_CACHE_SIZE) {
		index = offset >> PAGE_CACHE_SHIFT;
again:
		page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
		if (!page) {
			btrfs_err(fs_info, "find_or_create_page() failed");
			ret = -ENOMEM;
			goto out;
		}

		if (PageUptodate(page)) {
			if (PageDirty(page))
				goto next_page;
		} else {
			ClearPageError(page);
			err = extent_read_full_page(io_tree, page,
						    btrfs_get_extent,
						    nocow_ctx->mirror_num);
			if (err) {
				ret = err;
				goto next_page;
			}

			lock_page(page);
			/*
			 * If the page has been remove from the page cache,
			 * the data on it is meaningless, because it may be
			 * old one, the new data may be written into the new
			 * page in the page cache.
			 */
			if (page->mapping != inode->i_mapping) {
				unlock_page(page);
				page_cache_release(page);
				goto again;
			}
			if (!PageUptodate(page)) {
				ret = -EIO;
				goto next_page;
			}
		}

		ret = check_extent_to_block(inode, offset, len,
					    nocow_ctx_logical);
		if (ret) {
			ret = ret > 0 ? 0 : ret;
			goto next_page;
		}

		err = write_page_nocow(nocow_ctx->sctx,
				       physical_for_dev_replace, page);
		if (err)
			ret = err;
next_page:
		unlock_page(page);
		page_cache_release(page);

		if (ret)
			break;

		offset += PAGE_CACHE_SIZE;
		physical_for_dev_replace += PAGE_CACHE_SIZE;
		nocow_ctx_logical += PAGE_CACHE_SIZE;
		len -= PAGE_CACHE_SIZE;
	}
	ret = COPY_COMPLETE;
out:
	mutex_unlock(&inode->i_mutex);
	iput(inode);
	return ret;
}
static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page)
{
	struct bio *bio;
	struct btrfs_device *dev;
	int ret;

	dev = sctx->wr_ctx.tgtdev;
	if (!dev)
		return -EIO;
	if (!dev->bdev) {
		printk_ratelimited(KERN_WARNING
			"BTRFS: scrub write_page_nocow(bdev == NULL) is unexpected!\n");
		return -EIO;
	}
	bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
	if (!bio) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}
	bio->bi_iter.bi_size = 0;
	bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
	bio->bi_bdev = dev->bdev;
	ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
	if (ret != PAGE_CACHE_SIZE) {
leave_with_eio:
		bio_put(bio);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
		return -EIO;
	}

	if (btrfsic_submit_bio_wait(WRITE_SYNC, bio))
		goto leave_with_eio;