/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "check-integrity.h"
#include "rcu-string.h"
/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and the super blocks and verifies the checksums. In case a bad
 * checksum is found or the extent cannot be read, good data will be written
 * back if available.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */
struct scrub_block;
struct scrub_ctx;

#define SCRUB_PAGES_PER_BIO	16	/* 64k per bio */
#define SCRUB_BIOS_PER_CTX	16	/* 1 MB per device in flight */

/*
 * the following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 * Values larger than BTRFS_STRIPE_LEN are not supported.
 */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */
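
/*
 * Overview of the structures below: a scrub_ctx owns a small pool of
 * scrub_bios, and each scrub_bio carries up to SCRUB_PAGES_PER_BIO
 * scrub_pages. The scrub_pages that make up one checksummed unit (data
 * sector, tree node or super block) are grouped into a scrub_block, which
 * is the unit that gets verified and, if necessary, repaired.
 */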
struct scrub_page {
	struct scrub_block	*sblock;
	struct page		*page;
	struct btrfs_device	*dev;
	u64			flags;  /* extent flags */
	u64			generation;
	u64			logical;
	u64			physical;
	atomic_t		ref_count;
	unsigned int		mirror_num:8;
	unsigned int		have_csum:1;
	unsigned int		io_error:1;
	u8			csum[BTRFS_CSUM_SIZE];
};
struct scrub_bio {
	int			index;
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	struct bio		*bio;
	int			err;
	u64			logical;
	u64			physical;
	struct scrub_page	*pagev[SCRUB_PAGES_PER_BIO];
	int			page_count;
	int			next_free;
	struct btrfs_work	work;
};
struct scrub_block {
	struct scrub_page	*pagev[SCRUB_MAX_PAGES_PER_BLOCK];
	int			page_count;
	atomic_t		outstanding_pages;
	atomic_t		ref_count; /* free mem on transition to zero */
	struct scrub_ctx	*sctx;
	unsigned int		header_error:1;
	unsigned int		checksum_error:1;
	unsigned int		no_io_error_seen:1;
	unsigned int		generation_error:1; /* also sets header_error */
};
struct scrub_ctx {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_CTX];
	struct btrfs_root	*dev_root;
	int			first_free;
	int			curr;
	atomic_t		bios_in_flight;
	atomic_t		workers_pending;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	u16			csum_size;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			pages_per_bio; /* <= SCRUB_PAGES_PER_BIO */
	u32			sectorsize;
	u32			nodesize;
	u32			leafsize;

	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;
};
struct scrub_fixup_nodatasum {
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	u64			logical;
	struct btrfs_root	*root;
	struct btrfs_work	work;
	int			mirror_num;
};
struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	char			*scratch_buf;
	char			*msg_buf;
	const char		*errstr;
	sector_t		sector;
	u64			logical;
	struct btrfs_device	*dev;
	int			msg_bufsize;
	int			scratch_bufsize;
};
static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
				     struct btrfs_fs_info *fs_info,
				     u64 length, u64 logical,
				     struct scrub_block *sblock);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock, int is_metadata,
				int have_csum, u8 *csum, u64 generation,
				u16 csum_size);
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
					 struct scrub_block *sblock,
					 int is_metadata, int have_csum,
					 const u8 *csum, u64 generation,
					 u16 csum_size);
static void scrub_complete_bio_end_io(struct bio *bio, int err);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good,
					     int force_write);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_page_get(struct scrub_page *spage);
static void scrub_page_put(struct scrub_page *spage);
static int scrub_add_page_to_bio(struct scrub_ctx *sctx,
				 struct scrub_page *spage);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force);
static void scrub_bio_end_io(struct bio *bio, int err);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
	atomic_inc(&sctx->bios_in_flight);
}

static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
	atomic_dec(&sctx->bios_in_flight);
	wake_up(&sctx->list_wait);
}
/*
 * used for workers that require transaction commits (i.e., for the
 * NOCOW case)
 */
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	/*
	 * increment scrubs_running to prevent cancel requests from
	 * completing as long as a worker is running. we must also
	 * increment scrubs_paused to prevent deadlocking on pause
	 * requests used for transaction commits (as the worker uses a
	 * transaction context). it is safe to regard the worker
	 * as paused for all practical matters. effectively, we only
	 * avoid cancellation requests from completing.
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrubs_running);
	atomic_inc(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_inc(&sctx->workers_pending);
}
/* used for workers that require transaction commits */
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	/*
	 * see scrub_pending_trans_workers_inc() for why we're pretending
	 * to be paused in the scrub counters
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_dec(&fs_info->scrubs_running);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_dec(&sctx->workers_pending);
	wake_up(&fs_info->scrub_pause_wait);
	wake_up(&sctx->list_wait);
}
static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}
static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	/* this can happen when scrub is cancelled */
	if (sctx->curr != -1) {
		struct scrub_bio *sbio = sctx->bios[sctx->curr];

		for (i = 0; i < sbio->page_count; i++) {
			BUG_ON(!sbio->pagev[i]);
			BUG_ON(!sbio->pagev[i]->page);
			scrub_block_put(sbio->pagev[i]->sblock);
		}
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_CTX; ++i) {
		struct scrub_bio *sbio = sctx->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	scrub_free_csums(sctx);
	kfree(sctx);
}
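
/*
 * allocate a per-device scrub context and chain its scrub_bios into a
 * simple free list (first_free/next_free) from which scrub_add_page_to_bio()
 * draws
 */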
static noinline_for_stack
struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev)
{
	struct scrub_ctx *sctx;
	int i;
	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
	int pages_per_bio;

	pages_per_bio = min_t(int, SCRUB_PAGES_PER_BIO,
			      bio_get_nr_vecs(dev->bdev));
	sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
	if (!sctx)
		goto nomem;
	sctx->pages_per_bio = pages_per_bio;
	sctx->curr = -1;
	sctx->dev_root = dev->dev_root;
	for (i = 0; i < SCRUB_BIOS_PER_CTX; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
		if (!sbio)
			goto nomem;
		sctx->bios[i] = sbio;

		sbio->index = i;
		sbio->sctx = sctx;
		sbio->page_count = 0;
		sbio->work.func = scrub_bio_end_io_worker;

		if (i != SCRUB_BIOS_PER_CTX - 1)
			sctx->bios[i]->next_free = i + 1;
		else
			sctx->bios[i]->next_free = -1;
	}
	sctx->first_free = 0;
	sctx->nodesize = dev->dev_root->nodesize;
	sctx->leafsize = dev->dev_root->leafsize;
	sctx->sectorsize = dev->dev_root->sectorsize;
	atomic_set(&sctx->bios_in_flight, 0);
	atomic_set(&sctx->workers_pending, 0);
	atomic_set(&sctx->cancel_req, 0);
	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
	INIT_LIST_HEAD(&sctx->csum_list);

	spin_lock_init(&sctx->list_lock);
	spin_lock_init(&sctx->stat_lock);
	init_waitqueue_head(&sctx->list_wait);
	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}
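
/*
 * callback for iterate_extent_inodes(); prints one warning line for each
 * path of an inode that references the errored extent
 */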
static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
{
	u64 isize;
	u32 nlink;
	int ret;
	int i;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key root_key;

	root_key.objectid = root;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	ret = inode_item_info(inum, 0, local_root, swarn->path);
	if (ret) {
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
				    struct btrfs_inode_item);
	isize = btrfs_inode_size(eb, inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	ipath = init_ipath(4096, local_root, swarn->path);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);

	if (ret < 0)
		goto err;

	/*
	 * we deliberately ignore the fact that ipath might have been too
	 * small to hold all of the paths here
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
			"%s, sector %llu, root %llu, inode %llu, offset %llu, "
			"length %llu, links %u (path: %s)\n", swarn->errstr,
			swarn->logical, rcu_str_deref(swarn->dev->name),
			(unsigned long long)swarn->sector, root, inum, offset,
			min(isize - offset, (u64)PAGE_SIZE), nlink,
			(char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	return 0;

err:
	printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
		"%s, sector %llu, root %llu, inode %llu, offset %llu: path "
		"resolving failed with ret=%d\n", swarn->errstr,
		swarn->logical, rcu_str_deref(swarn->dev->name),
		(unsigned long long)swarn->sector, root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}
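
/*
 * resolve the logical address of an errored block and report what it maps
 * to: the containing tree block for metadata, or the affected file paths
 * for data
 */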
static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	unsigned long ptr = 0;
	u64 extent_item_pos;
	u64 flags = 0;
	u64 ref_root;
	u32 item_size;
	u8 ref_level;
	const int bufsize = 4096;
	int ret;

	WARN_ON(sblock->page_count < 1);
	dev = sblock->pagev[0]->dev;
	fs_info = sblock->sctx->dev_root->fs_info;

	path = btrfs_alloc_path();

	swarn.scratch_buf = kmalloc(bufsize, GFP_NOFS);
	swarn.msg_buf = kmalloc(bufsize, GFP_NOFS);
	swarn.sector = (sblock->pagev[0]->physical) >> 9;
	swarn.logical = sblock->pagev[0]->logical;
	swarn.errstr = errstr;
	swarn.dev = NULL;
	swarn.msg_bufsize = bufsize;
	swarn.scratch_bufsize = bufsize;

	if (!path || !swarn.scratch_buf || !swarn.msg_buf)
		goto out;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
				  &flags);
	if (ret < 0)
		goto out;

	extent_item_pos = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);
	btrfs_release_path(path);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
						      &ref_root, &ref_level);
			printk_in_rcu(KERN_WARNING
				"btrfs: %s at logical %llu on dev %s, "
				"sector %llu: metadata %s (level %d) in tree "
				"%llu\n", errstr, swarn.logical,
				rcu_str_deref(dev->name),
				(unsigned long long)swarn.sector,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
	} else {
		swarn.path = path;
		swarn.dev = dev;
		iterate_extent_inodes(fs_info, found_key.objectid,
				      extent_item_pos, 1,
				      scrub_print_warning_inode, &swarn);
	}

out:
	btrfs_free_path(path);
	kfree(swarn.scratch_buf);
	kfree(swarn.msg_buf);
}
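
/*
 * callback for iterate_inodes_from_logical(); re-reads one page of a
 * nodatasum extent through the page cache. The read is directed at the
 * failing mirror (fixup->mirror_num), and the EXTENT_DAMAGED bit is used to
 * detect whether the on-the-fly repair in the generic read path succeeded.
 */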
static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *ctx)
{
	struct page *page = NULL;
	unsigned long index;
	struct scrub_fixup_nodatasum *fixup = ctx;
	int ret;
	int corrected = 0;
	struct btrfs_key key;
	struct inode *inode = NULL;
	u64 end = offset + PAGE_SIZE - 1;
	struct btrfs_root *local_root;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fixup->root->fs_info, &key);
	if (IS_ERR(local_root))
		return PTR_ERR(local_root);

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fixup->root->fs_info->sb, &key, local_root, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	index = offset >> PAGE_CACHE_SHIFT;

	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page)) {
		struct btrfs_fs_info *fs_info;
		if (PageDirty(page)) {
			/*
			 * we need to write the data to the defect sector. the
			 * data that was in that sector is not in memory,
			 * because the page was modified. we must not write the
			 * modified page to that sector.
			 *
			 * TODO: what could be done here: wait for the delalloc
			 *       runner to write out that page (might involve
			 *       COW) and see whether the sector is still
			 *       referenced afterwards.
			 *
			 * For the meantime, we'll treat this error as
			 * uncorrectable, although there is a chance that a
			 * later scrub will find the bad sector again and that
			 * there's no dirty page in memory, then.
			 */
			unlock_page(page);
			ret = -EIO;
			goto out;
		}
		fs_info = BTRFS_I(inode)->root->fs_info;
		ret = repair_io_failure(fs_info, offset, PAGE_SIZE,
					fixup->logical, page,
					fixup->mirror_num);
		unlock_page(page);
		corrected = !ret;
	} else {
		/*
		 * we need to get good data first. the general readpage path
		 * will call repair_io_failure for us, we just have to make
		 * sure we read the bad mirror.
		 */
		ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
				      EXTENT_DAMAGED, GFP_NOFS);
		if (ret) {
			/* set_extent_bits should give proper error */
			WARN_ON(ret > 0);
			if (ret > 0)
				ret = -EFAULT;
			goto out;
		}

		ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
					    btrfs_get_extent,
					    fixup->mirror_num);
		wait_on_page_locked(page);

		corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
					    end, EXTENT_DAMAGED, 0, NULL);

		if (!corrected)
			clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
					  EXTENT_DAMAGED, GFP_NOFS);
	}

out:
	if (page)
		page_cache_release(page);
	if (inode)
		iput(inode);

	if (ret < 0)
		return ret;

	if (ret == 0 && corrected) {
		/*
		 * we only need to call readpage for one of the inodes
		 * belonging to this extent. so make iterate_extent_inodes stop
		 */
		return 1;
	}

	return -EIO;
}
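
/*
 * worker for errored data blocks without a checksum: runs in a transaction
 * context and triggers a regular cached read (and thereby the on-the-fly
 * repair) for every inode that references the bad logical address
 */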
static void scrub_fixup_nodatasum(struct btrfs_work *work)
{
	int ret;
	struct scrub_fixup_nodatasum *fixup;
	struct scrub_ctx *sctx;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	int uncorrectable = 0;

	fixup = container_of(work, struct scrub_fixup_nodatasum, work);
	sctx = fixup->sctx;
	fs_info = fixup->root->fs_info;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.malloc_errors;
		spin_unlock(&sctx->stat_lock);
		uncorrectable = 1;
		goto out;
	}

	trans = btrfs_join_transaction(fixup->root);
	if (IS_ERR(trans)) {
		uncorrectable = 1;
		goto out;
	}

	/*
	 * the idea is to trigger a regular read through the standard path. we
	 * read a page from the (failed) logical address by specifying the
	 * corresponding copynum of the failed sector. thus, that readpage is
	 * expected to fail.
	 * that is the point where on-the-fly error correction will kick in
	 * (once it's finished) and rewrite the failed sector if a good copy
	 * can be found.
	 */
	ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
					  path, scrub_fixup_readpage,
					  fixup);
	if (ret < 0) {
		uncorrectable = 1;
		goto out;
	}
	WARN_ON(ret != 1);

	spin_lock(&sctx->stat_lock);
	++sctx->stat.corrected_errors;
	spin_unlock(&sctx->stat_lock);

out:
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, fixup->root);
	if (uncorrectable) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.uncorrectable_errors;
		spin_unlock(&sctx->stat_lock);

		printk_ratelimited_in_rcu(KERN_ERR
			"btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n",
			(unsigned long long)fixup->logical,
			rcu_str_deref(fixup->dev->name));
	}

	btrfs_free_path(path);
	kfree(fixup);

	scrub_pending_trans_workers_dec(sctx);
}
/*
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
	struct scrub_ctx *sctx = sblock_to_check->sctx;
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	u64 length;
	u64 logical;
	u64 generation;
	unsigned int failed_mirror_index;
	unsigned int is_metadata;
	unsigned int have_csum;
	u8 *csum;
	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
	struct scrub_block *sblock_bad;
	int ret;
	int mirror_index;
	int page_num;
	int success;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	BUG_ON(sblock_to_check->page_count < 1);
	fs_info = sctx->dev_root->fs_info;
	length = sblock_to_check->page_count * PAGE_SIZE;
	logical = sblock_to_check->pagev[0]->logical;
	generation = sblock_to_check->pagev[0]->generation;
	BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
	failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
	is_metadata = !(sblock_to_check->pagev[0]->flags &
			BTRFS_EXTENT_FLAG_DATA);
	have_csum = sblock_to_check->pagev[0]->have_csum;
	csum = sblock_to_check->pagev[0]->csum;
	dev = sblock_to_check->pagev[0]->dev;

	/*
	 * read all mirrors one after the other. This includes to
	 * re-read the extent or metadata block that failed (that was
	 * the cause that this fixup code is called) another time,
	 * page by page this time in order to know which pages
	 * caused I/O errors and which ones are good (for all mirrors).
	 * It is the goal to handle the situation when more than one
	 * mirror contains I/O errors, but the errors do not
	 * overlap, i.e. the data can be repaired by selecting the
	 * pages from those mirrors without I/O error on the
	 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
	 * would be that mirror #1 has an I/O error on the first page,
	 * the second page is good, and mirror #2 has an I/O error on
	 * the second page, but the first page is good.
	 * Then the first page of the first mirror can be repaired by
	 * taking the first page of the second mirror, and the
	 * second page of the second mirror can be repaired by
	 * copying the contents of the 2nd page of the 1st mirror.
	 * One more note: if the pages of one mirror contain I/O
	 * errors, the checksum cannot be verified. In order to get
	 * the best data for repairing, the first attempt is to find
	 * a mirror without I/O errors and with a validated checksum.
	 * Only if this is not possible, the pages are picked from
	 * mirrors with I/O errors without considering the checksum.
	 * If the latter is the case, at the end, the checksum of the
	 * repaired area is verified in order to correctly maintain
	 * the statistics.
	 */

	sblocks_for_recheck = kzalloc(BTRFS_MAX_MIRRORS *
				      sizeof(*sblocks_for_recheck),
				      GFP_NOFS);
	if (!sblocks_for_recheck) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}

	/* setup the context, map the logical blocks and alloc the pages */
	ret = scrub_setup_recheck_block(sctx, fs_info, length,
					logical, sblocks_for_recheck);
	if (ret) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}
	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
	sblock_bad = sblocks_for_recheck + failed_mirror_index;

	/* build and submit the bios for the failed mirror, check checksums */
	scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
			    csum, generation, sctx->csum_size);

	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
	    sblock_bad->no_io_error_seen) {
		/*
		 * the error disappeared after reading page by page, or
		 * the area was part of a huge bio and other parts of the
		 * bio caused I/O errors, or the block layer merged several
		 * read requests into one and the error is caused by a
		 * different bio (usually one of the two latter cases is
		 * the cause)
		 */
		spin_lock(&sctx->stat_lock);
		sctx->stat.unverified_errors++;
		spin_unlock(&sctx->stat_lock);

		goto out;
	}

	if (!sblock_bad->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("i/o error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
	} else if (sblock_bad->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.csum_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev,
					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
	} else if (sblock_bad->header_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.verify_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum/header error",
					    sblock_to_check);
		if (sblock_bad->generation_error)
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
	}

	if (sctx->readonly)
		goto did_not_correct_error;

	if (!is_metadata && !have_csum) {
		struct scrub_fixup_nodatasum *fixup_nodatasum;

		/*
		 * !is_metadata and !have_csum, this means that the data
		 * might not be COW'ed, that it might be modified
		 * concurrently. The general strategy to work on the
		 * commit root does not help in the case when COW is not
		 * used.
		 */
		fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
		if (!fixup_nodatasum)
			goto did_not_correct_error;
		fixup_nodatasum->sctx = sctx;
		fixup_nodatasum->dev = dev;
		fixup_nodatasum->logical = logical;
		fixup_nodatasum->root = fs_info->extent_root;
		fixup_nodatasum->mirror_num = failed_mirror_index + 1;
		scrub_pending_trans_workers_inc(sctx);
		fixup_nodatasum->work.func = scrub_fixup_nodatasum;
		btrfs_queue_worker(&fs_info->scrub_workers,
				   &fixup_nodatasum->work);
		goto out;
	}

	/*
	 * now build and submit the bios for the other mirrors, check
	 * checksums.
	 * First try to pick the mirror which is completely without I/O
	 * errors and also does not have a checksum error.
	 * If one is found, and if a checksum is present, the full block
	 * that is known to contain an error is rewritten. Afterwards
	 * the block is known to be corrected.
	 * If a mirror is found which is completely correct, and no
	 * checksum is present, only those pages are rewritten that had
	 * an I/O error in the block to be repaired, since it cannot be
	 * determined, which copy of the other pages is better (and it
	 * could happen otherwise that a correct page would be
	 * overwritten by a bad one).
	 */
	for (mirror_index = 0;
	     mirror_index < BTRFS_MAX_MIRRORS &&
	     sblocks_for_recheck[mirror_index].page_count > 0;
	     mirror_index++) {
		struct scrub_block *sblock_other;

		if (mirror_index == failed_mirror_index)
			continue;
		sblock_other = sblocks_for_recheck + mirror_index;

		/* build and submit the bios, check checksums */
		scrub_recheck_block(fs_info, sblock_other, is_metadata,
				    have_csum, csum, generation,
				    sctx->csum_size);

		if (!sblock_other->header_error &&
		    !sblock_other->checksum_error &&
		    sblock_other->no_io_error_seen) {
			int force_write = is_metadata || have_csum;

			ret = scrub_repair_block_from_good_copy(sblock_bad,
								sblock_other,
								force_write);
			if (0 == ret)
				goto corrected_error;
		}
	}

	/*
	 * in case of I/O errors in the area that is supposed to be
	 * repaired, continue by picking good copies of those pages.
	 * Select the good pages from mirrors to rewrite bad pages from
	 * the area to fix. Afterwards verify the checksum of the block
	 * that is supposed to be repaired. This verification step is
	 * only done for the purpose of statistic counting and for the
	 * final scrub report, whether errors remain.
	 * A perfect algorithm could make use of the checksum and try
	 * all possible combinations of pages from the different mirrors
	 * until the checksum verification succeeds. For example, when
	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
	 * of mirror #2 is readable but the final checksum test fails,
	 * then the 2nd page of mirror #3 could be tried, whether now
	 * the final checksum succeeds. But this would be a rare
	 * exception and is therefore not implemented. At least it is
	 * avoided that the good copy is overwritten.
	 * A more useful improvement would be to pick the sectors
	 * without I/O error based on sector sizes (512 bytes on legacy
	 * disks) instead of on PAGE_SIZE. Then maybe 512 byte of one
	 * mirror could be repaired by taking 512 byte of a different
	 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
	 * area are unreadable.
	 */

	/* can only fix I/O errors from here on */
	if (sblock_bad->no_io_error_seen)
		goto did_not_correct_error;

	success = 1;
	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
		struct scrub_page *page_bad = sblock_bad->pagev[page_num];

		if (!page_bad->io_error)
			continue;

		for (mirror_index = 0;
		     mirror_index < BTRFS_MAX_MIRRORS &&
		     sblocks_for_recheck[mirror_index].page_count > 0;
		     mirror_index++) {
			struct scrub_block *sblock_other = sblocks_for_recheck +
							   mirror_index;
			struct scrub_page *page_other = sblock_other->pagev[
							page_num];

			if (!page_other->io_error) {
				ret = scrub_repair_page_from_good_copy(
					sblock_bad, sblock_other, page_num, 0);
				if (0 == ret) {
					page_bad->io_error = 0;
					break; /* succeeded for this page */
				}
			}
		}

		if (page_bad->io_error) {
			/* did not find a mirror to copy the page from */
			success = 0;
		}
	}

	if (success) {
		if (is_metadata || have_csum) {
			/*
			 * need to verify the checksum now that all
			 * sectors on disk are repaired (the write
			 * request for data to be repaired is on its way).
			 * Just be lazy and use scrub_recheck_block()
			 * which re-reads the data before the checksum
			 * is verified, but most likely the data comes out
			 * of the page cache.
			 */
			scrub_recheck_block(fs_info, sblock_bad,
					    is_metadata, have_csum, csum,
					    generation, sctx->csum_size);
			if (!sblock_bad->header_error &&
			    !sblock_bad->checksum_error &&
			    sblock_bad->no_io_error_seen)
				goto corrected_error;
			else
				goto did_not_correct_error;
		} else {
corrected_error:
			spin_lock(&sctx->stat_lock);
			sctx->stat.corrected_errors++;
			spin_unlock(&sctx->stat_lock);
			printk_ratelimited_in_rcu(KERN_ERR
				"btrfs: fixed up error at logical %llu on dev %s\n",
				(unsigned long long)logical,
				rcu_str_deref(dev->name));
		}
	} else {
did_not_correct_error:
		spin_lock(&sctx->stat_lock);
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		printk_ratelimited_in_rcu(KERN_ERR
			"btrfs: unable to fixup (regular) error at logical %llu on dev %s\n",
			(unsigned long long)logical,
			rcu_str_deref(dev->name));
	}

out:
	if (sblocks_for_recheck) {
		for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
		     mirror_index++) {
			struct scrub_block *sblock = sblocks_for_recheck +
						     mirror_index;
			int page_index;

			for (page_index = 0; page_index < sblock->page_count;
			     page_index++) {
				sblock->pagev[page_index]->sblock = NULL;
				scrub_page_put(sblock->pagev[page_index]);
			}
		}
		kfree(sblocks_for_recheck);
	}

	return 0;
}
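
/*
 * map the errored range in PAGE_SIZE steps; each stripe returned by
 * btrfs_map_block() represents one mirror, and the pages of each mirror
 * are collected into a scrub_block of their own
 */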
static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
				     struct btrfs_fs_info *fs_info,
				     u64 length, u64 logical,
				     struct scrub_block *sblocks_for_recheck)
{
	int page_index;
	int mirror_index;
	int ret;

	/*
	 * note: the two members ref_count and outstanding_pages
	 * are not used (and not set) in the blocks that are used for
	 * the recheck procedure
	 */

	page_index = 0;
	while (length > 0) {
		u64 sublen = min_t(u64, length, PAGE_SIZE);
		u64 mapped_length = sublen;
		struct btrfs_bio *bbio = NULL;

		/*
		 * with a length of PAGE_SIZE, each returned stripe
		 * represents one mirror
		 */
		ret = btrfs_map_block(fs_info, WRITE, logical, &mapped_length,
				      &bbio, 0);
		if (ret || !bbio || mapped_length < sublen) {
			kfree(bbio);
			return -EIO;
		}

		BUG_ON(page_index >= SCRUB_PAGES_PER_BIO);
		for (mirror_index = 0; mirror_index < (int)bbio->num_stripes;
		     mirror_index++) {
			struct scrub_block *sblock;
			struct scrub_page *page;

			if (mirror_index >= BTRFS_MAX_MIRRORS)
				continue;

			sblock = sblocks_for_recheck + mirror_index;
			sblock->sctx = sctx;
			page = kzalloc(sizeof(*page), GFP_NOFS);
			if (!page) {
leave_nomem:
				spin_lock(&sctx->stat_lock);
				sctx->stat.malloc_errors++;
				spin_unlock(&sctx->stat_lock);
				kfree(bbio);
				return -ENOMEM;
			}
			scrub_page_get(page);
			sblock->pagev[page_index] = page;
			page->logical = logical;
			page->physical = bbio->stripes[mirror_index].physical;
			/* for missing devices, dev->bdev is NULL */
			page->dev = bbio->stripes[mirror_index].dev;
			page->mirror_num = mirror_index + 1;
			sblock->page_count++;
			page->page = alloc_page(GFP_NOFS);
			if (!page->page)
				goto leave_nomem;
		}
		kfree(bbio);
		length -= sublen;
		logical += sublen;
		page_index++;
	}

	return 0;
}
/*
 * this function will check the on disk data for checksum errors, header
 * errors and read I/O errors. If any I/O errors happen, the exact pages
 * which are errored are marked as being bad. The goal is to enable scrub
 * to take those pages that are not errored from all the mirrors so that
 * the pages that are errored in the just handled mirror can be repaired.
 */
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock, int is_metadata,
				int have_csum, u8 *csum, u64 generation,
				u16 csum_size)
{
	int page_num;

	sblock->no_io_error_seen = 1;
	sblock->header_error = 0;
	sblock->checksum_error = 0;

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		struct bio *bio;
		struct scrub_page *page = sblock->pagev[page_num];
		DECLARE_COMPLETION_ONSTACK(complete);

		if (page->dev->bdev == NULL) {
			page->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}

		WARN_ON(!page->page);
		bio = bio_alloc(GFP_NOFS, 1);
		if (!bio) {
			page->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}
		bio->bi_bdev = page->dev->bdev;
		bio->bi_sector = page->physical >> 9;
		bio->bi_end_io = scrub_complete_bio_end_io;
		bio->bi_private = &complete;

		bio_add_page(bio, page->page, PAGE_SIZE, 0);
		btrfsic_submit_bio(READ, bio);

		/* this will also unplug the queue */
		wait_for_completion(&complete);

		page->io_error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
		if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
			sblock->no_io_error_seen = 0;
		bio_put(bio);
	}

	if (sblock->no_io_error_seen)
		scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
					     have_csum, csum, generation,
					     csum_size);
}
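
/*
 * compute the checksum over all pages of a block that was read without I/O
 * errors; for metadata, bytenr, fsid, chunk tree uuid and generation in the
 * header are verified first
 */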
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
					 struct scrub_block *sblock,
					 int is_metadata, int have_csum,
					 const u8 *csum, u64 generation,
					 u16 csum_size)
{
	int page_num;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u32 crc = ~(u32)0;
	struct btrfs_root *root = fs_info->extent_root;
	void *mapped_buffer;

	WARN_ON(!sblock->pagev[0]->page);
	if (is_metadata) {
		struct btrfs_header *h;

		mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
		h = (struct btrfs_header *)mapped_buffer;

		if (sblock->pagev[0]->logical != le64_to_cpu(h->bytenr) ||
		    memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE) ||
		    memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
			   BTRFS_UUID_SIZE)) {
			sblock->header_error = 1;
		} else if (generation != le64_to_cpu(h->generation)) {
			sblock->header_error = 1;
			sblock->generation_error = 1;
		}
		csum = h->csum;
	} else {
		if (!have_csum)
			return;

		mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
	}

	for (page_num = 0;;) {
		if (page_num == 0 && is_metadata)
			crc = btrfs_csum_data(root,
				((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE,
				crc, PAGE_SIZE - BTRFS_CSUM_SIZE);
		else
			crc = btrfs_csum_data(root, mapped_buffer, crc,
					      PAGE_SIZE);

		kunmap_atomic(mapped_buffer);
		page_num++;
		if (page_num >= sblock->page_count)
			break;
		WARN_ON(!sblock->pagev[page_num]->page);

		mapped_buffer = kmap_atomic(sblock->pagev[page_num]->page);
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, csum, csum_size))
		sblock->checksum_error = 1;
}
static void scrub_complete_bio_end_io(struct bio *bio, int err)
{
	complete((struct completion *)bio->bi_private);
}
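
/*
 * copy the pages of a good mirror over the bad block; with force_write
 * every page is rewritten, otherwise only pages of blocks that show
 * header, checksum or I/O errors
 */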
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good,
					     int force_write)
{
	int page_num;
	int ret = 0;

	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
		int ret_sub;

		ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
							   sblock_good,
							   page_num,
							   force_write);
		if (ret_sub)
			ret = ret_sub;
	}

	return ret;
}
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write)
{
	struct scrub_page *page_bad = sblock_bad->pagev[page_num];
	struct scrub_page *page_good = sblock_good->pagev[page_num];

	BUG_ON(page_bad->page == NULL);
	BUG_ON(page_good->page == NULL);
	if (force_write || sblock_bad->header_error ||
	    sblock_bad->checksum_error || page_bad->io_error) {
		struct bio *bio;
		int ret;
		DECLARE_COMPLETION_ONSTACK(complete);

		bio = bio_alloc(GFP_NOFS, 1);
		if (!bio)
			return -EIO;
		bio->bi_bdev = page_bad->dev->bdev;
		bio->bi_sector = page_bad->physical >> 9;
		bio->bi_end_io = scrub_complete_bio_end_io;
		bio->bi_private = &complete;

		ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
		if (PAGE_SIZE != ret) {
			bio_put(bio);
			return -EIO;
		}
		btrfsic_submit_bio(WRITE, bio);

		/* this will also unplug the queue */
		wait_for_completion(&complete);
		if (!bio_flagged(bio, BIO_UPTODATE)) {
			btrfs_dev_stat_inc_and_print(page_bad->dev,
				BTRFS_DEV_STAT_WRITE_ERRS);
			bio_put(bio);
			return -EIO;
		}
		bio_put(bio);
	}

	return 0;
}
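
/*
 * verify a completely read scrub_block: dispatch to the data, tree block or
 * super block checksum routine depending on the extent flags and start the
 * repair procedure on mismatch
 */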
static void scrub_checksum(struct scrub_block *sblock)
{
	u64 flags;
	int ret;

	WARN_ON(sblock->page_count < 1);
	flags = sblock->pagev[0]->flags;
	ret = 0;
	if (flags & BTRFS_EXTENT_FLAG_DATA)
		ret = scrub_checksum_data(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		ret = scrub_checksum_tree_block(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_SUPER)
		(void)scrub_checksum_super(sblock);
	else
		WARN_ON(1);
	if (ret)
		scrub_handle_errored_block(sblock);
}
static int scrub_checksum_data(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	u8 csum[BTRFS_CSUM_SIZE];
	u8 *on_disk_csum;
	struct page *page;
	void *buffer;
	u32 crc = ~(u32)0;
	int fail = 0;
	struct btrfs_root *root = sctx->dev_root;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	if (!sblock->pagev[0]->have_csum)
		return 0;

	on_disk_csum = sblock->pagev[0]->csum;
	page = sblock->pagev[0]->page;
	buffer = kmap_atomic(page);

	len = sctx->sectorsize;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, PAGE_SIZE);

		crc = btrfs_csum_data(root, buffer, crc, l);
		kunmap_atomic(buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		buffer = kmap_atomic(page);
	}

	btrfs_csum_final(crc, csum);
	if (memcmp(csum, on_disk_csum, sctx->csum_size))
		fail = 1;

	return fail;
}
static int scrub_checksum_tree_block(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_header *h;
	struct btrfs_root *root = sctx->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	struct page *page;
	void *mapped_buffer;
	u64 mapped_size;
	void *p;
	u32 crc = ~(u32)0;
	int fail = 0;
	int crc_fail = 0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	page = sblock->pagev[0]->page;
	mapped_buffer = kmap_atomic(page);
	h = (struct btrfs_header *)mapped_buffer;
	memcpy(on_disk_csum, h->csum, sctx->csum_size);

	/*
	 * we don't use the getter functions here, as we
	 * a) don't have an extent buffer and
	 * b) the page is already kmapped
	 */

	if (sblock->pagev[0]->logical != le64_to_cpu(h->bytenr))
		++fail;

	if (sblock->pagev[0]->generation != le64_to_cpu(h->generation))
		++fail;

	if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
		++fail;

	if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
		   BTRFS_UUID_SIZE))
		++fail;

	BUG_ON(sctx->nodesize != sctx->leafsize);
	len = sctx->nodesize - BTRFS_CSUM_SIZE;
	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, mapped_size);

		crc = btrfs_csum_data(root, p, crc, l);
		kunmap_atomic(mapped_buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		mapped_buffer = kmap_atomic(page);
		mapped_size = PAGE_SIZE;
		p = mapped_buffer;
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
		++crc_fail;

	return fail || crc_fail;
}
static int scrub_checksum_super(struct scrub_block *sblock)
{
	struct btrfs_super_block *s;
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_root *root = sctx->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	struct page *page;
	void *mapped_buffer;
	u64 mapped_size;
	void *p;
	u32 crc = ~(u32)0;
	int fail_gen = 0;
	int fail_cor = 0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	page = sblock->pagev[0]->page;
	mapped_buffer = kmap_atomic(page);
	s = (struct btrfs_super_block *)mapped_buffer;
	memcpy(on_disk_csum, s->csum, sctx->csum_size);

	if (sblock->pagev[0]->logical != le64_to_cpu(s->bytenr))
		++fail_cor;

	if (sblock->pagev[0]->generation != le64_to_cpu(s->generation))
		++fail_gen;

	if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
		++fail_cor;

	len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, mapped_size);

		crc = btrfs_csum_data(root, p, crc, l);
		kunmap_atomic(mapped_buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		mapped_buffer = kmap_atomic(page);
		mapped_size = PAGE_SIZE;
		p = mapped_buffer;
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
		++fail_cor;

	if (fail_cor + fail_gen) {
		/*
		 * if we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		if (fail_cor)
			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
	}

	return fail_cor + fail_gen;
}
static void scrub_block_get(struct scrub_block *sblock)
{
	atomic_inc(&sblock->ref_count);
}

static void scrub_block_put(struct scrub_block *sblock)
{
	if (atomic_dec_and_test(&sblock->ref_count)) {
		int i;

		for (i = 0; i < sblock->page_count; i++)
			scrub_page_put(sblock->pagev[i]);
		kfree(sblock);
	}
}

static void scrub_page_get(struct scrub_page *spage)
{
	atomic_inc(&spage->ref_count);
}

static void scrub_page_put(struct scrub_page *spage)
{
	if (atomic_dec_and_test(&spage->ref_count)) {
		if (spage->page)
			__free_page(spage->page);
		kfree(spage);
	}
}
static void scrub_submit(struct scrub_ctx *sctx)
{
	struct scrub_bio *sbio;

	if (sctx->curr == -1)
		return;

	sbio = sctx->bios[sctx->curr];
	sctx->curr = -1;
	scrub_pending_bio_inc(sctx);

	btrfsic_submit_bio(READ, sbio->bio);
}
static int scrub_add_page_to_bio(struct scrub_ctx *sctx,
				 struct scrub_page *spage)
{
	struct scrub_block *sblock = spage->sblock;
	struct scrub_bio *sbio;
	int ret;

again:
	/*
	 * grab a fresh bio or wait for one to become available
	 */
	while (sctx->curr == -1) {
		spin_lock(&sctx->list_lock);
		sctx->curr = sctx->first_free;
		if (sctx->curr != -1) {
			sctx->first_free = sctx->bios[sctx->curr]->next_free;
			sctx->bios[sctx->curr]->next_free = -1;
			sctx->bios[sctx->curr]->page_count = 0;
			spin_unlock(&sctx->list_lock);
		} else {
			spin_unlock(&sctx->list_lock);
			wait_event(sctx->list_wait, sctx->first_free != -1);
		}
	}
	sbio = sctx->bios[sctx->curr];
	if (sbio->page_count == 0) {
		struct bio *bio;

		sbio->physical = spage->physical;
		sbio->logical = spage->logical;
		sbio->dev = spage->dev;
		bio = sbio->bio;
		if (!bio) {
			bio = bio_alloc(GFP_NOFS, sctx->pages_per_bio);
			if (!bio)
				return -ENOMEM;
			sbio->bio = bio;
		}

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_bio_end_io;
		bio->bi_bdev = sbio->dev->bdev;
		bio->bi_sector = sbio->physical >> 9;
		sbio->err = 0;
	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
		   spage->physical ||
		   sbio->logical + sbio->page_count * PAGE_SIZE !=
		   spage->logical ||
		   sbio->dev != spage->dev) {
		scrub_submit(sctx);
		goto again;
	}

	sbio->pagev[sbio->page_count] = spage;
	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
		if (sbio->page_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			return -EIO;
		}
		scrub_submit(sctx);
		goto again;
	}

	scrub_block_get(sblock); /* one for the added page */
	atomic_inc(&sblock->outstanding_pages);
	sbio->page_count++;
	if (sbio->page_count == sctx->pages_per_bio)
		scrub_submit(sctx);

	return 0;
}
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force)
{
	struct scrub_block *sblock;
	int index;

	sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
	if (!sblock) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	/* one ref inside this function, plus one for each page added to
	 * a bio later on */
	atomic_set(&sblock->ref_count, 1);
	sblock->sctx = sctx;
	sblock->no_io_error_seen = 1;

	for (index = 0; len > 0; index++) {
		struct scrub_page *spage;
		u64 l = min_t(u64, len, PAGE_SIZE);

		spage = kzalloc(sizeof(*spage), GFP_NOFS);
		if (!spage) {
leave_nomem:
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			spin_unlock(&sctx->stat_lock);
			scrub_block_put(sblock);
			return -ENOMEM;
		}
		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
		scrub_page_get(spage);
		sblock->pagev[index] = spage;
		spage->sblock = sblock;
		spage->dev = dev;
		spage->flags = flags;
		spage->generation = gen;
		spage->logical = logical;
		spage->physical = physical;
		spage->mirror_num = mirror_num;
		if (csum) {
			spage->have_csum = 1;
			memcpy(spage->csum, csum, sctx->csum_size);
		} else {
			spage->have_csum = 0;
		}
		sblock->page_count++;
		spage->page = alloc_page(GFP_NOFS);
		if (!spage->page)
			goto leave_nomem;
		len -= l;
		logical += l;
		physical += l;
	}

	WARN_ON(sblock->page_count == 0);
	for (index = 0; index < sblock->page_count; index++) {
		struct scrub_page *spage = sblock->pagev[index];
		int ret;

		ret = scrub_add_page_to_bio(sctx, spage);
		if (ret) {
			scrub_block_put(sblock);
			return ret;
		}
	}

	if (force)
		scrub_submit(sctx);

	/* last one frees, either here or in bio completion for last page */
	scrub_block_put(sblock);
	return 0;
}
static void scrub_bio_end_io(struct bio *bio, int err)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;

	sbio->err = err;
	sbio->bio = bio;

	btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
}
static void scrub_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	BUG_ON(sbio->page_count > SCRUB_PAGES_PER_BIO);
	if (sbio->err) {
		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			spage->sblock->no_io_error_seen = 0;
		}
	}

	/* now complete the scrub_block items that have all pages completed */
	for (i = 0; i < sbio->page_count; i++) {
		struct scrub_page *spage = sbio->pagev[i];
		struct scrub_block *sblock = spage->sblock;

		if (atomic_dec_and_test(&sblock->outstanding_pages))
			scrub_block_complete(sblock);
		scrub_block_put(sblock);
	}

	bio_put(sbio->bio);
	sbio->bio = NULL;
	spin_lock(&sctx->list_lock);
	sbio->next_free = sctx->first_free;
	sctx->first_free = sbio->index;
	spin_unlock(&sctx->list_lock);
	scrub_pending_bio_dec(sctx);
}
static void scrub_block_complete(struct scrub_block *sblock)
{
	if (!sblock->no_io_error_seen)
		scrub_handle_errored_block(sblock);
	else
		scrub_checksum(sblock);
}
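
/*
 * find the checksum for the given logical address in the list prefilled by
 * btrfs_lookup_csums_range(); fully consumed or passed-over entries are
 * dropped from the list
 */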
static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u64 len,
			   u8 *csum)
{
	struct btrfs_ordered_sum *sum = NULL;
	int ret = 0;
	unsigned long i;
	unsigned long num_sectors;

	while (!list_empty(&sctx->csum_list)) {
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		if (sum->bytenr > logical)
			return 0;
		if (sum->bytenr + sum->len > logical)
			break;

		++sctx->stat.csum_discards;
		list_del(&sum->list);
		kfree(sum);
		sum = NULL;
	}
	if (!sum)
		return 0;

	num_sectors = sum->len / sctx->sectorsize;
	for (i = 0; i < num_sectors; ++i) {
		if (sum->sums[i].bytenr == logical) {
			memcpy(csum, &sum->sums[i].sum, sctx->csum_size);
			ret = 1;
			break;
		}
	}
	if (ret && i == num_sectors - 1) {
		list_del(&sum->list);
		kfree(sum);
	}
	return ret;
}
/* scrub extent tries to collect up to 64 kB for each bio */
static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
			u64 physical, struct btrfs_device *dev, u64 flags,
			u64 gen, int mirror_num)
{
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		blocksize = sctx->sectorsize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.data_extents_scrubbed++;
		sctx->stat.data_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		BUG_ON(sctx->nodesize != sctx->leafsize);
		blocksize = sctx->nodesize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.tree_extents_scrubbed++;
		sctx->stat.tree_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else {
		blocksize = sctx->sectorsize;
		WARN_ON(1);
	}

	while (len) {
		u64 l = min_t(u64, len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sctx, logical, l, csum);
			if (have_csum == 0)
				++sctx->stat.no_csum;
		}
		ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
				  mirror_num, have_csum ? csum : NULL, 0);
		if (ret)
			return ret;
		len -= l;
		logical += l;
		physical += l;
	}
	return 0;
}
static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
					   struct map_lookup *map,
					   struct btrfs_device *scrub_dev,
					   int num, u64 base, u64 length)
{
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_root *csum_root = fs_info->csum_root;
	struct btrfs_extent_item *extent;
	struct blk_plug plug;
	u64 flags;
	int ret;
	int slot;
	int i;
	u64 nstripes;
	struct extent_buffer *l;
	struct btrfs_key key;
	u64 physical;
	u64 logical;
	u64 generation;
	int mirror_num;
	struct reada_control *reada1;
	struct reada_control *reada2;
	struct btrfs_key key_start;
	struct btrfs_key key_end;
	u64 increment = map->stripe_len;
	u64 offset;

	nstripes = length;
	offset = 0;
	do_div(nstripes, map->stripe_len);
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		offset = map->stripe_len * num;
		increment = map->stripe_len * map->num_stripes;
		mirror_num = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;
		offset = map->stripe_len * (num / map->sub_stripes);
		increment = map->stripe_len * factor;
		mirror_num = num % map->sub_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else {
		increment = map->stripe_len;
		mirror_num = 1;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * work on commit root. The related disk blocks are static as
	 * long as COW is applied. This means, it is safe to rewrite
	 * them to repair disk errors without any race conditions
	 */
	path->search_commit_root = 1;
	path->skip_locking = 1;

	/*
	 * trigger the readahead for the extent tree and csum tree and wait
	 * for completion. During readahead, the scrub is officially paused
	 * to not hold off transaction commits
	 */
	logical = base + offset;

	wait_event(sctx->list_wait,
		   atomic_read(&sctx->bios_in_flight) == 0);
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);

	/* FIXME it might be better to start readahead at commit root */
	key_start.objectid = logical;
	key_start.type = BTRFS_EXTENT_ITEM_KEY;
	key_start.offset = (u64)0;
	key_end.objectid = base + offset + nstripes * increment;
	key_end.type = BTRFS_EXTENT_ITEM_KEY;
	key_end.offset = (u64)0;
	reada1 = btrfs_reada_add(root, &key_start, &key_end);

	key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_start.type = BTRFS_EXTENT_CSUM_KEY;
	key_start.offset = logical;
	key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_end.type = BTRFS_EXTENT_CSUM_KEY;
	key_end.offset = base + offset + nstripes * increment;
	reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);

	if (!IS_ERR(reada1))
		btrfs_reada_wait(reada1);
	if (!IS_ERR(reada2))
		btrfs_reada_wait(reada2);

	mutex_lock(&fs_info->scrub_lock);
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	wake_up(&fs_info->scrub_pause_wait);

	/*
	 * collect all data csums for the stripe to avoid seeking during
	 * the scrub. This might currently (crc32) end up being about 1MB
	 */
	blk_start_plug(&plug);

	/*
	 * now find all extents for each stripe and scrub them
	 */
	logical = base + offset;
	physical = map->stripes[num].physical;
	ret = 0;
	for (i = 0; i < nstripes; ++i) {
		/*
		 * canceled?
		 */
		if (atomic_read(&fs_info->scrub_cancel_req) ||
		    atomic_read(&sctx->cancel_req)) {
			ret = -ECANCELED;
			goto out;
		}
		/*
		 * check to see if we have to pause
		 */
		if (atomic_read(&fs_info->scrub_pause_req)) {
			/* push queued extents */
			scrub_submit(sctx);
			wait_event(sctx->list_wait,
				   atomic_read(&sctx->bios_in_flight) == 0);
			atomic_inc(&fs_info->scrubs_paused);
			wake_up(&fs_info->scrub_pause_wait);
			mutex_lock(&fs_info->scrub_lock);
			while (atomic_read(&fs_info->scrub_pause_req)) {
				mutex_unlock(&fs_info->scrub_lock);
				wait_event(fs_info->scrub_pause_wait,
				   atomic_read(&fs_info->scrub_pause_req) == 0);
				mutex_lock(&fs_info->scrub_lock);
			}
			atomic_dec(&fs_info->scrubs_paused);
			mutex_unlock(&fs_info->scrub_lock);
			wake_up(&fs_info->scrub_pause_wait);
		}

		ret = btrfs_lookup_csums_range(csum_root, logical,
					       logical + map->stripe_len - 1,
					       &sctx->csum_list, 1);
		if (ret)
			goto out;

		key.objectid = logical;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = (u64)0;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = btrfs_previous_item(root, path, 0,
						  BTRFS_EXTENT_ITEM_KEY);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				/* there's no smaller item, so stick with the
				 * larger one */
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}

		while (1) {
			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;

				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.objectid + key.offset <= logical)
				goto next;

			if (key.objectid >= logical + map->stripe_len)
				break;

			if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY)
				goto next;

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if (key.objectid < logical &&
			    (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
				printk(KERN_ERR
				       "btrfs scrub: tree block %llu spanning "
				       "stripes, ignored. logical=%llu\n",
				       (unsigned long long)key.objectid,
				       (unsigned long long)logical);
				goto next;
			}

			/*
			 * trim extent to this stripe
			 */
			if (key.objectid < logical) {
				key.offset -= logical - key.objectid;
				key.objectid = logical;
			}
			if (key.objectid + key.offset >
			    logical + map->stripe_len) {
				key.offset = logical + map->stripe_len -
					     key.objectid;
			}

			ret = scrub_extent(sctx, key.objectid, key.offset,
					   key.objectid - logical + physical,
					   scrub_dev, flags, generation,
					   mirror_num);
			if (ret)
				goto out;

next:
			path->slots[0]++;
		}
		btrfs_release_path(path);
		logical += increment;
		physical += map->stripe_len;
		spin_lock(&sctx->stat_lock);
		sctx->stat.last_physical = physical;
		spin_unlock(&sctx->stat_lock);
	}
out:
	/* push queued extents */
	scrub_submit(sctx);

	blk_finish_plug(&plug);
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}
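
/* scrub those stripes of a chunk that reside on the given device */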
static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
					  struct btrfs_device *scrub_dev,
					  u64 chunk_tree, u64 chunk_objectid,
					  u64 chunk_offset, u64 length,
					  u64 dev_offset)
{
	struct btrfs_mapping_tree *map_tree =
		&sctx->dev_root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	int i;
	int ret = -EINVAL;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);

	if (!em)
		return -EINVAL;

	map = (struct map_lookup *)em->bdev;
	if (em->start != chunk_offset)
		goto out;

	if (em->len < length)
		goto out;

	for (i = 0; i < map->num_stripes; ++i) {
		if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
		    map->stripes[i].physical == dev_offset) {
			ret = scrub_stripe(sctx, map, scrub_dev, i,
					   chunk_offset, length);
			if (ret)
				goto out;
		}
	}
out:
	free_extent_map(em);

	return ret;
}
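
/*
 * walk all dev extents of the given device in [start, end) and scrub the
 * corresponding chunks
 */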
static noinline_for_stack
int scrub_enumerate_chunks(struct scrub_ctx *sctx,
			   struct btrfs_device *scrub_dev, u64 start, u64 end)
{
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	struct btrfs_root *root = sctx->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_block_group_cache *cache;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 2;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = scrub_dev->devid;
	key.offset = 0ull;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] >=
			    btrfs_header_nritems(path->nodes[0])) {
				ret = btrfs_next_leaf(root, path);
				if (ret)
					break;
			}
		}

		l = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.objectid != scrub_dev->devid)
			break;

		if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY)
			break;

		if (found_key.offset >= end)
			break;

		if (found_key.offset < key.offset)
			break;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (found_key.offset + length <= start) {
			key.offset = found_key.offset + length;
			btrfs_release_path(path);
			continue;
		}

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);

		/*
		 * get a reference on the corresponding block group to prevent
		 * the chunk from going away while we scrub it
		 */
		cache = btrfs_lookup_block_group(fs_info, chunk_offset);
		if (!cache) {
			ret = -ENOENT;
			break;
		}
		ret = scrub_chunk(sctx, scrub_dev, chunk_tree, chunk_objectid,
				  chunk_offset, length, found_key.offset);
		btrfs_put_block_group(cache);
		if (ret)
			break;

		key.offset = found_key.offset + length;
		btrfs_release_path(path);
	}

	btrfs_free_path(path);

	/*
	 * ret can still be 1 from search_slot or next_leaf,
	 * that's not an error
	 */
	return ret < 0 ? ret : 0;
}
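
/* scrub all super block copies that fit on the given device */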
static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
					   struct btrfs_device *scrub_dev)
{
	int	i;
	u64	bytenr;
	u64	gen;
	int	ret;
	struct btrfs_root *root = sctx->dev_root;

	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
		return -EIO;

	gen = root->fs_info->last_trans_committed;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE > scrub_dev->total_bytes)
			break;

		ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
				  scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
				  NULL, 1);
		if (ret)
			return ret;
	}
	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);

	return 0;
}

/*
 * get a reference count on fs_info->scrub_workers. start workers if necessary
 */
static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;

	mutex_lock(&fs_info->scrub_lock);
	if (fs_info->scrub_workers_refcnt == 0) {
		btrfs_init_workers(&fs_info->scrub_workers, "scrub",
				   fs_info->thread_pool_size,
				   &fs_info->generic_worker);
		fs_info->scrub_workers.idle_thresh = 4;
		ret = btrfs_start_workers(&fs_info->scrub_workers);
		if (ret)
			goto out;
	}
	++fs_info->scrub_workers_refcnt;
out:
	mutex_unlock(&fs_info->scrub_lock);

	return ret;
}
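
/*
 * drop a reference on fs_info->scrub_workers and stop the workers when
 * the last reference is dropped
 */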
static noinline_for_stack void scrub_workers_put(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	mutex_lock(&fs_info->scrub_lock);
	if (--fs_info->scrub_workers_refcnt == 0)
		btrfs_stop_workers(&fs_info->scrub_workers);
	WARN_ON(fs_info->scrub_workers_refcnt < 0);
	mutex_unlock(&fs_info->scrub_lock);
}
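
/*
 * Scrub the device identified by devid in the range [start, end).
 * Verifies a couple of size assumptions, takes a reference on the scrub
 * workers, attaches a scrub context to the device and then scrubs the
 * superblocks followed by all allocated chunks. Only one scrub may run
 * per device at a time.
 */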
int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
		    struct btrfs_scrub_progress *progress, int readonly)
{
	struct scrub_ctx *sctx;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	struct btrfs_device *dev;

	if (btrfs_fs_closing(root->fs_info))
		return -EINVAL;

	/*
	 * check some assumptions
	 */
	if (root->nodesize != root->leafsize) {
		printk(KERN_ERR
		       "btrfs_scrub: size assumption nodesize == leafsize (%d == %d) fails\n",
		       root->nodesize, root->leafsize);
		return -EINVAL;
	}

	if (root->nodesize > BTRFS_STRIPE_LEN) {
		/*
		 * in this case scrub is unable to calculate the checksum
		 * the way scrub is implemented. Do not handle this
		 * situation at all because it won't ever happen.
		 */
		printk(KERN_ERR
		       "btrfs_scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails\n",
		       root->nodesize, BTRFS_STRIPE_LEN);
		return -EINVAL;
	}

	if (root->sectorsize != PAGE_SIZE) {
		/* not supported for data w/o checksums */
		printk(KERN_ERR
		       "btrfs_scrub: size assumption sectorsize != PAGE_SIZE (%d != %lld) fails\n",
		       root->sectorsize, (unsigned long long)PAGE_SIZE);
		return -EINVAL;
	}

	if (fs_info->chunk_root->nodesize >
	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
	    fs_info->chunk_root->sectorsize >
	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
		/*
		 * would exhaust the array bounds of pagev member in
		 * struct scrub_block
		 */
		pr_err("btrfs_scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails\n",
		       fs_info->chunk_root->nodesize,
		       SCRUB_MAX_PAGES_PER_BLOCK,
		       fs_info->chunk_root->sectorsize,
		       SCRUB_MAX_PAGES_PER_BLOCK);
		return -EINVAL;
	}

	ret = scrub_workers_get(root);
	if (ret)
		return ret;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(root, devid, NULL, NULL);
	if (!dev || dev->missing) {
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(root);
		return -ENODEV;
	}
	mutex_lock(&fs_info->scrub_lock);

	if (!dev->in_fs_metadata) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(root);
		return -ENODEV;
	}

	if (dev->scrub_device) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(root);
		return -EINPROGRESS;
	}
	sctx = scrub_setup_ctx(dev);
	if (IS_ERR(sctx)) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(root);
		return PTR_ERR(sctx);
	}
	sctx->readonly = readonly;
	dev->scrub_device = sctx;

	atomic_inc(&fs_info->scrubs_running);
	mutex_unlock(&fs_info->scrub_lock);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	down_read(&fs_info->scrub_super_lock);
	ret = scrub_supers(sctx, dev);
	up_read(&fs_info->scrub_super_lock);

	if (!ret)
		ret = scrub_enumerate_chunks(sctx, dev, start, end);

	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
	atomic_dec(&fs_info->scrubs_running);
	wake_up(&fs_info->scrub_pause_wait);

	wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);

	if (progress)
		memcpy(progress, &sctx->stat, sizeof(*progress));

	mutex_lock(&fs_info->scrub_lock);
	dev->scrub_device = NULL;
	mutex_unlock(&fs_info->scrub_lock);

	scrub_free_ctx(sctx);
	scrub_workers_put(root);

	return ret;
}
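
/*
 * ask all running scrubs to pause and wait until all of them did so
 */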
void btrfs_scrub_pause(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrub_pause_req);
	while (atomic_read(&fs_info->scrubs_paused) !=
	       atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_paused) ==
			   atomic_read(&fs_info->scrubs_running));
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);
}
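
/*
 * let paused scrubs resume
 */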
void btrfs_scrub_continue(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	atomic_dec(&fs_info->scrub_pause_req);
	wake_up(&fs_info->scrub_pause_wait);
}
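
/*
 * keep scrub away from the superblocks while the caller rewrites them
 */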
void btrfs_scrub_pause_super(struct btrfs_root *root)
{
	down_write(&root->fs_info->scrub_super_lock);
}

void btrfs_scrub_continue_super(struct btrfs_root *root)
{
	up_write(&root->fs_info->scrub_super_lock);
}
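
/*
 * cancel all running scrubs on this filesystem and wait until they have
 * finished; returns -ENOTCONN when no scrub was running
 */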
int __btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	if (!atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->scrub_cancel_req);
	while (atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_running) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrub_cancel_req);
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

int btrfs_scrub_cancel(struct btrfs_root *root)
{
	return __btrfs_scrub_cancel(root->fs_info);
}
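
/*
 * cancel the scrub running on the given device, if any, and wait until
 * the scrub context has been detached from the device
 */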
int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct scrub_ctx *sctx;

	mutex_lock(&fs_info->scrub_lock);
	sctx = dev->scrub_device;
	if (!sctx) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}
	atomic_inc(&sctx->cancel_req);
	while (dev->scrub_device) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   dev->scrub_device == NULL);
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}
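
/*
 * look up the device by devid and cancel a scrub running on it
 */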
int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_device *dev;
	int ret;

	/*
	 * we have to hold the device_list_mutex here so the device
	 * does not go away in cancel_dev. FIXME: find a better solution
	 */
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(root, devid, NULL, NULL);
	if (!dev) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -ENODEV;
	}
	ret = btrfs_scrub_cancel_dev(root, dev);
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	return ret;
}
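
/*
 * copy the current scrub statistics of the given device into *progress;
 * returns -ENODEV when the device does not exist and -ENOTCONN when no
 * scrub is running on it
 */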
int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
			 struct btrfs_scrub_progress *progress)
{
	struct btrfs_device *dev;
	struct scrub_ctx *sctx = NULL;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(root, devid, NULL, NULL);
	if (dev)
		sctx = dev->scrub_device;
	if (sctx)
		memcpy(progress, &sctx->stat, sizeof(*progress));
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
}