btrfs: scrub: set error stats when tree block spanning stripes
fs/btrfs/scrub.c
/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "raid56.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_block;
struct scrub_ctx;

/*
 * The following three values only influence the performance.
 * The last one configures the number of parallel and outstanding I/O
 * operations. The first two values configure an upper limit for the number
 * of (dynamically allocated) pages that are added to a bio.
 */
#define SCRUB_PAGES_PER_RD_BIO	32	/* 128k per bio */
#define SCRUB_PAGES_PER_WR_BIO	32	/* 128k per bio */
#define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */
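
/*
 * A quick sanity check of the numbers above (assuming 4K pages):
 * 32 pages * 4K = 128k per bio, and 64 bios * 32 pages * 4K = 8MB in
 * flight per device, matching the comments on the defines.
 */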

/*
 * The following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 * Values larger than BTRFS_STRIPE_LEN are not supported.
 */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */

struct scrub_recover {
	atomic_t		refs;
	struct btrfs_bio	*bbio;
	u64			map_length;
};

struct scrub_page {
	struct scrub_block	*sblock;
	struct page		*page;
	struct btrfs_device	*dev;
	struct list_head	list;
	u64			flags;  /* extent flags */
	u64			generation;
	u64			logical;
	u64			physical;
	u64			physical_for_dev_replace;
	atomic_t		refs;
	struct {
		unsigned int	mirror_num:8;
		unsigned int	have_csum:1;
		unsigned int	io_error:1;
	};
	u8			csum[BTRFS_CSUM_SIZE];

	struct scrub_recover	*recover;
};

struct scrub_bio {
	int			index;
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	struct bio		*bio;
	int			err;
	u64			logical;
	u64			physical;
#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
	struct scrub_page	*pagev[SCRUB_PAGES_PER_WR_BIO];
#else
	struct scrub_page	*pagev[SCRUB_PAGES_PER_RD_BIO];
#endif
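	/*
	 * The preprocessor branch above simply sizes pagev[] for the
	 * larger of the read and write limits, so the same scrub_bio
	 * type can back both the read path and the dev-replace write
	 * path.
	 */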
	int			page_count;
	int			next_free;
	struct btrfs_work	work;
};

struct scrub_block {
	struct scrub_page	*pagev[SCRUB_MAX_PAGES_PER_BLOCK];
	int			page_count;
	atomic_t		outstanding_pages;
	atomic_t		refs; /* free mem on transition to zero */
	struct scrub_ctx	*sctx;
	struct scrub_parity	*sparity;
	struct {
		unsigned int	header_error:1;
		unsigned int	checksum_error:1;
		unsigned int	no_io_error_seen:1;
		unsigned int	generation_error:1; /* also sets header_error */

		/*
		 * The following flag is for the data used to check the
		 * parity, i.e. for data that has a checksum.
		 */
		unsigned int	data_corrected:1;
	};
	struct btrfs_work	work;
};

/* Used for the chunks with parity stripes, such as RAID5/6 */
struct scrub_parity {
	struct scrub_ctx	*sctx;

	struct btrfs_device	*scrub_dev;

	u64			logic_start;

	u64			logic_end;

	int			nsectors;

	int			stripe_len;

	atomic_t		refs;

	struct list_head	spages;

	/* Work of parity check and repair */
	struct btrfs_work	work;

	/* Mark the parity blocks which have data */
	unsigned long		*dbitmap;

	/*
	 * Mark the parity blocks which have data, but where errors
	 * happened when reading or checking that data
	 */
	unsigned long		*ebitmap;

	unsigned long		bitmap[0];
};
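
/*
 * Note on the layout (an assumption based on how the members are declared
 * above; the allocation code is not part of this section): dbitmap and
 * ebitmap are expected to point into the single trailing bitmap[0]
 * flexible array, each covering nsectors bits, so that one allocation
 * holds both bitmaps.
 */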

struct scrub_wr_ctx {
	struct scrub_bio	*wr_curr_bio;
	struct btrfs_device	*tgtdev;
	int			pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
	atomic_t		flush_all_writes;
	struct mutex		wr_lock;
};

struct scrub_ctx {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
	struct btrfs_root	*dev_root;
	int			first_free;
	int			curr;
	atomic_t		bios_in_flight;
	atomic_t		workers_pending;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	u16			csum_size;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	int			pages_per_rd_bio;
	u32			sectorsize;
	u32			nodesize;

	int			is_dev_replace;
	struct scrub_wr_ctx	wr_ctx;

	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;

	/*
	 * Use a ref counter to avoid use-after-free issues. Scrub workers
	 * decrement bios_in_flight and workers_pending and then do a wakeup
	 * on the list_wait wait queue. We must ensure the main scrub task
	 * doesn't free the scrub context before or while the workers are
	 * doing the wakeup() call.
	 */
	atomic_t		refs;
};
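
/*
 * A compact summary of the refs lifecycle described above, as implemented
 * by the helpers in this file: scrub_setup_ctx() starts refs at 1,
 * scrub_pending_bio_inc() and scrub_pending_trans_workers_inc() take one
 * extra reference per in-flight unit, the matching *_dec() helpers drop
 * it via scrub_put_ctx(), and the context is freed on the final put.
 */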

struct scrub_fixup_nodatasum {
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	u64			logical;
	struct btrfs_root	*root;
	struct btrfs_work	work;
	int			mirror_num;
};

struct scrub_nocow_inode {
	u64			inum;
	u64			offset;
	u64			root;
	struct list_head	list;
};

struct scrub_copy_nocow_ctx {
	struct scrub_ctx	*sctx;
	u64			logical;
	u64			len;
	int			mirror_num;
	u64			physical_for_dev_replace;
	struct list_head	inodes;
	struct btrfs_work	work;
};

struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	const char		*errstr;
	sector_t		sector;
	u64			logical;
	struct btrfs_device	*dev;
};

static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock, int is_metadata,
				int have_csum, u8 *csum, u64 generation,
				u16 csum_size, int retry_failed_mirror);
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
					 struct scrub_block *sblock,
					 int is_metadata, int have_csum,
					 const u8 *csum, u64 generation,
					 u16 csum_size);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_page_get(struct scrub_page *spage);
static void scrub_page_put(struct scrub_page *spage);
static void scrub_parity_get(struct scrub_parity *sparity);
static void scrub_parity_put(struct scrub_parity *sparity);
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num);
static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
			      struct scrub_wr_ctx *wr_ctx,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_device *dev,
			      int is_dev_replace);
static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio);
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page);
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
				      struct scrub_copy_nocow_ctx *ctx);
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
			    int mirror_num, u64 physical_for_dev_replace);
static void copy_nocow_pages_worker(struct btrfs_work *work);
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_put_ctx(struct scrub_ctx *sctx);

static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
	atomic_inc(&sctx->refs);
	atomic_inc(&sctx->bios_in_flight);
}

static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
	atomic_dec(&sctx->bios_in_flight);
	wake_up(&sctx->list_wait);
	scrub_put_ctx(sctx);
}

static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
}
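
/*
 * Note the drop/wait/retake pattern above: scrub_lock must not be held
 * while sleeping in wait_event(), so the lock is released before waiting
 * for scrub_pause_req to clear and reacquired before the loop condition
 * is checked again.
 */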

static void scrub_pause_on(struct btrfs_fs_info *fs_info)
{
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_pause_off(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	__scrub_blocked_if_needed(fs_info);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	scrub_pause_on(fs_info);
	scrub_pause_off(fs_info);
}

/*
 * used for workers that require transaction commits (i.e., for the
 * NOCOW case)
 */
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	atomic_inc(&sctx->refs);
	/*
	 * increment scrubs_running to prevent cancel requests from
	 * completing as long as a worker is running. we must also
	 * increment scrubs_paused to prevent deadlocking on pause
	 * requests used for transaction commits (as the worker uses a
	 * transaction context). it is safe to regard the worker
	 * as paused for all practical matters. effectively, we only
	 * avoid cancellation requests from completing.
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrubs_running);
	atomic_inc(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	/*
	 * The "scrubs_running == scrubs_paused" check inside wait_event()
	 * is not an atomic operation, i.e. we may inc/dec
	 * scrubs_running/scrubs_paused at any time. Wake up
	 * scrub_pause_wait as often as we can, so that a blocked
	 * transaction commit waits as briefly as possible.
	 */
	wake_up(&fs_info->scrub_pause_wait);

	atomic_inc(&sctx->workers_pending);
}
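
/*
 * Why pretending to be paused works (an interpretation based on the pause
 * logic in this file): a pause request waits until every running scrub
 * counts as paused. A worker that joins a transaction could never reach a
 * pause point while a commit is waiting on it, so it marks itself paused
 * up front; the only thing it still blocks is cancellation.
 */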

/* used for workers that require transaction commits */
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	/*
	 * see scrub_pending_trans_workers_inc() for why we're pretending
	 * to be paused in the scrub counters
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_dec(&fs_info->scrubs_running);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_dec(&sctx->workers_pending);
	wake_up(&fs_info->scrub_pause_wait);
	wake_up(&sctx->list_wait);
	scrub_put_ctx(sctx);
}

static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}

static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	scrub_free_wr_ctx(&sctx->wr_ctx);

	/* this can happen when scrub is cancelled */
	if (sctx->curr != -1) {
		struct scrub_bio *sbio = sctx->bios[sctx->curr];

		for (i = 0; i < sbio->page_count; i++) {
			WARN_ON(!sbio->pagev[i]->page);
			scrub_block_put(sbio->pagev[i]->sblock);
		}
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio = sctx->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	scrub_free_csums(sctx);
	kfree(sctx);
}

static void scrub_put_ctx(struct scrub_ctx *sctx)
{
	if (atomic_dec_and_test(&sctx->refs))
		scrub_free_ctx(sctx);
}

static noinline_for_stack
struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int i;
	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
	int ret;

	sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
	if (!sctx)
		goto nomem;
	atomic_set(&sctx->refs, 1);
	sctx->is_dev_replace = is_dev_replace;
	sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
	sctx->curr = -1;
	sctx->dev_root = dev->dev_root;
	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
		if (!sbio)
			goto nomem;
		sctx->bios[i] = sbio;

		sbio->index = i;
		sbio->sctx = sctx;
		sbio->page_count = 0;
		btrfs_init_work(&sbio->work, btrfs_scrub_helper,
				scrub_bio_end_io_worker, NULL, NULL);

		if (i != SCRUB_BIOS_PER_SCTX - 1)
			sctx->bios[i]->next_free = i + 1;
		else
			sctx->bios[i]->next_free = -1;
	}
	sctx->first_free = 0;
	sctx->nodesize = dev->dev_root->nodesize;
	sctx->sectorsize = dev->dev_root->sectorsize;
	atomic_set(&sctx->bios_in_flight, 0);
	atomic_set(&sctx->workers_pending, 0);
	atomic_set(&sctx->cancel_req, 0);
	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
	INIT_LIST_HEAD(&sctx->csum_list);

	spin_lock_init(&sctx->list_lock);
	spin_lock_init(&sctx->stat_lock);
	init_waitqueue_head(&sctx->list_wait);

	ret = scrub_setup_wr_ctx(sctx, &sctx->wr_ctx, fs_info,
				 fs_info->dev_replace.tgtdev, is_dev_replace);
	if (ret) {
		scrub_free_ctx(sctx);
		return ERR_PTR(ret);
	}
	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}

static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
				     void *warn_ctx)
{
	u64 isize;
	u32 nlink;
	int ret;
	int i;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = warn_ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key root_key;
	struct btrfs_key key;

	root_key.objectid = root;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/*
	 * this makes the path point to (inum INODE_ITEM ioff)
	 */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
	if (ret) {
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
				    struct btrfs_inode_item);
	isize = btrfs_inode_size(eb, inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	ipath = init_ipath(4096, local_root, swarn->path);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);

	if (ret < 0)
		goto err;

	/*
	 * we deliberately ignore that ipath might have been too small to
	 * hold all of the paths here
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		btrfs_warn_in_rcu(fs_info, "%s at logical %llu on dev "
			"%s, sector %llu, root %llu, inode %llu, offset %llu, "
			"length %llu, links %u (path: %s)", swarn->errstr,
			swarn->logical, rcu_str_deref(swarn->dev->name),
			(unsigned long long)swarn->sector, root, inum, offset,
			min(isize - offset, (u64)PAGE_SIZE), nlink,
			(char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	return 0;

err:
	btrfs_warn_in_rcu(fs_info, "%s at logical %llu on dev "
		"%s, sector %llu, root %llu, inode %llu, offset %llu: path "
		"resolving failed with ret=%d", swarn->errstr,
		swarn->logical, rcu_str_deref(swarn->dev->name),
		(unsigned long long)swarn->sector, root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}

static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	unsigned long ptr = 0;
	u64 extent_item_pos;
	u64 flags = 0;
	u64 ref_root;
	u32 item_size;
	u8 ref_level;
	int ret;

	WARN_ON(sblock->page_count < 1);
	dev = sblock->pagev[0]->dev;
	fs_info = sblock->sctx->dev_root->fs_info;

	path = btrfs_alloc_path();
	if (!path)
		return;

	swarn.sector = (sblock->pagev[0]->physical) >> 9;
	swarn.logical = sblock->pagev[0]->logical;
	swarn.errstr = errstr;
	swarn.dev = NULL;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
				  &flags);
	if (ret < 0)
		goto out;

	extent_item_pos = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			btrfs_warn_in_rcu(fs_info,
				"%s at logical %llu on dev %s, "
				"sector %llu: metadata %s (level %d) in tree "
				"%llu", errstr, swarn.logical,
				rcu_str_deref(dev->name),
				(unsigned long long)swarn.sector,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
		btrfs_release_path(path);
	} else {
		btrfs_release_path(path);
		swarn.path = path;
		swarn.dev = dev;
		iterate_extent_inodes(fs_info, found_key.objectid,
				      extent_item_pos, 1,
				      scrub_print_warning_inode, &swarn);
	}

out:
	btrfs_free_path(path);
}

static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
{
	struct page *page = NULL;
	unsigned long index;
	struct scrub_fixup_nodatasum *fixup = fixup_ctx;
	int ret;
	int corrected = 0;
	struct btrfs_key key;
	struct inode *inode = NULL;
	struct btrfs_fs_info *fs_info;
	u64 end = offset + PAGE_SIZE - 1;
	struct btrfs_root *local_root;
	int srcu_index;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	fs_info = fixup->root->fs_info;
	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(local_root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
		return PTR_ERR(local_root);
	}

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	index = offset >> PAGE_CACHE_SHIFT;

	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page)) {
		if (PageDirty(page)) {
			/*
			 * we need to write the data to the defect sector. the
			 * data that was in that sector is not in memory,
			 * because the page was modified. we must not write the
			 * modified page to that sector.
			 *
			 * TODO: what could be done here: wait for the delalloc
			 *       runner to write out that page (might involve
			 *       COW) and see whether the sector is still
			 *       referenced afterwards.
			 *
			 * For the meantime, we'll treat this error as
			 * uncorrectable, although there is a chance that a
			 * later scrub will find the bad sector again and that
			 * there's no dirty page in memory then.
			 */
			ret = -EIO;
			goto out;
		}
		ret = repair_io_failure(inode, offset, PAGE_SIZE,
					fixup->logical, page,
					offset - page_offset(page),
					fixup->mirror_num);
		unlock_page(page);
		corrected = !ret;
	} else {
		/*
		 * we need to get good data first. the general readpage path
		 * will call repair_io_failure for us, we just have to make
		 * sure we read the bad mirror.
		 */
		ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
				      EXTENT_DAMAGED, GFP_NOFS);
		if (ret) {
			/* set_extent_bits should give proper error */
			WARN_ON(ret > 0);
			if (ret > 0)
				ret = -EFAULT;
			goto out;
		}

		ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
					    btrfs_get_extent,
					    fixup->mirror_num);
		wait_on_page_locked(page);

		corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
					    end, EXTENT_DAMAGED, 0, NULL);
		if (!corrected)
			clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
					  EXTENT_DAMAGED, GFP_NOFS);
	}

out:
	if (page)
		put_page(page);

	iput(inode);

	if (ret < 0)
		return ret;

	if (ret == 0 && corrected) {
		/*
		 * we only need to call readpage for one of the inodes belonging
		 * to this extent. so make iterate_extent_inodes stop
		 */
		return 1;
	}

	return -EIO;
}
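
/*
 * Return convention of the callback above, as used via
 * iterate_inodes_from_logical() in scrub_fixup_nodatasum() below: a
 * negative value is a hard error, 1 means the sector was corrected and
 * the inode iteration can stop, and -EIO covers the remaining "read
 * completed but nothing was fixed" case; 0 is never returned.
 */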

static void scrub_fixup_nodatasum(struct btrfs_work *work)
{
	int ret;
	struct scrub_fixup_nodatasum *fixup;
	struct scrub_ctx *sctx;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_path *path;
	int uncorrectable = 0;

	fixup = container_of(work, struct scrub_fixup_nodatasum, work);
	sctx = fixup->sctx;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.malloc_errors;
		spin_unlock(&sctx->stat_lock);
		uncorrectable = 1;
		goto out;
	}

	trans = btrfs_join_transaction(fixup->root);
	if (IS_ERR(trans)) {
		uncorrectable = 1;
		goto out;
	}

	/*
	 * the idea is to trigger a regular read through the standard path. we
	 * read a page from the (failed) logical address by specifying the
	 * corresponding copynum of the failed sector. thus, that readpage is
	 * expected to fail.
	 * that is the point where on-the-fly error correction will kick in
	 * (once it's finished) and rewrite the failed sector if a good copy
	 * can be found.
	 */
	ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
					  path, scrub_fixup_readpage,
					  fixup);
	if (ret < 0) {
		uncorrectable = 1;
		goto out;
	}
	WARN_ON(ret != 1);

	spin_lock(&sctx->stat_lock);
	++sctx->stat.corrected_errors;
	spin_unlock(&sctx->stat_lock);

out:
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, fixup->root);
	if (uncorrectable) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.uncorrectable_errors;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_replace_stats_inc(
			&sctx->dev_root->fs_info->dev_replace.
			num_uncorrectable_read_errors);
		btrfs_err_rl_in_rcu(sctx->dev_root->fs_info,
		    "unable to fixup (nodatasum) error at logical %llu on dev %s",
			fixup->logical, rcu_str_deref(fixup->dev->name));
	}

	btrfs_free_path(path);
	kfree(fixup);

	scrub_pending_trans_workers_dec(sctx);
}

static inline void scrub_get_recover(struct scrub_recover *recover)
{
	atomic_inc(&recover->refs);
}

static inline void scrub_put_recover(struct scrub_recover *recover)
{
	if (atomic_dec_and_test(&recover->refs)) {
		btrfs_put_bbio(recover->bbio);
		kfree(recover);
	}
}

/*
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
	struct scrub_ctx *sctx = sblock_to_check->sctx;
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	u64 length;
	u64 logical;
	u64 generation;
	unsigned int failed_mirror_index;
	unsigned int is_metadata;
	unsigned int have_csum;
	u8 *csum;
	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
	struct scrub_block *sblock_bad;
	int ret;
	int mirror_index;
	int page_num;
	int success;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	BUG_ON(sblock_to_check->page_count < 1);
	fs_info = sctx->dev_root->fs_info;
	if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
		/*
		 * if we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		return 0;
	}
	length = sblock_to_check->page_count * PAGE_SIZE;
	logical = sblock_to_check->pagev[0]->logical;
	generation = sblock_to_check->pagev[0]->generation;
	BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
	failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
	is_metadata = !(sblock_to_check->pagev[0]->flags &
			BTRFS_EXTENT_FLAG_DATA);
	have_csum = sblock_to_check->pagev[0]->have_csum;
	csum = sblock_to_check->pagev[0]->csum;
	dev = sblock_to_check->pagev[0]->dev;

	if (sctx->is_dev_replace && !is_metadata && !have_csum) {
		sblocks_for_recheck = NULL;
		goto nodatasum_case;
	}

	/*
	 * read all mirrors one after the other. This includes re-reading
	 * the extent or metadata block that failed (that was
	 * the cause that this fixup code is called) another time,
	 * page by page this time in order to know which pages
	 * caused I/O errors and which ones are good (for all mirrors).
	 * It is the goal to handle the situation when more than one
	 * mirror contains I/O errors, but the errors do not
	 * overlap, i.e. the data can be repaired by selecting the
	 * pages from those mirrors without I/O error on the
	 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
	 * would be that mirror #1 has an I/O error on the first page,
	 * the second page is good, and mirror #2 has an I/O error on
	 * the second page, but the first page is good.
	 * Then the first page of the first mirror can be repaired by
	 * taking the first page of the second mirror, and the
	 * second page of the second mirror can be repaired by
	 * copying the contents of the 2nd page of the 1st mirror.
	 * One more note: if the pages of one mirror contain I/O
	 * errors, the checksum cannot be verified. In order to get
	 * the best data for repairing, the first attempt is to find
	 * a mirror without I/O errors and with a validated checksum.
	 * Only if this is not possible, the pages are picked from
	 * mirrors with I/O errors without considering the checksum.
	 * If the latter is the case, at the end, the checksum of the
	 * repaired area is verified in order to correctly maintain
	 * the statistics.
	 */

	sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
				      sizeof(*sblocks_for_recheck), GFP_NOFS);
	if (!sblocks_for_recheck) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}

	/* setup the context, map the logical blocks and alloc the pages */
	ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
	if (ret) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}
	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
	sblock_bad = sblocks_for_recheck + failed_mirror_index;

	/* build and submit the bios for the failed mirror, check checksums */
	scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
			    csum, generation, sctx->csum_size, 1);

	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
	    sblock_bad->no_io_error_seen) {
		/*
		 * the error disappeared after reading page by page, or
		 * the area was part of a huge bio and other parts of the
		 * bio caused I/O errors, or the block layer merged several
		 * read requests into one and the error is caused by a
		 * different bio (usually one of the two latter cases is
		 * the cause)
		 */
		spin_lock(&sctx->stat_lock);
		sctx->stat.unverified_errors++;
		sblock_to_check->data_corrected = 1;
		spin_unlock(&sctx->stat_lock);

		if (sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock_bad);
		goto out;
	}

	if (!sblock_bad->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("i/o error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
	} else if (sblock_bad->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.csum_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev,
					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
	} else if (sblock_bad->header_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.verify_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum/header error",
					    sblock_to_check);
		if (sblock_bad->generation_error)
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
	}

	if (sctx->readonly) {
		ASSERT(!sctx->is_dev_replace);
		goto out;
	}

	if (!is_metadata && !have_csum) {
		struct scrub_fixup_nodatasum *fixup_nodatasum;

		WARN_ON(sctx->is_dev_replace);

nodatasum_case:

		/*
		 * !is_metadata and !have_csum, this means that the data
		 * might not be COW'ed, that it might be modified
		 * concurrently. The general strategy to work on the
		 * commit root does not help in the case when COW is not
		 * used.
		 */
		fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
		if (!fixup_nodatasum)
			goto did_not_correct_error;
		fixup_nodatasum->sctx = sctx;
		fixup_nodatasum->dev = dev;
		fixup_nodatasum->logical = logical;
		fixup_nodatasum->root = fs_info->extent_root;
		fixup_nodatasum->mirror_num = failed_mirror_index + 1;
		scrub_pending_trans_workers_inc(sctx);
		btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper,
				scrub_fixup_nodatasum, NULL, NULL);
		btrfs_queue_work(fs_info->scrub_workers,
				 &fixup_nodatasum->work);
		goto out;
	}

	/*
	 * now build and submit the bios for the other mirrors, check
	 * checksums.
	 * First try to pick the mirror which is completely without I/O
	 * errors and also does not have a checksum error.
	 * If one is found, and if a checksum is present, the full block
	 * that is known to contain an error is rewritten. Afterwards
	 * the block is known to be corrected.
	 * If a mirror is found which is completely correct, and no
	 * checksum is present, only those pages are rewritten that had
	 * an I/O error in the block to be repaired, since it cannot be
	 * determined which copy of the other pages is better (and it
	 * could happen otherwise that a correct page would be
	 * overwritten by a bad one).
	 */
	for (mirror_index = 0;
	     mirror_index < BTRFS_MAX_MIRRORS &&
	     sblocks_for_recheck[mirror_index].page_count > 0;
	     mirror_index++) {
		struct scrub_block *sblock_other;

		if (mirror_index == failed_mirror_index)
			continue;
		sblock_other = sblocks_for_recheck + mirror_index;

		/* build and submit the bios, check checksums */
		scrub_recheck_block(fs_info, sblock_other, is_metadata,
				    have_csum, csum, generation,
				    sctx->csum_size, 0);

		if (!sblock_other->header_error &&
		    !sblock_other->checksum_error &&
		    sblock_other->no_io_error_seen) {
			if (sctx->is_dev_replace) {
				scrub_write_block_to_dev_replace(sblock_other);
				goto corrected_error;
			} else {
				ret = scrub_repair_block_from_good_copy(
						sblock_bad, sblock_other);
				if (!ret)
					goto corrected_error;
			}
		}
	}

	if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
		goto did_not_correct_error;

	/*
	 * In case of I/O errors in the area that is supposed to be
	 * repaired, continue by picking good copies of those pages.
	 * Select the good pages from mirrors to rewrite bad pages from
	 * the area to fix. Afterwards verify the checksum of the block
	 * that is supposed to be repaired. This verification step is
	 * only done for the purpose of statistic counting and for the
	 * final scrub report, whether errors remain.
	 * A perfect algorithm could make use of the checksum and try
	 * all possible combinations of pages from the different mirrors
	 * until the checksum verification succeeds. For example, when
	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
	 * of mirror #2 is readable but the final checksum test fails,
	 * then the 2nd page of mirror #3 could be tried, to see whether
	 * the final checksum then succeeds. But this would be a rare
	 * exception and is therefore not implemented. At least it is
	 * avoided that the good copy is overwritten.
	 * A more useful improvement would be to pick the sectors
	 * without I/O error based on sector sizes (512 bytes on legacy
	 * disks) instead of on PAGE_SIZE. Then maybe 512 byte of one
	 * mirror could be repaired by taking 512 byte of a different
	 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
	 * area are unreadable.
	 */
	success = 1;
	for (page_num = 0; page_num < sblock_bad->page_count;
	     page_num++) {
		struct scrub_page *page_bad = sblock_bad->pagev[page_num];
		struct scrub_block *sblock_other = NULL;

		/* skip no-io-error page in scrub */
		if (!page_bad->io_error && !sctx->is_dev_replace)
			continue;

		/* try to find no-io-error page in mirrors */
		if (page_bad->io_error) {
			for (mirror_index = 0;
			     mirror_index < BTRFS_MAX_MIRRORS &&
			     sblocks_for_recheck[mirror_index].page_count > 0;
			     mirror_index++) {
				if (!sblocks_for_recheck[mirror_index].
				    pagev[page_num]->io_error) {
					sblock_other = sblocks_for_recheck +
						       mirror_index;
					break;
				}
			}
			if (!sblock_other)
				success = 0;
		}

		if (sctx->is_dev_replace) {
			/*
			 * did not find a mirror to fetch the page
			 * from. scrub_write_page_to_dev_replace()
			 * handles this case (page->io_error), by
			 * filling the block with zeros before
			 * submitting the write request
			 */
			if (!sblock_other)
				sblock_other = sblock_bad;

			if (scrub_write_page_to_dev_replace(sblock_other,
							    page_num) != 0) {
				btrfs_dev_replace_stats_inc(
					&sctx->dev_root->
					fs_info->dev_replace.
					num_write_errors);
				success = 0;
			}
		} else if (sblock_other) {
			ret = scrub_repair_page_from_good_copy(sblock_bad,
							       sblock_other,
							       page_num, 0);
			if (0 == ret)
				page_bad->io_error = 0;
			else
				success = 0;
		}
	}

	if (success && !sctx->is_dev_replace) {
		if (is_metadata || have_csum) {
			/*
			 * need to verify the checksum now that all
			 * sectors on disk are repaired (the write
			 * request for data to be repaired is on its way).
			 * Just be lazy and use scrub_recheck_block()
			 * which re-reads the data before the checksum
			 * is verified, but most likely the data comes out
			 * of the page cache.
			 */
			scrub_recheck_block(fs_info, sblock_bad,
					    is_metadata, have_csum, csum,
					    generation, sctx->csum_size, 1);
			if (!sblock_bad->header_error &&
			    !sblock_bad->checksum_error &&
			    sblock_bad->no_io_error_seen)
				goto corrected_error;
			else
				goto did_not_correct_error;
		} else {
corrected_error:
			spin_lock(&sctx->stat_lock);
			sctx->stat.corrected_errors++;
			sblock_to_check->data_corrected = 1;
			spin_unlock(&sctx->stat_lock);
			btrfs_err_rl_in_rcu(fs_info,
				"fixed up error at logical %llu on dev %s",
				logical, rcu_str_deref(dev->name));
		}
	} else {
did_not_correct_error:
		spin_lock(&sctx->stat_lock);
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_err_rl_in_rcu(fs_info,
			"unable to fixup (regular) error at logical %llu on dev %s",
			logical, rcu_str_deref(dev->name));
	}

out:
	if (sblocks_for_recheck) {
		for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
		     mirror_index++) {
			struct scrub_block *sblock = sblocks_for_recheck +
						     mirror_index;
			struct scrub_recover *recover;
			int page_index;

			for (page_index = 0; page_index < sblock->page_count;
			     page_index++) {
				sblock->pagev[page_index]->sblock = NULL;
				recover = sblock->pagev[page_index]->recover;
				if (recover) {
					scrub_put_recover(recover);
					sblock->pagev[page_index]->recover =
									NULL;
				}
				scrub_page_put(sblock->pagev[page_index]);
			}
		}
		kfree(sblocks_for_recheck);
	}

	return 0;
}

static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
{
	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
		return 2;
	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
		return 3;
	else
		return (int)bbio->num_stripes;
}
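
/*
 * Rationale for the counts above (an interpretation, not stated in the
 * code): a RAID5 sector can be obtained two ways (direct read, or rebuilt
 * from parity) and a RAID6 sector three ways (direct, via P, or via Q),
 * so each way of reading counts as one "mirror" for the recheck loop.
 */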

static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
						 u64 *raid_map,
						 u64 mapped_length,
						 int nstripes, int mirror,
						 int *stripe_index,
						 u64 *stripe_offset)
{
	int i;

	if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		/* RAID5/6 */
		for (i = 0; i < nstripes; i++) {
			if (raid_map[i] == RAID6_Q_STRIPE ||
			    raid_map[i] == RAID5_P_STRIPE)
				continue;

			if (logical >= raid_map[i] &&
			    logical < raid_map[i] + mapped_length)
				break;
		}

		*stripe_index = i;
		*stripe_offset = logical - raid_map[i];
	} else {
		/* The other RAID type */
		*stripe_index = mirror;
		*stripe_offset = 0;
	}
}
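
/*
 * A worked example for the RAID56 branch above (hypothetical numbers):
 * with raid_map = { 0, 64K, RAID5_P_STRIPE } and mapped_length = 64K, a
 * logical address of 70K fails the range test for slot 0, matches slot 1
 * and yields stripe_index = 1, stripe_offset = 70K - 64K = 6K; the parity
 * slot is never matched because it is skipped by the continue.
 */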

static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck)
{
	struct scrub_ctx *sctx = original_sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
	u64 length = original_sblock->page_count * PAGE_SIZE;
	u64 logical = original_sblock->pagev[0]->logical;
	struct scrub_recover *recover;
	struct btrfs_bio *bbio;
	u64 sublen;
	u64 mapped_length;
	u64 stripe_offset;
	int stripe_index;
	int page_index = 0;
	int mirror_index;
	int nmirrors;
	int ret;

	/*
	 * note: the two members refs and outstanding_pages
	 * are not used (and not set) in the blocks that are used for
	 * the recheck procedure
	 */

	while (length > 0) {
		sublen = min_t(u64, length, PAGE_SIZE);
		mapped_length = sublen;
		bbio = NULL;

		/*
		 * with a length of PAGE_SIZE, each returned stripe
		 * represents one mirror
		 */
		ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical,
				       &mapped_length, &bbio, 0, 1);
		if (ret || !bbio || mapped_length < sublen) {
			btrfs_put_bbio(bbio);
			return -EIO;
		}

		recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
		if (!recover) {
			btrfs_put_bbio(bbio);
			return -ENOMEM;
		}

		atomic_set(&recover->refs, 1);
		recover->bbio = bbio;
		recover->map_length = mapped_length;

		BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO);

		nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS);

		for (mirror_index = 0; mirror_index < nmirrors;
		     mirror_index++) {
			struct scrub_block *sblock;
			struct scrub_page *page;

			sblock = sblocks_for_recheck + mirror_index;
			sblock->sctx = sctx;
			page = kzalloc(sizeof(*page), GFP_NOFS);
			if (!page) {
leave_nomem:
				spin_lock(&sctx->stat_lock);
				sctx->stat.malloc_errors++;
				spin_unlock(&sctx->stat_lock);
				scrub_put_recover(recover);
				return -ENOMEM;
			}
			scrub_page_get(page);
			sblock->pagev[page_index] = page;
			page->logical = logical;

			scrub_stripe_index_and_offset(logical,
						      bbio->map_type,
						      bbio->raid_map,
						      mapped_length,
						      bbio->num_stripes -
						      bbio->num_tgtdevs,
						      mirror_index,
						      &stripe_index,
						      &stripe_offset);
			page->physical = bbio->stripes[stripe_index].physical +
					 stripe_offset;
			page->dev = bbio->stripes[stripe_index].dev;

			BUG_ON(page_index >= original_sblock->page_count);
			page->physical_for_dev_replace =
				original_sblock->pagev[page_index]->
				physical_for_dev_replace;
			/* for missing devices, dev->bdev is NULL */
			page->mirror_num = mirror_index + 1;
			sblock->page_count++;
			page->page = alloc_page(GFP_NOFS);
			if (!page->page)
				goto leave_nomem;

			scrub_get_recover(recover);
			page->recover = recover;
		}
		scrub_put_recover(recover);
		length -= sublen;
		logical += sublen;
		page_index++;
	}

	return 0;
}

struct scrub_bio_ret {
	struct completion event;
	int error;
};

static void scrub_bio_wait_endio(struct bio *bio)
{
	struct scrub_bio_ret *ret = bio->bi_private;

	ret->error = bio->bi_error;
	complete(&ret->event);
}

static inline int scrub_is_page_on_raid56(struct scrub_page *page)
{
	return page->recover &&
	       (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
}

static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
					struct bio *bio,
					struct scrub_page *page)
{
	struct scrub_bio_ret done;
	int ret;

	init_completion(&done.event);
	done.error = 0;
	bio->bi_iter.bi_sector = page->logical >> 9;
	bio->bi_private = &done;
	bio->bi_end_io = scrub_bio_wait_endio;

	ret = raid56_parity_recover(fs_info->fs_root, bio, page->recover->bbio,
				    page->recover->map_length,
				    page->mirror_num, 0);
	if (ret)
		return ret;

	wait_for_completion(&done.event);
	if (done.error)
		return -EIO;

	return 0;
}
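
/*
 * The helper above turns the asynchronous raid56 recovery submission into
 * a synchronous read for the recheck path: the on-stack completion is
 * signalled from the bio end_io callback, so the caller simply sleeps in
 * wait_for_completion() until the rebuilt data has landed in the page.
 */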

/*
 * this function will check the on disk data for checksum errors, header
 * errors and read I/O errors. If any I/O errors happen, the exact pages
 * which are errored are marked as being bad. The goal is to enable scrub
 * to take those pages that are not errored from all the mirrors so that
 * the pages that are errored in the just handled mirror can be repaired.
 */
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock, int is_metadata,
				int have_csum, u8 *csum, u64 generation,
				u16 csum_size, int retry_failed_mirror)
{
	int page_num;

	sblock->no_io_error_seen = 1;
	sblock->header_error = 0;
	sblock->checksum_error = 0;

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		struct bio *bio;
		struct scrub_page *page = sblock->pagev[page_num];

		if (page->dev->bdev == NULL) {
			page->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}

		WARN_ON(!page->page);
		bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
		if (!bio) {
			page->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}
		bio->bi_bdev = page->dev->bdev;

		bio_add_page(bio, page->page, PAGE_SIZE, 0);
		if (!retry_failed_mirror && scrub_is_page_on_raid56(page)) {
			if (scrub_submit_raid56_bio_wait(fs_info, bio, page))
				sblock->no_io_error_seen = 0;
		} else {
			bio->bi_iter.bi_sector = page->physical >> 9;

			if (btrfsic_submit_bio_wait(READ, bio))
				sblock->no_io_error_seen = 0;
		}

		bio_put(bio);
	}

	if (sblock->no_io_error_seen)
		scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
					     have_csum, csum, generation,
					     csum_size);

	return;
}

static inline int scrub_check_fsid(u8 fsid[],
				   struct scrub_page *spage)
{
	struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
	int ret;

	ret = memcmp(fsid, fs_devices->fsid, BTRFS_UUID_SIZE);
	return !ret;
}

static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
					 struct scrub_block *sblock,
					 int is_metadata, int have_csum,
					 const u8 *csum, u64 generation,
					 u16 csum_size)
{
	int page_num;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u32 crc = ~(u32)0;
	void *mapped_buffer;

	WARN_ON(!sblock->pagev[0]->page);
	if (is_metadata) {
		struct btrfs_header *h;

		mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
		h = (struct btrfs_header *)mapped_buffer;

		if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h) ||
		    !scrub_check_fsid(h->fsid, sblock->pagev[0]) ||
		    memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
			   BTRFS_UUID_SIZE)) {
			sblock->header_error = 1;
		} else if (generation != btrfs_stack_header_generation(h)) {
			sblock->header_error = 1;
			sblock->generation_error = 1;
		}
		csum = h->csum;
	} else {
		if (!have_csum)
			return;

		mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
	}

	for (page_num = 0;;) {
		if (page_num == 0 && is_metadata)
			crc = btrfs_csum_data(
				((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE,
				crc, PAGE_SIZE - BTRFS_CSUM_SIZE);
		else
			crc = btrfs_csum_data(mapped_buffer, crc, PAGE_SIZE);

		kunmap_atomic(mapped_buffer);
		page_num++;
		if (page_num >= sblock->page_count)
			break;
		WARN_ON(!sblock->pagev[page_num]->page);

		mapped_buffer = kmap_atomic(sblock->pagev[page_num]->page);
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, csum, csum_size))
		sblock->checksum_error = 1;
}
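
/*
 * Two details of the loop above worth spelling out: for metadata the
 * checksum bytes embedded at the start of the first page are excluded
 * from the CRC (the expected value comes from the header itself via
 * csum = h->csum), and the crc value is carried across the kmap/kunmap
 * of each page so the whole block is summed as one stream.
 */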

static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good)
{
	int page_num;
	int ret = 0;

	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
		int ret_sub;

		ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
							   sblock_good,
							   page_num, 1);
		if (ret_sub)
			ret = ret_sub;
	}

	return ret;
}

static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write)
{
	struct scrub_page *page_bad = sblock_bad->pagev[page_num];
	struct scrub_page *page_good = sblock_good->pagev[page_num];

	BUG_ON(page_bad->page == NULL);
	BUG_ON(page_good->page == NULL);
	if (force_write || sblock_bad->header_error ||
	    sblock_bad->checksum_error || page_bad->io_error) {
		struct bio *bio;
		int ret;

		if (!page_bad->dev->bdev) {
			btrfs_warn_rl(sblock_bad->sctx->dev_root->fs_info,
				"scrub_repair_page_from_good_copy(bdev == NULL) "
				"is unexpected");
			return -EIO;
		}

		bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
		if (!bio)
			return -EIO;
		bio->bi_bdev = page_bad->dev->bdev;
		bio->bi_iter.bi_sector = page_bad->physical >> 9;

		ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
		if (PAGE_SIZE != ret) {
			bio_put(bio);
			return -EIO;
		}

		if (btrfsic_submit_bio_wait(WRITE, bio)) {
			btrfs_dev_stat_inc_and_print(page_bad->dev,
				BTRFS_DEV_STAT_WRITE_ERRS);
			btrfs_dev_replace_stats_inc(
				&sblock_bad->sctx->dev_root->fs_info->
				dev_replace.num_write_errors);
			bio_put(bio);
			return -EIO;
		}
		bio_put(bio);
	}

	return 0;
}

static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
{
	int page_num;

	/*
	 * This block is used for the check of the parity on the source device,
	 * so the data needn't be written into the destination device.
	 */
	if (sblock->sparity)
		return;

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		int ret;

		ret = scrub_write_page_to_dev_replace(sblock, page_num);
		if (ret)
			btrfs_dev_replace_stats_inc(
				&sblock->sctx->dev_root->fs_info->dev_replace.
				num_write_errors);
	}
}

static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num)
{
	struct scrub_page *spage = sblock->pagev[page_num];

	BUG_ON(spage->page == NULL);
	if (spage->io_error) {
		void *mapped_buffer = kmap_atomic(spage->page);

		memset(mapped_buffer, 0, PAGE_CACHE_SIZE);
		flush_dcache_page(spage->page);
		kunmap_atomic(mapped_buffer);
	}
	return scrub_add_page_to_wr_bio(sblock->sctx, spage);
}

static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage)
{
	struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
	struct scrub_bio *sbio;
	int ret;

	mutex_lock(&wr_ctx->wr_lock);
again:
	if (!wr_ctx->wr_curr_bio) {
		wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
					      GFP_NOFS);
		if (!wr_ctx->wr_curr_bio) {
			mutex_unlock(&wr_ctx->wr_lock);
			return -ENOMEM;
		}
		wr_ctx->wr_curr_bio->sctx = sctx;
		wr_ctx->wr_curr_bio->page_count = 0;
	}
	sbio = wr_ctx->wr_curr_bio;
	if (sbio->page_count == 0) {
		struct bio *bio;

		sbio->physical = spage->physical_for_dev_replace;
		sbio->logical = spage->logical;
		sbio->dev = wr_ctx->tgtdev;
		bio = sbio->bio;
		if (!bio) {
			bio = btrfs_io_bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio);
			if (!bio) {
				mutex_unlock(&wr_ctx->wr_lock);
				return -ENOMEM;
			}
			sbio->bio = bio;
		}

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_wr_bio_end_io;
		bio->bi_bdev = sbio->dev->bdev;
		bio->bi_iter.bi_sector = sbio->physical >> 9;
		sbio->err = 0;
	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
		   spage->physical_for_dev_replace ||
		   sbio->logical + sbio->page_count * PAGE_SIZE !=
		   spage->logical) {
		scrub_wr_submit(sctx);
		goto again;
	}

	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
		if (sbio->page_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			mutex_unlock(&wr_ctx->wr_lock);
			return -EIO;
		}
		scrub_wr_submit(sctx);
		goto again;
	}

	sbio->pagev[sbio->page_count] = spage;
	scrub_page_get(spage);
	sbio->page_count++;
	if (sbio->page_count == wr_ctx->pages_per_wr_bio)
		scrub_wr_submit(sctx);
	mutex_unlock(&wr_ctx->wr_lock);

	return 0;
}

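/*
 * Illustrative note (not from the original source): a page is appended to
 * wr_curr_bio only while it stays physically and logically contiguous with
 * the pages already queued. For example, with two pages queued, the next
 * page must satisfy both
 *	spage->physical_for_dev_replace == sbio->physical + 2 * PAGE_SIZE
 *	spage->logical                  == sbio->logical  + 2 * PAGE_SIZE
 * or the current bio is submitted and a fresh one is started via "again".
 */
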
static void scrub_wr_submit(struct scrub_ctx *sctx)
{
	struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
	struct scrub_bio *sbio;

	if (!wr_ctx->wr_curr_bio)
		return;

	sbio = wr_ctx->wr_curr_bio;
	wr_ctx->wr_curr_bio = NULL;
	WARN_ON(!sbio->bio->bi_bdev);
	scrub_pending_bio_inc(sctx);
	/* process all writes in a single worker thread. Then the block layer
	 * orders the requests before sending them to the driver which
	 * doubled the write performance on spinning disks when measured
	 * with Linux 3.5 */
	btrfsic_submit_bio(WRITE, sbio->bio);
}

static void scrub_wr_bio_end_io(struct bio *bio)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;

	sbio->err = bio->bi_error;
	sbio->bio = bio;

	btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
			scrub_wr_bio_end_io_worker, NULL, NULL);
	btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
}

static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
	if (sbio->err) {
		struct btrfs_dev_replace *dev_replace =
			&sbio->sctx->dev_root->fs_info->dev_replace;

		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			btrfs_dev_replace_stats_inc(&dev_replace->
						    num_write_errors);
		}
	}

	for (i = 0; i < sbio->page_count; i++)
		scrub_page_put(sbio->pagev[i]);

	bio_put(sbio->bio);
	kfree(sbio);
	scrub_pending_bio_dec(sctx);
}

static int scrub_checksum(struct scrub_block *sblock)
{
	u64 flags;
	int ret;

	WARN_ON(sblock->page_count < 1);
	flags = sblock->pagev[0]->flags;
	ret = 0;
	if (flags & BTRFS_EXTENT_FLAG_DATA)
		ret = scrub_checksum_data(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		ret = scrub_checksum_tree_block(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_SUPER)
		(void)scrub_checksum_super(sblock);
	else
		WARN_ON(1);
	if (ret)
		scrub_handle_errored_block(sblock);

	return ret;
}

static int scrub_checksum_data(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	u8 csum[BTRFS_CSUM_SIZE];
	u8 *on_disk_csum;
	struct page *page;
	void *buffer;
	u32 crc = ~(u32)0;
	int fail = 0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	if (!sblock->pagev[0]->have_csum)
		return 0;

	on_disk_csum = sblock->pagev[0]->csum;
	page = sblock->pagev[0]->page;
	buffer = kmap_atomic(page);

	len = sctx->sectorsize;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, PAGE_SIZE);

		crc = btrfs_csum_data(buffer, crc, l);
		kunmap_atomic(buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		buffer = kmap_atomic(page);
	}

	btrfs_csum_final(crc, csum);
	if (memcmp(csum, on_disk_csum, sctx->csum_size))
		fail = 1;

	return fail;
}

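/*
 * Minimal sketch (assumption: simplified illustration, not part of the
 * original file, never compiled): the page walk above is equivalent to
 * checksumming the first sectorsize bytes of the block one PAGE_SIZE
 * chunk at a time and folding the result at the end.
 */
#if 0	/* illustrative fragment only */
static void example_data_csum(struct scrub_ctx *sctx,
			      struct scrub_block *sblock, u8 *csum)
{
	u32 crc = ~(u32)0;
	u64 left = sctx->sectorsize;
	int i = 0;

	while (left) {
		u64 l = min_t(u64, left, PAGE_SIZE);

		/* accumulate the CRC over one page's worth of data */
		crc = btrfs_csum_data(page_address(sblock->pagev[i++]->page),
				      crc, l);
		left -= l;
	}
	btrfs_csum_final(crc, csum);
}
#endif
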
static int scrub_checksum_tree_block(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_header *h;
	struct btrfs_root *root = sctx->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	struct page *page;
	void *mapped_buffer;
	u64 mapped_size;
	void *p;
	u32 crc = ~(u32)0;
	int fail = 0;
	int crc_fail = 0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	page = sblock->pagev[0]->page;
	mapped_buffer = kmap_atomic(page);
	h = (struct btrfs_header *)mapped_buffer;
	memcpy(on_disk_csum, h->csum, sctx->csum_size);

	/*
	 * we don't use the getter functions here, as we
	 * a) don't have an extent buffer and
	 * b) the page is already kmapped
	 */

	if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
		++fail;

	if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h))
		++fail;

	if (!scrub_check_fsid(h->fsid, sblock->pagev[0]))
		++fail;

	if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
		   BTRFS_UUID_SIZE))
		++fail;

	len = sctx->nodesize - BTRFS_CSUM_SIZE;
	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, mapped_size);

		crc = btrfs_csum_data(p, crc, l);
		kunmap_atomic(mapped_buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		mapped_buffer = kmap_atomic(page);
		mapped_size = PAGE_SIZE;
		p = mapped_buffer;
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
		++crc_fail;

	return fail || crc_fail;
}

static int scrub_checksum_super(struct scrub_block *sblock)
{
	struct btrfs_super_block *s;
	struct scrub_ctx *sctx = sblock->sctx;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	struct page *page;
	void *mapped_buffer;
	u64 mapped_size;
	void *p;
	u32 crc = ~(u32)0;
	int fail_gen = 0;
	int fail_cor = 0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	page = sblock->pagev[0]->page;
	mapped_buffer = kmap_atomic(page);
	s = (struct btrfs_super_block *)mapped_buffer;
	memcpy(on_disk_csum, s->csum, sctx->csum_size);

	if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
		++fail_cor;

	if (sblock->pagev[0]->generation != btrfs_super_generation(s))
		++fail_gen;

	if (!scrub_check_fsid(s->fsid, sblock->pagev[0]))
		++fail_cor;

	len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, mapped_size);

		crc = btrfs_csum_data(p, crc, l);
		kunmap_atomic(mapped_buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		mapped_buffer = kmap_atomic(page);
		mapped_size = PAGE_SIZE;
		p = mapped_buffer;
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
		++fail_cor;

	if (fail_cor + fail_gen) {
		/*
		 * if we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		if (fail_cor)
			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
	}

	return fail_cor + fail_gen;
}

static void scrub_block_get(struct scrub_block *sblock)
{
	atomic_inc(&sblock->refs);
}

static void scrub_block_put(struct scrub_block *sblock)
{
	if (atomic_dec_and_test(&sblock->refs)) {
		int i;

		if (sblock->sparity)
			scrub_parity_put(sblock->sparity);

		for (i = 0; i < sblock->page_count; i++)
			scrub_page_put(sblock->pagev[i]);
		kfree(sblock);
	}
}

static void scrub_page_get(struct scrub_page *spage)
{
	atomic_inc(&spage->refs);
}

static void scrub_page_put(struct scrub_page *spage)
{
	if (atomic_dec_and_test(&spage->refs)) {
		if (spage->page)
			__free_page(spage->page);
		kfree(spage);
	}
}

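/*
 * Usage note (not from the original source): every scrub_page_get() must be
 * balanced by a scrub_page_put(), and likewise for scrub_block_get() and
 * scrub_block_put(); the final put frees the page or block. A block holds
 * one reference on each of its pages, so dropping the last block reference
 * in scrub_block_put() also drops the page references taken when the pages
 * were attached to the block.
 */
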
static void scrub_submit(struct scrub_ctx *sctx)
{
	struct scrub_bio *sbio;

	if (sctx->curr == -1)
		return;

	sbio = sctx->bios[sctx->curr];
	sctx->curr = -1;
	scrub_pending_bio_inc(sctx);
	btrfsic_submit_bio(READ, sbio->bio);
}

static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage)
{
	struct scrub_block *sblock = spage->sblock;
	struct scrub_bio *sbio;
	int ret;

again:
	/*
	 * grab a fresh bio or wait for one to become available
	 */
	while (sctx->curr == -1) {
		spin_lock(&sctx->list_lock);
		sctx->curr = sctx->first_free;
		if (sctx->curr != -1) {
			sctx->first_free = sctx->bios[sctx->curr]->next_free;
			sctx->bios[sctx->curr]->next_free = -1;
			sctx->bios[sctx->curr]->page_count = 0;
			spin_unlock(&sctx->list_lock);
		} else {
			spin_unlock(&sctx->list_lock);
			wait_event(sctx->list_wait, sctx->first_free != -1);
		}
	}
	sbio = sctx->bios[sctx->curr];
	if (sbio->page_count == 0) {
		struct bio *bio;

		sbio->physical = spage->physical;
		sbio->logical = spage->logical;
		sbio->dev = spage->dev;
		bio = sbio->bio;
		if (!bio) {
			bio = btrfs_io_bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio);
			if (!bio)
				return -ENOMEM;
			sbio->bio = bio;
		}

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_bio_end_io;
		bio->bi_bdev = sbio->dev->bdev;
		bio->bi_iter.bi_sector = sbio->physical >> 9;
		sbio->err = 0;
	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
		   spage->physical ||
		   sbio->logical + sbio->page_count * PAGE_SIZE !=
		   spage->logical ||
		   sbio->dev != spage->dev) {
		scrub_submit(sctx);
		goto again;
	}

	sbio->pagev[sbio->page_count] = spage;
	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
		if (sbio->page_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			return -EIO;
		}
		scrub_submit(sctx);
		goto again;
	}

	scrub_block_get(sblock); /* one for the page added to the bio */
	atomic_inc(&sblock->outstanding_pages);
	sbio->page_count++;
	if (sbio->page_count == sctx->pages_per_rd_bio)
		scrub_submit(sctx);

	return 0;
}

static void scrub_missing_raid56_end_io(struct bio *bio)
{
	struct scrub_block *sblock = bio->bi_private;
	struct btrfs_fs_info *fs_info = sblock->sctx->dev_root->fs_info;

	if (bio->bi_error)
		sblock->no_io_error_seen = 0;

	btrfs_queue_work(fs_info->scrub_workers, &sblock->work);
}

static void scrub_missing_raid56_worker(struct btrfs_work *work)
{
	struct scrub_block *sblock = container_of(work, struct scrub_block, work);
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
	unsigned int is_metadata;
	unsigned int have_csum;
	u8 *csum;
	u64 generation;
	u64 logical;
	struct btrfs_device *dev;

	is_metadata = !(sblock->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA);
	have_csum = sblock->pagev[0]->have_csum;
	csum = sblock->pagev[0]->csum;
	generation = sblock->pagev[0]->generation;
	logical = sblock->pagev[0]->logical;
	dev = sblock->pagev[0]->dev;

	if (sblock->no_io_error_seen) {
		scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
					     have_csum, csum, generation,
					     sctx->csum_size);
	}

	if (!sblock->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_err_rl_in_rcu(fs_info,
			"IO error rebuilding logical %llu for dev %s",
			logical, rcu_str_deref(dev->name));
	} else if (sblock->header_error || sblock->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_err_rl_in_rcu(fs_info,
			"failed to rebuild valid logical %llu for dev %s",
			logical, rcu_str_deref(dev->name));
	} else {
		scrub_write_block_to_dev_replace(sblock);
	}

	scrub_block_put(sblock);

	if (sctx->is_dev_replace &&
	    atomic_read(&sctx->wr_ctx.flush_all_writes)) {
		mutex_lock(&sctx->wr_ctx.wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_ctx.wr_lock);
	}

	scrub_pending_bio_dec(sctx);
}

static void scrub_missing_raid56_pages(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
	u64 length = sblock->page_count * PAGE_SIZE;
	u64 logical = sblock->pagev[0]->logical;
	struct btrfs_bio *bbio;
	struct bio *bio;
	struct btrfs_raid_bio *rbio;
	int ret;
	int i;

	ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical, &length,
			       &bbio, 0, 1);
	if (ret || !bbio || !bbio->raid_map)
		goto bbio_out;

	if (WARN_ON(!sctx->is_dev_replace ||
		    !(bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
		/*
		 * We shouldn't be scrubbing a missing device. Even for dev
		 * replace, we should only get here for RAID 5/6. We either
		 * managed to mount something with no mirrors remaining or
		 * there's a bug in scrub_remap_extent()/btrfs_map_block().
		 */
		goto bbio_out;
	}

	bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
	if (!bio)
		goto bbio_out;

	bio->bi_iter.bi_sector = logical >> 9;
	bio->bi_private = sblock;
	bio->bi_end_io = scrub_missing_raid56_end_io;

	rbio = raid56_alloc_missing_rbio(sctx->dev_root, bio, bbio, length);
	if (!rbio)
		goto rbio_out;

	for (i = 0; i < sblock->page_count; i++) {
		struct scrub_page *spage = sblock->pagev[i];

		raid56_add_scrub_pages(rbio, spage->page, spage->logical);
	}

	btrfs_init_work(&sblock->work, btrfs_scrub_helper,
			scrub_missing_raid56_worker, NULL, NULL);
	scrub_block_get(sblock);
	scrub_pending_bio_inc(sctx);
	raid56_submit_missing_rbio(rbio);
	return;

rbio_out:
	bio_put(bio);
bbio_out:
	btrfs_put_bbio(bbio);
	spin_lock(&sctx->stat_lock);
	sctx->stat.malloc_errors++;
	spin_unlock(&sctx->stat_lock);
}

static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace)
{
	struct scrub_block *sblock;
	int index;

	sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
	if (!sblock) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	/* one ref inside this function, plus one for each page added to
	 * a bio later on */
	atomic_set(&sblock->refs, 1);
	sblock->sctx = sctx;
	sblock->no_io_error_seen = 1;

	for (index = 0; len > 0; index++) {
		struct scrub_page *spage;
		u64 l = min_t(u64, len, PAGE_SIZE);

		spage = kzalloc(sizeof(*spage), GFP_NOFS);
		if (!spage) {
leave_nomem:
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			spin_unlock(&sctx->stat_lock);
			scrub_block_put(sblock);
			return -ENOMEM;
		}
		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
		scrub_page_get(spage);
		sblock->pagev[index] = spage;
		spage->sblock = sblock;
		spage->dev = dev;
		spage->flags = flags;
		spage->generation = gen;
		spage->logical = logical;
		spage->physical = physical;
		spage->physical_for_dev_replace = physical_for_dev_replace;
		spage->mirror_num = mirror_num;
		if (csum) {
			spage->have_csum = 1;
			memcpy(spage->csum, csum, sctx->csum_size);
		} else {
			spage->have_csum = 0;
		}
		sblock->page_count++;
		spage->page = alloc_page(GFP_NOFS);
		if (!spage->page)
			goto leave_nomem;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}

	WARN_ON(sblock->page_count == 0);
	if (dev->missing) {
		/*
		 * This case should only be hit for RAID 5/6 device replace. See
		 * the comment in scrub_missing_raid56_pages() for details.
		 */
		scrub_missing_raid56_pages(sblock);
	} else {
		for (index = 0; index < sblock->page_count; index++) {
			struct scrub_page *spage = sblock->pagev[index];
			int ret;

			ret = scrub_add_page_to_rd_bio(sctx, spage);
			if (ret) {
				scrub_block_put(sblock);
				return ret;
			}
		}

		if (force)
			scrub_submit(sctx);
	}

	/* last one frees, either here or in bio completion for last page */
	scrub_block_put(sblock);
	return 0;
}

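/*
 * Illustrative note (not from the original source): scrub_pages() splits
 * "len" into PAGE_SIZE pieces. For example, a 16K tree block with 4K pages
 * becomes four scrub_pages whose logical and physical addresses advance by
 * 4K each, all hanging off one scrub_block that is later checksummed and
 * repaired as a unit.
 */
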
static void scrub_bio_end_io(struct bio *bio)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;

	sbio->err = bio->bi_error;
	sbio->bio = bio;

	btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
}

static void scrub_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
	if (sbio->err) {
		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			spage->sblock->no_io_error_seen = 0;
		}
	}

	/* now complete the scrub_block items that have all pages completed */
	for (i = 0; i < sbio->page_count; i++) {
		struct scrub_page *spage = sbio->pagev[i];
		struct scrub_block *sblock = spage->sblock;

		if (atomic_dec_and_test(&sblock->outstanding_pages))
			scrub_block_complete(sblock);
		scrub_block_put(sblock);
	}

	bio_put(sbio->bio);
	sbio->bio = NULL;
	spin_lock(&sctx->list_lock);
	sbio->next_free = sctx->first_free;
	sctx->first_free = sbio->index;
	spin_unlock(&sctx->list_lock);

	if (sctx->is_dev_replace &&
	    atomic_read(&sctx->wr_ctx.flush_all_writes)) {
		mutex_lock(&sctx->wr_ctx.wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_ctx.wr_lock);
	}

	scrub_pending_bio_dec(sctx);
}

static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
				       unsigned long *bitmap,
				       u64 start, u64 len)
{
	u32 offset;
	int nsectors;
	int sectorsize = sparity->sctx->dev_root->sectorsize;

	if (len >= sparity->stripe_len) {
		bitmap_set(bitmap, 0, sparity->nsectors);
		return;
	}

	start -= sparity->logic_start;
	start = div_u64_rem(start, sparity->stripe_len, &offset);
	offset /= sectorsize;
	nsectors = (int)len / sectorsize;

	if (offset + nsectors <= sparity->nsectors) {
		bitmap_set(bitmap, offset, nsectors);
		return;
	}

	bitmap_set(bitmap, offset, sparity->nsectors - offset);
	bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
}

static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
						   u64 start, u64 len)
{
	__scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
}

static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
						  u64 start, u64 len)
{
	__scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
}

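/*
 * Worked example (not from the original source) for the wraparound case in
 * __scrub_mark_bitmap(): with stripe_len 64K and sectorsize 4K (so
 * nsectors = 16), a range starting at sector offset 14 with a length of 4
 * sectors sets bits 14-15 and then bits 0-1, because the range wraps past
 * the end of the stripe.
 */
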
static void scrub_block_complete(struct scrub_block *sblock)
{
	int corrupted = 0;

	if (!sblock->no_io_error_seen) {
		corrupted = 1;
		scrub_handle_errored_block(sblock);
	} else {
		/*
		 * In the dev replace case: if the block has a checksum
		 * error, it is written via the repair mechanism; otherwise
		 * it is written here.
		 */
		corrupted = scrub_checksum(sblock);
		if (!corrupted && sblock->sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock);
	}

	if (sblock->sparity && corrupted && !sblock->data_corrected) {
		u64 start = sblock->pagev[0]->logical;
		u64 end = sblock->pagev[sblock->page_count - 1]->logical +
			  PAGE_SIZE;

		scrub_parity_mark_sectors_error(sblock->sparity,
						start, end - start);
	}
}

static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u64 len,
			   u8 *csum)
{
	struct btrfs_ordered_sum *sum = NULL;
	unsigned long index;
	unsigned long num_sectors;

	while (!list_empty(&sctx->csum_list)) {
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		if (sum->bytenr > logical)
			return 0;
		if (sum->bytenr + sum->len > logical)
			break;

		++sctx->stat.csum_discards;
		list_del(&sum->list);
		kfree(sum);
		sum = NULL;
	}
	if (!sum)
		return 0;

	index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize;
	num_sectors = sum->len / sctx->sectorsize;
	memcpy(csum, sum->sums + index, sctx->csum_size);
	if (index == num_sectors - 1) {
		list_del(&sum->list);
		kfree(sum);
	}
	return 1;
}

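/*
 * Worked example (not from the original source): if the ordered sum entry
 * covers bytenr 1M with len 64K and the sectorsize is 4K, a lookup for
 * logical 1M + 20K yields index (20K / 4K) = 5, i.e. the sixth per-sector
 * checksum of the entry; the entry itself is freed once its last sector
 * has been consumed.
 */
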
/* scrub extent tries to collect up to 64 kB for each bio */
static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
			u64 physical, struct btrfs_device *dev, u64 flags,
			u64 gen, int mirror_num, u64 physical_for_dev_replace)
{
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		blocksize = sctx->sectorsize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.data_extents_scrubbed++;
		sctx->stat.data_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		blocksize = sctx->nodesize;
		spin_lock(&sctx->stat_lock);
		sctx->stat.tree_extents_scrubbed++;
		sctx->stat.tree_bytes_scrubbed += len;
		spin_unlock(&sctx->stat_lock);
	} else {
		blocksize = sctx->sectorsize;
		WARN_ON(1);
	}

	while (len) {
		u64 l = min_t(u64, len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sctx, logical, l, csum);
			if (have_csum == 0)
				++sctx->stat.no_csum;
			if (sctx->is_dev_replace && !have_csum) {
				ret = copy_nocow_pages(sctx, logical, l,
						       mirror_num,
						       physical_for_dev_replace);
				goto behind_scrub_pages;
			}
		}
		ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
				  mirror_num, have_csum ? csum : NULL, 0,
				  physical_for_dev_replace);
behind_scrub_pages:
		if (ret)
			return ret;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}
	return 0;
}

static int scrub_pages_for_parity(struct scrub_parity *sparity,
				  u64 logical, u64 len,
				  u64 physical, struct btrfs_device *dev,
				  u64 flags, u64 gen, int mirror_num, u8 *csum)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct scrub_block *sblock;
	int index;

	sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
	if (!sblock) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	/* one ref inside this function, plus one for each page added to
	 * a bio later on */
	atomic_set(&sblock->refs, 1);
	sblock->sctx = sctx;
	sblock->no_io_error_seen = 1;
	sblock->sparity = sparity;
	scrub_parity_get(sparity);

	for (index = 0; len > 0; index++) {
		struct scrub_page *spage;
		u64 l = min_t(u64, len, PAGE_SIZE);

		spage = kzalloc(sizeof(*spage), GFP_NOFS);
		if (!spage) {
leave_nomem:
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			spin_unlock(&sctx->stat_lock);
			scrub_block_put(sblock);
			return -ENOMEM;
		}
		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
		/* For scrub block */
		scrub_page_get(spage);
		sblock->pagev[index] = spage;
		/* For scrub parity */
		scrub_page_get(spage);
		list_add_tail(&spage->list, &sparity->spages);
		spage->sblock = sblock;
		spage->dev = dev;
		spage->flags = flags;
		spage->generation = gen;
		spage->logical = logical;
		spage->physical = physical;
		spage->mirror_num = mirror_num;
		if (csum) {
			spage->have_csum = 1;
			memcpy(spage->csum, csum, sctx->csum_size);
		} else {
			spage->have_csum = 0;
		}
		sblock->page_count++;
		spage->page = alloc_page(GFP_NOFS);
		if (!spage->page)
			goto leave_nomem;
		len -= l;
		logical += l;
		physical += l;
	}

	WARN_ON(sblock->page_count == 0);
	for (index = 0; index < sblock->page_count; index++) {
		struct scrub_page *spage = sblock->pagev[index];
		int ret;

		ret = scrub_add_page_to_rd_bio(sctx, spage);
		if (ret) {
			scrub_block_put(sblock);
			return ret;
		}
	}

	/* last one frees, either here or in bio completion for last page */
	scrub_block_put(sblock);
	return 0;
}

static int scrub_extent_for_parity(struct scrub_parity *sparity,
				   u64 logical, u64 len,
				   u64 physical, struct btrfs_device *dev,
				   u64 flags, u64 gen, int mirror_num)
{
	struct scrub_ctx *sctx = sparity->sctx;
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (dev->missing) {
		scrub_parity_mark_sectors_error(sparity, logical, len);
		return 0;
	}

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		blocksize = sctx->sectorsize;
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		blocksize = sctx->nodesize;
	} else {
		blocksize = sctx->sectorsize;
		WARN_ON(1);
	}

	while (len) {
		u64 l = min_t(u64, len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sctx, logical, l, csum);
			if (have_csum == 0)
				goto skip;
		}
		ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
					     flags, gen, mirror_num,
					     have_csum ? csum : NULL);
		if (ret)
			return ret;
skip:
		len -= l;
		logical += l;
		physical += l;
	}
	return 0;
}

/*
 * Given a physical address, this will calculate its
 * logical offset. If this is a parity stripe, it will return
 * the leftmost data stripe's logical offset.
 *
 * Return 0 if it is a data stripe, 1 if it is a parity stripe.
 */
static int get_raid56_logic_offset(u64 physical, int num,
				   struct map_lookup *map, u64 *offset,
				   u64 *stripe_start)
{
	int i;
	int j = 0;
	u64 stripe_nr;
	u64 last_offset;
	u32 stripe_index;
	u32 rot;

	last_offset = (physical - map->stripes[num].physical) *
		      nr_data_stripes(map);
	if (stripe_start)
		*stripe_start = last_offset;

	*offset = last_offset;
	for (i = 0; i < nr_data_stripes(map); i++) {
		*offset = last_offset + i * map->stripe_len;

		stripe_nr = div_u64(*offset, map->stripe_len);
		stripe_nr = div_u64(stripe_nr, nr_data_stripes(map));

		/* Work out the disk rotation on this stripe-set */
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
		/* calculate which stripe this data locates */
		rot += i;
		stripe_index = rot % map->num_stripes;
		if (stripe_index == num)
			return 0;
		if (stripe_index < num)
			j++;
	}
	*offset = last_offset + j * map->stripe_len;
	return 1;
}

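/*
 * Worked example (not from the original source, assuming a 3-disk RAID5
 * with 64K stripe_len): a physical offset of 64K on disk num=0 belongs to
 * full-stripe set 1, whose logical range starts at last_offset = 64K * 2 =
 * 128K. That set's rotation is rot = 1, so data column i=0 sits on disk
 * (1 + 0) % 3 = 1 and column i=1 on disk 2; disk 0 holds parity there and
 * the function returns 1. Had some column i landed on num, the function
 * would instead return 0 with *offset at that data column's logical
 * address.
 */
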
static void scrub_free_parity(struct scrub_parity *sparity)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct scrub_page *curr, *next;
	int nbits;

	nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
	if (nbits) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors += nbits;
		sctx->stat.uncorrectable_errors += nbits;
		spin_unlock(&sctx->stat_lock);
	}

	list_for_each_entry_safe(curr, next, &sparity->spages, list) {
		list_del_init(&curr->list);
		scrub_page_put(curr);
	}

	kfree(sparity);
}

static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
{
	struct scrub_parity *sparity = container_of(work, struct scrub_parity,
						    work);
	struct scrub_ctx *sctx = sparity->sctx;

	scrub_free_parity(sparity);
	scrub_pending_bio_dec(sctx);
}

static void scrub_parity_bio_endio(struct bio *bio)
{
	struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;

	if (bio->bi_error)
		bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
			  sparity->nsectors);

	bio_put(bio);

	btrfs_init_work(&sparity->work, btrfs_scrubparity_helper,
			scrub_parity_bio_endio_worker, NULL, NULL);
	btrfs_queue_work(sparity->sctx->dev_root->fs_info->scrub_parity_workers,
			 &sparity->work);
}

static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
{
	struct scrub_ctx *sctx = sparity->sctx;
	struct bio *bio;
	struct btrfs_raid_bio *rbio;
	struct scrub_page *spage;
	struct btrfs_bio *bbio = NULL;
	u64 length;
	int ret;

	if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
			   sparity->nsectors))
		goto out;

	length = sparity->logic_end - sparity->logic_start;
	ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE,
			       sparity->logic_start,
			       &length, &bbio, 0, 1);
	if (ret || !bbio || !bbio->raid_map)
		goto bbio_out;

	bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
	if (!bio)
		goto bbio_out;

	bio->bi_iter.bi_sector = sparity->logic_start >> 9;
	bio->bi_private = sparity;
	bio->bi_end_io = scrub_parity_bio_endio;

	rbio = raid56_parity_alloc_scrub_rbio(sctx->dev_root, bio, bbio,
					      length, sparity->scrub_dev,
					      sparity->dbitmap,
					      sparity->nsectors);
	if (!rbio)
		goto rbio_out;

	list_for_each_entry(spage, &sparity->spages, list)
		raid56_add_scrub_pages(rbio, spage->page, spage->logical);

	scrub_pending_bio_inc(sctx);
	raid56_parity_submit_scrub_rbio(rbio);
	return;

rbio_out:
	bio_put(bio);
bbio_out:
	btrfs_put_bbio(bbio);
	bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
		  sparity->nsectors);
	spin_lock(&sctx->stat_lock);
	sctx->stat.malloc_errors++;
	spin_unlock(&sctx->stat_lock);
out:
	scrub_free_parity(sparity);
}

static inline int scrub_calc_parity_bitmap_len(int nsectors)
{
	return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * (BITS_PER_LONG / 8);
}

static void scrub_parity_get(struct scrub_parity *sparity)
{
	atomic_inc(&sparity->refs);
}

static void scrub_parity_put(struct scrub_parity *sparity)
{
	if (!atomic_dec_and_test(&sparity->refs))
		return;

	scrub_parity_check_and_repair(sparity);
}

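/*
 * Worked example (not from the original source): with 64K stripe_len and
 * 4K sectors, nsectors = 16, so on a 64-bit kernel
 * scrub_calc_parity_bitmap_len(16) = DIV_ROUND_UP(16, 64) * 8 = 8 bytes;
 * scrub_raid56_parity() below allocates 2 * 8 bytes to hold dbitmap and
 * ebitmap back to back.
 */
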
static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
						  struct map_lookup *map,
						  struct btrfs_device *sdev,
						  struct btrfs_path *path,
						  u64 logic_start,
						  u64 logic_end)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_root *csum_root = fs_info->csum_root;
	struct btrfs_extent_item *extent;
	struct btrfs_bio *bbio = NULL;
	u64 flags;
	int ret;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	u64 generation;
	u64 extent_logical;
	u64 extent_physical;
	u64 extent_len;
	u64 mapped_length;
	struct btrfs_device *extent_dev;
	struct scrub_parity *sparity;
	int nsectors;
	int bitmap_len;
	int extent_mirror_num;
	int stop_loop = 0;

	nsectors = map->stripe_len / root->sectorsize;
	bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
	sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
			  GFP_NOFS);
	if (!sparity) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	sparity->stripe_len = map->stripe_len;
	sparity->nsectors = nsectors;
	sparity->sctx = sctx;
	sparity->scrub_dev = sdev;
	sparity->logic_start = logic_start;
	sparity->logic_end = logic_end;
	atomic_set(&sparity->refs, 1);
	INIT_LIST_HEAD(&sparity->spages);
	sparity->dbitmap = sparity->bitmap;
	sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;

	ret = 0;
	while (logic_start < logic_end) {
		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
			key.type = BTRFS_METADATA_ITEM_KEY;
		else
			key.type = BTRFS_EXTENT_ITEM_KEY;
		key.objectid = logic_start;
		key.offset = (u64)-1;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		if (ret > 0) {
			ret = btrfs_previous_extent_item(root, path, 0);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}

		stop_loop = 0;
		while (1) {
			u64 bytes;

			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;

				stop_loop = 1;
				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
			    key.type != BTRFS_METADATA_ITEM_KEY)
				goto next;

			if (key.type == BTRFS_METADATA_ITEM_KEY)
				bytes = root->nodesize;
			else
				bytes = key.offset;

			if (key.objectid + bytes <= logic_start)
				goto next;

			if (key.objectid >= logic_end) {
				stop_loop = 1;
				break;
			}

			while (key.objectid >= logic_start + map->stripe_len)
				logic_start += map->stripe_len;

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
			    (key.objectid < logic_start ||
			     key.objectid + bytes >
			     logic_start + map->stripe_len)) {
				btrfs_err(fs_info, "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
					  key.objectid, logic_start);
				spin_lock(&sctx->stat_lock);
				sctx->stat.uncorrectable_errors++;
				spin_unlock(&sctx->stat_lock);
				goto next;
			}
again:
			extent_logical = key.objectid;
			extent_len = bytes;

			if (extent_logical < logic_start) {
				extent_len -= logic_start - extent_logical;
				extent_logical = logic_start;
			}

			if (extent_logical + extent_len >
			    logic_start + map->stripe_len)
				extent_len = logic_start + map->stripe_len -
					     extent_logical;

			scrub_parity_mark_sectors_data(sparity, extent_logical,
						       extent_len);

			mapped_length = extent_len;
			ret = btrfs_map_block(fs_info, READ, extent_logical,
					      &mapped_length, &bbio, 0);
			if (!ret) {
				if (!bbio || mapped_length < extent_len)
					ret = -EIO;
			}
			if (ret) {
				btrfs_put_bbio(bbio);
				goto out;
			}
			extent_physical = bbio->stripes[0].physical;
			extent_mirror_num = bbio->mirror_num;
			extent_dev = bbio->stripes[0].dev;
			btrfs_put_bbio(bbio);

			ret = btrfs_lookup_csums_range(csum_root,
						       extent_logical,
						       extent_logical + extent_len - 1,
						       &sctx->csum_list, 1);
			if (ret)
				goto out;

			ret = scrub_extent_for_parity(sparity, extent_logical,
						      extent_len,
						      extent_physical,
						      extent_dev, flags,
						      generation,
						      extent_mirror_num);

			scrub_free_csums(sctx);

			if (ret)
				goto out;

			if (extent_logical + extent_len <
			    key.objectid + bytes) {
				logic_start += map->stripe_len;

				if (logic_start >= logic_end) {
					stop_loop = 1;
					break;
				}

				if (logic_start < key.objectid + bytes) {
					cond_resched();
					goto again;
				}
			}
next:
			path->slots[0]++;
		}

		btrfs_release_path(path);

		if (stop_loop)
			break;

		logic_start += map->stripe_len;
	}
out:
	if (ret < 0)
		scrub_parity_mark_sectors_error(sparity, logic_start,
						logic_end - logic_start);
	scrub_parity_put(sparity);
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_ctx.wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_ctx.wr_lock);

	btrfs_release_path(path);
	return ret < 0 ? ret : 0;
}

d9d181c1 3111static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
a36cf8b8
SB
3112 struct map_lookup *map,
3113 struct btrfs_device *scrub_dev,
ff023aac
SB
3114 int num, u64 base, u64 length,
3115 int is_dev_replace)
a2de733c 3116{
5a6ac9ea 3117 struct btrfs_path *path, *ppath;
a36cf8b8 3118 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
a2de733c
AJ
3119 struct btrfs_root *root = fs_info->extent_root;
3120 struct btrfs_root *csum_root = fs_info->csum_root;
3121 struct btrfs_extent_item *extent;
e7786c3a 3122 struct blk_plug plug;
a2de733c
AJ
3123 u64 flags;
3124 int ret;
3125 int slot;
a2de733c 3126 u64 nstripes;
a2de733c
AJ
3127 struct extent_buffer *l;
3128 struct btrfs_key key;
3129 u64 physical;
3130 u64 logical;
625f1c8d 3131 u64 logic_end;
3b080b25 3132 u64 physical_end;
a2de733c 3133 u64 generation;
e12fa9cd 3134 int mirror_num;
7a26285e
AJ
3135 struct reada_control *reada1;
3136 struct reada_control *reada2;
3137 struct btrfs_key key_start;
3138 struct btrfs_key key_end;
a2de733c
AJ
3139 u64 increment = map->stripe_len;
3140 u64 offset;
ff023aac
SB
3141 u64 extent_logical;
3142 u64 extent_physical;
3143 u64 extent_len;
5a6ac9ea
MX
3144 u64 stripe_logical;
3145 u64 stripe_end;
ff023aac
SB
3146 struct btrfs_device *extent_dev;
3147 int extent_mirror_num;
3b080b25 3148 int stop_loop = 0;
53b381b3 3149
3b080b25 3150 physical = map->stripes[num].physical;
a2de733c 3151 offset = 0;
b8b93add 3152 nstripes = div_u64(length, map->stripe_len);
a2de733c
AJ
3153 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3154 offset = map->stripe_len * num;
3155 increment = map->stripe_len * map->num_stripes;
193ea74b 3156 mirror_num = 1;
a2de733c
AJ
3157 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3158 int factor = map->num_stripes / map->sub_stripes;
3159 offset = map->stripe_len * (num / map->sub_stripes);
3160 increment = map->stripe_len * factor;
193ea74b 3161 mirror_num = num % map->sub_stripes + 1;
a2de733c
AJ
3162 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3163 increment = map->stripe_len;
193ea74b 3164 mirror_num = num % map->num_stripes + 1;
a2de733c
AJ
3165 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3166 increment = map->stripe_len;
193ea74b 3167 mirror_num = num % map->num_stripes + 1;
ffe2d203 3168 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5a6ac9ea 3169 get_raid56_logic_offset(physical, num, map, &offset, NULL);
3b080b25
WS
3170 increment = map->stripe_len * nr_data_stripes(map);
3171 mirror_num = 1;
a2de733c
AJ
3172 } else {
3173 increment = map->stripe_len;
193ea74b 3174 mirror_num = 1;
a2de733c
AJ
3175 }
3176
3177 path = btrfs_alloc_path();
3178 if (!path)
3179 return -ENOMEM;
3180
5a6ac9ea
MX
3181 ppath = btrfs_alloc_path();
3182 if (!ppath) {
379d6854 3183 btrfs_free_path(path);
5a6ac9ea
MX
3184 return -ENOMEM;
3185 }
3186
b5d67f64
SB
3187 /*
3188 * work on commit root. The related disk blocks are static as
3189 * long as COW is applied. This means, it is save to rewrite
3190 * them to repair disk errors without any race conditions
3191 */
a2de733c
AJ
3192 path->search_commit_root = 1;
3193 path->skip_locking = 1;
3194
063c54dc
GH
3195 ppath->search_commit_root = 1;
3196 ppath->skip_locking = 1;
a2de733c 3197 /*
7a26285e
AJ
3198 * trigger the readahead for extent tree csum tree and wait for
3199 * completion. During readahead, the scrub is officially paused
3200 * to not hold off transaction commits
a2de733c
AJ
3201 */
3202 logical = base + offset;
3b080b25 3203 physical_end = physical + nstripes * map->stripe_len;
ffe2d203 3204 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3b080b25 3205 get_raid56_logic_offset(physical_end, num,
5a6ac9ea 3206 map, &logic_end, NULL);
3b080b25
WS
3207 logic_end += base;
3208 } else {
3209 logic_end = logical + increment * nstripes;
3210 }
d9d181c1 3211 wait_event(sctx->list_wait,
b6bfebc1 3212 atomic_read(&sctx->bios_in_flight) == 0);
cb7ab021 3213 scrub_blocked_if_needed(fs_info);
7a26285e
AJ
3214
3215 /* FIXME it might be better to start readahead at commit root */
3216 key_start.objectid = logical;
3217 key_start.type = BTRFS_EXTENT_ITEM_KEY;
3218 key_start.offset = (u64)0;
3b080b25 3219 key_end.objectid = logic_end;
3173a18f
JB
3220 key_end.type = BTRFS_METADATA_ITEM_KEY;
3221 key_end.offset = (u64)-1;
7a26285e
AJ
3222 reada1 = btrfs_reada_add(root, &key_start, &key_end);
3223
3224 key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3225 key_start.type = BTRFS_EXTENT_CSUM_KEY;
3226 key_start.offset = logical;
3227 key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3228 key_end.type = BTRFS_EXTENT_CSUM_KEY;
3b080b25 3229 key_end.offset = logic_end;
7a26285e
AJ
3230 reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);
3231
3232 if (!IS_ERR(reada1))
3233 btrfs_reada_wait(reada1);
3234 if (!IS_ERR(reada2))
3235 btrfs_reada_wait(reada2);
3236
a2de733c
AJ
3237
3238 /*
3239 * collect all data csums for the stripe to avoid seeking during
3240 * the scrub. This might currently (crc32) end up to be about 1MB
3241 */
e7786c3a 3242 blk_start_plug(&plug);
a2de733c 3243
a2de733c
AJ
3244 /*
3245 * now find all extents for each stripe and scrub them
3246 */
a2de733c 3247 ret = 0;
3b080b25 3248 while (physical < physical_end) {
a2de733c
AJ
3249 /*
3250 * canceled?
3251 */
3252 if (atomic_read(&fs_info->scrub_cancel_req) ||
d9d181c1 3253 atomic_read(&sctx->cancel_req)) {
a2de733c
AJ
3254 ret = -ECANCELED;
3255 goto out;
3256 }
3257 /*
3258 * check to see if we have to pause
3259 */
3260 if (atomic_read(&fs_info->scrub_pause_req)) {
3261 /* push queued extents */
ff023aac 3262 atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
d9d181c1 3263 scrub_submit(sctx);
ff023aac
SB
3264 mutex_lock(&sctx->wr_ctx.wr_lock);
3265 scrub_wr_submit(sctx);
3266 mutex_unlock(&sctx->wr_ctx.wr_lock);
d9d181c1 3267 wait_event(sctx->list_wait,
b6bfebc1 3268 atomic_read(&sctx->bios_in_flight) == 0);
ff023aac 3269 atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
3cb0929a 3270 scrub_blocked_if_needed(fs_info);
a2de733c
AJ
3271 }
3272
f2f66a2f
ZL
3273 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3274 ret = get_raid56_logic_offset(physical, num, map,
3275 &logical,
3276 &stripe_logical);
3277 logical += base;
3278 if (ret) {
7955323b 3279 /* it is parity strip */
f2f66a2f 3280 stripe_logical += base;
a0dd59de 3281 stripe_end = stripe_logical + increment;
f2f66a2f
ZL
3282 ret = scrub_raid56_parity(sctx, map, scrub_dev,
3283 ppath, stripe_logical,
3284 stripe_end);
3285 if (ret)
3286 goto out;
3287 goto skip;
3288 }
3289 }
3290
7c76edb7
WS
3291 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
3292 key.type = BTRFS_METADATA_ITEM_KEY;
3293 else
3294 key.type = BTRFS_EXTENT_ITEM_KEY;
a2de733c 3295 key.objectid = logical;
625f1c8d 3296 key.offset = (u64)-1;
a2de733c
AJ
3297
3298 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3299 if (ret < 0)
3300 goto out;
3173a18f 3301
8c51032f 3302 if (ret > 0) {
ade2e0b3 3303 ret = btrfs_previous_extent_item(root, path, 0);
a2de733c
AJ
3304 if (ret < 0)
3305 goto out;
8c51032f
AJ
3306 if (ret > 0) {
3307 /* there's no smaller item, so stick with the
3308 * larger one */
3309 btrfs_release_path(path);
3310 ret = btrfs_search_slot(NULL, root, &key,
3311 path, 0, 0);
3312 if (ret < 0)
3313 goto out;
3314 }
a2de733c
AJ
3315 }
3316
625f1c8d 3317 stop_loop = 0;
a2de733c 3318 while (1) {
3173a18f
JB
3319 u64 bytes;
3320
a2de733c
AJ
3321 l = path->nodes[0];
3322 slot = path->slots[0];
3323 if (slot >= btrfs_header_nritems(l)) {
3324 ret = btrfs_next_leaf(root, path);
3325 if (ret == 0)
3326 continue;
3327 if (ret < 0)
3328 goto out;
3329
625f1c8d 3330 stop_loop = 1;
a2de733c
AJ
3331 break;
3332 }
3333 btrfs_item_key_to_cpu(l, &key, slot);
3334
d7cad238
ZL
3335 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3336 key.type != BTRFS_METADATA_ITEM_KEY)
3337 goto next;
3338
3173a18f 3339 if (key.type == BTRFS_METADATA_ITEM_KEY)
707e8a07 3340 bytes = root->nodesize;
3173a18f
JB
3341 else
3342 bytes = key.offset;
3343
3344 if (key.objectid + bytes <= logical)
a2de733c
AJ
3345 goto next;
3346
625f1c8d
LB
3347 if (key.objectid >= logical + map->stripe_len) {
3348 /* out of this device extent */
3349 if (key.objectid >= logic_end)
3350 stop_loop = 1;
3351 break;
3352 }
a2de733c
AJ
3353
3354 extent = btrfs_item_ptr(l, slot,
3355 struct btrfs_extent_item);
3356 flags = btrfs_extent_flags(l, extent);
3357 generation = btrfs_extent_generation(l, extent);
3358
a323e813
ZL
3359 if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
3360 (key.objectid < logical ||
3361 key.objectid + bytes >
3362 logical + map->stripe_len)) {
efe120a0
FH
3363 btrfs_err(fs_info,
3364 "scrub: tree block %llu spanning "
3365 "stripes, ignored. logical=%llu",
c1c9ff7c 3366 key.objectid, logical);
9799d2c3
ZL
3367 spin_lock(&sctx->stat_lock);
3368 sctx->stat.uncorrectable_errors++;
3369 spin_unlock(&sctx->stat_lock);
a2de733c
AJ
3370 goto next;
3371 }
3372
625f1c8d
LB
3373again:
3374 extent_logical = key.objectid;
3375 extent_len = bytes;
3376
a2de733c
AJ
3377 /*
3378 * trim extent to this stripe
3379 */
625f1c8d
LB
3380 if (extent_logical < logical) {
3381 extent_len -= logical - extent_logical;
3382 extent_logical = logical;
a2de733c 3383 }
625f1c8d 3384 if (extent_logical + extent_len >
a2de733c 3385 logical + map->stripe_len) {
625f1c8d
LB
3386 extent_len = logical + map->stripe_len -
3387 extent_logical;
a2de733c
AJ
3388 }
3389
625f1c8d 3390 extent_physical = extent_logical - logical + physical;
ff023aac
SB
3391 extent_dev = scrub_dev;
3392 extent_mirror_num = mirror_num;
3393 if (is_dev_replace)
3394 scrub_remap_extent(fs_info, extent_logical,
3395 extent_len, &extent_physical,
3396 &extent_dev,
3397 &extent_mirror_num);
625f1c8d 3398
fe8cf654
ZL
3399 ret = btrfs_lookup_csums_range(csum_root,
3400 extent_logical,
3401 extent_logical +
3402 extent_len - 1,
3403 &sctx->csum_list, 1);
625f1c8d
LB
3404 if (ret)
3405 goto out;
3406
ff023aac
SB
3407 ret = scrub_extent(sctx, extent_logical, extent_len,
3408 extent_physical, extent_dev, flags,
3409 generation, extent_mirror_num,
115930cb 3410 extent_logical - logical + physical);
6fa96d72
ZL
3411
3412 scrub_free_csums(sctx);
3413
a2de733c
AJ
3414 if (ret)
3415 goto out;
3416
625f1c8d
LB
3417 if (extent_logical + extent_len <
3418 key.objectid + bytes) {
ffe2d203 3419 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3b080b25
WS
3420 /*
3421 * loop until we find next data stripe
3422 * or we have finished all stripes.
3423 */
5a6ac9ea
MX
3424loop:
3425 physical += map->stripe_len;
3426 ret = get_raid56_logic_offset(physical,
3427 num, map, &logical,
3428 &stripe_logical);
3429 logical += base;
3430
3431 if (ret && physical < physical_end) {
3432 stripe_logical += base;
3433 stripe_end = stripe_logical +
a0dd59de 3434 increment;
5a6ac9ea
MX
3435 ret = scrub_raid56_parity(sctx,
3436 map, scrub_dev, ppath,
3437 stripe_logical,
3438 stripe_end);
3439 if (ret)
3440 goto out;
3441 goto loop;
3442 }
3b080b25
WS
3443 } else {
3444 physical += map->stripe_len;
3445 logical += increment;
3446 }
625f1c8d
LB
3447 if (logical < key.objectid + bytes) {
3448 cond_resched();
3449 goto again;
3450 }
3451
3b080b25 3452 if (physical >= physical_end) {
625f1c8d
LB
3453 stop_loop = 1;
3454 break;
3455 }
3456 }
a2de733c
AJ
3457next:
3458 path->slots[0]++;
3459 }
71267333 3460 btrfs_release_path(path);
3b080b25 3461skip:
a2de733c
AJ
3462 logical += increment;
3463 physical += map->stripe_len;
d9d181c1 3464 spin_lock(&sctx->stat_lock);
625f1c8d
LB
3465 if (stop_loop)
3466 sctx->stat.last_physical = map->stripes[num].physical +
3467 length;
3468 else
3469 sctx->stat.last_physical = physical;
d9d181c1 3470 spin_unlock(&sctx->stat_lock);
625f1c8d
LB
3471 if (stop_loop)
3472 break;
a2de733c 3473 }
ff023aac 3474out:
a2de733c 3475 /* push queued extents */
d9d181c1 3476 scrub_submit(sctx);
ff023aac
SB
3477 mutex_lock(&sctx->wr_ctx.wr_lock);
3478 scrub_wr_submit(sctx);
3479 mutex_unlock(&sctx->wr_ctx.wr_lock);
a2de733c 3480
e7786c3a 3481 blk_finish_plug(&plug);
a2de733c 3482 btrfs_free_path(path);
5a6ac9ea 3483 btrfs_free_path(ppath);
a2de733c
AJ
3484 return ret < 0 ? ret : 0;
3485}
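/*
 * A minimal user-space sketch (not part of scrub.c) of the "trim extent
 * to this stripe" clamp performed in scrub_stripe() above; the variable
 * names mirror the kernel code, but trim_to_stripe() itself is invented
 * for illustration.
 */
#include <stdio.h>
#include <stdint.h>

static void trim_to_stripe(uint64_t *extent_logical, uint64_t *extent_len,
			   uint64_t logical, uint64_t stripe_len)
{
	/* clip the front of the extent that starts before the stripe */
	if (*extent_logical < logical) {
		*extent_len -= logical - *extent_logical;
		*extent_logical = logical;
	}
	/* clip the tail that runs past the end of the stripe */
	if (*extent_logical + *extent_len > logical + stripe_len)
		*extent_len = logical + stripe_len - *extent_logical;
}

int main(void)
{
	uint64_t el = 60 * 1024, len = 16 * 1024;

	/* stripe window [64K, 128K): front 4K of the extent is clipped */
	trim_to_stripe(&el, &len, 64 * 1024, 64 * 1024);
	printf("trimmed: logical=%llu len=%llu\n",
	       (unsigned long long)el, (unsigned long long)len);
	return 0;
}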
3486
d9d181c1 3487static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
a36cf8b8 3488 struct btrfs_device *scrub_dev,
a36cf8b8 3489 u64 chunk_offset, u64 length,
ff023aac 3490 u64 dev_offset, int is_dev_replace)
a2de733c
AJ
3491{
3492 struct btrfs_mapping_tree *map_tree =
a36cf8b8 3493 &sctx->dev_root->fs_info->mapping_tree;
a2de733c
AJ
3494 struct map_lookup *map;
3495 struct extent_map *em;
3496 int i;
ff023aac 3497 int ret = 0;
a2de733c
AJ
3498
3499 read_lock(&map_tree->map_tree.lock);
3500 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3501 read_unlock(&map_tree->map_tree.lock);
3502
3503 if (!em)
3504 return -EINVAL;
3505
3506 map = (struct map_lookup *)em->bdev;
3507 if (em->start != chunk_offset)
3508 goto out;
3509
3510 if (em->len < length)
3511 goto out;
3512
3513 for (i = 0; i < map->num_stripes; ++i) {
a36cf8b8 3514 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
859acaf1 3515 map->stripes[i].physical == dev_offset) {
a36cf8b8 3516 ret = scrub_stripe(sctx, map, scrub_dev, i,
ff023aac
SB
3517 chunk_offset, length,
3518 is_dev_replace);
a2de733c
AJ
3519 if (ret)
3520 goto out;
3521 }
3522 }
3523out:
3524 free_extent_map(em);
3525
3526 return ret;
3527}
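/*
 * Sketch of the stripe-matching loop in scrub_chunk(): given a chunk's
 * stripe table, find the index whose device and physical offset match
 * the dev extent being scrubbed. Standalone C; struct stripe is a
 * stand-in for the kernel's per-chunk stripe entries.
 */
#include <stdio.h>
#include <stdint.h>

struct stripe {
	int devid;		/* stand-in for stripes[i].dev->bdev */
	uint64_t physical;
};

static int find_stripe(const struct stripe *s, int num_stripes,
		       int devid, uint64_t dev_offset)
{
	int i;

	for (i = 0; i < num_stripes; i++)
		if (s[i].devid == devid && s[i].physical == dev_offset)
			return i;
	return -1;	/* no stripe of this chunk lives on that device */
}

int main(void)
{
	struct stripe map[2] = { { 1, 1048576 }, { 2, 1048576 } };

	printf("stripe index: %d\n", find_stripe(map, 2, 2, 1048576));
	return 0;
}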
3528
3529static noinline_for_stack
a36cf8b8 3530int scrub_enumerate_chunks(struct scrub_ctx *sctx,
ff023aac
SB
3531 struct btrfs_device *scrub_dev, u64 start, u64 end,
3532 int is_dev_replace)
a2de733c
AJ
3533{
3534 struct btrfs_dev_extent *dev_extent = NULL;
3535 struct btrfs_path *path;
a36cf8b8 3536 struct btrfs_root *root = sctx->dev_root;
a2de733c
AJ
3537 struct btrfs_fs_info *fs_info = root->fs_info;
3538 u64 length;
a2de733c 3539 u64 chunk_offset;
55e3a601 3540 int ret = 0;
a2de733c
AJ
3541 int slot;
3542 struct extent_buffer *l;
3543 struct btrfs_key key;
3544 struct btrfs_key found_key;
3545 struct btrfs_block_group_cache *cache;
ff023aac 3546 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
a2de733c
AJ
3547
3548 path = btrfs_alloc_path();
3549 if (!path)
3550 return -ENOMEM;
3551
3552 path->reada = 2;
3553 path->search_commit_root = 1;
3554 path->skip_locking = 1;
3555
a36cf8b8 3556 key.objectid = scrub_dev->devid;
a2de733c
AJ
3557 key.offset = 0ull;
3558 key.type = BTRFS_DEV_EXTENT_KEY;
3559
a2de733c
AJ
3560 while (1) {
3561 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3562 if (ret < 0)
8c51032f
AJ
3563 break;
3564 if (ret > 0) {
3565 if (path->slots[0] >=
3566 btrfs_header_nritems(path->nodes[0])) {
3567 ret = btrfs_next_leaf(root, path);
55e3a601
Z
3568 if (ret < 0)
3569 break;
3570 if (ret > 0) {
3571 ret = 0;
8c51032f 3572 break;
55e3a601
Z
3573 }
3574 } else {
3575 ret = 0;
8c51032f
AJ
3576 }
3577 }
a2de733c
AJ
3578
3579 l = path->nodes[0];
3580 slot = path->slots[0];
3581
3582 btrfs_item_key_to_cpu(l, &found_key, slot);
3583
a36cf8b8 3584 if (found_key.objectid != scrub_dev->devid)
a2de733c
AJ
3585 break;
3586
962a298f 3587 if (found_key.type != BTRFS_DEV_EXTENT_KEY)
a2de733c
AJ
3588 break;
3589
3590 if (found_key.offset >= end)
3591 break;
3592
3593 if (found_key.offset < key.offset)
3594 break;
3595
3596 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3597 length = btrfs_dev_extent_length(l, dev_extent);
3598
ced96edc
QW
3599 if (found_key.offset + length <= start)
3600 goto skip;
a2de733c 3601
a2de733c
AJ
3602 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3603
3604 /*
3605 * get a reference on the corresponding block group to prevent
3606 * the chunk from going away while we scrub it
3607 */
3608 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
ced96edc
QW
3609
3610 /* some chunks are removed but not committed to disk yet,
3611 * continue scrubbing */
3612 if (!cache)
3613 goto skip;
3614
55e3a601
Z
3615 /*
3616 * we need to call btrfs_inc_block_group_ro() with scrubs_paused,
3617 * to avoid deadlock caused by:
3618 * btrfs_inc_block_group_ro()
3619 * -> btrfs_wait_for_commit()
3620 * -> btrfs_commit_transaction()
3621 * -> btrfs_scrub_pause()
3622 */
3623 scrub_pause_on(fs_info);
3624 ret = btrfs_inc_block_group_ro(root, cache);
3625 scrub_pause_off(fs_info);
3626 if (ret) {
3627 btrfs_put_block_group(cache);
3628 break;
3629 }
3630
ff023aac
SB
3631 dev_replace->cursor_right = found_key.offset + length;
3632 dev_replace->cursor_left = found_key.offset;
3633 dev_replace->item_needs_writeback = 1;
8c204c96
ZL
3634 ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
3635 found_key.offset, is_dev_replace);
ff023aac
SB
3636
3637 /*
3638 * flush and submit all pending read and write bios, then
3639 * wait for them.
3640 * Note that in the dev replace case, a read request causes
3641 * write requests that are submitted in the read completion
3642 * worker. Therefore in the current situation, it is required
3643 * that all write requests are flushed, so that all read and
3644 * write requests are really completed when bios_in_flight
3645 * changes to 0.
3646 */
3647 atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
3648 scrub_submit(sctx);
3649 mutex_lock(&sctx->wr_ctx.wr_lock);
3650 scrub_wr_submit(sctx);
3651 mutex_unlock(&sctx->wr_ctx.wr_lock);
3652
3653 wait_event(sctx->list_wait,
3654 atomic_read(&sctx->bios_in_flight) == 0);
b708ce96
Z
3655
3656 scrub_pause_on(fs_info);
12cf9372
WS
3657
3658 /*
3659 * must be called before we decrease @scrub_paused.
3660 * make sure we don't block transaction commit while
3661 * we are waiting for pending workers to finish.
3662 */
ff023aac
SB
3663 wait_event(sctx->list_wait,
3664 atomic_read(&sctx->workers_pending) == 0);
12cf9372
WS
3665 atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
3666
b708ce96 3667 scrub_pause_off(fs_info);
ff023aac 3668
55e3a601 3669 btrfs_dec_block_group_ro(root, cache);
ff023aac 3670
a2de733c
AJ
3671 btrfs_put_block_group(cache);
3672 if (ret)
3673 break;
af1be4f8
SB
3674 if (is_dev_replace &&
3675 atomic64_read(&dev_replace->num_write_errors) > 0) {
ff023aac
SB
3676 ret = -EIO;
3677 break;
3678 }
3679 if (sctx->stat.malloc_errors > 0) {
3680 ret = -ENOMEM;
3681 break;
3682 }
a2de733c 3683
539f358a
ID
3684 dev_replace->cursor_left = dev_replace->cursor_right;
3685 dev_replace->item_needs_writeback = 1;
ced96edc 3686skip:
a2de733c 3687 key.offset = found_key.offset + length;
71267333 3688 btrfs_release_path(path);
a2de733c
AJ
3689 }
3690
a2de733c 3691 btrfs_free_path(path);
8c51032f 3692
55e3a601 3693 return ret;
a2de733c
AJ
3694}
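/*
 * Sketch of the cursor-advance pattern scrub_enumerate_chunks() uses to
 * walk a device's DEV_EXTENT items: search at (devid, DEV_EXTENT,
 * offset), then move the search key past each extent with
 * key.offset = found_key.offset + length. A sorted array stands in for
 * the b-tree here; the values are made up for the example.
 */
#include <stdio.h>
#include <stdint.h>

struct dev_extent {
	uint64_t offset;	/* physical start on the device */
	uint64_t length;
};

int main(void)
{
	const struct dev_extent ext[] = {
		{ 1048576, 8388608 }, { 9437184, 8388608 },
	};
	const int n = 2;
	uint64_t key_offset = 0, end = 1ULL << 30;
	int i;

	for (i = 0; i < n; i++) {
		if (ext[i].offset < key_offset)
			continue;	/* already behind the cursor */
		if (ext[i].offset >= end)
			break;		/* past the requested range */
		printf("scrub dev extent at %llu len %llu\n",
		       (unsigned long long)ext[i].offset,
		       (unsigned long long)ext[i].length);
		key_offset = ext[i].offset + ext[i].length;
	}
	return 0;
}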
3695
a36cf8b8
SB
3696static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
3697 struct btrfs_device *scrub_dev)
a2de733c
AJ
3698{
3699 int i;
3700 u64 bytenr;
3701 u64 gen;
3702 int ret;
a36cf8b8 3703 struct btrfs_root *root = sctx->dev_root;
a2de733c 3704
87533c47 3705 if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
79787eaa
JM
3706 return -EIO;
3707
5f546063
MX
3708 /* Seed devices of a new filesystem have their own generation. */
3709 if (scrub_dev->fs_devices != root->fs_info->fs_devices)
3710 gen = scrub_dev->generation;
3711 else
3712 gen = root->fs_info->last_trans_committed;
a2de733c
AJ
3713
3714 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
3715 bytenr = btrfs_sb_offset(i);
935e5cc9
MX
3716 if (bytenr + BTRFS_SUPER_INFO_SIZE >
3717 scrub_dev->commit_total_bytes)
a2de733c
AJ
3718 break;
3719
d9d181c1 3720 ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
a36cf8b8 3721 scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
ff023aac 3722 NULL, 1, bytenr);
a2de733c
AJ
3723 if (ret)
3724 return ret;
3725 }
b6bfebc1 3726 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
a2de733c
AJ
3727
3728 return 0;
3729}
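/*
 * Sketch of the superblock copy placement that scrub_supers() iterates
 * over. The offsets follow the btrfs on-disk convention (64 KiB,
 * 64 MiB, 256 GiB); a copy is skipped when it would not fit below the
 * device's committed size, mirroring the commit_total_bytes check
 * above. The macro names here are local stand-ins for the kernel ones.
 */
#include <stdio.h>
#include <stdint.h>

#define SUPER_INFO_OFFSET	(64 * 1024)
#define SUPER_INFO_SIZE		4096
#define SUPER_MIRROR_MAX	3
#define SUPER_MIRROR_SHIFT	12

static uint64_t sb_offset(int mirror)
{
	uint64_t start = 16 * 1024;

	if (mirror)
		return start << (SUPER_MIRROR_SHIFT * mirror);
	return SUPER_INFO_OFFSET;
}

int main(void)
{
	uint64_t commit_total_bytes = 1ULL << 31;	/* 2 GiB device */
	int i;

	for (i = 0; i < SUPER_MIRROR_MAX; i++) {
		uint64_t bytenr = sb_offset(i);

		/* mirror 2 at 256 GiB does not fit on this device */
		if (bytenr + SUPER_INFO_SIZE > commit_total_bytes)
			break;
		printf("scrub super mirror %d at %llu\n", i,
		       (unsigned long long)bytenr);
	}
	return 0;
}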
3730
3731/*
3732 * get a reference on fs_info->scrub_workers. start the workers if necessary
3733 */
ff023aac
SB
3734static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
3735 int is_dev_replace)
a2de733c 3736{
6f011058 3737 unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
0339ef2f 3738 int max_active = fs_info->thread_pool_size;
a2de733c 3739
632dd772 3740 if (fs_info->scrub_workers_refcnt == 0) {
ff023aac 3741 if (is_dev_replace)
0339ef2f
QW
3742 fs_info->scrub_workers =
3743 btrfs_alloc_workqueue("btrfs-scrub", flags,
3744 1, 4);
ff023aac 3745 else
0339ef2f
QW
3746 fs_info->scrub_workers =
3747 btrfs_alloc_workqueue("btrfs-scrub", flags,
3748 max_active, 4);
e82afc52
ZL
3749 if (!fs_info->scrub_workers)
3750 goto fail_scrub_workers;
3751
0339ef2f
QW
3752 fs_info->scrub_wr_completion_workers =
3753 btrfs_alloc_workqueue("btrfs-scrubwrc", flags,
3754 max_active, 2);
e82afc52
ZL
3755 if (!fs_info->scrub_wr_completion_workers)
3756 goto fail_scrub_wr_completion_workers;
3757
0339ef2f
QW
3758 fs_info->scrub_nocow_workers =
3759 btrfs_alloc_workqueue("btrfs-scrubnc", flags, 1, 0);
e82afc52
ZL
3760 if (!fs_info->scrub_nocow_workers)
3761 goto fail_scrub_nocow_workers;
20b2e302
ZL
3762 fs_info->scrub_parity_workers =
3763 btrfs_alloc_workqueue("btrfs-scrubparity", flags,
3764 max_active, 2);
e82afc52
ZL
3765 if (!fs_info->scrub_parity_workers)
3766 goto fail_scrub_parity_workers;
632dd772 3767 }
a2de733c 3768 ++fs_info->scrub_workers_refcnt;
e82afc52
ZL
3769 return 0;
3770
3771fail_scrub_parity_workers:
3772 btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
3773fail_scrub_nocow_workers:
3774 btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
3775fail_scrub_wr_completion_workers:
3776 btrfs_destroy_workqueue(fs_info->scrub_workers);
3777fail_scrub_workers:
3778 return -ENOMEM;
a2de733c
AJ
3779}
3780
aa1b8cd4 3781static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
a2de733c 3782{
ff023aac 3783 if (--fs_info->scrub_workers_refcnt == 0) {
0339ef2f
QW
3784 btrfs_destroy_workqueue(fs_info->scrub_workers);
3785 btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
3786 btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
20b2e302 3787 btrfs_destroy_workqueue(fs_info->scrub_parity_workers);
ff023aac 3788 }
a2de733c 3789 WARN_ON(fs_info->scrub_workers_refcnt < 0);
a2de733c
AJ
3790}
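/*
 * Sketch of the refcounted get/put pattern used by scrub_workers_get()
 * and scrub_workers_put(): the resources are created on the 0 -> 1
 * transition, destroyed on 1 -> 0, and a partially-created set is
 * unwound with the same goto-ladder style. malloc() stands in for the
 * btrfs workqueue allocation; the struct is invented for the example.
 */
#include <stdio.h>
#include <stdlib.h>

struct workers {
	int refcnt;
	void *wq_a;
	void *wq_b;
};

static int workers_get(struct workers *w)
{
	if (w->refcnt == 0) {
		w->wq_a = malloc(64);
		if (!w->wq_a)
			goto fail_a;
		w->wq_b = malloc(64);
		if (!w->wq_b)
			goto fail_b;
	}
	w->refcnt++;
	return 0;

fail_b:
	free(w->wq_a);		/* unwind only what was created */
fail_a:
	return -1;		/* -ENOMEM in the kernel version */
}

static void workers_put(struct workers *w)
{
	if (--w->refcnt == 0) {
		free(w->wq_a);
		free(w->wq_b);
	}
}

int main(void)
{
	struct workers w = { 0 };

	if (workers_get(&w) == 0) {
		workers_get(&w);	/* second user: no new allocation */
		workers_put(&w);
		workers_put(&w);	/* last put frees everything */
	}
	return 0;
}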
3791
aa1b8cd4
SB
3792int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
3793 u64 end, struct btrfs_scrub_progress *progress,
63a212ab 3794 int readonly, int is_dev_replace)
a2de733c 3795{
d9d181c1 3796 struct scrub_ctx *sctx;
a2de733c
AJ
3797 int ret;
3798 struct btrfs_device *dev;
5d68da3b 3799 struct rcu_string *name;
a2de733c 3800
aa1b8cd4 3801 if (btrfs_fs_closing(fs_info))
a2de733c
AJ
3802 return -EINVAL;
3803
aa1b8cd4 3804 if (fs_info->chunk_root->nodesize > BTRFS_STRIPE_LEN) {
b5d67f64
SB
3805 /*
3806 * the way scrub is implemented, it cannot calculate checksums
3807 * for nodes larger than BTRFS_STRIPE_LEN. Do not handle this
3808 * situation at all because it won't ever happen.
3809 */
efe120a0
FH
3810 btrfs_err(fs_info,
3811 "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
aa1b8cd4 3812 fs_info->chunk_root->nodesize, BTRFS_STRIPE_LEN);
b5d67f64
SB
3813 return -EINVAL;
3814 }
3815
aa1b8cd4 3816 if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
b5d67f64 3817 /* not supported for data w/o checksums */
efe120a0
FH
3818 btrfs_err(fs_info,
3819 "scrub: size assumption sectorsize != PAGE_SIZE "
3820 "(%d != %lu) fails",
27f9f023 3821 fs_info->chunk_root->sectorsize, PAGE_SIZE);
a2de733c
AJ
3822 return -EINVAL;
3823 }
3824
7a9e9987
SB
3825 if (fs_info->chunk_root->nodesize >
3826 PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
3827 fs_info->chunk_root->sectorsize >
3828 PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
3829 /*
3830 * would exhaust the array bounds of pagev member in
3831 * struct scrub_block
3832 */
efe120a0
FH
3833 btrfs_err(fs_info, "scrub: size assumption nodesize and sectorsize "
3834 "<= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
7a9e9987
SB
3835 fs_info->chunk_root->nodesize,
3836 SCRUB_MAX_PAGES_PER_BLOCK,
3837 fs_info->chunk_root->sectorsize,
3838 SCRUB_MAX_PAGES_PER_BLOCK);
3839 return -EINVAL;
3840 }
3841
a2de733c 3842
aa1b8cd4
SB
3843 mutex_lock(&fs_info->fs_devices->device_list_mutex);
3844 dev = btrfs_find_device(fs_info, devid, NULL, NULL);
63a212ab 3845 if (!dev || (dev->missing && !is_dev_replace)) {
aa1b8cd4 3846 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
a2de733c
AJ
3847 return -ENODEV;
3848 }
a2de733c 3849
5d68da3b
MX
3850 if (!is_dev_replace && !readonly && !dev->writeable) {
3851 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3852 rcu_read_lock();
3853 name = rcu_dereference(dev->name);
3854 btrfs_err(fs_info, "scrub: device %s is not writable",
3855 name->str);
3856 rcu_read_unlock();
3857 return -EROFS;
3858 }
3859
3b7a016f 3860 mutex_lock(&fs_info->scrub_lock);
63a212ab 3861 if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
a2de733c 3862 mutex_unlock(&fs_info->scrub_lock);
aa1b8cd4 3863 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
aa1b8cd4 3864 return -EIO;
a2de733c
AJ
3865 }
3866
8dabb742
SB
3867 btrfs_dev_replace_lock(&fs_info->dev_replace);
3868 if (dev->scrub_device ||
3869 (!is_dev_replace &&
3870 btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
3871 btrfs_dev_replace_unlock(&fs_info->dev_replace);
a2de733c 3872 mutex_unlock(&fs_info->scrub_lock);
aa1b8cd4 3873 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
a2de733c
AJ
3874 return -EINPROGRESS;
3875 }
8dabb742 3876 btrfs_dev_replace_unlock(&fs_info->dev_replace);
3b7a016f
WS
3877
3878 ret = scrub_workers_get(fs_info, is_dev_replace);
3879 if (ret) {
3880 mutex_unlock(&fs_info->scrub_lock);
3881 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3882 return ret;
3883 }
3884
63a212ab 3885 sctx = scrub_setup_ctx(dev, is_dev_replace);
d9d181c1 3886 if (IS_ERR(sctx)) {
a2de733c 3887 mutex_unlock(&fs_info->scrub_lock);
aa1b8cd4
SB
3888 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3889 scrub_workers_put(fs_info);
d9d181c1 3890 return PTR_ERR(sctx);
a2de733c 3891 }
d9d181c1
SB
3892 sctx->readonly = readonly;
3893 dev->scrub_device = sctx;
3cb0929a 3894 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
a2de733c 3895
3cb0929a
WS
3896 /*
3897 * by checking @scrub_pause_req here, we can avoid a
3898 * race between transaction commit and scrubbing.
3899 */
cb7ab021 3900 __scrub_blocked_if_needed(fs_info);
a2de733c
AJ
3901 atomic_inc(&fs_info->scrubs_running);
3902 mutex_unlock(&fs_info->scrub_lock);
a2de733c 3903
ff023aac 3904 if (!is_dev_replace) {
9b011adf
WS
3905 /*
3906 * by holding the device list mutex, we avoid racing with
3907 * the superblock writes that log tree sync can kick off.
3908 */
3cb0929a 3909 mutex_lock(&fs_info->fs_devices->device_list_mutex);
ff023aac 3910 ret = scrub_supers(sctx, dev);
3cb0929a 3911 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
ff023aac 3912 }
a2de733c
AJ
3913
3914 if (!ret)
ff023aac
SB
3915 ret = scrub_enumerate_chunks(sctx, dev, start, end,
3916 is_dev_replace);
a2de733c 3917
b6bfebc1 3918 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
a2de733c
AJ
3919 atomic_dec(&fs_info->scrubs_running);
3920 wake_up(&fs_info->scrub_pause_wait);
3921
b6bfebc1 3922 wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);
0ef8e451 3923
a2de733c 3924 if (progress)
d9d181c1 3925 memcpy(progress, &sctx->stat, sizeof(*progress));
a2de733c
AJ
3926
3927 mutex_lock(&fs_info->scrub_lock);
3928 dev->scrub_device = NULL;
3b7a016f 3929 scrub_workers_put(fs_info);
a2de733c
AJ
3930 mutex_unlock(&fs_info->scrub_lock);
3931
f55985f4 3932 scrub_put_ctx(sctx);
a2de733c
AJ
3933
3934 return ret;
3935}
3936
143bede5 3937void btrfs_scrub_pause(struct btrfs_root *root)
a2de733c
AJ
3938{
3939 struct btrfs_fs_info *fs_info = root->fs_info;
3940
3941 mutex_lock(&fs_info->scrub_lock);
3942 atomic_inc(&fs_info->scrub_pause_req);
3943 while (atomic_read(&fs_info->scrubs_paused) !=
3944 atomic_read(&fs_info->scrubs_running)) {
3945 mutex_unlock(&fs_info->scrub_lock);
3946 wait_event(fs_info->scrub_pause_wait,
3947 atomic_read(&fs_info->scrubs_paused) ==
3948 atomic_read(&fs_info->scrubs_running));
3949 mutex_lock(&fs_info->scrub_lock);
3950 }
3951 mutex_unlock(&fs_info->scrub_lock);
a2de733c
AJ
3952}
3953
143bede5 3954void btrfs_scrub_continue(struct btrfs_root *root)
a2de733c
AJ
3955{
3956 struct btrfs_fs_info *fs_info = root->fs_info;
3957
3958 atomic_dec(&fs_info->scrub_pause_req);
3959 wake_up(&fs_info->scrub_pause_wait);
a2de733c
AJ
3960}
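/*
 * Sketch of the pause handshake in btrfs_scrub_pause() and
 * btrfs_scrub_continue(): raise a pause request, then drop the lock and
 * wait until every running scrub has parked itself (paused == running).
 * pthread primitives stand in for the kernel's mutex, atomics and
 * waitqueue; the struct and function names are invented for the sketch.
 */
#include <pthread.h>

struct pause_state {
	pthread_mutex_t lock;
	pthread_cond_t wait;
	int pause_req;
	int running;
	int paused;
};

static void scrub_pause(struct pause_state *s)
{
	pthread_mutex_lock(&s->lock);
	s->pause_req++;
	/* cond_wait drops the lock while sleeping, like the kernel loop */
	while (s->paused != s->running)
		pthread_cond_wait(&s->wait, &s->lock);
	pthread_mutex_unlock(&s->lock);
}

static void scrub_continue(struct pause_state *s)
{
	pthread_mutex_lock(&s->lock);
	s->pause_req--;
	pthread_cond_broadcast(&s->wait);	/* wake parked scrubbers */
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct pause_state s = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 0, 0
	};

	scrub_pause(&s);	/* returns at once: 0 paused == 0 running */
	scrub_continue(&s);
	return 0;
}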
3961
aa1b8cd4 3962int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
a2de733c 3963{
a2de733c
AJ
3964 mutex_lock(&fs_info->scrub_lock);
3965 if (!atomic_read(&fs_info->scrubs_running)) {
3966 mutex_unlock(&fs_info->scrub_lock);
3967 return -ENOTCONN;
3968 }
3969
3970 atomic_inc(&fs_info->scrub_cancel_req);
3971 while (atomic_read(&fs_info->scrubs_running)) {
3972 mutex_unlock(&fs_info->scrub_lock);
3973 wait_event(fs_info->scrub_pause_wait,
3974 atomic_read(&fs_info->scrubs_running) == 0);
3975 mutex_lock(&fs_info->scrub_lock);
3976 }
3977 atomic_dec(&fs_info->scrub_cancel_req);
3978 mutex_unlock(&fs_info->scrub_lock);
3979
3980 return 0;
3981}
3982
aa1b8cd4
SB
3983int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
3984 struct btrfs_device *dev)
49b25e05 3985{
d9d181c1 3986 struct scrub_ctx *sctx;
a2de733c
AJ
3987
3988 mutex_lock(&fs_info->scrub_lock);
d9d181c1
SB
3989 sctx = dev->scrub_device;
3990 if (!sctx) {
a2de733c
AJ
3991 mutex_unlock(&fs_info->scrub_lock);
3992 return -ENOTCONN;
3993 }
d9d181c1 3994 atomic_inc(&sctx->cancel_req);
a2de733c
AJ
3995 while (dev->scrub_device) {
3996 mutex_unlock(&fs_info->scrub_lock);
3997 wait_event(fs_info->scrub_pause_wait,
3998 dev->scrub_device == NULL);
3999 mutex_lock(&fs_info->scrub_lock);
4000 }
4001 mutex_unlock(&fs_info->scrub_lock);
4002
4003 return 0;
4004}
1623edeb 4005
a2de733c
AJ
4006int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
4007 struct btrfs_scrub_progress *progress)
4008{
4009 struct btrfs_device *dev;
d9d181c1 4010 struct scrub_ctx *sctx = NULL;
a2de733c
AJ
4011
4012 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
aa1b8cd4 4013 dev = btrfs_find_device(root->fs_info, devid, NULL, NULL);
a2de733c 4014 if (dev)
d9d181c1
SB
4015 sctx = dev->scrub_device;
4016 if (sctx)
4017 memcpy(progress, &sctx->stat, sizeof(*progress));
a2de733c
AJ
4018 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
4019
d9d181c1 4020 return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
a2de733c 4021}
ff023aac
SB
4022
4023static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
4024 u64 extent_logical, u64 extent_len,
4025 u64 *extent_physical,
4026 struct btrfs_device **extent_dev,
4027 int *extent_mirror_num)
4028{
4029 u64 mapped_length;
4030 struct btrfs_bio *bbio = NULL;
4031 int ret;
4032
4033 mapped_length = extent_len;
4034 ret = btrfs_map_block(fs_info, READ, extent_logical,
4035 &mapped_length, &bbio, 0);
4036 if (ret || !bbio || mapped_length < extent_len ||
4037 !bbio->stripes[0].dev->bdev) {
6e9606d2 4038 btrfs_put_bbio(bbio);
ff023aac
SB
4039 return;
4040 }
4041
4042 *extent_physical = bbio->stripes[0].physical;
4043 *extent_mirror_num = bbio->mirror_num;
4044 *extent_dev = bbio->stripes[0].dev;
6e9606d2 4045 btrfs_put_bbio(bbio);
ff023aac
SB
4046}
4047
4048static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
4049 struct scrub_wr_ctx *wr_ctx,
4050 struct btrfs_fs_info *fs_info,
4051 struct btrfs_device *dev,
4052 int is_dev_replace)
4053{
4054 WARN_ON(wr_ctx->wr_curr_bio != NULL);
4055
4056 mutex_init(&wr_ctx->wr_lock);
4057 wr_ctx->wr_curr_bio = NULL;
4058 if (!is_dev_replace)
4059 return 0;
4060
4061 WARN_ON(!dev->bdev);
b54ffb73 4062 wr_ctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
ff023aac
SB
4063 wr_ctx->tgtdev = dev;
4064 atomic_set(&wr_ctx->flush_all_writes, 0);
4065 return 0;
4066}
4067
4068static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
4069{
4070 mutex_lock(&wr_ctx->wr_lock);
4071 kfree(wr_ctx->wr_curr_bio);
4072 wr_ctx->wr_curr_bio = NULL;
4073 mutex_unlock(&wr_ctx->wr_lock);
4074}
4075
4076static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
4077 int mirror_num, u64 physical_for_dev_replace)
4078{
4079 struct scrub_copy_nocow_ctx *nocow_ctx;
4080 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
4081
4082 nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
4083 if (!nocow_ctx) {
4084 spin_lock(&sctx->stat_lock);
4085 sctx->stat.malloc_errors++;
4086 spin_unlock(&sctx->stat_lock);
4087 return -ENOMEM;
4088 }
4089
4090 scrub_pending_trans_workers_inc(sctx);
4091
4092 nocow_ctx->sctx = sctx;
4093 nocow_ctx->logical = logical;
4094 nocow_ctx->len = len;
4095 nocow_ctx->mirror_num = mirror_num;
4096 nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
9e0af237
LB
4097 btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper,
4098 copy_nocow_pages_worker, NULL, NULL);
652f25a2 4099 INIT_LIST_HEAD(&nocow_ctx->inodes);
0339ef2f
QW
4100 btrfs_queue_work(fs_info->scrub_nocow_workers,
4101 &nocow_ctx->work);
ff023aac
SB
4102
4103 return 0;
4104}
4105
652f25a2
JB
4106static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
4107{
4108 struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
4109 struct scrub_nocow_inode *nocow_inode;
4110
4111 nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
4112 if (!nocow_inode)
4113 return -ENOMEM;
4114 nocow_inode->inum = inum;
4115 nocow_inode->offset = offset;
4116 nocow_inode->root = root;
4117 list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
4118 return 0;
4119}
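/*
 * Sketch of the collect-then-drain pattern around
 * record_inode_for_nocow(): the backref walk calls back once per
 * (inum, offset, root) that references the logical extent, each hit is
 * appended to a list, and the worker drains the list afterwards. A
 * plain singly linked list replaces the kernel's list_head here.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct nocow_inode {
	uint64_t inum, offset, root;
	struct nocow_inode *next;
};

static int record_inode(struct nocow_inode **head, uint64_t inum,
			uint64_t offset, uint64_t root)
{
	struct nocow_inode *n = malloc(sizeof(*n));

	if (!n)
		return -1;	/* -ENOMEM in the kernel version */
	n->inum = inum;
	n->offset = offset;
	n->root = root;
	n->next = *head;
	*head = n;
	return 0;
}

int main(void)
{
	struct nocow_inode *head = NULL;

	record_inode(&head, 257, 0, 5);
	record_inode(&head, 258, 4096, 5);

	while (head) {	/* drain, as copy_nocow_pages_worker() does */
		struct nocow_inode *n = head;

		head = n->next;
		printf("copy pages for inode %llu\n",
		       (unsigned long long)n->inum);
		free(n);
	}
	return 0;
}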
4120
4121#define COPY_COMPLETE 1
4122
ff023aac
SB
4123static void copy_nocow_pages_worker(struct btrfs_work *work)
4124{
4125 struct scrub_copy_nocow_ctx *nocow_ctx =
4126 container_of(work, struct scrub_copy_nocow_ctx, work);
4127 struct scrub_ctx *sctx = nocow_ctx->sctx;
4128 u64 logical = nocow_ctx->logical;
4129 u64 len = nocow_ctx->len;
4130 int mirror_num = nocow_ctx->mirror_num;
4131 u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
4132 int ret;
4133 struct btrfs_trans_handle *trans = NULL;
4134 struct btrfs_fs_info *fs_info;
4135 struct btrfs_path *path;
4136 struct btrfs_root *root;
4137 int not_written = 0;
4138
4139 fs_info = sctx->dev_root->fs_info;
4140 root = fs_info->extent_root;
4141
4142 path = btrfs_alloc_path();
4143 if (!path) {
4144 spin_lock(&sctx->stat_lock);
4145 sctx->stat.malloc_errors++;
4146 spin_unlock(&sctx->stat_lock);
4147 not_written = 1;
4148 goto out;
4149 }
4150
4151 trans = btrfs_join_transaction(root);
4152 if (IS_ERR(trans)) {
4153 not_written = 1;
4154 goto out;
4155 }
4156
4157 ret = iterate_inodes_from_logical(logical, fs_info, path,
652f25a2 4158 record_inode_for_nocow, nocow_ctx);
ff023aac 4159 if (ret != 0 && ret != -ENOENT) {
efe120a0
FH
4160 btrfs_warn(fs_info, "iterate_inodes_from_logical() failed: log %llu, "
4161 "phys %llu, len %llu, mir %u, ret %d",
118a0a25
GU
4162 logical, physical_for_dev_replace, len, mirror_num,
4163 ret);
ff023aac
SB
4164 not_written = 1;
4165 goto out;
4166 }
4167
652f25a2
JB
4168 btrfs_end_transaction(trans, root);
4169 trans = NULL;
4170 while (!list_empty(&nocow_ctx->inodes)) {
4171 struct scrub_nocow_inode *entry;
4172 entry = list_first_entry(&nocow_ctx->inodes,
4173 struct scrub_nocow_inode,
4174 list);
4175 list_del_init(&entry->list);
4176 ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
4177 entry->root, nocow_ctx);
4178 kfree(entry);
4179 if (ret == COPY_COMPLETE) {
4180 ret = 0;
4181 break;
4182 } else if (ret) {
4183 break;
4184 }
4185 }
ff023aac 4186out:
652f25a2
JB
4187 while (!list_empty(&nocow_ctx->inodes)) {
4188 struct scrub_nocow_inode *entry;
4189 entry = list_first_entry(&nocow_ctx->inodes,
4190 struct scrub_nocow_inode,
4191 list);
4192 list_del_init(&entry->list);
4193 kfree(entry);
4194 }
ff023aac
SB
4195 if (trans && !IS_ERR(trans))
4196 btrfs_end_transaction(trans, root);
4197 if (not_written)
4198 btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
4199 num_uncorrectable_read_errors);
4200
4201 btrfs_free_path(path);
4202 kfree(nocow_ctx);
4203
4204 scrub_pending_trans_workers_dec(sctx);
4205}
4206
32159242
GH
4207static int check_extent_to_block(struct inode *inode, u64 start, u64 len,
4208 u64 logical)
4209{
4210 struct extent_state *cached_state = NULL;
4211 struct btrfs_ordered_extent *ordered;
4212 struct extent_io_tree *io_tree;
4213 struct extent_map *em;
4214 u64 lockstart = start, lockend = start + len - 1;
4215 int ret = 0;
4216
4217 io_tree = &BTRFS_I(inode)->io_tree;
4218
4219 lock_extent_bits(io_tree, lockstart, lockend, 0, &cached_state);
4220 ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
4221 if (ordered) {
4222 btrfs_put_ordered_extent(ordered);
4223 ret = 1;
4224 goto out_unlock;
4225 }
4226
4227 em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
4228 if (IS_ERR(em)) {
4229 ret = PTR_ERR(em);
4230 goto out_unlock;
4231 }
4232
4233 /*
4234 * This extent does not actually cover the logical extent anymore;
4235 * move on to the next inode.
4236 */
4237 if (em->block_start > logical ||
4238 em->block_start + em->block_len < logical + len) {
4239 free_extent_map(em);
4240 ret = 1;
4241 goto out_unlock;
4242 }
4243 free_extent_map(em);
4244
4245out_unlock:
4246 unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
4247 GFP_NOFS);
4248 return ret;
4249}
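/*
 * Sketch of the containment test check_extent_to_block() applies: the
 * cached extent map must fully cover [logical, logical + len) on disk,
 * otherwise the inode no longer references the block being copied.
 * Standalone C; the function name is invented for illustration.
 */
#include <stdio.h>
#include <stdint.h>

static int extent_covers(uint64_t block_start, uint64_t block_len,
			 uint64_t logical, uint64_t len)
{
	/* negation of: block_start > logical ||
	 *              block_start + block_len < logical + len */
	return block_start <= logical &&
	       logical + len <= block_start + block_len;
}

int main(void)
{
	/* extent at 1 MiB, 128 KiB long; block at 1 MiB + 64 KiB, 64 KiB */
	printf("covered: %d\n",
	       extent_covers(1048576, 131072, 1114112, 65536));
	return 0;
}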
4250
652f25a2
JB
4251static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
4252 struct scrub_copy_nocow_ctx *nocow_ctx)
ff023aac 4253{
826aa0a8 4254 struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
ff023aac 4255 struct btrfs_key key;
826aa0a8
MX
4256 struct inode *inode;
4257 struct page *page;
ff023aac 4258 struct btrfs_root *local_root;
652f25a2 4259 struct extent_io_tree *io_tree;
ff023aac 4260 u64 physical_for_dev_replace;
32159242 4261 u64 nocow_ctx_logical;
652f25a2 4262 u64 len = nocow_ctx->len;
826aa0a8 4263 unsigned long index;
6f1c3605 4264 int srcu_index;
652f25a2
JB
4265 int ret = 0;
4266 int err = 0;
ff023aac
SB
4267
4268 key.objectid = root;
4269 key.type = BTRFS_ROOT_ITEM_KEY;
4270 key.offset = (u64)-1;
6f1c3605
LB
4271
4272 srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
4273
ff023aac 4274 local_root = btrfs_read_fs_root_no_name(fs_info, &key);
6f1c3605
LB
4275 if (IS_ERR(local_root)) {
4276 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
ff023aac 4277 return PTR_ERR(local_root);
6f1c3605 4278 }
ff023aac
SB
4279
4280 key.type = BTRFS_INODE_ITEM_KEY;
4281 key.objectid = inum;
4282 key.offset = 0;
4283 inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
6f1c3605 4284 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
ff023aac
SB
4285 if (IS_ERR(inode))
4286 return PTR_ERR(inode);
4287
edd1400b
MX
4288 /* Avoid truncate/dio/punch hole... */
4289 mutex_lock(&inode->i_mutex);
4290 inode_dio_wait(inode);
4291
ff023aac 4292 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
652f25a2 4293 io_tree = &BTRFS_I(inode)->io_tree;
32159242 4294 nocow_ctx_logical = nocow_ctx->logical;
652f25a2 4295
32159242
GH
4296 ret = check_extent_to_block(inode, offset, len, nocow_ctx_logical);
4297 if (ret) {
4298 ret = ret > 0 ? 0 : ret;
4299 goto out;
652f25a2 4300 }
652f25a2 4301
ff023aac 4302 while (len >= PAGE_CACHE_SIZE) {
ff023aac 4303 index = offset >> PAGE_CACHE_SHIFT;
edd1400b 4304again:
ff023aac
SB
4305 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
4306 if (!page) {
efe120a0 4307 btrfs_err(fs_info, "find_or_create_page() failed");
ff023aac 4308 ret = -ENOMEM;
826aa0a8 4309 goto out;
ff023aac
SB
4310 }
4311
4312 if (PageUptodate(page)) {
4313 if (PageDirty(page))
4314 goto next_page;
4315 } else {
4316 ClearPageError(page);
32159242 4317 err = extent_read_full_page(io_tree, page,
652f25a2
JB
4318 btrfs_get_extent,
4319 nocow_ctx->mirror_num);
826aa0a8
MX
4320 if (err) {
4321 ret = err;
ff023aac
SB
4322 goto next_page;
4323 }
edd1400b 4324
26b25891 4325 lock_page(page);
edd1400b
MX
4326 /*
4327 * If the page has been removed from the page cache,
4328 * the data on it is meaningless, because it may be
4329 * stale; the new data may have been written to a new
4330 * page in the page cache.
4331 */
4332 if (page->mapping != inode->i_mapping) {
652f25a2 4333 unlock_page(page);
edd1400b
MX
4334 page_cache_release(page);
4335 goto again;
4336 }
ff023aac
SB
4337 if (!PageUptodate(page)) {
4338 ret = -EIO;
4339 goto next_page;
4340 }
4341 }
32159242
GH
4342
4343 ret = check_extent_to_block(inode, offset, len,
4344 nocow_ctx_logical);
4345 if (ret) {
4346 ret = ret > 0 ? 0 : ret;
4347 goto next_page;
4348 }
4349
826aa0a8
MX
4350 err = write_page_nocow(nocow_ctx->sctx,
4351 physical_for_dev_replace, page);
4352 if (err)
4353 ret = err;
ff023aac 4354next_page:
826aa0a8
MX
4355 unlock_page(page);
4356 page_cache_release(page);
4357
4358 if (ret)
4359 break;
4360
ff023aac
SB
4361 offset += PAGE_CACHE_SIZE;
4362 physical_for_dev_replace += PAGE_CACHE_SIZE;
32159242 4363 nocow_ctx_logical += PAGE_CACHE_SIZE;
ff023aac
SB
4364 len -= PAGE_CACHE_SIZE;
4365 }
652f25a2 4366 ret = COPY_COMPLETE;
826aa0a8 4367out:
edd1400b 4368 mutex_unlock(&inode->i_mutex);
826aa0a8 4369 iput(inode);
ff023aac
SB
4370 return ret;
4371}
4372
4373static int write_page_nocow(struct scrub_ctx *sctx,
4374 u64 physical_for_dev_replace, struct page *page)
4375{
4376 struct bio *bio;
4377 struct btrfs_device *dev;
4378 int ret;
ff023aac
SB
4379
4380 dev = sctx->wr_ctx.tgtdev;
4381 if (!dev)
4382 return -EIO;
4383 if (!dev->bdev) {
94647322
DS
4384 btrfs_warn_rl(dev->dev_root->fs_info,
4385 "scrub write_page_nocow(bdev == NULL) is unexpected");
ff023aac
SB
4386 return -EIO;
4387 }
9be3395b 4388 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
ff023aac
SB
4389 if (!bio) {
4390 spin_lock(&sctx->stat_lock);
4391 sctx->stat.malloc_errors++;
4392 spin_unlock(&sctx->stat_lock);
4393 return -ENOMEM;
4394 }
4f024f37
KO
4395 bio->bi_iter.bi_size = 0;
4396 bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
ff023aac
SB
4397 bio->bi_bdev = dev->bdev;
4398 ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
4399 if (ret != PAGE_CACHE_SIZE) {
4400leave_with_eio:
4401 bio_put(bio);
4402 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
4403 return -EIO;
4404 }
ff023aac 4405
33879d45 4406 if (btrfsic_submit_bio_wait(WRITE_SYNC, bio))
ff023aac
SB
4407 goto leave_with_eio;
4408
4409 bio_put(bio);
4410 return 0;
4411}
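/*
 * Sketch of the byte-to-sector conversion used when write_page_nocow()
 * programs the bio above: block layer sectors are fixed 512-byte units,
 * so a byte offset becomes a sector number by shifting right 9 bits.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t physical = 1048576;		/* 1 MiB byte offset */
	uint64_t sector = physical >> 9;	/* 512-byte sectors */

	printf("byte %llu -> sector %llu\n",
	       (unsigned long long)physical, (unsigned long long)sector);
	return 0;
}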