1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2011, 2012 STRATO. All rights reserved.
4 */
5
6 #include <linux/blkdev.h>
7 #include <linux/ratelimit.h>
8 #include <linux/sched/mm.h>
9 #include <crypto/hash.h>
10 #include "ctree.h"
11 #include "discard.h"
12 #include "volumes.h"
13 #include "disk-io.h"
14 #include "ordered-data.h"
15 #include "transaction.h"
16 #include "backref.h"
17 #include "extent_io.h"
18 #include "dev-replace.h"
19 #include "check-integrity.h"
20 #include "raid56.h"
21 #include "block-group.h"
22 #include "zoned.h"
23 #include "fs.h"
24 #include "accessors.h"
25 #include "file-item.h"
26 #include "scrub.h"
27
/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 * - In case an unrepairable extent is encountered, track which files are
 *   affected and report them
 * - track and record media errors, throw out bad devices
 * - add a mode to also read unallocated space
 */
40
41 struct scrub_ctx;
42
/*
 * The following value only influences the performance.
 *
 * This determines how many stripes would be submitted in one go,
 * which is 512KiB (BTRFS_STRIPE_LEN * SCRUB_STRIPES_PER_GROUP).
 */
49 #define SCRUB_STRIPES_PER_GROUP 8
50
51 /*
52 * How many groups we have for each sctx.
53 *
54 * This would be 8M per device, the same value as the old scrub in-flight bios
55 * size limit.
56 */
57 #define SCRUB_GROUPS_PER_SCTX 16
58
59 #define SCRUB_TOTAL_STRIPES (SCRUB_GROUPS_PER_SCTX * SCRUB_STRIPES_PER_GROUP)
60
61 /*
62 * The following value times PAGE_SIZE needs to be large enough to match the
63 * largest node/leaf/sector size that shall be supported.
64 */
65 #define SCRUB_MAX_SECTORS_PER_BLOCK (BTRFS_MAX_METADATA_BLOCKSIZE / SZ_4K)
66
67 /* Represent one sector and its needed info to verify the content. */
68 struct scrub_sector_verification {
69 bool is_metadata;
70
71 union {
72 /*
73 * Csum pointer for data csum verification. Should point to a
74 * sector csum inside scrub_stripe::csums.
75 *
76 * NULL if this data sector has no csum.
77 */
78 u8 *csum;
79
80 /*
81 * Extra info for metadata verification. All sectors inside a
82 * tree block share the same generation.
83 */
84 u64 generation;
85 };
86 };
87
88 enum scrub_stripe_flags {
89 /* Set when @mirror_num, @dev, @physical and @logical are set. */
90 SCRUB_STRIPE_FLAG_INITIALIZED,
91
92 /* Set when the read-repair is finished. */
93 SCRUB_STRIPE_FLAG_REPAIR_DONE,
94
95 /*
96 * Set for data stripes if it's triggered from P/Q stripe.
97 * During such scrub, we should not report errors in data stripes, nor
98 * update the accounting.
99 */
100 SCRUB_STRIPE_FLAG_NO_REPORT,
101 };
102
103 #define SCRUB_STRIPE_PAGES (BTRFS_STRIPE_LEN / PAGE_SIZE)
104
105 /*
106 * Represent one contiguous range with a length of BTRFS_STRIPE_LEN.
107 */
108 struct scrub_stripe {
109 struct scrub_ctx *sctx;
110 struct btrfs_block_group *bg;
111
112 struct page *pages[SCRUB_STRIPE_PAGES];
113 struct scrub_sector_verification *sectors;
114
115 struct btrfs_device *dev;
116 u64 logical;
117 u64 physical;
118
119 u16 mirror_num;
120
121 /* Should be BTRFS_STRIPE_LEN / sectorsize. */
122 u16 nr_sectors;
123
124 /*
125 * How many data/meta extents are in this stripe. Only for scrub status
126 * reporting purposes.
127 */
128 u16 nr_data_extents;
129 u16 nr_meta_extents;
130
131 atomic_t pending_io;
132 wait_queue_head_t io_wait;
133 wait_queue_head_t repair_wait;
134
135 /*
136 * Indicate the states of the stripe. Bits are defined in
137 * scrub_stripe_flags enum.
138 */
139 unsigned long state;
140
141 /* Indicate which sectors are covered by extent items. */
142 unsigned long extent_sector_bitmap;
143
/*
 * The errors hit during the initial read of the stripe.
 *
 * Would be utilized for error reporting and repair.
 *
 * The remaining init_nr_* record the number of errors hit, only used
 * by error reporting.
 */
152 unsigned long init_error_bitmap;
153 unsigned int init_nr_io_errors;
154 unsigned int init_nr_csum_errors;
155 unsigned int init_nr_meta_errors;
156
157 /*
158 * The following error bitmaps are all for the current status.
159 * Every time we submit a new read, these bitmaps may be updated.
160 *
161 * error_bitmap = io_error_bitmap | csum_error_bitmap | meta_error_bitmap;
162 *
163 * IO and csum errors can happen for both metadata and data.
164 */
165 unsigned long error_bitmap;
166 unsigned long io_error_bitmap;
167 unsigned long csum_error_bitmap;
168 unsigned long meta_error_bitmap;
169
170 /* For writeback (repair or replace) error reporting. */
171 unsigned long write_error_bitmap;
172
173 /* Writeback can be concurrent, thus we need to protect the bitmap. */
174 spinlock_t write_error_lock;
175
176 /*
177 * Checksum for the whole stripe if this stripe is inside a data block
178 * group.
179 */
180 u8 *csums;
181
182 struct work_struct work;
183 };
184
185 struct scrub_ctx {
186 struct scrub_stripe stripes[SCRUB_TOTAL_STRIPES];
187 struct scrub_stripe *raid56_data_stripes;
188 struct btrfs_fs_info *fs_info;
189 struct btrfs_path extent_path;
190 struct btrfs_path csum_path;
191 int first_free;
192 int cur_stripe;
193 atomic_t cancel_req;
194 int readonly;
195 int sectors_per_bio;
196
197 /* State of IO submission throttling affecting the associated device */
198 ktime_t throttle_deadline;
199 u64 throttle_sent;
200
201 int is_dev_replace;
202 u64 write_pointer;
203
204 struct mutex wr_lock;
205 struct btrfs_device *wr_tgtdev;
206
207 /*
208 * statistics
209 */
210 struct btrfs_scrub_progress stat;
211 spinlock_t stat_lock;
212
213 /*
214 * Use a ref counter to avoid use-after-free issues. Scrub workers
215 * decrement bios_in_flight and workers_pending and then do a wakeup
216 * on the list_wait wait queue. We must ensure the main scrub task
217 * doesn't free the scrub context before or while the workers are
218 * doing the wakeup() call.
219 */
220 refcount_t refs;
221 };
222
223 struct scrub_warning {
224 struct btrfs_path *path;
225 u64 extent_item_size;
226 const char *errstr;
227 u64 physical;
228 u64 logical;
229 struct btrfs_device *dev;
230 };
231
static void release_scrub_stripe(struct scrub_stripe *stripe)
233 {
234 if (!stripe)
235 return;
236
237 for (int i = 0; i < SCRUB_STRIPE_PAGES; i++) {
238 if (stripe->pages[i])
239 __free_page(stripe->pages[i]);
240 stripe->pages[i] = NULL;
241 }
242 kfree(stripe->sectors);
243 kfree(stripe->csums);
244 stripe->sectors = NULL;
245 stripe->csums = NULL;
246 stripe->sctx = NULL;
247 stripe->state = 0;
248 }
249
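/* Allocate the pages, the sector verification array and the csum buffer for one stripe. */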
static int init_scrub_stripe(struct btrfs_fs_info *fs_info,
			     struct scrub_stripe *stripe)
252 {
253 int ret;
254
255 memset(stripe, 0, sizeof(*stripe));
256
257 stripe->nr_sectors = BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits;
258 stripe->state = 0;
259
260 init_waitqueue_head(&stripe->io_wait);
261 init_waitqueue_head(&stripe->repair_wait);
262 atomic_set(&stripe->pending_io, 0);
263 spin_lock_init(&stripe->write_error_lock);
264
265 ret = btrfs_alloc_page_array(SCRUB_STRIPE_PAGES, stripe->pages);
266 if (ret < 0)
267 goto error;
268
269 stripe->sectors = kcalloc(stripe->nr_sectors,
270 sizeof(struct scrub_sector_verification),
271 GFP_KERNEL);
272 if (!stripe->sectors)
273 goto error;
274
275 stripe->csums = kcalloc(BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits,
276 fs_info->csum_size, GFP_KERNEL);
277 if (!stripe->csums)
278 goto error;
279 return 0;
280 error:
281 release_scrub_stripe(stripe);
282 return -ENOMEM;
283 }
284
static void wait_scrub_stripe_io(struct scrub_stripe *stripe)
286 {
287 wait_event(stripe->io_wait, atomic_read(&stripe->pending_io) == 0);
288 }
289
290 static void scrub_put_ctx(struct scrub_ctx *sctx);
291
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
293 {
294 while (atomic_read(&fs_info->scrub_pause_req)) {
295 mutex_unlock(&fs_info->scrub_lock);
296 wait_event(fs_info->scrub_pause_wait,
297 atomic_read(&fs_info->scrub_pause_req) == 0);
298 mutex_lock(&fs_info->scrub_lock);
299 }
300 }
301
static void scrub_pause_on(struct btrfs_fs_info *fs_info)
303 {
304 atomic_inc(&fs_info->scrubs_paused);
305 wake_up(&fs_info->scrub_pause_wait);
306 }
307
static void scrub_pause_off(struct btrfs_fs_info *fs_info)
309 {
310 mutex_lock(&fs_info->scrub_lock);
311 __scrub_blocked_if_needed(fs_info);
312 atomic_dec(&fs_info->scrubs_paused);
313 mutex_unlock(&fs_info->scrub_lock);
314
315 wake_up(&fs_info->scrub_pause_wait);
316 }
317
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
319 {
320 scrub_pause_on(fs_info);
321 scrub_pause_off(fs_info);
322 }
323
static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
325 {
326 int i;
327
328 if (!sctx)
329 return;
330
331 for (i = 0; i < SCRUB_TOTAL_STRIPES; i++)
332 release_scrub_stripe(&sctx->stripes[i]);
333
334 kvfree(sctx);
335 }
336
static void scrub_put_ctx(struct scrub_ctx *sctx)
338 {
339 if (refcount_dec_and_test(&sctx->refs))
340 scrub_free_ctx(sctx);
341 }
342
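/* Allocate and initialize a scrub context, including all of its stripe slots. */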
static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
		struct btrfs_fs_info *fs_info, int is_dev_replace)
345 {
346 struct scrub_ctx *sctx;
347 int i;
348
/*
 * Since sctx has inline 128 stripes, it can go beyond 64K easily.  Use
 * kvzalloc().
 */
352 sctx = kvzalloc(sizeof(*sctx), GFP_KERNEL);
353 if (!sctx)
354 goto nomem;
355 refcount_set(&sctx->refs, 1);
356 sctx->is_dev_replace = is_dev_replace;
357 sctx->fs_info = fs_info;
358 sctx->extent_path.search_commit_root = 1;
359 sctx->extent_path.skip_locking = 1;
360 sctx->csum_path.search_commit_root = 1;
361 sctx->csum_path.skip_locking = 1;
362 for (i = 0; i < SCRUB_TOTAL_STRIPES; i++) {
363 int ret;
364
365 ret = init_scrub_stripe(fs_info, &sctx->stripes[i]);
366 if (ret < 0)
367 goto nomem;
368 sctx->stripes[i].sctx = sctx;
369 }
370 sctx->first_free = 0;
371 atomic_set(&sctx->cancel_req, 0);
372
373 spin_lock_init(&sctx->stat_lock);
374 sctx->throttle_deadline = 0;
375
376 mutex_init(&sctx->wr_lock);
377 if (is_dev_replace) {
378 WARN_ON(!fs_info->dev_replace.tgtdev);
379 sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
380 }
381
382 return sctx;
383
384 nomem:
385 scrub_free_ctx(sctx);
386 return ERR_PTR(-ENOMEM);
387 }
388
static int scrub_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
				     u64 root, void *warn_ctx)
391 {
392 u32 nlink;
393 int ret;
394 int i;
395 unsigned nofs_flag;
396 struct extent_buffer *eb;
397 struct btrfs_inode_item *inode_item;
398 struct scrub_warning *swarn = warn_ctx;
399 struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
400 struct inode_fs_paths *ipath = NULL;
401 struct btrfs_root *local_root;
402 struct btrfs_key key;
403
404 local_root = btrfs_get_fs_root(fs_info, root, true);
405 if (IS_ERR(local_root)) {
406 ret = PTR_ERR(local_root);
407 goto err;
408 }
409
410 /*
411 * this makes the path point to (inum INODE_ITEM ioff)
412 */
413 key.objectid = inum;
414 key.type = BTRFS_INODE_ITEM_KEY;
415 key.offset = 0;
416
417 ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
418 if (ret) {
419 btrfs_put_root(local_root);
420 btrfs_release_path(swarn->path);
421 goto err;
422 }
423
424 eb = swarn->path->nodes[0];
425 inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
426 struct btrfs_inode_item);
427 nlink = btrfs_inode_nlink(eb, inode_item);
428 btrfs_release_path(swarn->path);
429
430 /*
431 * init_path might indirectly call vmalloc, or use GFP_KERNEL. Scrub
432 * uses GFP_NOFS in this context, so we keep it consistent but it does
433 * not seem to be strictly necessary.
434 */
435 nofs_flag = memalloc_nofs_save();
436 ipath = init_ipath(4096, local_root, swarn->path);
437 memalloc_nofs_restore(nofs_flag);
438 if (IS_ERR(ipath)) {
439 btrfs_put_root(local_root);
440 ret = PTR_ERR(ipath);
441 ipath = NULL;
442 goto err;
443 }
444 ret = paths_from_inode(inum, ipath);
445
446 if (ret < 0)
447 goto err;
448
/*
 * We deliberately ignore the fact that ipath might have been too small
 * to hold all of the paths here.
 */
453 for (i = 0; i < ipath->fspath->elem_cnt; ++i)
454 btrfs_warn_in_rcu(fs_info,
455 "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %u, links %u (path: %s)",
456 swarn->errstr, swarn->logical,
457 btrfs_dev_name(swarn->dev),
458 swarn->physical,
459 root, inum, offset,
460 fs_info->sectorsize, nlink,
461 (char *)(unsigned long)ipath->fspath->val[i]);
462
463 btrfs_put_root(local_root);
464 free_ipath(ipath);
465 return 0;
466
467 err:
468 btrfs_warn_in_rcu(fs_info,
469 "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
470 swarn->errstr, swarn->logical,
471 btrfs_dev_name(swarn->dev),
472 swarn->physical,
473 root, inum, offset, ret);
474
475 free_ipath(ipath);
476 return 0;
477 }
478
static void scrub_print_common_warning(const char *errstr, struct btrfs_device *dev,
				       bool is_super, u64 logical, u64 physical)
481 {
482 struct btrfs_fs_info *fs_info = dev->fs_info;
483 struct btrfs_path *path;
484 struct btrfs_key found_key;
485 struct extent_buffer *eb;
486 struct btrfs_extent_item *ei;
487 struct scrub_warning swarn;
488 u64 flags = 0;
489 u32 item_size;
490 int ret;
491
492 /* Super block error, no need to search extent tree. */
493 if (is_super) {
494 btrfs_warn_in_rcu(fs_info, "%s on device %s, physical %llu",
495 errstr, btrfs_dev_name(dev), physical);
496 return;
497 }
498 path = btrfs_alloc_path();
499 if (!path)
500 return;
501
502 swarn.physical = physical;
503 swarn.logical = logical;
504 swarn.errstr = errstr;
505 swarn.dev = NULL;
506
507 ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
508 &flags);
509 if (ret < 0)
510 goto out;
511
512 swarn.extent_item_size = found_key.offset;
513
514 eb = path->nodes[0];
515 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
516 item_size = btrfs_item_size(eb, path->slots[0]);
517
518 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
519 unsigned long ptr = 0;
520 u8 ref_level;
521 u64 ref_root;
522
523 while (true) {
524 ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
525 item_size, &ref_root,
526 &ref_level);
527 if (ret < 0) {
528 btrfs_warn(fs_info,
529 "failed to resolve tree backref for logical %llu: %d",
530 swarn.logical, ret);
531 break;
532 }
533 if (ret > 0)
534 break;
535 btrfs_warn_in_rcu(fs_info,
536 "%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
537 errstr, swarn.logical, btrfs_dev_name(dev),
538 swarn.physical, (ref_level ? "node" : "leaf"),
539 ref_level, ref_root);
540 }
541 btrfs_release_path(path);
542 } else {
543 struct btrfs_backref_walk_ctx ctx = { 0 };
544
545 btrfs_release_path(path);
546
547 ctx.bytenr = found_key.objectid;
548 ctx.extent_item_pos = swarn.logical - found_key.objectid;
549 ctx.fs_info = fs_info;
550
551 swarn.path = path;
552 swarn.dev = dev;
553
554 iterate_extent_inodes(&ctx, true, scrub_print_warning_inode, &swarn);
555 }
556
557 out:
558 btrfs_free_path(path);
559 }
560
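/*
 * For zoned dev-replace targets: if the target zone at @physical is
 * sequential and the current write pointer lags behind @physical, zero-fill
 * the gap so the next write lands exactly at @physical.
 */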
static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical)
562 {
563 int ret = 0;
564 u64 length;
565
566 if (!btrfs_is_zoned(sctx->fs_info))
567 return 0;
568
569 if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical))
570 return 0;
571
572 if (sctx->write_pointer < physical) {
573 length = physical - sctx->write_pointer;
574
575 ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev,
576 sctx->write_pointer, length);
577 if (!ret)
578 sctx->write_pointer = physical;
579 }
580 return ret;
581 }
582
static struct page *scrub_stripe_get_page(struct scrub_stripe *stripe, int sector_nr)
584 {
585 struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
586 int page_index = (sector_nr << fs_info->sectorsize_bits) >> PAGE_SHIFT;
587
588 return stripe->pages[page_index];
589 }
590
static unsigned int scrub_stripe_get_page_offset(struct scrub_stripe *stripe,
						 int sector_nr)
593 {
594 struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
595
596 return offset_in_page(sector_nr << fs_info->sectorsize_bits);
597 }
598
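/*
 * Verify one tree block inside the stripe: check the bytenr, fsid, chunk
 * tree uuid, checksum and generation, updating the stripe error bitmaps
 * accordingly.
 */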
static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr)
600 {
601 struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
602 const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
603 const u64 logical = stripe->logical + (sector_nr << fs_info->sectorsize_bits);
604 const struct page *first_page = scrub_stripe_get_page(stripe, sector_nr);
605 const unsigned int first_off = scrub_stripe_get_page_offset(stripe, sector_nr);
606 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
607 u8 on_disk_csum[BTRFS_CSUM_SIZE];
608 u8 calculated_csum[BTRFS_CSUM_SIZE];
609 struct btrfs_header *header;
610
611 /*
612 * Here we don't have a good way to attach the pages (and subpages)
613 * to a dummy extent buffer, thus we have to directly grab the members
614 * from pages.
615 */
616 header = (struct btrfs_header *)(page_address(first_page) + first_off);
617 memcpy(on_disk_csum, header->csum, fs_info->csum_size);
618
619 if (logical != btrfs_stack_header_bytenr(header)) {
620 bitmap_set(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree);
621 bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
622 btrfs_warn_rl(fs_info,
623 "tree block %llu mirror %u has bad bytenr, has %llu want %llu",
624 logical, stripe->mirror_num,
625 btrfs_stack_header_bytenr(header), logical);
626 return;
627 }
628 if (memcmp(header->fsid, fs_info->fs_devices->metadata_uuid,
629 BTRFS_FSID_SIZE) != 0) {
630 bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
631 bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
632 btrfs_warn_rl(fs_info,
633 "tree block %llu mirror %u has bad fsid, has %pU want %pU",
634 logical, stripe->mirror_num,
635 header->fsid, fs_info->fs_devices->fsid);
636 return;
637 }
638 if (memcmp(header->chunk_tree_uuid, fs_info->chunk_tree_uuid,
639 BTRFS_UUID_SIZE) != 0) {
640 bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
641 bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
642 btrfs_warn_rl(fs_info,
643 "tree block %llu mirror %u has bad chunk tree uuid, has %pU want %pU",
644 logical, stripe->mirror_num,
645 header->chunk_tree_uuid, fs_info->chunk_tree_uuid);
646 return;
647 }
648
649 /* Now check tree block csum. */
650 shash->tfm = fs_info->csum_shash;
651 crypto_shash_init(shash);
652 crypto_shash_update(shash, page_address(first_page) + first_off +
653 BTRFS_CSUM_SIZE, fs_info->sectorsize - BTRFS_CSUM_SIZE);
654
655 for (int i = sector_nr + 1; i < sector_nr + sectors_per_tree; i++) {
656 struct page *page = scrub_stripe_get_page(stripe, i);
657 unsigned int page_off = scrub_stripe_get_page_offset(stripe, i);
658
659 crypto_shash_update(shash, page_address(page) + page_off,
660 fs_info->sectorsize);
661 }
662
663 crypto_shash_final(shash, calculated_csum);
664 if (memcmp(calculated_csum, on_disk_csum, fs_info->csum_size) != 0) {
665 bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
666 bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
667 btrfs_warn_rl(fs_info,
668 "tree block %llu mirror %u has bad csum, has " CSUM_FMT " want " CSUM_FMT,
669 logical, stripe->mirror_num,
670 CSUM_FMT_VALUE(fs_info->csum_size, on_disk_csum),
671 CSUM_FMT_VALUE(fs_info->csum_size, calculated_csum));
672 return;
673 }
674 if (stripe->sectors[sector_nr].generation !=
675 btrfs_stack_header_generation(header)) {
676 bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
677 bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
678 btrfs_warn_rl(fs_info,
679 "tree block %llu mirror %u has bad generation, has %llu want %llu",
680 logical, stripe->mirror_num,
681 btrfs_stack_header_generation(header),
682 stripe->sectors[sector_nr].generation);
683 return;
684 }
685 bitmap_clear(&stripe->error_bitmap, sector_nr, sectors_per_tree);
686 bitmap_clear(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree);
687 bitmap_clear(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
688 }
689
static void scrub_verify_one_sector(struct scrub_stripe *stripe, int sector_nr)
691 {
692 struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
693 struct scrub_sector_verification *sector = &stripe->sectors[sector_nr];
694 const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
695 struct page *page = scrub_stripe_get_page(stripe, sector_nr);
696 unsigned int pgoff = scrub_stripe_get_page_offset(stripe, sector_nr);
697 u8 csum_buf[BTRFS_CSUM_SIZE];
698 int ret;
699
700 ASSERT(sector_nr >= 0 && sector_nr < stripe->nr_sectors);
701
702 /* Sector not utilized, skip it. */
703 if (!test_bit(sector_nr, &stripe->extent_sector_bitmap))
704 return;
705
706 /* IO error, no need to check. */
707 if (test_bit(sector_nr, &stripe->io_error_bitmap))
708 return;
709
710 /* Metadata, verify the full tree block. */
711 if (sector->is_metadata) {
/*
 * Check if the tree block crosses the stripe boundary.  If it
 * crosses the boundary, we cannot verify it but only give a
 * warning.
 *
 * This can only happen on a very old filesystem where chunks
 * are not ensured to be stripe aligned.
 */
720 if (unlikely(sector_nr + sectors_per_tree > stripe->nr_sectors)) {
721 btrfs_warn_rl(fs_info,
722 "tree block at %llu crosses stripe boundary %llu",
723 stripe->logical +
724 (sector_nr << fs_info->sectorsize_bits),
725 stripe->logical);
726 return;
727 }
728 scrub_verify_one_metadata(stripe, sector_nr);
729 return;
730 }
731
732 /*
733 * Data is easier, we just verify the data csum (if we have it). For
734 * cases without csum, we have no other choice but to trust it.
735 */
736 if (!sector->csum) {
737 clear_bit(sector_nr, &stripe->error_bitmap);
738 return;
739 }
740
741 ret = btrfs_check_sector_csum(fs_info, page, pgoff, csum_buf, sector->csum);
742 if (ret < 0) {
743 set_bit(sector_nr, &stripe->csum_error_bitmap);
744 set_bit(sector_nr, &stripe->error_bitmap);
745 } else {
746 clear_bit(sector_nr, &stripe->csum_error_bitmap);
747 clear_bit(sector_nr, &stripe->error_bitmap);
748 }
749 }
750
751 /* Verify specified sectors of a stripe. */
static void scrub_verify_one_stripe(struct scrub_stripe *stripe, unsigned long bitmap)
753 {
754 struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
755 const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
756 int sector_nr;
757
758 for_each_set_bit(sector_nr, &bitmap, stripe->nr_sectors) {
759 scrub_verify_one_sector(stripe, sector_nr);
760 if (stripe->sectors[sector_nr].is_metadata)
761 sector_nr += sectors_per_tree - 1;
762 }
763 }
764
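/*
 * Return the sector number inside the stripe whose page and offset match the
 * first bvec of the given bio.
 */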
static int calc_sector_number(struct scrub_stripe *stripe, struct bio_vec *first_bvec)
766 {
767 int i;
768
769 for (i = 0; i < stripe->nr_sectors; i++) {
770 if (scrub_stripe_get_page(stripe, i) == first_bvec->bv_page &&
771 scrub_stripe_get_page_offset(stripe, i) == first_bvec->bv_offset)
772 break;
773 }
774 ASSERT(i < stripe->nr_sectors);
775 return i;
776 }
777
/*
 * Repair read is different from the regular read:
 *
 * - Only reads the failed sectors
 * - May have extra blocksize limits
 */
static void scrub_repair_read_endio(struct btrfs_bio *bbio)
785 {
786 struct scrub_stripe *stripe = bbio->private;
787 struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
788 struct bio_vec *bvec;
789 int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
790 u32 bio_size = 0;
791 int i;
792
793 ASSERT(sector_nr < stripe->nr_sectors);
794
795 bio_for_each_bvec_all(bvec, &bbio->bio, i)
796 bio_size += bvec->bv_len;
797
798 if (bbio->bio.bi_status) {
799 bitmap_set(&stripe->io_error_bitmap, sector_nr,
800 bio_size >> fs_info->sectorsize_bits);
801 bitmap_set(&stripe->error_bitmap, sector_nr,
802 bio_size >> fs_info->sectorsize_bits);
803 } else {
804 bitmap_clear(&stripe->io_error_bitmap, sector_nr,
805 bio_size >> fs_info->sectorsize_bits);
806 }
807 bio_put(&bbio->bio);
808 if (atomic_dec_and_test(&stripe->pending_io))
809 wake_up(&stripe->io_wait);
810 }
811
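/* Return the next mirror number, wrapping back to mirror 1 after the last copy. */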
static int calc_next_mirror(int mirror, int num_copies)
813 {
814 ASSERT(mirror <= num_copies);
815 return (mirror + 1 > num_copies) ? 1 : mirror + 1;
816 }
817
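/*
 * Re-read only the failed sectors of the stripe from mirror @mirror, merging
 * contiguous failed sectors into bios of at most @blocksize bytes.  If @wait
 * is true, wait for each submitted bio to finish.
 */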
static void scrub_stripe_submit_repair_read(struct scrub_stripe *stripe,
					    int mirror, int blocksize, bool wait)
820 {
821 struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
822 struct btrfs_bio *bbio = NULL;
823 const unsigned long old_error_bitmap = stripe->error_bitmap;
824 int i;
825
826 ASSERT(stripe->mirror_num >= 1);
827 ASSERT(atomic_read(&stripe->pending_io) == 0);
828
829 for_each_set_bit(i, &old_error_bitmap, stripe->nr_sectors) {
830 struct page *page;
831 int pgoff;
832 int ret;
833
834 page = scrub_stripe_get_page(stripe, i);
835 pgoff = scrub_stripe_get_page_offset(stripe, i);
836
837 /* The current sector cannot be merged, submit the bio. */
838 if (bbio && ((i > 0 && !test_bit(i - 1, &stripe->error_bitmap)) ||
839 bbio->bio.bi_iter.bi_size >= blocksize)) {
840 ASSERT(bbio->bio.bi_iter.bi_size);
841 atomic_inc(&stripe->pending_io);
842 btrfs_submit_bio(bbio, mirror);
843 if (wait)
844 wait_scrub_stripe_io(stripe);
845 bbio = NULL;
846 }
847
848 if (!bbio) {
849 bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ,
850 fs_info, scrub_repair_read_endio, stripe);
851 bbio->bio.bi_iter.bi_sector = (stripe->logical +
852 (i << fs_info->sectorsize_bits)) >> SECTOR_SHIFT;
853 }
854
855 ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
856 ASSERT(ret == fs_info->sectorsize);
857 }
858 if (bbio) {
859 ASSERT(bbio->bio.bi_iter.bi_size);
860 atomic_inc(&stripe->pending_io);
861 btrfs_submit_bio(bbio, mirror);
862 if (wait)
863 wait_scrub_stripe_io(stripe);
864 }
865 }
866
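/*
 * Report the repaired and unrepaired errors of the stripe to the kernel log
 * (rate limited) and update the scrub statistics in @sctx.
 */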
static void scrub_stripe_report_errors(struct scrub_ctx *sctx,
				       struct scrub_stripe *stripe)
869 {
870 static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
871 DEFAULT_RATELIMIT_BURST);
872 struct btrfs_fs_info *fs_info = sctx->fs_info;
873 struct btrfs_device *dev = NULL;
874 u64 physical = 0;
875 int nr_data_sectors = 0;
876 int nr_meta_sectors = 0;
877 int nr_nodatacsum_sectors = 0;
878 int nr_repaired_sectors = 0;
879 int sector_nr;
880
881 if (test_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state))
882 return;
883
/*
 * Init the needed info for error reporting.
 *
 * Although our scrub_stripe infrastructure is mostly based on btrfs_submit_bio()
 * and thus has no need for dev/physical, error reporting still needs them.
 */
890 if (!bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors)) {
891 u64 mapped_len = fs_info->sectorsize;
892 struct btrfs_io_context *bioc = NULL;
893 int stripe_index = stripe->mirror_num - 1;
894 int ret;
895
896 /* For scrub, our mirror_num should always start at 1. */
897 ASSERT(stripe->mirror_num >= 1);
898 ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
899 stripe->logical, &mapped_len, &bioc,
900 NULL, NULL, 1);
901 /*
902 * If we failed, dev will be NULL, and later detailed reports
903 * will just be skipped.
904 */
905 if (ret < 0)
906 goto skip;
907 physical = bioc->stripes[stripe_index].physical;
908 dev = bioc->stripes[stripe_index].dev;
909 btrfs_put_bioc(bioc);
910 }
911
912 skip:
913 for_each_set_bit(sector_nr, &stripe->extent_sector_bitmap, stripe->nr_sectors) {
914 bool repaired = false;
915
916 if (stripe->sectors[sector_nr].is_metadata) {
917 nr_meta_sectors++;
918 } else {
919 nr_data_sectors++;
920 if (!stripe->sectors[sector_nr].csum)
921 nr_nodatacsum_sectors++;
922 }
923
924 if (test_bit(sector_nr, &stripe->init_error_bitmap) &&
925 !test_bit(sector_nr, &stripe->error_bitmap)) {
926 nr_repaired_sectors++;
927 repaired = true;
928 }
929
/* Good sector from the beginning, nothing needs to be done. */
931 if (!test_bit(sector_nr, &stripe->init_error_bitmap))
932 continue;
933
/*
 * Report an error for the corrupted sectors.  If repaired, just
 * output a message saying it has been repaired.
 */
938 if (repaired) {
939 if (dev) {
940 btrfs_err_rl_in_rcu(fs_info,
941 "fixed up error at logical %llu on dev %s physical %llu",
942 stripe->logical, btrfs_dev_name(dev),
943 physical);
944 } else {
945 btrfs_err_rl_in_rcu(fs_info,
946 "fixed up error at logical %llu on mirror %u",
947 stripe->logical, stripe->mirror_num);
948 }
949 continue;
950 }
951
952 /* The remaining are all for unrepaired. */
953 if (dev) {
954 btrfs_err_rl_in_rcu(fs_info,
955 "unable to fixup (regular) error at logical %llu on dev %s physical %llu",
956 stripe->logical, btrfs_dev_name(dev),
957 physical);
958 } else {
959 btrfs_err_rl_in_rcu(fs_info,
960 "unable to fixup (regular) error at logical %llu on mirror %u",
961 stripe->logical, stripe->mirror_num);
962 }
963
964 if (test_bit(sector_nr, &stripe->io_error_bitmap))
965 if (__ratelimit(&rs) && dev)
966 scrub_print_common_warning("i/o error", dev, false,
967 stripe->logical, physical);
968 if (test_bit(sector_nr, &stripe->csum_error_bitmap))
969 if (__ratelimit(&rs) && dev)
970 scrub_print_common_warning("checksum error", dev, false,
971 stripe->logical, physical);
972 if (test_bit(sector_nr, &stripe->meta_error_bitmap))
973 if (__ratelimit(&rs) && dev)
974 scrub_print_common_warning("header error", dev, false,
975 stripe->logical, physical);
976 }
977
978 spin_lock(&sctx->stat_lock);
979 sctx->stat.data_extents_scrubbed += stripe->nr_data_extents;
980 sctx->stat.tree_extents_scrubbed += stripe->nr_meta_extents;
981 sctx->stat.data_bytes_scrubbed += nr_data_sectors << fs_info->sectorsize_bits;
982 sctx->stat.tree_bytes_scrubbed += nr_meta_sectors << fs_info->sectorsize_bits;
983 sctx->stat.no_csum += nr_nodatacsum_sectors;
984 sctx->stat.read_errors += stripe->init_nr_io_errors;
985 sctx->stat.csum_errors += stripe->init_nr_csum_errors;
986 sctx->stat.verify_errors += stripe->init_nr_meta_errors;
987 sctx->stat.uncorrectable_errors +=
988 bitmap_weight(&stripe->error_bitmap, stripe->nr_sectors);
989 sctx->stat.corrected_errors += nr_repaired_sectors;
990 spin_unlock(&sctx->stat_lock);
991 }
992
993 static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *stripe,
994 unsigned long write_bitmap, bool dev_replace);
995
/*
 * The main entry point for all read-related scrub work, including:
 *
 * - Wait for the initial read to finish
 * - Verify and locate any bad sectors
 * - Go through the remaining mirrors and try to read in as large a
 *   blocksize as possible
 * - Go through all mirrors (including the failed mirror) sector-by-sector
 * - Submit writeback for repaired sectors
 *
 * Writeback for dev-replace does not happen here, as it needs extra
 * synchronization for zoned devices.
 */
static void scrub_stripe_read_repair_worker(struct work_struct *work)
1010 {
1011 struct scrub_stripe *stripe = container_of(work, struct scrub_stripe, work);
1012 struct scrub_ctx *sctx = stripe->sctx;
1013 struct btrfs_fs_info *fs_info = sctx->fs_info;
1014 int num_copies = btrfs_num_copies(fs_info, stripe->bg->start,
1015 stripe->bg->length);
1016 int mirror;
1017 int i;
1018
1019 ASSERT(stripe->mirror_num > 0);
1020
1021 wait_scrub_stripe_io(stripe);
1022 scrub_verify_one_stripe(stripe, stripe->extent_sector_bitmap);
1023 /* Save the initial failed bitmap for later repair and report usage. */
1024 stripe->init_error_bitmap = stripe->error_bitmap;
1025 stripe->init_nr_io_errors = bitmap_weight(&stripe->io_error_bitmap,
1026 stripe->nr_sectors);
1027 stripe->init_nr_csum_errors = bitmap_weight(&stripe->csum_error_bitmap,
1028 stripe->nr_sectors);
1029 stripe->init_nr_meta_errors = bitmap_weight(&stripe->meta_error_bitmap,
1030 stripe->nr_sectors);
1031
1032 if (bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors))
1033 goto out;
1034
1035 /*
1036 * Try all remaining mirrors.
1037 *
1038 * Here we still try to read as large block as possible, as this is
1039 * faster and we have extra safety nets to rely on.
1040 */
1041 for (mirror = calc_next_mirror(stripe->mirror_num, num_copies);
1042 mirror != stripe->mirror_num;
1043 mirror = calc_next_mirror(mirror, num_copies)) {
1044 const unsigned long old_error_bitmap = stripe->error_bitmap;
1045
1046 scrub_stripe_submit_repair_read(stripe, mirror,
1047 BTRFS_STRIPE_LEN, false);
1048 wait_scrub_stripe_io(stripe);
1049 scrub_verify_one_stripe(stripe, old_error_bitmap);
1050 if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
1051 goto out;
1052 }
1053
/*
 * Last safety net, try re-checking all mirrors, including the failed
 * one, sector-by-sector.
 *
 * If one sector fails the drive's internal csum, the whole read
 * containing the offending sector would be marked as an error.
 * Thus here we do sector-by-sector reads.
 *
 * This can be slow, thus we only try it as the last resort.
 */
1064
1065 for (i = 0, mirror = stripe->mirror_num;
1066 i < num_copies;
1067 i++, mirror = calc_next_mirror(mirror, num_copies)) {
1068 const unsigned long old_error_bitmap = stripe->error_bitmap;
1069
1070 scrub_stripe_submit_repair_read(stripe, mirror,
1071 fs_info->sectorsize, true);
1072 wait_scrub_stripe_io(stripe);
1073 scrub_verify_one_stripe(stripe, old_error_bitmap);
1074 if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
1075 goto out;
1076 }
1077 out:
1078 /*
1079 * Submit the repaired sectors. For zoned case, we cannot do repair
1080 * in-place, but queue the bg to be relocated.
1081 */
1082 if (btrfs_is_zoned(fs_info)) {
1083 if (!bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
1084 btrfs_repair_one_zone(fs_info, sctx->stripes[0].bg->start);
1085 } else if (!sctx->readonly) {
1086 unsigned long repaired;
1087
1088 bitmap_andnot(&repaired, &stripe->init_error_bitmap,
1089 &stripe->error_bitmap, stripe->nr_sectors);
1090 scrub_write_sectors(sctx, stripe, repaired, false);
1091 wait_scrub_stripe_io(stripe);
1092 }
1093
1094 scrub_stripe_report_errors(sctx, stripe);
1095 set_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state);
1096 wake_up(&stripe->repair_wait);
1097 }
1098
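/*
 * Endio for the initial stripe read.  On error, mark every sector as having
 * an IO error; once all pending bios complete, queue the read-repair worker.
 */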
static void scrub_read_endio(struct btrfs_bio *bbio)
1100 {
1101 struct scrub_stripe *stripe = bbio->private;
1102
1103 if (bbio->bio.bi_status) {
1104 bitmap_set(&stripe->io_error_bitmap, 0, stripe->nr_sectors);
1105 bitmap_set(&stripe->error_bitmap, 0, stripe->nr_sectors);
1106 } else {
1107 bitmap_clear(&stripe->io_error_bitmap, 0, stripe->nr_sectors);
1108 }
1109 bio_put(&bbio->bio);
1110 if (atomic_dec_and_test(&stripe->pending_io)) {
1111 wake_up(&stripe->io_wait);
1112 INIT_WORK(&stripe->work, scrub_stripe_read_repair_worker);
1113 queue_work(stripe->bg->fs_info->scrub_workers, &stripe->work);
1114 }
1115 }
1116
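/*
 * Endio for repair/replace writeback.  Record failed sectors in
 * write_error_bitmap and wake up waiters once all pending writes finish.
 */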
static void scrub_write_endio(struct btrfs_bio *bbio)
1118 {
1119 struct scrub_stripe *stripe = bbio->private;
1120 struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
1121 struct bio_vec *bvec;
1122 int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
1123 u32 bio_size = 0;
1124 int i;
1125
1126 bio_for_each_bvec_all(bvec, &bbio->bio, i)
1127 bio_size += bvec->bv_len;
1128
1129 if (bbio->bio.bi_status) {
1130 unsigned long flags;
1131
1132 spin_lock_irqsave(&stripe->write_error_lock, flags);
1133 bitmap_set(&stripe->write_error_bitmap, sector_nr,
1134 bio_size >> fs_info->sectorsize_bits);
1135 spin_unlock_irqrestore(&stripe->write_error_lock, flags);
1136 }
1137 bio_put(&bbio->bio);
1138
1139 if (atomic_dec_and_test(&stripe->pending_io))
1140 wake_up(&stripe->io_wait);
1141 }
1142
static void scrub_submit_write_bio(struct scrub_ctx *sctx,
				   struct scrub_stripe *stripe,
				   struct btrfs_bio *bbio, bool dev_replace)
1146 {
1147 struct btrfs_fs_info *fs_info = sctx->fs_info;
1148 u32 bio_len = bbio->bio.bi_iter.bi_size;
1149 u32 bio_off = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT) -
1150 stripe->logical;
1151
1152 fill_writer_pointer_gap(sctx, stripe->physical + bio_off);
1153 atomic_inc(&stripe->pending_io);
1154 btrfs_submit_repair_write(bbio, stripe->mirror_num, dev_replace);
1155 if (!btrfs_is_zoned(fs_info))
1156 return;
1157 /*
1158 * For zoned writeback, queue depth must be 1, thus we must wait for
1159 * the write to finish before the next write.
1160 */
1161 wait_scrub_stripe_io(stripe);
1162
1163 /*
1164 * And also need to update the write pointer if write finished
1165 * successfully.
1166 */
1167 if (!test_bit(bio_off >> fs_info->sectorsize_bits,
1168 &stripe->write_error_bitmap))
1169 sctx->write_pointer += bio_len;
1170 }
1171
1172 /*
1173 * Submit the write bio(s) for the sectors specified by @write_bitmap.
1174 *
1175 * Here we utilize btrfs_submit_repair_write(), which has some extra benefits:
1176 *
1177 * - Only needs logical bytenr and mirror_num
1178 * Just like the scrub read path
1179 *
1180 * - Would only result in writes to the specified mirror
1181 * Unlike the regular writeback path, which would write back to all stripes
1182 *
1183 * - Handle dev-replace and read-repair writeback differently
1184 */
static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *stripe,
				unsigned long write_bitmap, bool dev_replace)
1187 {
1188 struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
1189 struct btrfs_bio *bbio = NULL;
1190 int sector_nr;
1191
1192 for_each_set_bit(sector_nr, &write_bitmap, stripe->nr_sectors) {
1193 struct page *page = scrub_stripe_get_page(stripe, sector_nr);
1194 unsigned int pgoff = scrub_stripe_get_page_offset(stripe, sector_nr);
1195 int ret;
1196
1197 /* We should only writeback sectors covered by an extent. */
1198 ASSERT(test_bit(sector_nr, &stripe->extent_sector_bitmap));
1199
1200 /* Cannot merge with previous sector, submit the current one. */
1201 if (bbio && sector_nr && !test_bit(sector_nr - 1, &write_bitmap)) {
1202 scrub_submit_write_bio(sctx, stripe, bbio, dev_replace);
1203 bbio = NULL;
1204 }
1205 if (!bbio) {
1206 bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_WRITE,
1207 fs_info, scrub_write_endio, stripe);
1208 bbio->bio.bi_iter.bi_sector = (stripe->logical +
1209 (sector_nr << fs_info->sectorsize_bits)) >>
1210 SECTOR_SHIFT;
1211 }
1212 ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
1213 ASSERT(ret == fs_info->sectorsize);
1214 }
1215 if (bbio)
1216 scrub_submit_write_bio(sctx, stripe, bbio, dev_replace);
1217 }
1218
/*
 * Throttling of IO submission, bandwidth-limit based, the timeslice is 1
 * second.  Limit can be set via /sys/fs/btrfs/<UUID>/devinfo/<devid>/scrub_speed_max.
 */
static void scrub_throttle_dev_io(struct scrub_ctx *sctx, struct btrfs_device *device,
				  unsigned int bio_size)
1225 {
1226 const int time_slice = 1000;
1227 s64 delta;
1228 ktime_t now;
1229 u32 div;
1230 u64 bwlimit;
1231
1232 bwlimit = READ_ONCE(device->scrub_speed_max);
1233 if (bwlimit == 0)
1234 return;
1235
1236 /*
1237 * Slice is divided into intervals when the IO is submitted, adjust by
1238 * bwlimit and maximum of 64 intervals.
1239 */
1240 div = max_t(u32, 1, (u32)(bwlimit / (16 * 1024 * 1024)));
1241 div = min_t(u32, 64, div);
1242
1243 /* Start new epoch, set deadline */
1244 now = ktime_get();
1245 if (sctx->throttle_deadline == 0) {
1246 sctx->throttle_deadline = ktime_add_ms(now, time_slice / div);
1247 sctx->throttle_sent = 0;
1248 }
1249
1250 /* Still in the time to send? */
1251 if (ktime_before(now, sctx->throttle_deadline)) {
1252 /* If current bio is within the limit, send it */
1253 sctx->throttle_sent += bio_size;
1254 if (sctx->throttle_sent <= div_u64(bwlimit, div))
1255 return;
1256
1257 /* We're over the limit, sleep until the rest of the slice */
1258 delta = ktime_ms_delta(sctx->throttle_deadline, now);
1259 } else {
1260 /* New request after deadline, start new epoch */
1261 delta = 0;
1262 }
1263
1264 if (delta) {
1265 long timeout;
1266
1267 timeout = div_u64(delta * HZ, 1000);
1268 schedule_timeout_interruptible(timeout);
1269 }
1270
1271 /* Next call will start the deadline period */
1272 sctx->throttle_deadline = 0;
1273 }
1274
/*
 * Given a physical address, this will calculate its logical offset.
 * If this is a parity stripe, it will return the leftmost data stripe's
 * logical offset.
 *
 * Return 0 if it is a data stripe, 1 means parity stripe.
 */
static int get_raid56_logic_offset(u64 physical, int num,
				   struct map_lookup *map, u64 *offset,
				   u64 *stripe_start)
1285 {
1286 int i;
1287 int j = 0;
1288 u64 last_offset;
1289 const int data_stripes = nr_data_stripes(map);
1290
1291 last_offset = (physical - map->stripes[num].physical) * data_stripes;
1292 if (stripe_start)
1293 *stripe_start = last_offset;
1294
1295 *offset = last_offset;
1296 for (i = 0; i < data_stripes; i++) {
1297 u32 stripe_nr;
1298 u32 stripe_index;
1299 u32 rot;
1300
1301 *offset = last_offset + btrfs_stripe_nr_to_offset(i);
1302
1303 stripe_nr = (u32)(*offset >> BTRFS_STRIPE_LEN_SHIFT) / data_stripes;
1304
1305 /* Work out the disk rotation on this stripe-set */
1306 rot = stripe_nr % map->num_stripes;
1307 /* calculate which stripe this data locates */
1308 rot += i;
1309 stripe_index = rot % map->num_stripes;
1310 if (stripe_index == num)
1311 return 0;
1312 if (stripe_index < num)
1313 j++;
1314 }
1315 *offset = last_offset + btrfs_stripe_nr_to_offset(j);
1316 return 1;
1317 }
1318
/*
 * Return 0 if the extent item range covers any byte of the search range.
 * Return <0 if the extent item is before @search_start.
 * Return >0 if the extent item is after @search_start + @search_len.
 */
static int compare_extent_item_range(struct btrfs_path *path,
				     u64 search_start, u64 search_len)
1326 {
1327 struct btrfs_fs_info *fs_info = path->nodes[0]->fs_info;
1328 u64 len;
1329 struct btrfs_key key;
1330
1331 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1332 ASSERT(key.type == BTRFS_EXTENT_ITEM_KEY ||
1333 key.type == BTRFS_METADATA_ITEM_KEY);
1334 if (key.type == BTRFS_METADATA_ITEM_KEY)
1335 len = fs_info->nodesize;
1336 else
1337 len = key.offset;
1338
1339 if (key.objectid + len <= search_start)
1340 return -1;
1341 if (key.objectid >= search_start + search_len)
1342 return 1;
1343 return 0;
1344 }
1345
/*
 * Locate one extent item which covers any byte in range
 * [@search_start, @search_start + @search_len)
 *
 * If the path is not initialized, we will initialize the search by doing
 * a btrfs_search_slot().
 * If the path is already initialized, we will use the path as the initial
 * slot, to avoid duplicated btrfs_search_slot() calls.
 *
 * NOTE: If an extent item starts before @search_start, we will still
 * return the extent item.  This is for data extents crossing the stripe
 * boundary.
 *
 * Return 0 if we found such an extent item, and @path will point to the
 * extent item.
 * Return >0 if no such extent item can be found, and @path will be released.
 * Return <0 if we hit a fatal error, and @path will be released.
 */
static int find_first_extent_item(struct btrfs_root *extent_root,
				  struct btrfs_path *path,
				  u64 search_start, u64 search_len)
1365 {
1366 struct btrfs_fs_info *fs_info = extent_root->fs_info;
1367 struct btrfs_key key;
1368 int ret;
1369
1370 /* Continue using the existing path */
1371 if (path->nodes[0])
1372 goto search_forward;
1373
1374 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1375 key.type = BTRFS_METADATA_ITEM_KEY;
1376 else
1377 key.type = BTRFS_EXTENT_ITEM_KEY;
1378 key.objectid = search_start;
1379 key.offset = (u64)-1;
1380
1381 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
1382 if (ret < 0)
1383 return ret;
1384
1385 ASSERT(ret > 0);
1386 /*
1387 * Here we intentionally pass 0 as @min_objectid, as there could be
1388 * an extent item starting before @search_start.
1389 */
1390 ret = btrfs_previous_extent_item(extent_root, path, 0);
1391 if (ret < 0)
1392 return ret;
1393 /*
1394 * No matter whether we have found an extent item, the next loop will
1395 * properly do every check on the key.
1396 */
1397 search_forward:
1398 while (true) {
1399 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1400 if (key.objectid >= search_start + search_len)
1401 break;
1402 if (key.type != BTRFS_METADATA_ITEM_KEY &&
1403 key.type != BTRFS_EXTENT_ITEM_KEY)
1404 goto next;
1405
1406 ret = compare_extent_item_range(path, search_start, search_len);
1407 if (ret == 0)
1408 return ret;
1409 if (ret > 0)
1410 break;
1411 next:
1412 path->slots[0]++;
1413 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
1414 ret = btrfs_next_leaf(extent_root, path);
1415 if (ret) {
1416 /* Either no more item or fatal error */
1417 btrfs_release_path(path);
1418 return ret;
1419 }
1420 }
1421 }
1422 btrfs_release_path(path);
1423 return 1;
1424 }
1425
static void get_extent_info(struct btrfs_path *path, u64 *extent_start_ret,
			    u64 *size_ret, u64 *flags_ret, u64 *generation_ret)
1428 {
1429 struct btrfs_key key;
1430 struct btrfs_extent_item *ei;
1431
1432 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1433 ASSERT(key.type == BTRFS_METADATA_ITEM_KEY ||
1434 key.type == BTRFS_EXTENT_ITEM_KEY);
1435 *extent_start_ret = key.objectid;
1436 if (key.type == BTRFS_METADATA_ITEM_KEY)
1437 *size_ret = path->nodes[0]->fs_info->nodesize;
1438 else
1439 *size_ret = key.offset;
1440 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_extent_item);
1441 *flags_ret = btrfs_extent_flags(path->nodes[0], ei);
1442 *generation_ret = btrfs_extent_generation(path->nodes[0], ei);
1443 }
1444
static int sync_write_pointer_for_zoned(struct scrub_ctx *sctx, u64 logical,
					u64 physical, u64 physical_end)
1447 {
1448 struct btrfs_fs_info *fs_info = sctx->fs_info;
1449 int ret = 0;
1450
1451 if (!btrfs_is_zoned(fs_info))
1452 return 0;
1453
1454 mutex_lock(&sctx->wr_lock);
1455 if (sctx->write_pointer < physical_end) {
1456 ret = btrfs_sync_zone_write_pointer(sctx->wr_tgtdev, logical,
1457 physical,
1458 sctx->write_pointer);
1459 if (ret)
1460 btrfs_err(fs_info,
1461 "zoned: failed to recover write pointer");
1462 }
1463 mutex_unlock(&sctx->wr_lock);
1464 btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical);
1465
1466 return ret;
1467 }
1468
static void fill_one_extent_info(struct btrfs_fs_info *fs_info,
				 struct scrub_stripe *stripe,
				 u64 extent_start, u64 extent_len,
				 u64 extent_flags, u64 extent_gen)
1473 {
1474 for (u64 cur_logical = max(stripe->logical, extent_start);
1475 cur_logical < min(stripe->logical + BTRFS_STRIPE_LEN,
1476 extent_start + extent_len);
1477 cur_logical += fs_info->sectorsize) {
1478 const int nr_sector = (cur_logical - stripe->logical) >>
1479 fs_info->sectorsize_bits;
1480 struct scrub_sector_verification *sector =
1481 &stripe->sectors[nr_sector];
1482
1483 set_bit(nr_sector, &stripe->extent_sector_bitmap);
1484 if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1485 sector->is_metadata = true;
1486 sector->generation = extent_gen;
1487 }
1488 }
1489 }
1490
static void scrub_stripe_reset_bitmaps(struct scrub_stripe *stripe)
1492 {
1493 stripe->extent_sector_bitmap = 0;
1494 stripe->init_error_bitmap = 0;
1495 stripe->init_nr_io_errors = 0;
1496 stripe->init_nr_csum_errors = 0;
1497 stripe->init_nr_meta_errors = 0;
1498 stripe->error_bitmap = 0;
1499 stripe->io_error_bitmap = 0;
1500 stripe->csum_error_bitmap = 0;
1501 stripe->meta_error_bitmap = 0;
1502 }
1503
/*
 * Locate one stripe which has at least one extent in its range.
 *
 * Return 0 if we found such a stripe, and store its info into @stripe.
 * Return >0 if there is no such stripe in the specified range.
 * Return <0 for error.
 */
static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg,
					struct btrfs_path *extent_path,
					struct btrfs_path *csum_path,
					struct btrfs_device *dev, u64 physical,
					int mirror_num, u64 logical_start,
					u32 logical_len,
					struct scrub_stripe *stripe)
1518 {
1519 struct btrfs_fs_info *fs_info = bg->fs_info;
1520 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bg->start);
1521 struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bg->start);
1522 const u64 logical_end = logical_start + logical_len;
1523 u64 cur_logical = logical_start;
1524 u64 stripe_end;
1525 u64 extent_start;
1526 u64 extent_len;
1527 u64 extent_flags;
1528 u64 extent_gen;
1529 int ret;
1530
1531 memset(stripe->sectors, 0, sizeof(struct scrub_sector_verification) *
1532 stripe->nr_sectors);
1533 scrub_stripe_reset_bitmaps(stripe);
1534
1535 /* The range must be inside the bg. */
1536 ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);
1537
1538 ret = find_first_extent_item(extent_root, extent_path, logical_start,
1539 logical_len);
1540 /* Either error or not found. */
1541 if (ret)
1542 goto out;
1543 get_extent_info(extent_path, &extent_start, &extent_len, &extent_flags,
1544 &extent_gen);
1545 if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1546 stripe->nr_meta_extents++;
1547 if (extent_flags & BTRFS_EXTENT_FLAG_DATA)
1548 stripe->nr_data_extents++;
1549 cur_logical = max(extent_start, cur_logical);
1550
1551 /*
1552 * Round down to stripe boundary.
1553 *
1554 * The extra calculation against bg->start is to handle block groups
1555 * whose logical bytenr is not BTRFS_STRIPE_LEN aligned.
1556 */
1557 stripe->logical = round_down(cur_logical - bg->start, BTRFS_STRIPE_LEN) +
1558 bg->start;
1559 stripe->physical = physical + stripe->logical - logical_start;
1560 stripe->dev = dev;
1561 stripe->bg = bg;
1562 stripe->mirror_num = mirror_num;
1563 stripe_end = stripe->logical + BTRFS_STRIPE_LEN - 1;
1564
1565 /* Fill the first extent info into stripe->sectors[] array. */
1566 fill_one_extent_info(fs_info, stripe, extent_start, extent_len,
1567 extent_flags, extent_gen);
1568 cur_logical = extent_start + extent_len;
1569
1570 /* Fill the extent info for the remaining sectors. */
1571 while (cur_logical <= stripe_end) {
1572 ret = find_first_extent_item(extent_root, extent_path, cur_logical,
1573 stripe_end - cur_logical + 1);
1574 if (ret < 0)
1575 goto out;
1576 if (ret > 0) {
1577 ret = 0;
1578 break;
1579 }
1580 get_extent_info(extent_path, &extent_start, &extent_len,
1581 &extent_flags, &extent_gen);
1582 if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1583 stripe->nr_meta_extents++;
1584 if (extent_flags & BTRFS_EXTENT_FLAG_DATA)
1585 stripe->nr_data_extents++;
1586 fill_one_extent_info(fs_info, stripe, extent_start, extent_len,
1587 extent_flags, extent_gen);
1588 cur_logical = extent_start + extent_len;
1589 }
1590
1591 /* Now fill the data csum. */
1592 if (bg->flags & BTRFS_BLOCK_GROUP_DATA) {
1593 int sector_nr;
1594 unsigned long csum_bitmap = 0;
1595
1596 /* Csum space should have already been allocated. */
1597 ASSERT(stripe->csums);
1598
1599 /*
1600 * Our csum bitmap should be large enough, as BTRFS_STRIPE_LEN
1601 * should contain at most 16 sectors.
1602 */
1603 ASSERT(BITS_PER_LONG >= BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);
1604
1605 ret = btrfs_lookup_csums_bitmap(csum_root, csum_path,
1606 stripe->logical, stripe_end,
1607 stripe->csums, &csum_bitmap);
1608 if (ret < 0)
1609 goto out;
1610 if (ret > 0)
1611 ret = 0;
1612
1613 for_each_set_bit(sector_nr, &csum_bitmap, stripe->nr_sectors) {
1614 stripe->sectors[sector_nr].csum = stripe->csums +
1615 sector_nr * fs_info->csum_size;
1616 }
1617 }
1618 set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state);
1619 out:
1620 return ret;
1621 }
1622
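/*
 * Reset the per-stripe state (bitmaps, extent counts and sector info) so the
 * slot can be reused for the next stripe.
 */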
static void scrub_reset_stripe(struct scrub_stripe *stripe)
1624 {
1625 scrub_stripe_reset_bitmaps(stripe);
1626
1627 stripe->nr_meta_extents = 0;
1628 stripe->nr_data_extents = 0;
1629 stripe->state = 0;
1630
1631 for (int i = 0; i < stripe->nr_sectors; i++) {
1632 stripe->sectors[i].is_metadata = false;
1633 stripe->sectors[i].csum = NULL;
1634 stripe->sectors[i].generation = 0;
1635 }
1636 }
1637
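/*
 * Submit one read bio covering the whole stripe (BTRFS_STRIPE_LEN).  For
 * dev-replace we may switch to another mirror if the source device should be
 * avoided or is missing.  Verification happens in the read endio worker.
 */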
static void scrub_submit_initial_read(struct scrub_ctx *sctx,
				      struct scrub_stripe *stripe)
1640 {
1641 struct btrfs_fs_info *fs_info = sctx->fs_info;
1642 struct btrfs_bio *bbio;
1643 int mirror = stripe->mirror_num;
1644
1645 ASSERT(stripe->bg);
1646 ASSERT(stripe->mirror_num > 0);
1647 ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state));
1648
1649 bbio = btrfs_bio_alloc(SCRUB_STRIPE_PAGES, REQ_OP_READ, fs_info,
1650 scrub_read_endio, stripe);
1651
1652 /* Read the whole stripe. */
1653 bbio->bio.bi_iter.bi_sector = stripe->logical >> SECTOR_SHIFT;
1654 for (int i = 0; i < BTRFS_STRIPE_LEN >> PAGE_SHIFT; i++) {
1655 int ret;
1656
1657 ret = bio_add_page(&bbio->bio, stripe->pages[i], PAGE_SIZE, 0);
1658 /* We should have allocated enough bio vectors. */
1659 ASSERT(ret == PAGE_SIZE);
1660 }
1661 atomic_inc(&stripe->pending_io);
1662
1663 /*
1664 * For dev-replace, either user asks to avoid the source dev, or
1665 * the device is missing, we try the next mirror instead.
1666 */
1667 if (sctx->is_dev_replace &&
1668 (fs_info->dev_replace.cont_reading_from_srcdev_mode ==
1669 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID ||
1670 !stripe->dev->bdev)) {
1671 int num_copies = btrfs_num_copies(fs_info, stripe->bg->start,
1672 stripe->bg->length);
1673
1674 mirror = calc_next_mirror(mirror, num_copies);
1675 }
1676 btrfs_submit_bio(bbio, mirror);
1677 }
1678
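/*
 * Return true if the stripe still contains an unrepaired metadata sector,
 * and report the first such sector.
 */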
static bool stripe_has_metadata_error(struct scrub_stripe *stripe)
1680 {
1681 int i;
1682
1683 for_each_set_bit(i, &stripe->error_bitmap, stripe->nr_sectors) {
1684 if (stripe->sectors[i].is_metadata) {
1685 struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
1686
1687 btrfs_err(fs_info,
1688 "stripe %llu has unrepaired metadata sector at %llu",
1689 stripe->logical,
1690 stripe->logical + (i << fs_info->sectorsize_bits));
1691 return true;
1692 }
1693 }
1694 return false;
1695 }
1696
1697 static void submit_initial_group_read(struct scrub_ctx *sctx,
1698 unsigned int first_slot,
1699 unsigned int nr_stripes)
1700 {
1701 struct blk_plug plug;
1702
1703 ASSERT(first_slot < SCRUB_TOTAL_STRIPES);
1704 ASSERT(first_slot + nr_stripes <= SCRUB_TOTAL_STRIPES);
1705
1706 scrub_throttle_dev_io(sctx, sctx->stripes[0].dev,
1707 btrfs_stripe_nr_to_offset(nr_stripes));
1708 blk_start_plug(&plug);
1709 for (int i = 0; i < nr_stripes; i++) {
1710 struct scrub_stripe *stripe = &sctx->stripes[first_slot + i];
1711
1712 /* These stripes should have been initialized. */
1713 ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state));
1714 scrub_submit_initial_read(sctx, stripe);
1715 }
1716 blk_finish_plug(&plug);
1717 }
1718
1719 static int flush_scrub_stripes(struct scrub_ctx *sctx)
1720 {
1721 struct btrfs_fs_info *fs_info = sctx->fs_info;
1722 struct scrub_stripe *stripe;
1723 const int nr_stripes = sctx->cur_stripe;
1724 int ret = 0;
1725
1726 if (!nr_stripes)
1727 return 0;
1728
1729 ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &sctx->stripes[0].state));
1730
1731 /* Submit the stripes which are populated but not submitted. */
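/*
 * E.g. with 53 queued stripes, the full groups of slots [0-7] ... [40-47]
 * were already submitted as they filled up, so only the trailing partial
 * group (slots 48-52) needs to be submitted here.
 */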
1732 if (nr_stripes % SCRUB_STRIPES_PER_GROUP) {
1733 const int first_slot = round_down(nr_stripes, SCRUB_STRIPES_PER_GROUP);
1734
1735 submit_initial_group_read(sctx, first_slot, nr_stripes - first_slot);
1736 }
1737
1738 for (int i = 0; i < nr_stripes; i++) {
1739 stripe = &sctx->stripes[i];
1740
1741 wait_event(stripe->repair_wait,
1742 test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state));
1743 }
1744
1745 /* Submit for dev-replace. */
1746 if (sctx->is_dev_replace) {
1747 /*
1748 * For dev-replace, if we know there is something wrong with
1749 * metadata, we should immediately abort.
1750 */
1751 for (int i = 0; i < nr_stripes; i++) {
1752 if (stripe_has_metadata_error(&sctx->stripes[i])) {
1753 ret = -EIO;
1754 goto out;
1755 }
1756 }
1757 for (int i = 0; i < nr_stripes; i++) {
1758 unsigned long good;
1759
1760 stripe = &sctx->stripes[i];
1761
1762 ASSERT(stripe->dev == fs_info->dev_replace.srcdev);
1763
1764 bitmap_andnot(&good, &stripe->extent_sector_bitmap,
1765 &stripe->error_bitmap, stripe->nr_sectors);
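/*
 * Only sectors that are covered by an extent and have no remaining
 * error are copied to the dev-replace target.
 */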
1766 scrub_write_sectors(sctx, stripe, good, true);
1767 }
1768 }
1769
1770 /* Wait for the above writebacks to finish. */
1771 for (int i = 0; i < nr_stripes; i++) {
1772 stripe = &sctx->stripes[i];
1773
1774 wait_scrub_stripe_io(stripe);
1775 scrub_reset_stripe(stripe);
1776 }
1777 out:
1778 sctx->cur_stripe = 0;
1779 return ret;
1780 }
1781
1782 static void raid56_scrub_wait_endio(struct bio *bio)
1783 {
1784 complete(bio->bi_private);
1785 }
1786
1787 static int queue_scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *bg,
1788 struct btrfs_device *dev, int mirror_num,
1789 u64 logical, u32 length, u64 physical,
1790 u64 *found_logical_ret)
1791 {
1792 struct scrub_stripe *stripe;
1793 int ret;
1794
1795 /*
1796 * There should always be one slot left, as the caller filling the last
1797 * slot should flush them all.
1798 */
1799 ASSERT(sctx->cur_stripe < SCRUB_TOTAL_STRIPES);
1800
1801 stripe = &sctx->stripes[sctx->cur_stripe];
1802 scrub_reset_stripe(stripe);
1803 ret = scrub_find_fill_first_stripe(bg, &sctx->extent_path,
1804 &sctx->csum_path, dev, physical,
1805 mirror_num, logical, length, stripe);
1806 /* Either >0 as no more extents or <0 for error. */
1807 if (ret)
1808 return ret;
1809 if (found_logical_ret)
1810 *found_logical_ret = stripe->logical;
1811 sctx->cur_stripe++;
1812
1813 /* We filled one group, submit it. */
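/*
 * E.g. when cur_stripe reaches 16, slots 8-15 (the group that just
 * became full) are submitted in one go.
 */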
1814 if (sctx->cur_stripe % SCRUB_STRIPES_PER_GROUP == 0) {
1815 const int first_slot = sctx->cur_stripe - SCRUB_STRIPES_PER_GROUP;
1816
1817 submit_initial_group_read(sctx, first_slot, SCRUB_STRIPES_PER_GROUP);
1818 }
1819
1820 /* Last slot used, flush them all. */
1821 if (sctx->cur_stripe == SCRUB_TOTAL_STRIPES)
1822 return flush_scrub_stripes(sctx);
1823 return 0;
1824 }
1825
1826 static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
1827 struct btrfs_device *scrub_dev,
1828 struct btrfs_block_group *bg,
1829 struct map_lookup *map,
1830 u64 full_stripe_start)
1831 {
1832 DECLARE_COMPLETION_ONSTACK(io_done);
1833 struct btrfs_fs_info *fs_info = sctx->fs_info;
1834 struct btrfs_raid_bio *rbio;
1835 struct btrfs_io_context *bioc = NULL;
1836 struct btrfs_path extent_path = { 0 };
1837 struct btrfs_path csum_path = { 0 };
1838 struct bio *bio;
1839 struct scrub_stripe *stripe;
1840 bool all_empty = true;
1841 const int data_stripes = nr_data_stripes(map);
1842 unsigned long extent_bitmap = 0;
1843 u64 length = btrfs_stripe_nr_to_offset(data_stripes);
1844 int ret;
1845
1846 ASSERT(sctx->raid56_data_stripes);
1847
1848 /*
1849 * For data stripe search, we cannot re-use the same extent/csum paths,
1850 * as the data stripe bytenr may be smaller than the previous extent's. Thus
1851 * we have to use our own extent/csum paths.
1852 */
1853 extent_path.search_commit_root = 1;
1854 extent_path.skip_locking = 1;
1855 csum_path.search_commit_root = 1;
1856 csum_path.skip_locking = 1;
1857
1858 for (int i = 0; i < data_stripes; i++) {
1859 int stripe_index;
1860 int rot;
1861 u64 physical;
1862
1863 stripe = &sctx->raid56_data_stripes[i];
1864 rot = div_u64(full_stripe_start - bg->start,
1865 data_stripes) >> BTRFS_STRIPE_LEN_SHIFT;
1866 stripe_index = (i + rot) % map->num_stripes;
1867 physical = map->stripes[stripe_index].physical +
1868 btrfs_stripe_nr_to_offset(rot);
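/*
 * Example of the rotation above: a 3-device RAID5 chunk has 2 data
 * stripes, so for the full stripe starting at bg->start + 256KiB,
 * rot == (256KiB / 2) >> 16 == 2, and data stripe i == 0 lands on
 * map->stripes[2] with an extra 2 * 64KiB physical offset.
 */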
1869
1870 scrub_reset_stripe(stripe);
1871 set_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state);
1872 ret = scrub_find_fill_first_stripe(bg, &extent_path, &csum_path,
1873 map->stripes[stripe_index].dev, physical, 1,
1874 full_stripe_start + btrfs_stripe_nr_to_offset(i),
1875 BTRFS_STRIPE_LEN, stripe);
1876 if (ret < 0)
1877 goto out;
1878 /*
1879 * No extent in this data stripe, we need to manually mark it
1880 * initialized to make the later read submission happy.
1881 */
1882 if (ret > 0) {
1883 stripe->logical = full_stripe_start +
1884 btrfs_stripe_nr_to_offset(i);
1885 stripe->dev = map->stripes[stripe_index].dev;
1886 stripe->mirror_num = 1;
1887 set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state);
1888 }
1889 }
1890
1891 /* Check if all data stripes are empty. */
1892 for (int i = 0; i < data_stripes; i++) {
1893 stripe = &sctx->raid56_data_stripes[i];
1894 if (!bitmap_empty(&stripe->extent_sector_bitmap, stripe->nr_sectors)) {
1895 all_empty = false;
1896 break;
1897 }
1898 }
1899 if (all_empty) {
1900 ret = 0;
1901 goto out;
1902 }
1903
1904 for (int i = 0; i < data_stripes; i++) {
1905 stripe = &sctx->raid56_data_stripes[i];
1906 scrub_submit_initial_read(sctx, stripe);
1907 }
1908 for (int i = 0; i < data_stripes; i++) {
1909 stripe = &sctx->raid56_data_stripes[i];
1910
1911 wait_event(stripe->repair_wait,
1912 test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state));
1913 }
1914 /* For now, no zoned support for RAID56. */
1915 ASSERT(!btrfs_is_zoned(sctx->fs_info));
1916
1917 /*
1918 * Now all data stripes are properly verified. Check if we have any
1919 * unrepaired sectors; if so, abort immediately or we could further
1920 * corrupt the P/Q stripes.
1921 *
1922 * During the loop, also populate extent_bitmap.
1923 */
1924 for (int i = 0; i < data_stripes; i++) {
1925 unsigned long error;
1926
1927 stripe = &sctx->raid56_data_stripes[i];
1928
1929 /*
1930 * We should only check the errors where there is an extent, as we
1931 * may hit an empty data stripe whose device is missing.
1932 */
1933 bitmap_and(&error, &stripe->error_bitmap,
1934 &stripe->extent_sector_bitmap, stripe->nr_sectors);
1935 if (!bitmap_empty(&error, stripe->nr_sectors)) {
1936 btrfs_err(fs_info,
1937 "unrepaired sectors detected, full stripe %llu data stripe %u errors %*pbl",
1938 full_stripe_start, i, stripe->nr_sectors,
1939 &error);
1940 ret = -EIO;
1941 goto out;
1942 }
1943 bitmap_or(&extent_bitmap, &extent_bitmap,
1944 &stripe->extent_sector_bitmap, stripe->nr_sectors);
1945 }
1946
1947 /* Now we can check and regenerate the P/Q stripe. */
1948 bio = bio_alloc(NULL, 1, REQ_OP_READ, GFP_NOFS);
1949 bio->bi_iter.bi_sector = full_stripe_start >> SECTOR_SHIFT;
1950 bio->bi_private = &io_done;
1951 bio->bi_end_io = raid56_scrub_wait_endio;
1952
1953 btrfs_bio_counter_inc_blocked(fs_info);
1954 ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, full_stripe_start,
1955 &length, &bioc, NULL, NULL, 1);
1956 if (ret < 0) {
1957 btrfs_put_bioc(bioc);
1958 btrfs_bio_counter_dec(fs_info);
1959 goto out;
1960 }
1961 rbio = raid56_parity_alloc_scrub_rbio(bio, bioc, scrub_dev, &extent_bitmap,
1962 BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);
1963 btrfs_put_bioc(bioc);
1964 if (!rbio) {
1965 ret = -ENOMEM;
1966 btrfs_bio_counter_dec(fs_info);
1967 goto out;
1968 }
1969 /* Use the recovered stripes as cache to avoid reading them from disk again. */
1970 for (int i = 0; i < data_stripes; i++) {
1971 stripe = &sctx->raid56_data_stripes[i];
1972
1973 raid56_parity_cache_data_pages(rbio, stripe->pages,
1974 full_stripe_start + (i << BTRFS_STRIPE_LEN_SHIFT));
1975 }
1976 raid56_parity_submit_scrub_rbio(rbio);
1977 wait_for_completion_io(&io_done);
1978 ret = blk_status_to_errno(bio->bi_status);
1979 bio_put(bio);
1980 btrfs_bio_counter_dec(fs_info);
1981
1982 btrfs_release_path(&extent_path);
1983 btrfs_release_path(&csum_path);
1984 out:
1985 return ret;
1986 }
1987
1988 /*
1989 * Scrub one range which can only have a simple mirror based profile.
1990 * (Including all ranges in SINGLE/DUP/RAID1/RAID1C*, and each stripe in
1991 * RAID0/RAID10).
1992 *
1993 * Since we may need to handle a subset of a block group, we need the
1994 * @logical_start and @logical_length parameters.
1995 */
1996 static int scrub_simple_mirror(struct scrub_ctx *sctx,
1997 struct btrfs_block_group *bg,
1998 struct map_lookup *map,
1999 u64 logical_start, u64 logical_length,
2000 struct btrfs_device *device,
2001 u64 physical, int mirror_num)
2002 {
2003 struct btrfs_fs_info *fs_info = sctx->fs_info;
2004 const u64 logical_end = logical_start + logical_length;
2005 u64 cur_logical = logical_start;
2006 int ret;
2007
2008 /* The range must be inside the bg */
2009 ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);
2010
2011 /* Go through each extent item inside the logical range */
2012 while (cur_logical < logical_end) {
2013 u64 found_logical;
2014 u64 cur_physical = physical + cur_logical - logical_start;
2015
2016 /* Canceled? */
2017 if (atomic_read(&fs_info->scrub_cancel_req) ||
2018 atomic_read(&sctx->cancel_req)) {
2019 ret = -ECANCELED;
2020 break;
2021 }
2022 /* Paused? */
2023 if (atomic_read(&fs_info->scrub_pause_req)) {
2024 /* Push queued extents */
2025 scrub_blocked_if_needed(fs_info);
2026 }
2027 /* Block group removed? */
2028 spin_lock(&bg->lock);
2029 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) {
2030 spin_unlock(&bg->lock);
2031 ret = 0;
2032 break;
2033 }
2034 spin_unlock(&bg->lock);
2035
2036 ret = queue_scrub_stripe(sctx, bg, device, mirror_num,
2037 cur_logical, logical_end - cur_logical,
2038 cur_physical, &found_logical);
2039 if (ret > 0) {
2040 /* No more extents, just update the accounting */
2041 sctx->stat.last_physical = physical + logical_length;
2042 ret = 0;
2043 break;
2044 }
2045 if (ret < 0)
2046 break;
2047
2048 cur_logical = found_logical + BTRFS_STRIPE_LEN;
2049
2050 /* Don't hold the CPU for too long */
2051 cond_resched();
2052 }
2053 return ret;
2054 }
2055
2056 /* Calculate the full stripe length for simple stripe based profiles */
2057 static u64 simple_stripe_full_stripe_len(const struct map_lookup *map)
2058 {
2059 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2060 BTRFS_BLOCK_GROUP_RAID10));
2061
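/*
 * E.g. a 4-device RAID10 chunk (sub_stripes == 2) has 4 / 2 == 2 data
 * stripes, so one full stripe covers 2 * 64KiB == 128KiB of logical
 * address space.
 */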
2062 return btrfs_stripe_nr_to_offset(map->num_stripes / map->sub_stripes);
2063 }
2064
2065 /* Get the logical bytenr for the stripe */
2066 static u64 simple_stripe_get_logical(struct map_lookup *map,
2067 struct btrfs_block_group *bg,
2068 int stripe_index)
2069 {
2070 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2071 BTRFS_BLOCK_GROUP_RAID10));
2072 ASSERT(stripe_index < map->num_stripes);
2073
2074 /*
2075 * (stripe_index / sub_stripes) gives how many data stripes we need to
2076 * skip.
2077 */
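/*
 * E.g. for a 4-device RAID10 chunk, stripe_index 2 and 3 both map to
 * the second data stripe, whose logical start is bg->start + 64KiB.
 */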
2078 return btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes) +
2079 bg->start;
2080 }
2081
2082 /* Get the mirror number for the stripe */
2083 static int simple_stripe_mirror_num(struct map_lookup *map, int stripe_index)
2084 {
2085 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2086 BTRFS_BLOCK_GROUP_RAID10));
2087 ASSERT(stripe_index < map->num_stripes);
2088
2089 /* For RAID0 it's fixed to 1, for RAID10 it's 1,2,1,2... */
2090 return stripe_index % map->sub_stripes + 1;
2091 }
2092
2093 static int scrub_simple_stripe(struct scrub_ctx *sctx,
2094 struct btrfs_block_group *bg,
2095 struct map_lookup *map,
2096 struct btrfs_device *device,
2097 int stripe_index)
2098 {
2099 const u64 logical_increment = simple_stripe_full_stripe_len(map);
2100 const u64 orig_logical = simple_stripe_get_logical(map, bg, stripe_index);
2101 const u64 orig_physical = map->stripes[stripe_index].physical;
2102 const int mirror_num = simple_stripe_mirror_num(map, stripe_index);
2103 u64 cur_logical = orig_logical;
2104 u64 cur_physical = orig_physical;
2105 int ret = 0;
2106
2107 while (cur_logical < bg->start + bg->length) {
2108 /*
2109 * Inside each stripe, RAID0 is just SINGLE, and RAID10 is
2110 * just RAID1, so we can reuse scrub_simple_mirror() to scrub
2111 * this stripe.
2112 */
2113 ret = scrub_simple_mirror(sctx, bg, map, cur_logical,
2114 BTRFS_STRIPE_LEN, device, cur_physical,
2115 mirror_num);
2116 if (ret)
2117 return ret;
2118 /* Skip to next stripe which belongs to the target device */
2119 cur_logical += logical_increment;
2120 /* For physical offset, we just go to next stripe */
2121 cur_physical += BTRFS_STRIPE_LEN;
2122 }
2123 return ret;
2124 }
2125
2126 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2127 struct btrfs_block_group *bg,
2128 struct extent_map *em,
2129 struct btrfs_device *scrub_dev,
2130 int stripe_index)
2131 {
2132 struct btrfs_fs_info *fs_info = sctx->fs_info;
2133 struct map_lookup *map = em->map_lookup;
2134 const u64 profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
2135 const u64 chunk_logical = bg->start;
2136 int ret;
2137 int ret2;
2138 u64 physical = map->stripes[stripe_index].physical;
2139 const u64 dev_stripe_len = btrfs_calc_stripe_length(em);
2140 const u64 physical_end = physical + dev_stripe_len;
2141 u64 logical;
2142 u64 logic_end;
2143 /* The logical increment after finishing one stripe */
2144 u64 increment;
2145 /* Offset inside the chunk */
2146 u64 offset;
2147 u64 stripe_logical;
2148 int stop_loop = 0;
2149
2150 /* Extent_path should be released by now. */
2151 ASSERT(sctx->extent_path.nodes[0] == NULL);
2152
2153 scrub_blocked_if_needed(fs_info);
2154
2155 if (sctx->is_dev_replace &&
2156 btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) {
2157 mutex_lock(&sctx->wr_lock);
2158 sctx->write_pointer = physical;
2159 mutex_unlock(&sctx->wr_lock);
2160 }
2161
2162 /* Prepare the extra data stripes used by RAID56. */
2163 if (profile & BTRFS_BLOCK_GROUP_RAID56_MASK) {
2164 ASSERT(sctx->raid56_data_stripes == NULL);
2165
2166 sctx->raid56_data_stripes = kcalloc(nr_data_stripes(map),
2167 sizeof(struct scrub_stripe),
2168 GFP_KERNEL);
2169 if (!sctx->raid56_data_stripes) {
2170 ret = -ENOMEM;
2171 goto out;
2172 }
2173 for (int i = 0; i < nr_data_stripes(map); i++) {
2174 ret = init_scrub_stripe(fs_info,
2175 &sctx->raid56_data_stripes[i]);
2176 if (ret < 0)
2177 goto out;
2178 sctx->raid56_data_stripes[i].bg = bg;
2179 sctx->raid56_data_stripes[i].sctx = sctx;
2180 }
2181 }
2182 /*
2183 * There used to be a big double loop to handle all profiles using the
2184 * same routine, which grew larger and more convoluted over time.
2185 *
2186 * So here we handle each profile differently, so that simpler profiles
2187 * have simpler scrubbing functions.
2188 */
2189 if (!(profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10 |
2190 BTRFS_BLOCK_GROUP_RAID56_MASK))) {
2191 /*
2192 * The above check rules out all complex profiles; the remaining
2193 * profiles are SINGLE|DUP|RAID1|RAID1C*, which are simple
2194 * mirrored duplication without striping.
2195 *
2196 * Only @physical and @mirror_num need to be calculated using
2197 * @stripe_index.
2198 */
2199 ret = scrub_simple_mirror(sctx, bg, map, bg->start, bg->length,
2200 scrub_dev, map->stripes[stripe_index].physical,
2201 stripe_index + 1);
2202 offset = 0;
2203 goto out;
2204 }
2205 if (profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
2206 ret = scrub_simple_stripe(sctx, bg, map, scrub_dev, stripe_index);
2207 offset = btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes);
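/*
 * @offset is the logical distance from the chunk start to this
 * device's first stripe; it is only consumed by the zoned write
 * pointer sync at the out: label.
 */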
2208 goto out;
2209 }
2210
2211 /* Only RAID56 goes through the old code */
2212 ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK);
2213 ret = 0;
2214
2215 /* Calculate the logical end of the stripe */
2216 get_raid56_logic_offset(physical_end, stripe_index,
2217 map, &logic_end, NULL);
2218 logic_end += chunk_logical;
2219
2220 /* Initialize @offset in case we need to go to out: label */
2221 get_raid56_logic_offset(physical, stripe_index, map, &offset, NULL);
2222 increment = btrfs_stripe_nr_to_offset(nr_data_stripes(map));
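/*
 * E.g. a 6-device RAID6 chunk has 4 data stripes, so finishing one
 * stripe advances the logical address by 4 * 64KiB == 256KiB.
 */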
2223
2224 /*
2225 * Due to the rotation, for RAID56 it's better to iterate each stripe
2226 * using its physical offset.
2227 */
2228 while (physical < physical_end) {
2229 ret = get_raid56_logic_offset(physical, stripe_index, map,
2230 &logical, &stripe_logical);
2231 logical += chunk_logical;
2232 if (ret) {
2233 /* It is a parity stripe */
2234 stripe_logical += chunk_logical;
2235 ret = scrub_raid56_parity_stripe(sctx, scrub_dev, bg,
2236 map, stripe_logical);
2237 if (ret)
2238 goto out;
2239 goto next;
2240 }
2241
2242 /*
2243 * Now we're at a data stripe, scrub each extent in the range.
2244 *
2245 * At this stage, if we ignore the repair part, inside each data
2246 * stripe it is no different from the SINGLE profile.
2247 * We can reuse scrub_simple_mirror() here, as the repair part
2248 * is still based on @mirror_num.
2249 */
2250 ret = scrub_simple_mirror(sctx, bg, map, logical, BTRFS_STRIPE_LEN,
2251 scrub_dev, physical, 1);
2252 if (ret < 0)
2253 goto out;
2254 next:
2255 logical += increment;
2256 physical += BTRFS_STRIPE_LEN;
2257 spin_lock(&sctx->stat_lock);
2258 if (stop_loop)
2259 sctx->stat.last_physical =
2260 map->stripes[stripe_index].physical + dev_stripe_len;
2261 else
2262 sctx->stat.last_physical = physical;
2263 spin_unlock(&sctx->stat_lock);
2264 if (stop_loop)
2265 break;
2266 }
2267 out:
2268 ret2 = flush_scrub_stripes(sctx);
2269 if (!ret)
2270 ret = ret2;
2271 btrfs_release_path(&sctx->extent_path);
2272 btrfs_release_path(&sctx->csum_path);
2273
2274 if (sctx->raid56_data_stripes) {
2275 for (int i = 0; i < nr_data_stripes(map); i++)
2276 release_scrub_stripe(&sctx->raid56_data_stripes[i]);
2277 kfree(sctx->raid56_data_stripes);
2278 sctx->raid56_data_stripes = NULL;
2279 }
2280
2281 if (sctx->is_dev_replace && ret >= 0) {
2282 int ret2;
2283
2284 ret2 = sync_write_pointer_for_zoned(sctx,
2285 chunk_logical + offset,
2286 map->stripes[stripe_index].physical,
2287 physical_end);
2288 if (ret2)
2289 ret = ret2;
2290 }
2291
2292 return ret < 0 ? ret : 0;
2293 }
2294
2295 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
2296 struct btrfs_block_group *bg,
2297 struct btrfs_device *scrub_dev,
2298 u64 dev_offset,
2299 u64 dev_extent_len)
2300 {
2301 struct btrfs_fs_info *fs_info = sctx->fs_info;
2302 struct extent_map_tree *map_tree = &fs_info->mapping_tree;
2303 struct map_lookup *map;
2304 struct extent_map *em;
2305 int i;
2306 int ret = 0;
2307
2308 read_lock(&map_tree->lock);
2309 em = lookup_extent_mapping(map_tree, bg->start, bg->length);
2310 read_unlock(&map_tree->lock);
2311
2312 if (!em) {
2313 /*
2314 * Might have been an unused block group deleted by the cleaner
2315 * kthread or relocation.
2316 */
2317 spin_lock(&bg->lock);
2318 if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags))
2319 ret = -EINVAL;
2320 spin_unlock(&bg->lock);
2321
2322 return ret;
2323 }
2324 if (em->start != bg->start)
2325 goto out;
2326 if (em->len < dev_extent_len)
2327 goto out;
2328
2329 map = em->map_lookup;
2330 for (i = 0; i < map->num_stripes; ++i) {
2331 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
2332 map->stripes[i].physical == dev_offset) {
2333 ret = scrub_stripe(sctx, bg, em, scrub_dev, i);
2334 if (ret)
2335 goto out;
2336 }
2337 }
2338 out:
2339 free_extent_map(em);
2340
2341 return ret;
2342 }
2343
2344 static int finish_extent_writes_for_zoned(struct btrfs_root *root,
2345 struct btrfs_block_group *cache)
2346 {
2347 struct btrfs_fs_info *fs_info = cache->fs_info;
2348 struct btrfs_trans_handle *trans;
2349
2350 if (!btrfs_is_zoned(fs_info))
2351 return 0;
2352
2353 btrfs_wait_block_group_reservations(cache);
2354 btrfs_wait_nocow_writers(cache);
2355 btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, cache->length);
2356
2357 trans = btrfs_join_transaction(root);
2358 if (IS_ERR(trans))
2359 return PTR_ERR(trans);
2360 return btrfs_commit_transaction(trans);
2361 }
2362
2363 static noinline_for_stack
2364 int scrub_enumerate_chunks(struct scrub_ctx *sctx,
2365 struct btrfs_device *scrub_dev, u64 start, u64 end)
2366 {
2367 struct btrfs_dev_extent *dev_extent = NULL;
2368 struct btrfs_path *path;
2369 struct btrfs_fs_info *fs_info = sctx->fs_info;
2370 struct btrfs_root *root = fs_info->dev_root;
2371 u64 chunk_offset;
2372 int ret = 0;
2373 int ro_set;
2374 int slot;
2375 struct extent_buffer *l;
2376 struct btrfs_key key;
2377 struct btrfs_key found_key;
2378 struct btrfs_block_group *cache;
2379 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
2380
2381 path = btrfs_alloc_path();
2382 if (!path)
2383 return -ENOMEM;
2384
2385 path->reada = READA_FORWARD;
2386 path->search_commit_root = 1;
2387 path->skip_locking = 1;
2388
2389 key.objectid = scrub_dev->devid;
2390 key.offset = 0ull;
2391 key.type = BTRFS_DEV_EXTENT_KEY;
2392
2393 while (1) {
2394 u64 dev_extent_len;
2395
2396 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2397 if (ret < 0)
2398 break;
2399 if (ret > 0) {
2400 if (path->slots[0] >=
2401 btrfs_header_nritems(path->nodes[0])) {
2402 ret = btrfs_next_leaf(root, path);
2403 if (ret < 0)
2404 break;
2405 if (ret > 0) {
2406 ret = 0;
2407 break;
2408 }
2409 } else {
2410 ret = 0;
2411 }
2412 }
2413
2414 l = path->nodes[0];
2415 slot = path->slots[0];
2416
2417 btrfs_item_key_to_cpu(l, &found_key, slot);
2418
2419 if (found_key.objectid != scrub_dev->devid)
2420 break;
2421
2422 if (found_key.type != BTRFS_DEV_EXTENT_KEY)
2423 break;
2424
2425 if (found_key.offset >= end)
2426 break;
2427
2428 if (found_key.offset < key.offset)
2429 break;
2430
2431 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
2432 dev_extent_len = btrfs_dev_extent_length(l, dev_extent);
2433
2434 if (found_key.offset + dev_extent_len <= start)
2435 goto skip;
2436
2437 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
2438
2439 /*
2440 * get a reference on the corresponding block group to prevent
2441 * the chunk from going away while we scrub it
2442 */
2443 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2444
2445 /* some chunks are removed but not committed to disk yet,
2446 * continue scrubbing */
2447 if (!cache)
2448 goto skip;
2449
2450 ASSERT(cache->start <= chunk_offset);
2451 /*
2452 * We are using the commit root to search for device extents, so
2453 * that means we could have found a device extent item from a
2454 * block group that was deleted in the current transaction. The
2455 * logical start offset of the deleted block group, stored at
2456 * @chunk_offset, might be part of the logical address range of
2457 * a new block group (which uses different physical extents).
2458 * In this case btrfs_lookup_block_group() has returned the new
2459 * block group, and its start address is less than @chunk_offset.
2460 *
2461 * We skip such new block groups, because it's pointless to
2462 * process them, as we won't find their extents because we search
2463 * for them using the commit root of the extent tree. For a device
2464 * replace it's also fine to skip it, we won't miss copying them
2465 * to the target device because we have the write duplication
2466 * setup through the regular write path (by btrfs_map_block()),
2467 * and we have committed a transaction when we started the device
2468 * replace, right after setting up the device replace state.
2469 */
2470 if (cache->start < chunk_offset) {
2471 btrfs_put_block_group(cache);
2472 goto skip;
2473 }
2474
2475 if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) {
2476 if (!test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags)) {
2477 btrfs_put_block_group(cache);
2478 goto skip;
2479 }
2480 }
2481
2482 /*
2483 * Make sure that while we are scrubbing the corresponding block
2484 * group doesn't get its logical address and its device extents
2485 * reused for another block group, which can possibly be of a
2486 * different type and different profile. We do this to prevent
2487 * false error detections and crashes due to bogus attempts to
2488 * repair extents.
2489 */
2490 spin_lock(&cache->lock);
2491 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags)) {
2492 spin_unlock(&cache->lock);
2493 btrfs_put_block_group(cache);
2494 goto skip;
2495 }
2496 btrfs_freeze_block_group(cache);
2497 spin_unlock(&cache->lock);
2498
2499 /*
2500 * We need to call btrfs_inc_block_group_ro() with scrubs_paused,
2501 * to avoid deadlock caused by:
2502 * btrfs_inc_block_group_ro()
2503 * -> btrfs_wait_for_commit()
2504 * -> btrfs_commit_transaction()
2505 * -> btrfs_scrub_pause()
2506 */
2507 scrub_pause_on(fs_info);
2508
2509 /*
2510 * Don't do chunk preallocation for scrub.
2511 *
2512 * This is especially important for SYSTEM bgs, or we can hit
2513 * -EFBIG from btrfs_finish_chunk_alloc() like:
2514 * 1. The only SYSTEM bg is marked RO.
2515 * Since SYSTEM bg is small, that's pretty common.
2516 * 2. New SYSTEM bg will be allocated
2517 * Because the regular path will allocate a new chunk.
2518 * 3. New SYSTEM bg is empty and will get cleaned up
2519 * Before cleanup really happens, it's marked RO again.
2520 * 4. Empty SYSTEM bg get scrubbed
2521 * We go back to 2.
2522 *
2523 * This can easily boost the amount of SYSTEM chunks if the cleaner
2524 * thread can't be triggered fast enough, and use up all the space
2525 * of btrfs_super_block::sys_chunk_array
2526 *
2527 * While for dev replace, we need to try our best to mark block
2528 * group RO, to prevent race between:
2529 * - Write duplication
2530 * Contains latest data
2531 * - Scrub copy
2532 * Contains data from commit tree
2533 *
2534 * If target block group is not marked RO, nocow writes can
2535 * be overwritten by scrub copy, causing data corruption.
2536 * So for dev-replace, it's not allowed to continue if a block
2537 * group is not RO.
2538 */
2539 ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
2540 if (!ret && sctx->is_dev_replace) {
2541 ret = finish_extent_writes_for_zoned(root, cache);
2542 if (ret) {
2543 btrfs_dec_block_group_ro(cache);
2544 scrub_pause_off(fs_info);
2545 btrfs_put_block_group(cache);
2546 break;
2547 }
2548 }
2549
2550 if (ret == 0) {
2551 ro_set = 1;
2552 } else if (ret == -ENOSPC && !sctx->is_dev_replace &&
2553 !(cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK)) {
2554 /*
2555 * btrfs_inc_block_group_ro() returns -ENOSPC when it
2556 * fails to create a new chunk for metadata.
2557 * It is not a problem for scrub, because
2558 * metadata is always COWed, and our scrub has paused
2559 * transaction commits.
2560 *
2561 * For RAID56 chunks, we have to mark them read-only
2562 * for scrub, as later we would use our own cache
2563 * outside of the RAID56 realm.
2564 * Thus we want the RAID56 bg to be marked RO to
2565 * prevent RMW from screwing up our cache.
2566 */
2567 ro_set = 0;
2568 } else if (ret == -ETXTBSY) {
2569 btrfs_warn(fs_info,
2570 "skipping scrub of block group %llu due to active swapfile",
2571 cache->start);
2572 scrub_pause_off(fs_info);
2573 ret = 0;
2574 goto skip_unfreeze;
2575 } else {
2576 btrfs_warn(fs_info,
2577 "failed setting block group ro: %d", ret);
2578 btrfs_unfreeze_block_group(cache);
2579 btrfs_put_block_group(cache);
2580 scrub_pause_off(fs_info);
2581 break;
2582 }
2583
2584 /*
2585 * Now the target block group is marked RO, wait for nocow writes to
2586 * finish before dev-replace.
2587 * COW is fine, as COW never overwrites extents in the commit tree.
2588 */
2589 if (sctx->is_dev_replace) {
2590 btrfs_wait_nocow_writers(cache);
2591 btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start,
2592 cache->length);
2593 }
2594
2595 scrub_pause_off(fs_info);
2596 down_write(&dev_replace->rwsem);
2597 dev_replace->cursor_right = found_key.offset + dev_extent_len;
2598 dev_replace->cursor_left = found_key.offset;
2599 dev_replace->item_needs_writeback = 1;
2600 up_write(&dev_replace->rwsem);
2601
2602 ret = scrub_chunk(sctx, cache, scrub_dev, found_key.offset,
2603 dev_extent_len);
2604 if (sctx->is_dev_replace &&
2605 !btrfs_finish_block_group_to_copy(dev_replace->srcdev,
2606 cache, found_key.offset))
2607 ro_set = 0;
2608
2609 down_write(&dev_replace->rwsem);
2610 dev_replace->cursor_left = dev_replace->cursor_right;
2611 dev_replace->item_needs_writeback = 1;
2612 up_write(&dev_replace->rwsem);
2613
2614 if (ro_set)
2615 btrfs_dec_block_group_ro(cache);
2616
2617 /*
2618 * We might have prevented the cleaner kthread from deleting
2619 * this block group if it was already unused because we raced
2620 * and set it to RO mode first. So add it back to the unused
2621 * list, otherwise it might not ever be deleted unless a manual
2622 * balance is triggered or it becomes used and unused again.
2623 */
2624 spin_lock(&cache->lock);
2625 if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags) &&
2626 !cache->ro && cache->reserved == 0 && cache->used == 0) {
2627 spin_unlock(&cache->lock);
2628 if (btrfs_test_opt(fs_info, DISCARD_ASYNC))
2629 btrfs_discard_queue_work(&fs_info->discard_ctl,
2630 cache);
2631 else
2632 btrfs_mark_bg_unused(cache);
2633 } else {
2634 spin_unlock(&cache->lock);
2635 }
2636 skip_unfreeze:
2637 btrfs_unfreeze_block_group(cache);
2638 btrfs_put_block_group(cache);
2639 if (ret)
2640 break;
2641 if (sctx->is_dev_replace &&
2642 atomic64_read(&dev_replace->num_write_errors) > 0) {
2643 ret = -EIO;
2644 break;
2645 }
2646 if (sctx->stat.malloc_errors > 0) {
2647 ret = -ENOMEM;
2648 break;
2649 }
2650 skip:
2651 key.offset = found_key.offset + dev_extent_len;
2652 btrfs_release_path(path);
2653 }
2654
2655 btrfs_free_path(path);
2656
2657 return ret;
2658 }
2659
2660 static int scrub_one_super(struct scrub_ctx *sctx, struct btrfs_device *dev,
2661 struct page *page, u64 physical, u64 generation)
2662 {
2663 struct btrfs_fs_info *fs_info = sctx->fs_info;
2664 struct bio_vec bvec;
2665 struct bio bio;
2666 struct btrfs_super_block *sb = page_address(page);
2667 int ret;
2668
2669 bio_init(&bio, dev->bdev, &bvec, 1, REQ_OP_READ);
2670 bio.bi_iter.bi_sector = physical >> SECTOR_SHIFT;
2671 __bio_add_page(&bio, page, BTRFS_SUPER_INFO_SIZE, 0);
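/* BTRFS_SUPER_INFO_SIZE is 4KiB, so a single page and bio_vec always cover one super block copy. */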
2672 ret = submit_bio_wait(&bio);
2673 bio_uninit(&bio);
2674
2675 if (ret < 0)
2676 return ret;
2677 ret = btrfs_check_super_csum(fs_info, sb);
2678 if (ret != 0) {
2679 btrfs_err_rl(fs_info,
2680 "super block at physical %llu devid %llu has bad csum",
2681 physical, dev->devid);
2682 return -EIO;
2683 }
2684 if (btrfs_super_generation(sb) != generation) {
2685 btrfs_err_rl(fs_info,
2686 "super block at physical %llu devid %llu has bad generation %llu expect %llu",
2687 physical, dev->devid,
2688 btrfs_super_generation(sb), generation);
2689 return -EUCLEAN;
2690 }
2691
2692 return btrfs_validate_super(fs_info, sb, -1);
2693 }
2694
2695 static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
2696 struct btrfs_device *scrub_dev)
2697 {
2698 int i;
2699 u64 bytenr;
2700 u64 gen;
2701 int ret = 0;
2702 struct page *page;
2703 struct btrfs_fs_info *fs_info = sctx->fs_info;
2704
2705 if (BTRFS_FS_ERROR(fs_info))
2706 return -EROFS;
2707
2708 page = alloc_page(GFP_KERNEL);
2709 if (!page) {
2710 spin_lock(&sctx->stat_lock);
2711 sctx->stat.malloc_errors++;
2712 spin_unlock(&sctx->stat_lock);
2713 return -ENOMEM;
2714 }
2715
2716 /* Seed devices of a new filesystem have their own generation. */
2717 if (scrub_dev->fs_devices != fs_info->fs_devices)
2718 gen = scrub_dev->generation;
2719 else
2720 gen = fs_info->last_trans_committed;
2721
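/*
 * The super block copies sit at fixed offsets (64KiB, 64MiB, 256GiB,
 * see btrfs_sb_offset()); copies beyond the device size are skipped
 * below.
 */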
2722 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
2723 bytenr = btrfs_sb_offset(i);
2724 if (bytenr + BTRFS_SUPER_INFO_SIZE >
2725 scrub_dev->commit_total_bytes)
2726 break;
2727 if (!btrfs_check_super_location(scrub_dev, bytenr))
2728 continue;
2729
2730 ret = scrub_one_super(sctx, scrub_dev, page, bytenr, gen);
2731 if (ret) {
2732 spin_lock(&sctx->stat_lock);
2733 sctx->stat.super_errors++;
2734 spin_unlock(&sctx->stat_lock);
2735 }
2736 }
2737 __free_page(page);
2738 return 0;
2739 }
2740
2741 static void scrub_workers_put(struct btrfs_fs_info *fs_info)
2742 {
2743 if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt,
2744 &fs_info->scrub_lock)) {
2745 struct workqueue_struct *scrub_workers = fs_info->scrub_workers;
2746
2747 fs_info->scrub_workers = NULL;
2748 mutex_unlock(&fs_info->scrub_lock);
2749
2750 if (scrub_workers)
2751 destroy_workqueue(scrub_workers);
2752 }
2753 }
2754
2755 /*
2756 * Get a reference count on fs_info->scrub_workers. Start the workers if necessary.
2757 */
2758 static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info)
2759 {
2760 struct workqueue_struct *scrub_workers = NULL;
2761 unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
2762 int max_active = fs_info->thread_pool_size;
2763 int ret = -ENOMEM;
2764
2765 if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt))
2766 return 0;
2767
2768 scrub_workers = alloc_workqueue("btrfs-scrub", flags, max_active);
2769 if (!scrub_workers)
2770 return -ENOMEM;
2771
2772 mutex_lock(&fs_info->scrub_lock);
2773 if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
2774 ASSERT(fs_info->scrub_workers == NULL);
2775 fs_info->scrub_workers = scrub_workers;
2776 refcount_set(&fs_info->scrub_workers_refcnt, 1);
2777 mutex_unlock(&fs_info->scrub_lock);
2778 return 0;
2779 }
2780 /* Other thread raced in and created the workers for us */
2781 refcount_inc(&fs_info->scrub_workers_refcnt);
2782 mutex_unlock(&fs_info->scrub_lock);
2783
2784 ret = 0;
2785
2786 destroy_workqueue(scrub_workers);
2787 return ret;
2788 }
2789
2790 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
2791 u64 end, struct btrfs_scrub_progress *progress,
2792 int readonly, int is_dev_replace)
2793 {
2794 struct btrfs_dev_lookup_args args = { .devid = devid };
2795 struct scrub_ctx *sctx;
2796 int ret;
2797 struct btrfs_device *dev;
2798 unsigned int nofs_flag;
2799 bool need_commit = false;
2800
2801 if (btrfs_fs_closing(fs_info))
2802 return -EAGAIN;
2803
2804 /* At mount time we have ensured nodesize is in the range of [4K, 64K]. */
2805 ASSERT(fs_info->nodesize <= BTRFS_STRIPE_LEN);
2806
2807 /*
2808 * SCRUB_MAX_SECTORS_PER_BLOCK is calculated using the largest possible
2809 * value (max nodesize / min sectorsize), thus nodesize should always
2810 * be fine.
2811 */
2812 ASSERT(fs_info->nodesize <=
2813 SCRUB_MAX_SECTORS_PER_BLOCK << fs_info->sectorsize_bits);
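/*
 * E.g. with the maximum 64KiB nodesize and the minimum 4KiB sectorsize:
 * SCRUB_MAX_SECTORS_PER_BLOCK == 16 and 16 << 12 == 64KiB, so the
 * assertion holds.
 */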
2814
2815 /* Allocate outside of device_list_mutex */
2816 sctx = scrub_setup_ctx(fs_info, is_dev_replace);
2817 if (IS_ERR(sctx))
2818 return PTR_ERR(sctx);
2819
2820 ret = scrub_workers_get(fs_info);
2821 if (ret)
2822 goto out_free_ctx;
2823
2824 mutex_lock(&fs_info->fs_devices->device_list_mutex);
2825 dev = btrfs_find_device(fs_info->fs_devices, &args);
2826 if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
2827 !is_dev_replace)) {
2828 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2829 ret = -ENODEV;
2830 goto out;
2831 }
2832
2833 if (!is_dev_replace && !readonly &&
2834 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
2835 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2836 btrfs_err_in_rcu(fs_info,
2837 "scrub on devid %llu: filesystem on %s is not writable",
2838 devid, btrfs_dev_name(dev));
2839 ret = -EROFS;
2840 goto out;
2841 }
2842
2843 mutex_lock(&fs_info->scrub_lock);
2844 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
2845 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
2846 mutex_unlock(&fs_info->scrub_lock);
2847 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2848 ret = -EIO;
2849 goto out;
2850 }
2851
2852 down_read(&fs_info->dev_replace.rwsem);
2853 if (dev->scrub_ctx ||
2854 (!is_dev_replace &&
2855 btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
2856 up_read(&fs_info->dev_replace.rwsem);
2857 mutex_unlock(&fs_info->scrub_lock);
2858 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2859 ret = -EINPROGRESS;
2860 goto out;
2861 }
2862 up_read(&fs_info->dev_replace.rwsem);
2863
2864 sctx->readonly = readonly;
2865 dev->scrub_ctx = sctx;
2866 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2867
2868 /*
2869 * By checking @scrub_pause_req here, we can avoid the
2870 * race between committing a transaction and scrubbing.
2871 */
2872 __scrub_blocked_if_needed(fs_info);
2873 atomic_inc(&fs_info->scrubs_running);
2874 mutex_unlock(&fs_info->scrub_lock);
2875
2876 /*
2877 * In order to avoid deadlock with reclaim when there is a transaction
2878 * trying to pause scrub, make sure we use GFP_NOFS for all the
2879 * allocations done at btrfs_scrub_sectors() and scrub_sectors_for_parity()
2880 * invoked by our callees. The pausing request is done when the
2881 * transaction commit starts, and it blocks the transaction until scrub
2882 * is paused (done at specific points at scrub_stripe() or right above
2883 * before incrementing fs_info->scrubs_running).
2884 */
2885 nofs_flag = memalloc_nofs_save();
2886 if (!is_dev_replace) {
2887 u64 old_super_errors;
2888
2889 spin_lock(&sctx->stat_lock);
2890 old_super_errors = sctx->stat.super_errors;
2891 spin_unlock(&sctx->stat_lock);
2892
2893 btrfs_info(fs_info, "scrub: started on devid %llu", devid);
2894 /*
2895 * By holding the device list mutex, we can
2896 * kick off writing the supers in log tree sync.
2897 */
2898 mutex_lock(&fs_info->fs_devices->device_list_mutex);
2899 ret = scrub_supers(sctx, dev);
2900 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2901
2902 spin_lock(&sctx->stat_lock);
2903 /*
2904 * Super block errors found, but we can not commit a transaction
2905 * in the current context, since btrfs_commit_transaction() needs
2906 * to pause the currently running scrub (held by ourselves).
2907 */
2908 if (sctx->stat.super_errors > old_super_errors && !sctx->readonly)
2909 need_commit = true;
2910 spin_unlock(&sctx->stat_lock);
2911 }
2912
2913 if (!ret)
2914 ret = scrub_enumerate_chunks(sctx, dev, start, end);
2915 memalloc_nofs_restore(nofs_flag);
2916
2917 atomic_dec(&fs_info->scrubs_running);
2918 wake_up(&fs_info->scrub_pause_wait);
2919
2920 if (progress)
2921 memcpy(progress, &sctx->stat, sizeof(*progress));
2922
2923 if (!is_dev_replace)
2924 btrfs_info(fs_info, "scrub: %s on devid %llu with status: %d",
2925 ret ? "not finished" : "finished", devid, ret);
2926
2927 mutex_lock(&fs_info->scrub_lock);
2928 dev->scrub_ctx = NULL;
2929 mutex_unlock(&fs_info->scrub_lock);
2930
2931 scrub_workers_put(fs_info);
2932 scrub_put_ctx(sctx);
2933
2934 /*
2935 * We found some super block errors before, now try to force a
2936 * transaction commit, as scrub has finished.
2937 */
2938 if (need_commit) {
2939 struct btrfs_trans_handle *trans;
2940
2941 trans = btrfs_start_transaction(fs_info->tree_root, 0);
2942 if (IS_ERR(trans)) {
2943 ret = PTR_ERR(trans);
2944 btrfs_err(fs_info,
2945 "scrub: failed to start transaction to fix super block errors: %d", ret);
2946 return ret;
2947 }
2948 ret = btrfs_commit_transaction(trans);
2949 if (ret < 0)
2950 btrfs_err(fs_info,
2951 "scrub: failed to commit transaction to fix super block errors: %d", ret);
2952 }
2953 return ret;
2954 out:
2955 scrub_workers_put(fs_info);
2956 out_free_ctx:
2957 scrub_free_ctx(sctx);
2958
2959 return ret;
2960 }
2961
2962 void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
2963 {
2964 mutex_lock(&fs_info->scrub_lock);
2965 atomic_inc(&fs_info->scrub_pause_req);
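/*
 * Each running scrub increments scrubs_paused at its next pause point
 * (see scrub_pause_on()); wait until every running scrub has paused
 * before returning to the caller.
 */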
2966 while (atomic_read(&fs_info->scrubs_paused) !=
2967 atomic_read(&fs_info->scrubs_running)) {
2968 mutex_unlock(&fs_info->scrub_lock);
2969 wait_event(fs_info->scrub_pause_wait,
2970 atomic_read(&fs_info->scrubs_paused) ==
2971 atomic_read(&fs_info->scrubs_running));
2972 mutex_lock(&fs_info->scrub_lock);
2973 }
2974 mutex_unlock(&fs_info->scrub_lock);
2975 }
2976
2977 void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
2978 {
2979 atomic_dec(&fs_info->scrub_pause_req);
2980 wake_up(&fs_info->scrub_pause_wait);
2981 }
2982
2983 int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
2984 {
2985 mutex_lock(&fs_info->scrub_lock);
2986 if (!atomic_read(&fs_info->scrubs_running)) {
2987 mutex_unlock(&fs_info->scrub_lock);
2988 return -ENOTCONN;
2989 }
2990
2991 atomic_inc(&fs_info->scrub_cancel_req);
2992 while (atomic_read(&fs_info->scrubs_running)) {
2993 mutex_unlock(&fs_info->scrub_lock);
2994 wait_event(fs_info->scrub_pause_wait,
2995 atomic_read(&fs_info->scrubs_running) == 0);
2996 mutex_lock(&fs_info->scrub_lock);
2997 }
2998 atomic_dec(&fs_info->scrub_cancel_req);
2999 mutex_unlock(&fs_info->scrub_lock);
3000
3001 return 0;
3002 }
3003
3004 int btrfs_scrub_cancel_dev(struct btrfs_device *dev)
3005 {
3006 struct btrfs_fs_info *fs_info = dev->fs_info;
3007 struct scrub_ctx *sctx;
3008
3009 mutex_lock(&fs_info->scrub_lock);
3010 sctx = dev->scrub_ctx;
3011 if (!sctx) {
3012 mutex_unlock(&fs_info->scrub_lock);
3013 return -ENOTCONN;
3014 }
3015 atomic_inc(&sctx->cancel_req);
3016 while (dev->scrub_ctx) {
3017 mutex_unlock(&fs_info->scrub_lock);
3018 wait_event(fs_info->scrub_pause_wait,
3019 dev->scrub_ctx == NULL);
3020 mutex_lock(&fs_info->scrub_lock);
3021 }
3022 mutex_unlock(&fs_info->scrub_lock);
3023
3024 return 0;
3025 }
3026
3027 int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
3028 struct btrfs_scrub_progress *progress)
3029 {
3030 struct btrfs_dev_lookup_args args = { .devid = devid };
3031 struct btrfs_device *dev;
3032 struct scrub_ctx *sctx = NULL;
3033
3034 mutex_lock(&fs_info->fs_devices->device_list_mutex);
3035 dev = btrfs_find_device(fs_info->fs_devices, &args);
3036 if (dev)
3037 sctx = dev->scrub_ctx;
3038 if (sctx)
3039 memcpy(progress, &sctx->stat, sizeof(*progress));
3040 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3041
3042 return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
3043 }
3044