// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/sched/mm.h>
#include <crypto/hash.h>
#include "ctree.h"
#include "discard.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "raid56.h"
#include "block-group.h"
#include "zoned.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_block;
struct scrub_ctx;

/*
 * The following two values only influence the performance.
 *
 * The first one configures an upper limit for the number of (dynamically
 * allocated) pages that are added to a bio. The second one configures the
 * number of parallel and outstanding I/O operations.
 */
#define SCRUB_SECTORS_PER_BIO	32	/* 128KiB per bio for 4KiB pages */
#define SCRUB_BIOS_PER_SCTX	64	/* 8MiB per device in flight for 4KiB pages */

/*
 * The following value must be large enough to hold one block of the largest
 * supported node/leaf/sector size, counted in units of the smallest
 * supported sectorsize (4KiB).
 */
#define SCRUB_MAX_SECTORS_PER_BLOCK	(BTRFS_MAX_METADATA_BLOCKSIZE / SZ_4K)

#define SCRUB_MAX_PAGES			(DIV_ROUND_UP(BTRFS_MAX_METADATA_BLOCKSIZE, PAGE_SIZE))
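
/*
 * For example, with 4KiB pages and the 64KiB BTRFS_MAX_METADATA_BLOCKSIZE,
 * the limits above work out to: 32 * 4KiB = 128KiB per read bio,
 * 64 * 128KiB = 8MiB in flight per device, and at most
 * 64KiB / 4KiB = 16 sectors (and 16 pages) per scrub_block.
 */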

struct scrub_recover {
	refcount_t		refs;
	struct btrfs_io_context	*bioc;
	u64			map_length;
};

struct scrub_sector {
	struct scrub_block	*sblock;
	struct list_head	list;
	u64			flags;  /* extent flags */
	u64			generation;
	/* Offset in bytes to @sblock. */
	u32			offset;
	atomic_t		refs;
	unsigned int		have_csum:1;
	unsigned int		io_error:1;
	u8			csum[BTRFS_CSUM_SIZE];

	struct scrub_recover	*recover;
};

struct scrub_bio {
	int			index;
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	struct bio		*bio;
	blk_status_t		status;
	u64			logical;
	u64			physical;
	struct scrub_sector	*sectors[SCRUB_SECTORS_PER_BIO];
	int			sector_count;
	int			next_free;
	struct work_struct	work;
};

struct scrub_block {
	/*
	 * Each page will have its page::private used to record the logical
	 * bytenr.
	 */
	struct page		*pages[SCRUB_MAX_PAGES];
	struct scrub_sector	*sectors[SCRUB_MAX_SECTORS_PER_BLOCK];
	struct btrfs_device	*dev;
	/* Logical bytenr of the sblock */
	u64			logical;
	u64			physical;
	u64			physical_for_dev_replace;
	/* Length of sblock in bytes */
	u32			len;
	int			sector_count;
	int			mirror_num;

	atomic_t		outstanding_sectors;
	refcount_t		refs; /* free mem on transition to zero */
	struct scrub_ctx	*sctx;
	struct scrub_parity	*sparity;
	struct {
		unsigned int	header_error:1;
		unsigned int	checksum_error:1;
		unsigned int	no_io_error_seen:1;
		unsigned int	generation_error:1; /* also sets header_error */

		/*
		 * The following is for the data used to check parity, i.e.
		 * data with a checksum.
		 */
		unsigned int	data_corrected:1;
	};
	struct work_struct	work;
};

/* Used for the chunks with parity stripes, such as RAID5/6 */
struct scrub_parity {
	struct scrub_ctx	*sctx;

	struct btrfs_device	*scrub_dev;

	u64			logic_start;

	u64			logic_end;

	int			nsectors;

	u32			stripe_len;

	refcount_t		refs;

	struct list_head	sectors_list;

	/* Work of parity check and repair */
	struct work_struct	work;

	/* Mark the parity blocks which have data */
	unsigned long		dbitmap;

	/*
	 * Mark the parity blocks which have data, but where errors happened
	 * when reading or checking that data
	 */
	unsigned long		ebitmap;
};

struct scrub_ctx {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
	struct btrfs_fs_info	*fs_info;
	int			first_free;
	int			curr;
	atomic_t		bios_in_flight;
	atomic_t		workers_pending;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	int			sectors_per_bio;

	/* State of IO submission throttling affecting the associated device */
	ktime_t			throttle_deadline;
	u64			throttle_sent;

	int			is_dev_replace;
	u64			write_pointer;

	struct scrub_bio	*wr_curr_bio;
	struct mutex		wr_lock;
	struct btrfs_device	*wr_tgtdev;
	bool			flush_all_writes;

	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;

	/*
	 * Use a ref counter to avoid use-after-free issues. Scrub workers
	 * decrement bios_in_flight and workers_pending and then do a wakeup
	 * on the list_wait wait queue. We must ensure the main scrub task
	 * doesn't free the scrub context before or while the workers are
	 * doing the wakeup() call.
	 */
	refcount_t		refs;
};

struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	const char		*errstr;
	u64			physical;
	u64			logical;
	struct btrfs_device	*dev;
};

struct full_stripe_lock {
	struct rb_node		node;
	u64			logical;
	u64			refs;
	struct mutex		mutex;
};

#ifndef CONFIG_64BIT
/* This structure is for architectures whose (void *) is smaller than u64 */
struct scrub_page_private {
	u64 logical;
};
#endif

static int attach_scrub_page_private(struct page *page, u64 logical)
{
#ifdef CONFIG_64BIT
	attach_page_private(page, (void *)logical);
	return 0;
#else
	struct scrub_page_private *spp;

	spp = kmalloc(sizeof(*spp), GFP_KERNEL);
	if (!spp)
		return -ENOMEM;
	spp->logical = logical;
	attach_page_private(page, (void *)spp);
	return 0;
#endif
}
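
/*
 * Illustration only (not a helper used in this file): on 64-bit the
 * logical bytenr is stored directly in page::private, so reading it back
 * is a plain cast:
 *
 *	u64 logical = (u64)page_private(page);
 *
 * On 32-bit the same value has to be read through the heap-allocated
 * struct scrub_page_private instead.
 */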

static void detach_scrub_page_private(struct page *page)
{
#ifdef CONFIG_64BIT
	detach_page_private(page);
	return;
#else
	struct scrub_page_private *spp;

	spp = detach_page_private(page);
	kfree(spp);
	return;
#endif
}

static struct scrub_block *alloc_scrub_block(struct scrub_ctx *sctx,
					     struct btrfs_device *dev,
					     u64 logical, u64 physical,
					     u64 physical_for_dev_replace,
					     int mirror_num)
{
	struct scrub_block *sblock;

	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
	if (!sblock)
		return NULL;
	refcount_set(&sblock->refs, 1);
	sblock->sctx = sctx;
	sblock->logical = logical;
	sblock->physical = physical;
	sblock->physical_for_dev_replace = physical_for_dev_replace;
	sblock->dev = dev;
	sblock->mirror_num = mirror_num;
	sblock->no_io_error_seen = 1;
	/*
	 * scrub_block::pages will be allocated by alloc_scrub_sector() when
	 * the corresponding page is not yet allocated.
	 */
	return sblock;
}

/*
 * Allocate a new scrub sector and attach it to @sblock.
 *
 * Will also allocate new pages for @sblock if needed.
 */
static struct scrub_sector *alloc_scrub_sector(struct scrub_block *sblock,
					       u64 logical, gfp_t gfp)
{
	const pgoff_t page_index = (logical - sblock->logical) >> PAGE_SHIFT;
	struct scrub_sector *ssector;

	/* We must never have scrub_block exceed U32_MAX in size. */
	ASSERT(logical - sblock->logical < U32_MAX);

	ssector = kzalloc(sizeof(*ssector), gfp);
	if (!ssector)
		return NULL;

	/* Allocate a new page if the slot is not allocated */
	if (!sblock->pages[page_index]) {
		int ret;

		sblock->pages[page_index] = alloc_page(gfp);
		if (!sblock->pages[page_index]) {
			kfree(ssector);
			return NULL;
		}
		ret = attach_scrub_page_private(sblock->pages[page_index],
				sblock->logical + (page_index << PAGE_SHIFT));
		if (ret < 0) {
			kfree(ssector);
			__free_page(sblock->pages[page_index]);
			sblock->pages[page_index] = NULL;
			return NULL;
		}
	}

	atomic_set(&ssector->refs, 1);
	ssector->sblock = sblock;
	/* The sector to be added should not be used */
	ASSERT(sblock->sectors[sblock->sector_count] == NULL);
	ssector->offset = logical - sblock->logical;

	/* The sector count must be smaller than the limit */
	ASSERT(sblock->sector_count < SCRUB_MAX_SECTORS_PER_BLOCK);

	sblock->sectors[sblock->sector_count] = ssector;
	sblock->sector_count++;
	sblock->len += sblock->sctx->fs_info->sectorsize;

	return ssector;
}
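
/*
 * Worked example of the math above, assuming a 4KiB sectorsize on a 64KiB
 * page (subpage case): for logical == sblock->logical + 20KiB, page_index
 * is 20KiB >> 16 == 0 and ssector->offset is 20KiB, i.e. the sector lives
 * in pages[0] at in-page offset 20KiB. With 4KiB pages the same logical
 * would map to pages[5] at in-page offset 0.
 */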

static struct page *scrub_sector_get_page(struct scrub_sector *ssector)
{
	struct scrub_block *sblock = ssector->sblock;
	pgoff_t index;
	/*
	 * When calling this function, ssector must be already attached to the
	 * parent sblock.
	 */
	ASSERT(sblock);

	/* The range should be inside the sblock range */
	ASSERT(ssector->offset < sblock->len);

	index = ssector->offset >> PAGE_SHIFT;
	ASSERT(index < SCRUB_MAX_PAGES);
	ASSERT(sblock->pages[index]);
	ASSERT(PagePrivate(sblock->pages[index]));
	return sblock->pages[index];
}

static unsigned int scrub_sector_get_page_offset(struct scrub_sector *ssector)
{
	struct scrub_block *sblock = ssector->sblock;

	/*
	 * When calling this function, ssector must be already attached to the
	 * parent sblock.
	 */
	ASSERT(sblock);

	/* The range should be inside the sblock range */
	ASSERT(ssector->offset < sblock->len);

	return offset_in_page(ssector->offset);
}

static char *scrub_sector_get_kaddr(struct scrub_sector *ssector)
{
	return page_address(scrub_sector_get_page(ssector)) +
	       scrub_sector_get_page_offset(ssector);
}

static int bio_add_scrub_sector(struct bio *bio, struct scrub_sector *ssector,
				unsigned int len)
{
	return bio_add_page(bio, scrub_sector_get_page(ssector), len,
			    scrub_sector_get_page_offset(ssector));
}

static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck[]);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock,
				int retry_failed_mirror);
static void scrub_recheck_block_checksum(struct scrub_block *sblock);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good);
static int scrub_repair_sector_from_good_copy(struct scrub_block *sblock_bad,
					      struct scrub_block *sblock_good,
					      int sector_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_sector_to_dev_replace(struct scrub_block *sblock,
					     int sector_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_sector_get(struct scrub_sector *sector);
static void scrub_sector_put(struct scrub_sector *sector);
static void scrub_parity_get(struct scrub_parity *sparity);
static void scrub_parity_put(struct scrub_parity *sparity);
static int scrub_sectors(struct scrub_ctx *sctx, u64 logical, u32 len,
			 u64 physical, struct btrfs_device *dev, u64 flags,
			 u64 gen, int mirror_num, u8 *csum,
			 u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio);
static void scrub_bio_end_io_worker(struct work_struct *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_find_good_copy(struct btrfs_fs_info *fs_info,
				 u64 extent_logical, u32 extent_len,
				 u64 *extent_physical,
				 struct btrfs_device **extent_dev,
				 int *extent_mirror_num);
static int scrub_add_sector_to_wr_bio(struct scrub_ctx *sctx,
				      struct scrub_sector *sector);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio);
static void scrub_wr_bio_end_io_worker(struct work_struct *work);
static void scrub_put_ctx(struct scrub_ctx *sctx);

static inline int scrub_is_page_on_raid56(struct scrub_sector *sector)
{
	return sector->recover &&
	       (sector->recover->bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
}

static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
	refcount_inc(&sctx->refs);
	atomic_inc(&sctx->bios_in_flight);
}

static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
	atomic_dec(&sctx->bios_in_flight);
	wake_up(&sctx->list_wait);
	scrub_put_ctx(sctx);
}

static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
}

static void scrub_pause_on(struct btrfs_fs_info *fs_info)
{
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_pause_off(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	__scrub_blocked_if_needed(fs_info);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	scrub_pause_on(fs_info);
	scrub_pause_off(fs_info);
}
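
/*
 * Sketch of how these helpers are meant to be paired: a scrub loop marks
 * itself paused around work that may run concurrently with a transaction
 * commit, so that btrfs_scrub_pause() does not have to wait for it:
 *
 *	scrub_pause_on(fs_info);
 *	...do work that is safe while the scrub counts as paused...
 *	scrub_pause_off(fs_info);
 *
 * scrub_blocked_if_needed() is the empty pause point that only yields to
 * a pending pause request.
 */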

/*
 * Insert new full stripe lock into full stripe locks tree
 *
 * Return pointer to existing or newly inserted full_stripe_lock structure if
 * everything works well.
 * Return ERR_PTR(-ENOMEM) if we failed to allocate memory
 *
 * NOTE: caller must hold full_stripe_locks_root->lock before calling this
 * function
 */
static struct full_stripe_lock *insert_full_stripe_lock(
		struct btrfs_full_stripe_locks_tree *locks_root,
		u64 fstripe_logical)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct full_stripe_lock *entry;
	struct full_stripe_lock *ret;

	lockdep_assert_held(&locks_root->lock);

	p = &locks_root->root.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct full_stripe_lock, node);
		if (fstripe_logical < entry->logical) {
			p = &(*p)->rb_left;
		} else if (fstripe_logical > entry->logical) {
			p = &(*p)->rb_right;
		} else {
			entry->refs++;
			return entry;
		}
	}

	/*
	 * Insert new lock.
	 */
	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);
	ret->logical = fstripe_logical;
	ret->refs = 1;
	mutex_init(&ret->mutex);

	rb_link_node(&ret->node, parent, p);
	rb_insert_color(&ret->node, &locks_root->root);
	return ret;
}

/*
 * Search for a full stripe lock of a block group
 *
 * Return pointer to existing full stripe lock if found
 * Return NULL if not found
 */
static struct full_stripe_lock *search_full_stripe_lock(
		struct btrfs_full_stripe_locks_tree *locks_root,
		u64 fstripe_logical)
{
	struct rb_node *node;
	struct full_stripe_lock *entry;

	lockdep_assert_held(&locks_root->lock);

	node = locks_root->root.rb_node;
	while (node) {
		entry = rb_entry(node, struct full_stripe_lock, node);
		if (fstripe_logical < entry->logical)
			node = node->rb_left;
		else if (fstripe_logical > entry->logical)
			node = node->rb_right;
		else
			return entry;
	}
	return NULL;
}

/*
 * Helper to get full stripe logical from a normal bytenr.
 *
 * Caller must ensure @cache is a RAID56 block group.
 */
static u64 get_full_stripe_logical(struct btrfs_block_group *cache, u64 bytenr)
{
	u64 ret;

	/*
	 * Due to chunk item size limit, full stripe length should not be
	 * larger than U32_MAX. Just a sanity check here.
	 */
	WARN_ON_ONCE(cache->full_stripe_len >= U32_MAX);

	/*
	 * round_down() can only handle power of 2, while RAID56 full
	 * stripe length can be 64KiB * n, so we need to manually round down.
	 */
	ret = div64_u64(bytenr - cache->start, cache->full_stripe_len) *
			cache->full_stripe_len + cache->start;
	return ret;
}
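
/*
 * Worked example: with cache->start == 1GiB and a RAID5 chunk with three
 * data stripes (full_stripe_len == 3 * 64KiB == 192KiB), a bytenr of
 * 1GiB + 500KiB gives div64_u64(500KiB, 192KiB) == 2, so the full stripe
 * covering it starts at 1GiB + 384KiB.
 */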

/*
 * Lock a full stripe to avoid concurrency of recovery and read
 *
 * It's only used for profiles with parities (RAID5/6), for other profiles it
 * does nothing.
 *
 * Return 0 if we locked the full stripe covering @bytenr, with a mutex held.
 * The caller must then call unlock_full_stripe() in the same context.
 *
 * Return <0 if we encounter an error.
 */
static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
			    bool *locked_ret)
{
	struct btrfs_block_group *bg_cache;
	struct btrfs_full_stripe_locks_tree *locks_root;
	struct full_stripe_lock *existing;
	u64 fstripe_start;
	int ret = 0;

	*locked_ret = false;
	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg_cache) {
		ASSERT(0);
		return -ENOENT;
	}

	/* Profiles not based on parity don't need full stripe lock */
	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
		goto out;
	locks_root = &bg_cache->full_stripe_locks_root;

	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);

	/* Now insert the full stripe lock */
	mutex_lock(&locks_root->lock);
	existing = insert_full_stripe_lock(locks_root, fstripe_start);
	mutex_unlock(&locks_root->lock);
	if (IS_ERR(existing)) {
		ret = PTR_ERR(existing);
		goto out;
	}
	mutex_lock(&existing->mutex);
	*locked_ret = true;
out:
	btrfs_put_block_group(bg_cache);
	return ret;
}
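
/*
 * The lock/unlock pair is meant to be used like this (sketch):
 *
 *	bool locked = false;
 *
 *	ret = lock_full_stripe(fs_info, logical, &locked);
 *	if (ret < 0)
 *		return ret;
 *	...recheck/repair inside the full stripe...
 *	ret = unlock_full_stripe(fs_info, logical, locked);
 *
 * Passing @locked back unchanged makes the unlock a no-op for profiles
 * without parity, where lock_full_stripe() never took the mutex.
 */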

/*
 * Unlock a full stripe.
 *
 * NOTE: Caller must ensure this is called from the same context as the
 * corresponding lock_full_stripe().
 *
 * Return 0 if we unlock the full stripe without problem.
 * Return <0 on error.
 */
static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
			      bool locked)
{
	struct btrfs_block_group *bg_cache;
	struct btrfs_full_stripe_locks_tree *locks_root;
	struct full_stripe_lock *fstripe_lock;
	u64 fstripe_start;
	bool freeit = false;
	int ret = 0;

	/* If we didn't acquire full stripe lock, no need to continue */
	if (!locked)
		return 0;

	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg_cache) {
		ASSERT(0);
		return -ENOENT;
	}
	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
		goto out;

	locks_root = &bg_cache->full_stripe_locks_root;
	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);

	mutex_lock(&locks_root->lock);
	fstripe_lock = search_full_stripe_lock(locks_root, fstripe_start);
	/* Unpaired unlock_full_stripe() detected */
	if (!fstripe_lock) {
		WARN_ON(1);
		ret = -ENOENT;
		mutex_unlock(&locks_root->lock);
		goto out;
	}

	if (fstripe_lock->refs == 0) {
		WARN_ON(1);
		btrfs_warn(fs_info, "full stripe lock at %llu refcount underflow",
			fstripe_lock->logical);
	} else {
		fstripe_lock->refs--;
	}

	if (fstripe_lock->refs == 0) {
		rb_erase(&fstripe_lock->node, &locks_root->root);
		freeit = true;
	}
	mutex_unlock(&locks_root->lock);

	mutex_unlock(&fstripe_lock->mutex);
	if (freeit)
		kfree(fstripe_lock);
out:
	btrfs_put_block_group(bg_cache);
	return ret;
}

static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}

static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	/* this can happen when scrub is cancelled */
	if (sctx->curr != -1) {
		struct scrub_bio *sbio = sctx->bios[sctx->curr];

		for (i = 0; i < sbio->sector_count; i++)
			scrub_block_put(sbio->sectors[i]->sblock);
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio = sctx->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	kfree(sctx->wr_curr_bio);
	scrub_free_csums(sctx);
	kfree(sctx);
}

static void scrub_put_ctx(struct scrub_ctx *sctx)
{
	if (refcount_dec_and_test(&sctx->refs))
		scrub_free_ctx(sctx);
}

static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
		struct btrfs_fs_info *fs_info, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int i;

	sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
	if (!sctx)
		goto nomem;
	refcount_set(&sctx->refs, 1);
	sctx->is_dev_replace = is_dev_replace;
	sctx->sectors_per_bio = SCRUB_SECTORS_PER_BIO;
	sctx->curr = -1;
	sctx->fs_info = fs_info;
	INIT_LIST_HEAD(&sctx->csum_list);
	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
		if (!sbio)
			goto nomem;
		sctx->bios[i] = sbio;

		sbio->index = i;
		sbio->sctx = sctx;
		sbio->sector_count = 0;
		INIT_WORK(&sbio->work, scrub_bio_end_io_worker);

		if (i != SCRUB_BIOS_PER_SCTX - 1)
			sctx->bios[i]->next_free = i + 1;
		else
			sctx->bios[i]->next_free = -1;
	}
	sctx->first_free = 0;
	atomic_set(&sctx->bios_in_flight, 0);
	atomic_set(&sctx->workers_pending, 0);
	atomic_set(&sctx->cancel_req, 0);

	spin_lock_init(&sctx->list_lock);
	spin_lock_init(&sctx->stat_lock);
	init_waitqueue_head(&sctx->list_wait);
	sctx->throttle_deadline = 0;

	WARN_ON(sctx->wr_curr_bio != NULL);
	mutex_init(&sctx->wr_lock);
	sctx->wr_curr_bio = NULL;
	if (is_dev_replace) {
		WARN_ON(!fs_info->dev_replace.tgtdev);
		sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
		sctx->flush_all_writes = false;
	}

	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}
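
/*
 * The bios array set up above is threaded into a simple free list through
 * scrub_bio::next_free: first_free starts at 0 and bios[i]->next_free
 * points to i + 1, with -1 terminating the list. Grabbing a free sbio is
 * then just (sketch, the real consumers do this under list_lock):
 *
 *	sbio = sctx->bios[sctx->first_free];
 *	sctx->first_free = sbio->next_free;
 */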

static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
				     void *warn_ctx)
{
	u32 nlink;
	int ret;
	int i;
	unsigned nofs_flag;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = warn_ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key key;

	local_root = btrfs_get_fs_root(fs_info, root, true);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/*
	 * this makes the path point to (inum INODE_ITEM ioff)
	 */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
	if (ret) {
		btrfs_put_root(local_root);
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
					struct btrfs_inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	/*
	 * init_path might indirectly call vmalloc, or use GFP_KERNEL. Scrub
	 * uses GFP_NOFS in this context, so we keep it consistent but it does
	 * not seem to be strictly necessary.
	 */
	nofs_flag = memalloc_nofs_save();
	ipath = init_ipath(4096, local_root, swarn->path);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(ipath)) {
		btrfs_put_root(local_root);
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);

	if (ret < 0)
		goto err;

	/*
	 * We deliberately ignore that ipath might have been too small to
	 * hold all of the paths here
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		btrfs_warn_in_rcu(fs_info,
"%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %u, links %u (path: %s)",
				  swarn->errstr, swarn->logical,
				  rcu_str_deref(swarn->dev->name),
				  swarn->physical,
				  root, inum, offset,
				  fs_info->sectorsize, nlink,
				  (char *)(unsigned long)ipath->fspath->val[i]);

	btrfs_put_root(local_root);
	free_ipath(ipath);
	return 0;

err:
	btrfs_warn_in_rcu(fs_info,
			  "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
			  swarn->errstr, swarn->logical,
			  rcu_str_deref(swarn->dev->name),
			  swarn->physical,
			  root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}

static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	unsigned long ptr = 0;
	u64 extent_item_pos;
	u64 flags = 0;
	u64 ref_root;
	u32 item_size;
	u8 ref_level = 0;
	int ret;

	WARN_ON(sblock->sector_count < 1);
	dev = sblock->dev;
	fs_info = sblock->sctx->fs_info;

	/* Super block error, no need to search extent tree. */
	if (sblock->sectors[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
		btrfs_warn_in_rcu(fs_info, "%s on device %s, physical %llu",
			errstr, rcu_str_deref(dev->name),
			sblock->physical);
		return;
	}
	path = btrfs_alloc_path();
	if (!path)
		return;

	swarn.physical = sblock->physical;
	swarn.logical = sblock->logical;
	swarn.errstr = errstr;
	swarn.dev = NULL;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
				  &flags);
	if (ret < 0)
		goto out;

	extent_item_pos = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size(eb, path->slots[0]);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			btrfs_warn_in_rcu(fs_info,
"%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
				errstr, swarn.logical,
				rcu_str_deref(dev->name),
				swarn.physical,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
		btrfs_release_path(path);
	} else {
		btrfs_release_path(path);
		swarn.path = path;
		swarn.dev = dev;
		iterate_extent_inodes(fs_info, found_key.objectid,
					extent_item_pos, 1,
					scrub_print_warning_inode, &swarn, false);
	}

out:
	btrfs_free_path(path);
}

static inline void scrub_get_recover(struct scrub_recover *recover)
{
	refcount_inc(&recover->refs);
}

static inline void scrub_put_recover(struct btrfs_fs_info *fs_info,
				     struct scrub_recover *recover)
{
	if (refcount_dec_and_test(&recover->refs)) {
		btrfs_bio_counter_dec(fs_info);
		btrfs_put_bioc(recover->bioc);
		kfree(recover);
	}
}

/*
 * scrub_handle_errored_block gets called when either verification of the
 * sectors failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all sectors in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
	struct scrub_ctx *sctx = sblock_to_check->sctx;
	struct btrfs_device *dev = sblock_to_check->dev;
	struct btrfs_fs_info *fs_info;
	u64 logical;
	unsigned int failed_mirror_index;
	unsigned int is_metadata;
	unsigned int have_csum;
	/* One scrub_block for each mirror */
	struct scrub_block *sblocks_for_recheck[BTRFS_MAX_MIRRORS] = { 0 };
	struct scrub_block *sblock_bad;
	int ret;
	int mirror_index;
	int sector_num;
	int success;
	bool full_stripe_locked;
	unsigned int nofs_flag;
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	BUG_ON(sblock_to_check->sector_count < 1);
	fs_info = sctx->fs_info;
	if (sblock_to_check->sectors[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
		/*
		 * If we find an error in a super block, we just report it.
		 * It will get written with the next transaction commit
		 * anyway.
		 */
		scrub_print_warning("super block error", sblock_to_check);
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS);
		return 0;
	}
	logical = sblock_to_check->logical;
	ASSERT(sblock_to_check->mirror_num);
	failed_mirror_index = sblock_to_check->mirror_num - 1;
	is_metadata = !(sblock_to_check->sectors[0]->flags &
			BTRFS_EXTENT_FLAG_DATA);
	have_csum = sblock_to_check->sectors[0]->have_csum;

	if (!sctx->is_dev_replace && btrfs_repair_one_zone(fs_info, logical))
		return 0;

	/*
	 * We must use GFP_NOFS because the scrub task might be waiting for a
	 * worker task executing this function and in turn a transaction commit
	 * might be waiting the scrub task to pause (which needs to wait for all
	 * the worker tasks to complete before pausing).
	 * We do allocations in the workers through insert_full_stripe_lock()
	 * and scrub_add_sector_to_wr_bio(), which happens down the call chain of
	 * this function.
	 */
	nofs_flag = memalloc_nofs_save();
	/*
	 * For RAID5/6, a race can happen with a scrub thread for a different
	 * device. On data corruption, the parity and data threads will both
	 * try to recover the data. The race can lead to a doubly added csum
	 * error, or even an unrecoverable error.
	 */
	ret = lock_full_stripe(fs_info, logical, &full_stripe_locked);
	if (ret < 0) {
		memalloc_nofs_restore(nofs_flag);
		spin_lock(&sctx->stat_lock);
		if (ret == -ENOMEM)
			sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		return ret;
	}

	/*
	 * Read all mirrors one after the other. This includes re-reading the
	 * extent or metadata block that failed (that was the cause that this
	 * fixup code is called) another time, sector by sector this time in
	 * order to know which sectors caused I/O errors and which ones are
	 * good (for all mirrors).
	 * It is the goal to handle the situation when more than one
	 * mirror contains I/O errors, but the errors do not
	 * overlap, i.e. the data can be repaired by selecting the
	 * sectors from those mirrors without I/O error on the
	 * particular sectors. One example (with blocks >= 2 * sectorsize)
	 * would be that mirror #1 has an I/O error on the first sector,
	 * the second sector is good, and mirror #2 has an I/O error on
	 * the second sector, but the first sector is good.
	 * Then the first sector of the first mirror can be repaired by
	 * taking the first sector of the second mirror, and the
	 * second sector of the second mirror can be repaired by
	 * copying the contents of the 2nd sector of the 1st mirror.
	 * One more note: if the sectors of one mirror contain I/O
	 * errors, the checksum cannot be verified. In order to get
	 * the best data for repairing, the first attempt is to find
	 * a mirror without I/O errors and with a validated checksum.
	 * Only if this is not possible, the sectors are picked from
	 * mirrors with I/O errors without considering the checksum.
	 * If the latter is the case, at the end, the checksum of the
	 * repaired area is verified in order to correctly maintain
	 * the statistics.
	 */
	for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS; mirror_index++) {
		/*
		 * Note: the two members refs and outstanding_sectors are not
		 * used in the blocks that are used for the recheck procedure.
		 *
		 * But alloc_scrub_block() will initialize sblock::ref anyway,
		 * so we can use scrub_block_put() to clean them up.
		 *
		 * And here we don't setup the physical/dev for the sblock yet,
		 * they will be correctly initialized in scrub_setup_recheck_block().
		 */
		sblocks_for_recheck[mirror_index] = alloc_scrub_block(sctx, NULL,
							logical, 0, 0, mirror_index);
		if (!sblocks_for_recheck[mirror_index]) {
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			sctx->stat.read_errors++;
			sctx->stat.uncorrectable_errors++;
			spin_unlock(&sctx->stat_lock);
			btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
			goto out;
		}
	}

	/* Setup the context, map the logical blocks and alloc the sectors */
	ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
	if (ret) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}
	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
	sblock_bad = sblocks_for_recheck[failed_mirror_index];

	/* build and submit the bios for the failed mirror, check checksums */
	scrub_recheck_block(fs_info, sblock_bad, 1);

	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
	    sblock_bad->no_io_error_seen) {
		/*
		 * The error disappeared after reading sector by sector, or
		 * the area was part of a huge bio and other parts of the
		 * bio caused I/O errors, or the block layer merged several
		 * read requests into one and the error is caused by a
		 * different bio (usually one of the two latter cases is
		 * the cause)
		 */
		spin_lock(&sctx->stat_lock);
		sctx->stat.unverified_errors++;
		sblock_to_check->data_corrected = 1;
		spin_unlock(&sctx->stat_lock);

		if (sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock_bad);
		goto out;
	}

	if (!sblock_bad->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&rs))
			scrub_print_warning("i/o error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
	} else if (sblock_bad->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.csum_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&rs))
			scrub_print_warning("checksum error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev,
					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
	} else if (sblock_bad->header_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.verify_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&rs))
			scrub_print_warning("checksum/header error",
					    sblock_to_check);
		if (sblock_bad->generation_error)
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
	}

	if (sctx->readonly) {
		ASSERT(!sctx->is_dev_replace);
		goto out;
	}

	/*
	 * now build and submit the bios for the other mirrors, check
	 * checksums.
	 * First try to pick the mirror which is completely without I/O
	 * errors and also does not have a checksum error.
	 * If one is found, and if a checksum is present, the full block
	 * that is known to contain an error is rewritten. Afterwards
	 * the block is known to be corrected.
	 * If a mirror is found which is completely correct, and no
	 * checksum is present, only those sectors are rewritten that had
	 * an I/O error in the block to be repaired, since it cannot be
	 * determined, which copy of the other sectors is better (and it
	 * could happen otherwise that a correct sector would be
	 * overwritten by a bad one).
	 */
	for (mirror_index = 0; ;mirror_index++) {
		struct scrub_block *sblock_other;

		if (mirror_index == failed_mirror_index)
			continue;

		/* raid56's mirror can be more than BTRFS_MAX_MIRRORS */
		if (!scrub_is_page_on_raid56(sblock_bad->sectors[0])) {
			if (mirror_index >= BTRFS_MAX_MIRRORS)
				break;
			if (!sblocks_for_recheck[mirror_index]->sector_count)
				break;

			sblock_other = sblocks_for_recheck[mirror_index];
		} else {
			struct scrub_recover *r = sblock_bad->sectors[0]->recover;
			int max_allowed = r->bioc->num_stripes - r->bioc->num_tgtdevs;

			if (mirror_index >= max_allowed)
				break;
			if (!sblocks_for_recheck[1]->sector_count)
				break;

			ASSERT(failed_mirror_index == 0);
			sblock_other = sblocks_for_recheck[1];
			sblock_other->mirror_num = 1 + mirror_index;
		}

		/* build and submit the bios, check checksums */
		scrub_recheck_block(fs_info, sblock_other, 0);

		if (!sblock_other->header_error &&
		    !sblock_other->checksum_error &&
		    sblock_other->no_io_error_seen) {
			if (sctx->is_dev_replace) {
				scrub_write_block_to_dev_replace(sblock_other);
				goto corrected_error;
			} else {
				ret = scrub_repair_block_from_good_copy(
						sblock_bad, sblock_other);
				if (!ret)
					goto corrected_error;
			}
		}
	}

	if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
		goto did_not_correct_error;

	/*
	 * In case of I/O errors in the area that is supposed to be
	 * repaired, continue by picking good copies of those sectors.
	 * Select the good sectors from mirrors to rewrite bad sectors from
	 * the area to fix. Afterwards verify the checksum of the block
	 * that is supposed to be repaired. This verification step is
	 * only done for the purpose of statistic counting and for the
	 * final scrub report, whether errors remain.
	 * A perfect algorithm could make use of the checksum and try
	 * all possible combinations of sectors from the different mirrors
	 * until the checksum verification succeeds. For example, when
	 * the 2nd sector of mirror #1 faces I/O errors, and the 2nd sector
	 * of mirror #2 is readable but the final checksum test fails,
	 * then the 2nd sector of mirror #3 could be tried, to see whether
	 * the final checksum then succeeds. But this would be a rare
	 * exception and is therefore not implemented. At least it is
	 * avoided that the good copy is overwritten.
	 * A more useful improvement would be to pick the sectors
	 * without I/O error based on sector sizes (512 bytes on legacy
	 * disks) instead of on sectorsize. Then maybe 512 byte of one
	 * mirror could be repaired by taking 512 byte of a different
	 * mirror, even if other 512 byte sectors in the same sectorsize
	 * area are unreadable.
	 */
	success = 1;
	for (sector_num = 0; sector_num < sblock_bad->sector_count;
	     sector_num++) {
		struct scrub_sector *sector_bad = sblock_bad->sectors[sector_num];
		struct scrub_block *sblock_other = NULL;

		/* Skip no-io-error sectors in scrub */
		if (!sector_bad->io_error && !sctx->is_dev_replace)
			continue;

		if (scrub_is_page_on_raid56(sblock_bad->sectors[0])) {
			/*
			 * In case of dev replace, if the raid56 rebuild
			 * process didn't produce correct data, then copy the
			 * content in sblock_bad to make sure the target device
			 * is identical to the source device, instead of
			 * writing garbage data in the sblock_for_recheck array
			 * to the target device.
			 */
			sblock_other = NULL;
		} else if (sector_bad->io_error) {
			/* Try to find no-io-error sector in mirrors */
			for (mirror_index = 0;
			     mirror_index < BTRFS_MAX_MIRRORS &&
			     sblocks_for_recheck[mirror_index]->sector_count > 0;
			     mirror_index++) {
				if (!sblocks_for_recheck[mirror_index]->
				    sectors[sector_num]->io_error) {
					sblock_other = sblocks_for_recheck[mirror_index];
					break;
				}
			}
			if (!sblock_other)
				success = 0;
		}

		if (sctx->is_dev_replace) {
			/*
			 * Did not find a mirror to fetch the sector from.
			 * scrub_write_sector_to_dev_replace() handles this
			 * case (sector->io_error), by filling the block with
			 * zeros before submitting the write request
			 */
			if (!sblock_other)
				sblock_other = sblock_bad;

			if (scrub_write_sector_to_dev_replace(sblock_other,
							      sector_num) != 0) {
				atomic64_inc(
					&fs_info->dev_replace.num_write_errors);
				success = 0;
			}
		} else if (sblock_other) {
			ret = scrub_repair_sector_from_good_copy(sblock_bad,
								 sblock_other,
								 sector_num, 0);
			if (!ret)
				sector_bad->io_error = 0;
			else
				success = 0;
		}
	}

	if (success && !sctx->is_dev_replace) {
		if (is_metadata || have_csum) {
			/*
			 * need to verify the checksum now that all
			 * sectors on disk are repaired (the write
			 * request for data to be repaired is on its way).
			 * Just be lazy and use scrub_recheck_block()
			 * which re-reads the data before the checksum
			 * is verified, but most likely the data comes out
			 * of the page cache.
			 */
			scrub_recheck_block(fs_info, sblock_bad, 1);
			if (!sblock_bad->header_error &&
			    !sblock_bad->checksum_error &&
			    sblock_bad->no_io_error_seen)
				goto corrected_error;
			else
				goto did_not_correct_error;
		} else {
corrected_error:
			spin_lock(&sctx->stat_lock);
			sctx->stat.corrected_errors++;
			sblock_to_check->data_corrected = 1;
			spin_unlock(&sctx->stat_lock);
			btrfs_err_rl_in_rcu(fs_info,
				"fixed up error at logical %llu on dev %s",
				logical, rcu_str_deref(dev->name));
		}
	} else {
did_not_correct_error:
		spin_lock(&sctx->stat_lock);
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_err_rl_in_rcu(fs_info,
			"unable to fixup (regular) error at logical %llu on dev %s",
			logical, rcu_str_deref(dev->name));
	}

out:
	for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS; mirror_index++) {
		struct scrub_block *sblock = sblocks_for_recheck[mirror_index];
		struct scrub_recover *recover;
		int sector_index;

		/* Not allocated, continue checking the next mirror */
		if (!sblock)
			continue;

		for (sector_index = 0; sector_index < sblock->sector_count;
		     sector_index++) {
			/*
			 * Here we just cleanup the recover, each sector will be
			 * properly cleaned up by later scrub_block_put()
			 */
			recover = sblock->sectors[sector_index]->recover;
			if (recover) {
				scrub_put_recover(fs_info, recover);
				sblock->sectors[sector_index]->recover = NULL;
			}
		}
		scrub_block_put(sblock);
	}

	ret = unlock_full_stripe(fs_info, logical, full_stripe_locked);
	memalloc_nofs_restore(nofs_flag);
	if (ret < 0)
		return ret;
	return 0;
}

static inline int scrub_nr_raid_mirrors(struct btrfs_io_context *bioc)
{
	if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID5)
		return 2;
	else if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID6)
		return 3;
	else
		return (int)bioc->num_stripes;
}
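
/*
 * Example: for RAID6 the three "mirrors" are the plain data read plus two
 * reconstruction alternatives involving the P and Q stripes; RAID5 has
 * only one reconstruction alternative, hence two. For the other profiles
 * every stripe in the bioc is a real mirror.
 */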

static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
						 u64 *raid_map,
						 int nstripes, int mirror,
						 int *stripe_index,
						 u64 *stripe_offset)
{
	int i;

	if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		/* RAID5/6 */
		for (i = 0; i < nstripes; i++) {
			if (raid_map[i] == RAID6_Q_STRIPE ||
			    raid_map[i] == RAID5_P_STRIPE)
				continue;

			if (logical >= raid_map[i] &&
			    logical < raid_map[i] + BTRFS_STRIPE_LEN)
				break;
		}

		*stripe_index = i;
		*stripe_offset = logical - raid_map[i];
	} else {
		/* The other RAID type */
		*stripe_index = mirror;
		*stripe_offset = 0;
	}
}
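
/*
 * Worked example for the RAID56 branch, assuming
 * raid_map == { 0, 64KiB, RAID5_P_STRIPE } and logical == 80KiB: the P
 * stripe is skipped, stripe 1 matches (64KiB <= 80KiB < 128KiB), so
 * *stripe_index becomes 1 and *stripe_offset becomes 16KiB.
 */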

static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck[])
{
	struct scrub_ctx *sctx = original_sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	u64 logical = original_sblock->logical;
	u64 length = original_sblock->sector_count << fs_info->sectorsize_bits;
	u64 generation = original_sblock->sectors[0]->generation;
	u64 flags = original_sblock->sectors[0]->flags;
	u64 have_csum = original_sblock->sectors[0]->have_csum;
	struct scrub_recover *recover;
	struct btrfs_io_context *bioc;
	u64 sublen;
	u64 mapped_length;
	u64 stripe_offset;
	int stripe_index;
	int sector_index = 0;
	int mirror_index;
	int nmirrors;
	int ret;

	while (length > 0) {
		sublen = min_t(u64, length, fs_info->sectorsize);
		mapped_length = sublen;
		bioc = NULL;

		/*
		 * With a length of sectorsize, each returned stripe represents
		 * one mirror
		 */
		btrfs_bio_counter_inc_blocked(fs_info);
		ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
				       logical, &mapped_length, &bioc);
		if (ret || !bioc || mapped_length < sublen) {
			btrfs_put_bioc(bioc);
			btrfs_bio_counter_dec(fs_info);
			return -EIO;
		}

		recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
		if (!recover) {
			btrfs_put_bioc(bioc);
			btrfs_bio_counter_dec(fs_info);
			return -ENOMEM;
		}

		refcount_set(&recover->refs, 1);
		recover->bioc = bioc;
		recover->map_length = mapped_length;

		ASSERT(sector_index < SCRUB_MAX_SECTORS_PER_BLOCK);

		nmirrors = min(scrub_nr_raid_mirrors(bioc), BTRFS_MAX_MIRRORS);

		for (mirror_index = 0; mirror_index < nmirrors;
		     mirror_index++) {
			struct scrub_block *sblock;
			struct scrub_sector *sector;

			sblock = sblocks_for_recheck[mirror_index];
			sblock->sctx = sctx;

			sector = alloc_scrub_sector(sblock, logical, GFP_NOFS);
			if (!sector) {
				spin_lock(&sctx->stat_lock);
				sctx->stat.malloc_errors++;
				spin_unlock(&sctx->stat_lock);
				scrub_put_recover(fs_info, recover);
				return -ENOMEM;
			}
			sector->flags = flags;
			sector->generation = generation;
			sector->have_csum = have_csum;
			if (have_csum)
				memcpy(sector->csum,
				       original_sblock->sectors[0]->csum,
				       sctx->fs_info->csum_size);

			scrub_stripe_index_and_offset(logical,
						      bioc->map_type,
						      bioc->raid_map,
						      bioc->num_stripes -
						      bioc->num_tgtdevs,
						      mirror_index,
						      &stripe_index,
						      &stripe_offset);
			/*
			 * We're at the first sector, also populate @sblock
			 * physical and dev.
			 */
			if (sector_index == 0) {
				sblock->physical =
					bioc->stripes[stripe_index].physical +
					stripe_offset;
				sblock->dev = bioc->stripes[stripe_index].dev;
				sblock->physical_for_dev_replace =
					original_sblock->physical_for_dev_replace;
			}

			BUG_ON(sector_index >= original_sblock->sector_count);
			scrub_get_recover(recover);
			sector->recover = recover;
		}
		scrub_put_recover(fs_info, recover);
		length -= sublen;
		logical += sublen;
		sector_index++;
	}

	return 0;
}

static void scrub_bio_wait_endio(struct bio *bio)
{
	complete(bio->bi_private);
}

static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
					struct bio *bio,
					struct scrub_sector *sector)
{
	DECLARE_COMPLETION_ONSTACK(done);

	bio->bi_iter.bi_sector = (sector->offset + sector->sblock->logical) >>
				 SECTOR_SHIFT;
	bio->bi_private = &done;
	bio->bi_end_io = scrub_bio_wait_endio;
	raid56_parity_recover(bio, sector->recover->bioc, sector->sblock->mirror_num);

	wait_for_completion_io(&done);
	return blk_status_to_errno(bio->bi_status);
}

static void scrub_recheck_block_on_raid56(struct btrfs_fs_info *fs_info,
					  struct scrub_block *sblock)
{
	struct scrub_sector *first_sector = sblock->sectors[0];
	struct bio *bio;
	int i;

	/* All sectors in sblock belong to the same stripe on the same device. */
	ASSERT(sblock->dev);
	if (!sblock->dev->bdev)
		goto out;

	bio = bio_alloc(sblock->dev->bdev, BIO_MAX_VECS, REQ_OP_READ, GFP_NOFS);

	for (i = 0; i < sblock->sector_count; i++) {
		struct scrub_sector *sector = sblock->sectors[i];

		bio_add_scrub_sector(bio, sector, fs_info->sectorsize);
	}

	if (scrub_submit_raid56_bio_wait(fs_info, bio, first_sector)) {
		bio_put(bio);
		goto out;
	}

	bio_put(bio);

	scrub_recheck_block_checksum(sblock);

	return;
out:
	for (i = 0; i < sblock->sector_count; i++)
		sblock->sectors[i]->io_error = 1;

	sblock->no_io_error_seen = 0;
}

/*
 * This function will check the on disk data for checksum errors, header errors
 * and read I/O errors. If any I/O errors happen, the exact sectors which are
 * errored are marked as being bad. The goal is to enable scrub to take those
 * sectors that are not errored from all the mirrors so that the sectors that
 * are errored in the just handled mirror can be repaired.
 */
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock,
				int retry_failed_mirror)
{
	int i;

	sblock->no_io_error_seen = 1;

	/* short cut for raid56 */
	if (!retry_failed_mirror && scrub_is_page_on_raid56(sblock->sectors[0]))
		return scrub_recheck_block_on_raid56(fs_info, sblock);

	for (i = 0; i < sblock->sector_count; i++) {
		struct scrub_sector *sector = sblock->sectors[i];
		struct bio bio;
		struct bio_vec bvec;

		if (sblock->dev->bdev == NULL) {
			sector->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}

		bio_init(&bio, sblock->dev->bdev, &bvec, 1, REQ_OP_READ);
		bio_add_scrub_sector(&bio, sector, fs_info->sectorsize);
		bio.bi_iter.bi_sector = (sblock->physical + sector->offset) >>
					SECTOR_SHIFT;

		btrfsic_check_bio(&bio);
		if (submit_bio_wait(&bio)) {
			sector->io_error = 1;
			sblock->no_io_error_seen = 0;
		}

		bio_uninit(&bio);
	}

	if (sblock->no_io_error_seen)
		scrub_recheck_block_checksum(sblock);
}

static inline int scrub_check_fsid(u8 fsid[], struct scrub_sector *sector)
{
	struct btrfs_fs_devices *fs_devices = sector->sblock->dev->fs_devices;
	int ret;

	ret = memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	return !ret;
}

static void scrub_recheck_block_checksum(struct scrub_block *sblock)
{
	sblock->header_error = 0;
	sblock->checksum_error = 0;
	sblock->generation_error = 0;

	if (sblock->sectors[0]->flags & BTRFS_EXTENT_FLAG_DATA)
		scrub_checksum_data(sblock);
	else
		scrub_checksum_tree_block(sblock);
}

static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good)
{
	int i;
	int ret = 0;

	for (i = 0; i < sblock_bad->sector_count; i++) {
		int ret_sub;

		ret_sub = scrub_repair_sector_from_good_copy(sblock_bad,
							     sblock_good, i, 1);
		if (ret_sub)
			ret = ret_sub;
	}

	return ret;
}

static int scrub_repair_sector_from_good_copy(struct scrub_block *sblock_bad,
					      struct scrub_block *sblock_good,
					      int sector_num, int force_write)
{
	struct scrub_sector *sector_bad = sblock_bad->sectors[sector_num];
	struct scrub_sector *sector_good = sblock_good->sectors[sector_num];
	struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info;
	const u32 sectorsize = fs_info->sectorsize;

	if (force_write || sblock_bad->header_error ||
	    sblock_bad->checksum_error || sector_bad->io_error) {
		struct bio bio;
		struct bio_vec bvec;
		int ret;

		if (!sblock_bad->dev->bdev) {
			btrfs_warn_rl(fs_info,
				"scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
			return -EIO;
		}

		bio_init(&bio, sblock_bad->dev->bdev, &bvec, 1, REQ_OP_WRITE);
		bio.bi_iter.bi_sector = (sblock_bad->physical +
					 sector_bad->offset) >> SECTOR_SHIFT;
		ret = bio_add_scrub_sector(&bio, sector_good, sectorsize);

		btrfsic_check_bio(&bio);
		ret = submit_bio_wait(&bio);
		bio_uninit(&bio);

		if (ret) {
			btrfs_dev_stat_inc_and_print(sblock_bad->dev,
				BTRFS_DEV_STAT_WRITE_ERRS);
			atomic64_inc(&fs_info->dev_replace.num_write_errors);
			return -EIO;
		}
	}

	return 0;
}
1741
scrub_write_block_to_dev_replace(struct scrub_block * sblock)1742 static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
1743 {
1744 struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
1745 int i;
1746
1747 /*
1748 * This block is used for the check of the parity on the source device,
1749 * so the data needn't be written into the destination device.
1750 */
1751 if (sblock->sparity)
1752 return;
1753
1754 for (i = 0; i < sblock->sector_count; i++) {
1755 int ret;
1756
1757 ret = scrub_write_sector_to_dev_replace(sblock, i);
1758 if (ret)
1759 atomic64_inc(&fs_info->dev_replace.num_write_errors);
1760 }
1761 }
1762
1763 static int scrub_write_sector_to_dev_replace(struct scrub_block *sblock, int sector_num)
1764 {
1765 const u32 sectorsize = sblock->sctx->fs_info->sectorsize;
1766 struct scrub_sector *sector = sblock->sectors[sector_num];
1767
1768 if (sector->io_error)
1769 memset(scrub_sector_get_kaddr(sector), 0, sectorsize);
1770
1771 return scrub_add_sector_to_wr_bio(sblock->sctx, sector);
1772 }
1773
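/*
 * On zoned devices, writes to the dev-replace target must be sequential at
 * the zone's write pointer. If the next copy lands beyond the current write
 * pointer (e.g. because a hole was skipped), the gap has to be zero-filled
 * first. Illustrative example (hypothetical numbers): with write_pointer at
 * 64K and @physical at 128K, the gap [64K, 128K) is zeroed out via
 * btrfs_zoned_issue_zeroout() and the write pointer advances to 128K.
 */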
1774 static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical)
1775 {
1776 int ret = 0;
1777 u64 length;
1778
1779 if (!btrfs_is_zoned(sctx->fs_info))
1780 return 0;
1781
1782 if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical))
1783 return 0;
1784
1785 if (sctx->write_pointer < physical) {
1786 length = physical - sctx->write_pointer;
1787
1788 ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev,
1789 sctx->write_pointer, length);
1790 if (!ret)
1791 sctx->write_pointer = physical;
1792 }
1793 return ret;
1794 }
1795
1796 static void scrub_block_get(struct scrub_block *sblock)
1797 {
1798 refcount_inc(&sblock->refs);
1799 }
1800
1801 static int scrub_add_sector_to_wr_bio(struct scrub_ctx *sctx,
1802 struct scrub_sector *sector)
1803 {
1804 struct scrub_block *sblock = sector->sblock;
1805 struct scrub_bio *sbio;
1806 int ret;
1807 const u32 sectorsize = sctx->fs_info->sectorsize;
1808
1809 mutex_lock(&sctx->wr_lock);
1810 again:
1811 if (!sctx->wr_curr_bio) {
1812 sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio),
1813 GFP_KERNEL);
1814 if (!sctx->wr_curr_bio) {
1815 mutex_unlock(&sctx->wr_lock);
1816 return -ENOMEM;
1817 }
1818 sctx->wr_curr_bio->sctx = sctx;
1819 sctx->wr_curr_bio->sector_count = 0;
1820 }
1821 sbio = sctx->wr_curr_bio;
1822 if (sbio->sector_count == 0) {
1823 ret = fill_writer_pointer_gap(sctx, sector->offset +
1824 sblock->physical_for_dev_replace);
1825 if (ret) {
1826 mutex_unlock(&sctx->wr_lock);
1827 return ret;
1828 }
1829
1830 sbio->physical = sblock->physical_for_dev_replace + sector->offset;
1831 sbio->logical = sblock->logical + sector->offset;
1832 sbio->dev = sctx->wr_tgtdev;
1833 if (!sbio->bio) {
1834 sbio->bio = bio_alloc(sbio->dev->bdev, sctx->sectors_per_bio,
1835 REQ_OP_WRITE, GFP_NOFS);
1836 }
1837 sbio->bio->bi_private = sbio;
1838 sbio->bio->bi_end_io = scrub_wr_bio_end_io;
1839 sbio->bio->bi_iter.bi_sector = sbio->physical >> 9;
1840 sbio->status = 0;
1841 } else if (sbio->physical + sbio->sector_count * sectorsize !=
1842 sblock->physical_for_dev_replace + sector->offset ||
1843 sbio->logical + sbio->sector_count * sectorsize !=
1844 sblock->logical + sector->offset) {
1845 scrub_wr_submit(sctx);
1846 goto again;
1847 }
1848
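/*
* Try to add the sector to the current write bio. If the bio is already
* full (less than a full sector can be added), submit it and retry with
* a fresh bio.
*/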
1849 ret = bio_add_scrub_sector(sbio->bio, sector, sectorsize);
1850 if (ret != sectorsize) {
1851 if (sbio->sector_count < 1) {
1852 bio_put(sbio->bio);
1853 sbio->bio = NULL;
1854 mutex_unlock(&sctx->wr_lock);
1855 return -EIO;
1856 }
1857 scrub_wr_submit(sctx);
1858 goto again;
1859 }
1860
1861 sbio->sectors[sbio->sector_count] = sector;
1862 scrub_sector_get(sector);
1863 /*
1864 * Since the sector no longer holds a page of its own but uses
1865 * sblock::pages, we have to ensure the sblock is not freed before
1866 * our write bio finishes.
1867 */
1868 scrub_block_get(sector->sblock);
1869
1870 sbio->sector_count++;
1871 if (sbio->sector_count == sctx->sectors_per_bio)
1872 scrub_wr_submit(sctx);
1873 mutex_unlock(&sctx->wr_lock);
1874
1875 return 0;
1876 }
1877
1878 static void scrub_wr_submit(struct scrub_ctx *sctx)
1879 {
1880 struct scrub_bio *sbio;
1881
1882 if (!sctx->wr_curr_bio)
1883 return;
1884
1885 sbio = sctx->wr_curr_bio;
1886 sctx->wr_curr_bio = NULL;
1887 scrub_pending_bio_inc(sctx);
1888 /* Process all writes in a single worker thread. Then the block layer
1889 * orders the requests before sending them to the driver, which
1890 * doubled the write performance on spinning disks when measured
1891 * with Linux 3.5. */
1892 btrfsic_check_bio(sbio->bio);
1893 submit_bio(sbio->bio);
1894
1895 if (btrfs_is_zoned(sctx->fs_info))
1896 sctx->write_pointer = sbio->physical + sbio->sector_count *
1897 sctx->fs_info->sectorsize;
1898 }
1899
1900 static void scrub_wr_bio_end_io(struct bio *bio)
1901 {
1902 struct scrub_bio *sbio = bio->bi_private;
1903 struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
1904
1905 sbio->status = bio->bi_status;
1906 sbio->bio = bio;
1907
1908 INIT_WORK(&sbio->work, scrub_wr_bio_end_io_worker);
1909 queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
1910 }
1911
1912 static void scrub_wr_bio_end_io_worker(struct work_struct *work)
1913 {
1914 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
1915 struct scrub_ctx *sctx = sbio->sctx;
1916 int i;
1917
1918 ASSERT(sbio->sector_count <= SCRUB_SECTORS_PER_BIO);
1919 if (sbio->status) {
1920 struct btrfs_dev_replace *dev_replace =
1921 &sbio->sctx->fs_info->dev_replace;
1922
1923 for (i = 0; i < sbio->sector_count; i++) {
1924 struct scrub_sector *sector = sbio->sectors[i];
1925
1926 sector->io_error = 1;
1927 atomic64_inc(&dev_replace->num_write_errors);
1928 }
1929 }
1930
1931 /*
1932 * In scrub_add_sector_to_wr_bio() we grab extra ref for sblock, now in
1933 * endio we should put the sblock.
1934 */
1935 for (i = 0; i < sbio->sector_count; i++) {
1936 scrub_block_put(sbio->sectors[i]->sblock);
1937 scrub_sector_put(sbio->sectors[i]);
1938 }
1939
1940 bio_put(sbio->bio);
1941 kfree(sbio);
1942 scrub_pending_bio_dec(sctx);
1943 }
1944
1945 static int scrub_checksum(struct scrub_block *sblock)
1946 {
1947 u64 flags;
1948 int ret;
1949
1950 /*
1951 * No need to initialize these stats currently, because this
1952 * function only uses the return value instead of the stat
1953 * values.
1954 *
1955 * Todo:
1956 * always use stats
1957 */
1958 sblock->header_error = 0;
1959 sblock->generation_error = 0;
1960 sblock->checksum_error = 0;
1961
1962 WARN_ON(sblock->sector_count < 1);
1963 flags = sblock->sectors[0]->flags;
1964 ret = 0;
1965 if (flags & BTRFS_EXTENT_FLAG_DATA)
1966 ret = scrub_checksum_data(sblock);
1967 else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1968 ret = scrub_checksum_tree_block(sblock);
1969 else if (flags & BTRFS_EXTENT_FLAG_SUPER)
1970 ret = scrub_checksum_super(sblock);
1971 else
1972 WARN_ON(1);
1973 if (ret)
1974 scrub_handle_errored_block(sblock);
1975
1976 return ret;
1977 }
1978
1979 static int scrub_checksum_data(struct scrub_block *sblock)
1980 {
1981 struct scrub_ctx *sctx = sblock->sctx;
1982 struct btrfs_fs_info *fs_info = sctx->fs_info;
1983 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
1984 u8 csum[BTRFS_CSUM_SIZE];
1985 struct scrub_sector *sector;
1986 char *kaddr;
1987
1988 BUG_ON(sblock->sector_count < 1);
1989 sector = sblock->sectors[0];
1990 if (!sector->have_csum)
1991 return 0;
1992
1993 kaddr = scrub_sector_get_kaddr(sector);
1994
1995 shash->tfm = fs_info->csum_shash;
1996 crypto_shash_init(shash);
1997
1998 crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum);
1999
2000 if (memcmp(csum, sector->csum, fs_info->csum_size))
2001 sblock->checksum_error = 1;
2002 return sblock->checksum_error;
2003 }
2004
2005 static int scrub_checksum_tree_block(struct scrub_block *sblock)
2006 {
2007 struct scrub_ctx *sctx = sblock->sctx;
2008 struct btrfs_header *h;
2009 struct btrfs_fs_info *fs_info = sctx->fs_info;
2010 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
2011 u8 calculated_csum[BTRFS_CSUM_SIZE];
2012 u8 on_disk_csum[BTRFS_CSUM_SIZE];
2013 /*
2014 * This is done in sectorsize steps even for metadata as there's a
2015 * constraint for nodesize to be aligned to sectorsize. This will need
2016 * to change so we don't misuse data and metadata units like that.
2017 */
2018 const u32 sectorsize = sctx->fs_info->sectorsize;
2019 const int num_sectors = fs_info->nodesize >> fs_info->sectorsize_bits;
2020 int i;
2021 struct scrub_sector *sector;
2022 char *kaddr;
2023
2024 BUG_ON(sblock->sector_count < 1);
2025
2026 /* Each member in sectors is just one sector */
2027 ASSERT(sblock->sector_count == num_sectors);
2028
2029 sector = sblock->sectors[0];
2030 kaddr = scrub_sector_get_kaddr(sector);
2031 h = (struct btrfs_header *)kaddr;
2032 memcpy(on_disk_csum, h->csum, sctx->fs_info->csum_size);
2033
2034 /*
2035 * We don't use the getter functions here, as we
2036 * a) don't have an extent buffer, and
2037 * b) the page is already kmapped.
2038 */
2039 if (sblock->logical != btrfs_stack_header_bytenr(h))
2040 sblock->header_error = 1;
2041
2042 if (sector->generation != btrfs_stack_header_generation(h)) {
2043 sblock->header_error = 1;
2044 sblock->generation_error = 1;
2045 }
2046
2047 if (!scrub_check_fsid(h->fsid, sector))
2048 sblock->header_error = 1;
2049
2050 if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
2051 BTRFS_UUID_SIZE))
2052 sblock->header_error = 1;
2053
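/*
* The checksum covers the whole tree block except the first
* BTRFS_CSUM_SIZE bytes, which hold the on-disk checksum itself, so
* hashing starts at kaddr + BTRFS_CSUM_SIZE in the first sector.
*/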
2054 shash->tfm = fs_info->csum_shash;
2055 crypto_shash_init(shash);
2056 crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE,
2057 sectorsize - BTRFS_CSUM_SIZE);
2058
2059 for (i = 1; i < num_sectors; i++) {
2060 kaddr = scrub_sector_get_kaddr(sblock->sectors[i]);
2061 crypto_shash_update(shash, kaddr, sectorsize);
2062 }
2063
2064 crypto_shash_final(shash, calculated_csum);
2065 if (memcmp(calculated_csum, on_disk_csum, sctx->fs_info->csum_size))
2066 sblock->checksum_error = 1;
2067
2068 return sblock->header_error || sblock->checksum_error;
2069 }
2070
2071 static int scrub_checksum_super(struct scrub_block *sblock)
2072 {
2073 struct btrfs_super_block *s;
2074 struct scrub_ctx *sctx = sblock->sctx;
2075 struct btrfs_fs_info *fs_info = sctx->fs_info;
2076 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
2077 u8 calculated_csum[BTRFS_CSUM_SIZE];
2078 struct scrub_sector *sector;
2079 char *kaddr;
2080 int fail_gen = 0;
2081 int fail_cor = 0;
2082
2083 BUG_ON(sblock->sector_count < 1);
2084 sector = sblock->sectors[0];
2085 kaddr = scrub_sector_get_kaddr(sector);
2086 s = (struct btrfs_super_block *)kaddr;
2087
2088 if (sblock->logical != btrfs_super_bytenr(s))
2089 ++fail_cor;
2090
2091 if (sector->generation != btrfs_super_generation(s))
2092 ++fail_gen;
2093
2094 if (!scrub_check_fsid(s->fsid, sector))
2095 ++fail_cor;
2096
2097 shash->tfm = fs_info->csum_shash;
2098 crypto_shash_init(shash);
2099 crypto_shash_digest(shash, kaddr + BTRFS_CSUM_SIZE,
2100 BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, calculated_csum);
2101
2102 if (memcmp(calculated_csum, s->csum, sctx->fs_info->csum_size))
2103 ++fail_cor;
2104
2105 return fail_cor + fail_gen;
2106 }
2107
2108 static void scrub_block_put(struct scrub_block *sblock)
2109 {
2110 if (refcount_dec_and_test(&sblock->refs)) {
2111 int i;
2112
2113 if (sblock->sparity)
2114 scrub_parity_put(sblock->sparity);
2115
2116 for (i = 0; i < sblock->sector_count; i++)
2117 scrub_sector_put(sblock->sectors[i]);
2118 for (i = 0; i < DIV_ROUND_UP(sblock->len, PAGE_SIZE); i++) {
2119 if (sblock->pages[i]) {
2120 detach_scrub_page_private(sblock->pages[i]);
2121 __free_page(sblock->pages[i]);
2122 }
2123 }
2124 kfree(sblock);
2125 }
2126 }
2127
2128 static void scrub_sector_get(struct scrub_sector *sector)
2129 {
2130 atomic_inc(&sector->refs);
2131 }
2132
2133 static void scrub_sector_put(struct scrub_sector *sector)
2134 {
2135 if (atomic_dec_and_test(&sector->refs))
2136 kfree(sector);
2137 }
2138
2139 /*
2140 * Throttling of IO submission, bandwidth-limit based; the timeslice is 1
2141 * second. The limit can be set via /sys/fs/btrfs/UUID/devinfo/devid/scrub_speed_max.
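*
* Illustrative example (hypothetical numbers): with scrub_speed_max set to
* 128MiB/s, div = min(64, 128/16) = 8, so the slice is split into 125ms
* intervals and each interval allows bwlimit/div = 16MiB of IO before the
* rest of the interval is slept away.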
2142 */
2143 static void scrub_throttle(struct scrub_ctx *sctx)
2144 {
2145 const int time_slice = 1000;
2146 struct scrub_bio *sbio;
2147 struct btrfs_device *device;
2148 s64 delta;
2149 ktime_t now;
2150 u32 div;
2151 u64 bwlimit;
2152
2153 sbio = sctx->bios[sctx->curr];
2154 device = sbio->dev;
2155 bwlimit = READ_ONCE(device->scrub_speed_max);
2156 if (bwlimit == 0)
2157 return;
2158
2159 /*
2160 * The slice is divided into intervals when the IO is submitted; the
2161 * interval count is adjusted by bwlimit, with a maximum of 64 intervals.
2162 */
2163 div = max_t(u32, 1, (u32)(bwlimit / (16 * 1024 * 1024)));
2164 div = min_t(u32, 64, div);
2165
2166 /* Start new epoch, set deadline */
2167 now = ktime_get();
2168 if (sctx->throttle_deadline == 0) {
2169 sctx->throttle_deadline = ktime_add_ms(now, time_slice / div);
2170 sctx->throttle_sent = 0;
2171 }
2172
2173 /* Still in the time to send? */
2174 if (ktime_before(now, sctx->throttle_deadline)) {
2175 /* If current bio is within the limit, send it */
2176 sctx->throttle_sent += sbio->bio->bi_iter.bi_size;
2177 if (sctx->throttle_sent <= div_u64(bwlimit, div))
2178 return;
2179
2180 /* We're over the limit, sleep until the rest of the slice */
2181 delta = ktime_ms_delta(sctx->throttle_deadline, now);
2182 } else {
2183 /* New request after deadline, start new epoch */
2184 delta = 0;
2185 }
2186
2187 if (delta) {
2188 long timeout;
2189
2190 timeout = div_u64(delta * HZ, 1000);
2191 schedule_timeout_interruptible(timeout);
2192 }
2193
2194 /* Next call will start the deadline period */
2195 sctx->throttle_deadline = 0;
2196 }
2197
2198 static void scrub_submit(struct scrub_ctx *sctx)
2199 {
2200 struct scrub_bio *sbio;
2201
2202 if (sctx->curr == -1)
2203 return;
2204
2205 scrub_throttle(sctx);
2206
2207 sbio = sctx->bios[sctx->curr];
2208 sctx->curr = -1;
2209 scrub_pending_bio_inc(sctx);
2210 btrfsic_check_bio(sbio->bio);
2211 submit_bio(sbio->bio);
2212 }
2213
2214 static int scrub_add_sector_to_rd_bio(struct scrub_ctx *sctx,
2215 struct scrub_sector *sector)
2216 {
2217 struct scrub_block *sblock = sector->sblock;
2218 struct scrub_bio *sbio;
2219 const u32 sectorsize = sctx->fs_info->sectorsize;
2220 int ret;
2221
2222 again:
2223 /*
2224 * Grab a fresh bio or wait for one to become available
2225 */
2226 while (sctx->curr == -1) {
2227 spin_lock(&sctx->list_lock);
2228 sctx->curr = sctx->first_free;
2229 if (sctx->curr != -1) {
2230 sctx->first_free = sctx->bios[sctx->curr]->next_free;
2231 sctx->bios[sctx->curr]->next_free = -1;
2232 sctx->bios[sctx->curr]->sector_count = 0;
2233 spin_unlock(&sctx->list_lock);
2234 } else {
2235 spin_unlock(&sctx->list_lock);
2236 wait_event(sctx->list_wait, sctx->first_free != -1);
2237 }
2238 }
2239 sbio = sctx->bios[sctx->curr];
2240 if (sbio->sector_count == 0) {
2241 sbio->physical = sblock->physical + sector->offset;
2242 sbio->logical = sblock->logical + sector->offset;
2243 sbio->dev = sblock->dev;
2244 if (!sbio->bio) {
2245 sbio->bio = bio_alloc(sbio->dev->bdev, sctx->sectors_per_bio,
2246 REQ_OP_READ, GFP_NOFS);
2247 }
2248 sbio->bio->bi_private = sbio;
2249 sbio->bio->bi_end_io = scrub_bio_end_io;
2250 sbio->bio->bi_iter.bi_sector = sbio->physical >> 9;
2251 sbio->status = 0;
2252 } else if (sbio->physical + sbio->sector_count * sectorsize !=
2253 sblock->physical + sector->offset ||
2254 sbio->logical + sbio->sector_count * sectorsize !=
2255 sblock->logical + sector->offset ||
2256 sbio->dev != sblock->dev) {
2257 scrub_submit(sctx);
2258 goto again;
2259 }
2260
2261 sbio->sectors[sbio->sector_count] = sector;
2262 ret = bio_add_scrub_sector(sbio->bio, sector, sectorsize);
2263 if (ret != sectorsize) {
2264 if (sbio->sector_count < 1) {
2265 bio_put(sbio->bio);
2266 sbio->bio = NULL;
2267 return -EIO;
2268 }
2269 scrub_submit(sctx);
2270 goto again;
2271 }
2272
2273 scrub_block_get(sblock); /* one for the page added to the bio */
2274 atomic_inc(&sblock->outstanding_sectors);
2275 sbio->sector_count++;
2276 if (sbio->sector_count == sctx->sectors_per_bio)
2277 scrub_submit(sctx);
2278
2279 return 0;
2280 }
2281
2282 static void scrub_missing_raid56_end_io(struct bio *bio)
2283 {
2284 struct scrub_block *sblock = bio->bi_private;
2285 struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
2286
2287 btrfs_bio_counter_dec(fs_info);
2288 if (bio->bi_status)
2289 sblock->no_io_error_seen = 0;
2290
2291 bio_put(bio);
2292
2293 queue_work(fs_info->scrub_workers, &sblock->work);
2294 }
2295
2296 static void scrub_missing_raid56_worker(struct work_struct *work)
2297 {
2298 struct scrub_block *sblock = container_of(work, struct scrub_block, work);
2299 struct scrub_ctx *sctx = sblock->sctx;
2300 struct btrfs_fs_info *fs_info = sctx->fs_info;
2301 u64 logical;
2302 struct btrfs_device *dev;
2303
2304 logical = sblock->logical;
2305 dev = sblock->dev;
2306
2307 if (sblock->no_io_error_seen)
2308 scrub_recheck_block_checksum(sblock);
2309
2310 if (!sblock->no_io_error_seen) {
2311 spin_lock(&sctx->stat_lock);
2312 sctx->stat.read_errors++;
2313 spin_unlock(&sctx->stat_lock);
2314 btrfs_err_rl_in_rcu(fs_info,
2315 "IO error rebuilding logical %llu for dev %s",
2316 logical, rcu_str_deref(dev->name));
2317 } else if (sblock->header_error || sblock->checksum_error) {
2318 spin_lock(&sctx->stat_lock);
2319 sctx->stat.uncorrectable_errors++;
2320 spin_unlock(&sctx->stat_lock);
2321 btrfs_err_rl_in_rcu(fs_info,
2322 "failed to rebuild valid logical %llu for dev %s",
2323 logical, rcu_str_deref(dev->name));
2324 } else {
2325 scrub_write_block_to_dev_replace(sblock);
2326 }
2327
2328 if (sctx->is_dev_replace && sctx->flush_all_writes) {
2329 mutex_lock(&sctx->wr_lock);
2330 scrub_wr_submit(sctx);
2331 mutex_unlock(&sctx->wr_lock);
2332 }
2333
2334 scrub_block_put(sblock);
2335 scrub_pending_bio_dec(sctx);
2336 }
2337
2338 static void scrub_missing_raid56_pages(struct scrub_block *sblock)
2339 {
2340 struct scrub_ctx *sctx = sblock->sctx;
2341 struct btrfs_fs_info *fs_info = sctx->fs_info;
2342 u64 length = sblock->sector_count << fs_info->sectorsize_bits;
2343 u64 logical = sblock->logical;
2344 struct btrfs_io_context *bioc = NULL;
2345 struct bio *bio;
2346 struct btrfs_raid_bio *rbio;
2347 int ret;
2348 int i;
2349
2350 btrfs_bio_counter_inc_blocked(fs_info);
2351 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
2352 &length, &bioc);
2353 if (ret || !bioc || !bioc->raid_map)
2354 goto bioc_out;
2355
2356 if (WARN_ON(!sctx->is_dev_replace ||
2357 !(bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
2358 /*
2359 * We shouldn't be scrubbing a missing device. Even for dev
2360 * replace, we should only get here for RAID 5/6. We either
2361 * managed to mount something with no mirrors remaining or
2362 * there's a bug in scrub_find_good_copy()/btrfs_map_block().
2363 */
2364 goto bioc_out;
2365 }
2366
2367 bio = bio_alloc(NULL, BIO_MAX_VECS, REQ_OP_READ, GFP_NOFS);
2368 bio->bi_iter.bi_sector = logical >> 9;
2369 bio->bi_private = sblock;
2370 bio->bi_end_io = scrub_missing_raid56_end_io;
2371
2372 rbio = raid56_alloc_missing_rbio(bio, bioc);
2373 if (!rbio)
2374 goto rbio_out;
2375
2376 for (i = 0; i < sblock->sector_count; i++) {
2377 struct scrub_sector *sector = sblock->sectors[i];
2378
2379 raid56_add_scrub_pages(rbio, scrub_sector_get_page(sector),
2380 scrub_sector_get_page_offset(sector),
2381 sector->offset + sector->sblock->logical);
2382 }
2383
2384 INIT_WORK(&sblock->work, scrub_missing_raid56_worker);
2385 scrub_block_get(sblock);
2386 scrub_pending_bio_inc(sctx);
2387 raid56_submit_missing_rbio(rbio);
2388 btrfs_put_bioc(bioc);
2389 return;
2390
2391 rbio_out:
2392 bio_put(bio);
2393 bioc_out:
2394 btrfs_bio_counter_dec(fs_info);
2395 btrfs_put_bioc(bioc);
2396 spin_lock(&sctx->stat_lock);
2397 sctx->stat.malloc_errors++;
2398 spin_unlock(&sctx->stat_lock);
2399 }
2400
2401 static int scrub_sectors(struct scrub_ctx *sctx, u64 logical, u32 len,
2402 u64 physical, struct btrfs_device *dev, u64 flags,
2403 u64 gen, int mirror_num, u8 *csum,
2404 u64 physical_for_dev_replace)
2405 {
2406 struct scrub_block *sblock;
2407 const u32 sectorsize = sctx->fs_info->sectorsize;
2408 int index;
2409
2410 sblock = alloc_scrub_block(sctx, dev, logical, physical,
2411 physical_for_dev_replace, mirror_num);
2412 if (!sblock) {
2413 spin_lock(&sctx->stat_lock);
2414 sctx->stat.malloc_errors++;
2415 spin_unlock(&sctx->stat_lock);
2416 return -ENOMEM;
2417 }
2418
2419 for (index = 0; len > 0; index++) {
2420 struct scrub_sector *sector;
2421 /*
2422 * Here we will allocate one page for one sector to scrub.
2423 * This is fine if PAGE_SIZE == sectorsize, but will cost
2424 * more memory in the PAGE_SIZE > sectorsize case.
2425 */
2426 u32 l = min(sectorsize, len);
2427
2428 sector = alloc_scrub_sector(sblock, logical, GFP_KERNEL);
2429 if (!sector) {
2430 spin_lock(&sctx->stat_lock);
2431 sctx->stat.malloc_errors++;
2432 spin_unlock(&sctx->stat_lock);
2433 scrub_block_put(sblock);
2434 return -ENOMEM;
2435 }
2436 sector->flags = flags;
2437 sector->generation = gen;
2438 if (csum) {
2439 sector->have_csum = 1;
2440 memcpy(sector->csum, csum, sctx->fs_info->csum_size);
2441 } else {
2442 sector->have_csum = 0;
2443 }
2444 len -= l;
2445 logical += l;
2446 physical += l;
2447 physical_for_dev_replace += l;
2448 }
2449
2450 WARN_ON(sblock->sector_count == 0);
2451 if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
2452 /*
2453 * This case should only be hit for RAID 5/6 device replace. See
2454 * the comment in scrub_missing_raid56_pages() for details.
2455 */
2456 scrub_missing_raid56_pages(sblock);
2457 } else {
2458 for (index = 0; index < sblock->sector_count; index++) {
2459 struct scrub_sector *sector = sblock->sectors[index];
2460 int ret;
2461
2462 ret = scrub_add_sector_to_rd_bio(sctx, sector);
2463 if (ret) {
2464 scrub_block_put(sblock);
2465 return ret;
2466 }
2467 }
2468
2469 if (flags & BTRFS_EXTENT_FLAG_SUPER)
2470 scrub_submit(sctx);
2471 }
2472
2473 /* Last one frees, either here or in bio completion for the last sector */
2474 scrub_block_put(sblock);
2475 return 0;
2476 }
2477
2478 static void scrub_bio_end_io(struct bio *bio)
2479 {
2480 struct scrub_bio *sbio = bio->bi_private;
2481 struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
2482
2483 sbio->status = bio->bi_status;
2484 sbio->bio = bio;
2485
2486 queue_work(fs_info->scrub_workers, &sbio->work);
2487 }
2488
2489 static void scrub_bio_end_io_worker(struct work_struct *work)
2490 {
2491 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
2492 struct scrub_ctx *sctx = sbio->sctx;
2493 int i;
2494
2495 ASSERT(sbio->sector_count <= SCRUB_SECTORS_PER_BIO);
2496 if (sbio->status) {
2497 for (i = 0; i < sbio->sector_count; i++) {
2498 struct scrub_sector *sector = sbio->sectors[i];
2499
2500 sector->io_error = 1;
2501 sector->sblock->no_io_error_seen = 0;
2502 }
2503 }
2504
2505 /* Now complete the scrub_block items that have all sectors completed */
2506 for (i = 0; i < sbio->sector_count; i++) {
2507 struct scrub_sector *sector = sbio->sectors[i];
2508 struct scrub_block *sblock = sector->sblock;
2509
2510 if (atomic_dec_and_test(&sblock->outstanding_sectors))
2511 scrub_block_complete(sblock);
2512 scrub_block_put(sblock);
2513 }
2514
2515 bio_put(sbio->bio);
2516 sbio->bio = NULL;
2517 spin_lock(&sctx->list_lock);
2518 sbio->next_free = sctx->first_free;
2519 sctx->first_free = sbio->index;
2520 spin_unlock(&sctx->list_lock);
2521
2522 if (sctx->is_dev_replace && sctx->flush_all_writes) {
2523 mutex_lock(&sctx->wr_lock);
2524 scrub_wr_submit(sctx);
2525 mutex_unlock(&sctx->wr_lock);
2526 }
2527
2528 scrub_pending_bio_dec(sctx);
2529 }
2530
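/*
 * Mark the sectors covered by [@start, @start + @len) in @bitmap. @start is
 * relative to sparity->logic_start and the range wraps within one stripe.
 * Illustrative example (hypothetical numbers): with stripe_len = 64K and 4K
 * sectors, nsectors = 16; a range starting at in-stripe offset 56K with
 * len = 16K sets bits 14-15, then wraps around and sets bits 0-1.
 */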
2531 static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
2532 unsigned long *bitmap,
2533 u64 start, u32 len)
2534 {
2535 u64 offset;
2536 u32 nsectors;
2537 u32 sectorsize_bits = sparity->sctx->fs_info->sectorsize_bits;
2538
2539 if (len >= sparity->stripe_len) {
2540 bitmap_set(bitmap, 0, sparity->nsectors);
2541 return;
2542 }
2543
2544 start -= sparity->logic_start;
2545 start = div64_u64_rem(start, sparity->stripe_len, &offset);
2546 offset = offset >> sectorsize_bits;
2547 nsectors = len >> sectorsize_bits;
2548
2549 if (offset + nsectors <= sparity->nsectors) {
2550 bitmap_set(bitmap, offset, nsectors);
2551 return;
2552 }
2553
2554 bitmap_set(bitmap, offset, sparity->nsectors - offset);
2555 bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
2556 }
2557
2558 static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
2559 u64 start, u32 len)
2560 {
2561 __scrub_mark_bitmap(sparity, &sparity->ebitmap, start, len);
2562 }
2563
2564 static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
2565 u64 start, u32 len)
2566 {
2567 __scrub_mark_bitmap(sparity, &sparity->dbitmap, start, len);
2568 }
2569
2570 static void scrub_block_complete(struct scrub_block *sblock)
2571 {
2572 int corrupted = 0;
2573
2574 if (!sblock->no_io_error_seen) {
2575 corrupted = 1;
2576 scrub_handle_errored_block(sblock);
2577 } else {
2578 /*
2579 * In the dev-replace case: if the block has a checksum error, it is
2580 * written via the repair mechanism; otherwise it is written to the
2581 * replace target right here.
2582 */
2583 corrupted = scrub_checksum(sblock);
2584 if (!corrupted && sblock->sctx->is_dev_replace)
2585 scrub_write_block_to_dev_replace(sblock);
2586 }
2587
2588 if (sblock->sparity && corrupted && !sblock->data_corrected) {
2589 u64 start = sblock->logical;
2590 u64 end = sblock->logical +
2591 sblock->sectors[sblock->sector_count - 1]->offset +
2592 sblock->sctx->fs_info->sectorsize;
2593
2594 ASSERT(end - start <= U32_MAX);
2595 scrub_parity_mark_sectors_error(sblock->sparity,
2596 start, end - start);
2597 }
2598 }
2599
2600 static void drop_csum_range(struct scrub_ctx *sctx, struct btrfs_ordered_sum *sum)
2601 {
2602 sctx->stat.csum_discards += sum->len >> sctx->fs_info->sectorsize_bits;
2603 list_del(&sum->list);
2604 kfree(sum);
2605 }
2606
2607 /*
2608 * Find the desired csum for range [logical, logical + sectorsize), and store
2609 * the csum into @csum.
2610 *
2611 * The search source is sctx->csum_list, which is a pre-populated list
2612 * storing bytenr ordered csum ranges. We are responsible for cleaning
2613 * up any range that is before @logical.
2614 *
2615 * Return 0 if there is no csum for the range.
2616 * Return 1 if there is a csum for the range, which is copied to @csum.
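*
* Illustrative example (hypothetical numbers): with a csum range starting
* at bytenr 1M and covering 64K of 4K sectors, a lookup for logical 1M + 8K
* copies the csum at index (8K >> sectorsize_bits) = 2, while any range
* ending at or before @logical is dropped from the list.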
2617 */
2618 static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
2619 {
2620 bool found = false;
2621
2622 while (!list_empty(&sctx->csum_list)) {
2623 struct btrfs_ordered_sum *sum = NULL;
2624 unsigned long index;
2625 unsigned long num_sectors;
2626
2627 sum = list_first_entry(&sctx->csum_list,
2628 struct btrfs_ordered_sum, list);
2629 /* The current csum range is beyond our range, no csum found */
2630 if (sum->bytenr > logical)
2631 break;
2632
2633 /*
2634 * The current sum is before our bytenr. Since scrub is always
2635 * done in bytenr order, the csum will never be used anymore;
2636 * clean it up so that later calls won't bother with the range,
2637 * and continue searching the next range.
2638 */
2639 if (sum->bytenr + sum->len <= logical) {
2640 drop_csum_range(sctx, sum);
2641 continue;
2642 }
2643
2644 /* Now the csum range covers our bytenr, copy the csum */
2645 found = true;
2646 index = (logical - sum->bytenr) >> sctx->fs_info->sectorsize_bits;
2647 num_sectors = sum->len >> sctx->fs_info->sectorsize_bits;
2648
2649 memcpy(csum, sum->sums + index * sctx->fs_info->csum_size,
2650 sctx->fs_info->csum_size);
2651
2652 /* Cleanup the range if we're at the end of the csum range */
2653 if (index == num_sectors - 1)
2654 drop_csum_range(sctx, sum);
2655 break;
2656 }
2657 if (!found)
2658 return 0;
2659 return 1;
2660 }
2661
2662 /* Scrub extent tries to collect up to 64 KiB for each bio */
2663 static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map,
2664 u64 logical, u32 len,
2665 u64 physical, struct btrfs_device *dev, u64 flags,
2666 u64 gen, int mirror_num)
2667 {
2668 struct btrfs_device *src_dev = dev;
2669 u64 src_physical = physical;
2670 int src_mirror = mirror_num;
2671 int ret;
2672 u8 csum[BTRFS_CSUM_SIZE];
2673 u32 blocksize;
2674
2675 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2676 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
2677 blocksize = map->stripe_len;
2678 else
2679 blocksize = sctx->fs_info->sectorsize;
2680 spin_lock(&sctx->stat_lock);
2681 sctx->stat.data_extents_scrubbed++;
2682 sctx->stat.data_bytes_scrubbed += len;
2683 spin_unlock(&sctx->stat_lock);
2684 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2685 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
2686 blocksize = map->stripe_len;
2687 else
2688 blocksize = sctx->fs_info->nodesize;
2689 spin_lock(&sctx->stat_lock);
2690 sctx->stat.tree_extents_scrubbed++;
2691 sctx->stat.tree_bytes_scrubbed += len;
2692 spin_unlock(&sctx->stat_lock);
2693 } else {
2694 blocksize = sctx->fs_info->sectorsize;
2695 WARN_ON(1);
2696 }
2697
2698 /*
2699 * For the dev-replace case, @dev can be a missing device.
2700 * Regular scrub avoids running on a missing device entirely,
2701 * as that would trigger tons of read errors.
2702 *
2703 * Reading from a missing device would cause the read error counts
2704 * to increase unnecessarily.
2705 * So here we change the read source to a good mirror.
2706 */
2707 if (sctx->is_dev_replace && !dev->bdev)
2708 scrub_find_good_copy(sctx->fs_info, logical, len, &src_physical,
2709 &src_dev, &src_mirror);
2710 while (len) {
2711 u32 l = min(len, blocksize);
2712 int have_csum = 0;
2713
2714 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2715 /* push csums to sbio */
2716 have_csum = scrub_find_csum(sctx, logical, csum);
2717 if (have_csum == 0)
2718 ++sctx->stat.no_csum;
2719 }
2720 ret = scrub_sectors(sctx, logical, l, src_physical, src_dev,
2721 flags, gen, src_mirror,
2722 have_csum ? csum : NULL, physical);
2723 if (ret)
2724 return ret;
2725 len -= l;
2726 logical += l;
2727 physical += l;
2728 src_physical += l;
2729 }
2730 return 0;
2731 }
2732
2733 static int scrub_sectors_for_parity(struct scrub_parity *sparity,
2734 u64 logical, u32 len,
2735 u64 physical, struct btrfs_device *dev,
2736 u64 flags, u64 gen, int mirror_num, u8 *csum)
2737 {
2738 struct scrub_ctx *sctx = sparity->sctx;
2739 struct scrub_block *sblock;
2740 const u32 sectorsize = sctx->fs_info->sectorsize;
2741 int index;
2742
2743 ASSERT(IS_ALIGNED(len, sectorsize));
2744
2745 sblock = alloc_scrub_block(sctx, dev, logical, physical, physical, mirror_num);
2746 if (!sblock) {
2747 spin_lock(&sctx->stat_lock);
2748 sctx->stat.malloc_errors++;
2749 spin_unlock(&sctx->stat_lock);
2750 return -ENOMEM;
2751 }
2752
2753 sblock->sparity = sparity;
2754 scrub_parity_get(sparity);
2755
2756 for (index = 0; len > 0; index++) {
2757 struct scrub_sector *sector;
2758
2759 sector = alloc_scrub_sector(sblock, logical, GFP_KERNEL);
2760 if (!sector) {
2761 spin_lock(&sctx->stat_lock);
2762 sctx->stat.malloc_errors++;
2763 spin_unlock(&sctx->stat_lock);
2764 scrub_block_put(sblock);
2765 return -ENOMEM;
2766 }
2767 sblock->sectors[index] = sector;
2768 /* For scrub parity */
2769 scrub_sector_get(sector);
2770 list_add_tail(&sector->list, &sparity->sectors_list);
2771 sector->flags = flags;
2772 sector->generation = gen;
2773 if (csum) {
2774 sector->have_csum = 1;
2775 memcpy(sector->csum, csum, sctx->fs_info->csum_size);
2776 } else {
2777 sector->have_csum = 0;
2778 }
2779
2780 /* Iterate over the stripe range in sectorsize steps */
2781 len -= sectorsize;
2782 logical += sectorsize;
2783 physical += sectorsize;
2784 }
2785
2786 WARN_ON(sblock->sector_count == 0);
2787 for (index = 0; index < sblock->sector_count; index++) {
2788 struct scrub_sector *sector = sblock->sectors[index];
2789 int ret;
2790
2791 ret = scrub_add_sector_to_rd_bio(sctx, sector);
2792 if (ret) {
2793 scrub_block_put(sblock);
2794 return ret;
2795 }
2796 }
2797
2798 /* Last one frees, either here or in bio completion for last sector */
2799 scrub_block_put(sblock);
2800 return 0;
2801 }
2802
2803 static int scrub_extent_for_parity(struct scrub_parity *sparity,
2804 u64 logical, u32 len,
2805 u64 physical, struct btrfs_device *dev,
2806 u64 flags, u64 gen, int mirror_num)
2807 {
2808 struct scrub_ctx *sctx = sparity->sctx;
2809 int ret;
2810 u8 csum[BTRFS_CSUM_SIZE];
2811 u32 blocksize;
2812
2813 if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
2814 scrub_parity_mark_sectors_error(sparity, logical, len);
2815 return 0;
2816 }
2817
2818 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2819 blocksize = sparity->stripe_len;
2820 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2821 blocksize = sparity->stripe_len;
2822 } else {
2823 blocksize = sctx->fs_info->sectorsize;
2824 WARN_ON(1);
2825 }
2826
2827 while (len) {
2828 u32 l = min(len, blocksize);
2829 int have_csum = 0;
2830
2831 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2832 /* push csums to sbio */
2833 have_csum = scrub_find_csum(sctx, logical, csum);
2834 if (have_csum == 0)
2835 goto skip;
2836 }
2837 ret = scrub_sectors_for_parity(sparity, logical, l, physical, dev,
2838 flags, gen, mirror_num,
2839 have_csum ? csum : NULL);
2840 if (ret)
2841 return ret;
2842 skip:
2843 len -= l;
2844 logical += l;
2845 physical += l;
2846 }
2847 return 0;
2848 }
2849
2850 /*
2851 * Given a physical address, this will calculate its
2852 * logical offset. If this is a parity stripe, it will return
2853 * the leftmost data stripe's logical offset.
2854 *
2855 * Return 0 if it is a data stripe, 1 if it is a parity stripe.
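*
* Illustrative example (hypothetical numbers): RAID5 over 3 devices with a
* 64K stripe_len has data_stripes = 2. For device stripe number 0, the
* second 64K at physical offset 64K holds the parity of the second full
* stripe, so 1 is returned and *offset is set to 128K, the logical offset
* of that full stripe's leftmost data stripe inside the chunk.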
2856 */
2857 static int get_raid56_logic_offset(u64 physical, int num,
2858 struct map_lookup *map, u64 *offset,
2859 u64 *stripe_start)
2860 {
2861 int i;
2862 int j = 0;
2863 u64 stripe_nr;
2864 u64 last_offset;
2865 u32 stripe_index;
2866 u32 rot;
2867 const int data_stripes = nr_data_stripes(map);
2868
2869 last_offset = (physical - map->stripes[num].physical) * data_stripes;
2870 if (stripe_start)
2871 *stripe_start = last_offset;
2872
2873 *offset = last_offset;
2874 for (i = 0; i < data_stripes; i++) {
2875 *offset = last_offset + i * map->stripe_len;
2876
2877 stripe_nr = div64_u64(*offset, map->stripe_len);
2878 stripe_nr = div_u64(stripe_nr, data_stripes);
2879
2880 /* Work out the disk rotation on this stripe-set */
2881 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
2882 /* Calculate which stripe this data is located on */
2883 rot += i;
2884 stripe_index = rot % map->num_stripes;
2885 if (stripe_index == num)
2886 return 0;
2887 if (stripe_index < num)
2888 j++;
2889 }
2890 *offset = last_offset + j * map->stripe_len;
2891 return 1;
2892 }
2893
2894 static void scrub_free_parity(struct scrub_parity *sparity)
2895 {
2896 struct scrub_ctx *sctx = sparity->sctx;
2897 struct scrub_sector *curr, *next;
2898 int nbits;
2899
2900 nbits = bitmap_weight(&sparity->ebitmap, sparity->nsectors);
2901 if (nbits) {
2902 spin_lock(&sctx->stat_lock);
2903 sctx->stat.read_errors += nbits;
2904 sctx->stat.uncorrectable_errors += nbits;
2905 spin_unlock(&sctx->stat_lock);
2906 }
2907
2908 list_for_each_entry_safe(curr, next, &sparity->sectors_list, list) {
2909 list_del_init(&curr->list);
2910 scrub_sector_put(curr);
2911 }
2912
2913 kfree(sparity);
2914 }
2915
2916 static void scrub_parity_bio_endio_worker(struct work_struct *work)
2917 {
2918 struct scrub_parity *sparity = container_of(work, struct scrub_parity,
2919 work);
2920 struct scrub_ctx *sctx = sparity->sctx;
2921
2922 btrfs_bio_counter_dec(sctx->fs_info);
2923 scrub_free_parity(sparity);
2924 scrub_pending_bio_dec(sctx);
2925 }
2926
2927 static void scrub_parity_bio_endio(struct bio *bio)
2928 {
2929 struct scrub_parity *sparity = bio->bi_private;
2930 struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;
2931
2932 if (bio->bi_status)
2933 bitmap_or(&sparity->ebitmap, &sparity->ebitmap,
2934 &sparity->dbitmap, sparity->nsectors);
2935
2936 bio_put(bio);
2937
2938 INIT_WORK(&sparity->work, scrub_parity_bio_endio_worker);
2939 queue_work(fs_info->scrub_parity_workers, &sparity->work);
2940 }
2941
2942 static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
2943 {
2944 struct scrub_ctx *sctx = sparity->sctx;
2945 struct btrfs_fs_info *fs_info = sctx->fs_info;
2946 struct bio *bio;
2947 struct btrfs_raid_bio *rbio;
2948 struct btrfs_io_context *bioc = NULL;
2949 u64 length;
2950 int ret;
2951
2952 if (!bitmap_andnot(&sparity->dbitmap, &sparity->dbitmap,
2953 &sparity->ebitmap, sparity->nsectors))
2954 goto out;
2955
2956 length = sparity->logic_end - sparity->logic_start;
2957
2958 btrfs_bio_counter_inc_blocked(fs_info);
2959 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start,
2960 &length, &bioc);
2961 if (ret || !bioc || !bioc->raid_map)
2962 goto bioc_out;
2963
2964 bio = bio_alloc(NULL, BIO_MAX_VECS, REQ_OP_READ, GFP_NOFS);
2965 bio->bi_iter.bi_sector = sparity->logic_start >> 9;
2966 bio->bi_private = sparity;
2967 bio->bi_end_io = scrub_parity_bio_endio;
2968
2969 rbio = raid56_parity_alloc_scrub_rbio(bio, bioc,
2970 sparity->scrub_dev,
2971 &sparity->dbitmap,
2972 sparity->nsectors);
2973 btrfs_put_bioc(bioc);
2974 if (!rbio)
2975 goto rbio_out;
2976
2977 scrub_pending_bio_inc(sctx);
2978 raid56_parity_submit_scrub_rbio(rbio);
2979 return;
2980
2981 rbio_out:
2982 bio_put(bio);
2983 bioc_out:
2984 btrfs_bio_counter_dec(fs_info);
2985 bitmap_or(&sparity->ebitmap, &sparity->ebitmap, &sparity->dbitmap,
2986 sparity->nsectors);
2987 spin_lock(&sctx->stat_lock);
2988 sctx->stat.malloc_errors++;
2989 spin_unlock(&sctx->stat_lock);
2990 out:
2991 scrub_free_parity(sparity);
2992 }
2993
2994 static void scrub_parity_get(struct scrub_parity *sparity)
2995 {
2996 refcount_inc(&sparity->refs);
2997 }
2998
2999 static void scrub_parity_put(struct scrub_parity *sparity)
3000 {
3001 if (!refcount_dec_and_test(&sparity->refs))
3002 return;
3003
3004 scrub_parity_check_and_repair(sparity);
3005 }
3006
3007 /*
3008 * Return 0 if the extent item range covers any byte of the range.
3009 * Return <0 if the extent item is before @search_start.
3010 * Return >0 if the extent item is after @search_start + @search_len.
3011 */
3012 static int compare_extent_item_range(struct btrfs_path *path,
3013 u64 search_start, u64 search_len)
3014 {
3015 struct btrfs_fs_info *fs_info = path->nodes[0]->fs_info;
3016 u64 len;
3017 struct btrfs_key key;
3018
3019 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
3020 ASSERT(key.type == BTRFS_EXTENT_ITEM_KEY ||
3021 key.type == BTRFS_METADATA_ITEM_KEY);
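/*
* For a METADATA_ITEM_KEY the offset stores the tree level, so the
* covered length is always nodesize; for an EXTENT_ITEM_KEY the offset
* is the extent length itself.
*/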
3022 if (key.type == BTRFS_METADATA_ITEM_KEY)
3023 len = fs_info->nodesize;
3024 else
3025 len = key.offset;
3026
3027 if (key.objectid + len <= search_start)
3028 return -1;
3029 if (key.objectid >= search_start + search_len)
3030 return 1;
3031 return 0;
3032 }
3033
3034 /*
3035 * Locate one extent item which covers any byte in range
3036 * [@search_start, @search_start + @search_length)
3037 *
3038 * If the path is not initialized, we will initialize the search by doing
3039 * a btrfs_search_slot().
3040 * If the path is already initialized, we will use the path as the initial
3041 * slot, to avoid duplicated btrfs_search_slot() calls.
3042 *
3043 * NOTE: If an extent item starts before @search_start, we will still
3044 * return the extent item. This is for data extents crossing stripe boundaries.
3045 *
3046 * Return 0 if we found such extent item, and @path will point to the extent item.
3047 * Return >0 if no such extent item can be found, and @path will be released.
3048 * Return <0 if hit fatal error, and @path will be released.
3049 */
3050 static int find_first_extent_item(struct btrfs_root *extent_root,
3051 struct btrfs_path *path,
3052 u64 search_start, u64 search_len)
3053 {
3054 struct btrfs_fs_info *fs_info = extent_root->fs_info;
3055 struct btrfs_key key;
3056 int ret;
3057
3058 /* Continue using the existing path */
3059 if (path->nodes[0])
3060 goto search_forward;
3061
3062 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
3063 key.type = BTRFS_METADATA_ITEM_KEY;
3064 else
3065 key.type = BTRFS_EXTENT_ITEM_KEY;
3066 key.objectid = search_start;
3067 key.offset = (u64)-1;
3068
3069 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
3070 if (ret < 0)
3071 return ret;
3072
3073 ASSERT(ret > 0);
3074 /*
3075 * Here we intentionally pass 0 as @min_objectid, as there could be
3076 * an extent item starting before @search_start.
3077 */
3078 ret = btrfs_previous_extent_item(extent_root, path, 0);
3079 if (ret < 0)
3080 return ret;
3081 /*
3082 * No matter whether we have found an extent item, the next loop will
3083 * properly do every check on the key.
3084 */
3085 search_forward:
3086 while (true) {
3087 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
3088 if (key.objectid >= search_start + search_len)
3089 break;
3090 if (key.type != BTRFS_METADATA_ITEM_KEY &&
3091 key.type != BTRFS_EXTENT_ITEM_KEY)
3092 goto next;
3093
3094 ret = compare_extent_item_range(path, search_start, search_len);
3095 if (ret == 0)
3096 return ret;
3097 if (ret > 0)
3098 break;
3099 next:
3100 path->slots[0]++;
3101 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
3102 ret = btrfs_next_leaf(extent_root, path);
3103 if (ret) {
3104 /* Either no more item or fatal error */
3105 btrfs_release_path(path);
3106 return ret;
3107 }
3108 }
3109 }
3110 btrfs_release_path(path);
3111 return 1;
3112 }
3113
3114 static void get_extent_info(struct btrfs_path *path, u64 *extent_start_ret,
3115 u64 *size_ret, u64 *flags_ret, u64 *generation_ret)
3116 {
3117 struct btrfs_key key;
3118 struct btrfs_extent_item *ei;
3119
3120 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
3121 ASSERT(key.type == BTRFS_METADATA_ITEM_KEY ||
3122 key.type == BTRFS_EXTENT_ITEM_KEY);
3123 *extent_start_ret = key.objectid;
3124 if (key.type == BTRFS_METADATA_ITEM_KEY)
3125 *size_ret = path->nodes[0]->fs_info->nodesize;
3126 else
3127 *size_ret = key.offset;
3128 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_extent_item);
3129 *flags_ret = btrfs_extent_flags(path->nodes[0], ei);
3130 *generation_ret = btrfs_extent_generation(path->nodes[0], ei);
3131 }
3132
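/*
 * Return true if [@extent_start, @extent_start + @extent_len) crosses either
 * edge of [@boundary_start, @boundary_start + @boundary_len). Illustrative
 * example (hypothetical numbers): an extent [60K, 72K) crosses a boundary
 * starting at 64K, while an extent fully inside or outside does not.
 */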
3133 static bool does_range_cross_boundary(u64 extent_start, u64 extent_len,
3134 u64 boundary_start, u64 boundary_len)
3135 {
3136 return (extent_start < boundary_start &&
3137 extent_start + extent_len > boundary_start) ||
3138 (extent_start < boundary_start + boundary_len &&
3139 extent_start + extent_len > boundary_start + boundary_len);
3140 }
3141
3142 static int scrub_raid56_data_stripe_for_parity(struct scrub_ctx *sctx,
3143 struct scrub_parity *sparity,
3144 struct map_lookup *map,
3145 struct btrfs_device *sdev,
3146 struct btrfs_path *path,
3147 u64 logical)
3148 {
3149 struct btrfs_fs_info *fs_info = sctx->fs_info;
3150 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, logical);
3151 struct btrfs_root *csum_root = btrfs_csum_root(fs_info, logical);
3152 u64 cur_logical = logical;
3153 int ret;
3154
3155 ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK);
3156
3157 /* Path must not be populated */
3158 ASSERT(!path->nodes[0]);
3159
3160 while (cur_logical < logical + map->stripe_len) {
3161 struct btrfs_io_context *bioc = NULL;
3162 struct btrfs_device *extent_dev;
3163 u64 extent_start;
3164 u64 extent_size;
3165 u64 mapped_length;
3166 u64 extent_flags;
3167 u64 extent_gen;
3168 u64 extent_physical;
3169 u64 extent_mirror_num;
3170
3171 ret = find_first_extent_item(extent_root, path, cur_logical,
3172 logical + map->stripe_len - cur_logical);
3173 /* No more extent item in this data stripe */
3174 if (ret > 0) {
3175 ret = 0;
3176 break;
3177 }
3178 if (ret < 0)
3179 break;
3180 get_extent_info(path, &extent_start, &extent_size, &extent_flags,
3181 &extent_gen);
3182
3183 /* Metadata should not cross stripe boundaries */
3184 if ((extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
3185 does_range_cross_boundary(extent_start, extent_size,
3186 logical, map->stripe_len)) {
3187 btrfs_err(fs_info,
3188 "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
3189 extent_start, logical);
3190 spin_lock(&sctx->stat_lock);
3191 sctx->stat.uncorrectable_errors++;
3192 spin_unlock(&sctx->stat_lock);
3193 cur_logical += extent_size;
3194 continue;
3195 }
3196
3197 /* Skip hole range which doesn't have any extent */
3198 cur_logical = max(extent_start, cur_logical);
3199
3200 /* Truncate the range inside this data stripe */
3201 extent_size = min(extent_start + extent_size,
3202 logical + map->stripe_len) - cur_logical;
3203 extent_start = cur_logical;
3204 ASSERT(extent_size <= U32_MAX);
3205
3206 scrub_parity_mark_sectors_data(sparity, extent_start, extent_size);
3207
3208 mapped_length = extent_size;
3209 ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_start,
3210 &mapped_length, &bioc, 0);
3211 if (!ret && (!bioc || mapped_length < extent_size))
3212 ret = -EIO;
3213 if (ret) {
3214 btrfs_put_bioc(bioc);
3215 scrub_parity_mark_sectors_error(sparity, extent_start,
3216 extent_size);
3217 break;
3218 }
3219 extent_physical = bioc->stripes[0].physical;
3220 extent_mirror_num = bioc->mirror_num;
3221 extent_dev = bioc->stripes[0].dev;
3222 btrfs_put_bioc(bioc);
3223
3224 ret = btrfs_lookup_csums_range(csum_root, extent_start,
3225 extent_start + extent_size - 1,
3226 &sctx->csum_list, 1, false);
3227 if (ret) {
3228 scrub_parity_mark_sectors_error(sparity, extent_start,
3229 extent_size);
3230 break;
3231 }
3232
3233 ret = scrub_extent_for_parity(sparity, extent_start,
3234 extent_size, extent_physical,
3235 extent_dev, extent_flags,
3236 extent_gen, extent_mirror_num);
3237 scrub_free_csums(sctx);
3238
3239 if (ret) {
3240 scrub_parity_mark_sectors_error(sparity, extent_start,
3241 extent_size);
3242 break;
3243 }
3244
3245 cond_resched();
3246 cur_logical += extent_size;
3247 }
3248 btrfs_release_path(path);
3249 return ret;
3250 }
3251
3252 static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
3253 struct map_lookup *map,
3254 struct btrfs_device *sdev,
3255 u64 logic_start,
3256 u64 logic_end)
3257 {
3258 struct btrfs_fs_info *fs_info = sctx->fs_info;
3259 struct btrfs_path *path;
3260 u64 cur_logical;
3261 int ret;
3262 struct scrub_parity *sparity;
3263 int nsectors;
3264
3265 path = btrfs_alloc_path();
3266 if (!path) {
3267 spin_lock(&sctx->stat_lock);
3268 sctx->stat.malloc_errors++;
3269 spin_unlock(&sctx->stat_lock);
3270 return -ENOMEM;
3271 }
3272 path->search_commit_root = 1;
3273 path->skip_locking = 1;
3274
3275 ASSERT(map->stripe_len <= U32_MAX);
3276 nsectors = map->stripe_len >> fs_info->sectorsize_bits;
3277 ASSERT(nsectors <= BITS_PER_LONG);
3278 sparity = kzalloc(sizeof(struct scrub_parity), GFP_NOFS);
3279 if (!sparity) {
3280 spin_lock(&sctx->stat_lock);
3281 sctx->stat.malloc_errors++;
3282 spin_unlock(&sctx->stat_lock);
3283 btrfs_free_path(path);
3284 return -ENOMEM;
3285 }
3286
3287 ASSERT(map->stripe_len <= U32_MAX);
3288 sparity->stripe_len = map->stripe_len;
3289 sparity->nsectors = nsectors;
3290 sparity->sctx = sctx;
3291 sparity->scrub_dev = sdev;
3292 sparity->logic_start = logic_start;
3293 sparity->logic_end = logic_end;
3294 refcount_set(&sparity->refs, 1);
3295 INIT_LIST_HEAD(&sparity->sectors_list);
3296
3297 ret = 0;
3298 for (cur_logical = logic_start; cur_logical < logic_end;
3299 cur_logical += map->stripe_len) {
3300 ret = scrub_raid56_data_stripe_for_parity(sctx, sparity, map,
3301 sdev, path, cur_logical);
3302 if (ret < 0)
3303 break;
3304 }
3305
3306 scrub_parity_put(sparity);
3307 scrub_submit(sctx);
3308 mutex_lock(&sctx->wr_lock);
3309 scrub_wr_submit(sctx);
3310 mutex_unlock(&sctx->wr_lock);
3311
3312 btrfs_free_path(path);
3313 return ret < 0 ? ret : 0;
3314 }
3315
3316 static void sync_replace_for_zoned(struct scrub_ctx *sctx)
3317 {
3318 if (!btrfs_is_zoned(sctx->fs_info))
3319 return;
3320
3321 sctx->flush_all_writes = true;
3322 scrub_submit(sctx);
3323 mutex_lock(&sctx->wr_lock);
3324 scrub_wr_submit(sctx);
3325 mutex_unlock(&sctx->wr_lock);
3326
3327 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3328 }
3329
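/*
 * If the copied range stopped short of @physical_end, the replace target's
 * zone write pointer may lag behind the source device's; sync it so both
 * zones end up in the same state.
 */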
3330 static int sync_write_pointer_for_zoned(struct scrub_ctx *sctx, u64 logical,
3331 u64 physical, u64 physical_end)
3332 {
3333 struct btrfs_fs_info *fs_info = sctx->fs_info;
3334 int ret = 0;
3335
3336 if (!btrfs_is_zoned(fs_info))
3337 return 0;
3338
3339 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3340
3341 mutex_lock(&sctx->wr_lock);
3342 if (sctx->write_pointer < physical_end) {
3343 ret = btrfs_sync_zone_write_pointer(sctx->wr_tgtdev, logical,
3344 physical,
3345 sctx->write_pointer);
3346 if (ret)
3347 btrfs_err(fs_info,
3348 "zoned: failed to recover write pointer");
3349 }
3350 mutex_unlock(&sctx->wr_lock);
3351 btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical);
3352
3353 return ret;
3354 }
3355
3356 /*
3357 * Scrub one range which can only have a simple mirror based profile.
3358 * (This includes all ranges in SINGLE/DUP/RAID1/RAID1C*, and each stripe
3359 * in RAID0/RAID10.)
3360 *
3361 * Since we may need to handle a subset of a block group, we need the
3362 * @logical_start and @logical_length parameters.
3363 */
3364 static int scrub_simple_mirror(struct scrub_ctx *sctx,
3365 struct btrfs_root *extent_root,
3366 struct btrfs_root *csum_root,
3367 struct btrfs_block_group *bg,
3368 struct map_lookup *map,
3369 u64 logical_start, u64 logical_length,
3370 struct btrfs_device *device,
3371 u64 physical, int mirror_num)
3372 {
3373 struct btrfs_fs_info *fs_info = sctx->fs_info;
3374 const u64 logical_end = logical_start + logical_length;
3375 /* An artificial limit, inherit from old scrub behavior */
3376 const u32 max_length = SZ_64K;
3377 struct btrfs_path path = { 0 };
3378 u64 cur_logical = logical_start;
3379 int ret;
3380
3381 /* The range must be inside the bg */
3382 ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);
3383
3384 path.search_commit_root = 1;
3385 path.skip_locking = 1;
3386 /* Go through each extent items inside the logical range */
3387 while (cur_logical < logical_end) {
3388 u64 extent_start;
3389 u64 extent_len;
3390 u64 extent_flags;
3391 u64 extent_gen;
3392 u64 scrub_len;
3393
3394 /* Canceled? */
3395 if (atomic_read(&fs_info->scrub_cancel_req) ||
3396 atomic_read(&sctx->cancel_req)) {
3397 ret = -ECANCELED;
3398 break;
3399 }
3400 /* Paused? */
3401 if (atomic_read(&fs_info->scrub_pause_req)) {
3402 /* Push queued extents */
3403 sctx->flush_all_writes = true;
3404 scrub_submit(sctx);
3405 mutex_lock(&sctx->wr_lock);
3406 scrub_wr_submit(sctx);
3407 mutex_unlock(&sctx->wr_lock);
3408 wait_event(sctx->list_wait,
3409 atomic_read(&sctx->bios_in_flight) == 0);
3410 sctx->flush_all_writes = false;
3411 scrub_blocked_if_needed(fs_info);
3412 }
3413 /* Block group removed? */
3414 spin_lock(&bg->lock);
3415 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) {
3416 spin_unlock(&bg->lock);
3417 ret = 0;
3418 break;
3419 }
3420 spin_unlock(&bg->lock);
3421
3422 ret = find_first_extent_item(extent_root, &path, cur_logical,
3423 logical_end - cur_logical);
3424 if (ret > 0) {
3425 /* No more extent, just update the accounting */
3426 sctx->stat.last_physical = physical + logical_length;
3427 ret = 0;
3428 break;
3429 }
3430 if (ret < 0)
3431 break;
3432 get_extent_info(&path, &extent_start, &extent_len,
3433 &extent_flags, &extent_gen);
3434 /* Skip hole range which doesn't have any extent */
3435 cur_logical = max(extent_start, cur_logical);
3436
3437 /*
3438 * Scrub len has three limits:
3439 * - Extent size limit
3440 * - Scrub range limit
3441 *   This is especially important for RAID0/RAID10 to reuse
3442 *   this function.
3443 * - Max scrub size limit
3444 */
3445 scrub_len = min(min(extent_start + extent_len,
3446 logical_end), cur_logical + max_length) -
3447 cur_logical;
3448
3449 if (extent_flags & BTRFS_EXTENT_FLAG_DATA) {
3450 ret = btrfs_lookup_csums_range(csum_root, cur_logical,
3451 cur_logical + scrub_len - 1,
3452 &sctx->csum_list, 1, false);
3453 if (ret)
3454 break;
3455 }
3456 if ((extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
3457 does_range_cross_boundary(extent_start, extent_len,
3458 logical_start, logical_length)) {
3459 btrfs_err(fs_info,
3460 "scrub: tree block %llu spanning boundaries, ignored. boundary=[%llu, %llu)",
3461 extent_start, logical_start, logical_end);
3462 spin_lock(&sctx->stat_lock);
3463 sctx->stat.uncorrectable_errors++;
3464 spin_unlock(&sctx->stat_lock);
3465 cur_logical += scrub_len;
3466 continue;
3467 }
3468 ret = scrub_extent(sctx, map, cur_logical, scrub_len,
3469 cur_logical - logical_start + physical,
3470 device, extent_flags, extent_gen,
3471 mirror_num);
3472 scrub_free_csums(sctx);
3473 if (ret)
3474 break;
3475 if (sctx->is_dev_replace)
3476 sync_replace_for_zoned(sctx);
3477 cur_logical += scrub_len;
3478 /* Don't hold the CPU for too long */
3479 cond_resched();
3480 }
3481 btrfs_release_path(&path);
3482 return ret;
3483 }
3484
3485 /* Calculate the full stripe length for simple stripe based profiles */
3486 static u64 simple_stripe_full_stripe_len(const struct map_lookup *map)
3487 {
3488 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
3489 BTRFS_BLOCK_GROUP_RAID10));
3490
3491 return map->num_stripes / map->sub_stripes * map->stripe_len;
3492 }
3493
3494 /* Get the logical bytenr for the stripe */
3495 static u64 simple_stripe_get_logical(struct map_lookup *map,
3496 struct btrfs_block_group *bg,
3497 int stripe_index)
3498 {
3499 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
3500 BTRFS_BLOCK_GROUP_RAID10));
3501 ASSERT(stripe_index < map->num_stripes);
3502
3503 /*
3504 * (stripe_index / sub_stripes) gives how many data stripes we need to
3505 * skip.
3506 */
3507 return (stripe_index / map->sub_stripes) * map->stripe_len + bg->start;
3508 }
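/*
 * Example mapping (same hypothetical RAID10 layout: num_stripes = 4,
 * sub_stripes = 2, stripe_len = 64K): stripe_index 0 and 1 both mirror
 * data stripe 0 and map to bg->start, while stripe_index 2 and 3 map
 * to bg->start + 64K.
 */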
3509
3510 /* Get the mirror number for the stripe */
3511 static int simple_stripe_mirror_num(struct map_lookup *map, int stripe_index)
3512 {
3513 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
3514 BTRFS_BLOCK_GROUP_RAID10));
3515 ASSERT(stripe_index < map->num_stripes);
3516
3517 	/* For RAID0 the mirror number is fixed to 1, for RAID10 it's 1,2,1,2... */
3518 return stripe_index % map->sub_stripes + 1;
3519 }
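/*
 * With the same hypothetical layouts: RAID0 has sub_stripes = 1, so every
 * stripe gets mirror_num 1; RAID10 has sub_stripes = 2, so consecutive
 * stripe indexes get mirror_num 1, 2, 1, 2, ...
 */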
3520
3521 static int scrub_simple_stripe(struct scrub_ctx *sctx,
3522 struct btrfs_root *extent_root,
3523 struct btrfs_root *csum_root,
3524 struct btrfs_block_group *bg,
3525 struct map_lookup *map,
3526 struct btrfs_device *device,
3527 int stripe_index)
3528 {
3529 const u64 logical_increment = simple_stripe_full_stripe_len(map);
3530 const u64 orig_logical = simple_stripe_get_logical(map, bg, stripe_index);
3531 const u64 orig_physical = map->stripes[stripe_index].physical;
3532 const int mirror_num = simple_stripe_mirror_num(map, stripe_index);
3533 u64 cur_logical = orig_logical;
3534 u64 cur_physical = orig_physical;
3535 int ret = 0;
3536
3537 while (cur_logical < bg->start + bg->length) {
3538 /*
3539 * Inside each stripe, RAID0 is just SINGLE, and RAID10 is
3540 * just RAID1, so we can reuse scrub_simple_mirror() to scrub
3541 * this stripe.
3542 */
3543 ret = scrub_simple_mirror(sctx, extent_root, csum_root, bg, map,
3544 cur_logical, map->stripe_len, device,
3545 cur_physical, mirror_num);
3546 if (ret)
3547 return ret;
3548 /* Skip to next stripe which belongs to the target device */
3549 cur_logical += logical_increment;
3550 /* For physical offset, we just go to next stripe */
3551 cur_physical += map->stripe_len;
3552 }
3553 return ret;
3554 }
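/*
 * A worked walk of the loop above (hypothetical RAID0 with 2 stripes and
 * stripe_len = 64K, so logical_increment = 128K): for stripe_index 1 we
 * start at orig_logical = bg->start + 64K and scrub 64K per iteration,
 * advancing the logical address by 128K but the physical address only by
 * 64K, since every other 64K of the logical space lives on the other
 * device.
 */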
3555
3556 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
3557 struct btrfs_block_group *bg,
3558 struct extent_map *em,
3559 struct btrfs_device *scrub_dev,
3560 int stripe_index)
3561 {
3562 struct btrfs_path *path;
3563 struct btrfs_fs_info *fs_info = sctx->fs_info;
3564 struct btrfs_root *root;
3565 struct btrfs_root *csum_root;
3566 struct blk_plug plug;
3567 struct map_lookup *map = em->map_lookup;
3568 const u64 profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
3569 const u64 chunk_logical = bg->start;
3570 int ret;
3571 u64 physical = map->stripes[stripe_index].physical;
3572 const u64 dev_stripe_len = btrfs_calc_stripe_length(em);
3573 const u64 physical_end = physical + dev_stripe_len;
3574 u64 logical;
3575 u64 logic_end;
3576 /* The logical increment after finishing one stripe */
3577 u64 increment;
3578 /* Offset inside the chunk */
3579 u64 offset;
3580 u64 stripe_logical;
3581 u64 stripe_end;
3582 int stop_loop = 0;
3583
3584 path = btrfs_alloc_path();
3585 if (!path)
3586 return -ENOMEM;
3587
3588 /*
3589 	 * Work on the commit root. The related disk blocks are static as
3590 	 * long as COW is applied. This means it is safe to rewrite
3591 	 * them to repair disk errors without any race conditions.
3592 */
3593 path->search_commit_root = 1;
3594 path->skip_locking = 1;
3595 path->reada = READA_FORWARD;
3596
3597 wait_event(sctx->list_wait,
3598 atomic_read(&sctx->bios_in_flight) == 0);
3599 scrub_blocked_if_needed(fs_info);
3600
3601 root = btrfs_extent_root(fs_info, bg->start);
3602 csum_root = btrfs_csum_root(fs_info, bg->start);
3603
3604 /*
3605 	 * Collect all data csums for the stripe to avoid seeking during
3606 	 * the scrub. With crc32 this might currently end up being about 1MB.
3607 */
3608 blk_start_plug(&plug);
3609
3610 if (sctx->is_dev_replace &&
3611 btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) {
3612 mutex_lock(&sctx->wr_lock);
3613 sctx->write_pointer = physical;
3614 mutex_unlock(&sctx->wr_lock);
3615 sctx->flush_all_writes = true;
3616 }
3617
3618 /*
3619 	 * There used to be a big double loop to handle all profiles using the
3620 	 * same routine, which grew larger and more convoluted over time.
3621 	 *
3622 	 * So here we handle each profile differently, so that simpler profiles
3623 	 * have simpler scrubbing functions.
3624 */
3625 if (!(profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10 |
3626 BTRFS_BLOCK_GROUP_RAID56_MASK))) {
3627 /*
3628 		 * The above check rules out all complex profiles; the remaining
3629 		 * profiles are SINGLE|DUP|RAID1|RAID1C*, which are simple
3630 		 * mirrored duplication without striping.
3631 		 *
3632 		 * Only @physical and @mirror_num need to be calculated using
3633 		 * @stripe_index.
3634 */
3635 ret = scrub_simple_mirror(sctx, root, csum_root, bg, map,
3636 bg->start, bg->length, scrub_dev,
3637 map->stripes[stripe_index].physical,
3638 stripe_index + 1);
3639 offset = 0;
3640 goto out;
3641 }
3642 if (profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
3643 ret = scrub_simple_stripe(sctx, root, csum_root, bg, map,
3644 scrub_dev, stripe_index);
3645 offset = map->stripe_len * (stripe_index / map->sub_stripes);
3646 goto out;
3647 }
3648
3649 /* Only RAID56 goes through the old code */
3650 ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK);
3651 ret = 0;
3652
3653 /* Calculate the logical end of the stripe */
3654 get_raid56_logic_offset(physical_end, stripe_index,
3655 map, &logic_end, NULL);
3656 logic_end += chunk_logical;
3657
3658 	/* Initialize @offset in case we need to go to the "out:" label */
3659 get_raid56_logic_offset(physical, stripe_index, map, &offset, NULL);
3660 increment = map->stripe_len * nr_data_stripes(map);
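	/*
	 * Example (hypothetical 3-device RAID5): nr_data_stripes() is
	 * num_stripes - 1 = 2, so after scrubbing one 64K data stripe the
	 * logical address advances by increment = 2 * 64K = 128K, while the
	 * physical address below advances by a single stripe_len.
	 */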
3661
3662 /*
3663 	 * Due to the rotation, for RAID56 it's better to iterate over the
3664 	 * stripes using their physical offsets.
3665 */
3666 while (physical < physical_end) {
3667 ret = get_raid56_logic_offset(physical, stripe_index, map,
3668 &logical, &stripe_logical);
3669 logical += chunk_logical;
3670 if (ret) {
3671 			/* It is a parity stripe */
3672 stripe_logical += chunk_logical;
3673 stripe_end = stripe_logical + increment;
3674 ret = scrub_raid56_parity(sctx, map, scrub_dev,
3675 stripe_logical,
3676 stripe_end);
3677 if (ret)
3678 goto out;
3679 goto next;
3680 }
3681
3682 /*
3683 		 * Now we're at a data stripe, scrub each extent in the range.
3684 		 *
3685 		 * At this stage, if we ignore the repair part, inside each data
3686 		 * stripe it is no different from the SINGLE profile.
3687 * We can reuse scrub_simple_mirror() here, as the repair part
3688 * is still based on @mirror_num.
3689 */
3690 ret = scrub_simple_mirror(sctx, root, csum_root, bg, map,
3691 logical, map->stripe_len,
3692 scrub_dev, physical, 1);
3693 if (ret < 0)
3694 goto out;
3695 next:
3696 logical += increment;
3697 physical += map->stripe_len;
3698 spin_lock(&sctx->stat_lock);
3699 if (stop_loop)
3700 sctx->stat.last_physical =
3701 map->stripes[stripe_index].physical + dev_stripe_len;
3702 else
3703 sctx->stat.last_physical = physical;
3704 spin_unlock(&sctx->stat_lock);
3705 if (stop_loop)
3706 break;
3707 }
3708 out:
3709 /* push queued extents */
3710 scrub_submit(sctx);
3711 mutex_lock(&sctx->wr_lock);
3712 scrub_wr_submit(sctx);
3713 mutex_unlock(&sctx->wr_lock);
3714
3715 blk_finish_plug(&plug);
3716 btrfs_free_path(path);
3717
3718 if (sctx->is_dev_replace && ret >= 0) {
3719 int ret2;
3720
3721 ret2 = sync_write_pointer_for_zoned(sctx,
3722 chunk_logical + offset,
3723 map->stripes[stripe_index].physical,
3724 physical_end);
3725 if (ret2)
3726 ret = ret2;
3727 }
3728
3729 return ret < 0 ? ret : 0;
3730 }
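/*
 * Note on @offset above: each profile branch records the chunk-relative
 * logical start of this device's stripe, so chunk_logical + offset is the
 * logical address from which sync_write_pointer_for_zoned() resyncs the
 * target device's write pointer for a zoned dev-replace.
 */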
3731
3732 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
3733 struct btrfs_block_group *bg,
3734 struct btrfs_device *scrub_dev,
3735 u64 dev_offset,
3736 u64 dev_extent_len)
3737 {
3738 struct btrfs_fs_info *fs_info = sctx->fs_info;
3739 struct extent_map_tree *map_tree = &fs_info->mapping_tree;
3740 struct map_lookup *map;
3741 struct extent_map *em;
3742 int i;
3743 int ret = 0;
3744
3745 read_lock(&map_tree->lock);
3746 em = lookup_extent_mapping(map_tree, bg->start, bg->length);
3747 read_unlock(&map_tree->lock);
3748
3749 if (!em) {
3750 /*
3751 * Might have been an unused block group deleted by the cleaner
3752 * kthread or relocation.
3753 */
3754 spin_lock(&bg->lock);
3755 if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags))
3756 ret = -EINVAL;
3757 spin_unlock(&bg->lock);
3758
3759 return ret;
3760 }
3761 if (em->start != bg->start)
3762 goto out;
3763 if (em->len < dev_extent_len)
3764 goto out;
3765
3766 map = em->map_lookup;
3767 for (i = 0; i < map->num_stripes; ++i) {
3768 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
3769 map->stripes[i].physical == dev_offset) {
3770 ret = scrub_stripe(sctx, bg, em, scrub_dev, i);
3771 if (ret)
3772 goto out;
3773 }
3774 }
3775 out:
3776 free_extent_map(em);
3777
3778 return ret;
3779 }
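/*
 * Note on the stripe-matching loop above: one device may hold several
 * stripes of the same chunk (e.g. DUP keeps both copies on a single device
 * at different physical offsets), so both the bdev and the physical offset
 * of the dev extent must match before scrub_stripe() is called.
 */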
3780
3781 static int finish_extent_writes_for_zoned(struct btrfs_root *root,
3782 struct btrfs_block_group *cache)
3783 {
3784 struct btrfs_fs_info *fs_info = cache->fs_info;
3785 struct btrfs_trans_handle *trans;
3786
3787 if (!btrfs_is_zoned(fs_info))
3788 return 0;
3789
3790 btrfs_wait_block_group_reservations(cache);
3791 btrfs_wait_nocow_writers(cache);
3792 btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, cache->length);
3793
3794 trans = btrfs_join_transaction(root);
3795 if (IS_ERR(trans))
3796 return PTR_ERR(trans);
3797 return btrfs_commit_transaction(trans);
3798 }
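/*
 * Sequencing note: waiting for block group reservations, nocow writers and
 * ordered extents before committing ensures that, on a zoned filesystem,
 * every extent write into this block group has reached the disk before the
 * dev-replace copy of the block group starts.
 */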
3799
3800 static noinline_for_stack
3801 int scrub_enumerate_chunks(struct scrub_ctx *sctx,
3802 struct btrfs_device *scrub_dev, u64 start, u64 end)
3803 {
3804 struct btrfs_dev_extent *dev_extent = NULL;
3805 struct btrfs_path *path;
3806 struct btrfs_fs_info *fs_info = sctx->fs_info;
3807 struct btrfs_root *root = fs_info->dev_root;
3808 u64 chunk_offset;
3809 int ret = 0;
3810 int ro_set;
3811 int slot;
3812 struct extent_buffer *l;
3813 struct btrfs_key key;
3814 struct btrfs_key found_key;
3815 struct btrfs_block_group *cache;
3816 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
3817
3818 path = btrfs_alloc_path();
3819 if (!path)
3820 return -ENOMEM;
3821
3822 path->reada = READA_FORWARD;
3823 path->search_commit_root = 1;
3824 path->skip_locking = 1;
3825
3826 key.objectid = scrub_dev->devid;
3827 key.offset = 0ull;
3828 key.type = BTRFS_DEV_EXTENT_KEY;
3829
3830 while (1) {
3831 u64 dev_extent_len;
3832
3833 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3834 if (ret < 0)
3835 break;
3836 if (ret > 0) {
3837 if (path->slots[0] >=
3838 btrfs_header_nritems(path->nodes[0])) {
3839 ret = btrfs_next_leaf(root, path);
3840 if (ret < 0)
3841 break;
3842 if (ret > 0) {
3843 ret = 0;
3844 break;
3845 }
3846 } else {
3847 ret = 0;
3848 }
3849 }
3850
3851 l = path->nodes[0];
3852 slot = path->slots[0];
3853
3854 btrfs_item_key_to_cpu(l, &found_key, slot);
3855
3856 if (found_key.objectid != scrub_dev->devid)
3857 break;
3858
3859 if (found_key.type != BTRFS_DEV_EXTENT_KEY)
3860 break;
3861
3862 if (found_key.offset >= end)
3863 break;
3864
3865 if (found_key.offset < key.offset)
3866 break;
3867
3868 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3869 dev_extent_len = btrfs_dev_extent_length(l, dev_extent);
3870
3871 if (found_key.offset + dev_extent_len <= start)
3872 goto skip;
3873
3874 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3875
3876 /*
3877 * get a reference on the corresponding block group to prevent
3878 * the chunk from going away while we scrub it
3879 */
3880 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3881
3882 		/* Some chunks were removed but not committed to disk yet,
3883 		 * continue scrubbing. */
3884 if (!cache)
3885 goto skip;
3886
3887 ASSERT(cache->start <= chunk_offset);
3888 /*
3889 * We are using the commit root to search for device extents, so
3890 * that means we could have found a device extent item from a
3891 * block group that was deleted in the current transaction. The
3892 * logical start offset of the deleted block group, stored at
3893 * @chunk_offset, might be part of the logical address range of
3894 * a new block group (which uses different physical extents).
3895 * In this case btrfs_lookup_block_group() has returned the new
3896 * block group, and its start address is less than @chunk_offset.
3897 *
3898 		 * We skip such new block groups, because it's pointless to
3899 		 * process them: we won't find their extents, since we search
3900 		 * for them using the commit root of the extent tree. For a device
3901 * replace it's also fine to skip it, we won't miss copying them
3902 * to the target device because we have the write duplication
3903 * setup through the regular write path (by btrfs_map_block()),
3904 * and we have committed a transaction when we started the device
3905 * replace, right after setting up the device replace state.
3906 */
3907 if (cache->start < chunk_offset) {
3908 btrfs_put_block_group(cache);
3909 goto skip;
3910 }
3911
3912 if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) {
3913 if (!test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags)) {
3914 btrfs_put_block_group(cache);
3915 goto skip;
3916 }
3917 }
3918
3919 /*
3920 * Make sure that while we are scrubbing the corresponding block
3921 * group doesn't get its logical address and its device extents
3922 * reused for another block group, which can possibly be of a
3923 * different type and different profile. We do this to prevent
3924 * false error detections and crashes due to bogus attempts to
3925 * repair extents.
3926 */
3927 spin_lock(&cache->lock);
3928 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags)) {
3929 spin_unlock(&cache->lock);
3930 btrfs_put_block_group(cache);
3931 goto skip;
3932 }
3933 btrfs_freeze_block_group(cache);
3934 spin_unlock(&cache->lock);
3935
3936 /*
3937 		 * We need to call btrfs_inc_block_group_ro() with scrubs_paused,
3938 * to avoid deadlock caused by:
3939 * btrfs_inc_block_group_ro()
3940 * -> btrfs_wait_for_commit()
3941 * -> btrfs_commit_transaction()
3942 * -> btrfs_scrub_pause()
3943 */
3944 scrub_pause_on(fs_info);
3945
3946 /*
3947 * Don't do chunk preallocation for scrub.
3948 *
3949 * This is especially important for SYSTEM bgs, or we can hit
3950 * -EFBIG from btrfs_finish_chunk_alloc() like:
3951 * 1. The only SYSTEM bg is marked RO.
3952 * Since SYSTEM bg is small, that's pretty common.
3953 * 2. New SYSTEM bg will be allocated
3954 		 *    The regular allocation path will allocate a new chunk.
3955 * 3. New SYSTEM bg is empty and will get cleaned up
3956 * Before cleanup really happens, it's marked RO again.
3957 * 4. Empty SYSTEM bg get scrubbed
3958 * We go back to 2.
3959 *
3960 		 * This can easily boost the number of SYSTEM chunks if the cleaner
3961 		 * thread can't be triggered fast enough, and use up all the space
3962 		 * in btrfs_super_block::sys_chunk_array.
3963 		 *
3964 		 * While for dev replace, we need to try our best to mark the block
3965 		 * group RO, to prevent a race between:
3966 		 * - Write duplication
3967 		 *   Contains latest data
3968 		 * - Scrub copy
3969 		 *   Contains data from commit tree
3970 		 *
3971 		 * If the target block group is not marked RO, nocow writes can
3972 		 * be overwritten by the scrub copy, causing data corruption.
3973 * So for dev-replace, it's not allowed to continue if a block
3974 * group is not RO.
3975 */
3976 ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
3977 if (!ret && sctx->is_dev_replace) {
3978 ret = finish_extent_writes_for_zoned(root, cache);
3979 if (ret) {
3980 btrfs_dec_block_group_ro(cache);
3981 scrub_pause_off(fs_info);
3982 btrfs_put_block_group(cache);
3983 break;
3984 }
3985 }
3986
3987 if (ret == 0) {
3988 ro_set = 1;
3989 } else if (ret == -ENOSPC && !sctx->is_dev_replace) {
3990 /*
3991 			 * btrfs_inc_block_group_ro() returns -ENOSPC when it
3992 			 * fails to create a new chunk for metadata.
3993 			 * That is not a problem for scrub, because
3994 			 * metadata is always COWed, and our scrub has paused
3995 			 * transaction commits.
3996 */
3997 ro_set = 0;
3998 } else if (ret == -ETXTBSY) {
3999 btrfs_warn(fs_info,
4000 "skipping scrub of block group %llu due to active swapfile",
4001 cache->start);
4002 scrub_pause_off(fs_info);
4003 ret = 0;
4004 goto skip_unfreeze;
4005 } else {
4006 btrfs_warn(fs_info,
4007 "failed setting block group ro: %d", ret);
4008 btrfs_unfreeze_block_group(cache);
4009 btrfs_put_block_group(cache);
4010 scrub_pause_off(fs_info);
4011 break;
4012 }
4013
4014 /*
4015 		 * Now the target block group is marked RO, wait for nocow writes
4016 		 * to finish before dev-replace.
4017 * COW is fine, as COW never overwrites extents in commit tree.
4018 */
4019 if (sctx->is_dev_replace) {
4020 btrfs_wait_nocow_writers(cache);
4021 btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start,
4022 cache->length);
4023 }
4024
4025 scrub_pause_off(fs_info);
4026 down_write(&dev_replace->rwsem);
4027 dev_replace->cursor_right = found_key.offset + dev_extent_len;
4028 dev_replace->cursor_left = found_key.offset;
4029 dev_replace->item_needs_writeback = 1;
4030 up_write(&dev_replace->rwsem);
4031
4032 ret = scrub_chunk(sctx, cache, scrub_dev, found_key.offset,
4033 dev_extent_len);
4034
4035 /*
4036 		 * Flush and submit all pending read and write bios, and
4037 		 * afterwards wait for them.
4038 * Note that in the dev replace case, a read request causes
4039 * write requests that are submitted in the read completion
4040 * worker. Therefore in the current situation, it is required
4041 * that all write requests are flushed, so that all read and
4042 * write requests are really completed when bios_in_flight
4043 * changes to 0.
4044 */
4045 sctx->flush_all_writes = true;
4046 scrub_submit(sctx);
4047 mutex_lock(&sctx->wr_lock);
4048 scrub_wr_submit(sctx);
4049 mutex_unlock(&sctx->wr_lock);
4050
4051 wait_event(sctx->list_wait,
4052 atomic_read(&sctx->bios_in_flight) == 0);
4053
4054 scrub_pause_on(fs_info);
4055
4056 /*
4057 		 * This must be done before we decrease @scrub_paused.
4058 		 * Make sure we don't block transaction commit while
4059 		 * we are waiting for pending workers to finish.
4060 */
4061 wait_event(sctx->list_wait,
4062 atomic_read(&sctx->workers_pending) == 0);
4063 sctx->flush_all_writes = false;
4064
4065 scrub_pause_off(fs_info);
4066
4067 if (sctx->is_dev_replace &&
4068 !btrfs_finish_block_group_to_copy(dev_replace->srcdev,
4069 cache, found_key.offset))
4070 ro_set = 0;
4071
4072 down_write(&dev_replace->rwsem);
4073 dev_replace->cursor_left = dev_replace->cursor_right;
4074 dev_replace->item_needs_writeback = 1;
4075 up_write(&dev_replace->rwsem);
4076
4077 if (ro_set)
4078 btrfs_dec_block_group_ro(cache);
4079
4080 /*
4081 * We might have prevented the cleaner kthread from deleting
4082 * this block group if it was already unused because we raced
4083 * and set it to RO mode first. So add it back to the unused
4084 * list, otherwise it might not ever be deleted unless a manual
4085 * balance is triggered or it becomes used and unused again.
4086 */
4087 spin_lock(&cache->lock);
4088 if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags) &&
4089 !cache->ro && cache->reserved == 0 && cache->used == 0) {
4090 spin_unlock(&cache->lock);
4091 if (btrfs_test_opt(fs_info, DISCARD_ASYNC))
4092 btrfs_discard_queue_work(&fs_info->discard_ctl,
4093 cache);
4094 else
4095 btrfs_mark_bg_unused(cache);
4096 } else {
4097 spin_unlock(&cache->lock);
4098 }
4099 skip_unfreeze:
4100 btrfs_unfreeze_block_group(cache);
4101 btrfs_put_block_group(cache);
4102 if (ret)
4103 break;
4104 if (sctx->is_dev_replace &&
4105 atomic64_read(&dev_replace->num_write_errors) > 0) {
4106 ret = -EIO;
4107 break;
4108 }
4109 if (sctx->stat.malloc_errors > 0) {
4110 ret = -ENOMEM;
4111 break;
4112 }
4113 skip:
4114 key.offset = found_key.offset + dev_extent_len;
4115 btrfs_release_path(path);
4116 }
4117
4118 btrfs_free_path(path);
4119
4120 return ret;
4121 }
4122
4123 static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
4124 struct btrfs_device *scrub_dev)
4125 {
4126 int i;
4127 u64 bytenr;
4128 u64 gen;
4129 int ret;
4130 struct btrfs_fs_info *fs_info = sctx->fs_info;
4131
4132 if (BTRFS_FS_ERROR(fs_info))
4133 return -EROFS;
4134
4135 	/* Seed devices of a new filesystem have their own generation. */
4136 if (scrub_dev->fs_devices != fs_info->fs_devices)
4137 gen = scrub_dev->generation;
4138 else
4139 gen = fs_info->last_trans_committed;
4140
4141 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
4142 bytenr = btrfs_sb_offset(i);
4143 if (bytenr + BTRFS_SUPER_INFO_SIZE >
4144 scrub_dev->commit_total_bytes)
4145 break;
4146 if (!btrfs_check_super_location(scrub_dev, bytenr))
4147 continue;
4148
4149 ret = scrub_sectors(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
4150 scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
4151 NULL, bytenr);
4152 if (ret)
4153 return ret;
4154 }
4155 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
4156
4157 return 0;
4158 }
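/*
 * For reference, btrfs_sb_offset() returns the fixed super block copy
 * locations: 64K for mirror 0, 64M for mirror 1 and 256G for mirror 2, so
 * the size check above simply skips copies that would fall beyond the end
 * of a small device.
 */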
4159
4160 static void scrub_workers_put(struct btrfs_fs_info *fs_info)
4161 {
4162 if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt,
4163 &fs_info->scrub_lock)) {
4164 struct workqueue_struct *scrub_workers = fs_info->scrub_workers;
4165 struct workqueue_struct *scrub_wr_comp =
4166 fs_info->scrub_wr_completion_workers;
4167 struct workqueue_struct *scrub_parity =
4168 fs_info->scrub_parity_workers;
4169
4170 fs_info->scrub_workers = NULL;
4171 fs_info->scrub_wr_completion_workers = NULL;
4172 fs_info->scrub_parity_workers = NULL;
4173 mutex_unlock(&fs_info->scrub_lock);
4174
4175 if (scrub_workers)
4176 destroy_workqueue(scrub_workers);
4177 if (scrub_wr_comp)
4178 destroy_workqueue(scrub_wr_comp);
4179 if (scrub_parity)
4180 destroy_workqueue(scrub_parity);
4181 }
4182 }
4183
4184 /*
4185  * Get a reference count on fs_info->scrub_workers. Start the workers if necessary.
4186 */
4187 static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
4188 int is_dev_replace)
4189 {
4190 struct workqueue_struct *scrub_workers = NULL;
4191 struct workqueue_struct *scrub_wr_comp = NULL;
4192 struct workqueue_struct *scrub_parity = NULL;
4193 unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
4194 int max_active = fs_info->thread_pool_size;
4195 int ret = -ENOMEM;
4196
4197 if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt))
4198 return 0;
4199
4200 scrub_workers = alloc_workqueue("btrfs-scrub", flags,
4201 is_dev_replace ? 1 : max_active);
4202 if (!scrub_workers)
4203 goto fail_scrub_workers;
4204
4205 scrub_wr_comp = alloc_workqueue("btrfs-scrubwrc", flags, max_active);
4206 if (!scrub_wr_comp)
4207 goto fail_scrub_wr_completion_workers;
4208
4209 scrub_parity = alloc_workqueue("btrfs-scrubparity", flags, max_active);
4210 if (!scrub_parity)
4211 goto fail_scrub_parity_workers;
4212
4213 mutex_lock(&fs_info->scrub_lock);
4214 if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
4215 ASSERT(fs_info->scrub_workers == NULL &&
4216 fs_info->scrub_wr_completion_workers == NULL &&
4217 fs_info->scrub_parity_workers == NULL);
4218 fs_info->scrub_workers = scrub_workers;
4219 fs_info->scrub_wr_completion_workers = scrub_wr_comp;
4220 fs_info->scrub_parity_workers = scrub_parity;
4221 refcount_set(&fs_info->scrub_workers_refcnt, 1);
4222 mutex_unlock(&fs_info->scrub_lock);
4223 return 0;
4224 }
4225 /* Other thread raced in and created the workers for us */
4226 refcount_inc(&fs_info->scrub_workers_refcnt);
4227 mutex_unlock(&fs_info->scrub_lock);
4228
4229 ret = 0;
4230 destroy_workqueue(scrub_parity);
4231 fail_scrub_parity_workers:
4232 destroy_workqueue(scrub_wr_comp);
4233 fail_scrub_wr_completion_workers:
4234 destroy_workqueue(scrub_workers);
4235 fail_scrub_workers:
4236 return ret;
4237 }
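/*
 * Note the deliberate fall-through above: a thread that loses the race sets
 * ret = 0 and then falls through all three fail_* labels, destroying the
 * workqueues it allocated but no longer needs, while the winner's
 * workqueues stay installed in fs_info.
 */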
4238
4239 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
4240 u64 end, struct btrfs_scrub_progress *progress,
4241 int readonly, int is_dev_replace)
4242 {
4243 struct btrfs_dev_lookup_args args = { .devid = devid };
4244 struct scrub_ctx *sctx;
4245 int ret;
4246 struct btrfs_device *dev;
4247 unsigned int nofs_flag;
4248 bool need_commit = false;
4249
4250 if (btrfs_fs_closing(fs_info))
4251 return -EAGAIN;
4252
4253 /* At mount time we have ensured nodesize is in the range of [4K, 64K]. */
4254 ASSERT(fs_info->nodesize <= BTRFS_STRIPE_LEN);
4255
4256 /*
4257 * SCRUB_MAX_SECTORS_PER_BLOCK is calculated using the largest possible
4258 * value (max nodesize / min sectorsize), thus nodesize should always
4259 * be fine.
4260 */
4261 ASSERT(fs_info->nodesize <=
4262 SCRUB_MAX_SECTORS_PER_BLOCK << fs_info->sectorsize_bits);
4263
4264 /* Allocate outside of device_list_mutex */
4265 sctx = scrub_setup_ctx(fs_info, is_dev_replace);
4266 if (IS_ERR(sctx))
4267 return PTR_ERR(sctx);
4268
4269 ret = scrub_workers_get(fs_info, is_dev_replace);
4270 if (ret)
4271 goto out_free_ctx;
4272
4273 mutex_lock(&fs_info->fs_devices->device_list_mutex);
4274 dev = btrfs_find_device(fs_info->fs_devices, &args);
4275 if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
4276 !is_dev_replace)) {
4277 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4278 ret = -ENODEV;
4279 goto out;
4280 }
4281
4282 if (!is_dev_replace && !readonly &&
4283 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
4284 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4285 btrfs_err_in_rcu(fs_info,
4286 "scrub on devid %llu: filesystem on %s is not writable",
4287 devid, rcu_str_deref(dev->name));
4288 ret = -EROFS;
4289 goto out;
4290 }
4291
4292 mutex_lock(&fs_info->scrub_lock);
4293 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
4294 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
4295 mutex_unlock(&fs_info->scrub_lock);
4296 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4297 ret = -EIO;
4298 goto out;
4299 }
4300
4301 down_read(&fs_info->dev_replace.rwsem);
4302 if (dev->scrub_ctx ||
4303 (!is_dev_replace &&
4304 btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
4305 up_read(&fs_info->dev_replace.rwsem);
4306 mutex_unlock(&fs_info->scrub_lock);
4307 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4308 ret = -EINPROGRESS;
4309 goto out;
4310 }
4311 up_read(&fs_info->dev_replace.rwsem);
4312
4313 sctx->readonly = readonly;
4314 dev->scrub_ctx = sctx;
4315 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4316
4317 /*
4318 	 * By checking @scrub_pause_req here, we can avoid a
4319 	 * race between transaction commit and scrubbing.
4320 */
4321 __scrub_blocked_if_needed(fs_info);
4322 atomic_inc(&fs_info->scrubs_running);
4323 mutex_unlock(&fs_info->scrub_lock);
4324
4325 /*
4326 * In order to avoid deadlock with reclaim when there is a transaction
4327 * trying to pause scrub, make sure we use GFP_NOFS for all the
4328 	 * allocations done in btrfs_scrub_sectors() and scrub_sectors_for_parity()
4329 	 * invoked by our callees. The pausing request is made when the
4330 	 * transaction commit starts, and it blocks the transaction until scrub
4331 	 * is paused (done at specific points in scrub_stripe() or right above,
4332 	 * before incrementing fs_info->scrubs_running).
4333 */
4334 nofs_flag = memalloc_nofs_save();
4335 if (!is_dev_replace) {
4336 u64 old_super_errors;
4337
4338 spin_lock(&sctx->stat_lock);
4339 old_super_errors = sctx->stat.super_errors;
4340 spin_unlock(&sctx->stat_lock);
4341
4342 btrfs_info(fs_info, "scrub: started on devid %llu", devid);
4343 /*
4344 		 * By holding the device list mutex, we can
4345 		 * kick off writing the super blocks, as in log tree sync.
4346 */
4347 mutex_lock(&fs_info->fs_devices->device_list_mutex);
4348 ret = scrub_supers(sctx, dev);
4349 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4350
4351 spin_lock(&sctx->stat_lock);
4352 /*
4353 		 * Super block errors were found, but we cannot commit a transaction
4354 		 * in the current context, since btrfs_commit_transaction() needs
4355 		 * to pause the currently running scrub (held by ourselves).
4356 */
4357 if (sctx->stat.super_errors > old_super_errors && !sctx->readonly)
4358 need_commit = true;
4359 spin_unlock(&sctx->stat_lock);
4360 }
4361
4362 if (!ret)
4363 ret = scrub_enumerate_chunks(sctx, dev, start, end);
4364 memalloc_nofs_restore(nofs_flag);
4365
4366 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
4367 atomic_dec(&fs_info->scrubs_running);
4368 wake_up(&fs_info->scrub_pause_wait);
4369
4370 wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);
4371
4372 if (progress)
4373 memcpy(progress, &sctx->stat, sizeof(*progress));
4374
4375 if (!is_dev_replace)
4376 btrfs_info(fs_info, "scrub: %s on devid %llu with status: %d",
4377 ret ? "not finished" : "finished", devid, ret);
4378
4379 mutex_lock(&fs_info->scrub_lock);
4380 dev->scrub_ctx = NULL;
4381 mutex_unlock(&fs_info->scrub_lock);
4382
4383 scrub_workers_put(fs_info);
4384 scrub_put_ctx(sctx);
4385
4386 /*
4387 * We found some super block errors before, now try to force a
4388 * transaction commit, as scrub has finished.
4389 */
4390 if (need_commit) {
4391 struct btrfs_trans_handle *trans;
4392
4393 trans = btrfs_start_transaction(fs_info->tree_root, 0);
4394 if (IS_ERR(trans)) {
4395 ret = PTR_ERR(trans);
4396 btrfs_err(fs_info,
4397 "scrub: failed to start transaction to fix super block errors: %d", ret);
4398 return ret;
4399 }
4400 ret = btrfs_commit_transaction(trans);
4401 if (ret < 0)
4402 btrfs_err(fs_info,
4403 "scrub: failed to commit transaction to fix super block errors: %d", ret);
4404 }
4405 return ret;
4406 out:
4407 scrub_workers_put(fs_info);
4408 out_free_ctx:
4409 scrub_free_ctx(sctx);
4410
4411 return ret;
4412 }
4413
4414 void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
4415 {
4416 mutex_lock(&fs_info->scrub_lock);
4417 atomic_inc(&fs_info->scrub_pause_req);
4418 while (atomic_read(&fs_info->scrubs_paused) !=
4419 atomic_read(&fs_info->scrubs_running)) {
4420 mutex_unlock(&fs_info->scrub_lock);
4421 wait_event(fs_info->scrub_pause_wait,
4422 atomic_read(&fs_info->scrubs_paused) ==
4423 atomic_read(&fs_info->scrubs_running));
4424 mutex_lock(&fs_info->scrub_lock);
4425 }
4426 mutex_unlock(&fs_info->scrub_lock);
4427 }
4428
4429 void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
4430 {
4431 atomic_dec(&fs_info->scrub_pause_req);
4432 wake_up(&fs_info->scrub_pause_wait);
4433 }
4434
4435 int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
4436 {
4437 mutex_lock(&fs_info->scrub_lock);
4438 if (!atomic_read(&fs_info->scrubs_running)) {
4439 mutex_unlock(&fs_info->scrub_lock);
4440 return -ENOTCONN;
4441 }
4442
4443 atomic_inc(&fs_info->scrub_cancel_req);
4444 while (atomic_read(&fs_info->scrubs_running)) {
4445 mutex_unlock(&fs_info->scrub_lock);
4446 wait_event(fs_info->scrub_pause_wait,
4447 atomic_read(&fs_info->scrubs_running) == 0);
4448 mutex_lock(&fs_info->scrub_lock);
4449 }
4450 atomic_dec(&fs_info->scrub_cancel_req);
4451 mutex_unlock(&fs_info->scrub_lock);
4452
4453 return 0;
4454 }
4455
4456 int btrfs_scrub_cancel_dev(struct btrfs_device *dev)
4457 {
4458 struct btrfs_fs_info *fs_info = dev->fs_info;
4459 struct scrub_ctx *sctx;
4460
4461 mutex_lock(&fs_info->scrub_lock);
4462 sctx = dev->scrub_ctx;
4463 if (!sctx) {
4464 mutex_unlock(&fs_info->scrub_lock);
4465 return -ENOTCONN;
4466 }
4467 atomic_inc(&sctx->cancel_req);
4468 while (dev->scrub_ctx) {
4469 mutex_unlock(&fs_info->scrub_lock);
4470 wait_event(fs_info->scrub_pause_wait,
4471 dev->scrub_ctx == NULL);
4472 mutex_lock(&fs_info->scrub_lock);
4473 }
4474 mutex_unlock(&fs_info->scrub_lock);
4475
4476 return 0;
4477 }
4478
4479 int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
4480 struct btrfs_scrub_progress *progress)
4481 {
4482 struct btrfs_dev_lookup_args args = { .devid = devid };
4483 struct btrfs_device *dev;
4484 struct scrub_ctx *sctx = NULL;
4485
4486 mutex_lock(&fs_info->fs_devices->device_list_mutex);
4487 dev = btrfs_find_device(fs_info->fs_devices, &args);
4488 if (dev)
4489 sctx = dev->scrub_ctx;
4490 if (sctx)
4491 memcpy(progress, &sctx->stat, sizeof(*progress));
4492 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4493
4494 return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
4495 }
4496
4497 static void scrub_find_good_copy(struct btrfs_fs_info *fs_info,
4498 u64 extent_logical, u32 extent_len,
4499 u64 *extent_physical,
4500 struct btrfs_device **extent_dev,
4501 int *extent_mirror_num)
4502 {
4503 u64 mapped_length;
4504 struct btrfs_io_context *bioc = NULL;
4505 int ret;
4506
4507 mapped_length = extent_len;
4508 ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical,
4509 &mapped_length, &bioc, 0);
4510 if (ret || !bioc || mapped_length < extent_len ||
4511 !bioc->stripes[0].dev->bdev) {
4512 btrfs_put_bioc(bioc);
4513 return;
4514 }
4515
4516 *extent_physical = bioc->stripes[0].physical;
4517 *extent_mirror_num = bioc->mirror_num;
4518 *extent_dev = bioc->stripes[0].dev;
4519 btrfs_put_bioc(bioc);
4520 }
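/*
 * The "good" copy here is simply whichever mirror btrfs_map_block() picks
 * for a plain BTRFS_MAP_READ: the first stripe of the returned io context
 * supplies the physical offset, device and mirror number that the
 * dev-replace path can read from.
 */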
4521