Lines Matching +full:data +full:- +full:mirror

1 // SPDX-License-Identifier: GPL-2.0-or-later
9 * RAID-1 management functions.
11 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
19 * - bitmap marked during normal i/o
20 * - bitmap used to skip nondirty blocks during sync
22 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
23 * - persistent bitmap code
38 #include "md-bitmap.h"
50 do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)
52 #include "raid1-10.c"
54 #define START(node) ((node)->start)
55 #define LAST(node) ((node)->last)
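The raid1_rb_iter_first()/raid1_rb_insert()/raid1_rb_remove() helpers used below are not among the matched lines; they are presumably generated from these START/LAST accessors by the kernel's generic interval-tree template. A minimal sketch of that instantiation:

    #include <linux/interval_tree_generic.h>

    /* Sketch (assumption): generates the static inline raid1_rb_* helpers
     * over struct serial_info's [start, last] interval. */
    INTERVAL_TREE_DEFINE(struct serial_info, node, sector_t, _subtree_last,
                         START, LAST, static inline, raid1_rb);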
64 sector_t lo = r1_bio->sector; in check_and_add_serial()
65 sector_t hi = lo + r1_bio->sectors; in check_and_add_serial()
66 struct serial_in_rdev *serial = &rdev->serial[idx]; in check_and_add_serial()
68 spin_lock_irqsave(&serial->serial_lock, flags); in check_and_add_serial()
70 if (raid1_rb_iter_first(&serial->serial_rb, lo, hi)) in check_and_add_serial()
71 ret = -EBUSY; in check_and_add_serial()
73 si->start = lo; in check_and_add_serial()
74 si->last = hi; in check_and_add_serial()
75 raid1_rb_insert(si, &serial->serial_rb); in check_and_add_serial()
77 spin_unlock_irqrestore(&serial->serial_lock, flags); in check_and_add_serial()
84 struct mddev *mddev = rdev->mddev; in wait_for_serialization()
86 int idx = sector_to_idx(r1_bio->sector); in wait_for_serialization()
87 struct serial_in_rdev *serial = &rdev->serial[idx]; in wait_for_serialization()
89 if (WARN_ON(!mddev->serial_info_pool)) in wait_for_serialization()
91 si = mempool_alloc(mddev->serial_info_pool, GFP_NOIO); in wait_for_serialization()
92 wait_event(serial->serial_io_wait, in wait_for_serialization()
101 struct mddev *mddev = rdev->mddev; in remove_serial()
103 struct serial_in_rdev *serial = &rdev->serial[idx]; in remove_serial()
105 spin_lock_irqsave(&serial->serial_lock, flags); in remove_serial()
106 for (si = raid1_rb_iter_first(&serial->serial_rb, lo, hi); in remove_serial()
108 if (si->start == lo && si->last == hi) { in remove_serial()
109 raid1_rb_remove(si, &serial->serial_rb); in remove_serial()
110 mempool_free(si, mddev->serial_info_pool); in remove_serial()
117 spin_unlock_irqrestore(&serial->serial_lock, flags); in remove_serial()
118 wake_up(&serial->serial_io_wait); in remove_serial()
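sector_to_idx(), used above to pick the per-rdev serial bucket, also did not match the search; it presumably hashes a sector's barrier-unit number into one of the barrier buckets, along these lines:

    static int sector_to_idx(sector_t sector)
    {
            /* one bucket per hashed 64MiB barrier unit (1 << 17 sectors);
             * sketch, assuming the standard BARRIER_* constants */
            return hash_long(sector >> BARRIER_UNIT_SECTOR_BITS,
                             BARRIER_BUCKETS_NR_BITS);
    }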
 122 * for resync bio, the r1bio pointer can be retrieved from the per-bio 'struct resync_pages'
<!-- placeholder; replaced below -->
127 return get_resync_pages(bio)->raid_bio; in get_resync_r1bio()
130 static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) in r1bio_pool_alloc() argument
132 struct pool_info *pi = data; in r1bio_pool_alloc()
133 int size = offsetof(struct r1bio, bios[pi->raid_disks]); in r1bio_pool_alloc()
146 static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) in r1buf_pool_alloc() argument
148 struct pool_info *pi = data; in r1buf_pool_alloc()
159 rps = kmalloc_array(pi->raid_disks, sizeof(struct resync_pages), in r1buf_pool_alloc()
165 * Allocate bios : 1 for reading, n-1 for writing in r1buf_pool_alloc()
167 for (j = pi->raid_disks ; j-- ; ) { in r1buf_pool_alloc()
171 r1_bio->bios[j] = bio; in r1buf_pool_alloc()
174 * Allocate RESYNC_PAGES data pages and attach them to in r1buf_pool_alloc()
176 * If this is a user-requested check/repair, allocate in r1buf_pool_alloc()
179 if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) in r1buf_pool_alloc()
180 need_pages = pi->raid_disks; in r1buf_pool_alloc()
183 for (j = 0; j < pi->raid_disks; j++) { in r1buf_pool_alloc()
186 bio = r1_bio->bios[j]; in r1buf_pool_alloc()
196 rp->raid_bio = r1_bio; in r1buf_pool_alloc()
197 bio->bi_private = rp; in r1buf_pool_alloc()
200 r1_bio->master_bio = NULL; in r1buf_pool_alloc()
205 while (--j >= 0) in r1buf_pool_alloc()
209 while (++j < pi->raid_disks) in r1buf_pool_alloc()
210 bio_put(r1_bio->bios[j]); in r1buf_pool_alloc()
214 rbio_pool_free(r1_bio, data); in r1buf_pool_alloc()
218 static void r1buf_pool_free(void *__r1_bio, void *data) in r1buf_pool_free() argument
220 struct pool_info *pi = data; in r1buf_pool_free()
225 for (i = pi->raid_disks; i--; ) { in r1buf_pool_free()
226 rp = get_resync_pages(r1bio->bios[i]); in r1buf_pool_free()
228 bio_put(r1bio->bios[i]); in r1buf_pool_free()
234 rbio_pool_free(r1bio, data); in r1buf_pool_free()
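A sketch of how these allocator/destructor pairs are presumably wired up when the array is assembled (the pool depth constant is an assumption here):

    /* r1bio_pool: fixed-depth pool of r1bio descriptors for regular I/O */
    err = mempool_init(&conf->r1bio_pool, NR_RAID_BIOS,
                       r1bio_pool_alloc, rbio_pool_free, conf->poolinfo);

The resync buffer pool built on r1buf_pool_alloc()/r1buf_pool_free() is set up the same way in init_resync(), shown further down.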
241 for (i = 0; i < conf->raid_disks * 2; i++) { in put_all_bios()
242 struct bio **bio = r1_bio->bios + i; in put_all_bios()
251 struct r1conf *conf = r1_bio->mddev->private; in free_r1bio()
254 mempool_free(r1_bio, &conf->r1bio_pool); in free_r1bio()
259 struct r1conf *conf = r1_bio->mddev->private; in put_buf()
260 sector_t sect = r1_bio->sector; in put_buf()
263 for (i = 0; i < conf->raid_disks * 2; i++) { in put_buf()
264 struct bio *bio = r1_bio->bios[i]; in put_buf()
265 if (bio->bi_end_io) in put_buf()
266 rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev); in put_buf()
269 mempool_free(r1_bio, &conf->r1buf_pool); in put_buf()
277 struct mddev *mddev = r1_bio->mddev; in reschedule_retry()
278 struct r1conf *conf = mddev->private; in reschedule_retry()
281 idx = sector_to_idx(r1_bio->sector); in reschedule_retry()
282 spin_lock_irqsave(&conf->device_lock, flags); in reschedule_retry()
283 list_add(&r1_bio->retry_list, &conf->retry_list); in reschedule_retry()
284 atomic_inc(&conf->nr_queued[idx]); in reschedule_retry()
285 spin_unlock_irqrestore(&conf->device_lock, flags); in reschedule_retry()
287 wake_up(&conf->wait_barrier); in reschedule_retry()
288 md_wakeup_thread(mddev->thread); in reschedule_retry()
298 struct bio *bio = r1_bio->master_bio; in call_bio_endio()
300 if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) in call_bio_endio()
301 bio->bi_status = BLK_STS_IOERR; in call_bio_endio()
303 if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue)) in call_bio_endio()
304 bio_end_io_acct(bio, r1_bio->start_time); in call_bio_endio()
310 struct bio *bio = r1_bio->master_bio; in raid_end_bio_io()
311 struct r1conf *conf = r1_bio->mddev->private; in raid_end_bio_io()
314 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { in raid_end_bio_io()
315 pr_debug("raid1: sync end %s on sectors %llu-%llu\n", in raid_end_bio_io()
317 (unsigned long long) bio->bi_iter.bi_sector, in raid_end_bio_io()
318 (unsigned long long) bio_end_sector(bio) - 1); in raid_end_bio_io()
324 * to go idle. All I/Os, even write-behind writes, are done. in raid_end_bio_io()
326 allow_barrier(conf, r1_bio->sector); in raid_end_bio_io()
336 struct r1conf *conf = r1_bio->mddev->private; in update_head_pos()
338 conf->mirrors[disk].head_position = in update_head_pos()
339 r1_bio->sector + (r1_bio->sectors); in update_head_pos()
347 int mirror; in find_bio_disk() local
348 struct r1conf *conf = r1_bio->mddev->private; in find_bio_disk()
349 int raid_disks = conf->raid_disks; in find_bio_disk()
351 for (mirror = 0; mirror < raid_disks * 2; mirror++) in find_bio_disk()
352 if (r1_bio->bios[mirror] == bio) in find_bio_disk()
355 BUG_ON(mirror == raid_disks * 2); in find_bio_disk()
356 update_head_pos(mirror, r1_bio); in find_bio_disk()
358 return mirror; in find_bio_disk()
363 int uptodate = !bio->bi_status; in raid1_end_read_request()
364 struct r1bio *r1_bio = bio->bi_private; in raid1_end_read_request()
365 struct r1conf *conf = r1_bio->mddev->private; in raid1_end_read_request()
366 struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev; in raid1_end_read_request()
369 * this branch is our 'one mirror IO has finished' event handler: in raid1_end_read_request()
371 update_head_pos(r1_bio->read_disk, r1_bio); in raid1_end_read_request()
374 set_bit(R1BIO_Uptodate, &r1_bio->state); in raid1_end_read_request()
375 else if (test_bit(FailFast, &rdev->flags) && in raid1_end_read_request()
376 test_bit(R1BIO_FailFast, &r1_bio->state)) in raid1_end_read_request()
377 /* This was a fail-fast read so we definitely in raid1_end_read_request()
386 spin_lock_irqsave(&conf->device_lock, flags); in raid1_end_read_request()
387 if (r1_bio->mddev->degraded == conf->raid_disks || in raid1_end_read_request()
388 (r1_bio->mddev->degraded == conf->raid_disks-1 && in raid1_end_read_request()
389 test_bit(In_sync, &rdev->flags))) in raid1_end_read_request()
391 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_end_read_request()
396 rdev_dec_pending(rdev, conf->mddev); in raid1_end_read_request()
403 mdname(conf->mddev), in raid1_end_read_request()
404 bdevname(rdev->bdev, b), in raid1_end_read_request()
405 (unsigned long long)r1_bio->sector); in raid1_end_read_request()
406 set_bit(R1BIO_ReadError, &r1_bio->state); in raid1_end_read_request()
415 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { in close_write()
416 bio_free_pages(r1_bio->behind_master_bio); in close_write()
417 bio_put(r1_bio->behind_master_bio); in close_write()
418 r1_bio->behind_master_bio = NULL; in close_write()
421 md_bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector, in close_write()
422 r1_bio->sectors, in close_write()
423 !test_bit(R1BIO_Degraded, &r1_bio->state), in close_write()
424 test_bit(R1BIO_BehindIO, &r1_bio->state)); in close_write()
425 md_write_end(r1_bio->mddev); in close_write()
430 if (!atomic_dec_and_test(&r1_bio->remaining)) in r1_bio_write_done()
433 if (test_bit(R1BIO_WriteError, &r1_bio->state)) in r1_bio_write_done()
437 if (test_bit(R1BIO_MadeGood, &r1_bio->state)) in r1_bio_write_done()
446 struct r1bio *r1_bio = bio->bi_private; in raid1_end_write_request()
447 int behind = test_bit(R1BIO_BehindIO, &r1_bio->state); in raid1_end_write_request()
448 struct r1conf *conf = r1_bio->mddev->private; in raid1_end_write_request()
450 int mirror = find_bio_disk(r1_bio, bio); in raid1_end_write_request() local
451 struct md_rdev *rdev = conf->mirrors[mirror].rdev; in raid1_end_write_request()
453 sector_t lo = r1_bio->sector; in raid1_end_write_request()
454 sector_t hi = r1_bio->sector + r1_bio->sectors; in raid1_end_write_request()
456 discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD; in raid1_end_write_request()
459 * 'one mirror IO has finished' event handler: in raid1_end_write_request()
461 if (bio->bi_status && !discard_error) { in raid1_end_write_request()
462 set_bit(WriteErrorSeen, &rdev->flags); in raid1_end_write_request()
463 if (!test_and_set_bit(WantReplacement, &rdev->flags)) in raid1_end_write_request()
465 conf->mddev->recovery); in raid1_end_write_request()
467 if (test_bit(FailFast, &rdev->flags) && in raid1_end_write_request()
468 (bio->bi_opf & MD_FAILFAST) && in raid1_end_write_request()
470 !test_bit(WriteMostly, &rdev->flags)) { in raid1_end_write_request()
471 md_error(r1_bio->mddev, rdev); in raid1_end_write_request()
478 if (!test_bit(Faulty, &rdev->flags)) in raid1_end_write_request()
479 set_bit(R1BIO_WriteError, &r1_bio->state); in raid1_end_write_request()
482 set_bit(R1BIO_Degraded, &r1_bio->state); in raid1_end_write_request()
484 r1_bio->bios[mirror] = NULL; in raid1_end_write_request()
495 * to user-side. So if something waits for IO, then it in raid1_end_write_request()
501 r1_bio->bios[mirror] = NULL; in raid1_end_write_request()
506 * such device for properly reading the data back (we could in raid1_end_write_request()
508 * before rdev->recovery_offset, but for simplicity we don't in raid1_end_write_request()
511 if (test_bit(In_sync, &rdev->flags) && in raid1_end_write_request()
512 !test_bit(Faulty, &rdev->flags)) in raid1_end_write_request()
513 set_bit(R1BIO_Uptodate, &r1_bio->state); in raid1_end_write_request()
516 if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors, in raid1_end_write_request()
518 r1_bio->bios[mirror] = IO_MADE_GOOD; in raid1_end_write_request()
519 set_bit(R1BIO_MadeGood, &r1_bio->state); in raid1_end_write_request()
524 if (test_bit(CollisionCheck, &rdev->flags)) in raid1_end_write_request()
526 if (test_bit(WriteMostly, &rdev->flags)) in raid1_end_write_request()
527 atomic_dec(&r1_bio->behind_remaining); in raid1_end_write_request()
531 * has safely reached all non-writemostly in raid1_end_write_request()
533 * gets done only once -- we don't ever want to return in raid1_end_write_request()
534 * -EIO here, instead we'll wait in raid1_end_write_request()
536 if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) && in raid1_end_write_request()
537 test_bit(R1BIO_Uptodate, &r1_bio->state)) { in raid1_end_write_request()
539 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { in raid1_end_write_request()
540 struct bio *mbio = r1_bio->master_bio; in raid1_end_write_request()
542 " %llu-%llu\n", in raid1_end_write_request()
543 (unsigned long long) mbio->bi_iter.bi_sector, in raid1_end_write_request()
544 (unsigned long long) bio_end_sector(mbio) - 1); in raid1_end_write_request()
548 } else if (rdev->mddev->serialize_policy) in raid1_end_write_request()
550 if (r1_bio->bios[mirror] == NULL) in raid1_end_write_request()
551 rdev_dec_pending(rdev, conf->mddev); in raid1_end_write_request()
573 len = round_up(start_sector + 1, BARRIER_UNIT_SECTOR_SIZE) - in align_to_barrier_unit_end()
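Only the length computation above matched the search; the enclosing helper presumably clamps a request so it never crosses a barrier-unit boundary, roughly:

    static sector_t align_to_barrier_unit_end(sector_t start_sector,
                                              sector_t sectors)
    {
            sector_t len;

            /* sectors remaining from start_sector to the end of its
             * barrier unit */
            len = round_up(start_sector + 1, BARRIER_UNIT_SECTOR_SIZE) -
                  start_sector;
            return min_t(sector_t, sectors, len);
    }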
584 * be done. There is a per-array 'next expected sequential IO' sector
585 * number - if this matches on the next IO then we use the last disk.
 586 * There is also a per-disk 'last known head position' sector that is in read_balance()
592 * because position is mirror, not device based.
598 const sector_t this_sector = r1_bio->sector; in read_balance()
617 sectors = r1_bio->sectors; in read_balance()
618 best_disk = -1; in read_balance()
619 best_dist_disk = -1; in read_balance()
621 best_pending_disk = -1; in read_balance()
626 clear_bit(R1BIO_FailFast, &r1_bio->state); in read_balance()
628 if ((conf->mddev->recovery_cp < this_sector + sectors) || in read_balance()
629 (mddev_is_clustered(conf->mddev) && in read_balance()
630 md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector, in read_balance()
636 for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) { in read_balance()
643 rdev = rcu_dereference(conf->mirrors[disk].rdev); in read_balance()
644 if (r1_bio->bios[disk] == IO_BLOCKED in read_balance()
646 || test_bit(Faulty, &rdev->flags)) in read_balance()
648 if (!test_bit(In_sync, &rdev->flags) && in read_balance()
649 rdev->recovery_offset < this_sector + sectors) in read_balance()
651 if (test_bit(WriteMostly, &rdev->flags)) { in read_balance()
652 /* Don't balance among write-mostly, just in read_balance()
660 best_good_sectors = first_bad - this_sector; in read_balance()
681 bad_sectors -= (this_sector - first_bad); in read_balance()
688 sector_t good_sectors = first_bad - this_sector; in read_balance()
699 best_disk = -1; in read_balance()
705 set_bit(R1BIO_FailFast, &r1_bio->state); in read_balance()
707 nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev)); in read_balance()
709 pending = atomic_read(&rdev->nr_pending); in read_balance()
710 dist = abs(this_sector - conf->mirrors[disk].head_position); in read_balance()
716 if (conf->mirrors[disk].next_seq_sect == this_sector in read_balance()
718 int opt_iosize = bdev_io_opt(rdev->bdev) >> 9; in read_balance()
719 struct raid1_info *mirror = &conf->mirrors[disk]; in read_balance() local
736 mirror->seq_start != MaxSector && in read_balance()
737 mirror->next_seq_sect > opt_iosize && in read_balance()
738 mirror->next_seq_sect - opt_iosize >= in read_balance()
739 mirror->seq_start) { in read_balance()
 762 * non-rotational, choose the disk with fewer pending requests even if the in read_balance()
 764 * mixed rotational/non-rotational disks depending on workload. in read_balance()
766 if (best_disk == -1) { in read_balance()
774 rdev = rcu_dereference(conf->mirrors[best_disk].rdev); in read_balance()
777 atomic_inc(&rdev->nr_pending); in read_balance()
780 if (conf->mirrors[best_disk].next_seq_sect != this_sector) in read_balance()
781 conf->mirrors[best_disk].seq_start = this_sector; in read_balance()
783 conf->mirrors[best_disk].next_seq_sect = this_sector + sectors; in read_balance()
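Illustration of the balancing heuristic above (numbers invented): a mirror whose next_seq_sect equals the incoming this_sector is treated as fully sequential (effective distance 0), while a competitor with head_position 5000 facing a read at sector 1000 scores dist = abs(1000 - 5000) = 4000 and loses the distance comparison unless it is idle or non-rotational.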
794 md_bitmap_unplug(conf->mddev->bitmap); in flush_bio_list()
795 wake_up(&conf->wait_barrier); in flush_bio_list()
798 struct bio *next = bio->bi_next; in flush_bio_list()
799 struct md_rdev *rdev = (void *)bio->bi_bdev; in flush_bio_list()
800 bio->bi_next = NULL; in flush_bio_list()
801 bio_set_dev(bio, rdev->bdev); in flush_bio_list()
802 if (test_bit(Faulty, &rdev->flags)) { in flush_bio_list()
805 !blk_queue_discard(bio->bi_bdev->bd_disk->queue))) in flush_bio_list()
820 spin_lock_irq(&conf->device_lock); in flush_pending_writes()
822 if (conf->pending_bio_list.head) { in flush_pending_writes()
826 bio = bio_list_get(&conf->pending_bio_list); in flush_pending_writes()
827 conf->pending_count = 0; in flush_pending_writes()
828 spin_unlock_irq(&conf->device_lock); in flush_pending_writes()
832 * current->state might be TASK_UNINTERRUPTIBLE which will in flush_pending_writes()
836 * is a false-positive. Silence the warning by resetting in flush_pending_writes()
844 spin_unlock_irq(&conf->device_lock); in flush_pending_writes()
856 * We choose only to raise the barrier if no-one is waiting for the
868 * If resync/recovery is interrupted, returns -EINTR;
875 spin_lock_irq(&conf->resync_lock); in raise_barrier()
878 wait_event_lock_irq(conf->wait_barrier, in raise_barrier()
879 !atomic_read(&conf->nr_waiting[idx]), in raise_barrier()
880 conf->resync_lock); in raise_barrier()
883 atomic_inc(&conf->barrier[idx]); in raise_barrier()
885 * In raise_barrier() we firstly increase conf->barrier[idx] then in raise_barrier()
886 * check conf->nr_pending[idx]. In _wait_barrier() we firstly in raise_barrier()
887 * increase conf->nr_pending[idx] then check conf->barrier[idx]. in raise_barrier()
888 * A memory barrier here to make sure conf->nr_pending[idx] won't in raise_barrier()
889 * be fetched before conf->barrier[idx] is increased. Otherwise in raise_barrier()
896 * B: while conf->nr_pending[idx] is not 0, meaning regular I/O in raise_barrier()
898 * C: while conf->barrier[idx] >= RESYNC_DEPTH, meaning reaches in raise_barrier()
901 wait_event_lock_irq(conf->wait_barrier, in raise_barrier()
902 (!conf->array_frozen && in raise_barrier()
903 !atomic_read(&conf->nr_pending[idx]) && in raise_barrier()
904 atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) || in raise_barrier()
905 test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery), in raise_barrier()
906 conf->resync_lock); in raise_barrier()
908 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { in raise_barrier()
909 atomic_dec(&conf->barrier[idx]); in raise_barrier()
910 spin_unlock_irq(&conf->resync_lock); in raise_barrier()
911 wake_up(&conf->wait_barrier); in raise_barrier()
912 return -EINTR; in raise_barrier()
915 atomic_inc(&conf->nr_sync_pending); in raise_barrier()
916 spin_unlock_irq(&conf->resync_lock); in raise_barrier()
925 BUG_ON(atomic_read(&conf->barrier[idx]) <= 0); in lower_barrier()
927 atomic_dec(&conf->barrier[idx]); in lower_barrier()
928 atomic_dec(&conf->nr_sync_pending); in lower_barrier()
929 wake_up(&conf->wait_barrier); in lower_barrier()
935 * We need to increase conf->nr_pending[idx] very early here, in _wait_barrier()
937 * conf->nr_pending[idx] to be 0. Then we can avoid holding in _wait_barrier()
938 * conf->resync_lock when there is no barrier raised in same in _wait_barrier()
942 atomic_inc(&conf->nr_pending[idx]); in _wait_barrier()
944 * In _wait_barrier() we firstly increase conf->nr_pending[idx], then in _wait_barrier()
945 * check conf->barrier[idx]. In raise_barrier() we firstly increase in _wait_barrier()
946 * conf->barrier[idx], then check conf->nr_pending[idx]. A memory in _wait_barrier()
947 * barrier is necessary here to make sure conf->barrier[idx] won't be in _wait_barrier()
948 * fetched before conf->nr_pending[idx] is increased. Otherwise there in _wait_barrier()
 955 * here. If, while we check conf->barrier[idx], the array is in _wait_barrier()
 956 * frozen (conf->array_frozen is 1), and conf->barrier[idx] is in _wait_barrier()
962 if (!READ_ONCE(conf->array_frozen) && in _wait_barrier()
963 !atomic_read(&conf->barrier[idx])) in _wait_barrier()
967 * After holding conf->resync_lock, conf->nr_pending[idx] in _wait_barrier()
 970 * raise_barrier() might be waiting for conf->nr_pending[idx] in _wait_barrier()
973 spin_lock_irq(&conf->resync_lock); in _wait_barrier()
974 atomic_inc(&conf->nr_waiting[idx]); in _wait_barrier()
975 atomic_dec(&conf->nr_pending[idx]); in _wait_barrier()
980 wake_up(&conf->wait_barrier); in _wait_barrier()
982 wait_event_lock_irq(conf->wait_barrier, in _wait_barrier()
983 !conf->array_frozen && in _wait_barrier()
984 !atomic_read(&conf->barrier[idx]), in _wait_barrier()
985 conf->resync_lock); in _wait_barrier()
986 atomic_inc(&conf->nr_pending[idx]); in _wait_barrier()
987 atomic_dec(&conf->nr_waiting[idx]); in _wait_barrier()
988 spin_unlock_irq(&conf->resync_lock); in _wait_barrier()
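A sketch of the pairing the comments above describe (assuming smp_mb__after_atomic() is the barrier used at both marked points):

    raise_barrier()                      _wait_barrier()
    -----------------------------        -----------------------------
    atomic_inc(&conf->barrier[idx]);     atomic_inc(&conf->nr_pending[idx]);
    smp_mb__after_atomic();              smp_mb__after_atomic();
    ... read conf->nr_pending[idx] ...   ... read conf->barrier[idx] ...

At least one side must observe the other's increment, so resync and regular I/O can never both conclude the same barrier unit is free.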
1000 * conf->barrier[idx] here, memory barrier is unnecessary as well. in wait_read_barrier()
1002 atomic_inc(&conf->nr_pending[idx]); in wait_read_barrier()
1004 if (!READ_ONCE(conf->array_frozen)) in wait_read_barrier()
1007 spin_lock_irq(&conf->resync_lock); in wait_read_barrier()
1008 atomic_inc(&conf->nr_waiting[idx]); in wait_read_barrier()
1009 atomic_dec(&conf->nr_pending[idx]); in wait_read_barrier()
1014 wake_up(&conf->wait_barrier); in wait_read_barrier()
1016 wait_event_lock_irq(conf->wait_barrier, in wait_read_barrier()
1017 !conf->array_frozen, in wait_read_barrier()
1018 conf->resync_lock); in wait_read_barrier()
1019 atomic_inc(&conf->nr_pending[idx]); in wait_read_barrier()
1020 atomic_dec(&conf->nr_waiting[idx]); in wait_read_barrier()
1021 spin_unlock_irq(&conf->resync_lock); in wait_read_barrier()
1033 atomic_dec(&conf->nr_pending[idx]); in _allow_barrier()
1034 wake_up(&conf->wait_barrier); in _allow_barrier()
1044 /* conf->resync_lock should be held */
1049 ret = atomic_read(&conf->nr_sync_pending); in get_unqueued_pending()
1051 ret += atomic_read(&conf->nr_pending[idx]) - in get_unqueued_pending()
1052 atomic_read(&conf->nr_queued[idx]); in get_unqueued_pending()
1070 * Every flying I/O contributes to a conf->nr_pending[idx], idx is the in freeze_array()
1072 * normal I/O are queued, sum of all conf->nr_pending[] will match sum in freeze_array()
1073 * of all conf->nr_queued[]. But normal I/O failure is an exception, in freeze_array()
1082 spin_lock_irq(&conf->resync_lock); in freeze_array()
1083 conf->array_frozen = 1; in freeze_array()
1084 raid1_log(conf->mddev, "wait freeze"); in freeze_array()
1086 conf->wait_barrier, in freeze_array()
1088 conf->resync_lock, in freeze_array()
1090 spin_unlock_irq(&conf->resync_lock); in freeze_array()
1095 spin_lock_irq(&conf->resync_lock); in unfreeze_array()
1096 conf->array_frozen = 0; in unfreeze_array()
1097 spin_unlock_irq(&conf->resync_lock); in unfreeze_array()
1098 wake_up(&conf->wait_barrier); in unfreeze_array()
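A hedged usage sketch: the error paths appear to quiesce the array around corrective I/O with this pair, e.g. in handle_read_error():

    freeze_array(conf, 1);   /* wait until only this r1_bio is in flight */
    fix_read_error(conf, r1_bio->read_disk, r1_bio->sector, r1_bio->sectors);
    unfreeze_array(conf);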
1104 int size = bio->bi_iter.bi_size; in alloc_behind_master_bio()
1105 unsigned vcnt = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; in alloc_behind_master_bio()
1109 behind_bio = bio_alloc_bioset(GFP_NOIO, vcnt, &r1_bio->mddev->bio_set); in alloc_behind_master_bio()
1115 behind_bio->bi_iter.bi_size = size; in alloc_behind_master_bio()
1119 behind_bio->bi_write_hint = bio->bi_write_hint; in alloc_behind_master_bio()
1131 size -= len; in alloc_behind_master_bio()
1137 r1_bio->behind_master_bio = behind_bio; in alloc_behind_master_bio()
1138 set_bit(R1BIO_BehindIO, &r1_bio->state); in alloc_behind_master_bio()
1144 bio->bi_iter.bi_size); in alloc_behind_master_bio()
1159 struct mddev *mddev = plug->cb.data; in raid1_unplug()
1160 struct r1conf *conf = mddev->private; in raid1_unplug()
1163 if (from_schedule || current->bio_list) { in raid1_unplug()
1164 spin_lock_irq(&conf->device_lock); in raid1_unplug()
1165 bio_list_merge(&conf->pending_bio_list, &plug->pending); in raid1_unplug()
1166 conf->pending_count += plug->pending_cnt; in raid1_unplug()
1167 spin_unlock_irq(&conf->device_lock); in raid1_unplug()
1168 wake_up(&conf->wait_barrier); in raid1_unplug()
1169 md_wakeup_thread(mddev->thread); in raid1_unplug()
1174 /* we aren't scheduling, so we can do the write-out directly. */ in raid1_unplug()
1175 bio = bio_list_get(&plug->pending); in raid1_unplug()
1182 r1_bio->master_bio = bio; in init_r1bio()
1183 r1_bio->sectors = bio_sectors(bio); in init_r1bio()
1184 r1_bio->state = 0; in init_r1bio()
1185 r1_bio->mddev = mddev; in init_r1bio()
1186 r1_bio->sector = bio->bi_iter.bi_sector; in init_r1bio()
1192 struct r1conf *conf = mddev->private; in alloc_r1bio()
1195 r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO); in alloc_r1bio()
1197 memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0])); in alloc_r1bio()
1205 struct r1conf *conf = mddev->private; in raid1_read_request()
1206 struct raid1_info *mirror; in raid1_read_request() local
1208 struct bitmap *bitmap = mddev->bitmap; in raid1_read_request()
1210 const unsigned long do_sync = (bio->bi_opf & REQ_SYNC); in raid1_read_request()
1227 rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev); in raid1_read_request()
1229 bdevname(rdev->bdev, b); in raid1_read_request()
1239 wait_read_barrier(conf, bio->bi_iter.bi_sector); in raid1_read_request()
1245 r1_bio->sectors = max_read_sectors; in raid1_read_request()
1248 * make_request() can abort the operation when read-ahead is being in raid1_read_request()
1259 (unsigned long long)r1_bio->sector); in raid1_read_request()
1264 mirror = conf->mirrors + rdisk; in raid1_read_request()
1267 pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n", in raid1_read_request()
1269 (unsigned long long)r1_bio->sector, in raid1_read_request()
1270 bdevname(mirror->rdev->bdev, b)); in raid1_read_request()
1272 if (test_bit(WriteMostly, &mirror->rdev->flags) && in raid1_read_request()
1275 * Reading from a write-mostly device must take care not to in raid1_read_request()
 1276 * overtake any writes that are 'behind' in raid1_read_request()
1279 wait_event(bitmap->behind_wait, in raid1_read_request()
1280 atomic_read(&bitmap->behind_writes) == 0); in raid1_read_request()
1285 gfp, &conf->bio_split); in raid1_read_request()
1289 r1_bio->master_bio = bio; in raid1_read_request()
1290 r1_bio->sectors = max_sectors; in raid1_read_request()
1293 r1_bio->read_disk = rdisk; in raid1_read_request()
1295 if (!r1bio_existed && blk_queue_io_stat(bio->bi_bdev->bd_disk->queue)) in raid1_read_request()
1296 r1_bio->start_time = bio_start_io_acct(bio); in raid1_read_request()
1298 read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set); in raid1_read_request()
1300 r1_bio->bios[rdisk] = read_bio; in raid1_read_request()
1302 read_bio->bi_iter.bi_sector = r1_bio->sector + in raid1_read_request()
1303 mirror->rdev->data_offset; in raid1_read_request()
1304 bio_set_dev(read_bio, mirror->rdev->bdev); in raid1_read_request()
1305 read_bio->bi_end_io = raid1_end_read_request; in raid1_read_request()
1307 if (test_bit(FailFast, &mirror->rdev->flags) && in raid1_read_request()
1308 test_bit(R1BIO_FailFast, &r1_bio->state)) in raid1_read_request()
1309 read_bio->bi_opf |= MD_FAILFAST; in raid1_read_request()
1310 read_bio->bi_private = r1_bio; in raid1_read_request()
1312 if (mddev->gendisk) in raid1_read_request()
1313 trace_block_bio_remap(read_bio, disk_devt(mddev->gendisk), in raid1_read_request()
1314 r1_bio->sector); in raid1_read_request()
1322 struct r1conf *conf = mddev->private; in raid1_write_request()
1325 struct bitmap *bitmap = mddev->bitmap; in raid1_write_request()
1335 md_cluster_ops->area_resyncing(mddev, WRITE, in raid1_write_request()
1336 bio->bi_iter.bi_sector, bio_end_sector(bio))) { in raid1_write_request()
1340 prepare_to_wait(&conf->wait_barrier, in raid1_write_request()
1342 if (!md_cluster_ops->area_resyncing(mddev, WRITE, in raid1_write_request()
1343 bio->bi_iter.bi_sector, in raid1_write_request()
1348 finish_wait(&conf->wait_barrier, &w); in raid1_write_request()
1356 wait_barrier(conf, bio->bi_iter.bi_sector); in raid1_write_request()
1359 r1_bio->sectors = max_write_sectors; in raid1_write_request()
1361 if (conf->pending_count >= max_queued_requests) { in raid1_write_request()
1362 md_wakeup_thread(mddev->thread); in raid1_write_request()
1364 wait_event(conf->wait_barrier, in raid1_write_request()
1365 conf->pending_count < max_queued_requests); in raid1_write_request()
1378 disks = conf->raid_disks * 2; in raid1_write_request()
1382 max_sectors = r1_bio->sectors; in raid1_write_request()
1384 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in raid1_write_request()
1387 * The write-behind io is only attempted on drives marked as in raid1_write_request()
1388 * write-mostly, which means we could allocate write behind in raid1_write_request()
1391 if (rdev && test_bit(WriteMostly, &rdev->flags)) in raid1_write_request()
1394 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { in raid1_write_request()
1395 atomic_inc(&rdev->nr_pending); in raid1_write_request()
1399 r1_bio->bios[i] = NULL; in raid1_write_request()
1400 if (!rdev || test_bit(Faulty, &rdev->flags)) { in raid1_write_request()
1401 if (i < conf->raid_disks) in raid1_write_request()
1402 set_bit(R1BIO_Degraded, &r1_bio->state); in raid1_write_request()
1406 atomic_inc(&rdev->nr_pending); in raid1_write_request()
1407 if (test_bit(WriteErrorSeen, &rdev->flags)) { in raid1_write_request()
1412 is_bad = is_badblock(rdev, r1_bio->sector, max_sectors, in raid1_write_request()
1417 set_bit(BlockedBadBlocks, &rdev->flags); in raid1_write_request()
1421 if (is_bad && first_bad <= r1_bio->sector) { in raid1_write_request()
1423 bad_sectors -= (r1_bio->sector - first_bad); in raid1_write_request()
1432 * missing, so it might be re-added, in raid1_write_request()
1437 * in-sync is recorded in the bad in raid1_write_request()
1443 int good_sectors = first_bad - r1_bio->sector; in raid1_write_request()
1448 r1_bio->bios[i] = bio; in raid1_write_request()
1457 if (r1_bio->bios[j]) in raid1_write_request()
1458 rdev_dec_pending(conf->mirrors[j].rdev, mddev); in raid1_write_request()
1459 r1_bio->state = 0; in raid1_write_request()
1460 allow_barrier(conf, bio->bi_iter.bi_sector); in raid1_write_request()
1461 raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk); in raid1_write_request()
1463 wait_barrier(conf, bio->bi_iter.bi_sector); in raid1_write_request()
1469 * alloc_behind_master_bio allocates a copy of the data payload a page in raid1_write_request()
1478 GFP_NOIO, &conf->bio_split); in raid1_write_request()
1482 r1_bio->master_bio = bio; in raid1_write_request()
1483 r1_bio->sectors = max_sectors; in raid1_write_request()
1486 if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue)) in raid1_write_request()
1487 r1_bio->start_time = bio_start_io_acct(bio); in raid1_write_request()
1488 atomic_set(&r1_bio->remaining, 1); in raid1_write_request()
1489 atomic_set(&r1_bio->behind_remaining, 0); in raid1_write_request()
1495 struct md_rdev *rdev = conf->mirrors[i].rdev; in raid1_write_request()
1496 if (!r1_bio->bios[i]) in raid1_write_request()
1505 (atomic_read(&bitmap->behind_writes) in raid1_write_request()
1506 < mddev->bitmap_info.max_write_behind) && in raid1_write_request()
1507 !waitqueue_active(&bitmap->behind_wait)) { in raid1_write_request()
1511 md_bitmap_startwrite(bitmap, r1_bio->sector, r1_bio->sectors, in raid1_write_request()
1512 test_bit(R1BIO_BehindIO, &r1_bio->state)); in raid1_write_request()
1516 if (r1_bio->behind_master_bio) in raid1_write_request()
1517 mbio = bio_clone_fast(r1_bio->behind_master_bio, in raid1_write_request()
1518 GFP_NOIO, &mddev->bio_set); in raid1_write_request()
1520 mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set); in raid1_write_request()
1522 if (r1_bio->behind_master_bio) { in raid1_write_request()
1523 if (test_bit(CollisionCheck, &rdev->flags)) in raid1_write_request()
1525 if (test_bit(WriteMostly, &rdev->flags)) in raid1_write_request()
1526 atomic_inc(&r1_bio->behind_remaining); in raid1_write_request()
1527 } else if (mddev->serialize_policy) in raid1_write_request()
1530 r1_bio->bios[i] = mbio; in raid1_write_request()
1532 mbio->bi_iter.bi_sector = (r1_bio->sector + in raid1_write_request()
1533 conf->mirrors[i].rdev->data_offset); in raid1_write_request()
1534 bio_set_dev(mbio, conf->mirrors[i].rdev->bdev); in raid1_write_request()
1535 mbio->bi_end_io = raid1_end_write_request; in raid1_write_request()
1536 mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA)); in raid1_write_request()
1537 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) && in raid1_write_request()
1538 !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) && in raid1_write_request()
1539 conf->raid_disks - mddev->degraded > 1) in raid1_write_request()
1540 mbio->bi_opf |= MD_FAILFAST; in raid1_write_request()
1541 mbio->bi_private = r1_bio; in raid1_write_request()
1543 atomic_inc(&r1_bio->remaining); in raid1_write_request()
1545 if (mddev->gendisk) in raid1_write_request()
1546 trace_block_bio_remap(mbio, disk_devt(mddev->gendisk), in raid1_write_request()
1547 r1_bio->sector); in raid1_write_request()
1549 mbio->bi_bdev = (void *)conf->mirrors[i].rdev; in raid1_write_request()
1557 bio_list_add(&plug->pending, mbio); in raid1_write_request()
1558 plug->pending_cnt++; in raid1_write_request()
1560 spin_lock_irqsave(&conf->device_lock, flags); in raid1_write_request()
1561 bio_list_add(&conf->pending_bio_list, mbio); in raid1_write_request()
1562 conf->pending_count++; in raid1_write_request()
1563 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_write_request()
1564 md_wakeup_thread(mddev->thread); in raid1_write_request()
1571 wake_up(&conf->wait_barrier); in raid1_write_request()
1578 if (unlikely(bio->bi_opf & REQ_PREFLUSH) in raid1_make_request()
1590 bio->bi_iter.bi_sector, bio_sectors(bio)); in raid1_make_request()
1604 struct r1conf *conf = mddev->private; in raid1_status()
1607 seq_printf(seq, " [%d/%d] [", conf->raid_disks, in raid1_status()
1608 conf->raid_disks - mddev->degraded); in raid1_status()
1610 for (i = 0; i < conf->raid_disks; i++) { in raid1_status()
1611 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in raid1_status()
1613 rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_"); in raid1_status()
1622 struct r1conf *conf = mddev->private; in raid1_error()
1631 spin_lock_irqsave(&conf->device_lock, flags); in raid1_error()
1632 if (test_bit(In_sync, &rdev->flags) && !mddev->fail_last_dev in raid1_error()
1633 && (conf->raid_disks - mddev->degraded) == 1) { in raid1_error()
1640 conf->recovery_disabled = mddev->recovery_disabled; in raid1_error()
1641 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_error()
1644 set_bit(Blocked, &rdev->flags); in raid1_error()
1645 if (test_and_clear_bit(In_sync, &rdev->flags)) in raid1_error()
1646 mddev->degraded++; in raid1_error()
1647 set_bit(Faulty, &rdev->flags); in raid1_error()
1648 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_error()
1652 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in raid1_error()
1653 set_mask_bits(&mddev->sb_flags, 0, in raid1_error()
1657 mdname(mddev), bdevname(rdev->bdev, b), in raid1_error()
1658 mdname(mddev), conf->raid_disks - mddev->degraded); in raid1_error()
1670 pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded, in print_conf()
1671 conf->raid_disks); in print_conf()
1674 for (i = 0; i < conf->raid_disks; i++) { in print_conf()
1676 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in print_conf()
1679 i, !test_bit(In_sync, &rdev->flags), in print_conf()
1680 !test_bit(Faulty, &rdev->flags), in print_conf()
1681 bdevname(rdev->bdev,b)); in print_conf()
1695 mempool_exit(&conf->r1buf_pool); in close_sync()
1701 struct r1conf *conf = mddev->private; in raid1_spare_active()
1710 * which expects 'In_sync' flags and ->degraded to be consistent. in raid1_spare_active()
1712 spin_lock_irqsave(&conf->device_lock, flags); in raid1_spare_active()
1713 for (i = 0; i < conf->raid_disks; i++) { in raid1_spare_active()
1714 struct md_rdev *rdev = conf->mirrors[i].rdev; in raid1_spare_active()
1715 struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev; in raid1_spare_active()
1717 && !test_bit(Candidate, &repl->flags) in raid1_spare_active()
1718 && repl->recovery_offset == MaxSector in raid1_spare_active()
1719 && !test_bit(Faulty, &repl->flags) in raid1_spare_active()
1720 && !test_and_set_bit(In_sync, &repl->flags)) { in raid1_spare_active()
1723 !test_and_clear_bit(In_sync, &rdev->flags)) in raid1_spare_active()
1728 * it gets removed and never re-added in raid1_spare_active()
1730 set_bit(Faulty, &rdev->flags); in raid1_spare_active()
1732 rdev->sysfs_state); in raid1_spare_active()
1736 && rdev->recovery_offset == MaxSector in raid1_spare_active()
1737 && !test_bit(Faulty, &rdev->flags) in raid1_spare_active()
1738 && !test_and_set_bit(In_sync, &rdev->flags)) { in raid1_spare_active()
1740 sysfs_notify_dirent_safe(rdev->sysfs_state); in raid1_spare_active()
1743 mddev->degraded -= count; in raid1_spare_active()
1744 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_spare_active()
1752 struct r1conf *conf = mddev->private; in raid1_add_disk()
1753 int err = -EEXIST; in raid1_add_disk()
1754 int mirror = 0; in raid1_add_disk() local
1757 int last = conf->raid_disks - 1; in raid1_add_disk()
1759 if (mddev->recovery_disabled == conf->recovery_disabled) in raid1_add_disk()
1760 return -EBUSY; in raid1_add_disk()
1763 return -ENXIO; in raid1_add_disk()
1765 if (rdev->raid_disk >= 0) in raid1_add_disk()
1766 first = last = rdev->raid_disk; in raid1_add_disk()
1769 * find the disk ... but prefer rdev->saved_raid_disk in raid1_add_disk()
1772 if (rdev->saved_raid_disk >= 0 && in raid1_add_disk()
1773 rdev->saved_raid_disk >= first && in raid1_add_disk()
1774 rdev->saved_raid_disk < conf->raid_disks && in raid1_add_disk()
1775 conf->mirrors[rdev->saved_raid_disk].rdev == NULL) in raid1_add_disk()
1776 first = last = rdev->saved_raid_disk; in raid1_add_disk()
1778 for (mirror = first; mirror <= last; mirror++) { in raid1_add_disk()
1779 p = conf->mirrors + mirror; in raid1_add_disk()
1780 if (!p->rdev) { in raid1_add_disk()
1781 if (mddev->gendisk) in raid1_add_disk()
1782 disk_stack_limits(mddev->gendisk, rdev->bdev, in raid1_add_disk()
1783 rdev->data_offset << 9); in raid1_add_disk()
1785 p->head_position = 0; in raid1_add_disk()
1786 rdev->raid_disk = mirror; in raid1_add_disk()
1791 if (rdev->saved_raid_disk < 0) in raid1_add_disk()
1792 conf->fullsync = 1; in raid1_add_disk()
1793 rcu_assign_pointer(p->rdev, rdev); in raid1_add_disk()
1796 if (test_bit(WantReplacement, &p->rdev->flags) && in raid1_add_disk()
1797 p[conf->raid_disks].rdev == NULL) { in raid1_add_disk()
1799 clear_bit(In_sync, &rdev->flags); in raid1_add_disk()
1800 set_bit(Replacement, &rdev->flags); in raid1_add_disk()
1801 rdev->raid_disk = mirror; in raid1_add_disk()
1803 conf->fullsync = 1; in raid1_add_disk()
1804 rcu_assign_pointer(p[conf->raid_disks].rdev, rdev); in raid1_add_disk()
1808 if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev))) in raid1_add_disk()
1809 blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue); in raid1_add_disk()
1816 struct r1conf *conf = mddev->private; in raid1_remove_disk()
1818 int number = rdev->raid_disk; in raid1_remove_disk()
1819 struct raid1_info *p = conf->mirrors + number; in raid1_remove_disk()
1821 if (rdev != p->rdev) in raid1_remove_disk()
1822 p = conf->mirrors + conf->raid_disks + number; in raid1_remove_disk()
1825 if (rdev == p->rdev) { in raid1_remove_disk()
1826 if (test_bit(In_sync, &rdev->flags) || in raid1_remove_disk()
1827 atomic_read(&rdev->nr_pending)) { in raid1_remove_disk()
1828 err = -EBUSY; in raid1_remove_disk()
1831 /* Only remove non-faulty devices if recovery in raid1_remove_disk()
1834 if (!test_bit(Faulty, &rdev->flags) && in raid1_remove_disk()
1835 mddev->recovery_disabled != conf->recovery_disabled && in raid1_remove_disk()
1836 mddev->degraded < conf->raid_disks) { in raid1_remove_disk()
1837 err = -EBUSY; in raid1_remove_disk()
1840 p->rdev = NULL; in raid1_remove_disk()
1841 if (!test_bit(RemoveSynchronized, &rdev->flags)) { in raid1_remove_disk()
1843 if (atomic_read(&rdev->nr_pending)) { in raid1_remove_disk()
1845 err = -EBUSY; in raid1_remove_disk()
1846 p->rdev = rdev; in raid1_remove_disk()
1850 if (conf->mirrors[conf->raid_disks + number].rdev) { in raid1_remove_disk()
1856 conf->mirrors[conf->raid_disks + number].rdev; in raid1_remove_disk()
1858 if (atomic_read(&repl->nr_pending)) { in raid1_remove_disk()
1865 err = -EBUSY; in raid1_remove_disk()
1869 clear_bit(Replacement, &repl->flags); in raid1_remove_disk()
1870 p->rdev = repl; in raid1_remove_disk()
1871 conf->mirrors[conf->raid_disks + number].rdev = NULL; in raid1_remove_disk()
1875 clear_bit(WantReplacement, &rdev->flags); in raid1_remove_disk()
1888 update_head_pos(r1_bio->read_disk, r1_bio); in end_sync_read()
1891 * we have read a block, now it needs to be re-written, in end_sync_read()
1892 * or re-read if the read failed. in end_sync_read()
1895 if (!bio->bi_status) in end_sync_read()
1896 set_bit(R1BIO_Uptodate, &r1_bio->state); in end_sync_read()
1898 if (atomic_dec_and_test(&r1_bio->remaining)) in end_sync_read()
1905 sector_t s = r1_bio->sector; in abort_sync_write()
1906 long sectors_to_go = r1_bio->sectors; in abort_sync_write()
1910 md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1); in abort_sync_write()
1912 sectors_to_go -= sync_blocks; in abort_sync_write()
1918 if (atomic_dec_and_test(&r1_bio->remaining)) { in put_sync_write_buf()
1919 struct mddev *mddev = r1_bio->mddev; in put_sync_write_buf()
1920 int s = r1_bio->sectors; in put_sync_write_buf()
1922 if (test_bit(R1BIO_MadeGood, &r1_bio->state) || in put_sync_write_buf()
1923 test_bit(R1BIO_WriteError, &r1_bio->state)) in put_sync_write_buf()
1934 int uptodate = !bio->bi_status; in end_sync_write()
1936 struct mddev *mddev = r1_bio->mddev; in end_sync_write()
1937 struct r1conf *conf = mddev->private; in end_sync_write()
1940 struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev; in end_sync_write()
1944 set_bit(WriteErrorSeen, &rdev->flags); in end_sync_write()
1945 if (!test_and_set_bit(WantReplacement, &rdev->flags)) in end_sync_write()
1947 mddev->recovery); in end_sync_write()
1948 set_bit(R1BIO_WriteError, &r1_bio->state); in end_sync_write()
1949 } else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors, in end_sync_write()
1951 !is_badblock(conf->mirrors[r1_bio->read_disk].rdev, in end_sync_write()
1952 r1_bio->sector, in end_sync_write()
1953 r1_bio->sectors, in end_sync_write()
1956 set_bit(R1BIO_MadeGood, &r1_bio->state); in end_sync_write()
1968 set_bit(WriteErrorSeen, &rdev->flags); in r1_sync_page_io()
1970 &rdev->flags)) in r1_sync_page_io()
1972 rdev->mddev->recovery); in r1_sync_page_io()
1974 /* need to record an error - either for the block or the device */ in r1_sync_page_io()
1976 md_error(rdev->mddev, rdev); in r1_sync_page_io()
1983 * good data, much like with normal read errors. Only in fix_sync_read_error()
1985 * need to re-issue the read request. in fix_sync_read_error()
1993 struct mddev *mddev = r1_bio->mddev; in fix_sync_read_error()
1994 struct r1conf *conf = mddev->private; in fix_sync_read_error()
1995 struct bio *bio = r1_bio->bios[r1_bio->read_disk]; in fix_sync_read_error()
1996 struct page **pages = get_resync_pages(bio)->pages; in fix_sync_read_error()
1997 sector_t sect = r1_bio->sector; in fix_sync_read_error()
1998 int sectors = r1_bio->sectors; in fix_sync_read_error()
2002 rdev = conf->mirrors[r1_bio->read_disk].rdev; in fix_sync_read_error()
2003 if (test_bit(FailFast, &rdev->flags)) { in fix_sync_read_error()
2004 /* Don't try recovering from here - just fail it in fix_sync_read_error()
2007 if (test_bit(Faulty, &rdev->flags)) in fix_sync_read_error()
2011 bio->bi_end_io = end_sync_write; in fix_sync_read_error()
2016 int d = r1_bio->read_disk; in fix_sync_read_error()
2023 if (r1_bio->bios[d]->bi_end_io == end_sync_read) { in fix_sync_read_error()
2028 rdev = conf->mirrors[d].rdev; in fix_sync_read_error()
2037 if (d == conf->raid_disks * 2) in fix_sync_read_error()
2039 } while (!success && d != r1_bio->read_disk); in fix_sync_read_error()
2051 (unsigned long long)r1_bio->sector); in fix_sync_read_error()
2052 for (d = 0; d < conf->raid_disks * 2; d++) { in fix_sync_read_error()
2053 rdev = conf->mirrors[d].rdev; in fix_sync_read_error()
2054 if (!rdev || test_bit(Faulty, &rdev->flags)) in fix_sync_read_error()
2060 conf->recovery_disabled = in fix_sync_read_error()
2061 mddev->recovery_disabled; in fix_sync_read_error()
2062 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in fix_sync_read_error()
2063 md_done_sync(mddev, r1_bio->sectors, 0); in fix_sync_read_error()
2068 sectors -= s; in fix_sync_read_error()
2075 /* write it back and re-read */ in fix_sync_read_error()
2076 while (d != r1_bio->read_disk) { in fix_sync_read_error()
2078 d = conf->raid_disks * 2; in fix_sync_read_error()
2079 d--; in fix_sync_read_error()
2080 if (r1_bio->bios[d]->bi_end_io != end_sync_read) in fix_sync_read_error()
2082 rdev = conf->mirrors[d].rdev; in fix_sync_read_error()
2086 r1_bio->bios[d]->bi_end_io = NULL; in fix_sync_read_error()
2091 while (d != r1_bio->read_disk) { in fix_sync_read_error()
2093 d = conf->raid_disks * 2; in fix_sync_read_error()
2094 d--; in fix_sync_read_error()
2095 if (r1_bio->bios[d]->bi_end_io != end_sync_read) in fix_sync_read_error()
2097 rdev = conf->mirrors[d].rdev; in fix_sync_read_error()
2101 atomic_add(s, &rdev->corrected_errors); in fix_sync_read_error()
2103 sectors -= s; in fix_sync_read_error()
2107 set_bit(R1BIO_Uptodate, &r1_bio->state); in fix_sync_read_error()
2108 bio->bi_status = 0; in fix_sync_read_error()
2119 * attempt an over-write in process_checks()
2121 struct mddev *mddev = r1_bio->mddev; in process_checks()
2122 struct r1conf *conf = mddev->private; in process_checks()
2128 vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9); in process_checks()
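Worked example for the vcnt computation above, assuming 4KiB pages: PAGE_SIZE / 512 = 8 and PAGE_SHIFT - 9 = 3, so r1_bio->sectors = 20 gives vcnt = (20 + 7) >> 3 = 3 pages to compare.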
2129 for (i = 0; i < conf->raid_disks * 2; i++) { in process_checks()
2131 struct bio *b = r1_bio->bios[i]; in process_checks()
2133 if (b->bi_end_io != end_sync_read) in process_checks()
2136 status = b->bi_status; in process_checks()
2138 b->bi_status = status; in process_checks()
2139 b->bi_iter.bi_sector = r1_bio->sector + in process_checks()
2140 conf->mirrors[i].rdev->data_offset; in process_checks()
2141 bio_set_dev(b, conf->mirrors[i].rdev->bdev); in process_checks()
2142 b->bi_end_io = end_sync_read; in process_checks()
2143 rp->raid_bio = r1_bio; in process_checks()
2144 b->bi_private = rp; in process_checks()
2147 md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9); in process_checks()
2149 for (primary = 0; primary < conf->raid_disks * 2; primary++) in process_checks()
2150 if (r1_bio->bios[primary]->bi_end_io == end_sync_read && in process_checks()
2151 !r1_bio->bios[primary]->bi_status) { in process_checks()
2152 r1_bio->bios[primary]->bi_end_io = NULL; in process_checks()
2153 rdev_dec_pending(conf->mirrors[primary].rdev, mddev); in process_checks()
2156 r1_bio->read_disk = primary; in process_checks()
2157 for (i = 0; i < conf->raid_disks * 2; i++) { in process_checks()
2159 struct bio *pbio = r1_bio->bios[primary]; in process_checks()
2160 struct bio *sbio = r1_bio->bios[i]; in process_checks()
2161 blk_status_t status = sbio->bi_status; in process_checks()
2162 struct page **ppages = get_resync_pages(pbio)->pages; in process_checks()
2163 struct page **spages = get_resync_pages(sbio)->pages; in process_checks()
2168 if (sbio->bi_end_io != end_sync_read) in process_checks()
2171 sbio->bi_status = 0; in process_checks()
2174 page_len[j++] = bi->bv_len; in process_checks()
2177 for (j = vcnt; j-- ; ) { in process_checks()
2186 atomic64_add(r1_bio->sectors, &mddev->resync_mismatches); in process_checks()
2187 if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) in process_checks()
2190 sbio->bi_end_io = NULL; in process_checks()
2191 rdev_dec_pending(conf->mirrors[i].rdev, mddev); in process_checks()
2201 struct r1conf *conf = mddev->private; in sync_request_write()
2203 int disks = conf->raid_disks * 2; in sync_request_write()
2206 if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) in sync_request_write()
2207 /* ouch - failed to read all of that. */ in sync_request_write()
2211 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) in sync_request_write()
2217 atomic_set(&r1_bio->remaining, 1); in sync_request_write()
2219 wbio = r1_bio->bios[i]; in sync_request_write()
2220 if (wbio->bi_end_io == NULL || in sync_request_write()
2221 (wbio->bi_end_io == end_sync_read && in sync_request_write()
2222 (i == r1_bio->read_disk || in sync_request_write()
2223 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery)))) in sync_request_write()
2225 if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) { in sync_request_write()
2231 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags)) in sync_request_write()
2232 wbio->bi_opf |= MD_FAILFAST; in sync_request_write()
2234 wbio->bi_end_io = end_sync_write; in sync_request_write()
2235 atomic_inc(&r1_bio->remaining); in sync_request_write()
2236 md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio)); in sync_request_write()
2255 struct mddev *mddev = conf->mddev; in fix_read_error()
2271 rdev = rcu_dereference(conf->mirrors[d].rdev); in fix_read_error()
2273 (test_bit(In_sync, &rdev->flags) || in fix_read_error()
2274 (!test_bit(Faulty, &rdev->flags) && in fix_read_error()
2275 rdev->recovery_offset >= sect + s)) && in fix_read_error()
2278 atomic_inc(&rdev->nr_pending); in fix_read_error()
2281 conf->tmppage, REQ_OP_READ, 0, false)) in fix_read_error()
2289 if (d == conf->raid_disks * 2) in fix_read_error()
2294 /* Cannot read from anywhere - mark it bad */ in fix_read_error()
2295 struct md_rdev *rdev = conf->mirrors[read_disk].rdev; in fix_read_error()
2300 /* write it back and re-read */ in fix_read_error()
2304 d = conf->raid_disks * 2; in fix_read_error()
2305 d--; in fix_read_error()
2307 rdev = rcu_dereference(conf->mirrors[d].rdev); in fix_read_error()
2309 !test_bit(Faulty, &rdev->flags)) { in fix_read_error()
2310 atomic_inc(&rdev->nr_pending); in fix_read_error()
2313 conf->tmppage, WRITE); in fix_read_error()
2322 d = conf->raid_disks * 2; in fix_read_error()
2323 d--; in fix_read_error()
2325 rdev = rcu_dereference(conf->mirrors[d].rdev); in fix_read_error()
2327 !test_bit(Faulty, &rdev->flags)) { in fix_read_error()
2328 atomic_inc(&rdev->nr_pending); in fix_read_error()
2331 conf->tmppage, READ)) { in fix_read_error()
2332 atomic_add(s, &rdev->corrected_errors); in fix_read_error()
2336 rdev->data_offset), in fix_read_error()
2337 bdevname(rdev->bdev, b)); in fix_read_error()
2343 sectors -= s; in fix_read_error()
2350 struct mddev *mddev = r1_bio->mddev; in narrow_write_error()
2351 struct r1conf *conf = mddev->private; in narrow_write_error()
2352 struct md_rdev *rdev = conf->mirrors[i].rdev; in narrow_write_error()
2354 /* bio has the data to be written to device 'i' where in narrow_write_error()
2368 int sect_to_write = r1_bio->sectors; in narrow_write_error()
2371 if (rdev->badblocks.shift < 0) in narrow_write_error()
2374 block_sectors = roundup(1 << rdev->badblocks.shift, in narrow_write_error()
2375 bdev_logical_block_size(rdev->bdev) >> 9); in narrow_write_error()
2376 sector = r1_bio->sector; in narrow_write_error()
2378 & ~(sector_t)(block_sectors - 1)) in narrow_write_error()
2379 - sector; in narrow_write_error()
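Worked example for the alignment above (values invented): badblocks.shift = 3 and 512-byte logical blocks give block_sectors = roundup(8, 1) = 8, so a failed write starting at sector 21 is retried as the partial piece [21, 24) first and then in aligned 8-sector pieces.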
2387 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { in narrow_write_error()
2388 wbio = bio_clone_fast(r1_bio->behind_master_bio, in narrow_write_error()
2390 &mddev->bio_set); in narrow_write_error()
2392 wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO, in narrow_write_error()
2393 &mddev->bio_set); in narrow_write_error()
2397 wbio->bi_iter.bi_sector = r1_bio->sector; in narrow_write_error()
2398 wbio->bi_iter.bi_size = r1_bio->sectors << 9; in narrow_write_error()
2400 bio_trim(wbio, sector - r1_bio->sector, sectors); in narrow_write_error()
2401 wbio->bi_iter.bi_sector += rdev->data_offset; in narrow_write_error()
2402 bio_set_dev(wbio, rdev->bdev); in narrow_write_error()
2411 sect_to_write -= sectors; in narrow_write_error()
2421 int s = r1_bio->sectors; in handle_sync_write_finished()
2422 for (m = 0; m < conf->raid_disks * 2 ; m++) { in handle_sync_write_finished()
2423 struct md_rdev *rdev = conf->mirrors[m].rdev; in handle_sync_write_finished()
2424 struct bio *bio = r1_bio->bios[m]; in handle_sync_write_finished()
2425 if (bio->bi_end_io == NULL) in handle_sync_write_finished()
2427 if (!bio->bi_status && in handle_sync_write_finished()
2428 test_bit(R1BIO_MadeGood, &r1_bio->state)) { in handle_sync_write_finished()
2429 rdev_clear_badblocks(rdev, r1_bio->sector, s, 0); in handle_sync_write_finished()
2431 if (bio->bi_status && in handle_sync_write_finished()
2432 test_bit(R1BIO_WriteError, &r1_bio->state)) { in handle_sync_write_finished()
2433 if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0)) in handle_sync_write_finished()
2434 md_error(conf->mddev, rdev); in handle_sync_write_finished()
2438 md_done_sync(conf->mddev, s, 1); in handle_sync_write_finished()
2446 for (m = 0; m < conf->raid_disks * 2 ; m++) in handle_write_finished()
2447 if (r1_bio->bios[m] == IO_MADE_GOOD) { in handle_write_finished()
2448 struct md_rdev *rdev = conf->mirrors[m].rdev; in handle_write_finished()
2450 r1_bio->sector, in handle_write_finished()
2451 r1_bio->sectors, 0); in handle_write_finished()
2452 rdev_dec_pending(rdev, conf->mddev); in handle_write_finished()
2453 } else if (r1_bio->bios[m] != NULL) { in handle_write_finished()
2460 md_error(conf->mddev, in handle_write_finished()
2461 conf->mirrors[m].rdev); in handle_write_finished()
2463 set_bit(R1BIO_Degraded, &r1_bio->state); in handle_write_finished()
2465 rdev_dec_pending(conf->mirrors[m].rdev, in handle_write_finished()
2466 conf->mddev); in handle_write_finished()
2469 spin_lock_irq(&conf->device_lock); in handle_write_finished()
2470 list_add(&r1_bio->retry_list, &conf->bio_end_io_list); in handle_write_finished()
2471 idx = sector_to_idx(r1_bio->sector); in handle_write_finished()
2472 atomic_inc(&conf->nr_queued[idx]); in handle_write_finished()
2473 spin_unlock_irq(&conf->device_lock); in handle_write_finished()
2478 wake_up(&conf->wait_barrier); in handle_write_finished()
2479 md_wakeup_thread(conf->mddev->thread); in handle_write_finished()
2481 if (test_bit(R1BIO_WriteError, &r1_bio->state)) in handle_write_finished()
2489 struct mddev *mddev = conf->mddev; in handle_read_error()
2493 clear_bit(R1BIO_ReadError, &r1_bio->state); in handle_read_error()
2497 * other devices. When we find one, we re-write in handle_read_error()
2503 bio = r1_bio->bios[r1_bio->read_disk]; in handle_read_error()
2505 r1_bio->bios[r1_bio->read_disk] = NULL; in handle_read_error()
2507 rdev = conf->mirrors[r1_bio->read_disk].rdev; in handle_read_error()
2508 if (mddev->ro == 0 in handle_read_error()
2509 && !test_bit(FailFast, &rdev->flags)) { in handle_read_error()
2511 fix_read_error(conf, r1_bio->read_disk, in handle_read_error()
2512 r1_bio->sector, r1_bio->sectors); in handle_read_error()
2514 } else if (mddev->ro == 0 && test_bit(FailFast, &rdev->flags)) { in handle_read_error()
2517 r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED; in handle_read_error()
2520 rdev_dec_pending(rdev, conf->mddev); in handle_read_error()
2521 allow_barrier(conf, r1_bio->sector); in handle_read_error()
2522 bio = r1_bio->master_bio; in handle_read_error()
2525 r1_bio->state = 0; in handle_read_error()
2526 raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio); in handle_read_error()
2531 struct mddev *mddev = thread->mddev; in raid1d()
2534 struct r1conf *conf = mddev->private; in raid1d()
2535 struct list_head *head = &conf->retry_list; in raid1d()
2541 if (!list_empty_careful(&conf->bio_end_io_list) && in raid1d()
2542 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { in raid1d()
2544 spin_lock_irqsave(&conf->device_lock, flags); in raid1d()
2545 if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) in raid1d()
2546 list_splice_init(&conf->bio_end_io_list, &tmp); in raid1d()
2547 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1d()
2551 list_del(&r1_bio->retry_list); in raid1d()
2552 idx = sector_to_idx(r1_bio->sector); in raid1d()
2553 atomic_dec(&conf->nr_queued[idx]); in raid1d()
2554 if (mddev->degraded) in raid1d()
2555 set_bit(R1BIO_Degraded, &r1_bio->state); in raid1d()
2556 if (test_bit(R1BIO_WriteError, &r1_bio->state)) in raid1d()
2567 spin_lock_irqsave(&conf->device_lock, flags); in raid1d()
2569 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1d()
2572 r1_bio = list_entry(head->prev, struct r1bio, retry_list); in raid1d()
2573 list_del(head->prev); in raid1d()
2574 idx = sector_to_idx(r1_bio->sector); in raid1d()
2575 atomic_dec(&conf->nr_queued[idx]); in raid1d()
2576 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1d()
2578 mddev = r1_bio->mddev; in raid1d()
2579 conf = mddev->private; in raid1d()
2580 if (test_bit(R1BIO_IsSync, &r1_bio->state)) { in raid1d()
2581 if (test_bit(R1BIO_MadeGood, &r1_bio->state) || in raid1d()
2582 test_bit(R1BIO_WriteError, &r1_bio->state)) in raid1d()
2586 } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) || in raid1d()
2587 test_bit(R1BIO_WriteError, &r1_bio->state)) in raid1d()
2589 else if (test_bit(R1BIO_ReadError, &r1_bio->state)) in raid1d()
2595 if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING)) in raid1d()
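raid1d() drains the shared bio_end_io_list by splicing it onto a private list under device_lock and only then completing the entries, so no bio is ended while the lock is held. A rough pthread sketch of that splice-then-process pattern, with hypothetical types rather than the kernel list API:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int id; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *pending;            /* shared, lock-protected */

void queue_item(int id)
{
        struct node *n = malloc(sizeof(*n));
        if (!n)
                return;
        n->id = id;
        pthread_mutex_lock(&lock);
        n->next = pending;
        pending = n;
        pthread_mutex_unlock(&lock);
}

void drain(void)
{
        pthread_mutex_lock(&lock);
        struct node *local = pending;   /* "splice": steal the list in O(1) */
        pending = NULL;
        pthread_mutex_unlock(&lock);

        while (local) {                 /* process with the lock dropped */
                struct node *n = local;
                local = n->next;
                printf("completing %d\n", n->id);
                free(n);
        }
}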
2606 BUG_ON(mempool_initialized(&conf->r1buf_pool)); in init_resync()
2608 return mempool_init(&conf->r1buf_pool, buffs, r1buf_pool_alloc, in init_resync()
2609 r1buf_pool_free, conf->poolinfo); in init_resync()
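init_resync() sizes r1buf_pool so a fixed number of resync buffers are always available regardless of memory pressure. As a loose user-space analogue of the mempool idea (a guaranteed reserve with allocator fallback; note the real mempool_alloc() tries the allocator before dipping into the reserve), consider:

#include <stdlib.h>

/* Hypothetical minimal pool, not the kernel mempool API. */
struct pool {
        void **elems;
        int nr, min_nr;
        size_t sz;
};

int pool_init(struct pool *p, int min_nr, size_t sz)
{
        p->sz = sz;
        p->min_nr = min_nr;
        p->nr = 0;
        p->elems = calloc(min_nr, sizeof(void *));
        if (!p->elems)
                return -1;
        while (p->nr < min_nr) {
                void *e = malloc(sz);
                if (!e) {               /* unwind on failure */
                        while (p->nr > 0)
                                free(p->elems[--p->nr]);
                        free(p->elems);
                        return -1;
                }
                p->elems[p->nr++] = e;
        }
        return 0;
}

void *pool_alloc(struct pool *p)
{
        if (p->nr > 0)
                return p->elems[--p->nr];       /* reserve hit */
        return malloc(p->sz);                   /* allocator fallback */
}

void pool_free(struct pool *p, void *e)
{
        if (p->nr < p->min_nr)
                p->elems[p->nr++] = e;          /* refill the reserve first */
        else
                free(e);
}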
2614 struct r1bio *r1bio = mempool_alloc(&conf->r1buf_pool, GFP_NOIO); in raid1_alloc_init_r1buf()
2619 for (i = conf->poolinfo->raid_disks; i--; ) { in raid1_alloc_init_r1buf()
2620 bio = r1bio->bios[i]; in raid1_alloc_init_r1buf()
2621 rps = bio->bi_private; in raid1_alloc_init_r1buf()
2623 bio->bi_private = rps; in raid1_alloc_init_r1buf()
2625 r1bio->master_bio = NULL; in raid1_alloc_init_r1buf()
2632 * We need to make sure that no normal I/O request - particularly write
2633 * requests - conflict with active sync requests.
2642 struct r1conf *conf = mddev->private; in raid1_sync_request()
2646 int disk = -1; in raid1_sync_request()
2648 int wonly = -1; in raid1_sync_request()
2657 if (!mempool_initialized(&conf->r1buf_pool)) in raid1_sync_request()
2661 max_sector = mddev->dev_sectors; in raid1_sync_request()
2666 * We can find the current address in mddev->curr_resync in raid1_sync_request()
2668 if (mddev->curr_resync < max_sector) /* aborted */ in raid1_sync_request()
2669 md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync, in raid1_sync_request()
2672 conf->fullsync = 0; in raid1_sync_request()
2674 md_bitmap_close_sync(mddev->bitmap); in raid1_sync_request()
2678 conf->cluster_sync_low = 0; in raid1_sync_request()
2679 conf->cluster_sync_high = 0; in raid1_sync_request()
2684 if (mddev->bitmap == NULL && in raid1_sync_request()
2685 mddev->recovery_cp == MaxSector && in raid1_sync_request()
2686 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && in raid1_sync_request()
2687 conf->fullsync == 0) { in raid1_sync_request()
2689 return max_sector - sector_nr; in raid1_sync_request()
2694 if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && in raid1_sync_request()
2695 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { in raid1_sync_request()
2702 * If there is non-resync activity waiting for a turn, then let it in raid1_sync_request()
2705 if (atomic_read(&conf->nr_waiting[idx])) in raid1_sync_request()
2712 md_bitmap_cond_end_sync(mddev->bitmap, sector_nr, in raid1_sync_request()
2713 mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high)); in raid1_sync_request()
2726 * and any others (which will be non-In_sync devices) for WRITE. in raid1_sync_request()
2731 r1_bio->mddev = mddev; in raid1_sync_request()
2732 r1_bio->sector = sector_nr; in raid1_sync_request()
2733 r1_bio->state = 0; in raid1_sync_request()
2734 set_bit(R1BIO_IsSync, &r1_bio->state); in raid1_sync_request()
2738 for (i = 0; i < conf->raid_disks * 2; i++) { in raid1_sync_request()
2740 bio = r1_bio->bios[i]; in raid1_sync_request()
2742 rdev = rcu_dereference(conf->mirrors[i].rdev); in raid1_sync_request()
2744 test_bit(Faulty, &rdev->flags)) { in raid1_sync_request()
2745 if (i < conf->raid_disks) in raid1_sync_request()
2747 } else if (!test_bit(In_sync, &rdev->flags)) { in raid1_sync_request()
2749 bio->bi_end_io = end_sync_write; in raid1_sync_request()
2759 good_sectors = first_bad - sector_nr; in raid1_sync_request()
2761 bad_sectors -= (sector_nr - first_bad); in raid1_sync_request()
2768 if (test_bit(WriteMostly, &rdev->flags)) { in raid1_sync_request()
2776 bio->bi_end_io = end_sync_read; in raid1_sync_request()
2778 } else if (!test_bit(WriteErrorSeen, &rdev->flags) && in raid1_sync_request()
2779 test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && in raid1_sync_request()
2780 !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { in raid1_sync_request()
2788 bio->bi_end_io = end_sync_write; in raid1_sync_request()
2792 if (rdev && bio->bi_end_io) { in raid1_sync_request()
2793 atomic_inc(&rdev->nr_pending); in raid1_sync_request()
2794 bio->bi_iter.bi_sector = sector_nr + rdev->data_offset; in raid1_sync_request()
2795 bio_set_dev(bio, rdev->bdev); in raid1_sync_request()
2796 if (test_bit(FailFast, &rdev->flags)) in raid1_sync_request()
2797 bio->bi_opf |= MD_FAILFAST; in raid1_sync_request()
2803 r1_bio->read_disk = disk; in raid1_sync_request()
2810 for (i = 0 ; i < conf->raid_disks * 2 ; i++) in raid1_sync_request()
2811 if (r1_bio->bios[i]->bi_end_io == end_sync_write) { in raid1_sync_request()
2812 struct md_rdev *rdev = conf->mirrors[i].rdev; in raid1_sync_request()
2817 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in raid1_sync_request()
2827 conf->recovery_disabled = mddev->recovery_disabled; in raid1_sync_request()
2828 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in raid1_sync_request()
2835 /* only resync enough to reach the next bad->good in raid1_sync_request()
2840 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0) in raid1_sync_request()
2842 write_targets += read_targets-1; in raid1_sync_request()
2845 /* There is nowhere to write, so all non-sync in raid1_sync_request()
2846 * drives must be failed - so we are finished in raid1_sync_request()
2851 rv = max_sector - sector_nr; in raid1_sync_request()
2857 if (max_sector > mddev->resync_max) in raid1_sync_request()
2858 max_sector = mddev->resync_max; /* Don't do IO beyond here */ in raid1_sync_request()
2867 len = (max_sector - sector_nr) << 9; in raid1_sync_request()
2871 if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, in raid1_sync_request()
2873 !conf->fullsync && in raid1_sync_request()
2874 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) in raid1_sync_request()
2880 for (i = 0 ; i < conf->raid_disks * 2; i++) { in raid1_sync_request()
2883 bio = r1_bio->bios[i]; in raid1_sync_request()
2885 if (bio->bi_end_io) { in raid1_sync_request()
2897 sync_blocks -= (len>>9); in raid1_sync_request()
2900 r1_bio->sectors = nr_sectors; in raid1_sync_request()
2903 conf->cluster_sync_high < sector_nr + nr_sectors) { in raid1_sync_request()
2904 conf->cluster_sync_low = mddev->curr_resync_completed; in raid1_sync_request()
2905 conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS; in raid1_sync_request()
2907 md_cluster_ops->resync_info_update(mddev, in raid1_sync_request()
2908 conf->cluster_sync_low, in raid1_sync_request()
2909 conf->cluster_sync_high); in raid1_sync_request()
2912 /* For a user-requested sync, we read all readable devices and do a in raid1_sync_request()
2915 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { in raid1_sync_request()
2916 atomic_set(&r1_bio->remaining, read_targets); in raid1_sync_request()
2917 for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) { in raid1_sync_request()
2918 bio = r1_bio->bios[i]; in raid1_sync_request()
2919 if (bio->bi_end_io == end_sync_read) { in raid1_sync_request()
2920 read_targets--; in raid1_sync_request()
2923 bio->bi_opf &= ~MD_FAILFAST; in raid1_sync_request()
2928 atomic_set(&r1_bio->remaining, 1); in raid1_sync_request()
2929 bio = r1_bio->bios[r1_bio->read_disk]; in raid1_sync_request()
2932 bio->bi_opf &= ~MD_FAILFAST; in raid1_sync_request()
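The is_badblock() handling above trims the window a resync read may cover: if a known-bad range begins inside the window, good_sectors shrinks to stop short of it; if the bad range already covers sector_nr, the length of the bad prefix feeds a minimum-bad counter (min_bad in the full source). A standalone restatement of that arithmetic, with hypothetical names:

#include <stdint.h>

struct bad_range { uint64_t first_bad; int bad_sectors; };

/* Sketch of how raid1_sync_request() clamps the sync window around a
 * bad-block range reported by is_badblock(). */
static void clamp_sync_window(uint64_t sector_nr, int *good_sectors,
                              int *min_bad, const struct bad_range *bb)
{
        if (bb->first_bad > sector_nr) {
                /* bad range starts later: shrink the readable window */
                uint64_t good = bb->first_bad - sector_nr;
                if ((int)good < *good_sectors)
                        *good_sectors = (int)good;
        } else {
                /* bad range covers sector_nr: count the bad prefix */
                int bad = bb->bad_sectors - (int)(sector_nr - bb->first_bad);
                if (*min_bad == 0 || *min_bad > bad)
                        *min_bad = bad;
        }
}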
2943 return mddev->dev_sectors; in raid1_size()
2952 int err = -ENOMEM; in setup_conf()
2958 conf->nr_pending = kcalloc(BARRIER_BUCKETS_NR, in setup_conf()
2960 if (!conf->nr_pending) in setup_conf()
2963 conf->nr_waiting = kcalloc(BARRIER_BUCKETS_NR, in setup_conf()
2965 if (!conf->nr_waiting) in setup_conf()
2968 conf->nr_queued = kcalloc(BARRIER_BUCKETS_NR, in setup_conf()
2970 if (!conf->nr_queued) in setup_conf()
2973 conf->barrier = kcalloc(BARRIER_BUCKETS_NR, in setup_conf()
2975 if (!conf->barrier) in setup_conf()
2978 conf->mirrors = kzalloc(array3_size(sizeof(struct raid1_info), in setup_conf()
2979 mddev->raid_disks, 2), in setup_conf()
2981 if (!conf->mirrors) in setup_conf()
2984 conf->tmppage = alloc_page(GFP_KERNEL); in setup_conf()
2985 if (!conf->tmppage) in setup_conf()
2988 conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL); in setup_conf()
2989 if (!conf->poolinfo) in setup_conf()
2991 conf->poolinfo->raid_disks = mddev->raid_disks * 2; in setup_conf()
2992 err = mempool_init(&conf->r1bio_pool, NR_RAID_BIOS, r1bio_pool_alloc, in setup_conf()
2993 rbio_pool_free, conf->poolinfo); in setup_conf()
2997 err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0); in setup_conf()
3001 conf->poolinfo->mddev = mddev; in setup_conf()
3003 err = -EINVAL; in setup_conf()
3004 spin_lock_init(&conf->device_lock); in setup_conf()
3006 int disk_idx = rdev->raid_disk; in setup_conf()
3007 if (disk_idx >= mddev->raid_disks in setup_conf()
3010 if (test_bit(Replacement, &rdev->flags)) in setup_conf()
3011 disk = conf->mirrors + mddev->raid_disks + disk_idx; in setup_conf()
3013 disk = conf->mirrors + disk_idx; in setup_conf()
3015 if (disk->rdev) in setup_conf()
3017 disk->rdev = rdev; in setup_conf()
3018 disk->head_position = 0; in setup_conf()
3019 disk->seq_start = MaxSector; in setup_conf()
3021 conf->raid_disks = mddev->raid_disks; in setup_conf()
3022 conf->mddev = mddev; in setup_conf()
3023 INIT_LIST_HEAD(&conf->retry_list); in setup_conf()
3024 INIT_LIST_HEAD(&conf->bio_end_io_list); in setup_conf()
3026 spin_lock_init(&conf->resync_lock); in setup_conf()
3027 init_waitqueue_head(&conf->wait_barrier); in setup_conf()
3029 bio_list_init(&conf->pending_bio_list); in setup_conf()
3030 conf->pending_count = 0; in setup_conf()
3031 conf->recovery_disabled = mddev->recovery_disabled - 1; in setup_conf()
3033 err = -EIO; in setup_conf()
3034 for (i = 0; i < conf->raid_disks * 2; i++) { in setup_conf()
3036 disk = conf->mirrors + i; in setup_conf()
3038 if (i < conf->raid_disks && in setup_conf()
3039 disk[conf->raid_disks].rdev) { in setup_conf()
3041 if (!disk->rdev) { in setup_conf()
3045 disk->rdev = in setup_conf()
3046 disk[conf->raid_disks].rdev; in setup_conf()
3047 disk[conf->raid_disks].rdev = NULL; in setup_conf()
3048 } else if (!test_bit(In_sync, &disk->rdev->flags)) in setup_conf()
3049 /* Original is not in_sync - bad */ in setup_conf()
3053 if (!disk->rdev || in setup_conf()
3054 !test_bit(In_sync, &disk->rdev->flags)) { in setup_conf()
3055 disk->head_position = 0; in setup_conf()
3056 if (disk->rdev && in setup_conf()
3057 (disk->rdev->saved_raid_disk < 0)) in setup_conf()
3058 conf->fullsync = 1; in setup_conf()
3062 err = -ENOMEM; in setup_conf()
3063 conf->thread = md_register_thread(raid1d, mddev, "raid1"); in setup_conf()
3064 if (!conf->thread) in setup_conf()
3071 mempool_exit(&conf->r1bio_pool); in setup_conf()
3072 kfree(conf->mirrors); in setup_conf()
3073 safe_put_page(conf->tmppage); in setup_conf()
3074 kfree(conf->poolinfo); in setup_conf()
3075 kfree(conf->nr_pending); in setup_conf()
3076 kfree(conf->nr_waiting); in setup_conf()
3077 kfree(conf->nr_queued); in setup_conf()
3078 kfree(conf->barrier); in setup_conf()
3079 bioset_exit(&conf->bio_split); in setup_conf()
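setup_conf() lays mirrors[] out as 2 * raid_disks slots: originals occupy the low half, each replacement sits raid_disks entries above its original, and a replacement with no surviving original is promoted into the low slot as a recovering device. A compact sketch of that layout and promotion pass, with hypothetical minimal types (the promotion here omits the In_sync sanity check the real code performs):

struct rdev_slot { void *rdev; };

static struct rdev_slot *slot_for(struct rdev_slot *mirrors, int raid_disks,
                                  int disk_idx, int is_replacement)
{
        /* replacements live raid_disks entries above their originals */
        return is_replacement ? mirrors + raid_disks + disk_idx
                              : mirrors + disk_idx;
}

static void promote_replacements(struct rdev_slot *mirrors, int raid_disks)
{
        for (int i = 0; i < raid_disks; i++) {
                struct rdev_slot *orig = &mirrors[i];
                struct rdev_slot *repl = &mirrors[raid_disks + i];

                if (repl->rdev && !orig->rdev) {
                        orig->rdev = repl->rdev;        /* becomes a
                                                         * recovering spare */
                        repl->rdev = NULL;
                }
        }
}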
3094 if (mddev->level != 1) { in raid1_run()
3096 mdname(mddev), mddev->level); in raid1_run()
3097 return -EIO; in raid1_run()
3099 if (mddev->reshape_position != MaxSector) { in raid1_run()
3102 return -EIO; in raid1_run()
3105 return -ENOMEM; in raid1_run()
3111 if (mddev->private == NULL) in raid1_run()
3114 conf = mddev->private; in raid1_run()
3119 if (mddev->queue) { in raid1_run()
3120 blk_queue_max_write_same_sectors(mddev->queue, 0); in raid1_run()
3121 blk_queue_max_write_zeroes_sectors(mddev->queue, 0); in raid1_run()
3125 if (!mddev->gendisk) in raid1_run()
3127 disk_stack_limits(mddev->gendisk, rdev->bdev, in raid1_run()
3128 rdev->data_offset << 9); in raid1_run()
3129 if (blk_queue_discard(bdev_get_queue(rdev->bdev))) in raid1_run()
3133 mddev->degraded = 0; in raid1_run()
3134 for (i = 0; i < conf->raid_disks; i++) in raid1_run()
3135 if (conf->mirrors[i].rdev == NULL || in raid1_run()
3136 !test_bit(In_sync, &conf->mirrors[i].rdev->flags) || in raid1_run()
3137 test_bit(Faulty, &conf->mirrors[i].rdev->flags)) in raid1_run()
3138 mddev->degraded++; in raid1_run()
3142 if (conf->raid_disks - mddev->degraded < 1) { in raid1_run()
3143 ret = -EINVAL; in raid1_run()
3147 if (conf->raid_disks - mddev->degraded == 1) in raid1_run()
3148 mddev->recovery_cp = MaxSector; in raid1_run()
3150 if (mddev->recovery_cp != MaxSector) in raid1_run()
3151 pr_info("md/raid1:%s: not clean -- starting background reconstruction\n", in raid1_run()
3154 mdname(mddev), mddev->raid_disks - mddev->degraded, in raid1_run()
3155 mddev->raid_disks); in raid1_run()
3160 mddev->thread = conf->thread; in raid1_run()
3161 conf->thread = NULL; in raid1_run()
3162 mddev->private = conf; in raid1_run()
3163 set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags); in raid1_run()
3167 if (mddev->queue) { in raid1_run()
3170 mddev->queue); in raid1_run()
3173 mddev->queue); in raid1_run()
3178 md_unregister_thread(&mddev->thread); in raid1_run()
3192 mempool_exit(&conf->r1bio_pool); in raid1_free()
3193 kfree(conf->mirrors); in raid1_free()
3194 safe_put_page(conf->tmppage); in raid1_free()
3195 kfree(conf->poolinfo); in raid1_free()
3196 kfree(conf->nr_pending); in raid1_free()
3197 kfree(conf->nr_waiting); in raid1_free()
3198 kfree(conf->nr_queued); in raid1_free()
3199 kfree(conf->barrier); in raid1_free()
3200 bioset_exit(&conf->bio_split); in raid1_free()
3214 if (mddev->external_size && in raid1_resize()
3215 mddev->array_sectors > newsize) in raid1_resize()
3216 return -EINVAL; in raid1_resize()
3217 if (mddev->bitmap) { in raid1_resize()
3218 int ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0); in raid1_resize()
3223 if (sectors > mddev->dev_sectors && in raid1_resize()
3224 mddev->recovery_cp > mddev->dev_sectors) { in raid1_resize()
3225 mddev->recovery_cp = mddev->dev_sectors; in raid1_resize()
3226 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in raid1_resize()
3228 mddev->dev_sectors = sectors; in raid1_resize()
3229 mddev->resync_max_sectors = sectors; in raid1_resize()
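raid1_resize() only forces a resync when the array grows past a checkpoint that had already reached the old end of device: the newly exposed tail was never mirrored, so recovery_cp is pulled back to the old size. A sketch of that rule with hypothetical field names:

#include <stdint.h>

struct md_state { uint64_t dev_sectors, recovery_cp; int recovery_needed; };

static void resize_checkpoint(struct md_state *md, uint64_t new_sectors)
{
        /* A clean array has recovery_cp == MaxSector (all ones), which
         * compares greater than any size, so growing a clean array
         * always takes this branch. */
        if (new_sectors > md->dev_sectors &&
            md->recovery_cp > md->dev_sectors) {
                md->recovery_cp = md->dev_sectors;  /* resync the new tail */
                md->recovery_needed = 1;
        }
        md->dev_sectors = new_sectors;
}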
3237 * 2/ resize conf->mirrors in raid1_reshape()
3241 * Then resize conf->mirrors and swap in the new r1bio pool. in raid1_reshape()
3249 struct r1conf *conf = mddev->private; in raid1_reshape()
3259 if (mddev->chunk_sectors != mddev->new_chunk_sectors || in raid1_reshape()
3260 mddev->layout != mddev->new_layout || in raid1_reshape()
3261 mddev->level != mddev->new_level) { in raid1_reshape()
3262 mddev->new_chunk_sectors = mddev->chunk_sectors; in raid1_reshape()
3263 mddev->new_layout = mddev->layout; in raid1_reshape()
3264 mddev->new_level = mddev->level; in raid1_reshape()
3265 return -EINVAL; in raid1_reshape()
3271 raid_disks = mddev->raid_disks + mddev->delta_disks; in raid1_reshape()
3273 if (raid_disks < conf->raid_disks) { in raid1_reshape()
3275 for (d= 0; d < conf->raid_disks; d++) in raid1_reshape()
3276 if (conf->mirrors[d].rdev) in raid1_reshape()
3279 return -EBUSY; in raid1_reshape()
3284 return -ENOMEM; in raid1_reshape()
3285 newpoolinfo->mddev = mddev; in raid1_reshape()
3286 newpoolinfo->raid_disks = raid_disks * 2; in raid1_reshape()
3300 return -ENOMEM; in raid1_reshape()
3306 oldpool = conf->r1bio_pool; in raid1_reshape()
3307 conf->r1bio_pool = newpool; in raid1_reshape()
3309 for (d = d2 = 0; d < conf->raid_disks; d++) { in raid1_reshape()
3310 struct md_rdev *rdev = conf->mirrors[d].rdev; in raid1_reshape()
3311 if (rdev && rdev->raid_disk != d2) { in raid1_reshape()
3313 rdev->raid_disk = d2; in raid1_reshape()
3317 mdname(mddev), rdev->raid_disk); in raid1_reshape()
3322 kfree(conf->mirrors); in raid1_reshape()
3323 conf->mirrors = newmirrors; in raid1_reshape()
3324 kfree(conf->poolinfo); in raid1_reshape()
3325 conf->poolinfo = newpoolinfo; in raid1_reshape()
3327 spin_lock_irqsave(&conf->device_lock, flags); in raid1_reshape()
3328 mddev->degraded += (raid_disks - conf->raid_disks); in raid1_reshape()
3329 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_reshape()
3330 conf->raid_disks = mddev->raid_disks = raid_disks; in raid1_reshape()
3331 mddev->delta_disks = 0; in raid1_reshape()
3335 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in raid1_reshape()
3336 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in raid1_reshape()
3337 md_wakeup_thread(mddev->thread); in raid1_reshape()
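Before the new pool and mirror table are swapped in, the reshape path above packs surviving rdevs into the low slots of the freshly allocated mirrors[] array, renumbering rdev->raid_disk as it goes. The compaction step in isolation, assuming the destination array is zero-initialized as kzalloc() would leave it (hypothetical minimal types):

struct rdev { int raid_disk; };
struct slot { struct rdev *rdev; };

static void compact_slots(struct slot *oldm, int old_n, struct slot *newm)
{
        int d2 = 0;

        for (int d = 0; d < old_n; d++) {
                struct rdev *r = oldm[d].rdev;

                if (!r)
                        continue;
                r->raid_disk = d2;      /* renumber to the new slot */
                newm[d2++].rdev = r;
        }
}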
3345 struct r1conf *conf = mddev->private; in raid1_quiesce()
3358 if (mddev->level == 5 && mddev->raid_disks == 2) { in raid1_takeover()
3360 mddev->new_level = 1; in raid1_takeover()
3361 mddev->new_layout = 0; in raid1_takeover()
3362 mddev->new_chunk_sectors = 0; in raid1_takeover()
3366 conf->array_frozen = 1; in raid1_takeover()
3372 return ERR_PTR(-EINVAL); in raid1_takeover()
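raid1_takeover() accepts exactly one conversion: a two-device raid5, which is already a mirror in disguise, since XOR parity over a single data block equals that block. A sketch of the gate and the configuration reset, with a hypothetical struct standing in for mddev:

#include <errno.h>

struct md_cfg { int level, raid_disks, new_level, new_layout, new_chunk; };

static int takeover_to_raid1(struct md_cfg *md)
{
        if (md->level != 5 || md->raid_disks != 2)
                return -EINVAL;
        md->new_level = 1;      /* becomes raid1 ...            */
        md->new_layout = 0;     /* ... which has no layout ...  */
        md->new_chunk = 0;      /* ... and no chunk size        */
        return 0;
}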
3410 MODULE_ALIAS("md-personality-3"); /* RAID1 */
3411 MODULE_ALIAS("md-raid1");
3412 MODULE_ALIAS("md-level-1");