Lines Matching full:conf
67 static void allow_barrier(struct r10conf *conf);
68 static void lower_barrier(struct r10conf *conf);
69 static int _enough(struct r10conf *conf, int previous, int ignore);
70 static int enough(struct r10conf *conf, int ignore);
75 static void end_reshape(struct r10conf *conf);
83 #define cmd_before(conf, cmd) \ argument
85 write_sequnlock_irq(&(conf)->resync_lock); \
88 #define cmd_after(conf) write_seqlock_irq(&(conf)->resync_lock) argument
90 #define wait_event_barrier_cmd(conf, cond, cmd) \ argument
91 wait_event_cmd((conf)->wait_barrier, cond, cmd_before(conf, cmd), \
92 cmd_after(conf))
94 #define wait_event_barrier(conf, cond) \ argument
95 wait_event_barrier_cmd(conf, cond, NULL_CMD)
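The cmd_before/cmd_after/wait_event_barrier macros listed above let a caller that holds conf->resync_lock sleep on conf->wait_barrier without holding the lock across the sleep: the lock is dropped (and an optional command such as flush_pending_writes() is run, as in the freeze_array() call further down) before sleeping, and re-taken afterwards. A minimal userspace analogue of that shape, using a pthread mutex and condition variable instead of the kernel's seqlock and wait queue (all names here are illustrative, not taken from the driver):

    #include <pthread.h>
    #include <stdbool.h>

    /* Illustrative stand-ins, not the kernel's types. */
    struct conf {
            pthread_mutex_t resync_lock;   /* plays the role of conf->resync_lock  */
            pthread_cond_t  wait_barrier;  /* plays the role of conf->wait_barrier */
            int             barrier;
    };

    /*
     * Shape of wait_event_barrier_cmd(conf, cond, cmd): called with resync_lock
     * held, returns with it held, and drops it only while running 'cmd' and while
     * sleeping.  In this analogue, wakers must update state and signal while
     * holding resync_lock, mirroring what the driver does under its seqlock.
     */
    static void wait_event_barrier_cmd(struct conf *conf,
                                       bool (*cond)(struct conf *),
                                       void (*cmd)(struct conf *))
    {
            while (!cond(conf)) {
                    if (cmd) {
                            pthread_mutex_unlock(&conf->resync_lock); /* cmd_before */
                            cmd(conf);                                /* e.g. flush pending writes */
                            pthread_mutex_lock(&conf->resync_lock);   /* cmd_after  */
                            if (cond(conf))
                                    break;          /* re-check before sleeping */
                    }
                    pthread_cond_wait(&conf->wait_barrier, &conf->resync_lock);
            }
    }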
108 struct r10conf *conf = data; in r10bio_pool_alloc() local
109 int size = offsetof(struct r10bio, devs[conf->geo.raid_disks]); in r10bio_pool_alloc()
133 struct r10conf *conf = data; in r10buf_pool_alloc() local
140 r10_bio = r10bio_pool_alloc(gfp_flags, conf); in r10buf_pool_alloc()
144 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) || in r10buf_pool_alloc()
145 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery)) in r10buf_pool_alloc()
146 nalloc = conf->copies; /* resync */ in r10buf_pool_alloc()
151 if (!conf->have_replacement) in r10buf_pool_alloc()
168 if (!conf->have_replacement) in r10buf_pool_alloc()
191 &conf->mddev->recovery)) { in r10buf_pool_alloc()
225 rbio_pool_free(r10_bio, conf); in r10buf_pool_alloc()
231 struct r10conf *conf = data; in r10buf_pool_free() local
236 for (j = conf->copies; j--; ) { in r10buf_pool_free()
256 rbio_pool_free(r10bio, conf); in r10buf_pool_free()
259 static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio) in put_all_bios() argument
263 for (i = 0; i < conf->geo.raid_disks; i++) { in put_all_bios()
277 struct r10conf *conf = r10_bio->mddev->private; in free_r10bio() local
279 put_all_bios(conf, r10_bio); in free_r10bio()
280 mempool_free(r10_bio, &conf->r10bio_pool); in free_r10bio()
285 struct r10conf *conf = r10_bio->mddev->private; in put_buf() local
287 mempool_free(r10_bio, &conf->r10buf_pool); in put_buf()
289 lower_barrier(conf); in put_buf()
292 static void wake_up_barrier(struct r10conf *conf) in wake_up_barrier() argument
294 if (wq_has_sleeper(&conf->wait_barrier)) in wake_up_barrier()
295 wake_up(&conf->wait_barrier); in wake_up_barrier()
302 struct r10conf *conf = mddev->private; in reschedule_retry() local
304 spin_lock_irqsave(&conf->device_lock, flags); in reschedule_retry()
305 list_add(&r10_bio->retry_list, &conf->retry_list); in reschedule_retry()
306 conf->nr_queued ++; in reschedule_retry()
307 spin_unlock_irqrestore(&conf->device_lock, flags); in reschedule_retry()
310 wake_up(&conf->wait_barrier); in reschedule_retry()
323 struct r10conf *conf = r10_bio->mddev->private; in raid_end_bio_io() local
335 allow_barrier(conf); in raid_end_bio_io()
345 struct r10conf *conf = r10_bio->mddev->private; in update_head_pos() local
347 conf->mirrors[r10_bio->devs[slot].devnum].head_position = in update_head_pos()
354 static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio, in find_bio_disk() argument
360 for (slot = 0; slot < conf->geo.raid_disks; slot++) { in find_bio_disk()
384 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_read_request() local
410 if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state), in raid10_end_read_request()
416 rdev_dec_pending(rdev, conf->mddev); in raid10_end_read_request()
422 mdname(conf->mddev), in raid10_end_read_request()
460 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_write_request() local
468 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in raid10_end_write_request()
471 rdev = conf->mirrors[dev].replacement; in raid10_end_write_request()
475 rdev = conf->mirrors[dev].rdev; in raid10_end_write_request()
559 rdev_dec_pending(rdev, conf->mddev); in raid10_end_write_request()
652 static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio) in raid10_find_phys() argument
654 struct geom *geo = &conf->geo; in raid10_find_phys()
656 if (conf->reshape_progress != MaxSector && in raid10_find_phys()
657 ((r10bio->sector >= conf->reshape_progress) != in raid10_find_phys()
658 conf->mddev->reshape_backwards)) { in raid10_find_phys()
660 geo = &conf->prev; in raid10_find_phys()
667 static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev) in raid10_find_virt() argument
670 /* Never use conf->prev as this is only called during resync in raid10_find_virt()
673 struct geom *geo = &conf->geo; in raid10_find_virt()
731 static struct md_rdev *read_balance(struct r10conf *conf, in read_balance() argument
745 struct geom *geo = &conf->geo; in read_balance()
747 raid10_find_phys(conf, r10_bio); in read_balance()
763 if ((conf->mddev->recovery_cp < MaxSector in read_balance()
764 && (this_sector + sectors >= conf->next_resync)) || in read_balance()
765 (mddev_is_clustered(conf->mddev) && in read_balance()
766 md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector, in read_balance()
770 for (slot = 0; slot < conf->copies ; slot++) { in read_balance()
780 rdev = rcu_dereference(conf->mirrors[disk].replacement); in read_balance()
783 rdev = rcu_dereference(conf->mirrors[disk].rdev); in read_balance()
850 conf->mirrors[disk].head_position); in read_balance()
858 if (slot >= conf->copies) { in read_balance()
879 static void flush_pending_writes(struct r10conf *conf) in flush_pending_writes() argument
884 spin_lock_irq(&conf->device_lock); in flush_pending_writes()
886 if (conf->pending_bio_list.head) { in flush_pending_writes()
890 bio = bio_list_get(&conf->pending_bio_list); in flush_pending_writes()
891 spin_unlock_irq(&conf->device_lock); in flush_pending_writes()
907 md_bitmap_unplug(conf->mddev->bitmap); in flush_pending_writes()
908 wake_up(&conf->wait_barrier); in flush_pending_writes()
927 spin_unlock_irq(&conf->device_lock); in flush_pending_writes()
952 static void raise_barrier(struct r10conf *conf, int force) in raise_barrier() argument
954 write_seqlock_irq(&conf->resync_lock); in raise_barrier()
955 BUG_ON(force && !conf->barrier); in raise_barrier()
958 wait_event_barrier(conf, force || !conf->nr_waiting); in raise_barrier()
961 WRITE_ONCE(conf->barrier, conf->barrier + 1); in raise_barrier()
964 wait_event_barrier(conf, !atomic_read(&conf->nr_pending) && in raise_barrier()
965 conf->barrier < RESYNC_DEPTH); in raise_barrier()
967 write_sequnlock_irq(&conf->resync_lock); in raise_barrier()
970 static void lower_barrier(struct r10conf *conf) in lower_barrier() argument
974 write_seqlock_irqsave(&conf->resync_lock, flags); in lower_barrier()
975 WRITE_ONCE(conf->barrier, conf->barrier - 1); in lower_barrier()
976 write_sequnlock_irqrestore(&conf->resync_lock, flags); in lower_barrier()
977 wake_up(&conf->wait_barrier); in lower_barrier()
980 static bool stop_waiting_barrier(struct r10conf *conf) in stop_waiting_barrier() argument
985 if (!conf->barrier) in stop_waiting_barrier()
994 if (atomic_read(&conf->nr_pending) && bio_list && in stop_waiting_barrier()
999 if (conf->mddev->thread->tsk == current && in stop_waiting_barrier()
1000 test_bit(MD_RECOVERY_RUNNING, &conf->mddev->recovery) && in stop_waiting_barrier()
1001 conf->nr_queued > 0) in stop_waiting_barrier()
1007 static bool wait_barrier_nolock(struct r10conf *conf) in wait_barrier_nolock() argument
1009 unsigned int seq = read_seqbegin(&conf->resync_lock); in wait_barrier_nolock()
1011 if (READ_ONCE(conf->barrier)) in wait_barrier_nolock()
1014 atomic_inc(&conf->nr_pending); in wait_barrier_nolock()
1015 if (!read_seqretry(&conf->resync_lock, seq)) in wait_barrier_nolock()
1018 if (atomic_dec_and_test(&conf->nr_pending)) in wait_barrier_nolock()
1019 wake_up_barrier(conf); in wait_barrier_nolock()
1024 static bool wait_barrier(struct r10conf *conf, bool nowait) in wait_barrier() argument
1028 if (wait_barrier_nolock(conf)) in wait_barrier()
1031 write_seqlock_irq(&conf->resync_lock); in wait_barrier()
1032 if (conf->barrier) { in wait_barrier()
1037 conf->nr_waiting++; in wait_barrier()
1038 raid10_log(conf->mddev, "wait barrier"); in wait_barrier()
1039 wait_event_barrier(conf, stop_waiting_barrier(conf)); in wait_barrier()
1040 conf->nr_waiting--; in wait_barrier()
1042 if (!conf->nr_waiting) in wait_barrier()
1043 wake_up(&conf->wait_barrier); in wait_barrier()
1047 atomic_inc(&conf->nr_pending); in wait_barrier()
1048 write_sequnlock_irq(&conf->resync_lock); in wait_barrier()
1052 static void allow_barrier(struct r10conf *conf) in allow_barrier() argument
1054 if ((atomic_dec_and_test(&conf->nr_pending)) || in allow_barrier()
1055 (conf->array_freeze_pending)) in allow_barrier()
1056 wake_up_barrier(conf); in allow_barrier()
1059 static void freeze_array(struct r10conf *conf, int extra) in freeze_array() argument
1073 write_seqlock_irq(&conf->resync_lock); in freeze_array()
1074 conf->array_freeze_pending++; in freeze_array()
1075 WRITE_ONCE(conf->barrier, conf->barrier + 1); in freeze_array()
1076 conf->nr_waiting++; in freeze_array()
1077 wait_event_barrier_cmd(conf, atomic_read(&conf->nr_pending) == in freeze_array()
1078 conf->nr_queued + extra, flush_pending_writes(conf)); in freeze_array()
1079 conf->array_freeze_pending--; in freeze_array()
1080 write_sequnlock_irq(&conf->resync_lock); in freeze_array()
1083 static void unfreeze_array(struct r10conf *conf) in unfreeze_array() argument
1086 write_seqlock_irq(&conf->resync_lock); in unfreeze_array()
1087 WRITE_ONCE(conf->barrier, conf->barrier - 1); in unfreeze_array()
1088 conf->nr_waiting--; in unfreeze_array()
1089 wake_up(&conf->wait_barrier); in unfreeze_array()
1090 write_sequnlock_irq(&conf->resync_lock); in unfreeze_array()
1107 struct r10conf *conf = mddev->private; in raid10_unplug() local
1111 spin_lock_irq(&conf->device_lock); in raid10_unplug()
1112 bio_list_merge(&conf->pending_bio_list, &plug->pending); in raid10_unplug()
1113 spin_unlock_irq(&conf->device_lock); in raid10_unplug()
1114 wake_up(&conf->wait_barrier); in raid10_unplug()
1123 wake_up(&conf->wait_barrier); in raid10_unplug()
1149 static bool regular_request_wait(struct mddev *mddev, struct r10conf *conf, in regular_request_wait() argument
1153 if (!wait_barrier(conf, bio->bi_opf & REQ_NOWAIT)) { in regular_request_wait()
1158 bio->bi_iter.bi_sector < conf->reshape_progress && in regular_request_wait()
1159 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) { in regular_request_wait()
1160 allow_barrier(conf); in regular_request_wait()
1165 raid10_log(conf->mddev, "wait reshape"); in regular_request_wait()
1166 wait_event(conf->wait_barrier, in regular_request_wait()
1167 conf->reshape_progress <= bio->bi_iter.bi_sector || in regular_request_wait()
1168 conf->reshape_progress >= bio->bi_iter.bi_sector + in regular_request_wait()
1170 wait_barrier(conf, false); in regular_request_wait()
1178 struct r10conf *conf = mddev->private; in raid10_read_request() local
1193 * we must use the one in conf. in raid10_read_request()
1206 err_rdev = rcu_dereference(conf->mirrors[disk].rdev); in raid10_read_request()
1217 if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors)) in raid10_read_request()
1219 rdev = read_balance(conf, r10_bio, &max_sectors); in raid10_read_request()
1236 gfp, &conf->bio_split); in raid10_read_request()
1238 allow_barrier(conf); in raid10_read_request()
1240 wait_barrier(conf, false); in raid10_read_request()
1280 struct r10conf *conf = mddev->private; in raid10_write_one_disk() local
1286 rdev = conf->mirrors[devnum].replacement; in raid10_write_one_disk()
1290 rdev = conf->mirrors[devnum].rdev; in raid10_write_one_disk()
1293 rdev = conf->mirrors[devnum].rdev; in raid10_write_one_disk()
1306 &conf->mirrors[devnum].rdev->flags) in raid10_write_one_disk()
1307 && enough(conf, devnum)) in raid10_write_one_disk()
1311 if (conf->mddev->gendisk) in raid10_write_one_disk()
1312 trace_block_bio_remap(mbio, disk_devt(conf->mddev->gendisk), in raid10_write_one_disk()
1327 spin_lock_irqsave(&conf->device_lock, flags); in raid10_write_one_disk()
1328 bio_list_add(&conf->pending_bio_list, mbio); in raid10_write_one_disk()
1329 spin_unlock_irqrestore(&conf->device_lock, flags); in raid10_write_one_disk()
1337 struct r10conf *conf = mddev->private; in wait_blocked_dev() local
1343 for (i = 0; i < conf->copies; i++) { in wait_blocked_dev()
1344 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in wait_blocked_dev()
1346 conf->mirrors[i].replacement); in wait_blocked_dev()
1391 allow_barrier(conf); in wait_blocked_dev()
1392 raid10_log(conf->mddev, "%s wait rdev %d blocked", in wait_blocked_dev()
1395 wait_barrier(conf, false); in wait_blocked_dev()
1403 struct r10conf *conf = mddev->private; in raid10_write_request() local
1419 prepare_to_wait(&conf->wait_barrier, in raid10_write_request()
1426 finish_wait(&conf->wait_barrier, &w); in raid10_write_request()
1430 if (!regular_request_wait(mddev, conf, bio, sectors)) in raid10_write_request()
1434 ? (bio->bi_iter.bi_sector < conf->reshape_safe && in raid10_write_request()
1435 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) in raid10_write_request()
1436 : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe && in raid10_write_request()
1437 bio->bi_iter.bi_sector < conf->reshape_progress))) { in raid10_write_request()
1439 mddev->reshape_position = conf->reshape_progress; in raid10_write_request()
1444 allow_barrier(conf); in raid10_write_request()
1448 raid10_log(conf->mddev, "wait reshape metadata"); in raid10_write_request()
1452 conf->reshape_safe = mddev->reshape_position; in raid10_write_request()
1466 raid10_find_phys(conf, r10_bio); in raid10_write_request()
1473 for (i = 0; i < conf->copies; i++) { in raid10_write_request()
1475 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev); in raid10_write_request()
1477 conf->mirrors[d].replacement); in raid10_write_request()
1540 GFP_NOIO, &conf->bio_split); in raid10_write_request()
1542 allow_barrier(conf); in raid10_write_request()
1544 wait_barrier(conf, false); in raid10_write_request()
1554 for (i = 0; i < conf->copies; i++) { in raid10_write_request()
1565 struct r10conf *conf = mddev->private; in __make_request() local
1568 r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO); in __make_request()
1578 conf->geo.raid_disks); in __make_request()
1588 struct r10conf *conf = r10bio->mddev->private; in raid_end_discard_bio() local
1593 allow_barrier(conf); in raid_end_discard_bio()
1611 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_discard_request() local
1622 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in raid10_end_discard_request()
1624 rdev = conf->mirrors[dev].replacement; in raid10_end_discard_request()
1632 rdev = conf->mirrors[dev].rdev; in raid10_end_discard_request()
1636 rdev_dec_pending(rdev, conf->mddev); in raid10_end_discard_request()
1647 struct r10conf *conf = mddev->private; in raid10_handle_discard() local
1648 struct geom *geo = &conf->geo; in raid10_handle_discard()
1673 wait_barrier(conf, false); in raid10_handle_discard()
1710 split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split); in raid10_handle_discard()
1712 allow_barrier(conf); in raid10_handle_discard()
1715 wait_barrier(conf, false); in raid10_handle_discard()
1720 split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split); in raid10_handle_discard()
1722 allow_barrier(conf); in raid10_handle_discard()
1726 wait_barrier(conf, false); in raid10_handle_discard()
1756 r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO); in raid10_handle_discard()
1785 struct md_rdev *rdev = rcu_dereference(conf->mirrors[disk].rdev); in raid10_handle_discard()
1787 conf->mirrors[disk].replacement); in raid10_handle_discard()
1848 struct md_rdev *rdev = conf->mirrors[disk].rdev; in raid10_handle_discard()
1862 struct md_rdev *rrdev = conf->mirrors[disk].replacement; in raid10_handle_discard()
1884 wait_barrier(conf, false); in raid10_handle_discard()
1892 allow_barrier(conf); in raid10_handle_discard()
1898 struct r10conf *conf = mddev->private; in raid10_make_request() local
1899 sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask); in raid10_make_request()
1920 && (conf->geo.near_copies < conf->geo.raid_disks in raid10_make_request()
1921 || conf->prev.near_copies < in raid10_make_request()
1922 conf->prev.raid_disks))) in raid10_make_request()
1929 wake_up_barrier(conf); in raid10_make_request()
1935 struct r10conf *conf = mddev->private; in raid10_status() local
1938 if (conf->geo.near_copies < conf->geo.raid_disks) in raid10_status()
1940 if (conf->geo.near_copies > 1) in raid10_status()
1941 seq_printf(seq, " %d near-copies", conf->geo.near_copies); in raid10_status()
1942 if (conf->geo.far_copies > 1) { in raid10_status()
1943 if (conf->geo.far_offset) in raid10_status()
1944 seq_printf(seq, " %d offset-copies", conf->geo.far_copies); in raid10_status()
1946 seq_printf(seq, " %d far-copies", conf->geo.far_copies); in raid10_status()
1947 if (conf->geo.far_set_size != conf->geo.raid_disks) in raid10_status()
1948 seq_printf(seq, " %d devices per set", conf->geo.far_set_size); in raid10_status()
1950 seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks, in raid10_status()
1951 conf->geo.raid_disks - mddev->degraded); in raid10_status()
1953 for (i = 0; i < conf->geo.raid_disks; i++) { in raid10_status()
1954 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in raid10_status()
1966 static int _enough(struct r10conf *conf, int previous, int ignore) in _enough() argument
1972 disks = conf->prev.raid_disks; in _enough()
1973 ncopies = conf->prev.near_copies; in _enough()
1975 disks = conf->geo.raid_disks; in _enough()
1976 ncopies = conf->geo.near_copies; in _enough()
1981 int n = conf->copies; in _enough()
1987 (rdev = rcu_dereference(conf->mirrors[this].rdev)) && in _enough()
2002 static int enough(struct r10conf *conf, int ignore) in enough() argument
2009 return _enough(conf, 0, ignore) && in enough()
2010 _enough(conf, 1, ignore); in enough()
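_enough() and enough() above decide whether the array still holds at least one working copy of every block when device `ignore` is treated as failed. A self-contained sketch of that rule, simplified to the case where far_copies == 1 and raid_disks is a multiple of near_copies (the helper name and the working[] array are made up for illustration, and the previous/current-geometry split and RCU handling are omitted):

    #include <stdbool.h>

    /*
     * Walk the devices in groups of 'ncopies' consecutive slots (the slots that
     * hold copies of the same data) and require at least one working device in
     * each group.  'ignore' is a slot to treat as failed, or -1 for none.
     */
    static bool raid10_has_enough(const bool *working, int disks, int ncopies, int ignore)
    {
            for (int first = 0; first < disks; first += ncopies) {
                    int cnt = 0;

                    for (int n = 0; n < ncopies; n++) {
                            int this = (first + n) % disks;

                            if (this != ignore && working[this])
                                    cnt++;
                    }
                    if (cnt == 0)
                            return false;   /* a whole copy set is gone: data lost */
            }
            return true;
    }

    /* Example: 4 devices, near_copies = 2.  Losing devices 0 and 1 (the same
     * copy set) is fatal; losing devices 0 and 2 (different sets) is survivable. */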
2030 struct r10conf *conf = mddev->private; in raid10_error() local
2033 spin_lock_irqsave(&conf->device_lock, flags); in raid10_error()
2035 if (test_bit(In_sync, &rdev->flags) && !enough(conf, rdev->raid_disk)) { in raid10_error()
2039 spin_unlock_irqrestore(&conf->device_lock, flags); in raid10_error()
2051 spin_unlock_irqrestore(&conf->device_lock, flags); in raid10_error()
2055 mdname(mddev), conf->geo.raid_disks - mddev->degraded); in raid10_error()
2058 static void print_conf(struct r10conf *conf) in print_conf() argument
2063 pr_debug("RAID10 conf printout:\n"); in print_conf()
2064 if (!conf) { in print_conf()
2065 pr_debug("(!conf)\n"); in print_conf()
2068 pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded, in print_conf()
2069 conf->geo.raid_disks); in print_conf()
2073 for (i = 0; i < conf->geo.raid_disks; i++) { in print_conf()
2074 rdev = conf->mirrors[i].rdev; in print_conf()
2083 static void close_sync(struct r10conf *conf) in close_sync() argument
2085 wait_barrier(conf, false); in close_sync()
2086 allow_barrier(conf); in close_sync()
2088 mempool_exit(&conf->r10buf_pool); in close_sync()
2094 struct r10conf *conf = mddev->private; in raid10_spare_active() local
2103 for (i = 0; i < conf->geo.raid_disks; i++) { in raid10_spare_active()
2104 tmp = conf->mirrors + i; in raid10_spare_active()
2131 spin_lock_irqsave(&conf->device_lock, flags); in raid10_spare_active()
2133 spin_unlock_irqrestore(&conf->device_lock, flags); in raid10_spare_active()
2135 print_conf(conf); in raid10_spare_active()
2141 struct r10conf *conf = mddev->private; in raid10_add_disk() local
2145 int last = conf->geo.raid_disks - 1; in raid10_add_disk()
2152 if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1)) in raid10_add_disk()
2162 rdev->saved_raid_disk < conf->geo.raid_disks && in raid10_add_disk()
2163 conf->mirrors[rdev->saved_raid_disk].rdev == NULL) in raid10_add_disk()
2168 struct raid10_info *p = &conf->mirrors[mirror]; in raid10_add_disk()
2182 conf->fullsync = 1; in raid10_add_disk()
2196 conf->fullsync = 1; in raid10_add_disk()
2201 print_conf(conf); in raid10_add_disk()
2207 struct r10conf *conf = mddev->private; in raid10_remove_disk() local
2213 print_conf(conf); in raid10_remove_disk()
2216 p = conf->mirrors + number; in raid10_remove_disk()
2235 number < conf->geo.raid_disks && in raid10_remove_disk()
2236 enough(conf, -1)) { in raid10_remove_disk()
2265 print_conf(conf); in raid10_remove_disk()
2271 struct r10conf *conf = r10_bio->mddev->private; in __end_sync_read() local
2280 &conf->mirrors[d].rdev->corrected_errors); in __end_sync_read()
2285 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev); in __end_sync_read()
2298 struct r10conf *conf = r10_bio->mddev->private; in end_sync_read() local
2299 int d = find_bio_disk(conf, r10_bio, bio, NULL, NULL); in end_sync_read()
2343 struct r10conf *conf = mddev->private; in end_sync_write() local
2351 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in end_sync_write()
2353 rdev = conf->mirrors[d].replacement; in end_sync_write()
2355 rdev = conf->mirrors[d].rdev; in end_sync_write()
2396 struct r10conf *conf = mddev->private; in sync_request_write() local
2405 for (i=0; i<conf->copies; i++) in sync_request_write()
2409 if (i == conf->copies) in sync_request_write()
2420 for (i=0 ; i < conf->copies ; i++) { in sync_request_write()
2434 rdev = conf->mirrors[d].rdev; in sync_request_write()
2468 bio_reset(tbio, conf->mirrors[d].rdev->bdev, REQ_OP_WRITE); in sync_request_write()
2479 atomic_inc(&conf->mirrors[d].rdev->nr_pending); in sync_request_write()
2481 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio)); in sync_request_write()
2483 if (test_bit(FailFast, &conf->mirrors[d].rdev->flags)) in sync_request_write()
2485 tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset; in sync_request_write()
2492 for (i = 0; i < conf->copies; i++) { in sync_request_write()
2503 md_sync_acct(conf->mirrors[d].replacement->bdev, in sync_request_write()
2535 struct r10conf *conf = mddev->private; in fix_recovery_read_error() local
2553 rdev = conf->mirrors[dr].rdev; in fix_recovery_read_error()
2561 rdev = conf->mirrors[dw].rdev; in fix_recovery_read_error()
2583 if (rdev != conf->mirrors[dw].rdev) { in fix_recovery_read_error()
2585 struct md_rdev *rdev2 = conf->mirrors[dw].rdev; in fix_recovery_read_error()
2593 conf->mirrors[dw].recovery_disabled in fix_recovery_read_error()
2610 struct r10conf *conf = mddev->private; in recovery_request_write() local
2634 atomic_inc(&conf->mirrors[d].rdev->nr_pending); in recovery_request_write()
2635 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio)); in recovery_request_write()
2639 atomic_inc(&conf->mirrors[d].replacement->nr_pending); in recovery_request_write()
2640 md_sync_acct(conf->mirrors[d].replacement->bdev, in recovery_request_write()
2714 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio) in fix_read_error() argument
2725 rdev = conf->mirrors[d].rdev; in fix_read_error()
2760 rdev = rcu_dereference(conf->mirrors[d].rdev); in fix_read_error()
2772 conf->tmppage, in fix_read_error()
2780 if (sl == conf->copies) in fix_read_error()
2791 rdev = conf->mirrors[dn].rdev; in fix_read_error()
2810 sl = conf->copies; in fix_read_error()
2813 rdev = rcu_dereference(conf->mirrors[d].rdev); in fix_read_error()
2824 s, conf->tmppage, REQ_OP_WRITE) in fix_read_error()
2844 sl = conf->copies; in fix_read_error()
2847 rdev = rcu_dereference(conf->mirrors[d].rdev); in fix_read_error()
2858 s, conf->tmppage, REQ_OP_READ)) { in fix_read_error()
2895 struct r10conf *conf = mddev->private; in narrow_write_error() local
2896 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev; in narrow_write_error()
2956 struct r10conf *conf = mddev->private; in handle_read_error() local
2974 freeze_array(conf, 1); in handle_read_error()
2975 fix_read_error(conf, mddev, r10_bio); in handle_read_error()
2976 unfreeze_array(conf); in handle_read_error()
2981 allow_barrier(conf); in handle_read_error()
2986 static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) in handle_write_completed() argument
2999 for (m = 0; m < conf->copies; m++) { in handle_write_completed()
3001 rdev = conf->mirrors[dev].rdev; in handle_write_completed()
3015 md_error(conf->mddev, rdev); in handle_write_completed()
3017 rdev = conf->mirrors[dev].replacement; in handle_write_completed()
3032 md_error(conf->mddev, rdev); in handle_write_completed()
3038 for (m = 0; m < conf->copies; m++) { in handle_write_completed()
3041 rdev = conf->mirrors[dev].rdev; in handle_write_completed()
3047 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
3051 md_error(conf->mddev, rdev); in handle_write_completed()
3055 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
3058 rdev = conf->mirrors[dev].replacement; in handle_write_completed()
3064 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
3068 spin_lock_irq(&conf->device_lock); in handle_write_completed()
3069 list_add(&r10_bio->retry_list, &conf->bio_end_io_list); in handle_write_completed()
3070 conf->nr_queued++; in handle_write_completed()
3071 spin_unlock_irq(&conf->device_lock); in handle_write_completed()
3076 wake_up(&conf->wait_barrier); in handle_write_completed()
3077 md_wakeup_thread(conf->mddev->thread); in handle_write_completed()
3092 struct r10conf *conf = mddev->private; in raid10d() local
3093 struct list_head *head = &conf->retry_list; in raid10d()
3098 if (!list_empty_careful(&conf->bio_end_io_list) && in raid10d()
3101 spin_lock_irqsave(&conf->device_lock, flags); in raid10d()
3103 while (!list_empty(&conf->bio_end_io_list)) { in raid10d()
3104 list_move(conf->bio_end_io_list.prev, &tmp); in raid10d()
3105 conf->nr_queued--; in raid10d()
3108 spin_unlock_irqrestore(&conf->device_lock, flags); in raid10d()
3126 flush_pending_writes(conf); in raid10d()
3128 spin_lock_irqsave(&conf->device_lock, flags); in raid10d()
3130 spin_unlock_irqrestore(&conf->device_lock, flags); in raid10d()
3135 conf->nr_queued--; in raid10d()
3136 spin_unlock_irqrestore(&conf->device_lock, flags); in raid10d()
3139 conf = mddev->private; in raid10d()
3142 handle_write_completed(conf, r10_bio); in raid10d()
3161 static int init_resync(struct r10conf *conf) in init_resync() argument
3166 BUG_ON(mempool_initialized(&conf->r10buf_pool)); in init_resync()
3167 conf->have_replacement = 0; in init_resync()
3168 for (i = 0; i < conf->geo.raid_disks; i++) in init_resync()
3169 if (conf->mirrors[i].replacement) in init_resync()
3170 conf->have_replacement = 1; in init_resync()
3171 ret = mempool_init(&conf->r10buf_pool, buffs, in init_resync()
3172 r10buf_pool_alloc, r10buf_pool_free, conf); in init_resync()
3175 conf->next_resync = 0; in init_resync()
3179 static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf) in raid10_alloc_init_r10buf() argument
3181 struct r10bio *r10bio = mempool_alloc(&conf->r10buf_pool, GFP_NOIO); in raid10_alloc_init_r10buf()
3187 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) || in raid10_alloc_init_r10buf()
3188 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery)) in raid10_alloc_init_r10buf()
3189 nalloc = conf->copies; /* resync */ in raid10_alloc_init_r10buf()
3212 static void raid10_set_cluster_sync_high(struct r10conf *conf) in raid10_set_cluster_sync_high() argument
3229 chunks = conf->geo.raid_disks / conf->geo.near_copies; in raid10_set_cluster_sync_high()
3230 if (conf->geo.raid_disks % conf->geo.near_copies == 0) in raid10_set_cluster_sync_high()
3234 window_size = (chunks + extra_chunk) * conf->mddev->chunk_sectors; in raid10_set_cluster_sync_high()
3242 conf->cluster_sync_high = conf->cluster_sync_low + window_size; in raid10_set_cluster_sync_high()
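raid10_set_cluster_sync_high() computes the resync window announced to other cluster nodes from the geometry. Based only on the lines listed here, and assuming the elided branch sets extra_chunk to 0 when raid_disks divides evenly by near_copies and to 1 otherwise, the arithmetic works out as in this hedged sketch (other parts of the function, such as any minimum window, are not modelled):

    /* Worked example of the window arithmetic listed above. */
    static unsigned long cluster_window_sectors(int raid_disks, int near_copies,
                                                unsigned long chunk_sectors)
    {
            int chunks = raid_disks / near_copies;                 /* 5 / 2 = 2       */
            int extra_chunk = (raid_disks % near_copies) ? 1 : 0;  /* remainder -> 1  */

            return (chunks + extra_chunk) * chunk_sectors;         /* 3 * 1024 = 3072 */
    }
    /* conf->cluster_sync_high then becomes conf->cluster_sync_low + 3072 for
     * 5 disks, 2 near-copies and 1024-sector chunks. */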
3280 struct r10conf *conf = mddev->private; in raid10_sync_request() local
3289 sector_t chunk_mask = conf->geo.chunk_mask; in raid10_sync_request()
3292 if (!mempool_initialized(&conf->r10buf_pool)) in raid10_sync_request()
3293 if (init_resync(conf)) in raid10_sync_request()
3306 conf->fullsync == 0) { in raid10_sync_request()
3317 conf->cluster_sync_low = 0; in raid10_sync_request()
3318 conf->cluster_sync_high = 0; in raid10_sync_request()
3330 end_reshape(conf); in raid10_sync_request()
3331 close_sync(conf); in raid10_sync_request()
3339 else for (i = 0; i < conf->geo.raid_disks; i++) { in raid10_sync_request()
3341 raid10_find_virt(conf, mddev->curr_resync, i); in raid10_sync_request()
3347 if ((!mddev->bitmap || conf->fullsync) in raid10_sync_request()
3348 && conf->have_replacement in raid10_sync_request()
3354 for (i = 0; i < conf->geo.raid_disks; i++) { in raid10_sync_request()
3356 rcu_dereference(conf->mirrors[i].replacement); in raid10_sync_request()
3362 conf->fullsync = 0; in raid10_sync_request()
3365 close_sync(conf); in raid10_sync_request()
3373 if (chunks_skipped >= conf->geo.raid_disks) { in raid10_sync_request()
3387 if (conf->geo.near_copies < conf->geo.raid_disks && in raid10_sync_request()
3395 if (conf->nr_waiting) in raid10_sync_request()
3419 for (i = 0 ; i < conf->geo.raid_disks; i++) { in raid10_sync_request()
3427 struct raid10_info *mirror = &conf->mirrors[i]; in raid10_sync_request()
3450 sect = raid10_find_virt(conf, sector_nr, i); in raid10_sync_request()
3470 !conf->fullsync) { in raid10_sync_request()
3483 r10_bio = raid10_alloc_init_r10buf(conf); in raid10_sync_request()
3485 raise_barrier(conf, rb2 != NULL); in raid10_sync_request()
3495 raid10_find_phys(conf, r10_bio); in raid10_sync_request()
3501 for (j = 0; j < conf->geo.raid_disks; j++) { in raid10_sync_request()
3503 conf->mirrors[j].rdev); in raid10_sync_request()
3514 for (j=0; j<conf->copies;j++) { in raid10_sync_request()
3519 rcu_dereference(conf->mirrors[d].rdev); in raid10_sync_request()
3555 for (k=0; k<conf->copies; k++) in raid10_sync_request()
3558 BUG_ON(k == conf->copies); in raid10_sync_request()
3599 if (j == conf->copies) { in raid10_sync_request()
3607 for (k = 0; k < conf->copies; k++) in raid10_sync_request()
3650 for (; j < conf->copies; j++) { in raid10_sync_request()
3652 if (conf->mirrors[d].rdev && in raid10_sync_request()
3654 &conf->mirrors[d].rdev->flags)) in raid10_sync_request()
3684 (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high)); in raid10_sync_request()
3688 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, in raid10_sync_request()
3696 r10_bio = raid10_alloc_init_r10buf(conf); in raid10_sync_request()
3701 raise_barrier(conf, 0); in raid10_sync_request()
3702 conf->next_resync = sector_nr; in raid10_sync_request()
3707 raid10_find_phys(conf, r10_bio); in raid10_sync_request()
3710 for (i = 0; i < conf->copies; i++) { in raid10_sync_request()
3722 rdev = rcu_dereference(conf->mirrors[d].rdev); in raid10_sync_request()
3752 rdev = rcu_dereference(conf->mirrors[d].replacement); in raid10_sync_request()
3777 for (i=0; i<conf->copies; i++) { in raid10_sync_request()
3780 rdev_dec_pending(conf->mirrors[d].rdev, in raid10_sync_request()
3785 conf->mirrors[d].replacement, in raid10_sync_request()
3821 if (conf->cluster_sync_high < sector_nr + nr_sectors) { in raid10_sync_request()
3822 conf->cluster_sync_low = mddev->curr_resync_completed; in raid10_sync_request()
3823 raid10_set_cluster_sync_high(conf); in raid10_sync_request()
3826 conf->cluster_sync_low, in raid10_sync_request()
3827 conf->cluster_sync_high); in raid10_sync_request()
3834 for (i = 0; i < conf->geo.raid_disks; i++) { in raid10_sync_request()
3840 sect_va1 = raid10_find_virt(conf, sector_nr, i); in raid10_sync_request()
3842 if (conf->cluster_sync_high < sect_va1 + nr_sectors) { in raid10_sync_request()
3848 sect_va2 = raid10_find_virt(conf, in raid10_sync_request()
3851 if (conf->cluster_sync_low == 0 || in raid10_sync_request()
3852 conf->cluster_sync_low > sect_va2) in raid10_sync_request()
3853 conf->cluster_sync_low = sect_va2; in raid10_sync_request()
3857 raid10_set_cluster_sync_high(conf); in raid10_sync_request()
3859 conf->cluster_sync_low, in raid10_sync_request()
3860 conf->cluster_sync_high); in raid10_sync_request()
3904 struct r10conf *conf = mddev->private; in raid10_size() local
3907 raid_disks = min(conf->geo.raid_disks, in raid10_size()
3908 conf->prev.raid_disks); in raid10_size()
3910 sectors = conf->dev_sectors; in raid10_size()
3912 size = sectors >> conf->geo.chunk_shift; in raid10_size()
3913 sector_div(size, conf->geo.far_copies); in raid10_size()
3915 sector_div(size, conf->geo.near_copies); in raid10_size()
3917 return size << conf->geo.chunk_shift; in raid10_size()
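raid10_size() converts per-device sectors into exported array capacity by working in whole chunks: divide by far_copies, scale by the number of devices (that multiply is on a line not matched by this search and is inferred), divide by near_copies, and shift back to sectors. A small worked re-derivation under those assumptions:

    #include <stdint.h>

    /* Illustrative re-derivation of the capacity math in raid10_size(). */
    static uint64_t raid10_capacity_sectors(uint64_t dev_sectors, int raid_disks,
                                            int near_copies, int far_copies,
                                            int chunk_shift)
    {
            uint64_t size = dev_sectors >> chunk_shift;  /* chunks per device          */

            size /= far_copies;                          /* far copies cost capacity   */
            size *= raid_disks;                          /* chunks across all devices  */
            size /= near_copies;                         /* near copies cost capacity  */
            return size << chunk_shift;                  /* back to sectors            */
    }

    /*
     * Example: 4 devices of 1 TiB (2147483648 sectors), near_copies = 2,
     * far_copies = 1, 512 KiB chunks (chunk_shift = 10):
     *   chunks/device = 2097152, /1 = 2097152, *4 = 8388608, /2 = 4194304,
     *   capacity      = 4194304 << 10 = 4294967296 sectors = 2 TiB.
     */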
3920 static void calc_sectors(struct r10conf *conf, sector_t size) in calc_sectors() argument
3923 * actually be used, and set conf->dev_sectors and in calc_sectors()
3924 * conf->stride in calc_sectors()
3927 size = size >> conf->geo.chunk_shift; in calc_sectors()
3928 sector_div(size, conf->geo.far_copies); in calc_sectors()
3929 size = size * conf->geo.raid_disks; in calc_sectors()
3930 sector_div(size, conf->geo.near_copies); in calc_sectors()
3933 size = size * conf->copies; in calc_sectors()
3938 size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks); in calc_sectors()
3940 conf->dev_sectors = size << conf->geo.chunk_shift; in calc_sectors()
3942 if (conf->geo.far_offset) in calc_sectors()
3943 conf->geo.stride = 1 << conf->geo.chunk_shift; in calc_sectors()
3945 sector_div(size, conf->geo.far_copies); in calc_sectors()
3946 conf->geo.stride = size << conf->geo.chunk_shift; in calc_sectors()
4009 struct r10conf *conf = NULL; in setup_conf() local
4029 conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL); in setup_conf()
4030 if (!conf) in setup_conf()
4034 conf->mirrors = kcalloc(mddev->raid_disks + max(0, -mddev->delta_disks), in setup_conf()
4037 if (!conf->mirrors) in setup_conf()
4040 conf->tmppage = alloc_page(GFP_KERNEL); in setup_conf()
4041 if (!conf->tmppage) in setup_conf()
4044 conf->geo = geo; in setup_conf()
4045 conf->copies = copies; in setup_conf()
4046 err = mempool_init(&conf->r10bio_pool, NR_RAID_BIOS, r10bio_pool_alloc, in setup_conf()
4047 rbio_pool_free, conf); in setup_conf()
4051 err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0); in setup_conf()
4055 calc_sectors(conf, mddev->dev_sectors); in setup_conf()
4057 conf->prev = conf->geo; in setup_conf()
4058 conf->reshape_progress = MaxSector; in setup_conf()
4060 if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) { in setup_conf()
4064 conf->reshape_progress = mddev->reshape_position; in setup_conf()
4065 if (conf->prev.far_offset) in setup_conf()
4066 conf->prev.stride = 1 << conf->prev.chunk_shift; in setup_conf()
4069 conf->prev.stride = conf->dev_sectors; in setup_conf()
4071 conf->reshape_safe = conf->reshape_progress; in setup_conf()
4072 spin_lock_init(&conf->device_lock); in setup_conf()
4073 INIT_LIST_HEAD(&conf->retry_list); in setup_conf()
4074 INIT_LIST_HEAD(&conf->bio_end_io_list); in setup_conf()
4076 seqlock_init(&conf->resync_lock); in setup_conf()
4077 init_waitqueue_head(&conf->wait_barrier); in setup_conf()
4078 atomic_set(&conf->nr_pending, 0); in setup_conf()
4081 conf->thread = md_register_thread(raid10d, mddev, "raid10"); in setup_conf()
4082 if (!conf->thread) in setup_conf()
4085 conf->mddev = mddev; in setup_conf()
4086 return conf; in setup_conf()
4089 if (conf) { in setup_conf()
4090 mempool_exit(&conf->r10bio_pool); in setup_conf()
4091 kfree(conf->mirrors); in setup_conf()
4092 safe_put_page(conf->tmppage); in setup_conf()
4093 bioset_exit(&conf->bio_split); in setup_conf()
4094 kfree(conf); in setup_conf()
4099 static void raid10_set_io_opt(struct r10conf *conf) in raid10_set_io_opt() argument
4101 int raid_disks = conf->geo.raid_disks; in raid10_set_io_opt()
4103 if (!(conf->geo.raid_disks % conf->geo.near_copies)) in raid10_set_io_opt()
4104 raid_disks /= conf->geo.near_copies; in raid10_set_io_opt()
4105 blk_queue_io_opt(conf->mddev->queue, (conf->mddev->chunk_sectors << 9) * in raid10_set_io_opt()
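raid10_set_io_opt() advertises an optimal I/O size of one chunk per copy set: when raid_disks divides evenly by near_copies it uses raid_disks / near_copies data chunks, otherwise the full raid_disks. The chunk size is kept in 512-byte sectors, hence the << 9. A small mirror of that arithmetic with a worked example:

    /* Mirror of the io_opt arithmetic listed above. */
    static unsigned int raid10_io_opt_bytes(int raid_disks, int near_copies,
                                            unsigned int chunk_sectors)
    {
            int data_disks = raid_disks;

            if (!(raid_disks % near_copies))
                    data_disks = raid_disks / near_copies;

            return (chunk_sectors << 9) * data_disks;
    }

    /* Example: 4 devices, 2 near-copies, 1024-sector (512 KiB) chunks:
     * io_opt = (1024 << 9) * 2 = 1 MiB. */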
4111 struct r10conf *conf; in raid10_run() local
4123 conf = setup_conf(mddev); in raid10_run()
4124 if (IS_ERR(conf)) in raid10_run()
4125 return PTR_ERR(conf); in raid10_run()
4126 mddev->private = conf; in raid10_run()
4128 conf = mddev->private; in raid10_run()
4129 if (!conf) in raid10_run()
4132 if (mddev_is_clustered(conf->mddev)) { in raid10_run()
4144 mddev->thread = conf->thread; in raid10_run()
4145 conf->thread = NULL; in raid10_run()
4152 raid10_set_io_opt(conf); in raid10_run()
4161 if (disk_idx >= conf->geo.raid_disks && in raid10_run()
4162 disk_idx >= conf->prev.raid_disks) in raid10_run()
4164 disk = conf->mirrors + disk_idx; in raid10_run()
4192 if (!enough(conf, -1)) { in raid10_run()
4198 if (conf->reshape_progress != MaxSector) { in raid10_run()
4200 if (conf->geo.far_copies != 1 && in raid10_run()
4201 conf->geo.far_offset == 0) in raid10_run()
4203 if (conf->prev.far_copies != 1 && in raid10_run()
4204 conf->prev.far_offset == 0) in raid10_run()
4210 i < conf->geo.raid_disks in raid10_run()
4211 || i < conf->prev.raid_disks; in raid10_run()
4214 disk = conf->mirrors + i; in raid10_run()
4229 conf->fullsync = 1; in raid10_run()
4235 conf->fullsync = 1; in raid10_run()
4245 mdname(mddev), conf->geo.raid_disks - mddev->degraded, in raid10_run()
4246 conf->geo.raid_disks); in raid10_run()
4250 mddev->dev_sectors = conf->dev_sectors; in raid10_run()
4259 if (conf->reshape_progress != MaxSector) { in raid10_run()
4262 before_length = ((1 << conf->prev.chunk_shift) * in raid10_run()
4263 conf->prev.far_copies); in raid10_run()
4264 after_length = ((1 << conf->geo.chunk_shift) * in raid10_run()
4265 conf->geo.far_copies); in raid10_run()
4272 conf->offset_diff = min_offset_diff; in raid10_run()
4288 mempool_exit(&conf->r10bio_pool); in raid10_run()
4289 safe_put_page(conf->tmppage); in raid10_run()
4290 kfree(conf->mirrors); in raid10_run()
4291 kfree(conf); in raid10_run()
4299 struct r10conf *conf = priv; in raid10_free() local
4301 mempool_exit(&conf->r10bio_pool); in raid10_free()
4302 safe_put_page(conf->tmppage); in raid10_free()
4303 kfree(conf->mirrors); in raid10_free()
4304 kfree(conf->mirrors_old); in raid10_free()
4305 kfree(conf->mirrors_new); in raid10_free()
4306 bioset_exit(&conf->bio_split); in raid10_free()
4307 kfree(conf); in raid10_free()
4312 struct r10conf *conf = mddev->private; in raid10_quiesce() local
4315 raise_barrier(conf, 0); in raid10_quiesce()
4317 lower_barrier(conf); in raid10_quiesce()
4334 struct r10conf *conf = mddev->private; in raid10_resize() local
4340 if (conf->geo.far_copies > 1 && !conf->geo.far_offset) in raid10_resize()
4359 calc_sectors(conf, sectors); in raid10_resize()
4360 mddev->dev_sectors = conf->dev_sectors; in raid10_resize()
4368 struct r10conf *conf; in raid10_takeover_raid0() local
4388 conf = setup_conf(mddev); in raid10_takeover_raid0()
4389 if (!IS_ERR(conf)) { in raid10_takeover_raid0()
4395 WRITE_ONCE(conf->barrier, 1); in raid10_takeover_raid0()
4398 return conf; in raid10_takeover_raid0()
4439 struct r10conf *conf = mddev->private; in raid10_check_reshape() local
4442 if (conf->geo.far_copies != 1 && !conf->geo.far_offset) in raid10_check_reshape()
4445 if (setup_geo(&geo, mddev, geo_start) != conf->copies) in raid10_check_reshape()
4456 if (!enough(conf, -1)) in raid10_check_reshape()
4459 kfree(conf->mirrors_new); in raid10_check_reshape()
4460 conf->mirrors_new = NULL; in raid10_check_reshape()
4463 conf->mirrors_new = in raid10_check_reshape()
4467 if (!conf->mirrors_new) in raid10_check_reshape()
4486 static int calc_degraded(struct r10conf *conf) in calc_degraded() argument
4494 for (i = 0; i < conf->prev.raid_disks; i++) { in calc_degraded()
4495 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in calc_degraded()
4506 if (conf->geo.raid_disks == conf->prev.raid_disks) in calc_degraded()
4510 for (i = 0; i < conf->geo.raid_disks; i++) { in calc_degraded()
4511 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in calc_degraded()
4520 if (conf->geo.raid_disks <= conf->prev.raid_disks) in calc_degraded()
4546 struct r10conf *conf = mddev->private; in raid10_start_reshape() local
4554 if (setup_geo(&new, mddev, geo_start) != conf->copies) in raid10_start_reshape()
4557 before_length = ((1 << conf->prev.chunk_shift) * in raid10_start_reshape()
4558 conf->prev.far_copies); in raid10_start_reshape()
4559 after_length = ((1 << conf->geo.chunk_shift) * in raid10_start_reshape()
4560 conf->geo.far_copies); in raid10_start_reshape()
4585 conf->offset_diff = min_offset_diff; in raid10_start_reshape()
4586 spin_lock_irq(&conf->device_lock); in raid10_start_reshape()
4587 if (conf->mirrors_new) { in raid10_start_reshape()
4588 memcpy(conf->mirrors_new, conf->mirrors, in raid10_start_reshape()
4589 sizeof(struct raid10_info)*conf->prev.raid_disks); in raid10_start_reshape()
4591 kfree(conf->mirrors_old); in raid10_start_reshape()
4592 conf->mirrors_old = conf->mirrors; in raid10_start_reshape()
4593 conf->mirrors = conf->mirrors_new; in raid10_start_reshape()
4594 conf->mirrors_new = NULL; in raid10_start_reshape()
4596 setup_geo(&conf->geo, mddev, geo_start); in raid10_start_reshape()
4601 spin_unlock_irq(&conf->device_lock); in raid10_start_reshape()
4607 conf->reshape_progress = size; in raid10_start_reshape()
4609 conf->reshape_progress = 0; in raid10_start_reshape()
4610 conf->reshape_safe = conf->reshape_progress; in raid10_start_reshape()
4611 spin_unlock_irq(&conf->device_lock); in raid10_start_reshape()
4618 newsize = raid10_size(mddev, 0, conf->geo.raid_disks); in raid10_start_reshape()
4660 conf->prev.raid_disks) in raid10_start_reshape()
4668 } else if (rdev->raid_disk >= conf->prev.raid_disks in raid10_start_reshape()
4678 spin_lock_irq(&conf->device_lock); in raid10_start_reshape()
4679 mddev->degraded = calc_degraded(conf); in raid10_start_reshape()
4680 spin_unlock_irq(&conf->device_lock); in raid10_start_reshape()
4681 mddev->raid_disks = conf->geo.raid_disks; in raid10_start_reshape()
4682 mddev->reshape_position = conf->reshape_progress; in raid10_start_reshape()
4697 conf->reshape_checkpoint = jiffies; in raid10_start_reshape()
4704 spin_lock_irq(&conf->device_lock); in raid10_start_reshape()
4705 conf->geo = conf->prev; in raid10_start_reshape()
4706 mddev->raid_disks = conf->geo.raid_disks; in raid10_start_reshape()
4710 conf->reshape_progress = MaxSector; in raid10_start_reshape()
4711 conf->reshape_safe = MaxSector; in raid10_start_reshape()
4713 spin_unlock_irq(&conf->device_lock); in raid10_start_reshape()
4778 * (conf->offset_diff - always positive) allows a bit of slack, in reshape_request()
4788 struct r10conf *conf = mddev->private; in reshape_request() local
4804 conf->reshape_progress < raid10_size(mddev, 0, 0)) { in reshape_request()
4806 - conf->reshape_progress); in reshape_request()
4808 conf->reshape_progress > 0) in reshape_request()
4809 sector_nr = conf->reshape_progress; in reshape_request()
4826 next = first_dev_address(conf->reshape_progress - 1, in reshape_request()
4827 &conf->geo); in reshape_request()
4832 safe = last_dev_address(conf->reshape_safe - 1, in reshape_request()
4833 &conf->prev); in reshape_request()
4835 if (next + conf->offset_diff < safe) in reshape_request()
4838 last = conf->reshape_progress - 1; in reshape_request()
4839 sector_nr = last & ~(sector_t)(conf->geo.chunk_mask in reshape_request()
4840 & conf->prev.chunk_mask); in reshape_request()
4847 next = last_dev_address(conf->reshape_progress, &conf->geo); in reshape_request()
4852 safe = first_dev_address(conf->reshape_safe, &conf->prev); in reshape_request()
4857 if (next > safe + conf->offset_diff) in reshape_request()
4860 sector_nr = conf->reshape_progress; in reshape_request()
4861 last = sector_nr | (conf->geo.chunk_mask in reshape_request()
4862 & conf->prev.chunk_mask); in reshape_request()
4869 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { in reshape_request()
4871 wait_barrier(conf, false); in reshape_request()
4872 mddev->reshape_position = conf->reshape_progress; in reshape_request()
4875 - conf->reshape_progress; in reshape_request()
4877 mddev->curr_resync_completed = conf->reshape_progress; in reshape_request()
4878 conf->reshape_checkpoint = jiffies; in reshape_request()
4884 allow_barrier(conf); in reshape_request()
4887 conf->reshape_safe = mddev->reshape_position; in reshape_request()
4888 allow_barrier(conf); in reshape_request()
4891 raise_barrier(conf, 0); in reshape_request()
4894 r10_bio = raid10_alloc_init_r10buf(conf); in reshape_request()
4896 raise_barrier(conf, 1); in reshape_request()
4902 rdev = read_balance(conf, r10_bio, &max_sectors); in reshape_request()
4910 mempool_free(r10_bio, &conf->r10buf_pool); in reshape_request()
4928 if (mddev_is_clustered(mddev) && conf->cluster_sync_high <= sector_nr) { in reshape_request()
4932 conf->cluster_sync_low = sector_nr; in reshape_request()
4933 conf->cluster_sync_high = sector_nr + CLUSTER_RESYNC_WINDOW_SECTORS; in reshape_request()
4942 if (sb_reshape_pos < conf->cluster_sync_low) in reshape_request()
4943 conf->cluster_sync_low = sb_reshape_pos; in reshape_request()
4946 md_cluster_ops->resync_info_update(mddev, conf->cluster_sync_low, in reshape_request()
4947 conf->cluster_sync_high); in reshape_request()
4951 __raid10_find_phys(&conf->geo, r10_bio); in reshape_request()
4957 for (s = 0; s < conf->copies*2; s++) { in reshape_request()
4962 rdev2 = rcu_dereference(conf->mirrors[d].replacement); in reshape_request()
4965 rdev2 = rcu_dereference(conf->mirrors[d].rdev); in reshape_request()
5011 lower_barrier(conf); in reshape_request()
5017 conf->reshape_progress -= sectors_done; in reshape_request()
5019 conf->reshape_progress += sectors_done; in reshape_request()
5034 struct r10conf *conf = mddev->private; in reshape_request_write() local
5048 for (s = 0; s < conf->copies*2; s++) { in reshape_request_write()
5054 rdev = rcu_dereference(conf->mirrors[d].replacement); in reshape_request_write()
5057 rdev = rcu_dereference(conf->mirrors[d].rdev); in reshape_request_write()
5074 static void end_reshape(struct r10conf *conf) in end_reshape() argument
5076 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) in end_reshape()
5079 spin_lock_irq(&conf->device_lock); in end_reshape()
5080 conf->prev = conf->geo; in end_reshape()
5081 md_finish_reshape(conf->mddev); in end_reshape()
5083 conf->reshape_progress = MaxSector; in end_reshape()
5084 conf->reshape_safe = MaxSector; in end_reshape()
5085 spin_unlock_irq(&conf->device_lock); in end_reshape()
5087 if (conf->mddev->queue) in end_reshape()
5088 raid10_set_io_opt(conf); in end_reshape()
5089 conf->fullsync = 0; in end_reshape()
5094 struct r10conf *conf = mddev->private; in raid10_update_reshape_pos() local
5100 conf->reshape_progress = mddev->reshape_position; in raid10_update_reshape_pos()
5110 struct r10conf *conf = mddev->private; in handle_reshape_read_error() local
5116 r10b = kmalloc(struct_size(r10b, devs, conf->copies), GFP_NOIO); in handle_reshape_read_error()
5126 __raid10_find_phys(&conf->prev, r10b); in handle_reshape_read_error()
5139 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev); in handle_reshape_read_error()
5160 if (slot >= conf->copies) in handle_reshape_read_error()
5184 struct r10conf *conf = mddev->private; in end_reshape_write() local
5190 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in end_reshape_write()
5192 rdev = conf->mirrors[d].replacement; in end_reshape_write()
5195 rdev = conf->mirrors[d].rdev; in end_reshape_write()
5218 struct r10conf *conf = mddev->private; in raid10_finish_reshape() local
5232 for (d = conf->geo.raid_disks ; in raid10_finish_reshape()
5233 d < conf->geo.raid_disks - mddev->delta_disks; in raid10_finish_reshape()
5235 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev); in raid10_finish_reshape()
5238 rdev = rcu_dereference(conf->mirrors[d].replacement); in raid10_finish_reshape()
5245 mddev->chunk_sectors = 1 << conf->geo.chunk_shift; in raid10_finish_reshape()