Lines Matching refs:r10_bio

106 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
134 static void r10bio_pool_free(void *r10_bio, void *data) in r10bio_pool_free() argument
136 kfree(r10_bio); in r10bio_pool_free()
157 struct r10bio *r10_bio; in r10buf_pool_alloc() local
163 r10_bio = r10bio_pool_alloc(gfp_flags, conf); in r10buf_pool_alloc()
164 if (!r10_bio) in r10buf_pool_alloc()
189 r10_bio->devs[j].bio = bio; in r10buf_pool_alloc()
195 r10_bio->devs[j].repl_bio = bio; in r10buf_pool_alloc()
202 struct bio *rbio = r10_bio->devs[j].repl_bio; in r10buf_pool_alloc()
209 bio = r10_bio->devs[j].bio; in r10buf_pool_alloc()
220 rp->raid_bio = r10_bio; in r10buf_pool_alloc()
228 return r10_bio; in r10buf_pool_alloc()
237 if (r10_bio->devs[j].bio) in r10buf_pool_alloc()
238 bio_put(r10_bio->devs[j].bio); in r10buf_pool_alloc()
239 if (r10_bio->devs[j].repl_bio) in r10buf_pool_alloc()
240 bio_put(r10_bio->devs[j].repl_bio); in r10buf_pool_alloc()
244 r10bio_pool_free(r10_bio, conf); in r10buf_pool_alloc()
275 static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio) in put_all_bios() argument
280 struct bio **bio = & r10_bio->devs[i].bio; in put_all_bios()
284 bio = &r10_bio->devs[i].repl_bio; in put_all_bios()
285 if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio)) in put_all_bios()
291 static void free_r10bio(struct r10bio *r10_bio) in free_r10bio() argument
293 struct r10conf *conf = r10_bio->mddev->private; in free_r10bio()
295 put_all_bios(conf, r10_bio); in free_r10bio()
296 mempool_free(r10_bio, &conf->r10bio_pool); in free_r10bio()
299 static void put_buf(struct r10bio *r10_bio) in put_buf() argument
301 struct r10conf *conf = r10_bio->mddev->private; in put_buf()
303 mempool_free(r10_bio, &conf->r10buf_pool); in put_buf()
308 static void reschedule_retry(struct r10bio *r10_bio) in reschedule_retry() argument
311 struct mddev *mddev = r10_bio->mddev; in reschedule_retry()
315 list_add(&r10_bio->retry_list, &conf->retry_list); in reschedule_retry()
330 static void raid_end_bio_io(struct r10bio *r10_bio) in raid_end_bio_io() argument
332 struct bio *bio = r10_bio->master_bio; in raid_end_bio_io()
333 struct r10conf *conf = r10_bio->mddev->private; in raid_end_bio_io()
335 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) in raid_end_bio_io()
345 free_r10bio(r10_bio); in raid_end_bio_io()
351 static inline void update_head_pos(int slot, struct r10bio *r10_bio) in update_head_pos() argument
353 struct r10conf *conf = r10_bio->mddev->private; in update_head_pos()
355 conf->mirrors[r10_bio->devs[slot].devnum].head_position = in update_head_pos()
356 r10_bio->devs[slot].addr + (r10_bio->sectors); in update_head_pos()
362 static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio, in find_bio_disk() argument
369 if (r10_bio->devs[slot].bio == bio) in find_bio_disk()
371 if (r10_bio->devs[slot].repl_bio == bio) { in find_bio_disk()
378 update_head_pos(slot, r10_bio); in find_bio_disk()
384 return r10_bio->devs[slot].devnum; in find_bio_disk()
390 struct r10bio *r10_bio = bio->bi_private; in raid10_end_read_request() local
393 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_read_request()
395 slot = r10_bio->read_slot; in raid10_end_read_request()
396 rdev = r10_bio->devs[slot].rdev; in raid10_end_read_request()
400 update_head_pos(slot, r10_bio); in raid10_end_read_request()
412 set_bit(R10BIO_Uptodate, &r10_bio->state); in raid10_end_read_request()
419 if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state), in raid10_end_read_request()
424 raid_end_bio_io(r10_bio); in raid10_end_read_request()
434 (unsigned long long)r10_bio->sector); in raid10_end_read_request()
435 set_bit(R10BIO_ReadError, &r10_bio->state); in raid10_end_read_request()
436 reschedule_retry(r10_bio); in raid10_end_read_request()
440 static void close_write(struct r10bio *r10_bio) in close_write() argument
443 md_bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector, in close_write()
444 r10_bio->sectors, in close_write()
445 !test_bit(R10BIO_Degraded, &r10_bio->state), in close_write()
447 md_write_end(r10_bio->mddev); in close_write()
450 static void one_write_done(struct r10bio *r10_bio) in one_write_done() argument
452 if (atomic_dec_and_test(&r10_bio->remaining)) { in one_write_done()
453 if (test_bit(R10BIO_WriteError, &r10_bio->state)) in one_write_done()
454 reschedule_retry(r10_bio); in one_write_done()
456 close_write(r10_bio); in one_write_done()
457 if (test_bit(R10BIO_MadeGood, &r10_bio->state)) in one_write_done()
458 reschedule_retry(r10_bio); in one_write_done()
460 raid_end_bio_io(r10_bio); in one_write_done()
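
The one_write_done() fragments above are the write-completion counting pattern: the last per-device bio to finish decides whether the r10bio is retried or completed. A minimal reassembly of those lines as a reading aid; the two else branches are filled in from context and should be treated as assumptions, not a verbatim copy:

static void one_write_done(struct r10bio *r10_bio)
{
	/* last outstanding per-device write drops the count to zero */
	if (atomic_dec_and_test(&r10_bio->remaining)) {
		if (test_bit(R10BIO_WriteError, &r10_bio->state))
			reschedule_retry(r10_bio);	/* hand off to raid10d for error handling */
		else {
			close_write(r10_bio);		/* bitmap end-write + md_write_end */
			if (test_bit(R10BIO_MadeGood, &r10_bio->state))
				reschedule_retry(r10_bio);
			else
				raid_end_bio_io(r10_bio);	/* complete master_bio */
		}
	}
}
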
467 struct r10bio *r10_bio = bio->bi_private; in raid10_end_write_request() local
470 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_write_request()
478 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in raid10_end_write_request()
511 set_bit(R10BIO_WriteError, &r10_bio->state); in raid10_end_write_request()
513 r10_bio->devs[slot].bio = NULL; in raid10_end_write_request()
518 set_bit(R10BIO_WriteError, &r10_bio->state); in raid10_end_write_request()
543 set_bit(R10BIO_Uptodate, &r10_bio->state); in raid10_end_write_request()
547 r10_bio->devs[slot].addr, in raid10_end_write_request()
548 r10_bio->sectors, in raid10_end_write_request()
552 r10_bio->devs[slot].repl_bio = IO_MADE_GOOD; in raid10_end_write_request()
554 r10_bio->devs[slot].bio = IO_MADE_GOOD; in raid10_end_write_request()
556 set_bit(R10BIO_MadeGood, &r10_bio->state); in raid10_end_write_request()
565 one_write_done(r10_bio); in raid10_end_write_request()
740 struct r10bio *r10_bio, in read_balance() argument
743 const sector_t this_sector = r10_bio->sector; in read_balance()
745 int sectors = r10_bio->sectors; in read_balance()
753 raid10_find_phys(conf, r10_bio); in read_balance()
760 clear_bit(R10BIO_FailFast, &r10_bio->state); in read_balance()
779 if (r10_bio->devs[slot].bio == IO_BLOCKED) in read_balance()
781 disk = r10_bio->devs[slot].devnum; in read_balance()
784 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) in read_balance()
790 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) in read_balance()
793 dev_sector = r10_bio->devs[slot].addr; in read_balance()
830 set_bit(R10BIO_FailFast, &r10_bio->state); in read_balance()
840 new_distance = r10_bio->devs[slot].addr; in read_balance()
842 new_distance = abs(r10_bio->devs[slot].addr - in read_balance()
857 r10_bio->read_slot = slot; in read_balance()
1068 static sector_t choose_data_offset(struct r10bio *r10_bio, in choose_data_offset() argument
1072 test_bit(R10BIO_Previous, &r10_bio->state)) in choose_data_offset()
1127 struct r10bio *r10_bio) in raid10_read_request() argument
1137 int slot = r10_bio->read_slot; in raid10_read_request()
1141 if (r10_bio->devs[slot].rdev) { in raid10_read_request()
1157 disk = r10_bio->devs[slot].devnum; in raid10_read_request()
1164 err_rdev = r10_bio->devs[slot].rdev; in raid10_read_request()
1175 sectors = r10_bio->sectors; in raid10_read_request()
1192 rdev = read_balance(conf, r10_bio, &max_sectors); in raid10_read_request()
1197 (unsigned long long)r10_bio->sector); in raid10_read_request()
1199 raid_end_bio_io(r10_bio); in raid10_read_request()
1206 (unsigned long long)r10_bio->sector); in raid10_read_request()
1213 r10_bio->master_bio = bio; in raid10_read_request()
1214 r10_bio->sectors = max_sectors; in raid10_read_request()
1216 slot = r10_bio->read_slot; in raid10_read_request()
1220 r10_bio->devs[slot].bio = read_bio; in raid10_read_request()
1221 r10_bio->devs[slot].rdev = rdev; in raid10_read_request()
1223 read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr + in raid10_read_request()
1224 choose_data_offset(r10_bio, rdev); in raid10_read_request()
1229 test_bit(R10BIO_FailFast, &r10_bio->state)) in raid10_read_request()
1231 read_bio->bi_private = r10_bio; in raid10_read_request()
1236 r10_bio->sector); in raid10_read_request()
1241 static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio, in raid10_write_one_disk() argument
1253 int devnum = r10_bio->devs[n_copy].devnum; in raid10_write_one_disk()
1268 r10_bio->devs[n_copy].repl_bio = mbio; in raid10_write_one_disk()
1270 r10_bio->devs[n_copy].bio = mbio; in raid10_write_one_disk()
1272 mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr + in raid10_write_one_disk()
1273 choose_data_offset(r10_bio, rdev)); in raid10_write_one_disk()
1281 mbio->bi_private = r10_bio; in raid10_write_one_disk()
1286 r10_bio->sector); in raid10_write_one_disk()
1290 atomic_inc(&r10_bio->remaining); in raid10_write_one_disk()
1310 struct r10bio *r10_bio) in raid10_write_request() argument
1341 sectors = r10_bio->sectors; in raid10_write_request()
1392 r10_bio->read_slot = -1; /* make sure repl_bio gets freed */ in raid10_write_request()
1393 raid10_find_phys(conf, r10_bio); in raid10_write_request()
1397 max_sectors = r10_bio->sectors; in raid10_write_request()
1400 int d = r10_bio->devs[i].devnum; in raid10_write_request()
1421 r10_bio->devs[i].bio = NULL; in raid10_write_request()
1422 r10_bio->devs[i].repl_bio = NULL; in raid10_write_request()
1425 set_bit(R10BIO_Degraded, &r10_bio->state); in raid10_write_request()
1430 sector_t dev_sector = r10_bio->devs[i].addr; in raid10_write_request()
1470 r10_bio->devs[i].bio = bio; in raid10_write_request()
1474 r10_bio->devs[i].repl_bio = bio; in raid10_write_request()
1486 if (r10_bio->devs[j].bio) { in raid10_write_request()
1487 d = r10_bio->devs[j].devnum; in raid10_write_request()
1490 if (r10_bio->devs[j].repl_bio) { in raid10_write_request()
1492 d = r10_bio->devs[j].devnum; in raid10_write_request()
1509 if (max_sectors < r10_bio->sectors) in raid10_write_request()
1510 r10_bio->sectors = max_sectors; in raid10_write_request()
1512 if (r10_bio->sectors < bio_sectors(bio)) { in raid10_write_request()
1513 struct bio *split = bio_split(bio, r10_bio->sectors, in raid10_write_request()
1518 r10_bio->master_bio = bio; in raid10_write_request()
1521 atomic_set(&r10_bio->remaining, 1); in raid10_write_request()
1522 md_bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0); in raid10_write_request()
1525 if (r10_bio->devs[i].bio) in raid10_write_request()
1526 raid10_write_one_disk(mddev, r10_bio, bio, false, i); in raid10_write_request()
1527 if (r10_bio->devs[i].repl_bio) in raid10_write_request()
1528 raid10_write_one_disk(mddev, r10_bio, bio, true, i); in raid10_write_request()
1530 one_write_done(r10_bio); in raid10_write_request()
1536 struct r10bio *r10_bio; in __make_request() local
1538 r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO); in __make_request()
1540 r10_bio->master_bio = bio; in __make_request()
1541 r10_bio->sectors = sectors; in __make_request()
1543 r10_bio->mddev = mddev; in __make_request()
1544 r10_bio->sector = bio->bi_iter.bi_sector; in __make_request()
1545 r10_bio->state = 0; in __make_request()
1546 memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * conf->copies); in __make_request()
1549 raid10_read_request(mddev, bio, r10_bio); in __make_request()
1551 raid10_write_request(mddev, bio, r10_bio); in __make_request()
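
Read together, the __make_request() fragments above show how an r10bio is allocated from conf->r10bio_pool and seeded from the incoming bio before being handed to the read or write path. A minimal sketch reassembled from those lines; the function signature and the bio_data_dir() dispatch are filled in from context and are assumptions:

static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
{
	struct r10conf *conf = mddev->private;
	struct r10bio *r10_bio;

	/* one r10bio tracks the whole logical request */
	r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);

	r10_bio->master_bio = bio;			/* bio originally submitted to the array */
	r10_bio->sectors = sectors;
	r10_bio->mddev = mddev;
	r10_bio->sector = bio->bi_iter.bi_sector;	/* virtual (array) sector */
	r10_bio->state = 0;
	memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * conf->copies);

	/* dispatch assumed on the data direction of the original bio */
	if (bio_data_dir(bio) == READ)
		raid10_read_request(mddev, bio, r10_bio);
	else
		raid10_write_request(mddev, bio, r10_bio);
}
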
1916 static void __end_sync_read(struct r10bio *r10_bio, struct bio *bio, int d) in __end_sync_read() argument
1918 struct r10conf *conf = r10_bio->mddev->private; in __end_sync_read()
1921 set_bit(R10BIO_Uptodate, &r10_bio->state); in __end_sync_read()
1926 atomic_add(r10_bio->sectors, in __end_sync_read()
1933 if (test_bit(R10BIO_IsRecover, &r10_bio->state) || in __end_sync_read()
1934 atomic_dec_and_test(&r10_bio->remaining)) { in __end_sync_read()
1938 reschedule_retry(r10_bio); in __end_sync_read()
1944 struct r10bio *r10_bio = get_resync_r10bio(bio); in end_sync_read() local
1945 struct r10conf *conf = r10_bio->mddev->private; in end_sync_read()
1946 int d = find_bio_disk(conf, r10_bio, bio, NULL, NULL); in end_sync_read()
1948 __end_sync_read(r10_bio, bio, d); in end_sync_read()
1954 struct r10bio *r10_bio = bio->bi_private; in end_reshape_read() local
1956 __end_sync_read(r10_bio, bio, r10_bio->read_slot); in end_reshape_read()
1959 static void end_sync_request(struct r10bio *r10_bio) in end_sync_request() argument
1961 struct mddev *mddev = r10_bio->mddev; in end_sync_request()
1963 while (atomic_dec_and_test(&r10_bio->remaining)) { in end_sync_request()
1964 if (r10_bio->master_bio == NULL) { in end_sync_request()
1966 sector_t s = r10_bio->sectors; in end_sync_request()
1967 if (test_bit(R10BIO_MadeGood, &r10_bio->state) || in end_sync_request()
1968 test_bit(R10BIO_WriteError, &r10_bio->state)) in end_sync_request()
1969 reschedule_retry(r10_bio); in end_sync_request()
1971 put_buf(r10_bio); in end_sync_request()
1975 struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio; in end_sync_request()
1976 if (test_bit(R10BIO_MadeGood, &r10_bio->state) || in end_sync_request()
1977 test_bit(R10BIO_WriteError, &r10_bio->state)) in end_sync_request()
1978 reschedule_retry(r10_bio); in end_sync_request()
1980 put_buf(r10_bio); in end_sync_request()
1981 r10_bio = r10_bio2; in end_sync_request()
1988 struct r10bio *r10_bio = get_resync_r10bio(bio); in end_sync_write() local
1989 struct mddev *mddev = r10_bio->mddev; in end_sync_write()
1998 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in end_sync_write()
2012 set_bit(R10BIO_WriteError, &r10_bio->state); in end_sync_write()
2015 r10_bio->devs[slot].addr, in end_sync_write()
2016 r10_bio->sectors, in end_sync_write()
2018 set_bit(R10BIO_MadeGood, &r10_bio->state); in end_sync_write()
2022 end_sync_request(r10_bio); in end_sync_write()
2041 static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) in sync_request_write() argument
2049 atomic_set(&r10_bio->remaining, 1); in sync_request_write()
2053 if (!r10_bio->devs[i].bio->bi_status) in sync_request_write()
2060 fbio = r10_bio->devs[i].bio; in sync_request_write()
2061 fbio->bi_iter.bi_size = r10_bio->sectors << 9; in sync_request_write()
2065 vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9); in sync_request_write()
2072 tbio = r10_bio->devs[i].bio; in sync_request_write()
2080 d = r10_bio->devs[i].devnum; in sync_request_write()
2082 if (!r10_bio->devs[i].bio->bi_status) { in sync_request_write()
2087 int sectors = r10_bio->sectors; in sync_request_write()
2100 atomic64_add(r10_bio->sectors, &mddev->resync_mismatches); in sync_request_write()
2119 rp->raid_bio = r10_bio; in sync_request_write()
2121 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; in sync_request_write()
2128 atomic_inc(&r10_bio->remaining); in sync_request_write()
2144 tbio = r10_bio->devs[i].repl_bio; in sync_request_write()
2147 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write in sync_request_write()
2148 && r10_bio->devs[i].bio != fbio) in sync_request_write()
2150 d = r10_bio->devs[i].devnum; in sync_request_write()
2151 atomic_inc(&r10_bio->remaining); in sync_request_write()
2158 if (atomic_dec_and_test(&r10_bio->remaining)) { in sync_request_write()
2159 md_done_sync(mddev, r10_bio->sectors, 1); in sync_request_write()
2160 put_buf(r10_bio); in sync_request_write()
2174 static void fix_recovery_read_error(struct r10bio *r10_bio) in fix_recovery_read_error() argument
2183 struct mddev *mddev = r10_bio->mddev; in fix_recovery_read_error()
2185 struct bio *bio = r10_bio->devs[0].bio; in fix_recovery_read_error()
2187 int sectors = r10_bio->sectors; in fix_recovery_read_error()
2189 int dr = r10_bio->devs[0].devnum; in fix_recovery_read_error()
2190 int dw = r10_bio->devs[1].devnum; in fix_recovery_read_error()
2203 addr = r10_bio->devs[0].addr + sect, in fix_recovery_read_error()
2211 addr = r10_bio->devs[1].addr + sect; in fix_recovery_read_error()
2235 addr = r10_bio->devs[1].addr + sect; in fix_recovery_read_error()
2257 static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio) in recovery_request_write() argument
2263 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) { in recovery_request_write()
2264 fix_recovery_read_error(r10_bio); in recovery_request_write()
2265 end_sync_request(r10_bio); in recovery_request_write()
2273 d = r10_bio->devs[1].devnum; in recovery_request_write()
2274 wbio = r10_bio->devs[1].bio; in recovery_request_write()
2275 wbio2 = r10_bio->devs[1].repl_bio; in recovery_request_write()
2363 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio) in fix_read_error() argument
2366 int sectors = r10_bio->sectors; in fix_read_error()
2369 int d = r10_bio->devs[r10_bio->read_slot].devnum; in fix_read_error()
2393 r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED; in fix_read_error()
2399 int sl = r10_bio->read_slot; in fix_read_error()
2411 d = r10_bio->devs[sl].devnum; in fix_read_error()
2416 is_badblock(rdev, r10_bio->devs[sl].addr + sect, s, in fix_read_error()
2421 r10_bio->devs[sl].addr + in fix_read_error()
2434 } while (!success && sl != r10_bio->read_slot); in fix_read_error()
2442 int dn = r10_bio->devs[r10_bio->read_slot].devnum; in fix_read_error()
2447 r10_bio->devs[r10_bio->read_slot].addr in fix_read_error()
2451 r10_bio->devs[r10_bio->read_slot].bio in fix_read_error()
2460 while (sl != r10_bio->read_slot) { in fix_read_error()
2466 d = r10_bio->devs[sl].devnum; in fix_read_error()
2476 r10_bio->devs[sl].addr + in fix_read_error()
2485 choose_data_offset(r10_bio, in fix_read_error()
2496 while (sl != r10_bio->read_slot) { in fix_read_error()
2502 d = r10_bio->devs[sl].devnum; in fix_read_error()
2512 r10_bio->devs[sl].addr + in fix_read_error()
2522 choose_data_offset(r10_bio, rdev)), in fix_read_error()
2533 choose_data_offset(r10_bio, rdev)), in fix_read_error()
2548 static int narrow_write_error(struct r10bio *r10_bio, int i) in narrow_write_error() argument
2550 struct bio *bio = r10_bio->master_bio; in narrow_write_error()
2551 struct mddev *mddev = r10_bio->mddev; in narrow_write_error()
2553 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev; in narrow_write_error()
2568 int sect_to_write = r10_bio->sectors; in narrow_write_error()
2576 sector = r10_bio->sector; in narrow_write_error()
2577 sectors = ((r10_bio->sector + block_sectors) in narrow_write_error()
2589 wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector); in narrow_write_error()
2591 choose_data_offset(r10_bio, rdev); in narrow_write_error()
2609 static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio) in handle_read_error() argument
2611 int slot = r10_bio->read_slot; in handle_read_error()
2614 struct md_rdev *rdev = r10_bio->devs[slot].rdev; in handle_read_error()
2624 bio = r10_bio->devs[slot].bio; in handle_read_error()
2626 r10_bio->devs[slot].bio = NULL; in handle_read_error()
2629 r10_bio->devs[slot].bio = IO_BLOCKED; in handle_read_error()
2632 fix_read_error(conf, mddev, r10_bio); in handle_read_error()
2639 r10_bio->state = 0; in handle_read_error()
2640 raid10_read_request(mddev, r10_bio->master_bio, r10_bio); in handle_read_error()
2643 static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) in handle_write_completed() argument
2654 if (test_bit(R10BIO_IsSync, &r10_bio->state) || in handle_write_completed()
2655 test_bit(R10BIO_IsRecover, &r10_bio->state)) { in handle_write_completed()
2657 int dev = r10_bio->devs[m].devnum; in handle_write_completed()
2659 if (r10_bio->devs[m].bio == NULL || in handle_write_completed()
2660 r10_bio->devs[m].bio->bi_end_io == NULL) in handle_write_completed()
2662 if (!r10_bio->devs[m].bio->bi_status) { in handle_write_completed()
2665 r10_bio->devs[m].addr, in handle_write_completed()
2666 r10_bio->sectors, 0); in handle_write_completed()
2670 r10_bio->devs[m].addr, in handle_write_completed()
2671 r10_bio->sectors, 0)) in handle_write_completed()
2675 if (r10_bio->devs[m].repl_bio == NULL || in handle_write_completed()
2676 r10_bio->devs[m].repl_bio->bi_end_io == NULL) in handle_write_completed()
2679 if (!r10_bio->devs[m].repl_bio->bi_status) { in handle_write_completed()
2682 r10_bio->devs[m].addr, in handle_write_completed()
2683 r10_bio->sectors, 0); in handle_write_completed()
2687 r10_bio->devs[m].addr, in handle_write_completed()
2688 r10_bio->sectors, 0)) in handle_write_completed()
2692 put_buf(r10_bio); in handle_write_completed()
2696 int dev = r10_bio->devs[m].devnum; in handle_write_completed()
2697 struct bio *bio = r10_bio->devs[m].bio; in handle_write_completed()
2702 r10_bio->devs[m].addr, in handle_write_completed()
2703 r10_bio->sectors, 0); in handle_write_completed()
2707 if (!narrow_write_error(r10_bio, m)) { in handle_write_completed()
2710 &r10_bio->state); in handle_write_completed()
2714 bio = r10_bio->devs[m].repl_bio; in handle_write_completed()
2719 r10_bio->devs[m].addr, in handle_write_completed()
2720 r10_bio->sectors, 0); in handle_write_completed()
2726 list_add(&r10_bio->retry_list, &conf->bio_end_io_list); in handle_write_completed()
2737 &r10_bio->state)) in handle_write_completed()
2738 close_write(r10_bio); in handle_write_completed()
2739 raid_end_bio_io(r10_bio); in handle_write_completed()
2747 struct r10bio *r10_bio; in raid10d() local
2767 r10_bio = list_first_entry(&tmp, struct r10bio, in raid10d()
2769 list_del(&r10_bio->retry_list); in raid10d()
2771 set_bit(R10BIO_Degraded, &r10_bio->state); in raid10d()
2774 &r10_bio->state)) in raid10d()
2775 close_write(r10_bio); in raid10d()
2776 raid_end_bio_io(r10_bio); in raid10d()
2790 r10_bio = list_entry(head->prev, struct r10bio, retry_list); in raid10d()
2795 mddev = r10_bio->mddev; in raid10d()
2797 if (test_bit(R10BIO_MadeGood, &r10_bio->state) || in raid10d()
2798 test_bit(R10BIO_WriteError, &r10_bio->state)) in raid10d()
2799 handle_write_completed(conf, r10_bio); in raid10d()
2800 else if (test_bit(R10BIO_IsReshape, &r10_bio->state)) in raid10d()
2801 reshape_request_write(mddev, r10_bio); in raid10d()
2802 else if (test_bit(R10BIO_IsSync, &r10_bio->state)) in raid10d()
2803 sync_request_write(mddev, r10_bio); in raid10d()
2804 else if (test_bit(R10BIO_IsRecover, &r10_bio->state)) in raid10d()
2805 recovery_request_write(mddev, r10_bio); in raid10d()
2806 else if (test_bit(R10BIO_ReadError, &r10_bio->state)) in raid10d()
2807 handle_read_error(mddev, r10_bio); in raid10d()
2938 struct r10bio *r10_bio; in raid10_sync_request() local
3074 r10_bio = NULL; in raid10_sync_request()
3100 rb2 = r10_bio; in raid10_sync_request()
3134 r10_bio = raid10_alloc_init_r10buf(conf); in raid10_sync_request()
3135 r10_bio->state = 0; in raid10_sync_request()
3137 atomic_set(&r10_bio->remaining, 0); in raid10_sync_request()
3139 r10_bio->master_bio = (struct bio*)rb2; in raid10_sync_request()
3142 r10_bio->mddev = mddev; in raid10_sync_request()
3143 set_bit(R10BIO_IsRecover, &r10_bio->state); in raid10_sync_request()
3144 r10_bio->sector = sect; in raid10_sync_request()
3146 raid10_find_phys(conf, r10_bio); in raid10_sync_request()
3167 int d = r10_bio->devs[j].devnum; in raid10_sync_request()
3178 sector = r10_bio->devs[j].addr; in raid10_sync_request()
3192 bio = r10_bio->devs[0].bio; in raid10_sync_request()
3199 from_addr = r10_bio->devs[j].addr; in raid10_sync_request()
3207 if (r10_bio->devs[k].devnum == i) in raid10_sync_request()
3210 to_addr = r10_bio->devs[k].addr; in raid10_sync_request()
3211 r10_bio->devs[0].devnum = d; in raid10_sync_request()
3212 r10_bio->devs[0].addr = from_addr; in raid10_sync_request()
3213 r10_bio->devs[1].devnum = i; in raid10_sync_request()
3214 r10_bio->devs[1].addr = to_addr; in raid10_sync_request()
3217 bio = r10_bio->devs[1].bio; in raid10_sync_request()
3225 atomic_inc(&r10_bio->remaining); in raid10_sync_request()
3227 r10_bio->devs[1].bio->bi_end_io = NULL; in raid10_sync_request()
3230 bio = r10_bio->devs[1].repl_bio; in raid10_sync_request()
3251 atomic_inc(&r10_bio->remaining); in raid10_sync_request()
3264 if (r10_bio->devs[k].devnum == i) in raid10_sync_request()
3270 r10_bio->devs[k].addr, in raid10_sync_request()
3276 r10_bio->devs[k].addr, in raid10_sync_request()
3288 put_buf(r10_bio); in raid10_sync_request()
3291 r10_bio = rb2; in raid10_sync_request()
3300 if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) { in raid10_sync_request()
3307 int d = r10_bio->devs[j].devnum; in raid10_sync_request()
3314 r10_bio->devs[0].bio->bi_opf in raid10_sync_request()
3319 while (r10_bio) { in raid10_sync_request()
3320 struct r10bio *rb2 = r10_bio; in raid10_sync_request()
3321 r10_bio = (struct r10bio*) rb2->master_bio; in raid10_sync_request()
3352 r10_bio = raid10_alloc_init_r10buf(conf); in raid10_sync_request()
3353 r10_bio->state = 0; in raid10_sync_request()
3355 r10_bio->mddev = mddev; in raid10_sync_request()
3356 atomic_set(&r10_bio->remaining, 0); in raid10_sync_request()
3360 r10_bio->master_bio = NULL; in raid10_sync_request()
3361 r10_bio->sector = sector_nr; in raid10_sync_request()
3362 set_bit(R10BIO_IsSync, &r10_bio->state); in raid10_sync_request()
3363 raid10_find_phys(conf, r10_bio); in raid10_sync_request()
3364 r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1; in raid10_sync_request()
3367 int d = r10_bio->devs[i].devnum; in raid10_sync_request()
3372 if (r10_bio->devs[i].repl_bio) in raid10_sync_request()
3373 r10_bio->devs[i].repl_bio->bi_end_io = NULL; in raid10_sync_request()
3375 bio = r10_bio->devs[i].bio; in raid10_sync_request()
3383 sector = r10_bio->devs[i].addr; in raid10_sync_request()
3397 atomic_inc(&r10_bio->remaining); in raid10_sync_request()
3416 bio = r10_bio->devs[i].repl_bio; in raid10_sync_request()
3419 sector = r10_bio->devs[i].addr; in raid10_sync_request()
3434 int d = r10_bio->devs[i].devnum; in raid10_sync_request()
3435 if (r10_bio->devs[i].bio->bi_end_io) in raid10_sync_request()
3438 if (r10_bio->devs[i].repl_bio && in raid10_sync_request()
3439 r10_bio->devs[i].repl_bio->bi_end_io) in raid10_sync_request()
3444 put_buf(r10_bio); in raid10_sync_request()
3472 r10_bio->sectors = nr_sectors; in raid10_sync_request()
3525 r10_bio = get_resync_r10bio(bio); in raid10_sync_request()
3526 r10_bio->sectors = nr_sectors; in raid10_sync_request()
4430 struct r10bio *r10_bio; in reshape_request() local
4535 r10_bio = raid10_alloc_init_r10buf(conf); in reshape_request()
4536 r10_bio->state = 0; in reshape_request()
4538 atomic_set(&r10_bio->remaining, 0); in reshape_request()
4539 r10_bio->mddev = mddev; in reshape_request()
4540 r10_bio->sector = sector_nr; in reshape_request()
4541 set_bit(R10BIO_IsReshape, &r10_bio->state); in reshape_request()
4542 r10_bio->sectors = last - sector_nr + 1; in reshape_request()
4543 rdev = read_balance(conf, r10_bio, &max_sectors); in reshape_request()
4544 BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state)); in reshape_request()
4551 mempool_free(r10_bio, &conf->r10buf_pool); in reshape_request()
4559 read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr in reshape_request()
4561 read_bio->bi_private = r10_bio; in reshape_request()
4568 r10_bio->master_bio = read_bio; in reshape_request()
4569 r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum; in reshape_request()
4572 __raid10_find_phys(&conf->geo, r10_bio); in reshape_request()
4580 int d = r10_bio->devs[s/2].devnum; in reshape_request()
4584 b = r10_bio->devs[s/2].repl_bio; in reshape_request()
4587 b = r10_bio->devs[s/2].bio; in reshape_request()
4593 b->bi_iter.bi_sector = r10_bio->devs[s/2].addr + in reshape_request()
4604 pages = get_resync_pages(r10_bio->devs[0].bio)->pages; in reshape_request()
4621 r10_bio->sectors = nr_sectors; in reshape_request()
4624 md_sync_acct_bio(read_bio, r10_bio->sectors); in reshape_request()
4625 atomic_inc(&r10_bio->remaining); in reshape_request()
4646 static void end_reshape_request(struct r10bio *r10_bio);
4648 struct r10bio *r10_bio);
4649 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio) in reshape_request_write() argument
4659 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) in reshape_request_write()
4660 if (handle_reshape_read_error(mddev, r10_bio) < 0) { in reshape_request_write()
4662 md_done_sync(mddev, r10_bio->sectors, 0); in reshape_request_write()
4669 atomic_set(&r10_bio->remaining, 1); in reshape_request_write()
4672 int d = r10_bio->devs[s/2].devnum; in reshape_request_write()
4677 b = r10_bio->devs[s/2].repl_bio; in reshape_request_write()
4680 b = r10_bio->devs[s/2].bio; in reshape_request_write()
4688 md_sync_acct_bio(b, r10_bio->sectors); in reshape_request_write()
4689 atomic_inc(&r10_bio->remaining); in reshape_request_write()
4693 end_reshape_request(r10_bio); in reshape_request_write()
4723 struct r10bio *r10_bio) in handle_reshape_read_error() argument
4726 int sectors = r10_bio->sectors; in handle_reshape_read_error()
4741 pages = get_resync_pages(r10_bio->devs[0].bio)->pages; in handle_reshape_read_error()
4743 r10b->sector = r10_bio->sector; in handle_reshape_read_error()
4800 struct r10bio *r10_bio = get_resync_r10bio(bio); in end_reshape_write() local
4801 struct mddev *mddev = r10_bio->mddev; in end_reshape_write()
4808 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in end_reshape_write()
4822 end_reshape_request(r10_bio); in end_reshape_write()
4825 static void end_reshape_request(struct r10bio *r10_bio) in end_reshape_request() argument
4827 if (!atomic_dec_and_test(&r10_bio->remaining)) in end_reshape_request()
4829 md_done_sync(r10_bio->mddev, r10_bio->sectors, 1); in end_reshape_request()
4830 bio_put(r10_bio->master_bio); in end_reshape_request()
4831 put_buf(r10_bio); in end_reshape_request()
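
Every reference listed here touches the same small set of r10bio fields. As a reading aid, a sketch of the layout those references imply, assuming the definition in drivers/md/raid10.h; field order, the r10dev union, and the comments are assumptions drawn from the usage above rather than a verbatim copy:

struct r10bio {
	atomic_t	remaining;	/* outstanding per-device bios (one_write_done, end_sync_request) */
	sector_t	sector;		/* virtual sector of this request in the array */
	int		sectors;
	unsigned long	state;		/* R10BIO_Uptodate, R10BIO_WriteError, R10BIO_MadeGood, ... */
	struct mddev	*mddev;
	struct bio	*master_bio;	/* original bio; reused as an r10bio chain during recovery */
	int		read_slot;	/* slot used for the read; -1 for writes */
	struct list_head retry_list;	/* linkage for conf->retry_list / conf->bio_end_io_list */
	struct r10dev {
		struct bio	*bio;
		union {
			struct bio	*repl_bio;	/* writes/resync to a replacement device */
			struct md_rdev	*rdev;		/* reads, when read_slot >= 0 */
		};
		sector_t	addr;	/* device-relative start sector */
		int		devnum;
	} devs[];			/* one entry per copy */
};
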